/*
 * Drop one reference to |vfy|; when the last reference is released, shut
 * down and free every registered lookup method, the cached objects, the
 * ex_data, the verification parameters, and the store itself.
 * A NULL argument is a no-op.
 */
void X509_STORE_free(X509_STORE *vfy)
{
    int refs;
    int idx;
    STACK_OF(X509_LOOKUP) *lookups;
    X509_LOOKUP *lookup;

    if (vfy == NULL)
        return;

    CRYPTO_atomic_add(&vfy->references, -1, &refs, vfy->lock);
    REF_PRINT_COUNT("X509_STORE", vfy);
    if (refs > 0)
        return;
    REF_ASSERT_ISNT(refs < 0);

    /* Each lookup gets a shutdown callback before it is destroyed. */
    lookups = vfy->get_cert_methods;
    for (idx = 0; idx < sk_X509_LOOKUP_num(lookups); idx++) {
        lookup = sk_X509_LOOKUP_value(lookups, idx);
        X509_LOOKUP_shutdown(lookup);
        X509_LOOKUP_free(lookup);
    }
    sk_X509_LOOKUP_free(lookups);

    sk_X509_OBJECT_pop_free(vfy->objs, cleanup);
    CRYPTO_free_ex_data(CRYPTO_EX_INDEX_X509_STORE, vfy, &vfy->ex_data);
    X509_VERIFY_PARAM_free(vfy->param);
    CRYPTO_THREAD_lock_free(vfy->lock);
    OPENSSL_free(vfy);
}
/*
 * Release one reference to a precomputation table and, when the count
 * reaches zero, clear-free every stored point, scrub the pointer slots
 * and the structure itself, and free the memory.
 */
static void ec_pre_comp_clear_free(void *pre_)
{
    int refs;
    EC_PRE_COMP *pre = pre_;

    if (pre == NULL)
        return;

    CRYPTO_atomic_add(&pre->references, -1, &refs, pre->lock);
    if (refs > 0)
        return;

    CRYPTO_thread_cleanup(pre->lock);
    if (pre->points != NULL) {
        EC_POINT **slot = pre->points;

        /* The table is NULL-terminated; wipe each slot after freeing. */
        while (*slot != NULL) {
            EC_POINT_clear_free(*slot);
            vigortls_zeroize(slot, sizeof *slot);
            slot++;
        }
        free(pre->points);
    }
    vigortls_zeroize(pre, sizeof *pre);
    free(pre);
}
/*
 * Drop one reference to |a|.  On the final release the BIO_CB_FREE
 * callback (if any) is consulted first and may abort the teardown by
 * returning <= 0; otherwise ex_data, the lock, the method state and the
 * structure are released.  Returns 1 on success, 0 on NULL/failure, or
 * the callback's result when it vetoes the free.
 */
int BIO_free(BIO *a)
{
    int ret;

    if (a == NULL)
        return 0;
    if (CRYPTO_atomic_add(&a->references, -1, &ret, a->lock) <= 0)
        return 0;

    REF_PRINT_COUNT("BIO", a);
    if (ret > 0)
        return 1;
    REF_ASSERT_ISNT(ret < 0);

    /* The callback can veto destruction by returning <= 0. */
    if (a->callback != NULL) {
        ret = (int)a->callback(a, BIO_CB_FREE, NULL, 0, 0L, 1L);
        if (ret <= 0)
            return ret;
    }

    CRYPTO_free_ex_data(CRYPTO_EX_INDEX_BIO, a, &a->ex_data);
    CRYPTO_THREAD_lock_free(a->lock);
    if (a->method != NULL && a->method->destroy != NULL)
        a->method->destroy(a);
    OPENSSL_free(a);
    return 1;
}
/*
 * Manipulate the embedded reference count of a SEQUENCE-type ASN1 item.
 * op == 0 initialises the count to 1 and creates the per-object lock;
 * any other op is added atomically.  When the count drops to zero the
 * lock is destroyed.  Returns the resulting count (0 on items without
 * reference counting, or on lock-allocation failure).
 */
int asn1_do_lock(ASN1_VALUE **pval, int op, const ASN1_ITEM *it)
{
    const ASN1_AUX *aux;
    int *refcnt, result;
    CRYPTO_MUTEX **lock;

    /* Only SEQUENCE / NDEF_SEQUENCE items can carry a reference count. */
    if (it->itype != ASN1_ITYPE_SEQUENCE
        && it->itype != ASN1_ITYPE_NDEF_SEQUENCE)
        return 0;
    aux = it->funcs;
    if (aux == NULL || (aux->flags & ASN1_AFLG_REFCOUNT) == 0)
        return 0;

    refcnt = offset2ptr(*pval, aux->ref_offset);
    lock = offset2ptr(*pval, aux->ref_lock);

    if (op == 0) {
        *refcnt = 1;
        *lock = CRYPTO_thread_new();
        return *lock != NULL ? 1 : 0;
    }

    CRYPTO_atomic_add(refcnt, op, &result, *lock);
    if (result == 0)
        CRYPTO_thread_cleanup(*lock);
    return result;
}
/*
 * Drop one reference to |r|; on the final release give the key method,
 * the engine and the group's keyfinish hook a chance to tear down key
 * material, then free all components and scrub the structure.
 */
void EC_KEY_free(EC_KEY *r)
{
    int refs;

    if (r == NULL)
        return;

    CRYPTO_atomic_add(&r->references, -1, &refs, r->lock);
    REF_PRINT_COUNT("EC_KEY", r);
    if (refs > 0)
        return;
    REF_ASSERT_ISNT(refs < 0);

    if (r->meth->finish != NULL)
        r->meth->finish(r);
#ifndef OPENSSL_NO_ENGINE
    ENGINE_finish(r->engine);
#endif
    if (r->group != NULL && r->group->meth->keyfinish)
        r->group->meth->keyfinish(r);

    CRYPTO_free_ex_data(CRYPTO_EX_INDEX_EC_KEY, r, &r->ex_data);
    CRYPTO_THREAD_lock_free(r->lock);
    EC_GROUP_free(r->group);
    EC_POINT_free(r->pub_key);
    BN_clear_free(r->priv_key);
    /* Wipe before release: the struct held private-key material. */
    OPENSSL_clear_free((void *)r, sizeof(EC_KEY));
}
/*
 * engine_free_util() - drop one *structural* reference to |e| and destroy it
 * when the count reaches zero.  Always returns 1 (NULL argument is a no-op).
 *
 * Three decrement strategies, chosen at build/run time:
 *   - HAVE_ATOMICS: CRYPTO_DOWN_REF on the struct_ref counter;
 *   - otherwise, if |not_locked|, a CRYPTO_atomic_add(-1) under the global
 *     engine lock;
 *   - otherwise the caller already holds the lock and a plain --struct_ref
 *     is safe.
 * NOTE(review): engine_ref_debug(...) is written without a trailing
 * semicolon, so the macro presumably supplies its own — confirm before
 * reformatting.  On final release the dynamically allocated pkey method
 * tables are freed, the ENGINE's destroy hook (if any) runs, and ex_data
 * plus the structure itself are released.
 */
int engine_free_util(ENGINE *e, int not_locked) { int i; if (e == NULL) return 1; #ifdef HAVE_ATOMICS CRYPTO_DOWN_REF(&e->struct_ref, &i, global_engine_lock); #else if (not_locked) CRYPTO_atomic_add(&e->struct_ref, -1, &i, global_engine_lock); else i = --e->struct_ref; #endif engine_ref_debug(e, 0, -1) if (i > 0) return 1; REF_ASSERT_ISNT(i < 0); /* Free up any dynamically allocated public key methods */ engine_pkey_meths_free(e); engine_pkey_asn1_meths_free(e); /* * Give the ENGINE a chance to do any structural cleanup corresponding to * allocation it did in its constructor (eg. unload error strings) */ if (e->destroy) e->destroy(e); CRYPTO_free_ex_data(CRYPTO_EX_INDEX_ENGINE, e, &e->ex_data); OPENSSL_free(e); return 1; }
/*
 * Drop one reference to |r|; on the final release run the method's
 * finish hook and the engine teardown, then clear-free all key BIGNUMs
 * (they may hold secret material) and release the structure.
 */
void DSA_free(DSA *r)
{
    int refs;

    if (r == NULL)
        return;

    CRYPTO_atomic_add(&r->references, -1, &refs, r->lock);
    REF_PRINT_COUNT("DSA", r);
    if (refs > 0)
        return;
    REF_ASSERT_ISNT(refs < 0);

    if (r->meth->finish)
        r->meth->finish(r);
#ifndef OPENSSL_NO_ENGINE
    ENGINE_finish(r->engine);
#endif

    CRYPTO_free_ex_data(CRYPTO_EX_INDEX_DSA, r, &r->ex_data);
    CRYPTO_THREAD_lock_free(r->lock);
    BN_clear_free(r->p);
    BN_clear_free(r->q);
    BN_clear_free(r->g);
    BN_clear_free(r->pub_key);
    BN_clear_free(r->priv_key);
    OPENSSL_free(r);
}
/*
 * Drop one reference to |dso|.  On the final release the method's
 * dso_unload and finish hooks run; if either fails an error is raised
 * and 0 is returned.  Returns 1 on success, 0 on error or NULL input.
 */
int DSO_free(DSO *dso)
{
    int refs;

    if (dso == NULL) {
        DSOerr(DSO_F_DSO_FREE, ERR_R_PASSED_NULL_PARAMETER);
        return 0;
    }
    if (CRYPTO_atomic_add(&dso->references, -1, &refs, dso->lock) <= 0)
        return 0;
    if (refs > 0)
        return 1;

    if (dso->meth->dso_unload != NULL && !dso->meth->dso_unload(dso)) {
        DSOerr(DSO_F_DSO_FREE, DSO_R_UNLOAD_FAILED);
        return 0;
    }
    if (dso->meth->finish != NULL && !dso->meth->finish(dso)) {
        DSOerr(DSO_F_DSO_FREE, DSO_R_FINISH_FAILED);
        return 0;
    }

    sk_void_free(dso->meth_data);
    free(dso->filename);
    free(dso->loaded_filename);
    CRYPTO_thread_cleanup(dso->lock);
    free(dso);
    return 1;
}
/*
 * Drop one reference to |dso|.  A NULL argument is treated as success.
 * On the final release the underlying library is unloaded (unless
 * DSO_FLAG_NO_UNLOAD_ON_FREE is set) and the method's finish hook runs;
 * a failure in either raises an error and returns 0.  Returns 1 on
 * success.
 */
int DSO_free(DSO *dso)
{
    int refs;

    if (dso == NULL)
        return 1;
    if (CRYPTO_atomic_add(&dso->references, -1, &refs, dso->lock) <= 0)
        return 0;

    REF_PRINT_COUNT("DSO", dso);
    if (refs > 0)
        return 1;
    REF_ASSERT_ISNT(refs < 0);

    /* Unload unless the caller explicitly asked to keep the library. */
    if ((dso->flags & DSO_FLAG_NO_UNLOAD_ON_FREE) == 0) {
        if (dso->meth->dso_unload != NULL && !dso->meth->dso_unload(dso)) {
            DSOerr(DSO_F_DSO_FREE, DSO_R_UNLOAD_FAILED);
            return 0;
        }
    }
    if (dso->meth->finish != NULL && !dso->meth->finish(dso)) {
        DSOerr(DSO_F_DSO_FREE, DSO_R_FINISH_FAILED);
        return 0;
    }

    sk_void_free(dso->meth_data);
    OPENSSL_free(dso->filename);
    OPENSSL_free(dso->loaded_filename);
    CRYPTO_THREAD_lock_free(dso->lock);
    OPENSSL_free(dso);
    return 1;
}
/*
 * Manipulate the embedded reference count of a SEQUENCE-type ASN1 item.
 * op == 0 initialises the count to 1 and allocates the per-object lock;
 * any other op is added atomically.  When the count drops to zero the
 * lock is freed.  Returns the resulting count (0 on items without
 * reference counting, or on lock-allocation failure).
 */
int asn1_do_lock(ASN1_VALUE **pval, int op, const ASN1_ITEM *it)
{
    const ASN1_AUX *aux;
    int *refcnt, result;
    CRYPTO_RWLOCK **lock;

    /* Only SEQUENCE / NDEF_SEQUENCE items can carry a reference count. */
    if (it->itype != ASN1_ITYPE_SEQUENCE
        && it->itype != ASN1_ITYPE_NDEF_SEQUENCE)
        return 0;
    aux = it->funcs;
    if (aux == NULL || (aux->flags & ASN1_AFLG_REFCOUNT) == 0)
        return 0;

    refcnt = offset2ptr(*pval, aux->ref_offset);
    lock = offset2ptr(*pval, aux->ref_lock);

    if (op == 0) {
        *refcnt = 1;
        *lock = CRYPTO_THREAD_lock_new();
        return *lock != NULL ? 1 : 0;
    }

    CRYPTO_atomic_add(refcnt, op, &result, *lock);
#ifdef REF_PRINT
    fprintf(stderr, "%p:%4d:%s\n", it, *refcnt, it->sname);
#endif
    REF_ASSERT_ISNT(result < 0);
    if (result == 0)
        CRYPTO_THREAD_lock_free(*lock);
    return result;
}
/*
 * Drop one reference to the CERT structure |c|; on the final release
 * free every owned component (temporary DH key, certificate slots,
 * signature-algorithm lists, stores, custom extensions, PSK hint) and
 * the structure itself.
 */
void ssl_cert_free(CERT *c)
{
    int refs;

    if (c == NULL)
        return;

    CRYPTO_atomic_add(&c->references, -1, &refs, c->lock);
    REF_PRINT_COUNT("CERT", c);
    if (refs > 0)
        return;
    REF_ASSERT_ISNT(refs < 0);

#ifndef OPENSSL_NO_DH
    EVP_PKEY_free(c->dh_tmp);
#endif
    ssl_cert_clear_certs(c);
    OPENSSL_free(c->conf_sigalgs);
    OPENSSL_free(c->client_sigalgs);
    OPENSSL_free(c->shared_sigalgs);
    OPENSSL_free(c->ctypes);
    X509_STORE_free(c->verify_store);
    X509_STORE_free(c->chain_store);
    custom_exts_free(&c->cli_ext);
    custom_exts_free(&c->srv_ext);
#ifndef OPENSSL_NO_PSK
    OPENSSL_free(c->psk_identity_hint);
#endif
    CRYPTO_THREAD_lock_free(c->lock);
    OPENSSL_free(c);
}
/* Take an extra reference on |a|.  Returns 1 on success, 0 on failure. */
int BIO_up_ref(BIO *a)
{
    int refs;

    if (CRYPTO_atomic_add(&a->references, 1, &refs, a->lock) <= 0)
        return 0;
    REF_PRINT_COUNT("BIO", a);
    /* After a successful increment the count must be at least 2. */
    REF_ASSERT_ISNT(refs < 2);
    return refs > 1;
}
/* Take an extra reference on |pkey|.  Returns 1 on success, 0 on failure. */
int EVP_PKEY_up_ref(EVP_PKEY *pkey)
{
    int refs;

    if (CRYPTO_atomic_add(&pkey->references, 1, &refs, pkey->lock) <= 0)
        return 0;
    REF_PRINT_COUNT("EVP_PKEY", pkey);
    /* After a successful increment the count must be at least 2. */
    REF_ASSERT_ISNT(refs < 2);
    return refs > 1;
}
/* Take an extra reference on |crl|.  Returns 1 on success, 0 on failure. */
int X509_CRL_up_ref(X509_CRL *crl)
{
    int refs;

    if (CRYPTO_atomic_add(&crl->references, 1, &refs, crl->lock) <= 0)
        return 0;
    REF_PRINT_COUNT("X509_CRL", crl);
    /* After a successful increment the count must be at least 2. */
    REF_ASSERT_ISNT(refs < 2);
    return refs > 1;
}
/* Take an extra reference on |r|.  Returns 1 on success, 0 on failure. */
int DSA_up_ref(DSA *r)
{
    int refs;

    if (CRYPTO_atomic_add(&r->references, 1, &refs, r->lock) <= 0)
        return 0;
    REF_PRINT_COUNT("DSA", r);
    /* After a successful increment the count must be at least 2. */
    REF_ASSERT_ISNT(refs < 2);
    return refs > 1;
}
/*
 * Take an extra reference on |vfy|.  Returns 1 on success, 0 on failure.
 *
 * Fix: REF_PRINT_COUNT previously referenced a nonexistent variable 'a'
 * (the parameter is named 'vfy'), which fails to compile whenever
 * reference-count debugging (REF_PRINT) is enabled.
 */
int X509_STORE_up_ref(X509_STORE *vfy)
{
    int i;

    if (CRYPTO_atomic_add(&vfy->references, 1, &i, vfy->lock) <= 0)
        return 0;
    REF_PRINT_COUNT("X509_STORE", vfy);
    /* After a successful increment the count must be at least 2. */
    REF_ASSERT_ISNT(i < 2);
    return ((i > 1) ? 1 : 0);
}
/*
 * "Duplicate" a precomputation table.  The tables are immutable once
 * built, so duplication is simply taking one more reference and handing
 * back the same pointer.
 */
static void *ec_pre_comp_dup(void *src_)
{
    int refs;
    EC_PRE_COMP *src = src_;

    CRYPTO_atomic_add(&src->references, 1, &refs, src->lock);
    return src_;
}
/*
 * Take an extra reference on |dso|.  Raises an error and returns 0 on a
 * NULL argument or a failed increment; returns 1 on success.
 */
int DSO_up_ref(DSO *dso)
{
    int refs;

    if (dso == NULL) {
        DSOerr(DSO_F_DSO_UP_REF, ERR_R_PASSED_NULL_PARAMETER);
        return 0;
    }
    if (CRYPTO_atomic_add(&dso->references, 1, &refs, dso->lock) <= 0)
        return 0;
    return refs > 1;
}
/*
 * Drop one reference to |x|; on the final release free the key payload,
 * any attached X509 attributes, and the structure itself.
 */
void EVP_PKEY_free(EVP_PKEY *x)
{
    int refs;

    if (x == NULL)
        return;

    CRYPTO_atomic_add(&x->references, -1, &refs, x->lock);
    REF_PRINT_COUNT("EVP_PKEY", x);
    if (refs > 0)
        return;
    REF_ASSERT_ISNT(refs < 0);

    EVP_PKEY_free_it(x);
    sk_X509_ATTRIBUTE_pop_free(x->attributes, X509_ATTRIBUTE_free);
    OPENSSL_free(x);
}
/*
 * Drop one reference to |r|; on the final release free the group and
 * keys, scrub the structure (it held private-key state), and free it.
 */
void GOST_KEY_free(GOST_KEY *r)
{
    int refs;

    if (r == NULL)
        return;

    CRYPTO_atomic_add(&r->references, -1, &refs, r->lock);
    if (refs > 0)
        return;

    EC_GROUP_free(r->group);
    EC_POINT_free(r->pub_key);
    BN_clear_free(r->priv_key);
    /* Wipe the struct before release so no key residue remains. */
    OPENSSL_cleanse((void *)r, sizeof(GOST_KEY));
    free(r);
}
/*
 * Release one reference to a precomputation table and, when the count
 * reaches zero, free the lock, every stored point, the point array and
 * the structure itself (non-scrubbing variant).
 */
static void ec_pre_comp_free(void *pre_)
{
    int refs;
    EC_PRE_COMP *pre = pre_;

    if (pre == NULL)
        return;

    CRYPTO_atomic_add(&pre->references, -1, &refs, pre->lock);
    if (refs > 0)
        return;

    CRYPTO_thread_cleanup(pre->lock);
    if (pre->points != NULL) {
        EC_POINT **slot;

        /* The table is NULL-terminated. */
        for (slot = pre->points; *slot != NULL; slot++)
            EC_POINT_free(*slot);
        free(pre->points);
    }
    free(pre);
}
/*
 * Drop one reference to |r|; on the final release run the method's
 * finish hook and the engine teardown, then clear-free every key
 * component (n, e, d, CRT parameters), the blinding state, and the
 * structure itself.
 */
void RSA_free(RSA *r)
{
    int refs;

    if (r == NULL)
        return;

    CRYPTO_atomic_add(&r->references, -1, &refs, r->lock);
    REF_PRINT_COUNT("RSA", r);
    if (refs > 0)
        return;
    REF_ASSERT_ISNT(refs < 0);

    if (r->meth != NULL && r->meth->finish != NULL)
        r->meth->finish(r);
#ifndef OPENSSL_NO_ENGINE
    ENGINE_finish(r->engine);
#endif

    CRYPTO_free_ex_data(CRYPTO_EX_INDEX_RSA, r, &r->ex_data);
    CRYPTO_THREAD_lock_free(r->lock);
    BN_clear_free(r->n);
    BN_clear_free(r->e);
    BN_clear_free(r->d);
    BN_clear_free(r->p);
    BN_clear_free(r->q);
    BN_clear_free(r->dmp1);
    BN_clear_free(r->dmq1);
    BN_clear_free(r->iqmp);
    BN_BLINDING_free(r->blinding);
    BN_BLINDING_free(r->mt_blinding);
    OPENSSL_free(r->bignum_data);
    OPENSSL_free(r);
}
/*-
 * ssl_get_prev_session attempts to find an SSL_SESSION to be used to resume
 * this connection. It is only called by servers.
 *
 * hello: The parsed ClientHello data
 *
 * Returns:
 *   -1: fatal error
 *    0: no session found
 *    1: a session may have been found.
 *
 * Side effects:
 *   - If a session is found then s->session is pointed at it (after freeing an
 *     existing session if need be) and s->verify_result is set from the session.
 *   - Both for new and resumed sessions, s->ext.ticket_expected is set to 1
 *     if the server should issue a new session ticket (to 0 otherwise).
 *
 * Fix: the fourth argument of the get_session_cb call had been corrupted by
 * an encoding round-trip into the character U+00A9 ('©'); it must be &copy,
 * the address of the local 'copy' flag.
 */
int ssl_get_prev_session(SSL *s, CLIENTHELLO_MSG *hello)
{
    /* This is used only by servers. */
    SSL_SESSION *ret = NULL;
    int fatal = 0, discard;
    int try_session_cache = 0;
    TICKET_RETURN r;

    if (SSL_IS_TLS13(s)) {
        /* TLSv1.3 resumption is driven by the PSK extensions. */
        if (!tls_parse_extension(s, TLSEXT_IDX_psk_kex_modes,
                                 SSL_EXT_CLIENT_HELLO, hello->pre_proc_exts,
                                 NULL, 0)
                || !tls_parse_extension(s, TLSEXT_IDX_psk, SSL_EXT_CLIENT_HELLO,
                                        hello->pre_proc_exts, NULL, 0))
            return -1;

        ret = s->session;
    } else {
        /* sets s->ext.ticket_expected */
        r = tls_get_ticket_from_client(s, hello, &ret);
        switch (r) {
        case TICKET_FATAL_ERR_MALLOC:
        case TICKET_FATAL_ERR_OTHER:
            fatal = 1;
            SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_SSL_GET_PREV_SESSION,
                     ERR_R_INTERNAL_ERROR);
            goto err;
        case TICKET_NONE:
        case TICKET_EMPTY:
            if (hello->session_id_len > 0)
                try_session_cache = 1;
            break;
        case TICKET_NO_DECRYPT:
        case TICKET_SUCCESS:
        case TICKET_SUCCESS_RENEW:
            break;
        }
    }

    if (try_session_cache
            && ret == NULL
            && !(s->session_ctx->session_cache_mode
                 & SSL_SESS_CACHE_NO_INTERNAL_LOOKUP)) {
        SSL_SESSION data;

        data.ssl_version = s->version;
        memcpy(data.session_id, hello->session_id, hello->session_id_len);
        data.session_id_length = hello->session_id_len;

        CRYPTO_THREAD_read_lock(s->session_ctx->lock);
        ret = lh_SSL_SESSION_retrieve(s->session_ctx->sessions, &data);
        if (ret != NULL) {
            /* don't allow other threads to steal it: */
            SSL_SESSION_up_ref(ret);
        }
        CRYPTO_THREAD_unlock(s->session_ctx->lock);
        if (ret == NULL)
            CRYPTO_atomic_add(&s->session_ctx->stats.sess_miss, 1, &discard,
                              s->session_ctx->lock);
    }

    if (try_session_cache
            && ret == NULL
            && s->session_ctx->get_session_cb != NULL) {
        int copy = 1;

        ret = s->session_ctx->get_session_cb(s, hello->session_id,
                                             hello->session_id_len, &copy);

        if (ret != NULL) {
            CRYPTO_atomic_add(&s->session_ctx->stats.sess_cb_hit, 1, &discard,
                              s->session_ctx->lock);

            /*
             * Increment reference count now if the session callback asks us
             * to do so (note that if the session structures returned by the
             * callback are shared between threads, it must handle the
             * reference count itself [i.e. copy == 0], or things won't be
             * thread-safe).
             */
            if (copy)
                SSL_SESSION_up_ref(ret);

            /*
             * Add the externally cached session to the internal cache as
             * well if and only if we are supposed to.
             */
            if (!(s->session_ctx->session_cache_mode
                  & SSL_SESS_CACHE_NO_INTERNAL_STORE)) {
                /*
                 * Either return value of SSL_CTX_add_session should not
                 * interrupt the session resumption process. The return
                 * value is intentionally ignored.
                 */
                SSL_CTX_add_session(s->session_ctx, ret);
            }
        }
    }

    if (ret == NULL)
        goto err;

    /* Now ret is non-NULL and we own one of its reference counts. */

    /* Check TLS version consistency */
    if (ret->ssl_version != s->version)
        goto err;

    if (ret->sid_ctx_length != s->sid_ctx_length
            || memcmp(ret->sid_ctx, s->sid_ctx, ret->sid_ctx_length)) {
        /*
         * We have the session requested by the client, but we don't want to
         * use it in this context.
         */
        goto err;               /* treat like cache miss */
    }

    if ((s->verify_mode & SSL_VERIFY_PEER) && s->sid_ctx_length == 0) {
        /*
         * We can't be sure if this session is being used out of context,
         * which is especially important for SSL_VERIFY_PEER. The application
         * should have used SSL[_CTX]_set_session_id_context. For this error
         * case, we generate an error instead of treating the event like a
         * cache miss (otherwise it would be easy for applications to
         * effectively disable the session cache by accident without anyone
         * noticing).
         */
        SSLfatal(s, SSL_AD_INTERNAL_ERROR, SSL_F_SSL_GET_PREV_SESSION,
                 SSL_R_SESSION_ID_CONTEXT_UNINITIALIZED);
        fatal = 1;
        goto err;
    }

    if (ret->timeout < (long)(time(NULL) - ret->time)) { /* timeout */
        CRYPTO_atomic_add(&s->session_ctx->stats.sess_timeout, 1, &discard,
                          s->session_ctx->lock);
        if (try_session_cache) {
            /* session was from the cache, so remove it */
            SSL_CTX_remove_session(s->session_ctx, ret);
        }
        goto err;
    }

    /* Check extended master secret extension consistency */
    if (ret->flags & SSL_SESS_FLAG_EXTMS) {
        /* If old session includes extms, but new does not: abort handshake */
        if (!(s->s3->flags & TLS1_FLAGS_RECEIVED_EXTMS)) {
            SSLfatal(s, SSL_AD_ILLEGAL_PARAMETER, SSL_F_SSL_GET_PREV_SESSION,
                     SSL_R_INCONSISTENT_EXTMS);
            fatal = 1;
            goto err;
        }
    } else if (s->s3->flags & TLS1_FLAGS_RECEIVED_EXTMS) {
        /* If new session includes extms, but old does not: do not resume */
        goto err;
    }

    if (!SSL_IS_TLS13(s)) {
        /* We already did this for TLS1.3 */
        SSL_SESSION_free(s->session);
        s->session = ret;
    }

    CRYPTO_atomic_add(&s->session_ctx->stats.sess_hit, 1, &discard,
                      s->session_ctx->lock);
    s->verify_result = s->session->verify_result;
    return 1;

 err:
    if (ret != NULL) {
        SSL_SESSION_free(ret);
        /* In TLSv1.3 s->session was already set to ret, so we NULL it out */
        if (SSL_IS_TLS13(s))
            s->session = NULL;

        if (!try_session_cache) {
            /*
             * The session was from a ticket, so we should issue a ticket for
             * the new session
             */
            s->ext.ticket_expected = 1;
        }
    }
    if (fatal)
        return -1;

    return 0;
}
/*
 * Take an extra reference on |pkey|.  This variant reports no result;
 * the post-increment count is discarded.
 */
void EVP_PKEY_up_ref(EVP_PKEY *pkey)
{
    int refs;

    CRYPTO_atomic_add(&pkey->references, 1, &refs, pkey->lock);
}
/*
 * SSL_CTX_add_session() - insert session |c| into |ctx|'s internal cache.
 *
 * The cache takes exactly ONE reference on |c| even though it reaches the
 * session two ways (the doubly linked list and the lhash).  The up-ref is
 * done before taking ctx->lock; if it turns out |c| was already cached,
 * that reference is given back via SSL_SESSION_free below.
 *
 * Under the write lock, lh_SSL_SESSION_insert() returns:
 *   - a session s != c with the same ID: a genuine collision (e.g. two
 *     threads fetched the same session from an external cache).  The old
 *     entry is unlinked and freed, and s is set to NULL so |c| is treated
 *     as a fresh insertion;
 *   - NULL while retrieve() also finds nothing: the insert itself failed
 *     (OOM inside the lhash), so s is set to c to trigger the ref give-back
 *     and skip the list add;
 *   - NULL with retrieve() finding |c|: normal successful insertion.
 *
 * Returns 1 when |c| became a new cache entry (after which the cache is
 * trimmed to its size limit, bumping stats.sess_cache_full per eviction),
 * 0 when it was already present or could not be inserted.
 *
 * NOTE(review): the entire insert/trim sequence must stay under the single
 * CRYPTO_THREAD_write_lock(ctx->lock) — do not reorder.
 */
int SSL_CTX_add_session(SSL_CTX *ctx, SSL_SESSION *c) { int ret = 0, discard; SSL_SESSION *s; /* * add just 1 reference count for the SSL_CTX's session cache even though * it has two ways of access: each session is in a doubly linked list and * an lhash */ SSL_SESSION_up_ref(c); /* * if session c is in already in cache, we take back the increment later */ CRYPTO_THREAD_write_lock(ctx->lock); s = lh_SSL_SESSION_insert(ctx->sessions, c); /* * s != NULL iff we already had a session with the given PID. In this * case, s == c should hold (then we did not really modify * ctx->sessions), or we're in trouble. */ if (s != NULL && s != c) { /* We *are* in trouble ... */ SSL_SESSION_list_remove(ctx, s); SSL_SESSION_free(s); /* * ... so pretend the other session did not exist in cache (we cannot * handle two SSL_SESSION structures with identical session ID in the * same cache, which could happen e.g. when two threads concurrently * obtain the same session from an external cache) */ s = NULL; } else if (s == NULL && lh_SSL_SESSION_retrieve(ctx->sessions, c) == NULL) { /* s == NULL can also mean OOM error in lh_SSL_SESSION_insert ... */ /* * ... so take back the extra reference and also don't add * the session to the SSL_SESSION_list at this time */ s = c; } /* Put at the head of the queue unless it is already in the cache */ if (s == NULL) SSL_SESSION_list_add(ctx, c); if (s != NULL) { /* * existing cache entry -- decrement previously incremented reference * count because it already takes into account the cache */ SSL_SESSION_free(s); /* s == c */ ret = 0; } else { /* * new cache entry -- remove old ones if cache has become too large */ ret = 1; if (SSL_CTX_sess_get_cache_size(ctx) > 0) { while (SSL_CTX_sess_number(ctx) > SSL_CTX_sess_get_cache_size(ctx)) { if (!remove_session_lock(ctx, ctx->session_cache_tail, 0)) break; else CRYPTO_atomic_add(&ctx->stats.sess_cache_full, 1, &discard, ctx->lock); } } } CRYPTO_THREAD_unlock(ctx->lock); return ret; }