// rand_state_get pops a |rand_state| from the head of // |rand_state_free_list| and returns it. If the list is empty, it // creates a fresh |rand_state| and returns that instead. static struct rand_state *rand_state_get(void) { struct rand_state *state = NULL; CRYPTO_STATIC_MUTEX_lock_write(rand_state_lock_bss_get()); state = *rand_state_free_list_bss_get(); if (state != NULL) { *rand_state_free_list_bss_get() = state->next; } CRYPTO_STATIC_MUTEX_unlock_write(rand_state_lock_bss_get()); if (state != NULL) { return state; } state = OPENSSL_malloc(sizeof(struct rand_state)); if (state == NULL) { return NULL; } rand_state_init(state); #if defined(BORINGSSL_FIPS) CRYPTO_STATIC_MUTEX_lock_write(rand_state_lock_bss_get()); state->next_all = *rand_state_all_list_bss_get(); *rand_state_all_list_bss_get() = state; CRYPTO_STATIC_MUTEX_unlock_write(rand_state_lock_bss_get()); #endif return state; }
// rand_state_clear_all wipes the DRBG state of every |rand_state| on
// |rand_state_all_list|. Both locks are intentionally never released: any
// thread that later calls |RAND_bytes| will block rather than operate on
// cleared state.
static void rand_state_clear_all(void) {
  CRYPTO_STATIC_MUTEX_lock_write(rand_drbg_lock_bss_get());
  CRYPTO_STATIC_MUTEX_lock_write(rand_state_lock_bss_get());

  struct rand_state *cur = *rand_state_all_list_bss_get();
  while (cur != NULL) {
    CTR_DRBG_clear(&cur->drbg);
    cur = cur->next_all;
  }

  // Deliberately no unlock; see the comment above.
}
void RAND_enable_fork_unsafe_buffering(int fd) { if (fd >= 0) { fd = dup(fd); if (fd < 0) { abort(); } } else { fd = kUnset; } CRYPTO_STATIC_MUTEX_lock_write(&requested_lock); urandom_buffering_requested = 1; urandom_fd_requested = fd; CRYPTO_STATIC_MUTEX_unlock_write(&requested_lock); CRYPTO_once(&once, init_once); if (urandom_buffering != 1) { abort(); // Already initialized } if (fd >= 0) { if (urandom_fd == kHaveGetrandom) { close(fd); } else if (urandom_fd != fd) { abort(); // Already initialized. } } }
/* RAND_set_urandom_fd installs a duplicate of |fd| as the urandom source.
 * It aborts if the RNG has already been used (|urandom_fd| != -2) or if the
 * descriptor cannot be duplicated. */
void RAND_set_urandom_fd(int fd) {
  CRYPTO_STATIC_MUTEX_lock_write(&global_lock);

  if (urandom_fd != -2) {
    /* |RAND_set_urandom_fd| may not be called after the RNG is used. */
    abort();
  }

  /* Duplicate the descriptor, retrying if interrupted by a signal. */
  for (;;) {
    urandom_fd = dup(fd);
    if (urandom_fd != -1 || errno != EINTR) {
      break;
    }
  }
  if (urandom_fd < 0) {
    abort();
  }

  CRYPTO_STATIC_MUTEX_unlock(&global_lock);
}
/* RAND_cleanup frees all buffered entropy, closes any cached urandom file
 * descriptor and resets the global state so the module can be initialized
 * again. */
void RAND_cleanup(void) {
  struct rand_buffer *cur;

  CRYPTO_STATIC_MUTEX_lock_write(&global_lock);
  /* Drain the buffer list. The loop invariant leaves |list_head| == NULL on
   * exit, so no separate reset of it is needed. */
  while ((cur = list_head) != NULL) {
    list_head = cur->next;
    OPENSSL_free(cur);
  }
  if (urandom_fd >= 0) {
    close(urandom_fd);
  }
  /* -2 is the "not yet initialized" sentinel checked elsewhere (e.g. by
   * |RAND_set_urandom_fd|). */
  urandom_fd = -2;
  CRYPTO_STATIC_MUTEX_unlock(&global_lock);
}
// RAND_set_urandom_fd requests that a duplicate of |fd| be used as the
// urandom source. Aborts if duplication fails or if the RNG was already
// initialized with a different descriptor.
void RAND_set_urandom_fd(int fd) {
  // Own our copy of the descriptor so the caller may close theirs.
  int our_fd = dup(fd);
  if (our_fd < 0) {
    abort();
  }

  CRYPTO_STATIC_MUTEX_lock_write(&requested_lock);
  urandom_fd_requested = our_fd;
  CRYPTO_STATIC_MUTEX_unlock_write(&requested_lock);

  CRYPTO_once(&once, init_once);

  if (urandom_fd == kHaveGetrandom) {
    // getrandom(2) is in use; the duplicate is unneeded.
    close(our_fd);
  } else if (urandom_fd != our_fd) {
    abort();  // Already initialized.
  }
}
/* policy_cache_set returns the policy cache of |x|, computing and storing
 * it on first use. The result is owned by |x|; callers must not free it. */
const X509_POLICY_CACHE *policy_cache_set(X509 *x) {
  X509_POLICY_CACHE *result;

  /* Fast path: the cache is usually already present, so a read lock
   * suffices. */
  CRYPTO_STATIC_MUTEX_lock_read(&g_x509_policy_cache_lock);
  result = x->policy_cache;
  CRYPTO_STATIC_MUTEX_unlock(&g_x509_policy_cache_lock);
  if (result != NULL) {
    return result;
  }

  /* Slow path: re-check under the write lock and build the cache only if
   * another thread has not beaten us to it. */
  CRYPTO_STATIC_MUTEX_lock_write(&g_x509_policy_cache_lock);
  if (x->policy_cache == NULL) {
    policy_cache_new(x);
  }
  result = x->policy_cache;
  CRYPTO_STATIC_MUTEX_unlock(&g_x509_policy_cache_lock);

  return result;
}
// rand_state_put pushes |state| onto |rand_state_free_list|. static void rand_state_put(struct rand_state *state) { CRYPTO_STATIC_MUTEX_lock_write(rand_state_lock_bss_get()); state->next = *rand_state_free_list_bss_get(); *rand_state_free_list_bss_get() = state; CRYPTO_STATIC_MUTEX_unlock_write(rand_state_lock_bss_get()); }
/* x509v3_cache_extensions parses the commonly-used extensions of |x| once
 * and caches the results in fields of the X509 structure (|ex_flags|,
 * |ex_kusage|, |ex_xkusage|, |skid|, |akid|, ...). The EXFLAG_SET bit marks
 * the cache as populated; the whole operation is serialized on
 * |g_x509_cache_extensions_lock|. */
static void x509v3_cache_extensions(X509 *x)
{
    BASIC_CONSTRAINTS *bs;
    PROXY_CERT_INFO_EXTENSION *pci;
    ASN1_BIT_STRING *usage;
    ASN1_BIT_STRING *ns;
    EXTENDED_KEY_USAGE *extusage;
    X509_EXTENSION *ex;
    size_t i;
    int j;

    CRYPTO_STATIC_MUTEX_lock_write(&g_x509_cache_extensions_lock);

    /* Another thread may already have populated the cache. */
    if (x->ex_flags & EXFLAG_SET) {
        CRYPTO_STATIC_MUTEX_unlock(&g_x509_cache_extensions_lock);
        return;
    }

    X509_digest(x, EVP_sha1(), x->sha1_hash, NULL);
    /* V1 should mean no extensions ... */
    if (!X509_get_version(x))
        x->ex_flags |= EXFLAG_V1;
    /* Handle basic constraints */
    if ((bs = X509_get_ext_d2i(x, NID_basic_constraints, NULL, NULL))) {
        if (bs->ca)
            x->ex_flags |= EXFLAG_CA;
        if (bs->pathlen) {
            /* A negative pathlen, or a pathlen on a non-CA certificate, is
             * marked invalid. */
            if ((bs->pathlen->type == V_ASN1_NEG_INTEGER) || !bs->ca) {
                x->ex_flags |= EXFLAG_INVALID;
                x->ex_pathlen = 0;
            } else
                x->ex_pathlen = ASN1_INTEGER_get(bs->pathlen);
        } else
            x->ex_pathlen = -1; /* -1 == no path length constraint. */
        BASIC_CONSTRAINTS_free(bs);
        x->ex_flags |= EXFLAG_BCONS;
    }
    /* Handle proxy certificates */
    if ((pci = X509_get_ext_d2i(x, NID_proxyCertInfo, NULL, NULL))) {
        /* A proxy certificate combined with the CA flag or with subject /
         * issuer alternative names is treated as invalid. */
        if (x->ex_flags & EXFLAG_CA
            || X509_get_ext_by_NID(x, NID_subject_alt_name, -1) >= 0
            || X509_get_ext_by_NID(x, NID_issuer_alt_name, -1) >= 0) {
            x->ex_flags |= EXFLAG_INVALID;
        }
        if (pci->pcPathLengthConstraint) {
            x->ex_pcpathlen = ASN1_INTEGER_get(pci->pcPathLengthConstraint);
        } else
            x->ex_pcpathlen = -1;
        PROXY_CERT_INFO_EXTENSION_free(pci);
        x->ex_flags |= EXFLAG_PROXY;
    }
    /* Handle key usage */
    if ((usage = X509_get_ext_d2i(x, NID_key_usage, NULL, NULL))) {
        /* Pack the first two bytes of the bit string into |ex_kusage|. */
        if (usage->length > 0) {
            x->ex_kusage = usage->data[0];
            if (usage->length > 1)
                x->ex_kusage |= usage->data[1] << 8;
        } else
            x->ex_kusage = 0;
        x->ex_flags |= EXFLAG_KUSAGE;
        ASN1_BIT_STRING_free(usage);
    }
    x->ex_xkusage = 0;
    if ((extusage = X509_get_ext_d2i(x, NID_ext_key_usage, NULL, NULL))) {
        x->ex_flags |= EXFLAG_XKUSAGE;
        /* Map each recognized extended-key-usage OID to an XKU_* bit. */
        for (i = 0; i < sk_ASN1_OBJECT_num(extusage); i++) {
            switch (OBJ_obj2nid(sk_ASN1_OBJECT_value(extusage, i))) {
            case NID_server_auth:
                x->ex_xkusage |= XKU_SSL_SERVER;
                break;
            case NID_client_auth:
                x->ex_xkusage |= XKU_SSL_CLIENT;
                break;
            case NID_email_protect:
                x->ex_xkusage |= XKU_SMIME;
                break;
            case NID_code_sign:
                x->ex_xkusage |= XKU_CODE_SIGN;
                break;
            case NID_ms_sgc:
            case NID_ns_sgc:
                x->ex_xkusage |= XKU_SGC;
                break;
            case NID_OCSP_sign:
                x->ex_xkusage |= XKU_OCSP_SIGN;
                break;
            case NID_time_stamp:
                x->ex_xkusage |= XKU_TIMESTAMP;
                break;
            case NID_dvcs:
                x->ex_xkusage |= XKU_DVCS;
                break;
            case NID_anyExtendedKeyUsage:
                x->ex_xkusage |= XKU_ANYEKU;
                break;
            }
        }
        sk_ASN1_OBJECT_pop_free(extusage, ASN1_OBJECT_free);
    }
    if ((ns = X509_get_ext_d2i(x, NID_netscape_cert_type, NULL, NULL))) {
        if (ns->length > 0)
            x->ex_nscert = ns->data[0];
        else
            x->ex_nscert = 0;
        x->ex_flags |= EXFLAG_NSCERT;
        ASN1_BIT_STRING_free(ns);
    }
    x->skid = X509_get_ext_d2i(x, NID_subject_key_identifier, NULL, NULL);
    x->akid = X509_get_ext_d2i(x, NID_authority_key_identifier, NULL, NULL);
    /* Does subject name match issuer ? */
    if (!X509_NAME_cmp(X509_get_subject_name(x), X509_get_issuer_name(x))) {
        x->ex_flags |= EXFLAG_SI;
        /* If SKID matches AKID also indicate self signed */
        if (X509_check_akid(x, x->akid) == X509_V_OK)
            x->ex_flags |= EXFLAG_SS;
    }
    x->altname = X509_get_ext_d2i(x, NID_subject_alt_name, NULL, NULL);
    /* |j| receives the extension index/criticality result; a NULL return
     * with |j| != -1 is treated as an invalid extension rather than an
     * absent one. */
    x->nc = X509_get_ext_d2i(x, NID_name_constraints, &j, NULL);
    if (!x->nc && (j != -1))
        x->ex_flags |= EXFLAG_INVALID;
    setup_crldp(x);

    /* Scan all extensions for the freshest-CRL marker and for critical
     * extensions we do not support. */
    for (j = 0; j < X509_get_ext_count(x); j++) {
        ex = X509_get_ext(x, j);
        if (OBJ_obj2nid(X509_EXTENSION_get_object(ex)) == NID_freshest_crl)
            x->ex_flags |= EXFLAG_FRESHEST;
        if (!X509_EXTENSION_get_critical(ex))
            continue;
        if (!X509_supported_extension(ex)) {
            x->ex_flags |= EXFLAG_CRITICAL;
            break;
        }
    }
    x->ex_flags |= EXFLAG_SET;
    CRYPTO_STATIC_MUTEX_unlock(&g_x509_cache_extensions_lock);
}
/* X509_PUBKEY_get returns a reference to the public key contained in |key|,
 * decoding it on first use and caching the result in |key->pkey|. The
 * caller receives its own reference (released with |EVP_PKEY_free|).
 * Returns NULL on error. */
EVP_PKEY *X509_PUBKEY_get(X509_PUBKEY *key)
{
    EVP_PKEY *ret = NULL;

    if (key == NULL)
        goto error;

    /* Fast path: another caller may already have decoded the key. */
    CRYPTO_STATIC_MUTEX_lock_read(&g_pubkey_lock);
    if (key->pkey != NULL) {
        CRYPTO_STATIC_MUTEX_unlock(&g_pubkey_lock);
        return EVP_PKEY_up_ref(key->pkey);
    }
    CRYPTO_STATIC_MUTEX_unlock(&g_pubkey_lock);

    if (key->public_key == NULL)
        goto error;

    if ((ret = EVP_PKEY_new()) == NULL) {
        OPENSSL_PUT_ERROR(X509, X509_PUBKEY_get, ERR_R_MALLOC_FAILURE);
        goto error;
    }

    if (!EVP_PKEY_set_type(ret, OBJ_obj2nid(key->algor->algorithm))) {
        OPENSSL_PUT_ERROR(X509, X509_PUBKEY_get, X509_R_UNSUPPORTED_ALGORITHM);
        goto error;
    }

    if (ret->ameth->pub_decode) {
        if (!ret->ameth->pub_decode(ret, key)) {
            OPENSSL_PUT_ERROR(X509, X509_PUBKEY_get,
                              X509_R_PUBLIC_KEY_DECODE_ERROR);
            goto error;
        }
    } else {
        OPENSSL_PUT_ERROR(X509, X509_PUBKEY_get, X509_R_METHOD_NOT_SUPPORTED);
        goto error;
    }

    /* Check to see if another thread set key->pkey first */
    CRYPTO_STATIC_MUTEX_lock_write(&g_pubkey_lock);
    if (key->pkey) {
        CRYPTO_STATIC_MUTEX_unlock(&g_pubkey_lock);
        /* Lost the race: discard our copy and hand out the cached one. */
        EVP_PKEY_free(ret);
        ret = key->pkey;
    } else {
        /* The cache takes ownership of the initial reference to |ret|; the
         * up_ref below creates the caller's reference. */
        key->pkey = ret;
        CRYPTO_STATIC_MUTEX_unlock(&g_pubkey_lock);
    }

    return EVP_PKEY_up_ref(ret);

error:
    if (ret != NULL)
        EVP_PKEY_free(ret);
    return (NULL);
}
/* CRYPTO_sysrand puts |requested| random bytes into |out|, reading from
 * urandom either directly or, when buffering is enabled, through a cached
 * per-process |rand_buffer|. Aborts on any failure. */
void CRYPTO_sysrand(uint8_t *out, size_t requested)
{
    int fd;
    struct rand_buffer *buf;
    size_t todo;
    pid_t pid, ppid;

    if (requested == 0) {
        return;
    }

    CRYPTO_STATIC_MUTEX_lock_write(&global_lock);
    fd = urandom_get_fd_locked();

    if (fd < 0) {
        CRYPTO_STATIC_MUTEX_unlock(&global_lock);
        abort();
        return;
    }

    /* If buffering is not enabled, or if the request is large, then the
     * result comes directly from urandom. */
    if (!urandom_buffering || requested > BUF_SIZE / 2) {
        CRYPTO_STATIC_MUTEX_unlock(&global_lock);
        if (!read_full(fd, out, requested)) {
            abort();
        }
        return;
    }

    /* |pid| and |ppid| together are used to detect forks: a buffer stamped
     * with a different pair must not be reused. */
    pid = getpid();
    ppid = getppid();

    for (;;) {
        buf = list_head;
        /* Fast path: the head buffer belongs to this process and already
         * holds enough unread bytes. */
        if (buf && buf->pid == pid && buf->ppid == ppid &&
            rand_bytes_per_buf - buf->used >= requested) {
            memcpy(out, &buf->rand[buf->used], requested);
            buf->used += requested;
            CRYPTO_STATIC_MUTEX_unlock(&global_lock);
            return;
        }

        /* If we don't immediately have enough entropy with the correct
         * PID, remove the buffer from the list in order to gain
         * exclusive access and unlock. */
        if (buf) {
            list_head = buf->next;
        }
        CRYPTO_STATIC_MUTEX_unlock(&global_lock);

        if (!buf) {
            buf = (struct rand_buffer *)OPENSSL_malloc(BUF_SIZE);
            if (!buf) {
                abort();
                return;
            }
            /* The buffer doesn't contain any random bytes yet
             * so we mark it as fully used so that it will be
             * filled below. */
            buf->used = rand_bytes_per_buf;
            buf->next = NULL;
            buf->pid = pid;
            buf->ppid = ppid;
        }

        if (buf->pid == pid && buf->ppid == ppid) {
            break;
        }

        /* We have forked and so cannot use these bytes as they
         * may have been used in another process. */
        OPENSSL_free(buf);
        CRYPTO_STATIC_MUTEX_lock_write(&global_lock);
    }

    /* At this point |buf| is exclusively ours (detached from the list) and
     * stamped with our pid/ppid. Copy out what it holds, refilling from
     * urandom whenever it runs dry. */
    while (requested > 0) {
        todo = rand_bytes_per_buf - buf->used;
        if (todo > requested) {
            todo = requested;
        }
        memcpy(out, &buf->rand[buf->used], todo);
        requested -= todo;
        out += todo;
        buf->used += todo;

        /* Buffer not exhausted means the request is satisfied. */
        if (buf->used < rand_bytes_per_buf) {
            break;
        }

        if (!read_full(fd, buf->rand, rand_bytes_per_buf)) {
            OPENSSL_free(buf);
            abort();
            return;
        }
        buf->used = 0;
    }

    /* Push the buffer back onto the shared list for future calls. */
    CRYPTO_STATIC_MUTEX_lock_write(&global_lock);
    assert(list_head != buf);
    buf->next = list_head;
    list_head = buf;
    CRYPTO_STATIC_MUTEX_unlock(&global_lock);
}