Example #1
static BN_BLINDING *rsa_get_blinding(RSA *rsa, int *local, BN_CTX *ctx)
{
    BN_BLINDING *ret;
    int got_write_lock = 0;
    CRYPTO_THREADID cur;

    CRYPTO_r_lock(CRYPTO_LOCK_RSA);

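    /*
     * Double-checked locking: if no blinding is set up yet, upgrade from
     * the read lock to the write lock and re-test, since another thread
     * may have created rsa->blinding while we held neither lock.
     */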
    if (rsa->blinding == NULL) {
        CRYPTO_r_unlock(CRYPTO_LOCK_RSA);
        CRYPTO_w_lock(CRYPTO_LOCK_RSA);
        got_write_lock = 1;

        if (rsa->blinding == NULL)
            rsa->blinding = RSA_setup_blinding(rsa, ctx);
    }

    ret = rsa->blinding;
    if (ret == NULL)
        goto err;

    CRYPTO_THREADID_current(&cur);
    if (!CRYPTO_THREADID_cmp(&cur, BN_BLINDING_thread_id(ret))) {
        /* rsa->blinding is ours! */

        *local = 1;
    } else {
        /* resort to rsa->mt_blinding instead */

        /*
         * instructs rsa_blinding_convert(), rsa_blinding_invert() that the
         * BN_BLINDING is shared, meaning that accesses require locks, and
         * that the blinding factor must be stored outside the BN_BLINDING
         */
        *local = 0;

        if (rsa->mt_blinding == NULL) {
            if (!got_write_lock) {
                CRYPTO_r_unlock(CRYPTO_LOCK_RSA);
                CRYPTO_w_lock(CRYPTO_LOCK_RSA);
                got_write_lock = 1;
            }

            if (rsa->mt_blinding == NULL)
                rsa->mt_blinding = RSA_setup_blinding(rsa, ctx);
        }
        ret = rsa->mt_blinding;
    }

 err:
    if (got_write_lock)
        CRYPTO_w_unlock(CRYPTO_LOCK_RSA);
    else
        CRYPTO_r_unlock(CRYPTO_LOCK_RSA);
    return ret;
}
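The core idiom above pairs CRYPTO_THREADID_current() with CRYPTO_THREADID_cmp() to decide whether the calling thread owns a shared structure. A minimal sketch of just that test, assuming the OpenSSL 1.0.x API (the helper name is ours, not OpenSSL's):

#include <openssl/crypto.h>

/* Nonzero when the calling thread is the one recorded in 'owner'.
 * CRYPTO_THREADID_cmp() follows memcmp() conventions, so 0 means
 * "same thread". */
static int current_thread_owns(const CRYPTO_THREADID *owner)
{
    CRYPTO_THREADID cur;

    CRYPTO_THREADID_current(&cur);
    return CRYPTO_THREADID_cmp(&cur, owner) == 0;
}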
Example #2
int fips_clear_owning_thread(void)
{
    int ret = 0;

    if (fips_started) {
        CRYPTO_w_lock(CRYPTO_LOCK_FIPS2);
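        /* Only the thread recorded in fips_thread may clear the flag */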
        if (fips_thread_set) {
            CRYPTO_THREADID cur;
            CRYPTO_THREADID_current(&cur);
            if (!CRYPTO_THREADID_cmp(&cur, &fips_thread))
                fips_thread_set = 0;
        }
        CRYPTO_w_unlock(CRYPTO_LOCK_FIPS2);
    }
    return ret;
}
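Example #2 only clears ownership; recording it is the same pattern in reverse. A hedged sketch of such a setter (the function name and the reuse of the fips_thread/fips_thread_set globals are assumptions, not taken from the snippet above):

/* Record the calling thread as the FIPS owner under the write lock. */
static void fips_set_owning_thread(void)
{
    CRYPTO_w_lock(CRYPTO_LOCK_FIPS2);
    if (!fips_thread_set) {
        CRYPTO_THREADID_current(&fips_thread);
        fips_thread_set = 1;
    }
    CRYPTO_w_unlock(CRYPTO_LOCK_FIPS2);
}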
Example #3
static int ssleay_rand_status(void)
	{
	CRYPTO_THREADID cur;
	int ret;
	int do_not_lock;

	CRYPTO_THREADID_current(&cur);
	/* check if we already have the lock
	 * (could happen if a RAND_poll() implementation calls RAND_status()) */
	if (crypto_lock_rand)
		{
		CRYPTO_r_lock(CRYPTO_LOCK_RAND2);
		do_not_lock = !CRYPTO_THREADID_cmp(&locking_threadid, &cur);
		CRYPTO_r_unlock(CRYPTO_LOCK_RAND2);
		}
	else
		do_not_lock = 0;
	
	if (!do_not_lock)
		{
		CRYPTO_w_lock(CRYPTO_LOCK_RAND);
		
		/* prevent ssleay_rand_bytes() from trying to obtain the lock again */
		CRYPTO_w_lock(CRYPTO_LOCK_RAND2);
		CRYPTO_THREADID_cpy(&locking_threadid, &cur);
		CRYPTO_w_unlock(CRYPTO_LOCK_RAND2);
		crypto_lock_rand = 1;
		}
	
	if (!initialized)
		{
		RAND_poll();
		initialized = 1;
		}

	ret = entropy >= ENTROPY_NEEDED;

	if (!do_not_lock)
		{
		/* before unlocking, we must clear 'crypto_lock_rand' */
		crypto_lock_rand = 0;
		
		CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
		}
	
	return ret;
	}
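ssleay_rand_status() guards against self-deadlock: CRYPTO_LOCK_RAND is not recursive, so before taking it the function checks whether the calling thread already holds it by comparing thread ids. That test, isolated into a sketch (the helper name is ours; crypto_lock_rand and locking_threadid mirror the globals above):

/* Nonzero when the calling thread already holds CRYPTO_LOCK_RAND. */
static int rand_lock_is_mine(void)
{
    CRYPTO_THREADID cur;
    int mine = 0;

    if (crypto_lock_rand) {
        CRYPTO_THREADID_current(&cur);
        CRYPTO_r_lock(CRYPTO_LOCK_RAND2);
        mine = !CRYPTO_THREADID_cmp(&locking_threadid, &cur);
        CRYPTO_r_unlock(CRYPTO_LOCK_RAND2);
    }
    return mine;
}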
Example #4
static int err_state_cmp(const ERR_STATE *a, const ERR_STATE *b)
{
    return CRYPTO_THREADID_cmp(&a->tid, &b->tid);
}
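Example #4 is the comparison callback for OpenSSL's per-thread error-state table: it returns 0 when two ERR_STATE entries belong to the same thread. In err.c it is paired with a hash callback built on CRYPTO_THREADID_hash(), roughly:

static unsigned long err_state_hash(const ERR_STATE *a)
{
    return CRYPTO_THREADID_hash(&a->tid) * 13;
}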
Example #5
static void ssleay_rand_add(const void *buf, int num, double add)
	{
	int i,j,k,st_idx;
	long md_c[2];
	unsigned char local_md[MD_DIGEST_LENGTH];
	EVP_MD_CTX m;
	int do_not_lock;

	/*
	 * (Based on the rand(3) manpage)
	 *
	 * The input is chopped up into units of 20 bytes (or less for
	 * the last block).  Each of these blocks is run through the hash
	 * function as follows:  The data passed to the hash function
	 * is the current 'md', the same number of bytes from the 'state'
	 * (the location determined by an incremented looping index) as
	 * the current 'block', the new key data 'block', and 'count'
	 * (which is incremented after each use).
	 * The result of this is kept in 'md' and also xored into the
	 * 'state' at the same locations that were used as input into the
	 * hash function.
	 */

	/* check if we already have the lock */
	if (crypto_lock_rand)
		{
		CRYPTO_THREADID cur;
		CRYPTO_THREADID_current(&cur);
		CRYPTO_r_lock(CRYPTO_LOCK_RAND2);
		do_not_lock = !CRYPTO_THREADID_cmp(&locking_threadid, &cur);
		CRYPTO_r_unlock(CRYPTO_LOCK_RAND2);
		}
	else
		do_not_lock = 0;

	if (!do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
	st_idx=state_index;

	/* use our own copies of the counters so that even
	 * if a concurrent thread seeds with exactly the
	 * same data and uses the same subarray there's _some_
	 * difference */
	md_c[0] = md_count[0];
	md_c[1] = md_count[1];

	TINYCLR_SSL_MEMCPY(local_md, md, sizeof md);

	/* state_index <= state_num <= STATE_SIZE */
	state_index += num;
	if (state_index >= STATE_SIZE)
		{
		state_index%=STATE_SIZE;
		state_num=STATE_SIZE;
		}
	else if (state_num < STATE_SIZE)	
		{
		if (state_index > state_num)
			state_num=state_index;
		}
	/* state_index <= state_num <= STATE_SIZE */

	/* state[st_idx], ..., state[(st_idx + num - 1) % STATE_SIZE]
	 * are what we will use now, but other threads may use them
	 * as well */

	md_count[1] += (num / MD_DIGEST_LENGTH) + (num % MD_DIGEST_LENGTH > 0);

	if (!do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);

	EVP_MD_CTX_init(&m);
	for (i=0; i<num; i+=MD_DIGEST_LENGTH)
		{
		j=(num-i);
		j=(j > MD_DIGEST_LENGTH)?MD_DIGEST_LENGTH:j;

		MD_Init(&m);
		MD_Update(&m,local_md,MD_DIGEST_LENGTH);
		k=(st_idx+j)-STATE_SIZE;
		if (k > 0)
			{
			MD_Update(&m,&(state[st_idx]),j-k);
			MD_Update(&m,&(state[0]),k);
			}
		else
			MD_Update(&m,&(state[st_idx]),j);

		/* DO NOT REMOVE THE FOLLOWING CALL TO MD_Update()! */
		MD_Update(&m,buf,j);
		/* We know that line may cause programs such as
		   purify and valgrind to complain about use of
		   uninitialized data.  The problem is not here but
		   with the caller.  Removing that line will make
		   sure you get really bad randomness and thereby
		   other problems such as very insecure keys. */

		MD_Update(&m,(unsigned char *)&(md_c[0]),sizeof(md_c));
		MD_Final(&m,local_md);
		md_c[1]++;

		buf=(const char *)buf + j;

		for (k=0; k<j; k++)
			{
			/* Parallel threads may interfere with this,
			 * but each byte of the new state is always
			 * the XOR of some previous value of itself
			 * and local_md (intermediate values may be lost).
			 * Always using locking could hurt performance more
			 * than necessary, given that conflicts occur only
			 * when the total seeding is longer than the random
			 * state. */
			state[st_idx++]^=local_md[k];
			if (st_idx >= STATE_SIZE)
				st_idx=0;
			}
		}
	EVP_MD_CTX_cleanup(&m);

	if (!do_not_lock) CRYPTO_w_lock(CRYPTO_LOCK_RAND);
	/* Don't just copy back local_md into md -- this could mean that
	 * other threads' seeding remains without effect (except for
	 * the incremented counter).  By XORing it we keep at least as
	 * much entropy as fits into md. */
	for (k = 0; k < (int)sizeof(md); k++)
		{
		md[k] ^= local_md[k];
		}
	if (entropy < ENTROPY_NEEDED) /* stop counting when we have enough */
	    entropy += add;
	if (!do_not_lock) CRYPTO_w_unlock(CRYPTO_LOCK_RAND);
	
#if !defined(OPENSSL_THREADS) && !defined(OPENSSL_SYS_WIN32) && !defined(OPENSSL_SYS_ARM) && !defined(OPENSSL_SYS_SH)
	TINYCLR_SSL_ASSERT(md_c[1] == md_count[1]);
#endif
	}
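ssleay_rand_add() is the mixing routine behind the default RAND_METHOD, so application code reaches it through the public entry points rather than directly. A minimal caller-side sketch (the wrapper name is ours; 'seed' is whatever entropy the application has gathered):

#include <openssl/rand.h>

/* Feed seed material into the PRNG state and report readiness. */
static int seed_prng(const unsigned char *seed, int len)
{
    RAND_add(seed, len, (double)len); /* claim 'len' bytes of entropy */
    return RAND_status();             /* 1 once enough entropy is mixed in */
}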