/*
 * Distribute entropy to the queued rndsinks: for each sink at the head
 * of the queue, try to pull the number of bytes it asked for out of
 * the entropy pool and hand them to its callback.  Stops as soon as
 * the pool cannot satisfy the sink at the head of the queue.
 *
 * Takes rndsinks_lock itself, so the caller must not hold it.
 */
void
rndsinks_distribute(void)
{
	uint8_t buffer[RNDSINK_MAX_BYTES];
	struct rndsink *rndsink;

	explicit_memset(buffer, 0, sizeof(buffer)); /* paranoia */

	mutex_spin_enter(&rndsinks_lock);
	while ((rndsink = TAILQ_FIRST(&rndsinks)) != NULL) {
		KASSERT(rndsink->rsink_state == RNDSINK_QUEUED);

		/* Bail if we can't get some entropy for this rndsink.  */
		if (!rndpool_maybe_extract(buffer, rndsink->rsink_bytes))
			break;

		/*
		 * Got some entropy.  Take the sink off the queue and
		 * feed the entropy to the callback, with rndsinks_lock
		 * dropped.  While running the callback, lock out
		 * rndsink_destroy by marking the sink in flight.
		 */
		TAILQ_REMOVE(&rndsinks, rndsink, rsink_entry);
		rndsink->rsink_state = RNDSINK_IN_FLIGHT;
		mutex_spin_exit(&rndsinks_lock);

		(*rndsink->rsink_callback)(rndsink->rsink_arg, buffer,
		    rndsink->rsink_bytes);
		/* Scrub this sink's entropy before the buffer is reused.  */
		explicit_memset(buffer, 0, rndsink->rsink_bytes);

		mutex_spin_enter(&rndsinks_lock);

		/*
		 * If, while the callback was running, anyone requested
		 * it be queued up again, do so now.  Otherwise, idle.
		 * Either way, it is now safe to destroy, so wake the
		 * pending rndsink_destroy, if there is one.
		 */
		if (rndsink->rsink_state == RNDSINK_REQUEUED) {
			TAILQ_INSERT_TAIL(&rndsinks, rndsink, rsink_entry);
			rndsink->rsink_state = RNDSINK_QUEUED;
		} else {
			KASSERT(rndsink->rsink_state == RNDSINK_IN_FLIGHT);
			rndsink->rsink_state = RNDSINK_IDLE;
		}
		cv_broadcast(&rndsink->rsink_cv);
	}
	mutex_spin_exit(&rndsinks_lock);

	explicit_memset(buffer, 0, sizeof(buffer));	/* paranoia */
}
Example #2
0
Aes_ecb_encryptor::~Aes_ecb_encryptor ()
{
	// Note: Explicit destructor necessary because class contains an auto_ptr
	// which contains an incomplete type when the auto_ptr is declared.

	// Scrub the AES key material before the object is released; a
	// plain memset here could be elided as a dead store, while
	// explicit_memset is guaranteed to run.
	explicit_memset(&impl->key, '\0', sizeof(impl->key));
}
Example #3
0
/*
 * Generate some output and apply a statistical RNG test to it.
 *
 * Caller must hold cprng->cs_lock.  If the scratch buffer cannot be
 * allocated the test is quietly skipped.  If the generated output
 * fails the statistical test, the cprng is marked not ready and a
 * reseed is scheduled via its rndsink.
 */
static void
cprng_strong_rngtest(struct cprng_strong *cprng)
{

	KASSERT(mutex_owned(&cprng->cs_lock));

	/* XXX Switch to a pool cache instead?  */
	rngtest_t *const rt = kmem_intr_alloc(sizeof(*rt), KM_NOSLEEP);
	if (rt == NULL)
		/* XXX Warn?  */
		return;

	(void)strlcpy(rt->rt_name, cprng->cs_name, sizeof(rt->rt_name));

	/* Fill the test buffer straight from the DRBG, no extra input.  */
	if (nist_ctr_drbg_generate(&cprng->cs_drbg, rt->rt_b, sizeof(rt->rt_b),
		NULL, 0))
		panic("cprng %s: NIST CTR_DRBG failed after reseed",
		    cprng->cs_name);

	if (rngtest(rt)) {
		printf("cprng %s: failed statistical RNG test\n",
		    cprng->cs_name);
		/* XXX Not clear that this does any good...  */
		cprng->cs_ready = false;
		rndsink_schedule(cprng->cs_rndsink);
	}

	/* rt holds raw DRBG output -- wipe it before freeing.  */
	explicit_memset(rt, 0, sizeof(*rt)); /* paranoia */
	kmem_intr_free(rt, sizeof(*rt));
}
Example #4
0
/*
 * Pull random words out of the BCM2835 hardware RNG FIFO until the
 * outstanding request (sc->sc_bytes_wanted) is satisfied, feeding
 * them to the rnd(4) source.  Runs with sc_intr_lock held except
 * while handing data to rnd_add_data().
 */
static void
bcmrng_get(struct bcm2835rng_softc *sc)
{
    uint32_t status, cnt;
    uint32_t buf[RNG_DATA_MAX]; /* 1k on the stack */

    mutex_spin_enter(&sc->sc_intr_lock);
    while (sc->sc_bytes_wanted) {
        /* The CNT field reports how many words the FIFO holds now.  */
        status = bus_space_read_4(sc->sc_iot, sc->sc_ioh, RNG_STATUS);
        cnt = __SHIFTOUT(status, RNG_STATUS_CNT);
        KASSERT(cnt < RNG_DATA_MAX);
        if (cnt == 0)
            continue;	/* XXX Busy-waiting seems wrong...  */
        bus_space_read_multi_4(sc->sc_iot, sc->sc_ioh, RNG_DATA, buf,
                               cnt);

        /*
         * This lock dance is necessary because rnd_add_data
         * may call bcmrng_get_cb which takes the intr lock.
         */
        mutex_spin_exit(&sc->sc_intr_lock);
        mutex_spin_enter(&sc->sc_rnd_lock);
        /* cnt words = cnt*4 bytes, credited as full entropy (NBBY bits/byte). */
        rnd_add_data(&sc->sc_rndsource, buf, (cnt * 4),
                     (cnt * 4 * NBBY));
        mutex_spin_exit(&sc->sc_rnd_lock);
        mutex_spin_enter(&sc->sc_intr_lock);
        sc->sc_bytes_wanted -= MIN(sc->sc_bytes_wanted, (cnt * 4));
    }
    /* Don't leave RNG output lying around on the stack.  */
    explicit_memset(buf, 0, sizeof(buf));
    mutex_spin_exit(&sc->sc_intr_lock);
}
Example #5
0
/*
 * Reseed with whatever we can get from the system entropy pool right now.
 *
 * Caller must hold cprng->cs_lock.  rndsink_request() reports whether
 * it could supply full entropy; that verdict is passed through to
 * cprng_strong_reseed_from() so readiness can be tracked.
 */
static void
cprng_strong_reseed(struct cprng_strong *cprng)
{
	uint8_t seed[NIST_BLOCK_KEYLEN_BYTES];

	KASSERT(mutex_owned(&cprng->cs_lock));

	const bool full_entropy = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	cprng_strong_reseed_from(cprng, seed, sizeof(seed), full_entropy);
	/* The seed is key material; scrub it from the stack.  */
	explicit_memset(seed, 0, sizeof(seed));
}
Example #6
0
/*
 * Zero a buffer in a way the compiler cannot optimize away, for
 * scrubbing sensitive data (keys, passwords) from memory.  Picks the
 * strongest primitive available on the build platform.
 */
R_API void r_mem_memzero(void *dst, size_t l) {
#ifdef _MSC_VER
	/* Windows: documented never to be elided by the optimizer.  */
	RtlSecureZeroMemory (dst, l);
#else
#if HAVE_EXPLICIT_BZERO
	/* BSD / glibc >= 2.25 */
	explicit_bzero (dst, l);
#elif HAVE_EXPLICIT_MEMSET
	/* e.g. NetBSD; returns dst, which we don't need */
	(void)explicit_memset (dst, 0, l);
#else
	memset (dst, 0, l);
	/* Compiler barrier: tells the compiler dst's memory is observed,
	 * so the memset above cannot be removed as a dead store.  */
	__asm__ volatile ("" :: "r"(dst) : "memory");
#endif
#endif
}
Example #7
0
/*
 * rndsource callback: read 32-bit words from the Ingenic hardware RNG
 * register until bytes_wanted bytes have been delivered synchronously
 * to the entropy pool.  priv is the driver softc.
 */
static void
ingenic_rng_get(size_t bytes_wanted, void *priv)
{
	struct ingenic_rng_softc * const sc = priv;
	uint32_t data;

	mutex_spin_enter(&sc->sc_lock);
	while (bytes_wanted) {
		data = bus_space_read_4(sc->sc_bst, sc->sc_bsh, 0);
		/* Brief pause between reads -- presumably to let the
		 * generator produce fresh output; confirm against the
		 * hardware manual.  */
		delay(1);
		rnd_add_data_sync(&sc->sc_rndsource, &data, sizeof(data),
		    sizeof(data) * NBBY);
		bytes_wanted -= MIN(bytes_wanted, sizeof(data));
	}
	/* Scrub the last RNG word from the stack.  */
	explicit_memset(&data, 0, sizeof(data));
	mutex_spin_exit(&sc->sc_lock);
}
Example #8
0
/*
 * Create and initialize a strong cprng named `name', usable from code
 * running at IPL `ipl'.  The underlying NIST CTR_DRBG is seeded from
 * the system rndsink; if full entropy is not yet available the cprng
 * is still created (cs_ready = false) and a warning is printed unless
 * the caller passed CPRNG_INIT_ANY.
 */
struct cprng_strong *
cprng_strong_create(const char *name, int ipl, int flags)
{
	const uint32_t cc = cprng_counter();
	struct cprng_strong *const cprng = kmem_alloc(sizeof(*cprng),
	    KM_SLEEP);

	/*
	 * rndsink_request takes a spin lock at IPL_VM, so we can be no
	 * higher than that.
	 */
	KASSERT(ipl != IPL_SCHED && ipl != IPL_HIGH);

	/* Initialize the easy fields.  */
	(void)strlcpy(cprng->cs_name, name, sizeof(cprng->cs_name));
	cprng->cs_flags = flags;
	mutex_init(&cprng->cs_lock, MUTEX_DEFAULT, ipl);
	cv_init(&cprng->cs_cv, cprng->cs_name);
	selinit(&cprng->cs_selq);
	cprng->cs_rndsink = rndsink_create(NIST_BLOCK_KEYLEN_BYTES,
	    &cprng_strong_rndsink_callback, cprng);

	/* Get some initial entropy.  Record whether it is full entropy.  */
	uint8_t seed[NIST_BLOCK_KEYLEN_BYTES];
	mutex_enter(&cprng->cs_lock);
	cprng->cs_ready = rndsink_request(cprng->cs_rndsink, seed,
	    sizeof(seed));
	if (nist_ctr_drbg_instantiate(&cprng->cs_drbg, seed, sizeof(seed),
		&cc, sizeof(cc), cprng->cs_name, sizeof(cprng->cs_name)))
		/* XXX Fix nist_ctr_drbg API so this can't happen.  */
		panic("cprng %s: NIST CTR_DRBG instantiation failed",
		    cprng->cs_name);
	/* The seed is key material; scrub it from the stack.  */
	explicit_memset(seed, 0, sizeof(seed));

	/* cs_remaining presumably rations output between reseeds for
	 * CPRNG_HARD generators -- confirm against the consumer code.  */
	if (ISSET(flags, CPRNG_HARD))
		cprng->cs_remaining = NIST_BLOCK_KEYLEN_BYTES;
	else
		cprng->cs_remaining = 0;

	if (!cprng->cs_ready && !ISSET(flags, CPRNG_INIT_ANY))
		printf("cprng %s: creating with partial entropy\n",
		    cprng->cs_name);
	mutex_exit(&cprng->cs_lock);

	return cprng;
}
Example #9
0
/*
 * Drain sc->sc_bytes_wanted bytes from the Ingenic hardware RNG into
 * the rnd(4) source.  Runs under sc_intr_lock, which is dropped
 * around rnd_add_data() (the rnd lock is taken instead) so the two
 * locks are never held together.
 */
static void
ingenic_rng_get(struct ingenic_rng_softc *sc)
{
	uint32_t data;

	mutex_spin_enter(&sc->sc_intr_lock);
	while (sc->sc_bytes_wanted) {
		/* Read one 32-bit word of RNG output at offset 0.  */
		bus_space_read_region_4(sc->sc_bst, sc->sc_bsh, 0, &data, 1);
#if 0
		device_printf(sc->sc_dev, "random output: %x\n", data);
#endif
		mutex_spin_exit(&sc->sc_intr_lock);
		mutex_spin_enter(&sc->sc_rnd_lock);
		rnd_add_data(&sc->sc_rndsource, &data, sizeof(data),
		    sizeof(data) * NBBY);
		mutex_spin_exit(&sc->sc_rnd_lock);
		mutex_spin_enter(&sc->sc_intr_lock);
		sc->sc_bytes_wanted -= MIN(sc->sc_bytes_wanted, sizeof(data));
	}
	/* Scrub the last RNG word from the stack.  */
	explicit_memset(&data, 0, sizeof(data));
	mutex_spin_exit(&sc->sc_intr_lock);
}
Example #10
0
/*
 * Tear down a cprng created by cprng_strong_create().  The caller
 * must guarantee there are no remaining users; the KASSERT below
 * checks for lingering waiters.  The whole structure is scrubbed
 * before being freed so no DRBG state survives in the allocator.
 */
void
cprng_strong_destroy(struct cprng_strong *cprng)
{

	/*
	 * Destroy the rndsink first to prevent calls to the callback.
	 */
	rndsink_destroy(cprng->cs_rndsink);

	KASSERT(!cv_has_waiters(&cprng->cs_cv));
#if 0
	/* NOTE(review): missing ';' -- fix if this is ever enabled.  */
	KASSERT(!select_has_waiters(&cprng->cs_selq)) /* XXX ? */
#endif

	nist_ctr_drbg_destroy(&cprng->cs_drbg);
	seldestroy(&cprng->cs_selq);
	cv_destroy(&cprng->cs_cv);
	mutex_destroy(&cprng->cs_lock);

	explicit_memset(cprng, 0, sizeof(*cprng)); /* paranoia */
	kmem_free(cprng, sizeof(*cprng));
}
Example #11
0
// Construct an empty key-file entry: version 0 with both key buffers
// zeroed.  explicit_memset is used so the zeroing cannot be elided by
// the optimizer, keeping the (future) key storage in a known state.
Key_file::Entry::Entry ()
{
	version = 0;
	explicit_memset(aes_key, 0, AES_KEY_LEN);
	explicit_memset(hmac_key, 0, HMAC_KEY_LEN);
}
Example #12
0
/* ARGSUSED */
/*
 * Handle the cgd(4) "set" ioctl: configure a crypto-disk instance.
 * Looks up the backing device, copies the cipher name, IV method and
 * key in from userland, initializes the crypto state, and attaches
 * the disk.
 *
 * Returns 0 on success or an errno on failure.  On failure the
 * backing vnode is closed and the key-bearing temporary buffer is
 * scrubbed and freed.
 */
static int
cgd_ioctl_set(struct cgd_softc *cs, void *data, struct lwp *l)
{
	struct	 cgd_ioctl *ci = data;
	struct	 vnode *vp;
	int	 ret;
	size_t	 i;
	size_t	 keybytes;			/* key length in bytes */
	const char *cp;
	struct pathbuf *pb;
	char	 *inbuf;
	struct dk_softc *dksc = &cs->sc_dksc;

	cp = ci->ci_disk;

	/* Resolve the user-supplied path to the backing vnode.  */
	ret = pathbuf_copyin(ci->ci_disk, &pb);
	if (ret != 0) {
		return ret;
	}
	ret = dk_lookup(pb, l, &vp);
	pathbuf_destroy(pb);
	if (ret != 0) {
		return ret;
	}

	/* Scratch buffer reused for alg name, IV method, then the key.  */
	inbuf = malloc(MAX_KEYSIZE, M_TEMP, M_WAITOK);

	if ((ret = cgdinit(cs, cp, vp, l)) != 0)
		goto bail;

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	/* NOTE(review): 256 presumably bounds the algorithm-name string;
	 * confirm against the cgd_ioctl ABI before changing it.  */
	ret = copyinstr(ci->ci_alg, inbuf, 256, NULL);
	if (ret)
		goto bail;
	cs->sc_cfuncs = cryptfuncs_find(inbuf);
	if (!cs->sc_cfuncs) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyinstr(ci->ci_ivmethod, inbuf, MAX_KEYSIZE, NULL);
	if (ret)
		goto bail;

	/* Match the IV method name against the supported table.  */
	for (i = 0; i < __arraycount(encblkno); i++)
		if (strcmp(encblkno[i].n, inbuf) == 0)
			break;

	if (i == __arraycount(encblkno)) {
		ret = EINVAL;
		goto bail;
	}

	/* ci_keylen is in bits; +1 byte of slack for a ragged length.  */
	keybytes = ci->ci_keylen / 8 + 1;
	if (keybytes > MAX_KEYSIZE) {
		ret = EINVAL;
		goto bail;
	}

	(void)memset(inbuf, 0, MAX_KEYSIZE);
	ret = copyin(ci->ci_key, inbuf, keybytes);
	if (ret)
		goto bail;

	cs->sc_cdata.cf_blocksize = ci->ci_blocksize;
	cs->sc_cdata.cf_mode = encblkno[i].v;
	cs->sc_cdata.cf_keylen = ci->ci_keylen;
	cs->sc_cdata.cf_priv = cs->sc_cfuncs->cf_init(ci->ci_keylen, inbuf,
	    &cs->sc_cdata.cf_blocksize);
	if (cs->sc_cdata.cf_blocksize > CGD_MAXBLOCKSIZE) {
	    log(LOG_WARNING, "cgd: Disallowed cipher with blocksize %zu > %u\n",
		cs->sc_cdata.cf_blocksize, CGD_MAXBLOCKSIZE);
	    /* NOTE(review): cf_priv from cf_init() is dropped here without
	     * a matching destroy -- check whether this leaks.  */
	    cs->sc_cdata.cf_priv = NULL;
	}

	/*
	 * The blocksize is supposed to be in bytes. Unfortunately originally
	 * it was expressed in bits. For compatibility we maintain encblkno
	 * and encblkno8.
	 */
	cs->sc_cdata.cf_blocksize /= encblkno[i].d;
	(void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
	if (!cs->sc_cdata.cf_priv) {
		ret = EINVAL;		/* XXX is this the right error? */
		goto bail;
	}
	free(inbuf, M_TEMP);

	bufq_alloc(&dksc->sc_bufq, "fcfs", 0);

	cs->sc_data = malloc(MAXPHYS, M_DEVBUF, M_WAITOK);
	cs->sc_data_used = 0;

	/* Attach the disk. */
	dk_attach(dksc);
	disk_attach(&dksc->sc_dkdev);

	disk_set_info(dksc->sc_dev, &dksc->sc_dkdev, NULL);

	/* Discover wedges on this disk. */
	dkwedge_discover(&dksc->sc_dkdev);

	return 0;

bail:
	/*
	 * inbuf may hold (part of) the user's key by the time an error
	 * sends us here; scrub it before freeing so key material does
	 * not linger in the allocator's free pool.  (The success path
	 * already wipes it above.)
	 */
	(void)explicit_memset(inbuf, 0, MAX_KEYSIZE);
	free(inbuf, M_TEMP);
	(void)vn_close(vp, FREAD|FWRITE, l->l_cred);
	return ret;
}