Example #1
0
int
aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini)
{
	struct thread *td;
	int error;

	/*
	 * Map the key length onto an AES round count; any other key
	 * size is unsupported.
	 */
	if (encini->cri_klen == 128)
		ses->rounds = AES128_ROUNDS;
	else if (encini->cri_klen == 192)
		ses->rounds = AES192_ROUNDS;
	else if (encini->cri_klen == 256)
		ses->rounds = AES256_ROUNDS;
	else
		return (EINVAL);

	/*
	 * The key-schedule routines use SSE registers, so FPU state
	 * must be saved around them.
	 */
	td = curthread;
	error = fpu_kern_enter(td, &ses->fpu_ctx, FPU_KERN_NORMAL);
	if (error != 0)
		return (error);
	aesni_set_enckey(encini->cri_key, ses->enc_schedule, ses->rounds);
	aesni_set_deckey(ses->enc_schedule, ses->dec_schedule, ses->rounds);
	/* Seed an initial random IV for the session. */
	arc4rand(ses->iv, sizeof(ses->iv), 0);
	fpu_kern_leave(td, &ses->fpu_ctx);
	return (0);
}
Example #2
0
int
padlock_hash_process(struct padlock_session *ses, struct cryptodesc *maccrd,
    struct cryptop *crp)
{
	struct thread *td = curthread;
	int error = 0, saved_ctx = 0;

	/*
	 * A dedicated FPU kernel thread already owns the FPU; only
	 * save/restore a context when running on an ordinary thread.
	 */
	if (!is_fpu_kern_thread(0)) {
		error = fpu_kern_enter(td, ses->ses_fpu_ctx, FPU_KERN_NORMAL);
		if (error != 0)
			return (error);
		saved_ctx = 1;
	}

	/* Install a per-request key before hashing, if one was supplied. */
	if ((maccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0)
		padlock_hash_key_setup(ses, maccrd->crd_key, maccrd->crd_klen);

	error = padlock_authcompute(ses, maccrd, crp->crp_buf, crp->crp_flags);
	if (saved_ctx)
		fpu_kern_leave(td, ses->ses_fpu_ctx);
	return (error);
}
Example #3
0
int
aesni_cipher_setup(struct aesni_session *ses, struct cryptoini *encini)
{

	/*
	 * Key-schedule construction is delegated entirely to
	 * aesni_cipher_setup_common().  The former inline FPU
	 * save/restore logic was disabled with #if 0 and is removed
	 * here; the unused locals (td, saved_ctx) go with it.  With the
	 * dead code gone, "error" was always 0 before the call, so this
	 * function reduces to a direct delegation.
	 */
	return (aesni_cipher_setup_common(ses, encini->cri_key,
	    encini->cri_klen));
}
Example #4
0
static void
pefs_aesni_leave(struct pefs_session *xses)
{
	struct pefs_aesni_ses *ses = &xses->o.ps_aesni;

	/* Nothing to undo unless an FPU context was actually saved. */
	if (ses->fpu_saved > 0) {
		fpu_kern_leave(ses->td, ses->fpu_ctx);
		/* Hand the context back to this CPU's per-CPU slot. */
		DPCPU_ID_SET(ses->fpu_cpuid, pefs_aesni_fpu, ses->fpu_ctx);
	}
}
Example #5
0
int
aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptop *crp)
{
	struct thread *td;
	uint8_t *data;
	int allocated, error;

	/* May hand back a bounce buffer; "allocated" says which. */
	data = aesni_cipher_alloc(enccrd, crp, &allocated);
	if (data == NULL) {
		error = ENOMEM;
		goto done;
	}

	td = curthread;
	error = fpu_kern_enter(td, &ses->fpu_ctx, FPU_KERN_NORMAL);
	if (error != 0)
		goto cleanup;

	if ((enccrd->crd_flags & CRD_F_ENCRYPT) == 0) {
		/* Decrypting: take the IV from the descriptor or buffer. */
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
		    enccrd->crd_len, data, ses->iv);
	} else {
		/*
		 * Encrypting: optionally adopt an explicit IV, and
		 * publish the IV into the request before ciphering.
		 */
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
		    enccrd->crd_len, data, data, ses->iv);
	}
	fpu_kern_leave(td, &ses->fpu_ctx);

	/* Copy the result out if we worked on a bounce buffer. */
	if (allocated)
		crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, data);
	/* Save the final ciphertext block as the next chaining IV. */
	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf,
		    enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
		    AES_BLOCK_LEN, ses->iv);
 cleanup:
	if (allocated) {
		/* Scrub key-dependent data before releasing the buffer. */
		bzero(data, enccrd->crd_len);
		free(data, M_AESNI);
	}
 done:
	return (error);
}
Example #6
0
static int
pefs_aesni_keysetup(const struct pefs_session *xses,
    struct pefs_ctx *xctx, const uint8_t *key, uint32_t keybits)
{
	const struct pefs_aesni_ses *ses = &xses->o.ps_aesni;
	struct pefs_aesni_ctx *ctx = &xctx->o.pctx_aesni;
	struct fpu_kern_ctx *tmpctx = NULL;
	int error;

	switch (keybits) {
	case 128:
		ctx->rounds = AES128_ROUNDS;
		break;
	case 192:
		ctx->rounds = AES192_ROUNDS;
		break;
	case 256:
		ctx->rounds = AES256_ROUNDS;
		break;
	default:
		/* %u: keybits is unsigned (uint32_t), not int. */
		printf("pefs: AESNI: invalid key length: %u", keybits);
		return (EINVAL);
	}

	/*
	 * If the session has no FPU context saved, borrow a temporary
	 * one around the SSE-based key-schedule setup.  Unlike before,
	 * check fpu_kern_enter()'s return (every other call site in
	 * this code does); on failure, free the context and bail rather
	 * than running AESNI instructions without saved FPU state.
	 */
	if (ses->fpu_saved < 0) {
		tmpctx = fpu_kern_alloc_ctx(FPU_KERN_NORMAL);
		if (tmpctx == NULL)
			return (ENOMEM);
		error = fpu_kern_enter(curthread, tmpctx, FPU_KERN_NORMAL);
		if (error != 0) {
			fpu_kern_free_ctx(tmpctx);
			return (error);
		}
	}

	aesni_set_enckey(key, ctx->enc_schedule, ctx->rounds);
	aesni_set_deckey(ctx->enc_schedule, ctx->dec_schedule, ctx->rounds);
	/* Software fallback schedule for paths without FPU access. */
	rijndael_set_key(&ctx->sw, key, keybits);

	if (tmpctx != NULL) {
		fpu_kern_leave(curthread, tmpctx);
		fpu_kern_free_ctx(tmpctx);
	}

	return (0);
}
Example #7
0
/* It is specifically allowed that buf is a multiple of sizeof(long) */
static u_int
random_nehemiah_read(void *buf, u_int c)
{
	uint8_t *dest;
	size_t remaining, chunk;
	uint64_t rnd;

	/* Without saved FPU state we must not touch the XMM unit. */
	if (fpu_kern_enter(curthread, fpu_ctx_save, FPU_KERN_NORMAL) != 0)
		return (0);

	dest = buf;
	for (remaining = c; remaining > 0; remaining -= chunk) {
		/* Fetch up to 8 bytes from the hardware RNG per pass. */
		chunk = MIN(VIA_RNG_store(&rnd), remaining);
		memcpy(dest, &rnd, chunk);
		dest += chunk;
	}
	fpu_kern_leave(curthread, fpu_ctx_save);

	return (c);
}
Example #8
0
int
aesni_cipher_process(struct aesni_session *ses, struct cryptodesc *enccrd,
    struct cryptop *crp)
{
	struct thread *td;
	uint8_t *buf;
	int error, allocated, saved_ctx;

	/* May return a bounce buffer; "allocated" tells us to free it. */
	buf = aesni_cipher_alloc(enccrd, crp, &allocated);
	if (buf == NULL)
		return (ENOMEM);

	td = curthread;
	if (!is_fpu_kern_thread(0)) {
		error = fpu_kern_enter(td, ses->fpu_ctx, FPU_KERN_NORMAL);
		if (error != 0)
			goto out;
		saved_ctx = 1;
	} else {
		saved_ctx = 0;
		error = 0;
	}

	if ((enccrd->crd_flags & CRD_F_KEY_EXPLICIT) != 0) {
		error = aesni_cipher_setup_common(ses, enccrd->crd_key,
		    enccrd->crd_klen);
		if (error != 0) {
			/*
			 * BUG FIX: this error path previously jumped
			 * straight to "out", leaking the saved FPU
			 * context when saved_ctx was set.
			 */
			if (saved_ctx)
				fpu_kern_leave(td, ses->fpu_ctx);
			goto out;
		}
	}

	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0) {
		/* Adopt an explicit IV, and publish it before ciphering. */
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		if ((enccrd->crd_flags & CRD_F_IV_PRESENT) == 0)
			crypto_copyback(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		if (ses->algo == CRYPTO_AES_CBC) {
			aesni_encrypt_cbc(ses->rounds, ses->enc_schedule,
			    enccrd->crd_len, buf, buf, ses->iv);
		} else /* if (ses->algo == CRYPTO_AES_XTS) */ {
			aesni_encrypt_xts(ses->rounds, ses->enc_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    ses->iv);
		}
	} else {
		/* Decrypting: take the IV from the descriptor or buffer. */
		if ((enccrd->crd_flags & CRD_F_IV_EXPLICIT) != 0)
			bcopy(enccrd->crd_iv, ses->iv, AES_BLOCK_LEN);
		else
			crypto_copydata(crp->crp_flags, crp->crp_buf,
			    enccrd->crd_inject, AES_BLOCK_LEN, ses->iv);
		if (ses->algo == CRYPTO_AES_CBC) {
			aesni_decrypt_cbc(ses->rounds, ses->dec_schedule,
			    enccrd->crd_len, buf, ses->iv);
		} else /* if (ses->algo == CRYPTO_AES_XTS) */ {
			aesni_decrypt_xts(ses->rounds, ses->dec_schedule,
			    ses->xts_schedule, enccrd->crd_len, buf, buf,
			    ses->iv);
		}
	}
	if (saved_ctx)
		fpu_kern_leave(td, ses->fpu_ctx);
	/* Copy the result out if we worked on a bounce buffer. */
	if (allocated)
		crypto_copyback(crp->crp_flags, crp->crp_buf, enccrd->crd_skip,
		    enccrd->crd_len, buf);
	/* Save the final ciphertext block as the next chaining IV. */
	if ((enccrd->crd_flags & CRD_F_ENCRYPT) != 0)
		crypto_copydata(crp->crp_flags, crp->crp_buf,
		    enccrd->crd_skip + enccrd->crd_len - AES_BLOCK_LEN,
		    AES_BLOCK_LEN, ses->iv);
 out:
	if (allocated) {
		/* Scrub key-dependent data before releasing the buffer. */
		bzero(buf, enccrd->crd_len);
		free(buf, M_AESNI);
	}
	return (error);
}