Example #1
/*
 * Same as crypto_digest_init_prov(), but relies on the KCF scheduler
 * to choose a provider. See crypto_digest_init_prov() comments for
 * more information.
 */
int
crypto_digest_init(crypto_mechanism_t *mech, crypto_context_t *ctxp,
    crypto_call_req_t  *crq)
{
	int error;
	kcf_provider_desc_t *pd;
	kcf_prov_tried_t *list = NULL;

retry:
	/* The pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, NULL, &error,
	    list, CRYPTO_FG_DIGEST, CHECK_RESTRICT(crq), 0)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	error = crypto_digest_init_prov(pd, pd->pd_sid, mech, ctxp, crq);

	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}

	if (list != NULL)
		kcf_free_triedlist(list);
	KCF_PROV_REFRELE(pd);
	return (error);
}
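For context, a minimal consumer-side sketch of how the multi-part digest entry points above are typically driven, assuming the standard illumos KCF consumer API (crypto_mech2id(), crypto_digest_update(), crypto_digest_final(), crypto_cancel_ctx()) and the raw crypto_data_t/crypto_mechanism_t layouts from sys/crypto/api.h and sys/crypto/common.h; those names are taken from the headers as remembered, not from this excerpt, and error handling is abbreviated.

#include <sys/types.h>
#include <sys/crypto/common.h>
#include <sys/crypto/api.h>

/*
 * Hypothetical consumer: synchronous (crq == NULL) multi-part SHA-256
 * digest over two buffers. KCF picks the provider, as in the code above.
 */
static int
example_digest_two_bufs(uint8_t *b1, size_t l1, uint8_t *b2, size_t l2,
    uint8_t *out, size_t outlen)
{
	crypto_mechanism_t mech;
	crypto_context_t ctx;
	crypto_data_t d1, d2, dig;
	int rv;

	mech.cm_type = crypto_mech2id(SUN_CKM_SHA256);
	if (mech.cm_type == CRYPTO_MECH_INVALID)
		return (CRYPTO_MECHANISM_INVALID);
	mech.cm_param = NULL;
	mech.cm_param_len = 0;

	/* Describe the raw buffers to the framework. */
	d1.cd_format = CRYPTO_DATA_RAW;
	d1.cd_offset = 0;
	d1.cd_length = l1;
	d1.cd_raw.iov_base = (char *)b1;
	d1.cd_raw.iov_len = l1;

	d2 = d1;
	d2.cd_length = l2;
	d2.cd_raw.iov_base = (char *)b2;
	d2.cd_raw.iov_len = l2;

	dig = d1;
	dig.cd_length = outlen;
	dig.cd_raw.iov_base = (char *)out;
	dig.cd_raw.iov_len = outlen;

	if ((rv = crypto_digest_init(&mech, &ctx, NULL)) != CRYPTO_SUCCESS)
		return (rv);
	if ((rv = crypto_digest_update(ctx, &d1, NULL)) == CRYPTO_SUCCESS)
		rv = crypto_digest_update(ctx, &d2, NULL);
	if (rv == CRYPTO_SUCCESS)
		rv = crypto_digest_final(ctx, &dig, NULL);
	else
		crypto_cancel_ctx(ctx);	/* abandon the context on error */
	return (rv);
}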
Example #2
/*
 * This routine is called for blocking reads.
 *
 * The argument is_taskq_thr indicates whether the caller is
 * the taskq thread dispatched by the timeout handler routine.
 * If so, we cycle through all the providers, submitting a
 * request to each one to generate random numbers.
 *
 * For other cases, we pick a provider and submit a request to generate
 * random numbers. We retry using another provider if we get an error.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and need are unchanged.
 */
static int
rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t is_taskq_thr)
{
	int rv;
	int prov_cnt = 0;
	int total_bytes = 0;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, 0)) != NULL) {

		prov_cnt++;

		KCF_WRAP_RANDOM_OPS_PARAMS(&params, KCF_OP_RANDOM_GENERATE,
		    pd->pd_sid, ptr, need, 0, 0);
		rv = kcf_submit_request(pd, NULL, NULL, &params, B_FALSE);
		ASSERT(rv != CRYPTO_QUEUED);

		if (rv == CRYPTO_SUCCESS) {
			total_bytes += need;
			if (is_taskq_thr)
				rndc_addbytes(ptr, need);
			else {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

		if (is_taskq_thr || rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}

	}

	if (list != NULL)
		kcf_free_triedlist(list);

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}
Example #3
int
crypto_sign_init(crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *crq)
{
	int error;
	kcf_mech_entry_t *me;
	kcf_provider_desc_t *pd;
	kcf_prov_tried_t *list = NULL;
	kcf_ctx_template_t *ctx_tmpl;
	crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;

retry:
	/* The pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
	    list, CRYPTO_FG_SIGN, CHECK_RESTRICT(crq), 0)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	/*
	 * For SW providers, check the validity of the context template.
	 * It is very rare that the generation number mismatches, so it
	 * is acceptable to fail here and let the consumer recover by
	 * freeing this tmpl and creating a new one for the key and the
	 * new SW provider.
	 */
	if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
	    ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
		if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
			if (list != NULL)
				kcf_free_triedlist(list);
			KCF_PROV_REFRELE(pd);
			return (CRYPTO_OLD_CTX_TEMPLATE);
		} else {
			spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
		}
	}

	error = crypto_sign_init_prov(pd, pd->pd_sid, mech, key, spi_ctx_tmpl,
	    ctxp, crq);

	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}

	if (list != NULL)
		kcf_free_triedlist(list);
	KCF_PROV_REFRELE(pd);
	return (error);
}
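A minimal sketch, under the same assumptions as the digest sketch above, of how a consumer might build a reusable context template for a raw key and hand it to crypto_sign_init(); crypto_create_ctx_template() and the crypto_key_t raw-key fields come from the KCF consumer headers as remembered, not from this excerpt.

/*
 * Hypothetical consumer: create a reusable context template for a raw key
 * and start a synchronous sign operation with it.
 */
static int
example_sign_init_raw_key(crypto_mechanism_t *mech, uint8_t *keybuf,
    size_t keylen_bytes, crypto_ctx_template_t *tmplp, crypto_context_t *ctxp)
{
	crypto_key_t key;
	int rv;

	key.ck_format = CRYPTO_KEY_RAW;
	key.ck_data = keybuf;
	key.ck_length = keylen_bytes * 8;	/* ck_length is in bits */

	/* Build the template once; it can be reused across many inits. */
	if ((rv = crypto_create_ctx_template(mech, &key, tmplp,
	    KM_SLEEP)) != CRYPTO_SUCCESS)
		return (rv);

	/* crq == NULL: synchronous call; KCF picks the provider. */
	return (crypto_sign_init(mech, &key, *tmplp, ctxp, NULL));
}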
Example #4
/*
 * Same as crypto_digest_prov(), but relies on the KCF scheduler to
 * choose a provider. See crypto_digest_prov() comments for more information.
 */
int
crypto_digest(crypto_mechanism_t *mech, crypto_data_t *data,
    crypto_data_t *digest, crypto_call_req_t *crq)
{
	int error;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

retry:
	/* The pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, NULL, &error, list,
	    CRYPTO_FG_DIGEST_ATOMIC, CHECK_RESTRICT(crq),
	    data->cd_length)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lmech;

		lmech = *mech;
		KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
		error = KCF_PROV_DIGEST_ATOMIC(pd, pd->pd_sid, &lmech, data,
		    digest, KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, pd->pd_sid,
		    mech, NULL, data, digest);

		/* no crypto context to carry between multiple parts. */
		error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
	}

	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	KCF_PROV_REFRELE(pd);
	return (error);
}
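The atomic entry points also accept a crypto_call_req_t for asynchronous completion; the cr_callback_func/cr_callback_arg fields and the CRYPTO_QUEUED convention are visible in the later examples, while the exact contract that a non-CRYPTO_QUEUED return means the callback is not invoked is my reading of the KCF interface rather than something shown here. A hedged sketch, assuming the usual kernel headers for bzero() and cmn_err():

/* Hypothetical completion callback; KCF invokes it as (arg, status). */
static void
example_digest_done(void *arg, int status)
{
	if (status != CRYPTO_SUCCESS)
		cmn_err(CE_WARN, "async digest failed: 0x%x", status);
	/* ... hand the result associated with 'arg' back to the waiter ... */
}

/*
 * Hypothetical asynchronous submission; the data and digest descriptors
 * are prepared as in the earlier multi-part sketch.
 */
static void
example_digest_async(crypto_mechanism_t *mech, crypto_data_t *data,
    crypto_data_t *digest, void *arg)
{
	crypto_call_req_t req;
	int rv;

	bzero(&req, sizeof (req));
	req.cr_callback_func = example_digest_done;
	req.cr_callback_arg = arg;

	rv = crypto_digest(mech, data, digest, &req);
	if (rv != CRYPTO_QUEUED) {
		/* Completed (or failed) inline; report it ourselves. */
		example_digest_done(arg, rv);
	}
	/* CRYPTO_QUEUED: the callback fires when the request finishes. */
}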
Example #5
/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 */
boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);
		/*
		 * A warning may have been logged earlier about no provider
		 * being available, and a provider is available now. Set the
		 * flag so that we can log again if the problem recurs.
		 */
		rng_ok_to_log = B_TRUE;
		rng_prov_found = B_TRUE;
		return (B_TRUE);
	} else {
		rng_prov_found = B_FALSE;
		return (B_FALSE);
	}
}
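The rng_ok_to_log flag managed here implies a warn-once pattern in the callers; a hypothetical sketch of such a caller in the same file (the function name and message text are made up, only the flag handling follows the comments above):

static void
example_check_rng_provider(void)
{
	if (!kcf_rngprov_check() && rng_ok_to_log) {
		cmn_err(CE_WARN,
		    "No randomness provider is available for /dev/random.");
		rng_ok_to_log = B_FALSE;	/* re-armed by kcf_rngprov_check() */
	}
}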
Example #6
/*
 * Same as crypto_mac_init_prov(), but relies on the KCF scheduler to
 * choose a provider. See crypto_mac_init_prov() comments for more
 * information.
 */
int
crypto_mac_init(crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
    crypto_call_req_t  *crq)
{
	int error;
	kcf_mech_entry_t *me;
	kcf_provider_desc_t *pd;
	kcf_ctx_template_t *ctx_tmpl;
	crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
	kcf_prov_tried_t *list = NULL;

retry:
	/* The pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
	    list, CRYPTO_FG_MAC, CHECK_RESTRICT(crq), 0)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	/*
	 * For SW providers, check the validity of the context template.
	 * It is very rare that the generation number mismatches, so it
	 * is acceptable to fail here and let the consumer recover by
	 * freeing this tmpl and creating a new one for the key and the
	 * new SW provider.
	 */
	if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
	    ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
		if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
			if (list != NULL)
				kcf_free_triedlist(list);
			KCF_PROV_REFRELE(pd);
			return (CRYPTO_OLD_CTX_TEMPLATE);
		} else {
			spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
		}
	}

	if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
	    (pd->pd_flags & CRYPTO_HASH_NO_UPDATE)) {
		/*
		 * The hardware provider has limited HMAC support.
		 * So, we fall back early here to using a software provider.
		 *
		 * XXX - need to enhance this to do the fallback later in
		 * crypto_mac_update() if the size of the accumulated input
		 * data exceeds the maximum size digestible by the hardware
		 * provider.
		 */
		error = CRYPTO_BUFFER_TOO_BIG;
	} else {
		error = crypto_mac_init_prov(pd, pd->pd_sid, mech, key,
		    spi_ctx_tmpl, ctxp, crq);
	}
	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	KCF_PROV_REFRELE(pd);
	return (error);
}
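The comment above expects the consumer to recover from CRYPTO_OLD_CTX_TEMPLATE by discarding the stale template and building a new one; a hedged consumer-side sketch of that recovery, assuming crypto_create_ctx_template()/crypto_destroy_ctx_template() from the KCF consumer API:

/* Hypothetical consumer: retry a MAC init once after refreshing the template. */
static int
example_mac_init_with_tmpl(crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_ctx_template_t *tmplp, crypto_context_t *ctxp)
{
	int rv;

	rv = crypto_mac_init(mech, key, *tmplp, ctxp, NULL);
	if (rv == CRYPTO_OLD_CTX_TEMPLATE) {
		/* The template was built against an older SW provider. */
		crypto_destroy_ctx_template(*tmplp);
		*tmplp = NULL;
		if ((rv = crypto_create_ctx_template(mech, key, tmplp,
		    KM_SLEEP)) != CRYPTO_SUCCESS)
			return (rv);
		rv = crypto_mac_init(mech, key, *tmplp, ctxp, NULL);
	}
	return (rv);
}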
Example #7
/*
 * Same as crypto_mac_verify_prov(), but relies on the KCF scheduler to choose
 * a provider. See crypto_mac_verify_prov() comments for more information.
 */
int
crypto_mac_verify(crypto_mechanism_t *mech, crypto_data_t *data,
    crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac,
    crypto_call_req_t *crq)
{
	int error;
	kcf_mech_entry_t *me;
	kcf_req_params_t params;
	kcf_provider_desc_t *pd;
	kcf_ctx_template_t *ctx_tmpl;
	crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
	kcf_prov_tried_t *list = NULL;

retry:
	/* The pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
	    list, CRYPTO_FG_MAC_ATOMIC, CHECK_RESTRICT(crq),
	    data->cd_length)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	/*
	 * For SW providers, check the validity of the context template.
	 * It is very rare that the generation number mismatches, so it
	 * is acceptable to fail here and let the consumer recover by
	 * freeing this tmpl and creating a new one for the key and the
	 * new SW provider.
	 */
	if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
	    ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
		if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
			if (list != NULL)
				kcf_free_triedlist(list);
			KCF_PROV_REFRELE(pd);
			return (CRYPTO_OLD_CTX_TEMPLATE);
		} else {
			spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
		}
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lmech;

		lmech = *mech;
		KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);

		error = KCF_PROV_MAC_VERIFY_ATOMIC(pd, pd->pd_sid, &lmech, key,
		    data, mac, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    (pd->pd_flags & CRYPTO_HASH_NO_UPDATE) &&
		    (data->cd_length > pd->pd_hash_limit)) {
			/* see comments in crypto_mac() */
			error = CRYPTO_BUFFER_TOO_BIG;
		} else {
			KCF_WRAP_MAC_OPS_PARAMS(&params,
			    KCF_OP_MAC_VERIFY_ATOMIC, pd->pd_sid, mech,
			    key, data, mac, spi_ctx_tmpl);

			error = kcf_submit_request(pd, NULL, crq, &params,
			    KCF_ISDUALREQ(crq));
		}
	}

	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	KCF_PROV_REFRELE(pd);
	return (error);
}
Example #8
/*
 * Cycle through all the providers, submitting a request to each one to
 * generate random numbers. This is called for the NONBLOCK_EXTRACT and
 * ALWAYS_EXTRACT modes.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len)
{
	int rv, total_bytes;
	size_t blen;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, 0)) != NULL) {

		prov_cnt++;
		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/*
			 * We have to allocate a buffer here as we cannot
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed-size
			 * buffer to simplify the bookkeeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES, 0, 0);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len, 0, 0);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}
Example #9
/*
 * This routine is called when a request to a provider has failed
 * with a recoverable error. This routine tries to find another provider
 * and dispatches the request to the new provider, if one is available.
 * We reuse the request structure.
 *
 * A return value of NULL from kcf_get_mech_provider() indicates
 * we have tried the last provider.
 */
static int
kcf_resubmit_request(kcf_areq_node_t *areq)
{
	int error = CRYPTO_FAILED;
	kcf_context_t *ictx;
	kcf_provider_desc_t *old_pd;
	kcf_provider_desc_t *new_pd;
	crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
	crypto_mech_type_t prov_mt1, prov_mt2;
	crypto_func_group_t fg = 0;

	if (!can_resubmit(areq, &mech1, &mech2, &fg))
		return (error);

	old_pd = areq->an_provider;
	/*
	 * Add old_pd to the list of providers already tried. We release
	 * the hold on old_pd (from the earlier kcf_get_mech_provider()) in
	 * kcf_free_triedlist().
	 */
	if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
	    KM_NOSLEEP) == NULL)
		return (error);

	if (mech1 && !mech2) {
		new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
		    areq->an_tried_plist, fg,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
	} else {
		ASSERT(mech1 != NULL && mech2 != NULL);

		new_pd = kcf_get_dual_provider(mech1, mech2, NULL, &prov_mt1,
		    &prov_mt2, &error, areq->an_tried_plist, fg, fg,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
	}

	if (new_pd == NULL)
		return (error);

	/*
	 * We reuse the old context by resetting provider specific
	 * fields in it.
	 */
	if ((ictx = areq->an_context) != NULL) {
		crypto_ctx_t *ctx;

		ASSERT(old_pd == ictx->kc_prov_desc);
		KCF_PROV_REFRELE(ictx->kc_prov_desc);
		KCF_PROV_REFHOLD(new_pd);
		ictx->kc_prov_desc = new_pd;

		ctx = &ictx->kc_glbl_ctx;
		ctx->cc_provider = new_pd->pd_prov_handle;
		ctx->cc_session = new_pd->pd_sid;
		ctx->cc_provider_private = NULL;
	}

	/* We reuse areq. by resetting the provider and context fields. */
	KCF_PROV_REFRELE(old_pd);
	KCF_PROV_REFHOLD(new_pd);
	areq->an_provider = new_pd;
	mutex_enter(&areq->an_lock);
	areq->an_state = REQ_WAITING;
	mutex_exit(&areq->an_lock);

	switch (new_pd->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		error = kcf_disp_sw_request(areq);
		break;

	case CRYPTO_HW_PROVIDER: {
		taskq_t *taskq = new_pd->pd_sched_info.ks_taskq;

		if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
		    TASKQID_INVALID) {
			error = CRYPTO_HOST_MEMORY;
		} else {
			error = CRYPTO_QUEUED;
		}

		break;
	}

	default:
		break;
	}

	return (error);
}
Example #10
/*
 * Callback routine for the next part of a simulated dual-part operation.
 * Schedules the next step.
 *
 * This routine can be called from interrupt context.
 */
void
kcf_next_req(void *next_req_arg, int status)
{
	kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
	kcf_req_params_t *params = &(next_req->kr_params);
	kcf_areq_node_t *areq = next_req->kr_areq;
	int error = status;
	kcf_provider_desc_t *pd = NULL;
	crypto_dual_data_t *ct = NULL;

	/* Stop the processing if an error occurred at this step */
	if (error != CRYPTO_SUCCESS) {
out:
		areq->an_reqarg = next_req->kr_callreq;
		KCF_AREQ_REFRELE(areq);
		kmem_free(next_req, sizeof (kcf_dual_req_t));
		areq->an_isdual = B_FALSE;
		kcf_aop_done(areq, error);
		return;
	}

	switch (params->rp_opgrp) {
	case KCF_OG_MAC: {

		/*
		 * The next req is submitted with the same reqid as the
		 * first part. The consumer only got back that reqid, and
		 * should still be able to cancel the operation during its
		 * second step.
		 */
		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
		crypto_ctx_template_t mac_tmpl;
		kcf_mech_entry_t *me;

		ct = (crypto_dual_data_t *)mops->mo_data;
		mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;

		/* No expected recoverable failures, so no retry list */
		pd = kcf_get_mech_provider(mops->mo_framework_mechtype,
		    &me, &error, NULL, CRYPTO_FG_MAC_ATOMIC,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len2);

		if (pd == NULL) {
			error = CRYPTO_MECH_NOT_SUPPORTED;
			goto out;
		}
		/* Validate the MAC context template here */
		if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
		    (mac_tmpl != NULL)) {
			kcf_ctx_template_t *ctx_mac_tmpl;

			ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;

			if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
				KCF_PROV_REFRELE(pd);
				error = CRYPTO_OLD_CTX_TEMPLATE;
				goto out;
			}
			mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
		}

		break;
	}
	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops =
		    &(params->rp_u.decrypt_params);

		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
		/* No expected recoverable failures, so no retry list */
		pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
		    NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len1);

		if (pd == NULL) {
			error = CRYPTO_MECH_NOT_SUPPORTED;
			goto out;
		}
		break;
	}
	default:
		break;
	}

	/* The second step uses len2 and offset2 of the dual_data */
	next_req->kr_saveoffset = ct->dd_offset1;
	next_req->kr_savelen = ct->dd_len1;
	ct->dd_offset1 = ct->dd_offset2;
	ct->dd_len1 = ct->dd_len2;

	/* preserve if the caller is restricted */
	if (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED) {
		areq->an_reqarg.cr_flag = CRYPTO_RESTRICTED;
	} else {
		areq->an_reqarg.cr_flag = 0;
	}

	areq->an_reqarg.cr_callback_func = kcf_last_req;
	areq->an_reqarg.cr_callback_arg = next_req;
	areq->an_isdual = B_TRUE;

	/*
	 * We would like to call kcf_submit_request() here. But,
	 * that is not possible as that routine allocates a new
	 * kcf_areq_node_t request structure, while we need to
	 * reuse the existing request structure.
	 */
	switch (pd->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		error = common_submit_request(pd, NULL, params,
		    KCF_RHNDL(KM_NOSLEEP));
		break;

	case CRYPTO_HW_PROVIDER: {
		kcf_provider_desc_t *old_pd;
		taskq_t *taskq = pd->pd_sched_info.ks_taskq;

		/*
		 * Set the params for the second step in the
		 * dual-ops.
		 */
		areq->an_params = *params;
		old_pd = areq->an_provider;
		KCF_PROV_REFRELE(old_pd);
		KCF_PROV_REFHOLD(pd);
		areq->an_provider = pd;

		/*
		 * Note that we have to do a taskq_dispatch()
		 * here as we may be in interrupt context.
		 */
		if (taskq_dispatch(taskq, process_req_hwp, areq,
		    TQ_NOSLEEP) == TASKQID_INVALID) {
			error = CRYPTO_HOST_MEMORY;
		} else {
			error = CRYPTO_QUEUED;
		}
		break;
	}
	default:
		break;
	}

	/*
	 * We have to release the holds on the request and the provider
	 * in all cases.
	 */
	KCF_AREQ_REFRELE(areq);
	KCF_PROV_REFRELE(pd);

	if (error != CRYPTO_QUEUED) {
		/* restore, clean up, and invoke the client's callback */

		ct->dd_offset1 = next_req->kr_saveoffset;
		ct->dd_len1 = next_req->kr_savelen;
		areq->an_reqarg = next_req->kr_callreq;
		kmem_free(next_req, sizeof (kcf_dual_req_t));
		areq->an_isdual = B_FALSE;
		kcf_aop_done(areq, error);
	}
}
Example #11
static int
sign_sr_atomic_common(crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_data_t *data, crypto_ctx_template_t tmpl, crypto_data_t *signature,
    crypto_call_req_t *crq, crypto_func_group_t fg)
{
	int error;
	kcf_mech_entry_t *me;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;
	kcf_ctx_template_t *ctx_tmpl;
	crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;

retry:
	/* The pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error, list, fg,
	    CHECK_RESTRICT(crq), data->cd_length)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	/*
	 * For SW providers, check the validity of the context template.
	 * It is very rare that the generation number mismatches, so it
	 * is acceptable to fail here and let the consumer recover by
	 * freeing this tmpl and creating a new one for the key and the
	 * new SW provider.
	 */
	if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
	    ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
		if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
			if (list != NULL)
				kcf_free_triedlist(list);
			KCF_PROV_REFRELE(pd);
			return (CRYPTO_OLD_CTX_TEMPLATE);
		} else {
			spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
		}
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lmech;

		lmech = *mech;
		KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
		if (fg == CRYPTO_FG_SIGN_ATOMIC)
			error = KCF_PROV_SIGN_ATOMIC(pd, pd->pd_sid, &lmech,
			    key, data, spi_ctx_tmpl, signature,
			    KCF_SWFP_RHNDL(crq));
		else
			error = KCF_PROV_SIGN_RECOVER_ATOMIC(pd, pd->pd_sid,
			    &lmech, key, data, spi_ctx_tmpl, signature,
			    KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		kcf_op_type_t op = ((fg == CRYPTO_FG_SIGN_ATOMIC) ?
		    KCF_OP_ATOMIC : KCF_OP_SIGN_RECOVER_ATOMIC);

		KCF_WRAP_SIGN_OPS_PARAMS(&params, op, pd->pd_sid,
		    mech, key, data, signature, spi_ctx_tmpl);

		/* no crypto context to carry between multiple parts. */
		error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
	}

	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	KCF_PROV_REFRELE(pd);
	return (error);
}
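Presumably the public sign entry points are thin wrappers that select the function group for this common routine; a sketch of what crypto_sign() would look like under that assumption (crypto_sign_recover() would pass CRYPTO_FG_SIGN_RECOVER_ATOMIC instead):

int
crypto_sign(crypto_mechanism_t *mech, crypto_key_t *key, crypto_data_t *data,
    crypto_ctx_template_t tmpl, crypto_data_t *signature,
    crypto_call_req_t *crq)
{
	return (sign_sr_atomic_common(mech, key, data, tmpl, signature, crq,
	    CRYPTO_FG_SIGN_ATOMIC));
}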