Example #1
/*
 * This routine is used to notify the framework of the result of
 * an asynchronous request handled by a provider. Valid error
 * codes are the same as the CRYPTO_* errors defined in common.h.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_op_notification(crypto_req_handle_t handle, int error)
{
	kcf_call_type_t ctype;

	if (handle == NULL)
		return;

	if ((ctype = GET_REQ_TYPE(handle)) == CRYPTO_SYNCH) {
		kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)handle;

		if (error != CRYPTO_SUCCESS)
			sreq->sn_provider->pd_sched_info.ks_nfails++;
		KCF_PROV_IREFRELE(sreq->sn_provider);
		kcf_sop_done(sreq, error);
	} else {
		kcf_areq_node_t *areq = (kcf_areq_node_t *)handle;

		ASSERT(ctype == CRYPTO_ASYNCH);
		if (error != CRYPTO_SUCCESS)
			areq->an_provider->pd_sched_info.ks_nfails++;
		KCF_PROV_IREFRELE(areq->an_provider);
		kcf_aop_done(areq, error);
	}
}
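A hedged sketch of how a provider might drive this entry point: a hardware provider that returned CRYPTO_QUEUED from its submit routine completes the job later (typically in its interrupt handler) and reports the outcome through crypto_op_notification(). The my_dev_t type and its md_* fields are hypothetical; only crypto_req_handle_t, crypto_op_notification() and the CRYPTO_* codes come from the framework.

/* Hypothetical per-job state a provider might keep (illustration only). */
typedef struct my_dev {
	crypto_req_handle_t	md_req;		/* handle saved at submit time */
	int			md_hw_status;	/* raw status from the device */
} my_dev_t;

/* Sketch of a provider completion path; safe from interrupt context. */
static void
my_provider_done(my_dev_t *dev)
{
	int error;

	/* Map the hypothetical device status to a CRYPTO_* error code. */
	error = (dev->md_hw_status == 0) ? CRYPTO_SUCCESS : CRYPTO_DEVICE_ERROR;

	/* A NULL handle is ignored by the framework, as shown above. */
	crypto_op_notification(dev->md_req, error);
}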
Example #2
/*
 * Last part of an emulated dual operation. Clean up, restore the
 * original dual_data offsets, and notify the requestor.
 */
void
kcf_last_req(void *last_req_arg, int status)
{
	kcf_dual_req_t *last_req = (kcf_dual_req_t *)last_req_arg;

	kcf_req_params_t *params = &(last_req->kr_params);
	kcf_areq_node_t *areq = last_req->kr_areq;
	crypto_dual_data_t *ct = NULL;

	switch (params->rp_opgrp) {
	case KCF_OG_MAC: {
		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);

		ct = (crypto_dual_data_t *)mops->mo_data;
		break;
	}
	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops =
		    &(params->rp_u.decrypt_params);

		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
		break;
	}
	default: {
		panic("invalid kcf_op_group_t %d", (int)params->rp_opgrp);
		return;
	}
	}
	ct->dd_offset1 = last_req->kr_saveoffset;
	ct->dd_len1 = last_req->kr_savelen;

	/* The submitter used kcf_last_req as its callback */

	if (areq == NULL) {
		crypto_call_req_t *cr = &last_req->kr_callreq;

		(*(cr->cr_callback_func))(cr->cr_callback_arg, status);
		kmem_free(last_req, sizeof (kcf_dual_req_t));
		return;
	}
	areq->an_reqarg = last_req->kr_callreq;
	KCF_AREQ_REFRELE(areq);
	kmem_free(last_req, sizeof (kcf_dual_req_t));
	areq->an_isdual = B_FALSE;
	kcf_aop_done(areq, status);
}
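For context, a minimal sketch of the offset bookkeeping that kcf_last_req() above and kcf_next_req() (Example #4) cooperate on: region 1 of the crypto_dual_data_t (dd_offset1/dd_len1) describes the data for the step being submitted, region 2 holds the data for the second step, and the original region-1 values are parked in the kcf_dual_req_t between the two steps. The helpers below are illustrative only; the framework performs the same assignments inline.

/* Sketch only: swap the step-two region into place before resubmitting. */
static void
dual_swap_in_step2(crypto_dual_data_t *ct, kcf_dual_req_t *req)
{
	/* Remember the step-one region so kcf_last_req() can restore it. */
	req->kr_saveoffset = ct->dd_offset1;
	req->kr_savelen = ct->dd_len1;

	/* Make region 1 describe the step-two data for the next submit. */
	ct->dd_offset1 = ct->dd_offset2;
	ct->dd_len1 = ct->dd_len2;
}

/* Sketch only: undo the swap before the consumer's callback runs. */
static void
dual_restore_step1(crypto_dual_data_t *ct, kcf_dual_req_t *req)
{
	ct->dd_offset1 = req->kr_saveoffset;
	ct->dd_len1 = req->kr_savelen;
}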
Example #3
/*
 * This routine is called by the taskq associated with
 * each hardware provider. We notify the kernel consumer
 * via its callback routine whether the request completes with
 * CRYPTO_SUCCESS or with a failure.
 *
 * A request can be of type kcf_areq_node_t or of type
 * kcf_sreq_node_t.
 */
static void
process_req_hwp(void *ireq)
{
	int error = 0;
	crypto_ctx_t *ctx;
	kcf_call_type_t ctype;
	kcf_provider_desc_t *pd;
	kcf_areq_node_t *areq = (kcf_areq_node_t *)ireq;
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)ireq;

	pd = ((ctype = GET_REQ_TYPE(ireq)) == CRYPTO_SYNCH) ?
	    sreq->sn_provider : areq->an_provider;

	/*
	 * Wait if flow control is in effect for the provider. A
	 * CRYPTO_PROVIDER_READY or CRYPTO_PROVIDER_FAILED
	 * notification will signal us. We also get signaled if
	 * the provider is unregistering.
	 */
	if (pd->pd_state == KCF_PROV_BUSY) {
		mutex_enter(&pd->pd_lock);
		while (pd->pd_state == KCF_PROV_BUSY)
			cv_wait(&pd->pd_resume_cv, &pd->pd_lock);
		mutex_exit(&pd->pd_lock);
	}

	/*
	 * Bump the internal reference count while the request is being
	 * processed. This is how we know when it's safe to unregister
	 * a provider. This step must precede the pd_state check below.
	 */
	KCF_PROV_IREFHOLD(pd);

	/*
	 * Fail the request if the provider has failed. We return a
	 * recoverable error and the notified clients attempt any
	 * recovery. For async clients this is done in kcf_aop_done()
	 * and for sync clients it is done in the k-api routines.
	 */
	if (pd->pd_state >= KCF_PROV_FAILED) {
		error = CRYPTO_DEVICE_ERROR;
		goto bail;
	}

	if (ctype == CRYPTO_SYNCH) {
		mutex_enter(&sreq->sn_lock);
		sreq->sn_state = REQ_INPROGRESS;
		mutex_exit(&sreq->sn_lock);

		ctx = sreq->sn_context ? &sreq->sn_context->kc_glbl_ctx : NULL;
		error = common_submit_request(sreq->sn_provider, ctx,
		    sreq->sn_params, sreq);
	} else {
		kcf_context_t *ictx;
		ASSERT(ctype == CRYPTO_ASYNCH);

		/*
		 * We are in the per-hardware provider thread context and
		 * hence can sleep. Note that the caller would have done
		 * a taskq_dispatch(..., TQ_NOSLEEP) and would have returned.
		 */
		ctx = (ictx = areq->an_context) ? &ictx->kc_glbl_ctx : NULL;

		mutex_enter(&areq->an_lock);
		/*
		 * We need to maintain ordering for multi-part requests.
		 * an_is_my_turn is set to B_TRUE initially for a request
		 * when it is enqueued and there are no other requests
		 * for that context. It is set later from kcf_aop_done() when
		 * the request before us in the chain of requests for the
		 * context completes. We get signaled at that point.
		 */
		if (ictx != NULL) {
			ASSERT(ictx->kc_prov_desc == areq->an_provider);

			while (areq->an_is_my_turn == B_FALSE) {
				cv_wait(&areq->an_turn_cv, &areq->an_lock);
			}
		}
		areq->an_state = REQ_INPROGRESS;
		mutex_exit(&areq->an_lock);

		error = common_submit_request(areq->an_provider, ctx,
		    &areq->an_params, areq);
	}

bail:
	if (error == CRYPTO_QUEUED) {
		/*
		 * The request is queued by the provider and we should
		 * get a crypto_op_notification() from the provider later.
		 * We notify the consumer at that time.
		 */
		return;
	} else {		/* CRYPTO_SUCCESS or other failure */
		KCF_PROV_IREFRELE(pd);
		if (ctype == CRYPTO_SYNCH)
			kcf_sop_done(sreq, error);
		else
			kcf_aop_done(areq, error);
	}
}
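The multi-part ordering described in process_req_hwp() depends on the completion path granting the turn to the next request queued on the same context; the comment above notes that kcf_aop_done() does this. A minimal sketch of that hand-off, assuming the caller has already located the next request in the context's chain (the chain walking itself is framework-internal and not shown):

/* Illustration only: wake the next request waiting in the cv_wait() loop. */
static void
grant_turn_to_next(kcf_areq_node_t *next)
{
	if (next == NULL)
		return;

	mutex_enter(&next->an_lock);
	next->an_is_my_turn = B_TRUE;
	cv_signal(&next->an_turn_cv);
	mutex_exit(&next->an_lock);
}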
Example #4
/*
 * Callback routine for the next part of an emulated dual operation.
 * Schedules the next step.
 *
 * This routine can be called from interrupt context.
 */
void
kcf_next_req(void *next_req_arg, int status)
{
	kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
	kcf_req_params_t *params = &(next_req->kr_params);
	kcf_areq_node_t *areq = next_req->kr_areq;
	int error = status;
	kcf_provider_desc_t *pd = NULL;
	crypto_dual_data_t *ct = NULL;

	/* Stop processing if an error occurred at this step */
	if (error != CRYPTO_SUCCESS) {
out:
		areq->an_reqarg = next_req->kr_callreq;
		KCF_AREQ_REFRELE(areq);
		kmem_free(next_req, sizeof (kcf_dual_req_t));
		areq->an_isdual = B_FALSE;
		kcf_aop_done(areq, error);
		return;
	}

	switch (params->rp_opgrp) {
	case KCF_OG_MAC: {

		/*
		 * The next req is submitted with the same reqid as the
		 * first part. The consumer only got back that reqid, and
		 * should still be able to cancel the operation during its
		 * second step.
		 */
		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
		crypto_ctx_template_t mac_tmpl;
		kcf_mech_entry_t *me;

		ct = (crypto_dual_data_t *)mops->mo_data;
		mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;

		/* No expected recoverable failures, so no retry list */
		pd = kcf_get_mech_provider(mops->mo_framework_mechtype,
		    &me, &error, NULL, CRYPTO_FG_MAC_ATOMIC,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len2);

		if (pd == NULL) {
			error = CRYPTO_MECH_NOT_SUPPORTED;
			goto out;
		}
		/* Validate the MAC context template here */
		if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
		    (mac_tmpl != NULL)) {
			kcf_ctx_template_t *ctx_mac_tmpl;

			ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;

			if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
				KCF_PROV_REFRELE(pd);
				error = CRYPTO_OLD_CTX_TEMPLATE;
				goto out;
			}
			mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
		}

		break;
	}
	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops =
		    &(params->rp_u.decrypt_params);

		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
		/* No expected recoverable failures, so no retry list */
		pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
		    NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len1);

		if (pd == NULL) {
			error = CRYPTO_MECH_NOT_SUPPORTED;
			goto out;
		}
		break;
	}
	default:
		break;
	}

	/* The second step uses len2 and offset2 of the dual_data */
	next_req->kr_saveoffset = ct->dd_offset1;
	next_req->kr_savelen = ct->dd_len1;
	ct->dd_offset1 = ct->dd_offset2;
	ct->dd_len1 = ct->dd_len2;

	/* Preserve only the caller's CRYPTO_RESTRICTED flag */
	if (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED) {
		areq->an_reqarg.cr_flag = CRYPTO_RESTRICTED;
	} else {
		areq->an_reqarg.cr_flag = 0;
	}

	areq->an_reqarg.cr_callback_func = kcf_last_req;
	areq->an_reqarg.cr_callback_arg = next_req;
	areq->an_isdual = B_TRUE;

	/*
	 * We would like to call kcf_submit_request() here. But,
	 * that is not possible as that routine allocates a new
	 * kcf_areq_node_t request structure, while we need to
	 * reuse the existing request structure.
	 */
	switch (pd->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		error = common_submit_request(pd, NULL, params,
		    KCF_RHNDL(KM_NOSLEEP));
		break;

	case CRYPTO_HW_PROVIDER: {
		kcf_provider_desc_t *old_pd;
		taskq_t *taskq = pd->pd_sched_info.ks_taskq;

		/*
		 * Set the params for the second step in the
		 * dual-ops.
		 */
		areq->an_params = *params;
		old_pd = areq->an_provider;
		KCF_PROV_REFRELE(old_pd);
		KCF_PROV_REFHOLD(pd);
		areq->an_provider = pd;

		/*
		 * Note that we have to do a taskq_dispatch()
		 * here as we may be in interrupt context.
		 */
		if (taskq_dispatch(taskq, process_req_hwp, areq,
		    TQ_NOSLEEP) == (taskqid_t)0) {
			error = CRYPTO_HOST_MEMORY;
		} else {
			error = CRYPTO_QUEUED;
		}
		break;
	}
	default:
		break;
	}

	/*
	 * We have to release the holds on the request and the provider
	 * in all cases.
	 */
	KCF_AREQ_REFRELE(areq);
	KCF_PROV_REFRELE(pd);

	if (error != CRYPTO_QUEUED) {
		/* restore, clean up, and invoke the client's callback */

		ct->dd_offset1 = next_req->kr_saveoffset;
		ct->dd_len1 = next_req->kr_savelen;
		areq->an_reqarg = next_req->kr_callreq;
		kmem_free(next_req, sizeof (kcf_dual_req_t));
		areq->an_isdual = B_FALSE;
		kcf_aop_done(areq, error);
	}
}
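Tying Examples #2, #3 and #4 together: when the framework emulates a dual operation, the submitter of the first step saves the consumer's original crypto_call_req_t in a kcf_dual_req_t and points the request's callback at kcf_next_req(); kcf_next_req() then submits the second step with kcf_last_req() as the callback, which restores everything and notifies the consumer. The setup helper below is hypothetical and only shows the shape of that first step; it is not the framework's actual allocation path, and it assumes the caller fills in kr_params for the second step and already holds the areq reference that kcf_last_req() releases.

/* Hypothetical first-step setup for an emulated dual operation (sketch). */
static kcf_dual_req_t *
dual_req_setup(kcf_areq_node_t *areq, int kmflag)
{
	kcf_dual_req_t *dr;

	/* kmflag is typically KM_NOSLEEP when callers may be in intr context. */
	dr = kmem_alloc(sizeof (kcf_dual_req_t), kmflag);
	if (dr == NULL)
		return (NULL);

	/* Save the consumer's callback; kcf_last_req() restores and calls it. */
	dr->kr_callreq = areq->an_reqarg;
	dr->kr_areq = areq;

	/* Route the first step's completion through kcf_next_req(). */
	areq->an_reqarg.cr_callback_func = kcf_next_req;
	areq->an_reqarg.cr_callback_arg = dr;
	areq->an_isdual = B_TRUE;

	/* kr_params for the second step is filled in by the caller (not shown). */
	return (dr);
}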