Example No. 1
/*
 * Utility routine called from crypto_load_soft_disabled(). Callers
 * must have previously called undo_register_provider().
 */
void
redo_register_provider(kcf_provider_desc_t *pd)
{
	/* process the mechanisms supported by the provider */
	(void) init_prov_mechs(NULL, pd);

	/*
	 * Hold the provider in the provider table. We must not call
	 * kcf_prov_tab_add_provider() here, as the provider descriptor
	 * is still valid and therefore still has an entry in the
	 * provider table.
	 */
	KCF_PROV_REFHOLD(pd);
	KCF_PROV_IREFHOLD(pd);
}
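
For context, a minimal sketch of how a caller such as crypto_load_soft_disabled()
might pair the two routines around an update of a provider's disabled-mechanism
list. The wrapper function and its body are hypothetical; only
undo_register_provider()/redo_register_provider() and the types come from the
surrounding code.

static int
update_disabled_mechs_sketch(kcf_provider_desc_t *pd)
{
	/* Drop the mechanism-table state taken at registration time. */
	undo_register_provider(pd, B_FALSE);

	/* ... install the updated disabled-mechanism list here ... */

	/* Re-process the mechanisms and re-take the holds. */
	redo_register_provider(pd);

	return (CRYPTO_SUCCESS);
}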
Example No. 2
/*
 * We're done with this framework context, so free it. Note that freeing
 * the framework context (kcf_context) also frees the global context
 * (crypto_ctx).
 *
 * The provider is responsible for freeing the provider-private context
 * after a final or single operation, and for resetting the
 * cc_provider_private field to NULL. It should do this before it notifies
 * the framework of the completion. We still need to call
 * KCF_PROV_FREE_CONTEXT to handle cases like crypto_cancel_ctx(9f).
 */
void
kcf_free_context(kcf_context_t *kcf_ctx)
{
	kcf_provider_desc_t *pd = kcf_ctx->kc_prov_desc;
	crypto_ctx_t *gctx = &kcf_ctx->kc_glbl_ctx;
	kcf_context_t *kcf_secondctx = kcf_ctx->kc_secondctx;

	/* Release the second context, if any */
	if (kcf_secondctx != NULL)
		KCF_CONTEXT_REFRELE(kcf_secondctx);

	if (gctx->cc_provider_private != NULL) {
		mutex_enter(&pd->pd_lock);
		if (!KCF_IS_PROV_REMOVED(pd)) {
			/*
			 * Increment the provider's internal refcnt so it
			 * doesn't unregister from the framework while
			 * we're calling the entry point.
			 */
			KCF_PROV_IREFHOLD(pd);
			mutex_exit(&pd->pd_lock);
			(void) KCF_PROV_FREE_CONTEXT(pd, gctx);
			KCF_PROV_IREFRELE(pd);
		} else {
			mutex_exit(&pd->pd_lock);
		}
	}

	/* kcf_ctx->kc_prov_desc has a hold on pd */
	KCF_PROV_REFRELE(kcf_ctx->kc_prov_desc);

	/* check if this context is shared with a software provider */
	if ((gctx->cc_flags & CRYPTO_INIT_OPSTATE) &&
	    kcf_ctx->kc_sw_prov_desc != NULL) {
		KCF_PROV_REFRELE(kcf_ctx->kc_sw_prov_desc);
	}

	kmem_cache_free(kcf_context_cache, kcf_ctx);
}
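
kcf_free_context() is normally reached through the last reference release
rather than called directly. Below is a sketch of what a release macro like
KCF_CONTEXT_REFRELE() might look like; the kc_refcnt field name and the
atomic helper are assumptions here, not the actual kcf.h definition.

#define	KCF_CONTEXT_REFRELE_SKETCH(ictx) {			\
	ASSERT((ictx)->kc_refcnt != 0);				\
	if (atomic_dec_32_nv(&(ictx)->kc_refcnt) == 0)		\
		kcf_free_context(ictx);				\
}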
Example No. 3
/*
 * Add a provider to the provider table. If no free entry can be found
 * for the new provider, return CRYPTO_HOST_MEMORY. Otherwise, add
 * the provider to the table, initialize the pd_prov_id field
 * of the provider descriptor to its index in the table,
 * and return CRYPTO_SUCCESS. Note that a REFHOLD is taken on the
 * provider while it is pointed to by a table entry.
 */
int
kcf_prov_tab_add_provider(kcf_provider_desc_t *prov_desc)
{
	uint_t i;

	ASSERT(prov_tab != NULL);

	mutex_enter(&prov_tab_mutex);

	/* find a free slot in the provider table */
	for (i = 1; i < KCF_MAX_PROVIDERS && prov_tab[i] != NULL; i++)
		;
	if (i == KCF_MAX_PROVIDERS) {
		/* ran out of provider table entries */
		mutex_exit(&prov_tab_mutex);
		cmn_err(CE_WARN, "out of provider entries");
		return (CRYPTO_HOST_MEMORY);
	}

	/* initialize the entry */
	prov_tab[i] = prov_desc;
	KCF_PROV_REFHOLD(prov_desc);
	KCF_PROV_IREFHOLD(prov_desc);
	prov_tab_num++;

	mutex_exit(&prov_tab_mutex);

	/* update the provider descriptor */
	prov_desc->pd_prov_id = i;

	/*
	 * The KCF-private provider handle is defined as the internal
	 * provider id.
	 */
	prov_desc->pd_kcf_prov_handle =
	    (crypto_kcf_provider_handle_t)prov_desc->pd_prov_id;

	return (CRYPTO_SUCCESS);
}
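
The REFHOLD convention above implies a matching lookup that takes a hold
under the same lock before handing the descriptor out. A hedged sketch of
such a lookup follows; it is modeled on the table layout shown here and is
not necessarily the real kcf_prov_tab_lookup().

kcf_provider_desc_t *
kcf_prov_tab_lookup_sketch(crypto_provider_id_t prov_id)
{
	kcf_provider_desc_t *prov_desc;

	mutex_enter(&prov_tab_mutex);

	if (prov_id >= KCF_MAX_PROVIDERS ||
	    (prov_desc = prov_tab[prov_id]) == NULL) {
		mutex_exit(&prov_tab_mutex);
		return (NULL);
	}

	/* caller must do a KCF_PROV_REFRELE() when done */
	KCF_PROV_REFHOLD(prov_desc);

	mutex_exit(&prov_tab_mutex);
	return (prov_desc);
}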
Example No. 4
/*
 * Routine called by both the ioctl path and the k-api. The consumer
 * should bundle the parameters into a kcf_req_params_t structure. A
 * number of macros are available in ops_impl.h for this bundling. They are:
 *
 *	KCF_WRAP_DIGEST_OPS_PARAMS()
 *	KCF_WRAP_MAC_OPS_PARAMS()
 *	KCF_WRAP_ENCRYPT_OPS_PARAMS()
 *	KCF_WRAP_DECRYPT_OPS_PARAMS() ... etc.
 *
 * It is the caller's responsibility to free the ctx argument when
 * appropriate. See the KCF_CONTEXT_COND_RELEASE macro for details.
 */
int
kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
    crypto_call_req_t *crq, kcf_req_params_t *params, boolean_t cont)
{
	int error = CRYPTO_SUCCESS;
	kcf_areq_node_t *areq;
	kcf_sreq_node_t *sreq;
	kcf_context_t *kcf_ctx;
	taskq_t *taskq = pd->pd_sched_info.ks_taskq;

	kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;

	/* Synchronous cases */
	if (crq == NULL) {
		switch (pd->pd_prov_type) {
		case CRYPTO_SW_PROVIDER:
			error = common_submit_request(pd, ctx, params,
			    KCF_RHNDL(KM_SLEEP));
			break;

		case CRYPTO_HW_PROVIDER:
			/*
			 * Special case for CRYPTO_SYNCHRONOUS providers that
			 * never return a CRYPTO_QUEUED error. We skip any
			 * request allocation and call the SPI directly.
			 */
			if ((pd->pd_flags & CRYPTO_SYNCHRONOUS) &&
			    EMPTY_TASKQ(taskq)) {
				KCF_PROV_IREFHOLD(pd);
				if (pd->pd_state == KCF_PROV_READY) {
					error = common_submit_request(pd, ctx,
					    params, KCF_RHNDL(KM_SLEEP));
					KCF_PROV_IREFRELE(pd);
					ASSERT(error != CRYPTO_QUEUED);
					break;
				}
				KCF_PROV_IREFRELE(pd);
			}

			sreq = kmem_cache_alloc(kcf_sreq_cache, KM_SLEEP);
			sreq->sn_state = REQ_ALLOCATED;
			sreq->sn_rv = CRYPTO_FAILED;
			sreq->sn_params = params;

			/*
			 * Note that we do not need to hold the context
			 * for the synchronous case, as the context can
			 * never become invalid underneath us. We do not
			 * need to hold the provider here either, as the
			 * caller already has a hold.
			 */
			sreq->sn_context = kcf_ctx;
			ASSERT(KCF_PROV_REFHELD(pd));
			sreq->sn_provider = pd;

			ASSERT(taskq != NULL);
			/*
			 * Call the SPI directly if the taskq is empty and the
			 * provider is not busy, else dispatch to the taskq.
			 * Calling directly is fine as this is the synchronous
			 * case. This is unlike the asynchronous case where we
			 * must always dispatch to the taskq.
			 */
			if (EMPTY_TASKQ(taskq) &&
			    pd->pd_state == KCF_PROV_READY) {
				process_req_hwp(sreq);
			} else {
				/*
				 * We cannot tell from the taskq_dispatch()
				 * return value whether we exceeded maxalloc;
				 * hence the check here. Since we are allowed
				 * to wait in the synchronous case, we wait for
				 * the taskq to become empty.
				 */
				if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
					taskq_wait(taskq);
				}

				(void) taskq_dispatch(taskq, process_req_hwp,
				    sreq, TQ_SLEEP);
			}

			/*
			 * Wait for the notification to arrive if the
			 * operation is not done yet.
			 * Bug# 4722589 will make the wait a cv_wait_sig().
			 */
			mutex_enter(&sreq->sn_lock);
			while (sreq->sn_state < REQ_DONE)
				cv_wait(&sreq->sn_cv, &sreq->sn_lock);
			mutex_exit(&sreq->sn_lock);

			error = sreq->sn_rv;
			kmem_cache_free(kcf_sreq_cache, sreq);

			break;

		default:
			error = CRYPTO_FAILED;
			break;
		}

	} else {	/* Asynchronous cases */
		switch (pd->pd_prov_type) {
		case CRYPTO_SW_PROVIDER:
			if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
				/*
				 * This case has less overhead since there is
				 * no switching of context.
				 */
				error = common_submit_request(pd, ctx, params,
				    KCF_RHNDL(KM_NOSLEEP));
			} else {
				/*
				 * CRYPTO_ALWAYS_QUEUE is set. We need to
				 * queue the request and return.
				 */
				areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
				    params, cont);
				if (areq == NULL)
					error = CRYPTO_HOST_MEMORY;
				else {
					if (!(crq->cr_flag
					    & CRYPTO_SKIP_REQID)) {
					/*
					 * Set the request handle. This handle
					 * is used for any crypto_cancel_req(9f)
					 * calls from the consumer. We have to
					 * do this before dispatching the
					 * request.
					 */
					crq->cr_reqid = kcf_reqid_insert(areq);
					}

					error = kcf_disp_sw_request(areq);
					/*
					 * If there was an error processing
					 * this request, remove the handle
					 * and release the request structure.
					 */
					if (error != CRYPTO_QUEUED) {
						if (!(crq->cr_flag
						    & CRYPTO_SKIP_REQID))
							kcf_reqid_delete(areq);
						KCF_AREQ_REFRELE(areq);
					}
				}
			}
			break;

		case CRYPTO_HW_PROVIDER:
			/*
			 * We need to queue the request and return.
			 */
			areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params,
			    cont);
			if (areq == NULL) {
				error = CRYPTO_HOST_MEMORY;
				goto done;
			}

			ASSERT(taskq != NULL);
			/*
			 * We cannot tell from the taskq_dispatch() return
			 * value whether we exceeded maxalloc; hence the
			 * check here.
			 */
			if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
				error = CRYPTO_BUSY;
				KCF_AREQ_REFRELE(areq);
				goto done;
			}

			if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
			/*
			 * Set the request handle. This handle is used
			 * for any crypto_cancel_req(9f) calls from the
			 * consumer. We have to do this before dispatching
			 * the request.
			 */
			crq->cr_reqid = kcf_reqid_insert(areq);
			}

			if (taskq_dispatch(taskq,
			    process_req_hwp, areq, TQ_NOSLEEP) ==
			    TASKQID_INVALID) {
				error = CRYPTO_HOST_MEMORY;
				if (!(crq->cr_flag & CRYPTO_SKIP_REQID))
					kcf_reqid_delete(areq);
				KCF_AREQ_REFRELE(areq);
			} else {
				error = CRYPTO_QUEUED;
			}
			break;

		default:
			error = CRYPTO_FAILED;
			break;
		}
	}

done:
	return (error);
}
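
To illustrate the synchronous path, here is a hedged sketch of a
consumer-side call: bundle the arguments with one of the wrapper macros and
pass crq == NULL so kcf_submit_request() blocks until the result is
available. The exact argument list of KCF_WRAP_DIGEST_OPS_PARAMS() is an
assumption here; see ops_impl.h for the authoritative definition.

static int
digest_atomic_sync_sketch(kcf_provider_desc_t *pd, crypto_session_id_t sid,
    crypto_mechanism_t *mech, crypto_data_t *data, crypto_data_t *digest)
{
	kcf_req_params_t params;

	KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, sid, mech,
	    NULL, data, digest);

	/* crq == NULL selects the synchronous cases above */
	return (kcf_submit_request(pd, NULL, NULL, &params, B_FALSE));
}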
Example No. 5
/*
 * This routine is called by the taskq associated with
 * each hardware provider. We notify the kernel consumer
 * via the callback routine on CRYPTO_SUCCESS or on a
 * failure.
 *
 * A request can be of type kcf_areq_node_t or of type
 * kcf_sreq_node_t.
 */
static void
process_req_hwp(void *ireq)
{
	int error = 0;
	crypto_ctx_t *ctx;
	kcf_call_type_t ctype;
	kcf_provider_desc_t *pd;
	kcf_areq_node_t *areq = (kcf_areq_node_t *)ireq;
	kcf_sreq_node_t *sreq = (kcf_sreq_node_t *)ireq;

	pd = ((ctype = GET_REQ_TYPE(ireq)) == CRYPTO_SYNCH) ?
	    sreq->sn_provider : areq->an_provider;

	/*
	 * Wait if flow control is in effect for the provider. A
	 * CRYPTO_PROVIDER_READY or CRYPTO_PROVIDER_FAILED
	 * notification will signal us. We also get signaled if
	 * the provider is unregistering.
	 */
	if (pd->pd_state == KCF_PROV_BUSY) {
		mutex_enter(&pd->pd_lock);
		while (pd->pd_state == KCF_PROV_BUSY)
			cv_wait(&pd->pd_resume_cv, &pd->pd_lock);
		mutex_exit(&pd->pd_lock);
	}

	/*
	 * Bump the internal reference count while the request is being
	 * processed. This is how we know when it's safe to unregister
	 * a provider. This step must precede the pd_state check below.
	 */
	KCF_PROV_IREFHOLD(pd);

	/*
	 * Fail the request if the provider has failed. We return a
	 * recoverable error so that the notified clients can attempt
	 * recovery. For async clients this is done in kcf_aop_done()
	 * and for sync clients it is done in the k-api routines.
	 */
	if (pd->pd_state >= KCF_PROV_FAILED) {
		error = CRYPTO_DEVICE_ERROR;
		goto bail;
	}

	if (ctype == CRYPTO_SYNCH) {
		mutex_enter(&sreq->sn_lock);
		sreq->sn_state = REQ_INPROGRESS;
		mutex_exit(&sreq->sn_lock);

		ctx = sreq->sn_context ? &sreq->sn_context->kc_glbl_ctx : NULL;
		error = common_submit_request(sreq->sn_provider, ctx,
		    sreq->sn_params, sreq);
	} else {
		kcf_context_t *ictx;
		ASSERT(ctype == CRYPTO_ASYNCH);

		/*
		 * We are in the per-hardware provider thread context and
		 * hence can sleep. Note that the caller would have done
		 * a taskq_dispatch(..., TQ_NOSLEEP) and would have returned.
		 */
		ctx = (ictx = areq->an_context) ? &ictx->kc_glbl_ctx : NULL;

		mutex_enter(&areq->an_lock);
		/*
		 * We need to maintain ordering for multi-part requests.
		 * an_is_my_turn is set to B_TRUE initially for a request
		 * when it is enqueued and there are no other requests
		 * for that context. Otherwise, it is set from kcf_aop_done()
		 * when the request before us in the chain of requests for
		 * the context completes. We get signaled at that point.
		 */
		if (ictx != NULL) {
			ASSERT(ictx->kc_prov_desc == areq->an_provider);

			while (areq->an_is_my_turn == B_FALSE) {
				cv_wait(&areq->an_turn_cv, &areq->an_lock);
			}
		}
		areq->an_state = REQ_INPROGRESS;
		mutex_exit(&areq->an_lock);

		error = common_submit_request(areq->an_provider, ctx,
		    &areq->an_params, areq);
	}

bail:
	if (error == CRYPTO_QUEUED) {
		/*
		 * The request is queued by the provider and we should
		 * get a crypto_op_notification() from the provider later.
		 * We notify the consumer at that time.
		 */
		return;
	} else {		/* CRYPTO_SUCCESS or other failure */
		KCF_PROV_IREFRELE(pd);
		if (ctype == CRYPTO_SYNCH)
			kcf_sop_done(sreq, error);
		else
			kcf_aop_done(areq, error);
	}
}
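
The cast of the same ireq pointer to both node types works only because the
two structures share a common initial member that GET_REQ_TYPE() reads. A
sketch of that layout follows, with illustrative field names rather than the
real kcf.h definitions:

typedef struct kcf_sreq_node_sketch {
	kcf_call_type_t	sn_type;	/* CRYPTO_SYNCH; must be first */
	/* ... synchronous request fields ... */
} kcf_sreq_node_sketch_t;

typedef struct kcf_areq_node_sketch {
	kcf_call_type_t	an_type;	/* CRYPTO_ASYNCH; must be first */
	/* ... asynchronous request fields ... */
} kcf_areq_node_sketch_t;

/* Reading the first member through either type is well defined in C. */
#define	GET_REQ_TYPE_SKETCH(ireq)	(*((kcf_call_type_t *)(ireq)))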
Example No. 6
/*
 * This routine is used to add cryptographic providers to the KCF framework.
 * Providers pass a crypto_provider_info structure to crypto_register_provider()
 * and get back a handle.  The crypto_provider_info structure contains a
 * list of mechanisms supported by the provider and an ops vector containing
 * provider entry points.  Hardware providers call this routine in their attach
 * routines.  Software providers call this routine in their _init() routine.
 */
int
crypto_register_provider(crypto_provider_info_t *info,
    crypto_kcf_provider_handle_t *handle)
{
	char ks_name[KSTAT_STRLEN];

	kcf_provider_desc_t *prov_desc = NULL;
	int ret = CRYPTO_ARGUMENTS_BAD;

	if (info->pi_interface_version > CRYPTO_SPI_VERSION_3)
		return (CRYPTO_VERSION_MISMATCH);

	/*
	 * Check provider type, must be software, hardware, or logical.
	 */
	if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
	    info->pi_provider_type != CRYPTO_SW_PROVIDER &&
	    info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Allocate and initialize a new provider descriptor. We also
	 * hold it and release it when done.
	 */
	prov_desc = kcf_alloc_provider_desc(info);
	KCF_PROV_REFHOLD(prov_desc);

	prov_desc->pd_prov_type = info->pi_provider_type;

	/* provider-private handle, opaque to KCF */
	prov_desc->pd_prov_handle = info->pi_provider_handle;

	/* copy provider description string */
	if (info->pi_provider_description != NULL) {
		/*
		 * pi_provider_description is a string that can contain
		 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
		 * INCLUDING the terminating null character. A bcopy()
		 * is necessary here, as pd_description should not have
		 * a null character. See the comments in
		 * kcf_alloc_provider_desc() for details on the
		 * pd_description field.
		 */
		bcopy(info->pi_provider_description, prov_desc->pd_description,
		    MIN(strlen(info->pi_provider_description),
		    (size_t)CRYPTO_PROVIDER_DESCR_MAX_LEN));
	}

	if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
		if (info->pi_ops_vector == NULL) {
			goto bail;
		}
		copy_ops_vector_v1(info->pi_ops_vector,
		    prov_desc->pd_ops_vector);
		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
			copy_ops_vector_v2(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
			prov_desc->pd_flags = info->pi_flags;
		}
		if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) {
			copy_ops_vector_v3(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
		}
	}

	/* object_ops and nostore_key_ops are mutually exclusive */
	if (prov_desc->pd_ops_vector->co_object_ops &&
	    prov_desc->pd_ops_vector->co_nostore_key_ops) {
		goto bail;
	}

	/* process the mechanisms supported by the provider */
	if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
		goto bail;

	/*
	 * Add the provider to the provider table; this also sets the
	 * descriptor's pd_prov_id field.
	 */
	if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
		undo_register_provider(prov_desc, B_FALSE);
		goto bail;
	}

	/*
	 * We create a taskq only for a hardware provider. The global
	 * software queue is used for software providers. We handle ordering
	 * of multi-part requests in the taskq routine. So, it is safe to
	 * have multiple threads for the taskq. We pass the TASKQ_PREPOPULATE
	 * flag to keep some entries cached to improve performance.
	 */
	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
		    crypto_taskq_threads, minclsyspri,
		    crypto_taskq_minalloc, crypto_taskq_maxalloc,
		    TASKQ_PREPOPULATE);
	else
		prov_desc->pd_sched_info.ks_taskq = NULL;

	/* no kernel session for logical providers */
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Open a session for session-oriented providers. This session
		 * is used for all kernel consumers. This is fine, as a
		 * provider is required to support multi-threaded access
		 * to a session.
		 * We can do this only after the taskq has been created as we
		 * do a kcf_submit_request() to open the session.
		 */
		if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
			kcf_req_params_t params;

			KCF_WRAP_SESSION_OPS_PARAMS(&params,
			    KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
			    CRYPTO_USER, NULL, 0, prov_desc);
			ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
			    B_FALSE);

			if (ret != CRYPTO_SUCCESS) {
				undo_register_provider(prov_desc, B_TRUE);
				ret = CRYPTO_FAILED;
				goto bail;
			}
		}
	}

	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Create the kstat for this provider. A kstat is installed
		 * for each successfully registered provider. This kstat is
		 * deleted when the provider unregisters.
		 */
		if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
			    "NONAME", "provider_stats");
		} else {
			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
			    "NONAME", 0,
			    prov_desc->pd_prov_id, "provider_stats");
		}

		prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
		    KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
		    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

		if (prov_desc->pd_kstat != NULL) {
			bcopy(&kcf_stats_ks_data_template,
			    &prov_desc->pd_ks_data,
			    sizeof (kcf_stats_ks_data_template));
			prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
			KCF_PROV_REFHOLD(prov_desc);
			KCF_PROV_IREFHOLD(prov_desc);
			prov_desc->pd_kstat->ks_private = prov_desc;
			prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
			kstat_install(prov_desc->pd_kstat);
		}
	}

	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		process_logical_providers(info, prov_desc);

	mutex_enter(&prov_desc->pd_lock);
	prov_desc->pd_state = KCF_PROV_READY;
	mutex_exit(&prov_desc->pd_lock);
	kcf_do_notify(prov_desc, B_TRUE);

	*handle = prov_desc->pd_kcf_prov_handle;
	ret = CRYPTO_SUCCESS;

bail:
	KCF_PROV_REFRELE(prov_desc);
	return (ret);
}
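
Finally, a hedged sketch of the software-provider registration pattern the
header comment describes: register from _init() and keep the returned handle
for a later crypto_unregister_provider() at _fini() time. The my_* names and
the modlinkage contents are illustrative, not taken from a real provider.

static struct modlinkage my_modlinkage;			/* hypothetical */
static crypto_provider_info_t my_prov_info;		/* hypothetical */
static crypto_kcf_provider_handle_t my_prov_handle = 0;

int
_init(void)
{
	int ret;

	if ((ret = mod_install(&my_modlinkage)) != 0)
		return (ret);

	/*
	 * A registration failure is typically not fatal to the module
	 * load; the provider can retry or remain unregistered.
	 */
	if (crypto_register_provider(&my_prov_info, &my_prov_handle) !=
	    CRYPTO_SUCCESS)
		cmn_err(CE_WARN, "my_prov: crypto_register_provider() failed");

	return (0);
}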