Example #1
int
crypto_sign_recover_init_prov(crypto_provider_t provider,
    crypto_session_id_t sid, crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *crq)
{
	int rv;
	crypto_ctx_t *ctx;
	kcf_req_params_t params;
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;

	ASSERT(KCF_PROV_REFHELD(pd));

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		rv = kcf_get_hardware_provider(mech->cm_type,
		    CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
		    &real_provider, CRYPTO_FG_SIGN_RECOVER);

		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}

	/* Allocate and initialize the canonical context */
	if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
		if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
			KCF_PROV_REFRELE(real_provider);
		return (CRYPTO_HOST_MEMORY);
	}

	KCF_WRAP_SIGN_OPS_PARAMS(&params, KCF_OP_SIGN_RECOVER_INIT, sid, mech,
	    key, NULL, NULL, tmpl);
	rv = kcf_submit_request(real_provider, ctx, crq, &params, B_FALSE);
	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
		KCF_PROV_REFRELE(real_provider);

	if ((rv == CRYPTO_SUCCESS) || (rv == CRYPTO_QUEUED))
		*ctxp = (crypto_context_t)ctx;
	else {
		/* Release the hold done in kcf_new_ctx(). */
		KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private);
	}

	return (rv);
}
Example #2
/*
 * Free the request after releasing all the holds.
 */
void
kcf_free_req(kcf_areq_node_t *areq)
{
	KCF_PROV_REFRELE(areq->an_provider);
	if (areq->an_context != NULL)
		KCF_CONTEXT_REFRELE(areq->an_context);

	if (areq->an_tried_plist != NULL)
		kcf_free_triedlist(areq->an_tried_plist);
	kmem_cache_free(kcf_areq_cache, areq);
}
Example #3
void
kcf_free_triedlist(kcf_prov_tried_t *list)
{
	kcf_prov_tried_t *l;

	while ((l = list) != NULL) {
		list = list->pt_next;
		KCF_PROV_REFRELE(l->pt_pd);
		kmem_free(l, sizeof (kcf_prov_tried_t));
	}
}
Example #4
/*
 * Pick a software-based provider and submit a request to seed
 * its random number generator.
 */
static void
rngprov_seed(uint8_t *buf, int len, uint_t entropy_est, uint32_t flags)
{
	kcf_provider_desc_t *pd = NULL;

	if (kcf_get_sw_prov(rngmech_type, &pd, NULL, B_FALSE) ==
	    CRYPTO_SUCCESS) {
		(void) KCF_PROV_SEED_RANDOM(pd, pd->pd_sid, buf, len,
		    entropy_est, flags, NULL);
		KCF_PROV_REFRELE(pd);
	}
}
Example #5
/*
 * We're done with this framework context, so free it. Note that freeing
 * the framework context (kcf_context) frees the global context (crypto_ctx).
 *
 * The provider is responsible for freeing provider private context after a
 * final or single operation and resetting the cc_provider_private field
 * to NULL. It should do this before it notifies the framework of the
 * completion. We still need to call KCF_PROV_FREE_CONTEXT to handle cases
 * like crypto_cancel_ctx(9f).
 */
void
kcf_free_context(kcf_context_t *kcf_ctx)
{
	kcf_provider_desc_t *pd = kcf_ctx->kc_prov_desc;
	crypto_ctx_t *gctx = &kcf_ctx->kc_glbl_ctx;
	kcf_context_t *kcf_secondctx = kcf_ctx->kc_secondctx;

	/* Release the second context, if any */

	if (kcf_secondctx != NULL)
		KCF_CONTEXT_REFRELE(kcf_secondctx);

	if (gctx->cc_provider_private != NULL) {
		mutex_enter(&pd->pd_lock);
		if (!KCF_IS_PROV_REMOVED(pd)) {
			/*
			 * Increment the provider's internal refcnt so it
			 * doesn't unregister from the framework while
			 * we're calling the entry point.
			 */
			KCF_PROV_IREFHOLD(pd);
			mutex_exit(&pd->pd_lock);
			(void) KCF_PROV_FREE_CONTEXT(pd, gctx);
			KCF_PROV_IREFRELE(pd);
		} else {
			mutex_exit(&pd->pd_lock);
		}
	}

	/* kcf_ctx->kc_prov_desc has a hold on pd */
	KCF_PROV_REFRELE(kcf_ctx->kc_prov_desc);

	/* check if this context is shared with a software provider */
	if ((gctx->cc_flags & CRYPTO_INIT_OPSTATE) &&
	    kcf_ctx->kc_sw_prov_desc != NULL) {
		KCF_PROV_REFRELE(kcf_ctx->kc_sw_prov_desc);
	}

	kmem_cache_free(kcf_context_cache, kcf_ctx);
}
Example #6
/*
 * Free an array of hardware provider descriptors.  A REFRELE
 * is done on each descriptor before the table is freed.
 */
void
kcf_free_provider_tab(uint_t count, kcf_provider_desc_t **array)
{
	kcf_provider_desc_t *prov_desc;
	int i;

	for (i = 0; i < count; i++) {
		if ((prov_desc = array[i]) != NULL) {
			KCF_PROV_REFRELE(prov_desc);
		}
	}
	kmem_free(array, count * sizeof (kcf_provider_desc_t *));
}
Example #7
/*
 * Same as crypto_digest_prov(), but relies on the KCF scheduler to
 * choose a provider. See crypto_digest_prov() comments for more information.
 */
int
crypto_digest(crypto_mechanism_t *mech, crypto_data_t *data,
    crypto_data_t *digest, crypto_call_req_t *crq)
{
	int error;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

retry:
	/* The pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, NULL, &error, list,
	    CRYPTO_FG_DIGEST_ATOMIC, CHECK_RESTRICT(crq),
	    data->cd_length)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lmech;

		lmech = *mech;
		KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
		error = KCF_PROV_DIGEST_ATOMIC(pd, pd->pd_sid, &lmech, data,
		    digest, KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, pd->pd_sid,
		    mech, NULL, data, digest);

		/* no crypto context to carry between multiple parts. */
		error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
	}

	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	KCF_PROV_REFRELE(pd);
	return (error);
}
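For context, a minimal synchronous caller of crypto_digest() could look like the sketch below. This is illustrative only and not taken from the source above: the helper name is made up, SUN_CKM_SHA256 and the crypto_mechanism_t/crypto_data_t layouts are assumed from the standard sys/crypto headers, and a NULL crypto_call_req_t selects the synchronous path.

#include <sys/crypto/api.h>

/* Hypothetical helper: SHA-256 digest of a flat kernel buffer. */
static int
digest_sha256(void *buf, size_t len, uint8_t *out)	/* out: 32 bytes */
{
	crypto_mechanism_t mech;
	crypto_data_t data, digest;

	mech.cm_type = crypto_mech2id(SUN_CKM_SHA256);
	mech.cm_param = NULL;
	mech.cm_param_len = 0;
	if (mech.cm_type == CRYPTO_MECH_INVALID)
		return (CRYPTO_MECHANISM_INVALID);

	bzero(&data, sizeof (data));
	data.cd_format = CRYPTO_DATA_RAW;
	data.cd_length = len;
	data.cd_raw.iov_base = (caddr_t)buf;
	data.cd_raw.iov_len = len;

	bzero(&digest, sizeof (digest));
	digest.cd_format = CRYPTO_DATA_RAW;
	digest.cd_length = 32;		/* SHA-256 output size */
	digest.cd_raw.iov_base = (caddr_t)out;
	digest.cd_raw.iov_len = 32;

	/* NULL call_req: synchronous; the KCF scheduler picks the provider. */
	return (crypto_digest(&mech, &data, &digest, NULL));
}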
Example #8
/*
 * Remove the provider specified by its id. A REFRELE is done on the
 * corresponding provider descriptor before this function returns.
 * Returns CRYPTO_INVALID_PROVIDER_ID if the provider id is not valid.
 */
int
kcf_prov_tab_rem_provider(crypto_provider_id_t prov_id)
{
	kcf_provider_desc_t *prov_desc;

	ASSERT(prov_tab != NULL);
	ASSERT(prov_tab_num >= 0);

	/*
	 * Validate provider id, since it can be specified by a 3rd-party
	 * provider.
	 */

	mutex_enter(&prov_tab_mutex);
	if (prov_id >= KCF_MAX_PROVIDERS ||
	    ((prov_desc = prov_tab[prov_id]) == NULL)) {
		mutex_exit(&prov_tab_mutex);
		return (CRYPTO_INVALID_PROVIDER_ID);
	}

	if (kcf_need_provtab_walk)
		kcf_free_unregistered_provs();
	mutex_exit(&prov_tab_mutex);

	/*
	 * The provider id must remain valid until the associated provider
	 * descriptor is freed. For this reason, we simply release our
	 * reference to the descriptor here. When the reference count
	 * reaches zero, kcf_free_provider_desc() will be invoked and
	 * the associated entry in the providers table will be released
	 * at that time.
	 */

	KCF_PROV_REFRELE(prov_desc);

#if DEBUG
	if (kcf_frmwrk_debug >= 1)
		kcf_prov_tab_dump("kcf_prov_tab_rem_provider");
#endif /* DEBUG */

	return (CRYPTO_SUCCESS);
}
Example #9
/* called from the CRYPTO_GET_SOFT_INFO ioctl */
int
crypto_get_soft_info(caddr_t name, uint_t *count, crypto_mech_name_t **array)
{
	ddi_modhandle_t modh = NULL;
	kcf_provider_desc_t *provider;
	int rv;

	provider = kcf_prov_tab_lookup_by_name(name);
	if (provider == NULL) {
		if (in_soft_config_list(name)) {
			char *tmp;
			int name_len;

			/* strlen("crypto/") + NULL terminator == 8 */
			name_len = strlen(name);
			tmp = kmem_alloc(name_len + 8, KM_SLEEP);
			bcopy("crypto/", tmp, 7);
			bcopy(name, &tmp[7], name_len);
			tmp[name_len + 7] = '\0';

			modh = ddi_modopen(tmp, KRTLD_MODE_FIRST, NULL);
			kmem_free(tmp, name_len + 8);

			if (modh == NULL) {
				return (CRYPTO_ARGUMENTS_BAD);
			}

			provider = kcf_prov_tab_lookup_by_name(name);
			if (provider == NULL) {
				return (CRYPTO_ARGUMENTS_BAD);
			}
		} else {
			return (CRYPTO_ARGUMENTS_BAD);
		}
	}

	rv = dup_mech_names(provider, array, count, KM_SLEEP);
	KCF_PROV_REFRELE(provider);
	if (modh != NULL)
		(void) ddi_modclose(modh);
	return (rv);
}
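The caller owns the mechanism array returned by crypto_get_soft_info() and is expected to free it. A hypothetical caller sketch follows; it assumes crypto_free_mech_list() (used in the later examples) is the matching free routine, and the helper name is made up.

/* Hypothetical: log the mechanisms advertised by a software provider. */
static void
print_soft_mechs(char *name)
{
	crypto_mech_name_t *mechs;
	uint_t count, i;

	if (crypto_get_soft_info(name, &count, &mechs) != CRYPTO_SUCCESS)
		return;

	for (i = 0; i < count; i++)
		cmn_err(CE_NOTE, "%s supports %s", name, mechs[i]);

	/* The duplicated list must be released by the caller. */
	crypto_free_mech_list(mechs, count);
}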
Example #10
int
crypto_object_find_init(crypto_provider_t provider, crypto_session_id_t sid,
    crypto_object_attribute_t *attrs, uint_t count, void **cookie,
    crypto_call_req_t *crq)
{
	kcf_req_params_t params;
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;
	int rv;

	ASSERT(KCF_PROV_REFHELD(pd));

	if (cookie == NULL) {
		return (CRYPTO_ARGUMENTS_BAD);
	}

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		rv = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
		    object_ops), CRYPTO_OBJECT_OFFSET(object_find_init),
		    CHECK_RESTRICT(crq), pd, &real_provider);

		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}

	if (CHECK_FASTPATH(crq, real_provider)) {
		rv = KCF_PROV_OBJECT_FIND_INIT(real_provider,
		    sid, attrs, count, cookie, KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, rv);
	} else {
		KCF_WRAP_OBJECT_OPS_PARAMS(&params, KCF_OP_OBJECT_FIND_INIT,
		    sid, 0, attrs, count, NULL, 0, cookie, NULL, 0, NULL);
		rv = kcf_submit_request(real_provider, NULL, crq,
		    &params, B_FALSE);
	}
	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
		KCF_PROV_REFRELE(real_provider);

	return (rv);
}
Example #11
/*
 * Return TRUE if at least one provider exists that can
 * supply random numbers.
 */
boolean_t
kcf_rngprov_check(void)
{
	int rv;
	kcf_provider_desc_t *pd;

	if ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    NULL, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {
		KCF_PROV_REFRELE(pd);
		/*
		 * We logged a warning once about no provider being available
		 * and now a provider became available. So, set the flag so
		 * that we can log again if the problem recurs.
		 */
		rng_ok_to_log = B_TRUE;
		rng_prov_found = B_TRUE;
		return (B_TRUE);
	} else {
		rng_prov_found = B_FALSE;
		return (B_FALSE);
	}
}
Example #12
int
crypto_session_close(crypto_provider_t provider, crypto_session_id_t sid,
    crypto_call_req_t *crq)
{
	int rv;
	kcf_req_params_t params;
	kcf_provider_desc_t *real_provider;
	kcf_provider_desc_t *pd = provider;

	if (pd == NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	ASSERT(KCF_PROV_REFHELD(pd));

	/* find a provider that supports session ops */
	(void) kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(session_ops),
	    CRYPTO_SESSION_OFFSET(session_close), CHECK_RESTRICT(crq),
	    pd, &real_provider);

	ASSERT(real_provider == pd ||
	    pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER);

	/* edge case is where the logical provider has no members */
	if (real_provider != NULL) {
		/* The fast path for SW providers. */
		if (CHECK_FASTPATH(crq, pd)) {
			rv = KCF_PROV_SESSION_CLOSE(real_provider,
			    sid, KCF_SWFP_RHNDL(crq), pd);
			KCF_PROV_INCRSTATS(pd, rv);
		} else {
			KCF_WRAP_SESSION_OPS_PARAMS(&params,
			    KCF_OP_SESSION_CLOSE, NULL, sid,
			    CRYPTO_USER, NULL, 0, pd);
			rv = kcf_submit_request(real_provider, NULL, crq,
			    &params, B_FALSE);
		}
		KCF_PROV_REFRELE(real_provider);
	}
	return (CRYPTO_SUCCESS);
}
Example #13
int
crypto_session_open(crypto_provider_t provider, crypto_session_id_t *sidp,
    crypto_call_req_t *crq)
{
	kcf_req_params_t params;
	kcf_provider_desc_t *real_provider;
	kcf_provider_desc_t *pd = provider;

	ASSERT(KCF_PROV_REFHELD(pd));

	/* find a provider that supports session ops */
	(void) kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(session_ops),
	    CRYPTO_SESSION_OFFSET(session_open), CHECK_RESTRICT(crq),
	    pd, &real_provider);

	if (real_provider != NULL) {
		int rv;

		ASSERT(real_provider == pd ||
		    pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER);

		if (CHECK_FASTPATH(crq, pd)) {
			rv = KCF_PROV_SESSION_OPEN(real_provider, sidp,
			    KCF_SWFP_RHNDL(crq), pd);
			KCF_PROV_INCRSTATS(pd, rv);
		} else {
			KCF_WRAP_SESSION_OPS_PARAMS(&params,
			    KCF_OP_SESSION_OPEN, sidp, 0, CRYPTO_USER, NULL,
			    0, pd);
			rv = kcf_submit_request(real_provider, NULL, crq,
			    &params, B_FALSE);
		}
		KCF_PROV_REFRELE(real_provider);

		if (rv != CRYPTO_SUCCESS) {
			return (rv);
		}
	}
	return (CRYPTO_SUCCESS);
}
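Taken together with crypto_session_close() in the previous example, the usual pattern is open/use/close on a provider the caller already holds a reference to. A minimal synchronous sketch (the helper name is made up; it is not part of the original source):

static int
with_provider_session(crypto_provider_t prov)
{
	crypto_session_id_t sid;
	int rv;

	/* The caller must already hold a reference on 'prov'. */
	rv = crypto_session_open(prov, &sid, NULL);	/* synchronous */
	if (rv != CRYPTO_SUCCESS)
		return (rv);

	/* ... issue session-based requests (object ops, etc.) here ... */

	/* crypto_session_close() above always returns CRYPTO_SUCCESS. */
	return (crypto_session_close(prov, sid, NULL));
}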
Example #14
int
crypto_object_set_attribute_value(crypto_provider_t provider,
    crypto_session_id_t sid, crypto_object_id_t object_handle,
    crypto_object_attribute_t *attrs, uint_t count, crypto_call_req_t *crq)
{
	kcf_req_params_t params;
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;
	int rv;

	ASSERT(KCF_PROV_REFHELD(pd));

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		rv = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
		    object_ops),
		    CRYPTO_OBJECT_OFFSET(object_set_attribute_value),
		    CHECK_RESTRICT(crq), pd, &real_provider);

		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}

	if (CHECK_FASTPATH(crq, real_provider)) {
		rv = KCF_PROV_OBJECT_SET_ATTRIBUTE_VALUE(real_provider,
		    sid, object_handle, attrs, count, KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, rv);
	} else {
		KCF_WRAP_OBJECT_OPS_PARAMS(&params,
		    KCF_OP_OBJECT_SET_ATTRIBUTE_VALUE, sid, object_handle,
		    attrs, count, NULL, 0, NULL, NULL, 0, NULL);
		rv = kcf_submit_request(real_provider, NULL, crq,
		    &params, B_FALSE);
	}
	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
		KCF_PROV_REFRELE(real_provider);

	return (rv);
}
Example #15
int
crypto_object_find(crypto_provider_t provider, void *cookie,
    crypto_object_id_t *handles, uint_t *count, uint_t max_count,
    crypto_call_req_t *crq)
{
	kcf_req_params_t params;
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;
	int rv;

	ASSERT(KCF_PROV_REFHELD(pd));

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		rv = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
		    object_ops), CRYPTO_OBJECT_OFFSET(object_find),
		    CHECK_RESTRICT(crq), pd, &real_provider);

		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}

	if (CHECK_FASTPATH(crq, real_provider)) {
		rv = KCF_PROV_OBJECT_FIND(real_provider, cookie, handles,
		    max_count, count, KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, rv);
	} else {
		KCF_WRAP_OBJECT_OPS_PARAMS(&params, KCF_OP_OBJECT_FIND, 0,
		    0, NULL, 0, handles, 0, NULL, cookie, max_count, count);
		rv = kcf_submit_request(real_provider, NULL, crq,
		    &params, B_FALSE);
	}
	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
		KCF_PROV_REFRELE(real_provider);

	return (rv);
}
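crypto_object_find_init() and crypto_object_find() are normally used as a pair: initialize a search with an attribute template, then pull handles in batches until fewer than the maximum are returned. A hypothetical sketch (not from the original source; the terminating "find final" call is omitted):

#define	FIND_BATCH	16

static int
find_objects(crypto_provider_t prov, crypto_session_id_t sid,
    crypto_object_attribute_t *tmpl, uint_t tmpl_count)
{
	crypto_object_id_t handles[FIND_BATCH];
	void *cookie = NULL;
	uint_t found;
	int rv;

	rv = crypto_object_find_init(prov, sid, tmpl, tmpl_count,
	    &cookie, NULL);
	if (rv != CRYPTO_SUCCESS)
		return (rv);

	do {
		rv = crypto_object_find(prov, cookie, handles, &found,
		    FIND_BATCH, NULL);
		if (rv != CRYPTO_SUCCESS)
			break;
		/* ... consume 'found' handles ... */
	} while (found == FIND_BATCH);

	/* A matching "find final" call would normally end the search here. */
	return (rv);
}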
Example #16
/*
 * Same as crypto_mac_init_prov(), but relies on the KCF scheduler to
 * choose a provider. See crypto_mac_init_prov() comments for more
 * information.
 */
int
crypto_mac_init(crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_ctx_template_t tmpl, crypto_context_t *ctxp,
    crypto_call_req_t  *crq)
{
	int error;
	kcf_mech_entry_t *me;
	kcf_provider_desc_t *pd;
	kcf_ctx_template_t *ctx_tmpl;
	crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
	kcf_prov_tried_t *list = NULL;

retry:
	/* The pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
	    list, CRYPTO_FG_MAC, CHECK_RESTRICT(crq), 0)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	/*
	 * For SW providers, check the validity of the context template.
	 * It is very rare that the generation number mismatches, so it
	 * is acceptable to fail here and let the consumer recover by
	 * freeing this tmpl and creating a new one for the key and the
	 * new SW provider.
	 */

	if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
	    ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
		if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
			if (list != NULL)
				kcf_free_triedlist(list);
			KCF_PROV_REFRELE(pd);
			return (CRYPTO_OLD_CTX_TEMPLATE);
		} else {
			spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
		}
	}

	if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
	    (pd->pd_flags & CRYPTO_HASH_NO_UPDATE)) {
		/*
		 * The hardware provider has limited HMAC support.
		 * So, we fall back early here to using a software provider.
		 *
		 * XXX - need to enhance to do the fallback later in
		 * crypto_mac_update() if the size of accumulated input data
		 * exceeds the maximum size digestible by the hardware provider.
		 */
		error = CRYPTO_BUFFER_TOO_BIG;
	} else {
		error = crypto_mac_init_prov(pd, pd->pd_sid, mech, key,
		    spi_ctx_tmpl, ctxp, crq);
	}
	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	KCF_PROV_REFRELE(pd);
	return (error);
}
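A multi-part consumer of crypto_mac_init() would follow it with update and final calls. The sketch below is illustrative only: it assumes the companion crypto_mac_update()/crypto_mac_final() interfaces of the kernel crypto API with their documented signatures, uses crypto_cancel_ctx(9F) to abandon the context on an intermediate failure, and the helper name is made up.

static int
hmac_two_parts(crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_data_t *part1, crypto_data_t *part2, crypto_data_t *mac)
{
	crypto_context_t ctx;
	int rv;

	/* NULL template and NULL call_req: no key schedule, synchronous. */
	rv = crypto_mac_init(mech, key, NULL, &ctx, NULL);
	if (rv != CRYPTO_SUCCESS)
		return (rv);

	if ((rv = crypto_mac_update(ctx, part1, NULL)) == CRYPTO_SUCCESS &&
	    (rv = crypto_mac_update(ctx, part2, NULL)) == CRYPTO_SUCCESS)
		rv = crypto_mac_final(ctx, mac, NULL);
	else
		crypto_cancel_ctx(ctx);	/* abandon the context on error */

	return (rv);
}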
Example #17
/*
 * Same as crypto_mac_verify_prov(), but relies on the KCF scheduler to choose
 * a provider. See crypto_mac_verify_prov() comments for more information.
 */
int
crypto_mac_verify(crypto_mechanism_t *mech, crypto_data_t *data,
    crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac,
    crypto_call_req_t *crq)
{
	int error;
	kcf_mech_entry_t *me;
	kcf_req_params_t params;
	kcf_provider_desc_t *pd;
	kcf_ctx_template_t *ctx_tmpl;
	crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;
	kcf_prov_tried_t *list = NULL;

retry:
	/* The pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error,
	    list, CRYPTO_FG_MAC_ATOMIC, CHECK_RESTRICT(crq),
	    data->cd_length)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	/*
	 * For SW providers, check the validity of the context template.
	 * It is very rare that the generation number mismatches, so it
	 * is acceptable to fail here and let the consumer recover by
	 * freeing this tmpl and creating a new one for the key and the
	 * new SW provider.
	 */
	if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
	    ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
		if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
			if (list != NULL)
				kcf_free_triedlist(list);
			KCF_PROV_REFRELE(pd);
			return (CRYPTO_OLD_CTX_TEMPLATE);
		} else {
			spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
		}
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lmech;

		lmech = *mech;
		KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);

		error = KCF_PROV_MAC_VERIFY_ATOMIC(pd, pd->pd_sid, &lmech, key,
		    data, mac, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    (pd->pd_flags & CRYPTO_HASH_NO_UPDATE) &&
		    (data->cd_length > pd->pd_hash_limit)) {
			/* see comments in crypto_mac() */
			error = CRYPTO_BUFFER_TOO_BIG;
		} else {
			KCF_WRAP_MAC_OPS_PARAMS(&params,
			    KCF_OP_MAC_VERIFY_ATOMIC, pd->pd_sid, mech,
			    key, data, mac, spi_ctx_tmpl);

			error = kcf_submit_request(pd, NULL, crq, &params,
			    KCF_ISDUALREQ(crq));
		}
	}

	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	KCF_PROV_REFRELE(pd);
	return (error);
}
Example #18
/*
 * Called from CRYPTO_LOAD_SOFT_DISABLED ioctl.
 * If new_count is 0, then completely remove the entry.
 */
int
crypto_load_soft_disabled(char *name, uint_t new_count,
    crypto_mech_name_t *new_array)
{
	kcf_provider_desc_t *provider = NULL;
	crypto_mech_name_t *prev_array;
	uint_t prev_count = 0;
	int rv;

	provider = kcf_prov_tab_lookup_by_name(name);
	if (provider != NULL) {
		mutex_enter(&provider->pd_lock);
		/*
		 * Check if any other thread is disabling or removing
		 * this provider. We return if this is the case.
		 */
		if (provider->pd_state >= KCF_PROV_DISABLED) {
			mutex_exit(&provider->pd_lock);
			KCF_PROV_REFRELE(provider);
			return (CRYPTO_BUSY);
		}
		provider->pd_state = KCF_PROV_DISABLED;
		mutex_exit(&provider->pd_lock);

		undo_register_provider(provider, B_TRUE);
		KCF_PROV_REFRELE(provider);
		if (provider->pd_kstat != NULL)
			KCF_PROV_REFRELE(provider);

		mutex_enter(&provider->pd_lock);
		/* Wait till the existing requests complete. */
		while (provider->pd_state != KCF_PROV_FREED) {
			cv_wait(&provider->pd_remove_cv, &provider->pd_lock);
		}
		mutex_exit(&provider->pd_lock);
	}

	if (new_count == 0) {
		kcf_policy_remove_by_name(name, &prev_count, &prev_array);
		crypto_free_mech_list(prev_array, prev_count);
		rv = CRYPTO_SUCCESS;
		goto out;
	}

	/* put disabled mechanisms into policy table */
	if ((rv = kcf_policy_load_soft_disabled(name, new_count, new_array,
	    &prev_count, &prev_array)) == CRYPTO_SUCCESS) {
		crypto_free_mech_list(prev_array, prev_count);
	}

out:
	if (provider != NULL) {
		redo_register_provider(provider);
		if (provider->pd_kstat != NULL)
			KCF_PROV_REFHOLD(provider);
		mutex_enter(&provider->pd_lock);
		provider->pd_state = KCF_PROV_READY;
		mutex_exit(&provider->pd_lock);
	} else if (rv == CRYPTO_SUCCESS) {
		/*
		 * There are some cases where it is useful to kCF clients
		 * to have a provider whose mechanism is enabled now to be
		 * available. So, we attempt to load it here.
		 *
		 * The check, new_count < prev_count, ensures that we do this
		 * only in the case where a mechanism(s) is now enabled.
		 * This check assumes that enable and disable are separate
		 * administrative actions and are not done in a single action.
		 */
		if (new_count < prev_count && (in_soft_config_list(name)) &&
		    (modload("crypto", name) != -1)) {
			struct modctl *mcp;
			boolean_t load_again = B_FALSE;

			if ((mcp = mod_hold_by_name(name)) != NULL) {
				mcp->mod_loadflags |= MOD_NOAUTOUNLOAD;

				/* memory pressure may have unloaded module */
				if (!mcp->mod_installed)
					load_again = B_TRUE;
				mod_release_mod(mcp);

				if (load_again)
					(void) modload("crypto", name);
			}
		}
	}

	return (rv);
}
Example #19
File: kcf_spi.c  Project: MarkGavalda/zfs
/*
 * This routine is used to add cryptographic providers to the KCF framework.
 * Providers pass a crypto_provider_info structure to crypto_register_provider()
 * and get back a handle.  The crypto_provider_info structure contains a
 * list of mechanisms supported by the provider and an ops vector containing
 * provider entry points.  Hardware providers call this routine in their attach
 * routines.  Software providers call this routine in their _init() routine.
 */
int
crypto_register_provider(crypto_provider_info_t *info,
    crypto_kcf_provider_handle_t *handle)
{
	char ks_name[KSTAT_STRLEN];

	kcf_provider_desc_t *prov_desc = NULL;
	int ret = CRYPTO_ARGUMENTS_BAD;

	if (info->pi_interface_version > CRYPTO_SPI_VERSION_3)
		return (CRYPTO_VERSION_MISMATCH);

	/*
	 * Check provider type, must be software, hardware, or logical.
	 */
	if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
	    info->pi_provider_type != CRYPTO_SW_PROVIDER &&
	    info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Allocate and initialize a new provider descriptor. We also
	 * hold it and release it when done.
	 */
	prov_desc = kcf_alloc_provider_desc(info);
	KCF_PROV_REFHOLD(prov_desc);

	prov_desc->pd_prov_type = info->pi_provider_type;

	/* provider-private handle, opaque to KCF */
	prov_desc->pd_prov_handle = info->pi_provider_handle;

	/* copy provider description string */
	if (info->pi_provider_description != NULL) {
		/*
		 * pi_provider_description is a string that can contain
		 * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
		 * INCLUDING the terminating null character. A bcopy()
		 * is necessary here as pd_description should not have
		 * a null character. See comments in kcf_alloc_provider_desc()
		 * for details on pd_description field.
		 */
		bcopy(info->pi_provider_description, prov_desc->pd_description,
		    MIN(strlen(info->pi_provider_description),
		    (size_t)CRYPTO_PROVIDER_DESCR_MAX_LEN));
	}

	if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
		if (info->pi_ops_vector == NULL) {
			goto bail;
		}
		copy_ops_vector_v1(info->pi_ops_vector,
		    prov_desc->pd_ops_vector);
		if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
			copy_ops_vector_v2(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
			prov_desc->pd_flags = info->pi_flags;
		}
		if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) {
			copy_ops_vector_v3(info->pi_ops_vector,
			    prov_desc->pd_ops_vector);
		}
	}

	/* object_ops and nostore_key_ops are mutually exclusive */
	if (prov_desc->pd_ops_vector->co_object_ops &&
	    prov_desc->pd_ops_vector->co_nostore_key_ops) {
		goto bail;
	}

	/* process the mechanisms supported by the provider */
	if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
		goto bail;

	/*
	 * Add provider to providers tables, also sets the descriptor
	 * pd_prov_id field.
	 */
	if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
		undo_register_provider(prov_desc, B_FALSE);
		goto bail;
	}

	/*
	 * We create a taskq only for a hardware provider. The global
	 * software queue is used for software providers. We handle ordering
	 * of multi-part requests in the taskq routine. So, it is safe to
	 * have multiple threads for the taskq. We pass TASKQ_PREPOPULATE flag
	 * to keep some entries cached to improve performance.
	 */
	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
		    crypto_taskq_threads, minclsyspri,
		    crypto_taskq_minalloc, crypto_taskq_maxalloc,
		    TASKQ_PREPOPULATE);
	else
		prov_desc->pd_sched_info.ks_taskq = NULL;

	/* no kernel session to logical providers */
	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Open a session for session-oriented providers. This session
		 * is used for all kernel consumers. This is fine as a provider
		 * is required to support multiple thread access to a session.
		 * We can do this only after the taskq has been created as we
		 * do a kcf_submit_request() to open the session.
		 */
		if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
			kcf_req_params_t params;

			KCF_WRAP_SESSION_OPS_PARAMS(&params,
			    KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
			    CRYPTO_USER, NULL, 0, prov_desc);
			ret = kcf_submit_request(prov_desc, NULL, NULL, &params,
			    B_FALSE);

			if (ret != CRYPTO_SUCCESS) {
				undo_register_provider(prov_desc, B_TRUE);
				ret = CRYPTO_FAILED;
				goto bail;
			}
		}
	}

	if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Create the kstat for this provider. There is a kstat
		 * installed for each successfully registered provider.
		 * This kstat is deleted, when the provider unregisters.
		 */
		if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
			    "NONAME", "provider_stats");
		} else {
			(void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
			    "NONAME", 0,
			    prov_desc->pd_prov_id, "provider_stats");
		}

		prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
		    KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
		    sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

		if (prov_desc->pd_kstat != NULL) {
			bcopy(&kcf_stats_ks_data_template,
			    &prov_desc->pd_ks_data,
			    sizeof (kcf_stats_ks_data_template));
			prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
			KCF_PROV_REFHOLD(prov_desc);
			KCF_PROV_IREFHOLD(prov_desc);
			prov_desc->pd_kstat->ks_private = prov_desc;
			prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
			kstat_install(prov_desc->pd_kstat);
		}
	}

	if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
		process_logical_providers(info, prov_desc);

	mutex_enter(&prov_desc->pd_lock);
	prov_desc->pd_state = KCF_PROV_READY;
	mutex_exit(&prov_desc->pd_lock);
	kcf_do_notify(prov_desc, B_TRUE);

	*handle = prov_desc->pd_kcf_prov_handle;
	ret = CRYPTO_SUCCESS;

bail:
	KCF_PROV_REFRELE(prov_desc);
	return (ret);
}
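A software provider's _init() path registers roughly as sketched below. This is a hypothetical skeleton, not the original source: the names are made up, the mechanism table and ops vector contents are elided, and the pi_mechanisms/pi_mech_list_count field names are assumed from the standard SPI header alongside the pi_* fields referenced above.

#include <sys/errno.h>
#include <sys/crypto/spi.h>

/* Mechanism table and ops vector for the provider; contents elided. */
static crypto_mech_info_t sample_mech_info_tab[1];
static crypto_ops_t sample_crypto_ops;

static crypto_kcf_provider_handle_t sample_prov_handle;

int
sample_provider_init(void)
{
	crypto_provider_info_t info;

	bzero(&info, sizeof (info));
	info.pi_interface_version = CRYPTO_SPI_VERSION_1;
	info.pi_provider_type = CRYPTO_SW_PROVIDER;
	info.pi_provider_description = "Sample Software Provider";
	info.pi_ops_vector = &sample_crypto_ops;
	info.pi_mech_list_count =
	    sizeof (sample_mech_info_tab) / sizeof (crypto_mech_info_t);
	info.pi_mechanisms = sample_mech_info_tab;

	if (crypto_register_provider(&info, &sample_prov_handle) !=
	    CRYPTO_SUCCESS)
		return (EACCES);

	return (0);
}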
Example #20
File: kcf_spi.c  Project: MarkGavalda/zfs
/*
 * This routine is used to notify the framework when a provider is being
 * removed.  Hardware providers call this routine in their detach routines.
 * Software providers call this routine in their _fini() routine.
 */
int
crypto_unregister_provider(crypto_kcf_provider_handle_t handle)
{
	uint_t mech_idx;
	kcf_provider_desc_t *desc;
	kcf_prov_state_t saved_state;

	/* lookup provider descriptor */
	if ((desc = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
		return (CRYPTO_UNKNOWN_PROVIDER);

	mutex_enter(&desc->pd_lock);
	/*
	 * Check if any other thread is disabling or removing
	 * this provider. We return if this is the case.
	 */
	if (desc->pd_state >= KCF_PROV_DISABLED) {
		mutex_exit(&desc->pd_lock);
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);
		return (CRYPTO_BUSY);
	}

	saved_state = desc->pd_state;
	desc->pd_state = KCF_PROV_REMOVED;

	if (saved_state == KCF_PROV_BUSY) {
		/*
		 * The per-provider taskq threads may be waiting. We
		 * signal them so that they can start failing requests.
		 */
		cv_broadcast(&desc->pd_resume_cv);
	}

	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/*
		 * Check if this provider is currently being used.
		 * pd_irefcnt is the number of holds from the internal
		 * structures. We add one to account for the above lookup.
		 */
		if (desc->pd_refcnt > desc->pd_irefcnt + 1) {
			desc->pd_state = saved_state;
			mutex_exit(&desc->pd_lock);
			/* Release reference held by kcf_prov_tab_lookup(). */
			KCF_PROV_REFRELE(desc);
			/*
			 * The administrator presumably will stop the clients
			 * thus removing the holds, when they get the busy
			 * return value.  Any retry will succeed then.
			 */
			return (CRYPTO_BUSY);
		}
	}
	mutex_exit(&desc->pd_lock);

	if (desc->pd_prov_type != CRYPTO_SW_PROVIDER) {
		remove_provider(desc);
	}

	if (desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
		/* remove the provider from the mechanisms tables */
		for (mech_idx = 0; mech_idx < desc->pd_mech_list_count;
		    mech_idx++) {
			kcf_remove_mech_provider(
			    desc->pd_mechanisms[mech_idx].cm_mech_name, desc);
		}
	}

	/* remove provider from providers table */
	if (kcf_prov_tab_rem_provider((crypto_provider_id_t)handle) !=
	    CRYPTO_SUCCESS) {
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);
		return (CRYPTO_UNKNOWN_PROVIDER);
	}

	delete_kstat(desc);

	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/* Release reference held by kcf_prov_tab_lookup(). */
		KCF_PROV_REFRELE(desc);

		/*
		 * Wait till the existing requests complete.
		 */
		mutex_enter(&desc->pd_lock);
		while (desc->pd_state != KCF_PROV_FREED)
			cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
		mutex_exit(&desc->pd_lock);
	} else {
		/*
		 * Wait until requests that have been sent to the provider
		 * complete.
		 */
		mutex_enter(&desc->pd_lock);
		while (desc->pd_irefcnt > 0)
			cv_wait(&desc->pd_remove_cv, &desc->pd_lock);
		mutex_exit(&desc->pd_lock);
	}

	kcf_do_notify(desc, B_FALSE);

	if (desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
		/*
		 * This is the only place where kcf_free_provider_desc()
		 * is called directly. KCF_PROV_REFRELE() should free the
		 * structure in all other places.
		 */
		ASSERT(desc->pd_state == KCF_PROV_FREED &&
		    desc->pd_refcnt == 0);
		kcf_free_provider_desc(desc);
	} else {
		KCF_PROV_REFRELE(desc);
	}

	return (CRYPTO_SUCCESS);
}
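The matching teardown in a software provider's _fini() is a single call; a hypothetical sketch using the handle saved at registration time (names made up, not from the original source):

int
sample_provider_fini(void)
{
	/*
	 * CRYPTO_BUSY means the framework or a consumer still holds
	 * references; _fini must fail so the module stays loaded and
	 * unregistration can be retried later.
	 */
	if (crypto_unregister_provider(sample_prov_handle) != CRYPTO_SUCCESS)
		return (EBUSY);

	return (0);
}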
Example #21
File: kcf_spi.c  Project: MarkGavalda/zfs
/*
 * This routine is used to notify the framework that the state of
 * a cryptographic provider has changed. Valid state codes are:
 *
 * CRYPTO_PROVIDER_READY
 * 	The provider indicates that it can process more requests. A provider
 *	will notify with this event if it has previously notified us with a
 *	CRYPTO_PROVIDER_BUSY.
 *
 * CRYPTO_PROVIDER_BUSY
 * 	The provider cannot take more requests.
 *
 * CRYPTO_PROVIDER_FAILED
 *	The provider encountered an internal error. The framework will not
 * 	be sending any more requests to the provider. The provider may notify
 *	with a CRYPTO_PROVIDER_READY, if it is able to recover from the error.
 *
 * This routine can be called from user or interrupt context.
 */
void
crypto_provider_notification(crypto_kcf_provider_handle_t handle, uint_t state)
{
	kcf_provider_desc_t *pd;

	/* lookup the provider from the given handle */
	if ((pd = kcf_prov_tab_lookup((crypto_provider_id_t)handle)) == NULL)
		return;

	mutex_enter(&pd->pd_lock);

	if (pd->pd_state <= KCF_PROV_VERIFICATION_FAILED)
		goto out;

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		cmn_err(CE_WARN, "crypto_provider_notification: "
		    "logical provider (%x) ignored\n", handle);
		goto out;
	}
	switch (state) {
	case CRYPTO_PROVIDER_READY:
		switch (pd->pd_state) {
		case KCF_PROV_BUSY:
			pd->pd_state = KCF_PROV_READY;
			/*
			 * Signal the per-provider taskq threads that they
			 * can start submitting requests.
			 */
			cv_broadcast(&pd->pd_resume_cv);
			break;

		case KCF_PROV_FAILED:
			/*
			 * The provider recovered from the error. Let us
			 * use it now.
			 */
			pd->pd_state = KCF_PROV_READY;
			break;
		default:
			break;
		}
		break;

	case CRYPTO_PROVIDER_BUSY:
		switch (pd->pd_state) {
		case KCF_PROV_READY:
			pd->pd_state = KCF_PROV_BUSY;
			break;
		default:
			break;
		}
		break;

	case CRYPTO_PROVIDER_FAILED:
		/*
		 * We note the failure and return. The per-provider taskq
		 * threads check this flag and start failing the
		 * requests, if it is set. See process_req_hwp() for details.
		 */
		switch (pd->pd_state) {
		case KCF_PROV_READY:
			pd->pd_state = KCF_PROV_FAILED;
			break;

		case KCF_PROV_BUSY:
			pd->pd_state = KCF_PROV_FAILED;
			/*
			 * The per-provider taskq threads may be waiting. We
			 * signal them so that they can start failing requests.
			 */
			cv_broadcast(&pd->pd_resume_cv);
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}
out:
	mutex_exit(&pd->pd_lock);
	KCF_PROV_REFRELE(pd);
}
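On the provider side, the notification is typically driven by the device queue filling and draining. A hypothetical hardware-provider sketch (not from the original source):

static void
sample_hw_queue_full(crypto_kcf_provider_handle_t handle)
{
	/* Ask the framework to stop submitting requests for now. */
	crypto_provider_notification(handle, CRYPTO_PROVIDER_BUSY);
}

static void
sample_hw_queue_drained(crypto_kcf_provider_handle_t handle)
{
	/* Resume; the waiting taskq threads are woken via pd_resume_cv. */
	crypto_provider_notification(handle, CRYPTO_PROVIDER_READY);
}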
Example #22
/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the modes - NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len)
{
	int rv, total_bytes;
	size_t blen;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, B_FALSE, 0)) != NULL) {

		prov_cnt ++;
		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/*
			 * We have to allocate a buffer here as we can not
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the bookkeeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES, 0, 0);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len, 0, 0);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}
Example #23
/*
 * Callback routine for the next part of a simulated dual part.
 * Schedules the next step.
 *
 * This routine can be called from interrupt context.
 */
void
kcf_next_req(void *next_req_arg, int status)
{
	kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
	kcf_req_params_t *params = &(next_req->kr_params);
	kcf_areq_node_t *areq = next_req->kr_areq;
	int error = status;
	kcf_provider_desc_t *pd = NULL;
	crypto_dual_data_t *ct = NULL;

	/* Stop the processing if an error occurred at this step */
	if (error != CRYPTO_SUCCESS) {
out:
		areq->an_reqarg = next_req->kr_callreq;
		KCF_AREQ_REFRELE(areq);
		kmem_free(next_req, sizeof (kcf_dual_req_t));
		areq->an_isdual = B_FALSE;
		kcf_aop_done(areq, error);
		return;
	}

	switch (params->rp_opgrp) {
	case KCF_OG_MAC: {

		/*
		 * The next req is submitted with the same reqid as the
		 * first part. The consumer only got back that reqid, and
		 * should still be able to cancel the operation during its
		 * second step.
		 */
		kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
		crypto_ctx_template_t mac_tmpl;
		kcf_mech_entry_t *me;

		ct = (crypto_dual_data_t *)mops->mo_data;
		mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;

		/* No expected recoverable failures, so no retry list */
		pd = kcf_get_mech_provider(mops->mo_framework_mechtype,
		    &me, &error, NULL, CRYPTO_FG_MAC_ATOMIC,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len2);

		if (pd == NULL) {
			error = CRYPTO_MECH_NOT_SUPPORTED;
			goto out;
		}
		/* Validate the MAC context template here */
		if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
		    (mac_tmpl != NULL)) {
			kcf_ctx_template_t *ctx_mac_tmpl;

			ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;

			if (ctx_mac_tmpl->ct_generation != me->me_gen_swprov) {
				KCF_PROV_REFRELE(pd);
				error = CRYPTO_OLD_CTX_TEMPLATE;
				goto out;
			}
			mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
		}

		break;
	}
	case KCF_OG_DECRYPT: {
		kcf_decrypt_ops_params_t *dcrops =
		    &(params->rp_u.decrypt_params);

		ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
		/* No expected recoverable failures, so no retry list */
		pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
		    NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), ct->dd_len1);

		if (pd == NULL) {
			error = CRYPTO_MECH_NOT_SUPPORTED;
			goto out;
		}
		break;
	}
	default:
		break;
	}

	/* The second step uses len2 and offset2 of the dual_data */
	next_req->kr_saveoffset = ct->dd_offset1;
	next_req->kr_savelen = ct->dd_len1;
	ct->dd_offset1 = ct->dd_offset2;
	ct->dd_len1 = ct->dd_len2;

	/* preserve if the caller is restricted */
	if (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED) {
		areq->an_reqarg.cr_flag = CRYPTO_RESTRICTED;
	} else {
		areq->an_reqarg.cr_flag = 0;
	}

	areq->an_reqarg.cr_callback_func = kcf_last_req;
	areq->an_reqarg.cr_callback_arg = next_req;
	areq->an_isdual = B_TRUE;

	/*
	 * We would like to call kcf_submit_request() here. But,
	 * that is not possible as that routine allocates a new
	 * kcf_areq_node_t request structure, while we need to
	 * reuse the existing request structure.
	 */
	switch (pd->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		error = common_submit_request(pd, NULL, params,
		    KCF_RHNDL(KM_NOSLEEP));
		break;

	case CRYPTO_HW_PROVIDER: {
		kcf_provider_desc_t *old_pd;
		taskq_t *taskq = pd->pd_sched_info.ks_taskq;

		/*
		 * Set the params for the second step in the
		 * dual-ops.
		 */
		areq->an_params = *params;
		old_pd = areq->an_provider;
		KCF_PROV_REFRELE(old_pd);
		KCF_PROV_REFHOLD(pd);
		areq->an_provider = pd;

		/*
		 * Note that we have to do a taskq_dispatch()
		 * here as we may be in interrupt context.
		 */
		if (taskq_dispatch(taskq, process_req_hwp, areq,
		    TQ_NOSLEEP) == (taskqid_t)0) {
			error = CRYPTO_HOST_MEMORY;
		} else {
			error = CRYPTO_QUEUED;
		}
		break;
	}
	default:
		break;
	}

	/*
	 * We have to release the holds on the request and the provider
	 * in all cases.
	 */
	KCF_AREQ_REFRELE(areq);
	KCF_PROV_REFRELE(pd);

	if (error != CRYPTO_QUEUED) {
		/* restore, clean up, and invoke the client's callback */

		ct->dd_offset1 = next_req->kr_saveoffset;
		ct->dd_len1 = next_req->kr_savelen;
		areq->an_reqarg = next_req->kr_callreq;
		kmem_free(next_req, sizeof (kcf_dual_req_t));
		areq->an_isdual = B_FALSE;
		kcf_aop_done(areq, error);
	}
}
Example #24
/*
 * This routine is called when a request to a provider has failed
 * with a recoverable error. This routine tries to find another provider
 * and dispatches the request to the new provider, if one is available.
 * We reuse the request structure.
 *
 * A return value of NULL from kcf_get_mech_provider() indicates
 * we have tried the last provider.
 */
static int
kcf_resubmit_request(kcf_areq_node_t *areq)
{
	int error = CRYPTO_FAILED;
	kcf_context_t *ictx;
	kcf_provider_desc_t *old_pd;
	kcf_provider_desc_t *new_pd;
	crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
	crypto_mech_type_t prov_mt1, prov_mt2;
	crypto_func_group_t fg = 0;

	if (!can_resubmit(areq, &mech1, &mech2, &fg))
		return (error);

	old_pd = areq->an_provider;
	/*
	 * Add old_pd to the list of providers already tried. We release
	 * the hold on old_pd (from the earlier kcf_get_mech_provider()) in
	 * kcf_free_triedlist().
	 */
	if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
	    KM_NOSLEEP) == NULL)
		return (error);

	if (mech1 && !mech2) {
		new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
		    areq->an_tried_plist, fg,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
	} else {
		ASSERT(mech1 != NULL && mech2 != NULL);

		new_pd = kcf_get_dual_provider(mech1, mech2, NULL, &prov_mt1,
		    &prov_mt2, &error, areq->an_tried_plist, fg, fg,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
	}

	if (new_pd == NULL)
		return (error);

	/*
	 * We reuse the old context by resetting provider specific
	 * fields in it.
	 */
	if ((ictx = areq->an_context) != NULL) {
		crypto_ctx_t *ctx;

		ASSERT(old_pd == ictx->kc_prov_desc);
		KCF_PROV_REFRELE(ictx->kc_prov_desc);
		KCF_PROV_REFHOLD(new_pd);
		ictx->kc_prov_desc = new_pd;

		ctx = &ictx->kc_glbl_ctx;
		ctx->cc_provider = new_pd->pd_prov_handle;
		ctx->cc_session = new_pd->pd_sid;
		ctx->cc_provider_private = NULL;
	}

	/* We reuse areq by resetting the provider and context fields. */
	KCF_PROV_REFRELE(old_pd);
	KCF_PROV_REFHOLD(new_pd);
	areq->an_provider = new_pd;
	mutex_enter(&areq->an_lock);
	areq->an_state = REQ_WAITING;
	mutex_exit(&areq->an_lock);

	switch (new_pd->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		error = kcf_disp_sw_request(areq);
		break;

	case CRYPTO_HW_PROVIDER: {
		taskq_t *taskq = new_pd->pd_sched_info.ks_taskq;

		if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
		    TASKQID_INVALID) {
			error = CRYPTO_HOST_MEMORY;
		} else {
			error = CRYPTO_QUEUED;
		}

		break;
	}
	default:
		break;
	}

	return (error);
}
Example #25
static int
sign_sr_atomic_common(crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_data_t *data, crypto_ctx_template_t tmpl, crypto_data_t *signature,
    crypto_call_req_t *crq, crypto_func_group_t fg)
{
	int error;
	kcf_mech_entry_t *me;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;
	kcf_ctx_template_t *ctx_tmpl;
	crypto_spi_ctx_template_t spi_ctx_tmpl = NULL;

retry:
	/* The pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error, list, fg,
	    CHECK_RESTRICT(crq), data->cd_length)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	/*
	 * For SW providers, check the validity of the context template.
	 * It is very rare that the generation number mismatches, so it
	 * is acceptable to fail here and let the consumer recover by
	 * freeing this tmpl and creating a new one for the key and the
	 * new SW provider.
	 */
	if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
	    ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) {
		if (ctx_tmpl->ct_generation != me->me_gen_swprov) {
			if (list != NULL)
				kcf_free_triedlist(list);
			KCF_PROV_REFRELE(pd);
			return (CRYPTO_OLD_CTX_TEMPLATE);
		} else {
			spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl;
		}
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lmech;

		lmech = *mech;
		KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
		if (fg == CRYPTO_FG_SIGN_ATOMIC)
			error = KCF_PROV_SIGN_ATOMIC(pd, pd->pd_sid, &lmech,
			    key, data, spi_ctx_tmpl, signature,
			    KCF_SWFP_RHNDL(crq));
		else
			error = KCF_PROV_SIGN_RECOVER_ATOMIC(pd, pd->pd_sid,
			    &lmech, key, data, spi_ctx_tmpl, signature,
			    KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		kcf_op_type_t op = ((fg == CRYPTO_FG_SIGN_ATOMIC) ?
		    KCF_OP_ATOMIC : KCF_OP_SIGN_RECOVER_ATOMIC);

		KCF_WRAP_SIGN_OPS_PARAMS(&params, op, pd->pd_sid,
		    mech, key, data, signature, spi_ctx_tmpl);

		/* no crypto context to carry between multiple parts. */
		error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
	}

	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	KCF_PROV_REFRELE(pd);
	return (error);
}
Example #26
/*
 * crypto_cipher_init_prov()
 *
 * Arguments:
 *
 *	pd:	provider descriptor
 *	sid:	session id
 *	mech:	crypto_mechanism_t pointer.
 *		mech_type is a valid value previously returned by
 *		crypto_mech2id();
 *		When the mech's parameter is not NULL, its definition depends
 *		on the standard definition of the mechanism.
 *	key:	pointer to a crypto_key_t structure.
 *	tmpl:	a crypto_ctx_template_t, opaque template of a context of an
 *		encryption  or decryption with the 'mech' using 'key'.
 *		'tmpl' is created by a previous call to
 *		crypto_create_ctx_template().
 *	ctxp:	Pointer to a crypto_context_t.
 *	func:	CRYPTO_FG_ENCRYPT or CRYPTO_FG_DECRYPT.
 *	cr:	crypto_call_req_t calling conditions and callback info.
 *
 * Description:
 *	This is a common function invoked internally by both
 *	crypto_encrypt_init() and crypto_decrypt_init().
 *	Asynchronously submits a request for, or synchronously performs the
 *	initialization of an encryption or a decryption operation.
 *	When possible and applicable, will internally use the pre-expanded key
 *	schedule from the context template, tmpl.
 *	When complete and successful, 'ctxp' will contain a crypto_context_t
 *	valid for later calls to encrypt_update() and encrypt_final(), or
 *	decrypt_update() and decrypt_final().
 *	The caller should hold a reference on the specified provider
 *	descriptor before calling this function.
 *
 * Context:
 *	Process or interrupt, according to the semantics dictated by the 'cr'.
 *
 * Returns:
 *	See comment in the beginning of the file.
 */
static int
crypto_cipher_init_prov(crypto_provider_t provider, crypto_session_id_t sid,
    crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_spi_ctx_template_t tmpl, crypto_context_t *ctxp,
    crypto_call_req_t *crq, crypto_func_group_t func)
{
	int error;
	crypto_ctx_t *ctx;
	kcf_req_params_t params;
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;

	ASSERT(KCF_PROV_REFHELD(pd));

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		if (func == CRYPTO_FG_ENCRYPT) {
			error = kcf_get_hardware_provider(mech->cm_type,
			    CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
			    &real_provider, CRYPTO_FG_ENCRYPT);
		} else {
			error = kcf_get_hardware_provider(mech->cm_type,
			    CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd,
			    &real_provider, CRYPTO_FG_DECRYPT);
		}

		if (error != CRYPTO_SUCCESS)
			return (error);
	}

	/* Allocate and initialize the canonical context */
	if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) {
		if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
			KCF_PROV_REFRELE(real_provider);
		return (CRYPTO_HOST_MEMORY);
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lmech;

		lmech = *mech;
		KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech);

		if (func == CRYPTO_FG_ENCRYPT)
			error = KCF_PROV_ENCRYPT_INIT(real_provider, ctx,
			    &lmech, key, tmpl, KCF_SWFP_RHNDL(crq));
		else {
			ASSERT(func == CRYPTO_FG_DECRYPT);

			error = KCF_PROV_DECRYPT_INIT(real_provider, ctx,
			    &lmech, key, tmpl, KCF_SWFP_RHNDL(crq));
		}
		KCF_PROV_INCRSTATS(pd, error);

		goto done;
	}

	/* Check if context sharing is possible */
	if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
	    key->ck_format == CRYPTO_KEY_RAW &&
	    KCF_CAN_SHARE_OPSTATE(pd, mech->cm_type)) {
		kcf_context_t *tctxp = (kcf_context_t *)ctx;
		kcf_provider_desc_t *tpd = NULL;
		crypto_mech_info_t *sinfo;

		if ((kcf_get_sw_prov(mech->cm_type, &tpd, &tctxp->kc_mech,
		    B_FALSE) == CRYPTO_SUCCESS)) {
			int tlen;

			sinfo = &(KCF_TO_PROV_MECHINFO(tpd, mech->cm_type));
			/*
			 * key->ck_length from the consumer is always in bits.
			 * We convert it to be in the same unit registered by
			 * the provider in order to do a comparison.
			 */
			if (sinfo->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES)
				tlen = key->ck_length >> 3;
			else