/*
 * Same as crypto_digest_init_prov(), but relies on the KCF scheduler
 * to choose a provider. See crypto_digest_init_prov() comments for
 * more information.
 */
int
crypto_digest_init(crypto_mechanism_t *mech, crypto_context_t *ctxp,
    crypto_call_req_t *crq)
{
	int rv;
	kcf_provider_desc_t *pd;
	kcf_prov_tried_t *tried = NULL;

	for (;;) {
		/* The pd is returned held */
		pd = kcf_get_mech_provider(mech->cm_type, NULL, &rv, tried,
		    CRYPTO_FG_DIGEST, CHECK_RESTRICT(crq), 0);
		if (pd == NULL) {
			/* No (more) providers support this mechanism. */
			if (tried != NULL)
				kcf_free_triedlist(tried);
			return (rv);
		}

		rv = crypto_digest_init_prov(pd, pd->pd_sid, mech, ctxp, crq);

		/* Done unless the failure is recoverable with another pd. */
		if (rv == CRYPTO_SUCCESS || rv == CRYPTO_QUEUED ||
		    !IS_RECOVERABLE(rv))
			break;

		/*
		 * Add pd to the linked list of providers tried; the hold on
		 * pd is released later by kcf_free_triedlist(). If the
		 * insert fails, give up and return the current error.
		 */
		if (kcf_insert_triedlist(&tried, pd, KCF_KMFLAG(crq)) == NULL)
			break;
	}

	if (tried != NULL)
		kcf_free_triedlist(tried);

	KCF_PROV_REFRELE(pd);
	return (rv);
}
/* * This routine is called for blocking reads. * * The argument is_taskq_thr indicates whether the caller is * the taskq thread dispatched by the timeout handler routine. * In this case, we cycle through all the providers * submitting a request to each provider to generate random numbers. * * For other cases, we pick a provider and submit a request to generate * random numbers. We retry using another provider if we get an error. * * Returns the number of bytes that are written to 'ptr'. Returns -1 * if no provider is found. ptr and need are unchanged. */ static int rngprov_getbytes(uint8_t *ptr, size_t need, boolean_t is_taskq_thr) { int rv; int prov_cnt = 0; int total_bytes = 0; kcf_provider_desc_t *pd; kcf_req_params_t params; kcf_prov_tried_t *list = NULL; while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv, list, CRYPTO_FG_RANDOM, 0)) != NULL) { prov_cnt++; KCF_WRAP_RANDOM_OPS_PARAMS(¶ms, KCF_OP_RANDOM_GENERATE, pd->pd_sid, ptr, need, 0, 0); rv = kcf_submit_request(pd, NULL, NULL, ¶ms, B_FALSE); ASSERT(rv != CRYPTO_QUEUED); if (rv == CRYPTO_SUCCESS) { total_bytes += need; if (is_taskq_thr) rndc_addbytes(ptr, need); else { KCF_PROV_REFRELE(pd); break; } } if (is_taskq_thr || rv != CRYPTO_SUCCESS) { /* Add pd to the linked list of providers tried. */ if (kcf_insert_triedlist(&list, pd, KM_SLEEP) == NULL) { KCF_PROV_REFRELE(pd); break; } } } if (list != NULL) kcf_free_triedlist(list); if (prov_cnt == 0) { /* no provider could be found. */ rng_prov_found = B_FALSE; return (-1); } else { rng_prov_found = B_TRUE; /* See comments in kcf_rngprov_check() */ rng_ok_to_log = B_TRUE; } return (total_bytes); }
/*
 * Same as crypto_sign_init_prov(), but relies on the KCF scheduler to
 * choose a provider.
 */
int
crypto_sign_init(crypto_mechanism_t *mech, crypto_key_t *key,
    crypto_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *crq)
{
	int rv;
	kcf_mech_entry_t *me;
	kcf_provider_desc_t *pd;
	kcf_prov_tried_t *tried = NULL;
	kcf_ctx_template_t *tmpl_priv;
	crypto_spi_ctx_template_t spi_tmpl = NULL;

	for (;;) {
		/* The pd is returned held */
		pd = kcf_get_mech_provider(mech->cm_type, &me, &rv, tried,
		    CRYPTO_FG_SIGN, CHECK_RESTRICT(crq), 0);
		if (pd == NULL) {
			if (tried != NULL)
				kcf_free_triedlist(tried);
			return (rv);
		}

		/*
		 * For SW providers, check the validity of the context template
		 * It is very rare that the generation number mis-matches, so
		 * it is acceptable to fail here, and let the consumer recover
		 * by freeing this tmpl and create a new one for the key and
		 * new SW provider.
		 */
		tmpl_priv = (kcf_ctx_template_t *)tmpl;
		if (pd->pd_prov_type == CRYPTO_SW_PROVIDER &&
		    tmpl_priv != NULL) {
			if (tmpl_priv->ct_generation != me->me_gen_swprov) {
				if (tried != NULL)
					kcf_free_triedlist(tried);
				KCF_PROV_REFRELE(pd);
				return (CRYPTO_OLD_CTX_TEMPLATE);
			}
			spi_tmpl = tmpl_priv->ct_prov_tmpl;
		}

		rv = crypto_sign_init_prov(pd, pd->pd_sid, mech, key,
		    spi_tmpl, ctxp, crq);

		/* Done unless the failure is recoverable with another pd. */
		if (rv == CRYPTO_SUCCESS || rv == CRYPTO_QUEUED ||
		    !IS_RECOVERABLE(rv))
			break;

		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&tried, pd, KCF_KMFLAG(crq)) == NULL)
			break;
	}

	if (tried != NULL)
		kcf_free_triedlist(tried);

	KCF_PROV_REFRELE(pd);
	return (rv);
}
/*
 * Same as crypto_digest_prov(), but relies on the KCF scheduler to
 * choose a provider. See crypto_digest_prov() comments for more information.
 */
int
crypto_digest(crypto_mechanism_t *mech, crypto_data_t *data,
    crypto_data_t *digest, crypto_call_req_t *crq)
{
	int error;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	kcf_prov_tried_t *list = NULL;

retry:
	/* The pd is returned held */
	if ((pd = kcf_get_mech_provider(mech->cm_type, NULL, &error, list,
	    CRYPTO_FG_DIGEST_ATOMIC, CHECK_RESTRICT(crq),
	    data->cd_length)) == NULL) {
		if (list != NULL)
			kcf_free_triedlist(list);
		return (error);
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, pd)) {
		crypto_mechanism_t lmech;

		/* Translate to the provider's mechanism number. */
		lmech = *mech;
		KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech);
		error = KCF_PROV_DIGEST_ATOMIC(pd, pd->pd_sid, &lmech, data,
		    digest, KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, error);
	} else {
		KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_ATOMIC, pd->pd_sid,
		    mech, NULL, data, digest);

		/* no crypto context to carry between multiple parts. */
		error = kcf_submit_request(pd, NULL, crq, &params, B_FALSE);
	}

	if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED &&
	    IS_RECOVERABLE(error)) {
		/* Add pd to the linked list of providers tried. */
		if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL)
			goto retry;
	}

	if (list != NULL)
		kcf_free_triedlist(list);

	KCF_PROV_REFRELE(pd);
	return (error);
}
/* * Same as crypto_mac_init_prov(), but relies on the KCF scheduler to * choose a provider. See crypto_mac_init_prov() comments for more * information. */ int crypto_mac_init(crypto_mechanism_t *mech, crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *crq) { int error; kcf_mech_entry_t *me; kcf_provider_desc_t *pd; kcf_ctx_template_t *ctx_tmpl; crypto_spi_ctx_template_t spi_ctx_tmpl = NULL; kcf_prov_tried_t *list = NULL; retry: /* The pd is returned held */ if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error, list, CRYPTO_FG_MAC, CHECK_RESTRICT(crq), 0)) == NULL) { if (list != NULL) kcf_free_triedlist(list); return (error); } /* * For SW providers, check the validity of the context template * It is very rare that the generation number mis-matches, so * is acceptable to fail here, and let the consumer recover by * freeing this tmpl and create a new one for the key and new SW * provider */ if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) && ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) { if (ctx_tmpl->ct_generation != me->me_gen_swprov) { if (list != NULL) kcf_free_triedlist(list); KCF_PROV_REFRELE(pd); return (CRYPTO_OLD_CTX_TEMPLATE); } else { spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl; } } if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && (pd->pd_flags & CRYPTO_HASH_NO_UPDATE)) { /* * The hardware provider has limited HMAC support. * So, we fallback early here to using a software provider. * * XXX - need to enhance to do the fallback later in * crypto_mac_update() if the size of accumulated input data * exceeds the maximum size digestable by hardware provider. */ error = CRYPTO_BUFFER_TOO_BIG; } else { error = crypto_mac_init_prov(pd, pd->pd_sid, mech, key, spi_ctx_tmpl, ctxp, crq); } if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED && IS_RECOVERABLE(error)) { /* Add pd to the linked list of providers tried. 
*/ if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL) goto retry; } if (list != NULL) kcf_free_triedlist(list); KCF_PROV_REFRELE(pd); return (error); }
/* * Same as crypto_mac_verify_prov(), but relies on the KCF scheduler to choose * a provider. See crypto_mac_verify_prov() comments for more information. */ int crypto_mac_verify(crypto_mechanism_t *mech, crypto_data_t *data, crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac, crypto_call_req_t *crq) { int error; kcf_mech_entry_t *me; kcf_req_params_t params; kcf_provider_desc_t *pd; kcf_ctx_template_t *ctx_tmpl; crypto_spi_ctx_template_t spi_ctx_tmpl = NULL; kcf_prov_tried_t *list = NULL; retry: /* The pd is returned held */ if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error, list, CRYPTO_FG_MAC_ATOMIC, CHECK_RESTRICT(crq), data->cd_length)) == NULL) { if (list != NULL) kcf_free_triedlist(list); return (error); } /* * For SW providers, check the validity of the context template * It is very rare that the generation number mis-matches, so * is acceptable to fail here, and let the consumer recover by * freeing this tmpl and create a new one for the key and new SW * provider */ if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) && ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) { if (ctx_tmpl->ct_generation != me->me_gen_swprov) { if (list != NULL) kcf_free_triedlist(list); KCF_PROV_REFRELE(pd); return (CRYPTO_OLD_CTX_TEMPLATE); } else { spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl; } } /* The fast path for SW providers. 
*/ if (CHECK_FASTPATH(crq, pd)) { crypto_mechanism_t lmech; lmech = *mech; KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech); error = KCF_PROV_MAC_VERIFY_ATOMIC(pd, pd->pd_sid, &lmech, key, data, mac, spi_ctx_tmpl, KCF_SWFP_RHNDL(crq)); KCF_PROV_INCRSTATS(pd, error); } else { if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && (pd->pd_flags & CRYPTO_HASH_NO_UPDATE) && (data->cd_length > pd->pd_hash_limit)) { /* see comments in crypto_mac() */ error = CRYPTO_BUFFER_TOO_BIG; } else { KCF_WRAP_MAC_OPS_PARAMS(¶ms, KCF_OP_MAC_VERIFY_ATOMIC, pd->pd_sid, mech, key, data, mac, spi_ctx_tmpl); error = kcf_submit_request(pd, NULL, crq, ¶ms, KCF_ISDUALREQ(crq)); } } if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED && IS_RECOVERABLE(error)) { /* Add pd to the linked list of providers tried. */ if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL) goto retry; } if (list != NULL) kcf_free_triedlist(list); KCF_PROV_REFRELE(pd); return (error); }
/*
 * Cycle through all the providers submitting a request to each provider
 * to generate random numbers. This is called for the modes - NONBLOCK_EXTRACT
 * and ALWAYS_EXTRACT.
 *
 * Returns the number of bytes that are written to 'ptr'. Returns -1
 * if no provider is found. ptr and len are unchanged.
 */
static int
rngprov_getbytes_nblk(uint8_t *ptr, size_t len)
{
	int rv, total_bytes;
	size_t blen;
	uchar_t *rndbuf;
	kcf_provider_desc_t *pd;
	kcf_req_params_t params;
	crypto_call_req_t req;
	kcf_prov_tried_t *list = NULL;
	int prov_cnt = 0;

	blen = 0;
	total_bytes = 0;
	req.cr_flag = CRYPTO_SKIP_REQID;
	req.cr_callback_func = notify_done;

	while ((pd = kcf_get_mech_provider(rngmech_type, NULL, NULL, &rv,
	    list, CRYPTO_FG_RANDOM, 0)) != NULL) {

		prov_cnt++;

		switch (pd->pd_prov_type) {
		case CRYPTO_HW_PROVIDER:
			/*
			 * We have to allocate a buffer here as we can not
			 * assume that the input buffer will remain valid
			 * when the callback comes. We use a fixed size buffer
			 * to simplify the book keeping.
			 */
			rndbuf = kmem_alloc(MINEXTRACTBYTES, KM_NOSLEEP);
			if (rndbuf == NULL) {
				KCF_PROV_REFRELE(pd);
				if (list != NULL)
					kcf_free_triedlist(list);
				return (total_bytes);
			}
			req.cr_callback_arg = rndbuf;
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, rndbuf, MINEXTRACTBYTES, 0, 0);
			break;

		case CRYPTO_SW_PROVIDER:
			/*
			 * We do not need to allocate a buffer in the software
			 * provider case as there is no callback involved. We
			 * avoid any extra data copy by directly passing 'ptr'.
			 */
			KCF_WRAP_RANDOM_OPS_PARAMS(&params,
			    KCF_OP_RANDOM_GENERATE,
			    pd->pd_sid, ptr, len, 0, 0);
			break;
		}

		rv = kcf_submit_request(pd, NULL, &req, &params, B_FALSE);
		if (rv == CRYPTO_SUCCESS) {
			switch (pd->pd_prov_type) {
			case CRYPTO_HW_PROVIDER:
				/*
				 * Since we have the input buffer handy,
				 * we directly copy to it rather than
				 * adding to the pool.
				 */
				blen = min(MINEXTRACTBYTES, len);
				bcopy(rndbuf, ptr, blen);
				if (len < MINEXTRACTBYTES)
					rndc_addbytes(rndbuf + len,
					    MINEXTRACTBYTES - len);
				ptr += blen;
				len -= blen;
				total_bytes += blen;
				break;

			case CRYPTO_SW_PROVIDER:
				total_bytes += len;
				len = 0;
				break;
			}
		}

		/*
		 * We free the buffer in the callback routine
		 * for the CRYPTO_QUEUED case.
		 */
		if (pd->pd_prov_type == CRYPTO_HW_PROVIDER &&
		    rv != CRYPTO_QUEUED) {
			bzero(rndbuf, MINEXTRACTBYTES);
			kmem_free(rndbuf, MINEXTRACTBYTES);
		}

		if (len == 0) {
			KCF_PROV_REFRELE(pd);
			break;
		}

		if (rv != CRYPTO_SUCCESS) {
			/* Add pd to the linked list of providers tried. */
			if (kcf_insert_triedlist(&list, pd, KM_NOSLEEP) ==
			    NULL) {
				KCF_PROV_REFRELE(pd);
				break;
			}
		}
	}

	if (list != NULL) {
		kcf_free_triedlist(list);
	}

	if (prov_cnt == 0) { /* no provider could be found. */
		rng_prov_found = B_FALSE;
		return (-1);
	} else {
		rng_prov_found = B_TRUE;
		/* See comments in kcf_rngprov_check() */
		rng_ok_to_log = B_TRUE;
	}

	return (total_bytes);
}
/*
 * This routine is called when a request to a provider has failed
 * with a recoverable error. This routine tries to find another provider
 * and dispatches the request to the new provider, if one is available.
 * We reuse the request structure.
 *
 * A return value of NULL from kcf_get_mech_provider() indicates
 * we have tried the last provider.
 */
static int
kcf_resubmit_request(kcf_areq_node_t *areq)
{
	int error = CRYPTO_FAILED;
	kcf_context_t *ictx;
	kcf_provider_desc_t *old_pd;
	kcf_provider_desc_t *new_pd;
	crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
	crypto_mech_type_t prov_mt1, prov_mt2;
	crypto_func_group_t fg = 0;

	if (!can_resubmit(areq, &mech1, &mech2, &fg))
		return (error);

	old_pd = areq->an_provider;

	/*
	 * Add old_pd to the list of providers already tried. We release
	 * the hold on old_pd (from the earlier kcf_get_mech_provider()) in
	 * kcf_free_triedlist().
	 */
	if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
	    KM_NOSLEEP) == NULL)
		return (error);

	if (mech1 && !mech2) {
		new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
		    areq->an_tried_plist, fg,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
	} else {
		ASSERT(mech1 != NULL && mech2 != NULL);

		new_pd = kcf_get_dual_provider(mech1, mech2, NULL, &prov_mt1,
		    &prov_mt2, &error, areq->an_tried_plist, fg, fg,
		    (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
	}

	if (new_pd == NULL)
		return (error);

	/*
	 * We reuse the old context by resetting provider specific
	 * fields in it.
	 */
	if ((ictx = areq->an_context) != NULL) {
		crypto_ctx_t *ctx;

		ASSERT(old_pd == ictx->kc_prov_desc);
		KCF_PROV_REFRELE(ictx->kc_prov_desc);
		KCF_PROV_REFHOLD(new_pd);
		ictx->kc_prov_desc = new_pd;

		ctx = &ictx->kc_glbl_ctx;
		ctx->cc_provider = new_pd->pd_prov_handle;
		ctx->cc_session = new_pd->pd_sid;
		ctx->cc_provider_private = NULL;
	}

	/* We reuse areq. by resetting the provider and context fields. */
	KCF_PROV_REFRELE(old_pd);
	KCF_PROV_REFHOLD(new_pd);
	areq->an_provider = new_pd;
	mutex_enter(&areq->an_lock);
	areq->an_state = REQ_WAITING;
	mutex_exit(&areq->an_lock);

	switch (new_pd->pd_prov_type) {
	case CRYPTO_SW_PROVIDER:
		error = kcf_disp_sw_request(areq);
		break;

	case CRYPTO_HW_PROVIDER: {
		taskq_t *taskq = new_pd->pd_sched_info.ks_taskq;

		/* A failed dispatch means the taskq is out of memory. */
		if (taskq_dispatch(taskq, process_req_hwp, areq, TQ_NOSLEEP) ==
		    TASKQID_INVALID) {
			error = CRYPTO_HOST_MEMORY;
		} else {
			error = CRYPTO_QUEUED;
		}

		break;
	}

	default:
		break;
	}

	return (error);
}
static int sign_sr_atomic_common(crypto_mechanism_t *mech, crypto_key_t *key, crypto_data_t *data, crypto_ctx_template_t tmpl, crypto_data_t *signature, crypto_call_req_t *crq, crypto_func_group_t fg) { int error; kcf_mech_entry_t *me; kcf_provider_desc_t *pd; kcf_req_params_t params; kcf_prov_tried_t *list = NULL; kcf_ctx_template_t *ctx_tmpl; crypto_spi_ctx_template_t spi_ctx_tmpl = NULL; retry: /* The pd is returned held */ if ((pd = kcf_get_mech_provider(mech->cm_type, &me, &error, list, fg, CHECK_RESTRICT(crq), data->cd_length)) == NULL) { if (list != NULL) kcf_free_triedlist(list); return (error); } /* * For SW providers, check the validity of the context template * It is very rare that the generation number mis-matches, so * it is acceptable to fail here, and let the consumer recover by * freeing this tmpl and create a new one for the key and new SW * provider. */ if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) && ((ctx_tmpl = (kcf_ctx_template_t *)tmpl) != NULL)) { if (ctx_tmpl->ct_generation != me->me_gen_swprov) { if (list != NULL) kcf_free_triedlist(list); KCF_PROV_REFRELE(pd); return (CRYPTO_OLD_CTX_TEMPLATE); } else { spi_ctx_tmpl = ctx_tmpl->ct_prov_tmpl; } } /* The fast path for SW providers. */ if (CHECK_FASTPATH(crq, pd)) { crypto_mechanism_t lmech; lmech = *mech; KCF_SET_PROVIDER_MECHNUM(mech->cm_type, pd, &lmech); if (fg == CRYPTO_FG_SIGN_ATOMIC) error = KCF_PROV_SIGN_ATOMIC(pd, pd->pd_sid, &lmech, key, data, spi_ctx_tmpl, signature, KCF_SWFP_RHNDL(crq)); else error = KCF_PROV_SIGN_RECOVER_ATOMIC(pd, pd->pd_sid, &lmech, key, data, spi_ctx_tmpl, signature, KCF_SWFP_RHNDL(crq)); KCF_PROV_INCRSTATS(pd, error); } else { kcf_op_type_t op = ((fg == CRYPTO_FG_SIGN_ATOMIC) ? KCF_OP_ATOMIC : KCF_OP_SIGN_RECOVER_ATOMIC); KCF_WRAP_SIGN_OPS_PARAMS(¶ms, op, pd->pd_sid, mech, key, data, signature, spi_ctx_tmpl); /* no crypto context to carry between multiple parts. 
*/ error = kcf_submit_request(pd, NULL, crq, ¶ms, B_FALSE); } if (error != CRYPTO_SUCCESS && error != CRYPTO_QUEUED && IS_RECOVERABLE(error)) { /* Add pd to the linked list of providers tried. */ if (kcf_insert_triedlist(&list, pd, KCF_KMFLAG(crq)) != NULL) goto retry; } if (list != NULL) kcf_free_triedlist(list); KCF_PROV_REFRELE(pd); return (error); }