/* * crypto_mac_prov() * * Arguments: * mech: crypto_mechanism_t pointer. * mech_type is a valid value previously returned by * crypto_mech2id(); * When the mech's parameter is not NULL, its definition depends * on the standard definition of the mechanism. * key: pointer to a crypto_key_t structure. * data: The message to compute the MAC for. * mac: Storage for the MAC. The length needed depends on the mechanism. * tmpl: a crypto_ctx_template_t, opaque template of a context of a * MAC with the 'mech' using 'key'. 'tmpl' is created by * a previous call to crypto_create_ctx_template(). * cr: crypto_call_req_t calling conditions and call back info. * * Description: * Asynchronously submits a request for, or synchronously performs a * single-part message authentication of 'data' with the mechanism * 'mech', using * the key 'key', on the specified provider with * the specified session id. * When complete and successful, 'mac' will contain the message * authentication code. * * Context: * Process or interrupt, according to the semantics dictated by the 'crq'. * * Returns: * See comment in the beginning of the file. */ int crypto_mac_prov(crypto_provider_t provider, crypto_session_id_t sid, crypto_mechanism_t *mech, crypto_data_t *data, crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_data_t *mac, crypto_call_req_t *crq) { kcf_req_params_t params; kcf_provider_desc_t *pd = provider; kcf_provider_desc_t *real_provider = pd; int rv; ASSERT(KCF_PROV_REFHELD(pd)); if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) { rv = kcf_get_hardware_provider(mech->cm_type, CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd, &real_provider, CRYPTO_FG_MAC_ATOMIC); if (rv != CRYPTO_SUCCESS) return (rv); } KCF_WRAP_MAC_OPS_PARAMS(¶ms, KCF_OP_ATOMIC, sid, mech, key, data, mac, tmpl); rv = kcf_submit_request(real_provider, NULL, crq, ¶ms, B_FALSE); if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) KCF_PROV_REFRELE(real_provider); return (rv); }
/*
 * crypto_session_logout()
 *
 * Logs out of session 'sid' on the given provider, synchronously or
 * asynchronously according to 'crq'.  A logical provider is routed to a
 * member provider implementing the session_logout entry point.
 * The caller must hold a reference on 'provider'.
 *
 * Returns:
 *	See comment in the beginning of the file.
 */
int
crypto_session_logout(crypto_provider_t provider, crypto_session_id_t sid,
    crypto_call_req_t *crq)
{
	kcf_req_params_t params;
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;
	int rv;

	ASSERT(KCF_PROV_REFHELD(pd));

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		/*
		 * Pass CHECK_RESTRICT(crq) like the other
		 * kcf_get_hardware_provider_nomech() call sites in this
		 * file; the original call here omitted it.
		 */
		rv = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
		    session_ops), CRYPTO_SESSION_OFFSET(session_logout),
		    CHECK_RESTRICT(crq), pd, &real_provider);
		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}

	/* The fast path for SW providers. */
	if (CHECK_FASTPATH(crq, real_provider)) {
		rv = KCF_PROV_SESSION_LOGOUT(real_provider, sid,
		    KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, rv);
	} else {
		KCF_WRAP_SESSION_OPS_PARAMS(&params, KCF_OP_SESSION_LOGOUT,
		    NULL, sid, 0, NULL, 0, real_provider);
		rv = kcf_submit_request(real_provider, NULL, crq, &params,
		    B_FALSE);
	}

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
		KCF_PROV_REFRELE(real_provider);

	return (rv);
}
/* * crypto_digest_prov() * * Arguments: * pd: pointer to the descriptor of the provider to use for this * operation. * sid: provider session id. * mech: crypto_mechanism_t pointer. * mech_type is a valid value previously returned by * crypto_mech2id(); * When the mech's parameter is not NULL, its definition depends * on the standard definition of the mechanism. * data: The message to be digested. * digest: Storage for the digest. The length needed depends on the * mechanism. * cr: crypto_call_req_t calling conditions and call back info. * * Description: * Asynchronously submits a request for, or synchronously performs the * digesting operation of 'data' on the specified * provider with the specified session. * When complete and successful, 'digest' will contain the digest value. * The caller should hold a reference on the specified provider * descriptor before calling this function. * * Context: * Process or interrupt, according to the semantics dictated by the 'cr'. * * Returns: * See comment in the beginning of the file. */ int crypto_digest_prov(crypto_provider_t provider, crypto_session_id_t sid, crypto_mechanism_t *mech, crypto_data_t *data, crypto_data_t *digest, crypto_call_req_t *crq) { kcf_req_params_t params; kcf_provider_desc_t *pd = provider; kcf_provider_desc_t *real_provider = pd; int rv; ASSERT(KCF_PROV_REFHELD(pd)); if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) { rv = kcf_get_hardware_provider(mech->cm_type, CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd, &real_provider, CRYPTO_FG_DIGEST_ATOMIC); if (rv != CRYPTO_SUCCESS) return (rv); } KCF_WRAP_DIGEST_OPS_PARAMS(¶ms, KCF_OP_ATOMIC, sid, mech, NULL, data, digest); /* no crypto context to carry between multiple parts. */ rv = kcf_submit_request(real_provider, NULL, crq, ¶ms, B_FALSE); if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) KCF_PROV_REFRELE(real_provider); return (rv); }
/* * crypto_mac_init_prov() * * Arguments: * pd: pointer to the descriptor of the provider to use for this * operation. * sid: provider session id. * mech: crypto_mechanism_t pointer. * mech_type is a valid value previously returned by * crypto_mech2id(); * When the mech's parameter is not NULL, its definition depends * on the standard definition of the mechanism. * key: pointer to a crypto_key_t structure. * tmpl: a crypto_ctx_template_t, opaque template of a context of a * MAC with the 'mech' using 'key'. 'tmpl' is created by * a previous call to crypto_create_ctx_template(). * ctxp: Pointer to a crypto_context_t. * cr: crypto_call_req_t calling conditions and call back info. * * Description: * Asynchronously submits a request for, or synchronously performs the * initialization of a MAC operation on the specified provider with * the specified session. * When possible and applicable, will internally use the pre-computed MAC * context from the context template, tmpl. * When complete and successful, 'ctxp' will contain a crypto_context_t * valid for later calls to mac_update() and mac_final(). * The caller should hold a reference on the specified provider * descriptor before calling this function. * * Context: * Process or interrupt, according to the semantics dictated by the 'cr'. * * Returns: * See comment in the beginning of the file. 
*/ int crypto_mac_init_prov(crypto_provider_t provider, crypto_session_id_t sid, crypto_mechanism_t *mech, crypto_key_t *key, crypto_spi_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *crq) { int rv; crypto_ctx_t *ctx; kcf_req_params_t params; kcf_provider_desc_t *pd = provider; kcf_provider_desc_t *real_provider = pd; ASSERT(KCF_PROV_REFHELD(pd)); if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) { rv = kcf_get_hardware_provider(mech->cm_type, CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd, &real_provider, CRYPTO_FG_MAC); if (rv != CRYPTO_SUCCESS) return (rv); } /* Allocate and initialize the canonical context */ if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) { if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) KCF_PROV_REFRELE(real_provider); return (CRYPTO_HOST_MEMORY); } /* The fast path for SW providers. */ if (CHECK_FASTPATH(crq, pd)) { crypto_mechanism_t lmech; lmech = *mech; KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech); rv = KCF_PROV_MAC_INIT(real_provider, ctx, &lmech, key, tmpl, KCF_SWFP_RHNDL(crq)); KCF_PROV_INCRSTATS(pd, rv); } else { KCF_WRAP_MAC_OPS_PARAMS(¶ms, KCF_OP_INIT, sid, mech, key, NULL, NULL, tmpl); rv = kcf_submit_request(real_provider, ctx, crq, ¶ms, B_FALSE); } if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) KCF_PROV_REFRELE(real_provider); if ((rv == CRYPTO_SUCCESS) || (rv == CRYPTO_QUEUED)) *ctxp = (crypto_context_t)ctx; else { /* Release the hold done in kcf_new_ctx(). */ KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private); } return (rv); }
int crypto_sign_recover_init_prov(crypto_provider_t provider, crypto_session_id_t sid, crypto_mechanism_t *mech, crypto_key_t *key, crypto_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *crq) { int rv; crypto_ctx_t *ctx; kcf_req_params_t params; kcf_provider_desc_t *pd = provider; kcf_provider_desc_t *real_provider = pd; ASSERT(KCF_PROV_REFHELD(pd)); if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) { rv = kcf_get_hardware_provider(mech->cm_type, CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd, &real_provider, CRYPTO_FG_SIGN_RECOVER); if (rv != CRYPTO_SUCCESS) return (rv); } /* Allocate and initialize the canonical context */ if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) { if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) KCF_PROV_REFRELE(real_provider); return (CRYPTO_HOST_MEMORY); } KCF_WRAP_SIGN_OPS_PARAMS(¶ms, KCF_OP_SIGN_RECOVER_INIT, sid, mech, key, NULL, NULL, tmpl); rv = kcf_submit_request(real_provider, ctx, crq, ¶ms, B_FALSE); if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) KCF_PROV_REFRELE(real_provider); if ((rv == CRYPTO_SUCCESS) || (rv == CRYPTO_QUEUED)) *ctxp = (crypto_context_t)ctx; else { /* Release the hold done in kcf_new_ctx(). */ KCF_CONTEXT_REFRELE((kcf_context_t *)ctx->cc_framework_private); } return (rv); }
/*
 * crypto_object_find_init()
 *
 * Initializes an object search matching the template 'attrs' (of 'count'
 * entries) in session 'sid' on the given provider, synchronously or
 * asynchronously according to 'crq'.  On success, *cookie receives the
 * opaque find handle for subsequent crypto_object_find() calls.
 * The caller must hold a reference on 'provider'.
 *
 * Returns:
 *	See comment in the beginning of the file.
 */
int
crypto_object_find_init(crypto_provider_t provider, crypto_session_id_t sid,
    crypto_object_attribute_t *attrs, uint_t count, void **cookie,
    crypto_call_req_t *crq)
{
	kcf_req_params_t params;
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;
	int ret;

	ASSERT(KCF_PROV_REFHELD(pd));

	/* A place to store the find handle is mandatory. */
	if (cookie == NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		ret = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
		    object_ops), CRYPTO_OBJECT_OFFSET(object_find_init),
		    CHECK_RESTRICT(crq), pd, &real_provider);
		if (ret != CRYPTO_SUCCESS)
			return (ret);
	}

	if (CHECK_FASTPATH(crq, real_provider)) {
		/* The fast path for SW providers. */
		ret = KCF_PROV_OBJECT_FIND_INIT(real_provider, sid, attrs,
		    count, cookie, KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, ret);
	} else {
		KCF_WRAP_OBJECT_OPS_PARAMS(&params, KCF_OP_OBJECT_FIND_INIT,
		    sid, 0, attrs, count, NULL, 0, cookie, NULL, 0, NULL);
		ret = kcf_submit_request(real_provider, NULL, crq, &params,
		    B_FALSE);
	}

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
		KCF_PROV_REFRELE(real_provider);

	return (ret);
}
/*
 * crypto_session_close()
 *
 * Closes session 'sid' on the given provider, synchronously or
 * asynchronously according to 'crq'.  The close is routed to a member
 * provider that supports session ops; if the (logical) provider has no
 * such member, the call is a no-op.  Always returns CRYPTO_SUCCESS
 * (except for a NULL provider), regardless of the underlying result.
 * The caller must hold a reference on 'provider'.
 */
int
crypto_session_close(crypto_provider_t provider, crypto_session_id_t sid,
    crypto_call_req_t *crq)
{
	int rv;
	kcf_req_params_t params;
	kcf_provider_desc_t *real_provider;
	kcf_provider_desc_t *pd = provider;

	if (pd == NULL)
		return (CRYPTO_ARGUMENTS_BAD);

	ASSERT(KCF_PROV_REFHELD(pd));

	/*
	 * Find a provider that supports session ops.  CHECK_RESTRICT(crq)
	 * added for consistency with the other
	 * kcf_get_hardware_provider_nomech() call sites in this file.
	 */
	(void) kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(session_ops),
	    CRYPTO_SESSION_OFFSET(session_close), CHECK_RESTRICT(crq),
	    pd, &real_provider);

	ASSERT(real_provider == pd ||
	    pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER);

	/* edge case is where the logical provider has no members */
	if (real_provider != NULL) {
		/* The fast path for SW providers. */
		if (CHECK_FASTPATH(crq, pd)) {
			rv = KCF_PROV_SESSION_CLOSE(real_provider, sid,
			    KCF_SWFP_RHNDL(crq), pd);
			KCF_PROV_INCRSTATS(pd, rv);
		} else {
			KCF_WRAP_SESSION_OPS_PARAMS(&params,
			    KCF_OP_SESSION_CLOSE, NULL, sid,
			    CRYPTO_USER, NULL, 0, pd);
			rv = kcf_submit_request(real_provider, NULL, crq,
			    &params, B_FALSE);
		}
		KCF_PROV_REFRELE(real_provider);
	}

	/*
	 * NOTE(review): 'rv' from the underlying close is deliberately
	 * discarded and success is always reported; presumably close is
	 * best-effort — confirm consumers do not depend on the real status.
	 */
	return (CRYPTO_SUCCESS);
}
/*
 * crypto_session_open()
 *
 * Opens a session on the given provider, synchronously or asynchronously
 * according to 'crq'.  The open is routed to a member provider that
 * supports session ops; on success, *sidp receives the new session id.
 * If the (logical) provider has no member supporting session ops, the
 * call is a no-op returning CRYPTO_SUCCESS.
 * The caller must hold a reference on 'provider'.
 */
int
crypto_session_open(crypto_provider_t provider, crypto_session_id_t *sidp,
    crypto_call_req_t *crq)
{
	kcf_req_params_t params;
	kcf_provider_desc_t *real_provider;
	kcf_provider_desc_t *pd = provider;

	ASSERT(KCF_PROV_REFHELD(pd));

	/*
	 * Find a provider that supports session ops.  CHECK_RESTRICT(crq)
	 * added for consistency with the other
	 * kcf_get_hardware_provider_nomech() call sites in this file.
	 */
	(void) kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(session_ops),
	    CRYPTO_SESSION_OFFSET(session_open), CHECK_RESTRICT(crq),
	    pd, &real_provider);

	if (real_provider != NULL) {
		int rv;

		ASSERT(real_provider == pd ||
		    pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER);

		/* The fast path for SW providers. */
		if (CHECK_FASTPATH(crq, pd)) {
			rv = KCF_PROV_SESSION_OPEN(real_provider, sidp,
			    KCF_SWFP_RHNDL(crq), pd);
			KCF_PROV_INCRSTATS(pd, rv);
		} else {
			KCF_WRAP_SESSION_OPS_PARAMS(&params,
			    KCF_OP_SESSION_OPEN, sidp, 0, CRYPTO_USER,
			    NULL, 0, pd);
			rv = kcf_submit_request(real_provider, NULL, crq,
			    &params, B_FALSE);
		}
		KCF_PROV_REFRELE(real_provider);

		if (rv != CRYPTO_SUCCESS)
			return (rv);
	}

	return (CRYPTO_SUCCESS);
}
/*
 * crypto_object_set_attribute_value()
 *
 * Sets the attributes 'attrs' (of 'count' entries) on the object
 * identified by 'object_handle' in session 'sid' on the given provider,
 * synchronously or asynchronously according to 'crq'.
 * The caller must hold a reference on 'provider'.
 *
 * Returns:
 *	See comment in the beginning of the file.
 */
int
crypto_object_set_attribute_value(crypto_provider_t provider,
    crypto_session_id_t sid, crypto_object_id_t object_handle,
    crypto_object_attribute_t *attrs, uint_t count, crypto_call_req_t *crq)
{
	kcf_req_params_t params;
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;
	int ret;

	ASSERT(KCF_PROV_REFHELD(pd));

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		ret = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
		    object_ops),
		    CRYPTO_OBJECT_OFFSET(object_set_attribute_value),
		    CHECK_RESTRICT(crq), pd, &real_provider);
		if (ret != CRYPTO_SUCCESS)
			return (ret);
	}

	if (CHECK_FASTPATH(crq, real_provider)) {
		/* The fast path for SW providers. */
		ret = KCF_PROV_OBJECT_SET_ATTRIBUTE_VALUE(real_provider,
		    sid, object_handle, attrs, count, KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, ret);
	} else {
		KCF_WRAP_OBJECT_OPS_PARAMS(&params,
		    KCF_OP_OBJECT_SET_ATTRIBUTE_VALUE, sid, object_handle,
		    attrs, count, NULL, 0, NULL, NULL, 0, NULL);
		ret = kcf_submit_request(real_provider, NULL, crq, &params,
		    B_FALSE);
	}

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
		KCF_PROV_REFRELE(real_provider);

	return (ret);
}
/*
 * crypto_object_find()
 *
 * Continues the object search identified by 'cookie' (from a prior
 * crypto_object_find_init()) on the given provider, returning up to
 * 'max_count' matching object handles in 'handles' and the number found
 * in *count.  Runs synchronously or asynchronously according to 'crq'.
 * The caller must hold a reference on 'provider'.
 *
 * Returns:
 *	See comment in the beginning of the file.
 */
int
crypto_object_find(crypto_provider_t provider, void *cookie,
    crypto_object_id_t *handles, uint_t *count, uint_t max_count,
    crypto_call_req_t *crq)
{
	kcf_req_params_t params;
	kcf_provider_desc_t *pd = provider;
	kcf_provider_desc_t *real_provider = pd;
	int ret;

	ASSERT(KCF_PROV_REFHELD(pd));

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) {
		ret = kcf_get_hardware_provider_nomech(CRYPTO_OPS_OFFSET(
		    object_ops), CRYPTO_OBJECT_OFFSET(object_find),
		    CHECK_RESTRICT(crq), pd, &real_provider);
		if (ret != CRYPTO_SUCCESS)
			return (ret);
	}

	if (CHECK_FASTPATH(crq, real_provider)) {
		/* The fast path for SW providers. */
		ret = KCF_PROV_OBJECT_FIND(real_provider, cookie, handles,
		    max_count, count, KCF_SWFP_RHNDL(crq));
		KCF_PROV_INCRSTATS(pd, ret);
	} else {
		KCF_WRAP_OBJECT_OPS_PARAMS(&params, KCF_OP_OBJECT_FIND, 0,
		    0, NULL, 0, handles, 0, NULL, cookie, max_count, count);
		ret = kcf_submit_request(real_provider, NULL, crq, &params,
		    B_FALSE);
	}

	if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
		KCF_PROV_REFRELE(real_provider);

	return (ret);
}
/*
 * Routine called by both ioctl and k-api. The consumer should
 * bundle the parameters into a kcf_req_params_t structure. A bunch
 * of macros are available in ops_impl.h for this bundling. They are:
 *
 *	KCF_WRAP_DIGEST_OPS_PARAMS()
 *	KCF_WRAP_MAC_OPS_PARAMS()
 *	KCF_WRAP_ENCRYPT_OPS_PARAMS()
 *	KCF_WRAP_DECRYPT_OPS_PARAMS() ... etc.
 *
 * It is the caller's responsibility to free the ctx argument when
 * appropriate. See the KCF_CONTEXT_COND_RELEASE macro for details.
 *
 * A NULL 'crq' selects the synchronous path (may block); a non-NULL
 * 'crq' selects the asynchronous path.  Returns CRYPTO_SUCCESS,
 * CRYPTO_QUEUED (async request accepted), or an error code.
 */
int
kcf_submit_request(kcf_provider_desc_t *pd, crypto_ctx_t *ctx,
    crypto_call_req_t *crq, kcf_req_params_t *params, boolean_t cont)
{
	int error = CRYPTO_SUCCESS;
	kcf_areq_node_t *areq;
	kcf_sreq_node_t *sreq;
	kcf_context_t *kcf_ctx;
	taskq_t *taskq = pd->pd_sched_info.ks_taskq;

	kcf_ctx = ctx ? (kcf_context_t *)ctx->cc_framework_private : NULL;

	/* Synchronous cases */
	if (crq == NULL) {
		switch (pd->pd_prov_type) {
		case CRYPTO_SW_PROVIDER:
			error = common_submit_request(pd, ctx, params,
			    KCF_RHNDL(KM_SLEEP));
			break;

		case CRYPTO_HW_PROVIDER:
			/*
			 * Special case for CRYPTO_SYNCHRONOUS providers that
			 * never return a CRYPTO_QUEUED error. We skip any
			 * request allocation and call the SPI directly.
			 */
			if ((pd->pd_flags & CRYPTO_SYNCHRONOUS) &&
			    EMPTY_TASKQ(taskq)) {
				KCF_PROV_IREFHOLD(pd);
				if (pd->pd_state == KCF_PROV_READY) {
					error = common_submit_request(pd, ctx,
					    params, KCF_RHNDL(KM_SLEEP));
					KCF_PROV_IREFRELE(pd);
					ASSERT(error != CRYPTO_QUEUED);
					break;
				}
				/* Provider not ready; fall back to sreq. */
				KCF_PROV_IREFRELE(pd);
			}

			sreq = kmem_cache_alloc(kcf_sreq_cache, KM_SLEEP);
			sreq->sn_state = REQ_ALLOCATED;
			sreq->sn_rv = CRYPTO_FAILED;
			sreq->sn_params = params;

			/*
			 * Note that we do not need to hold the context
			 * for synchronous case as the context will never
			 * become invalid underneath us. We do not need to hold
			 * the provider here either as the caller has a hold.
			 */
			sreq->sn_context = kcf_ctx;
			ASSERT(KCF_PROV_REFHELD(pd));
			sreq->sn_provider = pd;

			ASSERT(taskq != NULL);
			/*
			 * Call the SPI directly if the taskq is empty and the
			 * provider is not busy, else dispatch to the taskq.
			 * Calling directly is fine as this is the synchronous
			 * case. This is unlike the asynchronous case where we
			 * must always dispatch to the taskq.
			 */
			if (EMPTY_TASKQ(taskq) &&
			    pd->pd_state == KCF_PROV_READY) {
				process_req_hwp(sreq);
			} else {
				/*
				 * We can not tell from taskq_dispatch() return
				 * value if we exceeded maxalloc. Hence the
				 * check here. Since we are allowed to wait in
				 * the synchronous case, we wait for the taskq
				 * to become empty.
				 */
				if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
					taskq_wait(taskq);
				}

				(void) taskq_dispatch(taskq, process_req_hwp,
				    sreq, TQ_SLEEP);
			}

			/*
			 * Wait for the notification to arrive,
			 * if the operation is not done yet.
			 * Bug# 4722589 will make the wait a cv_wait_sig().
			 */
			mutex_enter(&sreq->sn_lock);
			while (sreq->sn_state < REQ_DONE)
				cv_wait(&sreq->sn_cv, &sreq->sn_lock);
			mutex_exit(&sreq->sn_lock);

			error = sreq->sn_rv;
			kmem_cache_free(kcf_sreq_cache, sreq);

			break;

		default:
			error = CRYPTO_FAILED;
			break;
		}

	} else {	/* Asynchronous cases */
		switch (pd->pd_prov_type) {
		case CRYPTO_SW_PROVIDER:
			if (!(crq->cr_flag & CRYPTO_ALWAYS_QUEUE)) {
				/*
				 * This case has less overhead since there is
				 * no switching of context.
				 */
				error = common_submit_request(pd, ctx, params,
				    KCF_RHNDL(KM_NOSLEEP));
			} else {
				/*
				 * CRYPTO_ALWAYS_QUEUE is set. We need to
				 * queue the request and return.
				 */
				areq = kcf_areqnode_alloc(pd, kcf_ctx, crq,
				    params, cont);
				if (areq == NULL)
					error = CRYPTO_HOST_MEMORY;
				else {
					if (!(crq->cr_flag
					    & CRYPTO_SKIP_REQID)) {
					/*
					 * Set the request handle. This handle
					 * is used for any crypto_cancel_req(9f)
					 * calls from the consumer. We have to
					 * do this before dispatching the
					 * request.
					 */
					crq->cr_reqid = kcf_reqid_insert(areq);
					}

					error = kcf_disp_sw_request(areq);
					/*
					 * There is an error processing this
					 * request. Remove the handle and
					 * release the request structure.
					 */
					if (error != CRYPTO_QUEUED) {
						if (!(crq->cr_flag
						    & CRYPTO_SKIP_REQID))
							kcf_reqid_delete(areq);
						KCF_AREQ_REFRELE(areq);
					}
				}
			}
			break;

		case CRYPTO_HW_PROVIDER:
			/*
			 * We need to queue the request and return.
			 */
			areq = kcf_areqnode_alloc(pd, kcf_ctx, crq, params,
			    cont);
			if (areq == NULL) {
				error = CRYPTO_HOST_MEMORY;
				goto done;
			}

			ASSERT(taskq != NULL);
			/*
			 * We can not tell from taskq_dispatch() return
			 * value if we exceeded maxalloc. Hence the check
			 * here.
			 */
			if (taskq->tq_nalloc >= crypto_taskq_maxalloc) {
				error = CRYPTO_BUSY;
				KCF_AREQ_REFRELE(areq);
				goto done;
			}

			if (!(crq->cr_flag & CRYPTO_SKIP_REQID)) {
				/*
				 * Set the request handle. This handle is used
				 * for any crypto_cancel_req(9f) calls from the
				 * consumer. We have to do this before
				 * dispatching the request.
				 */
				crq->cr_reqid = kcf_reqid_insert(areq);
			}

			if (taskq_dispatch(taskq, process_req_hwp, areq,
			    TQ_NOSLEEP) == TASKQID_INVALID) {
				/* Dispatch failed; undo the reqid and hold. */
				error = CRYPTO_HOST_MEMORY;
				if (!(crq->cr_flag & CRYPTO_SKIP_REQID))
					kcf_reqid_delete(areq);
				KCF_AREQ_REFRELE(areq);
			} else {
				error = CRYPTO_QUEUED;
			}
			break;

		default:
			error = CRYPTO_FAILED;
			break;
		}
	}

done:
	return (error);
}
/* * crypto_cipher_init_prov() * * Arguments: * * pd: provider descriptor * sid: session id * mech: crypto_mechanism_t pointer. * mech_type is a valid value previously returned by * crypto_mech2id(); * When the mech's parameter is not NULL, its definition depends * on the standard definition of the mechanism. * key: pointer to a crypto_key_t structure. * tmpl: a crypto_ctx_template_t, opaque template of a context of an * encryption or decryption with the 'mech' using 'key'. * 'tmpl' is created by a previous call to * crypto_create_ctx_template(). * ctxp: Pointer to a crypto_context_t. * func: CRYPTO_FG_ENCRYPT or CRYPTO_FG_DECRYPT. * cr: crypto_call_req_t calling conditions and call back info. * * Description: * This is a common function invoked internally by both * crypto_encrypt_init() and crypto_decrypt_init(). * Asynchronously submits a request for, or synchronously performs the * initialization of an encryption or a decryption operation. * When possible and applicable, will internally use the pre-expanded key * schedule from the context template, tmpl. * When complete and successful, 'ctxp' will contain a crypto_context_t * valid for later calls to encrypt_update() and encrypt_final(), or * decrypt_update() and decrypt_final(). * The caller should hold a reference on the specified provider * descriptor before calling this function. * * Context: * Process or interrupt, according to the semantics dictated by the 'cr'. * * Returns: * See comment in the beginning of the file. 
*/ static int crypto_cipher_init_prov(crypto_provider_t provider, crypto_session_id_t sid, crypto_mechanism_t *mech, crypto_key_t *key, crypto_spi_ctx_template_t tmpl, crypto_context_t *ctxp, crypto_call_req_t *crq, crypto_func_group_t func) { int error; crypto_ctx_t *ctx; kcf_req_params_t params; kcf_provider_desc_t *pd = provider; kcf_provider_desc_t *real_provider = pd; ASSERT(KCF_PROV_REFHELD(pd)); if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) { if (func == CRYPTO_FG_ENCRYPT) { error = kcf_get_hardware_provider(mech->cm_type, CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd, &real_provider, CRYPTO_FG_ENCRYPT); } else { error = kcf_get_hardware_provider(mech->cm_type, CRYPTO_MECH_INVALID, CHECK_RESTRICT(crq), pd, &real_provider, CRYPTO_FG_DECRYPT); } if (error != CRYPTO_SUCCESS) return (error); } /* Allocate and initialize the canonical context */ if ((ctx = kcf_new_ctx(crq, real_provider, sid)) == NULL) { if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER) KCF_PROV_REFRELE(real_provider); return (CRYPTO_HOST_MEMORY); } /* The fast path for SW providers. */ if (CHECK_FASTPATH(crq, pd)) { crypto_mechanism_t lmech; lmech = *mech; KCF_SET_PROVIDER_MECHNUM(mech->cm_type, real_provider, &lmech); if (func == CRYPTO_FG_ENCRYPT) error = KCF_PROV_ENCRYPT_INIT(real_provider, ctx, &lmech, key, tmpl, KCF_SWFP_RHNDL(crq)); else { ASSERT(func == CRYPTO_FG_DECRYPT); error = KCF_PROV_DECRYPT_INIT(real_provider, ctx, &lmech, key, tmpl, KCF_SWFP_RHNDL(crq)); } KCF_PROV_INCRSTATS(pd, error); goto done; } /* Check if context sharing is possible */ if (pd->pd_prov_type == CRYPTO_HW_PROVIDER && key->ck_format == CRYPTO_KEY_RAW && KCF_CAN_SHARE_OPSTATE(pd, mech->cm_type)) { kcf_context_t *tctxp = (kcf_context_t *)ctx; kcf_provider_desc_t *tpd = NULL; crypto_mech_info_t *sinfo; if ((kcf_get_sw_prov(mech->cm_type, &tpd, &tctxp->kc_mech, B_FALSE) == CRYPTO_SUCCESS)) { int tlen; sinfo = &(KCF_TO_PROV_MECHINFO(tpd, mech->cm_type)); /* * key->ck_length from the consumer is always in bits. 
* We convert it to be in the same unit registered by * the provider in order to do a comparison. */ if (sinfo->cm_mech_flags & CRYPTO_KEYSIZE_UNIT_IN_BYTES) tlen = key->ck_length >> 3; else