/*
 * Create a new context.
 */
crypto_ctx_t *
kcf_new_ctx(crypto_call_req_t *crq, kcf_provider_desc_t *pd,
    crypto_session_id_t sid)
{
    crypto_ctx_t *ctx;
    kcf_context_t *kcf_ctx;

    kcf_ctx = kmem_cache_alloc(kcf_context_cache,
        (crq == NULL) ? KM_SLEEP : KM_NOSLEEP);
    if (kcf_ctx == NULL)
        return (NULL);

    /* initialize the context for the consumer */
    kcf_ctx->kc_refcnt = 1;
    kcf_ctx->kc_req_chain_first = NULL;
    kcf_ctx->kc_req_chain_last = NULL;
    kcf_ctx->kc_secondctx = NULL;
    KCF_PROV_REFHOLD(pd);
    kcf_ctx->kc_prov_desc = pd;
    kcf_ctx->kc_sw_prov_desc = NULL;
    kcf_ctx->kc_mech = NULL;

    ctx = &kcf_ctx->kc_glbl_ctx;
    ctx->cc_provider = pd->pd_prov_handle;
    ctx->cc_session = sid;
    ctx->cc_provider_private = NULL;
    ctx->cc_framework_private = (void *)kcf_ctx;
    ctx->cc_flags = 0;
    ctx->cc_opstate = NULL;

    return (ctx);
}
/*
 * Performs a digest update on the specified key. Note that there is
 * no k-API crypto_digest_key() equivalent of this function.
 */
int
crypto_digest_key_prov(crypto_context_t context, crypto_key_t *key,
    crypto_call_req_t *cr)
{
    crypto_ctx_t *ctx = (crypto_ctx_t *)context;
    kcf_context_t *kcf_ctx;
    kcf_provider_desc_t *pd;
    int error;
    kcf_req_params_t params;

    if ((ctx == NULL) ||
        ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
        ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
        return (CRYPTO_INVALID_CONTEXT);
    }

    ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
    KCF_PROV_REFHOLD(pd);

    /* The fast path for SW providers. */
    if (CHECK_FASTPATH(cr, pd)) {
        error = KCF_PROV_DIGEST_KEY(pd, ctx, key, NULL);
        KCF_PROV_INCRSTATS(pd, error);
    } else {
        KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_DIGEST_KEY,
            ctx->cc_session, NULL, key, NULL, NULL);
        error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
    }

    KCF_PROV_REFRELE(pd);
    return (error);
}
/*
 * See comments for crypto_digest_update() and crypto_digest_final().
 */
int
crypto_digest_single(crypto_context_t context, crypto_data_t *data,
    crypto_data_t *digest, crypto_call_req_t *cr)
{
    crypto_ctx_t *ctx = (crypto_ctx_t *)context;
    kcf_context_t *kcf_ctx;
    kcf_provider_desc_t *pd;
    int error;
    kcf_req_params_t params;

    if ((ctx == NULL) ||
        ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
        ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
        return (CRYPTO_INVALID_CONTEXT);
    }

    KCF_PROV_REFHOLD(pd);

    /* The fast path for SW providers. */
    if (CHECK_FASTPATH(cr, pd)) {
        error = KCF_PROV_DIGEST(pd, ctx, data, digest, NULL);
        KCF_PROV_INCRSTATS(pd, error);
    } else {
        KCF_WRAP_DIGEST_OPS_PARAMS(&params, KCF_OP_SINGLE, pd->pd_sid,
            NULL, NULL, data, digest);
        error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
    }

    KCF_PROV_REFRELE(pd);

    /* Release the hold done in kcf_new_ctx() during init step. */
    KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
    return (error);
}
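/*
 * Illustrative sketch (not part of the original source): how a kernel
 * consumer might drive the digest context whose lifetime is managed
 * above. The mechanism name SUN_CKM_SHA256, the raw buffers, and the
 * synchronous submission (NULL crypto_call_req_t) are assumptions for
 * the example. On completion, crypto_digest_single() drops the context
 * hold taken in kcf_new_ctx(), so the caller does no explicit context
 * cleanup here.
 */
static int
example_digest(char *msg, size_t msglen, char *out, size_t outlen)
{
    crypto_mechanism_t mech;
    crypto_context_t ctx;
    crypto_data_t data, digest;
    int rv;

    mech.cm_type = crypto_mech2id(SUN_CKM_SHA256);
    if (mech.cm_type == CRYPTO_MECH_INVALID)
        return (CRYPTO_MECHANISM_INVALID);
    mech.cm_param = NULL;
    mech.cm_param_len = 0;

    /* describe the input and output buffers as raw crypto_data_t */
    data.cd_format = CRYPTO_DATA_RAW;
    data.cd_offset = 0;
    data.cd_length = msglen;
    data.cd_raw.iov_base = msg;
    data.cd_raw.iov_len = msglen;

    digest.cd_format = CRYPTO_DATA_RAW;
    digest.cd_offset = 0;
    digest.cd_length = outlen;
    digest.cd_raw.iov_base = out;
    digest.cd_raw.iov_len = outlen;

    /* init allocates the context via kcf_new_ctx() */
    if ((rv = crypto_digest_init(&mech, &ctx, NULL)) != CRYPTO_SUCCESS)
        return (rv);

    /* single-part: digest the data and finalize in one call */
    return (crypto_digest_single(ctx, &data, &digest, NULL));
}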
/*
 * Returns the provider descriptor corresponding to the specified
 * device name and instance. A REFHOLD is done on the descriptor
 * before it is returned to the caller. It is the responsibility
 * of the caller to do a REFRELE once it is done with the provider
 * descriptor. Only hardware providers are returned by this function.
 */
kcf_provider_desc_t *
kcf_prov_tab_lookup_by_dev(char *name, uint_t instance)
{
    kcf_provider_desc_t *prov_desc;
    uint_t i;

    mutex_enter(&prov_tab_mutex);
    for (i = 0; i < KCF_MAX_PROVIDERS; i++) {
        if ((prov_desc = prov_tab[i]) != NULL &&
            (!KCF_IS_PROV_REMOVED(prov_desc)) &&
            prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER) {
            ASSERT(prov_desc->pd_name != NULL);
            if (strncmp(prov_desc->pd_name, name,
                MAXNAMELEN) == 0 &&
                prov_desc->pd_instance == instance) {
                KCF_PROV_REFHOLD(prov_desc);
                mutex_exit(&prov_tab_mutex);
                return (prov_desc);
            }
        }
    }
    mutex_exit(&prov_tab_mutex);

    return (NULL);
}
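/*
 * Illustrative sketch (not from the original source): the hold/release
 * discipline the comment above requires of callers. The driver name
 * "dca" and instance 0 are made-up arguments for the example.
 */
static void
example_lookup_by_dev(void)
{
    kcf_provider_desc_t *pd;

    if ((pd = kcf_prov_tab_lookup_by_dev("dca", 0)) == NULL)
        return;    /* no matching usable HW provider */

    /* ... use pd; the REFHOLD keeps the descriptor from being freed ... */

    KCF_PROV_REFRELE(pd);    /* caller must drop the hold when done */
}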
/*
 * Returns in the location pointed to by pd a pointer to the descriptor
 * for the software provider for the specified mechanism.
 * The provider descriptor is returned held and it is the caller's
 * responsibility to release it when done. The mechanism entry
 * is returned if the optional argument mep is non-NULL.
 *
 * Returns one of the CRYPTO_* error codes on failure, and
 * CRYPTO_SUCCESS on success.
 */
int
kcf_get_sw_prov(crypto_mech_type_t mech_type, kcf_provider_desc_t **pd,
    kcf_mech_entry_t **mep, boolean_t log_warn)
{
    kcf_mech_entry_t *me;

    /* get the mechanism entry for this mechanism */
    if (kcf_get_mech_entry(mech_type, &me) != KCF_SUCCESS)
        return (CRYPTO_MECHANISM_INVALID);

    /*
     * Get the software provider for this mechanism.
     * Lock the mech_entry until we grab the 'pd'.
     */
    mutex_enter(&me->me_mutex);
    if (me->me_sw_prov == NULL ||
        (*pd = me->me_sw_prov->pm_prov_desc) == NULL) {
        /* no SW provider for this mechanism */
        if (log_warn)
            cmn_err(CE_WARN, "no SW provider for \"%s\"\n",
                me->me_name);
        mutex_exit(&me->me_mutex);
        return (CRYPTO_MECH_NOT_SUPPORTED);
    }

    KCF_PROV_REFHOLD(*pd);
    mutex_exit(&me->me_mutex);

    if (mep != NULL)
        *mep = me;

    return (CRYPTO_SUCCESS);
}
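/*
 * Illustrative sketch (not from the original source): typical use of
 * kcf_get_sw_prov(). The mechanism type is assumed to have been
 * resolved earlier, e.g. via crypto_mech2id(); the work done with the
 * provider is elided.
 */
static int
example_get_sw_prov(crypto_mech_type_t mt)
{
    kcf_provider_desc_t *pd;
    kcf_mech_entry_t *me;
    int rv;

    rv = kcf_get_sw_prov(mt, &pd, &me, B_TRUE);
    if (rv != CRYPTO_SUCCESS)
        return (rv);

    /* ... submit work to the software provider 'pd' ... */

    KCF_PROV_REFRELE(pd);    /* release the hold taken on our behalf */
    return (CRYPTO_SUCCESS);
}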
int
crypto_sign_recover_single(crypto_context_t context, crypto_data_t *data,
    crypto_data_t *signature, crypto_call_req_t *cr)
{
    crypto_ctx_t *ctx = (crypto_ctx_t *)context;
    kcf_context_t *kcf_ctx;
    kcf_provider_desc_t *pd;
    int error;
    kcf_req_params_t params;

    if ((ctx == NULL) ||
        ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
        ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
        return (CRYPTO_INVALID_CONTEXT);
    }

    KCF_PROV_REFHOLD(pd);
    KCF_WRAP_SIGN_OPS_PARAMS(&params, KCF_OP_SIGN_RECOVER, 0, NULL,
        NULL, data, signature, NULL);
    error = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
    KCF_PROV_REFRELE(pd);

    /* Release the hold done in kcf_new_ctx() during init step. */
    KCF_CONTEXT_COND_RELEASE(error, kcf_ctx);
    return (error);
}
/*
 * See comments for crypto_digest_final().
 */
int
crypto_sign_final(crypto_context_t context, crypto_data_t *signature,
    crypto_call_req_t *cr)
{
    crypto_ctx_t *ctx = (crypto_ctx_t *)context;
    kcf_context_t *kcf_ctx;
    kcf_provider_desc_t *pd;
    int rv;
    kcf_req_params_t params;

    if ((ctx == NULL) ||
        ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
        ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
        return (CRYPTO_INVALID_CONTEXT);
    }

    ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
    KCF_PROV_REFHOLD(pd);
    KCF_WRAP_SIGN_OPS_PARAMS(&params, KCF_OP_FINAL,
        ctx->cc_session, NULL, NULL, NULL, signature, NULL);
    rv = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
    KCF_PROV_REFRELE(pd);

    /* Release the hold done in kcf_new_ctx() during init step. */
    KCF_CONTEXT_COND_RELEASE(rv, kcf_ctx);
    return (rv);
}
/*
 * See comments for crypto_digest_update().
 */
int
crypto_sign_update(crypto_context_t context, crypto_data_t *data,
    crypto_call_req_t *cr)
{
    crypto_ctx_t *ctx = (crypto_ctx_t *)context;
    kcf_context_t *kcf_ctx;
    kcf_provider_desc_t *pd;
    kcf_req_params_t params;
    int rv;

    if ((ctx == NULL) ||
        ((kcf_ctx = (kcf_context_t *)ctx->cc_framework_private) == NULL) ||
        ((pd = kcf_ctx->kc_prov_desc) == NULL)) {
        return (CRYPTO_INVALID_CONTEXT);
    }

    ASSERT(pd->pd_prov_type != CRYPTO_LOGICAL_PROVIDER);
    KCF_PROV_REFHOLD(pd);
    KCF_WRAP_SIGN_OPS_PARAMS(&params, KCF_OP_UPDATE,
        ctx->cc_session, NULL, NULL, data, NULL, NULL);
    rv = kcf_submit_request(pd, ctx, cr, &params, B_FALSE);
    KCF_PROV_REFRELE(pd);

    return (rv);
}
/*
 * Allocate a new async request node.
 *
 * ictx - Framework private context pointer
 * crq - Has callback function and argument. Should be non-NULL.
 * req - The parameters to pass to the SPI
 */
static kcf_areq_node_t *
kcf_areqnode_alloc(kcf_provider_desc_t *pd, kcf_context_t *ictx,
    crypto_call_req_t *crq, kcf_req_params_t *req, boolean_t isdual)
{
    kcf_areq_node_t *arptr, *areq;

    ASSERT(crq != NULL);
    arptr = kmem_cache_alloc(kcf_areq_cache, KM_NOSLEEP);
    if (arptr == NULL)
        return (NULL);

    arptr->an_state = REQ_ALLOCATED;
    arptr->an_reqarg = *crq;
    arptr->an_params = *req;
    arptr->an_context = ictx;
    arptr->an_isdual = isdual;

    arptr->an_next = arptr->an_prev = NULL;
    KCF_PROV_REFHOLD(pd);
    arptr->an_provider = pd;
    arptr->an_tried_plist = NULL;
    arptr->an_refcnt = 1;
    arptr->an_idnext = arptr->an_idprev = NULL;

    /*
     * Requests for context-less operations do not use the
     * an_is_my_turn and an_ctxchain_next fields.
     */
    if (ictx == NULL)
        return (arptr);

    KCF_CONTEXT_REFHOLD(ictx);

    /*
     * Chain this request to the context.
     */
    mutex_enter(&ictx->kc_in_use_lock);
    arptr->an_ctxchain_next = NULL;
    if ((areq = ictx->kc_req_chain_last) == NULL) {
        arptr->an_is_my_turn = B_TRUE;
        ictx->kc_req_chain_last = ictx->kc_req_chain_first = arptr;
    } else {
        ASSERT(ictx->kc_req_chain_first != NULL);
        arptr->an_is_my_turn = B_FALSE;
        /* Insert the new request at the end of the chain. */
        areq->an_ctxchain_next = arptr;
        ictx->kc_req_chain_last = arptr;
    }
    mutex_exit(&ictx->kc_in_use_lock);

    return (arptr);
}
/*
 * Utility routine called from crypto_load_soft_disabled(). Callers
 * should have done a prior undo_register_provider().
 */
void
redo_register_provider(kcf_provider_desc_t *pd)
{
    /* process the mechanisms supported by the provider */
    (void) init_prov_mechs(NULL, pd);

    /*
     * Hold provider in providers table. We should not call
     * kcf_prov_tab_add_provider() here as the provider descriptor
     * is still valid which means it has an entry in the provider
     * table.
     */
    KCF_PROV_REFHOLD(pd);
    KCF_PROV_IREFHOLD(pd);
}
/*
 * Add a provider to the provider table. If no free entry can be found
 * for the new provider, returns CRYPTO_HOST_MEMORY. Otherwise, add
 * the provider to the table, initialize the pd_prov_id field
 * of the specified provider descriptor to the index in that table,
 * and return CRYPTO_SUCCESS. Note that a REFHOLD is done on the
 * provider when pointed to by a table entry.
 */
int
kcf_prov_tab_add_provider(kcf_provider_desc_t *prov_desc)
{
    uint_t i;

    ASSERT(prov_tab != NULL);

    mutex_enter(&prov_tab_mutex);

    /* see if any slots can be freed */
    if (kcf_need_provtab_walk)
        kcf_free_unregistered_provs();

    /* find free slot in providers table */
    for (i = 0; i < KCF_MAX_PROVIDERS && prov_tab[i] != NULL; i++)
        ;
    if (i == KCF_MAX_PROVIDERS) {
        /* ran out of providers entries */
        mutex_exit(&prov_tab_mutex);
        cmn_err(CE_WARN, "out of providers entries");
        return (CRYPTO_HOST_MEMORY);
    }

    /* initialize entry */
    prov_tab[i] = prov_desc;
    KCF_PROV_REFHOLD(prov_desc);
    prov_tab_num++;

    mutex_exit(&prov_tab_mutex);

    /* update provider descriptor */
    prov_desc->pd_prov_id = i;

    /*
     * The KCF-private provider handle is defined as the internal
     * provider id.
     */
    prov_desc->pd_kcf_prov_handle =
        (crypto_kcf_provider_handle_t)prov_desc->pd_prov_id;

#if DEBUG
    if (kcf_frmwrk_debug >= 1)
        kcf_prov_tab_dump("kcf_prov_tab_add_provider");
#endif /* DEBUG */

    return (CRYPTO_SUCCESS);
}
/*
 * This function goes through the provider table and verifies
 * any KCF_PROV_UNVERIFIED providers.
 *
 * This is called when kcfd is up and the door handle is ready. It is
 * called again when the status of FIPS 140 has been determined, so
 * providers delayed by FIPS 140 can now be verified.
 */
void
verify_unverified_providers(void)
{
    int i;
    kcf_provider_desc_t *pd;
    boolean_t need_verify;

    ASSERT(kcf_dh != NULL);
    mutex_enter(&prov_tab_mutex);

    for (i = 0; i < KCF_MAX_PROVIDERS; i++) {
        if ((pd = prov_tab[i]) == NULL)
            continue;

        if (pd->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)
            continue;

        mutex_enter(&pd->pd_lock);
        need_verify = pd->pd_state == KCF_PROV_UNVERIFIED;
        mutex_exit(&pd->pd_lock);

        if (!need_verify)
            continue;

        KCF_PROV_REFHOLD(pd);

        /*
         * We need to drop this lock, since it could be
         * acquired by kcf_verify_signature().
         * This is safe, as any providers that are
         * added to the table after we dropped the
         * lock *will see* a non-NULL kcf_dh and hence
         * would have been verified by other means.
         */
        mutex_exit(&prov_tab_mutex);

        /* This routine will release the above hold */
        kcf_verify_signature(pd);

        mutex_enter(&prov_tab_mutex);
    }

    mutex_exit(&prov_tab_mutex);
}
/*
 * Returns the provider descriptor corresponding to the specified
 * provider id. A REFHOLD is done on the descriptor before it is
 * returned to the caller. It is the responsibility of the caller
 * to do a REFRELE once it is done with the provider descriptor.
 */
kcf_provider_desc_t *
kcf_prov_tab_lookup(crypto_provider_id_t prov_id)
{
    kcf_provider_desc_t *prov_desc;

    mutex_enter(&prov_tab_mutex);

    prov_desc = prov_tab[prov_id];

    if (prov_desc == NULL) {
        mutex_exit(&prov_tab_mutex);
        return (NULL);
    }

    KCF_PROV_REFHOLD(prov_desc);

    mutex_exit(&prov_tab_mutex);

    return (prov_desc);
}
/*
 * Add a provider to the provider table. If no free entry can be found
 * for the new provider, returns CRYPTO_HOST_MEMORY. Otherwise, add
 * the provider to the table, initialize the pd_prov_id field
 * of the specified provider descriptor to the index in that table,
 * and return CRYPTO_SUCCESS. Note that a REFHOLD is done on the
 * provider when pointed to by a table entry.
 */
int
kcf_prov_tab_add_provider(kcf_provider_desc_t *prov_desc)
{
    uint_t i;

    ASSERT(prov_tab != NULL);

    mutex_enter(&prov_tab_mutex);

    /* find free slot in providers table */
    for (i = 1; i < KCF_MAX_PROVIDERS && prov_tab[i] != NULL; i++)
        ;
    if (i == KCF_MAX_PROVIDERS) {
        /* ran out of providers entries */
        mutex_exit(&prov_tab_mutex);
        cmn_err(CE_WARN, "out of providers entries");
        return (CRYPTO_HOST_MEMORY);
    }

    /* initialize entry */
    prov_tab[i] = prov_desc;
    KCF_PROV_REFHOLD(prov_desc);
    KCF_PROV_IREFHOLD(prov_desc);
    prov_tab_num++;

    mutex_exit(&prov_tab_mutex);

    /* update provider descriptor */
    prov_desc->pd_prov_id = i;

    /*
     * The KCF-private provider handle is defined as the internal
     * provider id.
     */
    prov_desc->pd_kcf_prov_handle =
        (crypto_kcf_provider_handle_t)prov_desc->pd_prov_id;

    return (CRYPTO_SUCCESS);
}
/*
 * Returns an array of hardware provider descriptors. This routine is
 * used by cryptoadm(1M). A REFHOLD is done on each descriptor before
 * the array is returned. The entire table can be freed by calling
 * kcf_free_provider_tab().
 *
 * A NULL name argument puts all hardware providers in the array.
 * A non-NULL name argument puts only those providers in the array
 * which match the name and instance arguments.
 */
int
kcf_get_hw_prov_tab(uint_t *count, kcf_provider_desc_t ***array, int kmflag,
    char *name, uint_t instance, boolean_t unverified)
{
    kcf_provider_desc_t *prov_desc;
    kcf_provider_desc_t **p = NULL;
    char *last;
    uint_t cnt = 0;
    uint_t i, j;
    int rval = CRYPTO_SUCCESS;
    size_t n, final_size;

    /* count the providers */
    mutex_enter(&prov_tab_mutex);
    for (i = 0; i < KCF_MAX_PROVIDERS; i++) {
        if ((prov_desc = prov_tab[i]) != NULL &&
            prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER) {
            if (KCF_IS_PROV_USABLE(prov_desc) ||
                (unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
                if (name == NULL ||
                    (strncmp(prov_desc->pd_name, name,
                    MAXNAMELEN) == 0 &&
                    prov_desc->pd_instance == instance)) {
                    cnt++;
                }
            }
        }
    }
    mutex_exit(&prov_tab_mutex);

    if (cnt == 0)
        goto out;

    n = cnt * sizeof (kcf_provider_desc_t *);
again:
    p = kmem_zalloc(n, kmflag);
    if (p == NULL) {
        rval = CRYPTO_HOST_MEMORY;
        goto out;
    }
    /* pointer to last entry in the array */
    last = (char *)&p[cnt-1];

    mutex_enter(&prov_tab_mutex);
    for (i = 0, j = 0; i < KCF_MAX_PROVIDERS; i++) {
        if ((prov_desc = prov_tab[i]) != NULL &&
            prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER) {
            if (KCF_IS_PROV_USABLE(prov_desc) ||
                (unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
                if (name == NULL ||
                    (strncmp(prov_desc->pd_name, name,
                    MAXNAMELEN) == 0 &&
                    prov_desc->pd_instance == instance)) {
                    if ((char *)&p[j] > last) {
                        mutex_exit(&prov_tab_mutex);
                        kcf_free_provider_tab(cnt, p);
                        n = n << 1;
                        cnt = cnt << 1;
                        goto again;
                    }
                    p[j++] = prov_desc;
                    KCF_PROV_REFHOLD(prov_desc);
                }
            }
        }
    }
    mutex_exit(&prov_tab_mutex);

    final_size = j * sizeof (kcf_provider_desc_t *);
    ASSERT(final_size <= n);

    /* check if buffer we allocated is too large */
    if (final_size < n) {
        char *final_buffer = NULL;

        if (final_size > 0) {
            final_buffer = kmem_alloc(final_size, kmflag);
            if (final_buffer == NULL) {
                kcf_free_provider_tab(cnt, p);
                cnt = 0;
                p = NULL;
                rval = CRYPTO_HOST_MEMORY;
                goto out;
            }
            bcopy(p, final_buffer, final_size);
        }
        kmem_free(p, n);
        p = (kcf_provider_desc_t **)final_buffer;
    }
    cnt = j;

out:
    *count = cnt;
    *array = p;
    return (rval);
}
/*
 * Returns an array of hardware and logical provider descriptors,
 * a.k.a. the PKCS#11 slot list. A REFHOLD is done on each descriptor
 * before the array is returned. The entire table can be freed by
 * calling kcf_free_provider_tab().
 */
int
kcf_get_slot_list(uint_t *count, kcf_provider_desc_t ***array,
    boolean_t unverified)
{
    kcf_provider_desc_t *prov_desc;
    kcf_provider_desc_t **p = NULL;
    char *last;
    uint_t cnt = 0;
    uint_t i, j;
    int rval = CRYPTO_SUCCESS;
    size_t n, final_size;

    /* count the providers */
    mutex_enter(&prov_tab_mutex);
    for (i = 0; i < KCF_MAX_PROVIDERS; i++) {
        if ((prov_desc = prov_tab[i]) != NULL &&
            ((prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER &&
            (prov_desc->pd_flags & CRYPTO_HIDE_PROVIDER) == 0) ||
            prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)) {
            if (KCF_IS_PROV_USABLE(prov_desc) ||
                (unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
                cnt++;
            }
        }
    }
    mutex_exit(&prov_tab_mutex);

    if (cnt == 0)
        goto out;

    n = cnt * sizeof (kcf_provider_desc_t *);
again:
    p = kmem_zalloc(n, KM_SLEEP);

    /* pointer to last entry in the array */
    last = (char *)&p[cnt-1];

    mutex_enter(&prov_tab_mutex);
    /* fill the slot list */
    for (i = 0, j = 0; i < KCF_MAX_PROVIDERS; i++) {
        if ((prov_desc = prov_tab[i]) != NULL &&
            ((prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER &&
            (prov_desc->pd_flags & CRYPTO_HIDE_PROVIDER) == 0) ||
            prov_desc->pd_prov_type == CRYPTO_LOGICAL_PROVIDER)) {
            if (KCF_IS_PROV_USABLE(prov_desc) ||
                (unverified && KCF_IS_PROV_UNVERIFIED(prov_desc))) {
                if ((char *)&p[j] > last) {
                    mutex_exit(&prov_tab_mutex);
                    kcf_free_provider_tab(cnt, p);
                    n = n << 1;
                    cnt = cnt << 1;
                    goto again;
                }
                p[j++] = prov_desc;
                KCF_PROV_REFHOLD(prov_desc);
            }
        }
    }
    mutex_exit(&prov_tab_mutex);

    final_size = j * sizeof (kcf_provider_desc_t *);
    cnt = j;
    ASSERT(final_size <= n);

    /* check if buffer we allocated is too large */
    if (final_size < n) {
        char *final_buffer = NULL;

        if (final_size > 0) {
            final_buffer = kmem_alloc(final_size, KM_SLEEP);
            bcopy(p, final_buffer, final_size);
        }
        kmem_free(p, n);
        p = (kcf_provider_desc_t **)final_buffer;
    }

out:
    *count = cnt;
    *array = p;
    return (rval);
}
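/*
 * Illustrative sketch (not from the original source): consuming the
 * slot list returned by kcf_get_slot_list(). Every descriptor in the
 * array is held, so the whole array must be released in one shot with
 * kcf_free_provider_tab(), which drops all the holds.
 */
static void
example_walk_slot_list(void)
{
    kcf_provider_desc_t **slots;
    uint_t count, i;

    if (kcf_get_slot_list(&count, &slots, B_FALSE) != CRYPTO_SUCCESS)
        return;

    for (i = 0; i < count; i++) {
        /* ... inspect slots[i] ... */
    }

    if (slots != NULL)
        kcf_free_provider_tab(count, slots);
}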
/*
 * This routine is used to add cryptographic providers to the KCF framework.
 * Providers pass a crypto_provider_info structure to
 * crypto_register_provider() and get back a handle. The
 * crypto_provider_info structure contains a list of mechanisms supported
 * by the provider and an ops vector containing provider entry points.
 * Hardware providers call this routine in their attach routines. Software
 * providers call this routine in their _init() routine.
 */
int
crypto_register_provider(crypto_provider_info_t *info,
    crypto_kcf_provider_handle_t *handle)
{
    char ks_name[KSTAT_STRLEN];

    kcf_provider_desc_t *prov_desc = NULL;
    int ret = CRYPTO_ARGUMENTS_BAD;

    if (info->pi_interface_version > CRYPTO_SPI_VERSION_3)
        return (CRYPTO_VERSION_MISMATCH);

    /*
     * Check provider type, must be software, hardware, or logical.
     */
    if (info->pi_provider_type != CRYPTO_HW_PROVIDER &&
        info->pi_provider_type != CRYPTO_SW_PROVIDER &&
        info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER)
        return (CRYPTO_ARGUMENTS_BAD);

    /*
     * Allocate and initialize a new provider descriptor. We also
     * hold it and release it when done.
     */
    prov_desc = kcf_alloc_provider_desc(info);
    KCF_PROV_REFHOLD(prov_desc);

    prov_desc->pd_prov_type = info->pi_provider_type;

    /* provider-private handle, opaque to KCF */
    prov_desc->pd_prov_handle = info->pi_provider_handle;

    /* copy provider description string */
    if (info->pi_provider_description != NULL) {
        /*
         * pi_provider_description is a string that can contain
         * up to CRYPTO_PROVIDER_DESCR_MAX_LEN + 1 characters
         * INCLUDING the terminating null character. A bcopy()
         * is necessary here as pd_description should not have
         * a null character. See comments in kcf_alloc_provider_desc()
         * for details on the pd_description field.
         */
        bcopy(info->pi_provider_description,
            prov_desc->pd_description,
            MIN(strlen(info->pi_provider_description),
            (size_t)CRYPTO_PROVIDER_DESCR_MAX_LEN));
    }

    if (info->pi_provider_type != CRYPTO_LOGICAL_PROVIDER) {
        if (info->pi_ops_vector == NULL) {
            goto bail;
        }
        copy_ops_vector_v1(info->pi_ops_vector,
            prov_desc->pd_ops_vector);
        if (info->pi_interface_version >= CRYPTO_SPI_VERSION_2) {
            copy_ops_vector_v2(info->pi_ops_vector,
                prov_desc->pd_ops_vector);
            prov_desc->pd_flags = info->pi_flags;
        }
        if (info->pi_interface_version == CRYPTO_SPI_VERSION_3) {
            copy_ops_vector_v3(info->pi_ops_vector,
                prov_desc->pd_ops_vector);
        }
    }

    /* object_ops and nostore_key_ops are mutually exclusive */
    if (prov_desc->pd_ops_vector->co_object_ops &&
        prov_desc->pd_ops_vector->co_nostore_key_ops) {
        goto bail;
    }

    /* process the mechanisms supported by the provider */
    if ((ret = init_prov_mechs(info, prov_desc)) != CRYPTO_SUCCESS)
        goto bail;

    /*
     * Add provider to providers tables, also sets the descriptor
     * pd_prov_id field.
     */
    if ((ret = kcf_prov_tab_add_provider(prov_desc)) != CRYPTO_SUCCESS) {
        undo_register_provider(prov_desc, B_FALSE);
        goto bail;
    }

    /*
     * We create a taskq only for a hardware provider. The global
     * software queue is used for software providers. We handle ordering
     * of multi-part requests in the taskq routine. So, it is safe to
     * have multiple threads for the taskq. We pass TASKQ_PREPOPULATE flag
     * to keep some entries cached to improve performance.
     */
    if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
        prov_desc->pd_sched_info.ks_taskq = taskq_create("kcf_taskq",
            crypto_taskq_threads, minclsyspri,
            crypto_taskq_minalloc, crypto_taskq_maxalloc,
            TASKQ_PREPOPULATE);
    else
        prov_desc->pd_sched_info.ks_taskq = NULL;

    /* no kernel session to logical providers */
    if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
        /*
         * Open a session for session-oriented providers. This session
         * is used for all kernel consumers. This is fine as a provider
         * is required to support multiple thread access to a session.
         * We can do this only after the taskq has been created as we
         * do a kcf_submit_request() to open the session.
         */
        if (KCF_PROV_SESSION_OPS(prov_desc) != NULL) {
            kcf_req_params_t params;

            KCF_WRAP_SESSION_OPS_PARAMS(&params,
                KCF_OP_SESSION_OPEN, &prov_desc->pd_sid, 0,
                CRYPTO_USER, NULL, 0, prov_desc);
            ret = kcf_submit_request(prov_desc, NULL, NULL,
                &params, B_FALSE);

            if (ret != CRYPTO_SUCCESS) {
                undo_register_provider(prov_desc, B_TRUE);
                ret = CRYPTO_FAILED;
                goto bail;
            }
        }
    }

    if (prov_desc->pd_prov_type != CRYPTO_LOGICAL_PROVIDER) {
        /*
         * Create the kstat for this provider. There is a kstat
         * installed for each successfully registered provider.
         * This kstat is deleted when the provider unregisters.
         */
        if (prov_desc->pd_prov_type == CRYPTO_SW_PROVIDER) {
            (void) snprintf(ks_name, KSTAT_STRLEN, "%s_%s",
                "NONAME", "provider_stats");
        } else {
            (void) snprintf(ks_name, KSTAT_STRLEN, "%s_%d_%u_%s",
                "NONAME", 0, prov_desc->pd_prov_id,
                "provider_stats");
        }

        prov_desc->pd_kstat = kstat_create("kcf", 0, ks_name, "crypto",
            KSTAT_TYPE_NAMED, sizeof (kcf_prov_stats_t) /
            sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);

        if (prov_desc->pd_kstat != NULL) {
            bcopy(&kcf_stats_ks_data_template,
                &prov_desc->pd_ks_data,
                sizeof (kcf_stats_ks_data_template));
            prov_desc->pd_kstat->ks_data = &prov_desc->pd_ks_data;
            KCF_PROV_REFHOLD(prov_desc);
            KCF_PROV_IREFHOLD(prov_desc);
            prov_desc->pd_kstat->ks_private = prov_desc;
            prov_desc->pd_kstat->ks_update = kcf_prov_kstat_update;
            kstat_install(prov_desc->pd_kstat);
        }
    }

    if (prov_desc->pd_prov_type == CRYPTO_HW_PROVIDER)
        process_logical_providers(info, prov_desc);

    mutex_enter(&prov_desc->pd_lock);
    prov_desc->pd_state = KCF_PROV_READY;
    mutex_exit(&prov_desc->pd_lock);
    kcf_do_notify(prov_desc, B_TRUE);

    *handle = prov_desc->pd_kcf_prov_handle;
    ret = CRYPTO_SUCCESS;

bail:
    KCF_PROV_REFRELE(prov_desc);
    return (ret);
}
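/*
 * Illustrative sketch (not from the original source): the provider-side
 * half of the registration above, as a software provider might do it
 * from its _init() routine. The ops vector, mechanism table, and
 * description string are empty placeholders here; a real provider
 * populates them from its SPI implementation before registering.
 */
static crypto_ops_t example_ops;                /* placeholder ops vector */
static crypto_mech_info_t example_mech_tab[1];  /* placeholder mech table */
static crypto_kcf_provider_handle_t example_handle;

static int
example_register(void)
{
    crypto_provider_info_t info;

    bzero(&info, sizeof (info));
    info.pi_interface_version = CRYPTO_SPI_VERSION_1;
    info.pi_provider_type = CRYPTO_SW_PROVIDER;
    info.pi_provider_description = "example software provider";
    info.pi_ops_vector = &example_ops;
    info.pi_mech_list_count =
        sizeof (example_mech_tab) / sizeof (example_mech_tab[0]);
    info.pi_mechanisms = example_mech_tab;

    /* on success, example_handle identifies us for unregistration */
    return (crypto_register_provider(&info, &example_handle));
}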
/*
 * This routine is called when a request to a provider has failed
 * with a recoverable error. This routine tries to find another provider
 * and dispatches the request to the new provider, if one is available.
 * We reuse the request structure.
 *
 * A return value of NULL from kcf_get_mech_provider() indicates
 * we have tried the last provider.
 */
static int
kcf_resubmit_request(kcf_areq_node_t *areq)
{
    int error = CRYPTO_FAILED;
    kcf_context_t *ictx;
    kcf_provider_desc_t *old_pd;
    kcf_provider_desc_t *new_pd;
    crypto_mechanism_t *mech1 = NULL, *mech2 = NULL;
    crypto_mech_type_t prov_mt1, prov_mt2;
    crypto_func_group_t fg = 0;

    if (!can_resubmit(areq, &mech1, &mech2, &fg))
        return (error);

    old_pd = areq->an_provider;

    /*
     * Add old_pd to the list of providers already tried. We release
     * the hold on old_pd (from the earlier kcf_get_mech_provider()) in
     * kcf_free_triedlist().
     */
    if (kcf_insert_triedlist(&areq->an_tried_plist, old_pd,
        KM_NOSLEEP) == NULL)
        return (error);

    if (mech1 && !mech2) {
        new_pd = kcf_get_mech_provider(mech1->cm_type, NULL, &error,
            areq->an_tried_plist, fg,
            (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
    } else {
        ASSERT(mech1 != NULL && mech2 != NULL);

        new_pd = kcf_get_dual_provider(mech1, mech2, NULL, &prov_mt1,
            &prov_mt2, &error, areq->an_tried_plist, fg, fg,
            (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED), 0);
    }

    if (new_pd == NULL)
        return (error);

    /*
     * We reuse the old context by resetting provider specific
     * fields in it.
     */
    if ((ictx = areq->an_context) != NULL) {
        crypto_ctx_t *ctx;

        ASSERT(old_pd == ictx->kc_prov_desc);
        KCF_PROV_REFRELE(ictx->kc_prov_desc);
        KCF_PROV_REFHOLD(new_pd);
        ictx->kc_prov_desc = new_pd;

        ctx = &ictx->kc_glbl_ctx;
        ctx->cc_provider = new_pd->pd_prov_handle;
        ctx->cc_session = new_pd->pd_sid;
        ctx->cc_provider_private = NULL;
    }

    /* We reuse areq by resetting the provider and context fields. */
    KCF_PROV_REFRELE(old_pd);
    KCF_PROV_REFHOLD(new_pd);
    areq->an_provider = new_pd;
    mutex_enter(&areq->an_lock);
    areq->an_state = REQ_WAITING;
    mutex_exit(&areq->an_lock);

    switch (new_pd->pd_prov_type) {
    case CRYPTO_SW_PROVIDER:
        error = kcf_disp_sw_request(areq);
        break;

    case CRYPTO_HW_PROVIDER: {
        taskq_t *taskq = new_pd->pd_sched_info.ks_taskq;

        if (taskq_dispatch(taskq, process_req_hwp, areq,
            TQ_NOSLEEP) == TASKQID_INVALID) {
            error = CRYPTO_HOST_MEMORY;
        } else {
            error = CRYPTO_QUEUED;
        }
        break;
    }
    default:
        break;
    }

    return (error);
}
/*
 * Called from CRYPTO_LOAD_SOFT_DISABLED ioctl.
 * If new_count is 0, then completely remove the entry.
 */
int
crypto_load_soft_disabled(char *name, uint_t new_count,
    crypto_mech_name_t *new_array)
{
    kcf_provider_desc_t *provider = NULL;
    crypto_mech_name_t *prev_array;
    uint_t prev_count = 0;
    int rv;

    provider = kcf_prov_tab_lookup_by_name(name);
    if (provider != NULL) {
        mutex_enter(&provider->pd_lock);

        /*
         * Check if any other thread is disabling or removing
         * this provider. We return if this is the case.
         */
        if (provider->pd_state >= KCF_PROV_DISABLED) {
            mutex_exit(&provider->pd_lock);
            KCF_PROV_REFRELE(provider);
            return (CRYPTO_BUSY);
        }

        provider->pd_state = KCF_PROV_DISABLED;
        mutex_exit(&provider->pd_lock);

        undo_register_provider(provider, B_TRUE);
        KCF_PROV_REFRELE(provider);
        if (provider->pd_kstat != NULL)
            KCF_PROV_REFRELE(provider);

        mutex_enter(&provider->pd_lock);
        /* Wait till the existing requests complete. */
        while (provider->pd_state != KCF_PROV_FREED) {
            cv_wait(&provider->pd_remove_cv,
                &provider->pd_lock);
        }
        mutex_exit(&provider->pd_lock);
    }

    if (new_count == 0) {
        kcf_policy_remove_by_name(name, &prev_count, &prev_array);
        crypto_free_mech_list(prev_array, prev_count);
        rv = CRYPTO_SUCCESS;
        goto out;
    }

    /* put disabled mechanisms into policy table */
    if ((rv = kcf_policy_load_soft_disabled(name, new_count, new_array,
        &prev_count, &prev_array)) == CRYPTO_SUCCESS) {
        crypto_free_mech_list(prev_array, prev_count);
    }

out:
    if (provider != NULL) {
        redo_register_provider(provider);
        if (provider->pd_kstat != NULL)
            KCF_PROV_REFHOLD(provider);
        mutex_enter(&provider->pd_lock);
        provider->pd_state = KCF_PROV_READY;
        mutex_exit(&provider->pd_lock);
    } else if (rv == CRYPTO_SUCCESS) {
        /*
         * There are some cases where it is useful to KCF clients
         * to have a provider whose mechanism is enabled now to be
         * available. So, we attempt to load it here.
         *
         * The check, new_count < prev_count, ensures that we do this
         * only in the case where a mechanism(s) is now enabled.
         * This check assumes that enable and disable are separate
         * administrative actions and are not done in a single action.
         */
        if (new_count < prev_count && (in_soft_config_list(name)) &&
            (modload("crypto", name) != -1)) {
            struct modctl *mcp;
            boolean_t load_again = B_FALSE;

            if ((mcp = mod_hold_by_name(name)) != NULL) {
                mcp->mod_loadflags |= MOD_NOAUTOUNLOAD;

                /* memory pressure may have unloaded module */
                if (!mcp->mod_installed)
                    load_again = B_TRUE;
                mod_release_mod(mcp);

                if (load_again)
                    (void) modload("crypto", name);
            }
        }
    }

    return (rv);
}
/*
 * Callback routine for the next part of a simulated dual part.
 * Schedules the next step.
 *
 * This routine can be called from interrupt context.
 */
void
kcf_next_req(void *next_req_arg, int status)
{
    kcf_dual_req_t *next_req = (kcf_dual_req_t *)next_req_arg;
    kcf_req_params_t *params = &(next_req->kr_params);
    kcf_areq_node_t *areq = next_req->kr_areq;
    int error = status;
    kcf_provider_desc_t *pd = NULL;
    crypto_dual_data_t *ct = NULL;

    /* Stop the processing if an error occurred at this step */
    if (error != CRYPTO_SUCCESS) {
out:
        areq->an_reqarg = next_req->kr_callreq;
        KCF_AREQ_REFRELE(areq);
        kmem_free(next_req, sizeof (kcf_dual_req_t));
        areq->an_isdual = B_FALSE;
        kcf_aop_done(areq, error);
        return;
    }

    switch (params->rp_opgrp) {
    case KCF_OG_MAC: {

        /*
         * The next req is submitted with the same reqid as the
         * first part. The consumer only got back that reqid, and
         * should still be able to cancel the operation during its
         * second step.
         */
        kcf_mac_ops_params_t *mops = &(params->rp_u.mac_params);
        crypto_ctx_template_t mac_tmpl;
        kcf_mech_entry_t *me;

        ct = (crypto_dual_data_t *)mops->mo_data;
        mac_tmpl = (crypto_ctx_template_t)mops->mo_templ;

        /* No expected recoverable failures, so no retry list */
        pd = kcf_get_mech_provider(mops->mo_framework_mechtype,
            &me, &error, NULL, CRYPTO_FG_MAC_ATOMIC,
            (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED),
            ct->dd_len2);
        if (pd == NULL) {
            error = CRYPTO_MECH_NOT_SUPPORTED;
            goto out;
        }

        /* Validate the MAC context template here */
        if ((pd->pd_prov_type == CRYPTO_SW_PROVIDER) &&
            (mac_tmpl != NULL)) {
            kcf_ctx_template_t *ctx_mac_tmpl;

            ctx_mac_tmpl = (kcf_ctx_template_t *)mac_tmpl;

            if (ctx_mac_tmpl->ct_generation !=
                me->me_gen_swprov) {
                KCF_PROV_REFRELE(pd);
                error = CRYPTO_OLD_CTX_TEMPLATE;
                goto out;
            }
            mops->mo_templ = ctx_mac_tmpl->ct_prov_tmpl;
        }

        break;
    }
    case KCF_OG_DECRYPT: {
        kcf_decrypt_ops_params_t *dcrops =
            &(params->rp_u.decrypt_params);

        ct = (crypto_dual_data_t *)dcrops->dop_ciphertext;
        /* No expected recoverable failures, so no retry list */
        pd = kcf_get_mech_provider(dcrops->dop_framework_mechtype,
            NULL, &error, NULL, CRYPTO_FG_DECRYPT_ATOMIC,
            (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED),
            ct->dd_len1);
        if (pd == NULL) {
            error = CRYPTO_MECH_NOT_SUPPORTED;
            goto out;
        }

        break;
    }
    default:
        break;
    }

    /* The second step uses len2 and offset2 of the dual_data */
    next_req->kr_saveoffset = ct->dd_offset1;
    next_req->kr_savelen = ct->dd_len1;
    ct->dd_offset1 = ct->dd_offset2;
    ct->dd_len1 = ct->dd_len2;

    /* preserve if the caller is restricted */
    if (areq->an_reqarg.cr_flag & CRYPTO_RESTRICTED) {
        areq->an_reqarg.cr_flag = CRYPTO_RESTRICTED;
    } else {
        areq->an_reqarg.cr_flag = 0;
    }

    areq->an_reqarg.cr_callback_func = kcf_last_req;
    areq->an_reqarg.cr_callback_arg = next_req;
    areq->an_isdual = B_TRUE;

    /*
     * We would like to call kcf_submit_request() here. But,
     * that is not possible as that routine allocates a new
     * kcf_areq_node_t request structure, while we need to
     * reuse the existing request structure.
     */
    switch (pd->pd_prov_type) {
    case CRYPTO_SW_PROVIDER:
        error = common_submit_request(pd, NULL, params,
            KCF_RHNDL(KM_NOSLEEP));
        break;

    case CRYPTO_HW_PROVIDER: {
        kcf_provider_desc_t *old_pd;
        taskq_t *taskq = pd->pd_sched_info.ks_taskq;

        /*
         * Set the params for the second step in the
         * dual-ops.
         */
        areq->an_params = *params;
        old_pd = areq->an_provider;
        KCF_PROV_REFRELE(old_pd);
        KCF_PROV_REFHOLD(pd);
        areq->an_provider = pd;

        /*
         * Note that we have to do a taskq_dispatch()
         * here as we may be in interrupt context.
         */
        if (taskq_dispatch(taskq, process_req_hwp, areq,
            TQ_NOSLEEP) == (taskqid_t)0) {
            error = CRYPTO_HOST_MEMORY;
        } else {
            error = CRYPTO_QUEUED;
        }
        break;
    }
    default:
        break;
    }

    /*
     * We have to release the holds on the request and the provider
     * in all cases.
     */
    KCF_AREQ_REFRELE(areq);
    KCF_PROV_REFRELE(pd);

    if (error != CRYPTO_QUEUED) {
        /* restore, clean up, and invoke the client's callback */

        ct->dd_offset1 = next_req->kr_saveoffset;
        ct->dd_len1 = next_req->kr_savelen;
        areq->an_reqarg = next_req->kr_callreq;
        kmem_free(next_req, sizeof (kcf_dual_req_t));
        areq->an_isdual = B_FALSE;
        kcf_aop_done(areq, error);
    }
}