/*
 * Executes a skein_update and skein_final on a pre-initialized crypto
 * context in a single step. See the documentation to these functions to
 * see what to pass here.
 */
static int
skein_digest(crypto_ctx_t *ctx, crypto_data_t *data, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	skein_ctx_t	*sc = SKEIN_CTX(ctx);
	size_t		digest_bytes;
	int		error;

	ASSERT(sc != NULL);
	digest_bytes = CRYPTO_BITS2BYTES(sc->sc_digest_bitlen);

	/* Tell the caller how much room the digest actually needs. */
	if (digest->cd_length < digest_bytes) {
		digest->cd_length = digest_bytes;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	error = skein_update(ctx, data, req);
	if (error != CRYPTO_SUCCESS) {
		/*
		 * skein_final() won't run, so we must destroy the
		 * context ourselves (zeroize before freeing).
		 */
		bzero(sc, sizeof (*sc));
		kmem_free(sc, sizeof (*sc));
		SKEIN_CTX_LVALUE(ctx) = NULL;
		digest->cd_length = 0;
		return (error);
	}

	/* skein_final() sets cd_length and destroys the context. */
	return (skein_final(ctx, digest, req));
}
/*
 * Performs a Final on a context and writes to an mblk digest output.
 *
 * Returns CRYPTO_SUCCESS, CRYPTO_DATA_LEN_RANGE if cd_offset lies past the
 * end of the mblk chain (or the chain is too short to hold the digest), or
 * CRYPTO_HOST_MEMORY if the temporary split buffer can't be allocated.
 */
static int
skein_digest_final_mblk(skein_ctx_t *ctx, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	off_t	offset = digest->cd_offset;
	mblk_t	*mp;

	/* Jump to the first mblk_t that will be used to store the digest. */
	for (mp = digest->cd_mp; mp != NULL && offset >= MBLKL(mp);
	    offset -= MBLKL(mp), mp = mp->b_cont)
		;
	if (mp == NULL) {
		/* caller specified offset is too large */
		return (CRYPTO_DATA_LEN_RANGE);
	}

	if (offset + CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen) <= MBLKL(mp)) {
		/* The digest will fit in the current mblk. */
		SKEIN_OP(ctx, Final, mp->b_rptr + offset);
	} else {
		/* Split the digest up between the individual buffers. */
		uint8_t *digest_tmp;
		off_t scratch_offset = 0;
		size_t length = CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen);
		size_t cur_len;

		/* Finalize into a scratch buffer, then scatter-copy it. */
		digest_tmp = kmem_alloc(CRYPTO_BITS2BYTES(
		    ctx->sc_digest_bitlen), crypto_kmflag(req));
		if (digest_tmp == NULL)
			return (CRYPTO_HOST_MEMORY);
		SKEIN_OP(ctx, Final, digest_tmp);
		while (mp != NULL && length > 0) {
			/* `offset' is only non-zero for the first mblk. */
			cur_len = MIN(MBLKL(mp) - offset, length);
			bcopy(digest_tmp + scratch_offset,
			    mp->b_rptr + offset, cur_len);
			length -= cur_len;
			mp = mp->b_cont;
			scratch_offset += cur_len;
			offset = 0;
		}
		kmem_free(digest_tmp,
		    CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen));
		if (mp == NULL && length > 0) {
			/* digest too long to fit in the mblk buffers */
			return (CRYPTO_DATA_LEN_RANGE);
		}
	}

	return (CRYPTO_SUCCESS);
}
/*
 * Computes a Skein digest of the supplied data in a single atomic
 * operation (init + update + final on a temporary on-stack context).
 */
/*ARGSUSED*/
static int
skein_digest_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_data_t *data, crypto_data_t *digest, crypto_req_handle_t req)
{
	int		error;
	skein_ctx_t	skein_ctx;
	crypto_ctx_t	ctx;
	SKEIN_CTX_LVALUE(&ctx) = &skein_ctx;

	/* Init */
	if (!VALID_SKEIN_DIGEST_MECH(mechanism->cm_type))
		return (CRYPTO_MECHANISM_INVALID);
	skein_ctx.sc_mech_type = mechanism->cm_type;
	error = skein_get_digest_bitlen(mechanism, &skein_ctx.sc_digest_bitlen);
	if (error != CRYPTO_SUCCESS)
		goto out;
	SKEIN_OP(&skein_ctx, Init, skein_ctx.sc_digest_bitlen);

	/*
	 * Fix: the request handle is the third argument to skein_update()
	 * and the output digest is the second argument to skein_final();
	 * `digest' was being passed as the request handle and `data' as
	 * the digest output buffer.
	 */
	if ((error = skein_update(&ctx, data, req)) != CRYPTO_SUCCESS)
		goto out;
	/*
	 * NOTE(review): skein_final() bzero()s and kmem_free()s
	 * SKEIN_CTX(ctx), but here it points at the on-stack skein_ctx;
	 * a "nofree" final variant is needed to make this safe --
	 * confirm against skein_final() before relying on this path.
	 */
	if ((error = skein_final(&ctx, digest, req)) != CRYPTO_SUCCESS)
		goto out;
out:
	if (error == CRYPTO_SUCCESS)
		digest->cd_length =
		    CRYPTO_BITS2BYTES(skein_ctx.sc_digest_bitlen);
	else
		digest->cd_length = 0;
	/* Zeroize the stack context: it may hold sensitive state. */
	bzero(&skein_ctx, sizeof (skein_ctx));

	return (error);
}
/*
 * Finalizes the digest into the caller's buffer (raw, uio or mblk),
 * then zeroizes and frees the per-request Skein context.
 */
/*ARGSUSED*/
static int
skein_final(crypto_ctx_t *ctx, crypto_data_t *digest, crypto_req_handle_t req)
{
	skein_ctx_t	*sc = SKEIN_CTX(ctx);
	size_t		digest_bytes;
	int		error = CRYPTO_SUCCESS;

	ASSERT(sc != NULL);
	digest_bytes = CRYPTO_BITS2BYTES(sc->sc_digest_bitlen);

	/* Report the required buffer size if the caller's is too small. */
	if (digest->cd_length < digest_bytes) {
		digest->cd_length = digest_bytes;
		return (CRYPTO_BUFFER_TOO_SMALL);
	}

	switch (digest->cd_format) {
	case CRYPTO_DATA_RAW:
		SKEIN_OP(sc, Final,
		    (uint8_t *)digest->cd_raw.iov_base + digest->cd_offset);
		break;
	case CRYPTO_DATA_UIO:
		error = skein_digest_final_uio(sc, digest, req);
		break;
	case CRYPTO_DATA_MBLK:
		error = skein_digest_final_mblk(sc, digest, req);
		break;
	default:
		error = CRYPTO_ARGUMENTS_BAD;
	}

	digest->cd_length = (error == CRYPTO_SUCCESS) ? digest_bytes : 0;

	/* The context is single-use: zeroize and destroy it here. */
	bzero(sc, sizeof (*sc));
	kmem_free(sc, sizeof (*sc));
	SKEIN_CTX_LVALUE(ctx) = NULL;

	return (error);
}
/* ARGSUSED */ static int sha1_create_ctx_template(crypto_provider_handle_t provider, crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size, crypto_req_handle_t req) { sha1_hmac_ctx_t *sha1_hmac_ctx_tmpl; uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length); if ((mechanism->cm_type != SHA1_HMAC_MECH_INFO_TYPE) && (mechanism->cm_type != SHA1_HMAC_GEN_MECH_INFO_TYPE)) { return (CRYPTO_MECHANISM_INVALID); } /* Add support for key by attributes (RFE 4706552) */ if (key->ck_format != CRYPTO_KEY_RAW) return (CRYPTO_ARGUMENTS_BAD); /* * Allocate and initialize SHA1 context. */ sha1_hmac_ctx_tmpl = kmem_alloc(sizeof (sha1_hmac_ctx_t), crypto_kmflag(req)); if (sha1_hmac_ctx_tmpl == NULL) return (CRYPTO_HOST_MEMORY); if (keylen_in_bytes > SHA1_HMAC_BLOCK_SIZE) { uchar_t digested_key[SHA1_DIGEST_LENGTH]; /* * Hash the passed-in key to get a smaller key. * The inner context is used since it hasn't been * initialized yet. */ PROV_SHA1_DIGEST_KEY(&sha1_hmac_ctx_tmpl->hc_icontext, key->ck_data, keylen_in_bytes, digested_key); sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, digested_key, SHA1_DIGEST_LENGTH); } else { sha1_mac_init_ctx(sha1_hmac_ctx_tmpl, key->ck_data, keylen_in_bytes); } sha1_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type; *ctx_template = (crypto_spi_ctx_template_t)sha1_hmac_ctx_tmpl; *ctx_template_size = sizeof (sha1_hmac_ctx_t); return (CRYPTO_SUCCESS); }
/*
 * Helper function that builds a Skein MAC context from the provided
 * mechanism and key.
 */
static int
skein_mac_ctx_build(skein_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key)
{
	int error;

	/* Reject unknown mechanisms and non-raw keys up front. */
	if (!VALID_SKEIN_MAC_MECH(mechanism->cm_type))
		return (CRYPTO_MECHANISM_INVALID);
	if (key->ck_format != CRYPTO_KEY_RAW)
		return (CRYPTO_ARGUMENTS_BAD);

	ctx->sc_mech_type = mechanism->cm_type;
	if ((error = skein_get_digest_bitlen(mechanism,
	    &ctx->sc_digest_bitlen)) != CRYPTO_SUCCESS)
		return (error);

	/* InitExt keys the hash with the caller's raw key bytes. */
	SKEIN_OP(ctx, InitExt, ctx->sc_digest_bitlen, 0, key->ck_data,
	    CRYPTO_BITS2BYTES(key->ck_length));

	return (CRYPTO_SUCCESS);
}
/*
 * Performs a Final on a context and writes to a uio digest output.
 *
 * Returns CRYPTO_SUCCESS, CRYPTO_ARGUMENTS_BAD for non-kernel uios,
 * CRYPTO_DATA_LEN_RANGE if the offset/length don't fit in the iovecs, or
 * CRYPTO_HOST_MEMORY if the temporary split buffer can't be allocated.
 */
static int
skein_digest_final_uio(skein_ctx_t *ctx, crypto_data_t *digest,
    crypto_req_handle_t req)
{
	off_t	offset = digest->cd_offset;
	uint_t	vec_idx;
	uio_t	*uio = digest->cd_uio;

	/* we support only kernel buffer */
	if (uio->uio_segflg != UIO_SYSSPACE)
		return (CRYPTO_ARGUMENTS_BAD);

	/*
	 * Jump to the first iovec containing ptr to the digest to be returned.
	 * Fix: test vec_idx against uio_iovcnt BEFORE indexing
	 * uio_iov[vec_idx] -- the old evaluation order read one element
	 * past the end of the iovec array when cd_offset spanned all iovecs.
	 */
	for (vec_idx = 0; vec_idx < uio->uio_iovcnt &&
	    offset >= uio->uio_iov[vec_idx].iov_len;
	    offset -= uio->uio_iov[vec_idx++].iov_len)
		;
	if (vec_idx == uio->uio_iovcnt) {
		/*
		 * The caller specified an offset that is larger than the
		 * total size of the buffers it provided.
		 */
		return (CRYPTO_DATA_LEN_RANGE);
	}
	if (offset + CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen) <=
	    uio->uio_iov[vec_idx].iov_len) {
		/* The computed digest will fit in the current iovec. */
		SKEIN_OP(ctx, Final,
		    (uchar_t *)uio->uio_iov[vec_idx].iov_base + offset);
	} else {
		/* Split the digest up between the individual buffers. */
		uint8_t *digest_tmp;
		off_t scratch_offset = 0;
		size_t length = CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen);
		size_t cur_len;

		digest_tmp = kmem_alloc(CRYPTO_BITS2BYTES(
		    ctx->sc_digest_bitlen), crypto_kmflag(req));
		if (digest_tmp == NULL)
			return (CRYPTO_HOST_MEMORY);
		SKEIN_OP(ctx, Final, digest_tmp);
		while (vec_idx < uio->uio_iovcnt && length > 0) {
			/* `offset' is only non-zero for the first iovec. */
			cur_len = MIN(uio->uio_iov[vec_idx].iov_len - offset,
			    length);
			bcopy(digest_tmp + scratch_offset,
			    uio->uio_iov[vec_idx].iov_base + offset, cur_len);
			length -= cur_len;
			vec_idx++;
			scratch_offset += cur_len;
			offset = 0;
		}
		kmem_free(digest_tmp,
		    CRYPTO_BITS2BYTES(ctx->sc_digest_bitlen));
		if (vec_idx == uio->uio_iovcnt && length > 0) {
			/*
			 * The end of the specified iovec's was reached but
			 * the length requested could not be processed, i.e.
			 * The caller requested to digest more data than it
			 * provided.
			 */
			return (CRYPTO_DATA_LEN_RANGE);
		}
	}
	return (CRYPTO_SUCCESS);
}
/*
 * Initializes a multi-part MAC operation.
 *
 * Allocates the per-request MD5-HMAC context, keys it either from the
 * pre-built ctx_template or directly from `key', and validates the
 * general-length mechanism parameter.  On any failure the context is
 * zeroized and freed before returning.
 */
static int
md5_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);

	/* Only the MD5-HMAC mechanisms (fixed or general length) apply. */
	if (mechanism->cm_type != MD5_HMAC_MECH_INFO_TYPE &&
	    mechanism->cm_type != MD5_HMAC_GEN_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/* Add support for key by attributes (RFE 4706552) */
	if (key->ck_format != CRYPTO_KEY_RAW)
		return (CRYPTO_ARGUMENTS_BAD);

	ctx->cc_provider_private = kmem_alloc(sizeof (md5_hmac_ctx_t),
	    crypto_kmflag(req));
	if (ctx->cc_provider_private == NULL)
		return (CRYPTO_HOST_MEMORY);

	if (ctx_template != NULL) {
		/* reuse context template */
		bcopy(ctx_template, PROV_MD5_HMAC_CTX(ctx),
		    sizeof (md5_hmac_ctx_t));
	} else {
		/* no context template, compute context */
		if (keylen_in_bytes > MD5_HMAC_BLOCK_SIZE) {
			uchar_t digested_key[MD5_DIGEST_LENGTH];
			md5_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;

			/*
			 * Hash the passed-in key to get a smaller key.
			 * The inner context is used since it hasn't been
			 * initialized yet.
			 */
			PROV_MD5_DIGEST_KEY(&hmac_ctx->hc_icontext,
			    key->ck_data, keylen_in_bytes, digested_key);
			md5_mac_init_ctx(PROV_MD5_HMAC_CTX(ctx),
			    digested_key, MD5_DIGEST_LENGTH);
		} else {
			md5_mac_init_ctx(PROV_MD5_HMAC_CTX(ctx),
			    key->ck_data, keylen_in_bytes);
		}
	}

	/*
	 * Get the mechanism parameters, if applicable.
	 */
	PROV_MD5_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
	if (mechanism->cm_type == MD5_HMAC_GEN_MECH_INFO_TYPE) {
		/*
		 * The general-length variant carries the requested digest
		 * length as a ulong_t mechanism parameter; it may not
		 * exceed the full MD5 digest size.
		 */
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (ulong_t))
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
		PROV_MD5_GET_DIGEST_LEN(mechanism,
		    PROV_MD5_HMAC_CTX(ctx)->hc_digest_len);
		if (PROV_MD5_HMAC_CTX(ctx)->hc_digest_len > MD5_DIGEST_LENGTH)
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* Zeroize before freeing: the context holds key material. */
		bzero(ctx->cc_provider_private, sizeof (md5_hmac_ctx_t));
		kmem_free(ctx->cc_provider_private, sizeof (md5_hmac_ctx_t));
		ctx->cc_provider_private = NULL;
	}

	return (ret);
}
/* ARGSUSED */ static int md5_mac_verify_atomic(crypto_provider_handle_t provider, crypto_session_id_t session_id, crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac, crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) { int ret = CRYPTO_SUCCESS; uchar_t digest[MD5_DIGEST_LENGTH]; md5_hmac_ctx_t md5_hmac_ctx; uint32_t digest_len = MD5_DIGEST_LENGTH; uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length); if (mechanism->cm_type != MD5_HMAC_MECH_INFO_TYPE && mechanism->cm_type != MD5_HMAC_GEN_MECH_INFO_TYPE) return (CRYPTO_MECHANISM_INVALID); /* Add support for key by attributes (RFE 4706552) */ if (key->ck_format != CRYPTO_KEY_RAW) return (CRYPTO_ARGUMENTS_BAD); if (ctx_template != NULL) { /* reuse context template */ bcopy(ctx_template, &md5_hmac_ctx, sizeof (md5_hmac_ctx_t)); } else { /* no context template, compute context */ if (keylen_in_bytes > MD5_HMAC_BLOCK_SIZE) { /* * Hash the passed-in key to get a smaller key. * The inner context is used since it hasn't been * initialized yet. */ PROV_MD5_DIGEST_KEY(&md5_hmac_ctx.hc_icontext, key->ck_data, keylen_in_bytes, digest); md5_mac_init_ctx(&md5_hmac_ctx, digest, MD5_DIGEST_LENGTH); } else { md5_mac_init_ctx(&md5_hmac_ctx, key->ck_data, keylen_in_bytes); } } /* * Get the mechanism parameters, if applicable. 
*/ if (mechanism->cm_type == MD5_HMAC_GEN_MECH_INFO_TYPE) { if (mechanism->cm_param == NULL || mechanism->cm_param_len != sizeof (ulong_t)) { ret = CRYPTO_MECHANISM_PARAM_INVALID; goto bail; } PROV_MD5_GET_DIGEST_LEN(mechanism, digest_len); if (digest_len > MD5_DIGEST_LENGTH) { ret = CRYPTO_MECHANISM_PARAM_INVALID; goto bail; } } if (mac->cd_length != digest_len) { ret = CRYPTO_INVALID_MAC; goto bail; } /* do an MD5 update of the inner context using the specified data */ MD5_MAC_UPDATE(data, md5_hmac_ctx, ret); if (ret != CRYPTO_SUCCESS) /* the update failed, free context and bail */ goto bail; /* do an MD5 final on the inner context */ MD5Final(digest, &md5_hmac_ctx.hc_icontext); /* * Do an MD5 update on the outer context, feeding the inner * digest as data. */ MD5Update(&md5_hmac_ctx.hc_ocontext, digest, MD5_DIGEST_LENGTH); /* * Do an MD5 final on the outer context, storing the computed * digest in the local digest buffer. */ MD5Final(digest, &md5_hmac_ctx.hc_ocontext); /* * Compare the computed digest against the expected digest passed * as argument. */ switch (mac->cd_format) { case CRYPTO_DATA_RAW: if (bcmp(digest, (unsigned char *)mac->cd_raw.iov_base + mac->cd_offset, digest_len) != 0) ret = CRYPTO_INVALID_MAC; break; case CRYPTO_DATA_UIO: { off_t offset = mac->cd_offset; uint_t vec_idx; off_t scratch_offset = 0; size_t length = digest_len; size_t cur_len; /* we support only kernel buffer */ if (mac->cd_uio->uio_segflg != UIO_SYSSPACE) return (CRYPTO_ARGUMENTS_BAD); /* jump to the first iovec containing the expected digest */ for (vec_idx = 0; offset >= mac->cd_uio->uio_iov[vec_idx].iov_len && vec_idx < mac->cd_uio->uio_iovcnt; offset -= mac->cd_uio->uio_iov[vec_idx++].iov_len); if (vec_idx == mac->cd_uio->uio_iovcnt) { /* * The caller specified an offset that is * larger than the total size of the buffers * it provided. 
*/ ret = CRYPTO_DATA_LEN_RANGE; break; } /* do the comparison of computed digest vs specified one */ while (vec_idx < mac->cd_uio->uio_iovcnt && length > 0) { cur_len = MIN(mac->cd_uio->uio_iov[vec_idx].iov_len - offset, length); if (bcmp(digest + scratch_offset, mac->cd_uio->uio_iov[vec_idx].iov_base + offset, cur_len) != 0) { ret = CRYPTO_INVALID_MAC; break; } length -= cur_len; vec_idx++; scratch_offset += cur_len; offset = 0; } break; } case CRYPTO_DATA_MBLK: { off_t offset = mac->cd_offset; mblk_t *mp; off_t scratch_offset = 0; size_t length = digest_len; size_t cur_len; /* jump to the first mblk_t containing the expected digest */ for (mp = mac->cd_mp; mp != NULL && offset >= MBLKL(mp); offset -= MBLKL(mp), mp = mp->b_cont); if (mp == NULL) { /* * The caller specified an offset that is larger than * the total size of the buffers it provided. */ ret = CRYPTO_DATA_LEN_RANGE; break; } while (mp != NULL && length > 0) { cur_len = MIN(MBLKL(mp) - offset, length); if (bcmp(digest + scratch_offset, mp->b_rptr + offset, cur_len) != 0) { ret = CRYPTO_INVALID_MAC; break; } length -= cur_len; mp = mp->b_cont; scratch_offset += cur_len; offset = 0; } break; } default: ret = CRYPTO_ARGUMENTS_BAD; } bzero(&md5_hmac_ctx, sizeof (md5_hmac_ctx_t)); return (ret); bail: bzero(&md5_hmac_ctx, sizeof (md5_hmac_ctx_t)); mac->cd_length = 0; return (ret); }
/*
 * Atomically computes an MD5-HMAC over `data' and writes it to `mac'
 * (raw, uio or mblk).  On success mac->cd_length is set to the (possibly
 * truncated, for the general-length mechanism) digest length; on failure
 * it is set to 0.  The on-stack HMAC context is zeroized on all paths.
 */
/* ARGSUSED */
static int
md5_mac_atomic(crypto_provider_handle_t provider,
    crypto_session_id_t session_id, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac,
    crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	uchar_t digest[MD5_DIGEST_LENGTH];
	md5_hmac_ctx_t md5_hmac_ctx;
	uint32_t digest_len = MD5_DIGEST_LENGTH;
	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);

	if (mechanism->cm_type != MD5_HMAC_MECH_INFO_TYPE &&
	    mechanism->cm_type != MD5_HMAC_GEN_MECH_INFO_TYPE)
		return (CRYPTO_MECHANISM_INVALID);

	/* Add support for key by attributes (RFE 4706552) */
	if (key->ck_format != CRYPTO_KEY_RAW)
		return (CRYPTO_ARGUMENTS_BAD);

	if (ctx_template != NULL) {
		/* reuse context template */
		bcopy(ctx_template, &md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
	} else {
		/* no context template, compute context */
		if (keylen_in_bytes > MD5_HMAC_BLOCK_SIZE) {
			/*
			 * Hash the passed-in key to get a smaller key.
			 * The inner context is used since it hasn't been
			 * initialized yet.
			 */
			PROV_MD5_DIGEST_KEY(&md5_hmac_ctx.hc_icontext,
			    key->ck_data, keylen_in_bytes, digest);
			md5_mac_init_ctx(&md5_hmac_ctx, digest,
			    MD5_DIGEST_LENGTH);
		} else {
			md5_mac_init_ctx(&md5_hmac_ctx, key->ck_data,
			    keylen_in_bytes);
		}
	}

	/*
	 * Get the mechanism parameters, if applicable.
	 */
	if (mechanism->cm_type == MD5_HMAC_GEN_MECH_INFO_TYPE) {
		/* General-length variant: digest_len comes from cm_param. */
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (ulong_t)) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
			goto bail;
		}
		PROV_MD5_GET_DIGEST_LEN(mechanism, digest_len);
		if (digest_len > MD5_DIGEST_LENGTH) {
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
			goto bail;
		}
	}

	/* do an MD5 update of the inner context using the specified data */
	MD5_MAC_UPDATE(data, md5_hmac_ctx, ret);
	if (ret != CRYPTO_SUCCESS)
		/* the update failed, free context and bail */
		goto bail;

	/* do an MD5 final on the inner context */
	MD5Final(digest, &md5_hmac_ctx.hc_icontext);

	/*
	 * Do an MD5 update on the outer context, feeding the inner
	 * digest as data.
	 */
	MD5Update(&md5_hmac_ctx.hc_ocontext, digest, MD5_DIGEST_LENGTH);

	/*
	 * Do an MD5 final on the outer context, storing the computed
	 * digest in the users buffer.
	 */
	switch (mac->cd_format) {
	case CRYPTO_DATA_RAW:
		if (digest_len != MD5_DIGEST_LENGTH) {
			/*
			 * The caller requested a short digest. Digest
			 * into a scratch buffer and return to
			 * the user only what was requested.
			 */
			MD5Final(digest, &md5_hmac_ctx.hc_ocontext);
			bcopy(digest, (unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, digest_len);
		} else {
			MD5Final((unsigned char *)mac->cd_raw.iov_base +
			    mac->cd_offset, &md5_hmac_ctx.hc_ocontext);
		}
		break;
	case CRYPTO_DATA_UIO:
		ret = md5_digest_final_uio(&md5_hmac_ctx.hc_ocontext, mac,
		    digest_len, digest);
		break;
	case CRYPTO_DATA_MBLK:
		ret = md5_digest_final_mblk(&md5_hmac_ctx.hc_ocontext, mac,
		    digest_len, digest);
		break;
	default:
		ret = CRYPTO_ARGUMENTS_BAD;
	}

	if (ret == CRYPTO_SUCCESS) {
		mac->cd_length = digest_len;
	} else {
		mac->cd_length = 0;
	}
	/* Extra paranoia: zeroizing the local context on the stack */
	bzero(&md5_hmac_ctx, sizeof (md5_hmac_ctx_t));

	return (ret);
bail:
	bzero(&md5_hmac_ctx, sizeof (md5_hmac_ctx_t));
	mac->cd_length = 0;
	return (ret);
}
/* * zcrypt_wrap_key * * Using the provided wrapping key wrap the in memory representation of the * key into a form suitable for storage in a zap object. * * Uses kmem_alloc to create space for the wrapped key, the caller * should free with kmem_free when it is finished with the wrapped key. * * returns 0 on success */ int zcrypt_wrap_key(zcrypt_key_t *wrappingkey, zcrypt_key_t *ptkey, caddr_t *wkeybuf, size_t *wkeylen, uint64_t wcrypt) { crypto_mechanism_t wmech; crypto_data_t wkey_cdt, ptkey_cdt; size_t ptkeylen; size_t ivlen; int ret; zcrypt_key_phys_t *wkeyphys; /* * Currently we only support wrapping keys of CRYPTO_KEY_RAW */ ASSERT(ptkey->zk_key.ck_format == CRYPTO_KEY_RAW); ASSERT(wcrypt < ZIO_CRYPT_WRAP_FUNCTIONS); wmech.cm_type = crypto_mech2id( zio_crypt_wrap_table[wcrypt].cwi_mechname); if (wmech.cm_type == CRYPTO_MECH_INVALID) return (EINVAL); wkeyphys = kmem_zalloc(sizeof (zcrypt_key_phys_t), KM_PUSHPAGE); //Sleep wkeyphys->zkp_crypt = wcrypt; /* * The data encryption and MAC keys are wrapped separately * since in the future we may support one or both of them * being in a FIPS 140-2 token as sensitive objects and we * won't be able to treat them as "data" and wrap them together * in one operation. 
*/ /* Data encryption key is first */ ptkeylen = CRYPTO_BITS2BYTES(ptkey->zk_key.ck_length); ivlen = zio_crypt_wrap_table[wcrypt].cwi_ivlen; VERIFY(random_get_bytes((uchar_t *)wkeyphys->zkp_kiv, ivlen) == 0); if (wcrypt == ZIO_CRYPT_WRAP_AES_CCM) { CK_AES_CCM_PARAMS *ccmp; ccmp = kmem_zalloc(sizeof (CK_AES_CCM_PARAMS), KM_PUSHPAGE); //Sleep ccmp->ulNonceSize = ivlen; ccmp->nonce = (uchar_t *)wkeyphys->zkp_kiv; ccmp->ulDataSize = ptkeylen; ccmp->ulMACSize = zio_crypt_wrap_table[wcrypt].cwi_maclen; wmech.cm_param = (char *)ccmp; wmech.cm_param_len = sizeof (CK_AES_CCM_PARAMS); } else if (wcrypt == ZIO_CRYPT_WRAP_AES_GCM) { CK_AES_GCM_PARAMS *gcmp; gcmp = kmem_zalloc(sizeof (CK_AES_GCM_PARAMS), KM_PUSHPAGE); //Sleep gcmp->ulIvLen = ivlen; gcmp->pIv = (uchar_t *)wkeyphys->zkp_kiv; gcmp->ulTagBits = zio_crypt_wrap_table[wcrypt].cwi_maclen * 8; wmech.cm_param = (char *)gcmp; wmech.cm_param_len = sizeof (CK_AES_GCM_PARAMS); } else { ASSERT(0); } SET_CRYPTO_DATA(wkey_cdt, (char *)wkeyphys->zkp_key, ptkeylen + zio_crypt_wrap_table[wcrypt].cwi_maclen); SET_CRYPTO_DATA(ptkey_cdt, ptkey->zk_key.ck_data, ptkeylen); #if _KERNEL printk("zcrypt 1\n"); #endif ret = crypto_encrypt(&wmech, &ptkey_cdt, &wrappingkey->zk_key, NULL, &wkey_cdt, NULL); bzero(wmech.cm_param, wmech.cm_param_len); kmem_free(wmech.cm_param, wmech.cm_param_len); if (ret != CRYPTO_SUCCESS) goto out; /* Now the HMAC-SHA256 key for use with dedup IV generation */ ptkeylen = CRYPTO_BITS2BYTES(ptkey->zk_mackey.ck_length); VERIFY(random_get_bytes((uchar_t *)wkeyphys->zkp_miv, ivlen) == 0); if (wcrypt == ZIO_CRYPT_WRAP_AES_CCM) { CK_AES_CCM_PARAMS *ccmp; ccmp = kmem_zalloc(sizeof (CK_AES_CCM_PARAMS), KM_PUSHPAGE); //Sleep ccmp->ulNonceSize = ivlen; ccmp->nonce = (uchar_t *)wkeyphys->zkp_miv; ccmp->ulDataSize = ptkeylen; ccmp->ulMACSize = zio_crypt_wrap_table[wcrypt].cwi_maclen; wmech.cm_param = (char *)ccmp; wmech.cm_param_len = sizeof (CK_AES_CCM_PARAMS); } else if (wcrypt == ZIO_CRYPT_WRAP_AES_GCM) { 
CK_AES_GCM_PARAMS *gcmp; gcmp = kmem_zalloc(sizeof (CK_AES_GCM_PARAMS), KM_PUSHPAGE); //Sleep gcmp->ulIvLen = ivlen; gcmp->pIv = (uchar_t *)wkeyphys->zkp_miv; gcmp->ulTagBits = zio_crypt_wrap_table[wcrypt].cwi_maclen * 8; wmech.cm_param = (char *)gcmp; wmech.cm_param_len = sizeof (CK_AES_GCM_PARAMS); } else { ASSERT(0); } SET_CRYPTO_DATA(wkey_cdt, (char *)wkeyphys->zkp_mackey, ptkeylen + zio_crypt_wrap_table[wcrypt].cwi_maclen); SET_CRYPTO_DATA(ptkey_cdt, ptkey->zk_mackey.ck_data, ptkeylen); #if _KERNEL printk("zcrypt 2\n"); #endif ret = crypto_encrypt(&wmech, &ptkey_cdt, &wrappingkey->zk_key, NULL, &wkey_cdt, NULL); bzero(wmech.cm_param, wmech.cm_param_len); kmem_free(wmech.cm_param, wmech.cm_param_len); out: if (ret != CRYPTO_SUCCESS) { kmem_free(wkeyphys, sizeof (zcrypt_key_phys_t)); *wkeylen = 0; return (ret); } *wkeylen = sizeof (zcrypt_key_phys_t); *wkeybuf = (caddr_t)wkeyphys; return (0); }
/*
 * Initializes a multi-part SHA2-HMAC operation: allocates the per-request
 * context, keys it from the template or from `key', and validates the
 * general-length mechanism parameter.  On failure the context is zeroized
 * and freed before returning.
 */
static int
sha2_mac_init(crypto_ctx_t *ctx, crypto_mechanism_t *mechanism,
    crypto_key_t *key, crypto_spi_ctx_template_t ctx_template,
    crypto_req_handle_t req)
{
	int ret = CRYPTO_SUCCESS;
	uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length);
	uint_t sha_digest_len, sha_hmac_block_size;

	/*
	 * Set the digest length and block size to values appropriate to the
	 * mechanism
	 */
	switch (mechanism->cm_type) {
	case SHA256_HMAC_MECH_INFO_TYPE:
	case SHA256_HMAC_GEN_MECH_INFO_TYPE:
		sha_digest_len = SHA256_DIGEST_LENGTH;
		sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE;
		break;
	case SHA384_HMAC_MECH_INFO_TYPE:
	case SHA384_HMAC_GEN_MECH_INFO_TYPE:
	case SHA512_HMAC_MECH_INFO_TYPE:
	case SHA512_HMAC_GEN_MECH_INFO_TYPE:
		/* SHA-384/512 share the 512-bit internal state and block. */
		sha_digest_len = SHA512_DIGEST_LENGTH;
		sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE;
		break;
	default:
		return (CRYPTO_MECHANISM_INVALID);
	}

	if (key->ck_format != CRYPTO_KEY_RAW)
		return (CRYPTO_ARGUMENTS_BAD);

	ctx->cc_provider_private = kmem_alloc(sizeof (sha2_hmac_ctx_t),
	    crypto_kmflag(req));
	if (ctx->cc_provider_private == NULL)
		return (CRYPTO_HOST_MEMORY);

	PROV_SHA2_HMAC_CTX(ctx)->hc_mech_type = mechanism->cm_type;
	if (ctx_template != NULL) {
		/* reuse context template */
		bcopy(ctx_template, PROV_SHA2_HMAC_CTX(ctx),
		    sizeof (sha2_hmac_ctx_t));
	} else {
		/* no context template, compute context */
		if (keylen_in_bytes > sha_hmac_block_size) {
			uchar_t digested_key[SHA512_DIGEST_LENGTH];
			sha2_hmac_ctx_t *hmac_ctx = ctx->cc_provider_private;

			/*
			 * Hash the passed-in key to get a smaller key.
			 * The inner context is used since it hasn't been
			 * initialized yet.
			 *
			 * NOTE(review): cm_type / 3 relies on the SHA2
			 * mech-type enum ordering to select the digest
			 * mechanism -- confirm against the mech table.
			 */
			PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3,
			    &hmac_ctx->hc_icontext,
			    key->ck_data, keylen_in_bytes, digested_key);
			sha2_mac_init_ctx(PROV_SHA2_HMAC_CTX(ctx),
			    digested_key, sha_digest_len);
		} else {
			sha2_mac_init_ctx(PROV_SHA2_HMAC_CTX(ctx),
			    key->ck_data, keylen_in_bytes);
		}
	}

	/*
	 * Get the mechanism parameters, if applicable.
	 * NOTE(review): cm_type % 3 == 2 appears to identify the
	 * general-length (GEN) variants via enum ordering -- confirm.
	 */
	if (mechanism->cm_type % 3 == 2) {
		if (mechanism->cm_param == NULL ||
		    mechanism->cm_param_len != sizeof (ulong_t))
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
		PROV_SHA2_GET_DIGEST_LEN(mechanism,
		    PROV_SHA2_HMAC_CTX(ctx)->hc_digest_len);
		if (PROV_SHA2_HMAC_CTX(ctx)->hc_digest_len > sha_digest_len)
			ret = CRYPTO_MECHANISM_PARAM_INVALID;
	}

	if (ret != CRYPTO_SUCCESS) {
		/* Zeroize before freeing: the context holds key material. */
		bzero(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
		kmem_free(ctx->cc_provider_private, sizeof (sha2_hmac_ctx_t));
		ctx->cc_provider_private = NULL;
	}

	return (ret);
}
/* ARGSUSED */ static int sha2_create_ctx_template(crypto_provider_handle_t provider, crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_spi_ctx_template_t *ctx_template, size_t *ctx_template_size, crypto_req_handle_t req) { sha2_hmac_ctx_t *sha2_hmac_ctx_tmpl; uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length); uint32_t sha_digest_len, sha_hmac_block_size; /* * Set the digest length and block size to values appropriate to the * mechanism */ switch (mechanism->cm_type) { case SHA256_HMAC_MECH_INFO_TYPE: case SHA256_HMAC_GEN_MECH_INFO_TYPE: sha_digest_len = SHA256_DIGEST_LENGTH; sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE; break; case SHA384_HMAC_MECH_INFO_TYPE: case SHA384_HMAC_GEN_MECH_INFO_TYPE: case SHA512_HMAC_MECH_INFO_TYPE: case SHA512_HMAC_GEN_MECH_INFO_TYPE: sha_digest_len = SHA512_DIGEST_LENGTH; sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE; break; default: return (CRYPTO_MECHANISM_INVALID); } /* Add support for key by attributes (RFE 4706552) */ if (key->ck_format != CRYPTO_KEY_RAW) return (CRYPTO_ARGUMENTS_BAD); /* * Allocate and initialize SHA2 context. */ sha2_hmac_ctx_tmpl = kmem_alloc(sizeof (sha2_hmac_ctx_t), crypto_kmflag(req)); if (sha2_hmac_ctx_tmpl == NULL) return (CRYPTO_HOST_MEMORY); sha2_hmac_ctx_tmpl->hc_mech_type = mechanism->cm_type; if (keylen_in_bytes > sha_hmac_block_size) { uchar_t digested_key[SHA512_DIGEST_LENGTH]; /* * Hash the passed-in key to get a smaller key. * The inner context is used since it hasn't been * initialized yet. */ PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3, &sha2_hmac_ctx_tmpl->hc_icontext, key->ck_data, keylen_in_bytes, digested_key); sha2_mac_init_ctx(sha2_hmac_ctx_tmpl, digested_key, sha_digest_len); } else { sha2_mac_init_ctx(sha2_hmac_ctx_tmpl, key->ck_data, keylen_in_bytes); } *ctx_template = (crypto_spi_ctx_template_t)sha2_hmac_ctx_tmpl; *ctx_template_size = sizeof (sha2_hmac_ctx_t); return (CRYPTO_SUCCESS); }
/* ARGSUSED */ static int sha2_mac_atomic(crypto_provider_handle_t provider, crypto_session_id_t session_id, crypto_mechanism_t *mechanism, crypto_key_t *key, crypto_data_t *data, crypto_data_t *mac, crypto_spi_ctx_template_t ctx_template, crypto_req_handle_t req) { int ret = CRYPTO_SUCCESS; uchar_t digest[SHA512_DIGEST_LENGTH]; sha2_hmac_ctx_t sha2_hmac_ctx; uint32_t sha_digest_len, digest_len, sha_hmac_block_size; uint_t keylen_in_bytes = CRYPTO_BITS2BYTES(key->ck_length); /* * Set the digest length and block size to values appropriate to the * mechanism */ switch (mechanism->cm_type) { case SHA256_HMAC_MECH_INFO_TYPE: case SHA256_HMAC_GEN_MECH_INFO_TYPE: sha_digest_len = digest_len = SHA256_DIGEST_LENGTH; sha_hmac_block_size = SHA256_HMAC_BLOCK_SIZE; break; case SHA384_HMAC_MECH_INFO_TYPE: case SHA384_HMAC_GEN_MECH_INFO_TYPE: case SHA512_HMAC_MECH_INFO_TYPE: case SHA512_HMAC_GEN_MECH_INFO_TYPE: sha_digest_len = digest_len = SHA512_DIGEST_LENGTH; sha_hmac_block_size = SHA512_HMAC_BLOCK_SIZE; break; default: return (CRYPTO_MECHANISM_INVALID); } /* Add support for key by attributes (RFE 4706552) */ if (key->ck_format != CRYPTO_KEY_RAW) return (CRYPTO_ARGUMENTS_BAD); if (ctx_template != NULL) { /* reuse context template */ bcopy(ctx_template, &sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t)); } else { sha2_hmac_ctx.hc_mech_type = mechanism->cm_type; /* no context template, initialize context */ if (keylen_in_bytes > sha_hmac_block_size) { /* * Hash the passed-in key to get a smaller key. * The inner context is used since it hasn't been * initialized yet. 
*/ PROV_SHA2_DIGEST_KEY(mechanism->cm_type / 3, &sha2_hmac_ctx.hc_icontext, key->ck_data, keylen_in_bytes, digest); sha2_mac_init_ctx(&sha2_hmac_ctx, digest, sha_digest_len); } else { sha2_mac_init_ctx(&sha2_hmac_ctx, key->ck_data, keylen_in_bytes); } } /* get the mechanism parameters, if applicable */ if ((mechanism->cm_type % 3) == 2) { if (mechanism->cm_param == NULL || mechanism->cm_param_len != sizeof (ulong_t)) { ret = CRYPTO_MECHANISM_PARAM_INVALID; goto bail; } PROV_SHA2_GET_DIGEST_LEN(mechanism, digest_len); if (digest_len > sha_digest_len) { ret = CRYPTO_MECHANISM_PARAM_INVALID; goto bail; } } /* do a SHA2 update of the inner context using the specified data */ SHA2_MAC_UPDATE(data, sha2_hmac_ctx, ret); if (ret != CRYPTO_SUCCESS) /* the update failed, free context and bail */ goto bail; /* * Do a SHA2 final on the inner context. */ SHA2Final(digest, &sha2_hmac_ctx.hc_icontext); /* * Do an SHA2 update on the outer context, feeding the inner * digest as data. * * HMAC-SHA384 needs special handling as the outer hash needs only 48 * bytes of the inner hash value. */ if (mechanism->cm_type == SHA384_HMAC_MECH_INFO_TYPE || mechanism->cm_type == SHA384_HMAC_GEN_MECH_INFO_TYPE) SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, SHA384_DIGEST_LENGTH); else SHA2Update(&sha2_hmac_ctx.hc_ocontext, digest, sha_digest_len); /* * Do a SHA2 final on the outer context, storing the computed * digest in the users buffer. */ switch (mac->cd_format) { case CRYPTO_DATA_RAW: if (digest_len != sha_digest_len) { /* * The caller requested a short digest. Digest * into a scratch buffer and return to * the user only what was requested. 
*/ SHA2Final(digest, &sha2_hmac_ctx.hc_ocontext); bcopy(digest, (unsigned char *)mac->cd_raw.iov_base + mac->cd_offset, digest_len); } else { SHA2Final((unsigned char *)mac->cd_raw.iov_base + mac->cd_offset, &sha2_hmac_ctx.hc_ocontext); } break; case CRYPTO_DATA_UIO: ret = sha2_digest_final_uio(&sha2_hmac_ctx.hc_ocontext, mac, digest_len, digest); break; case CRYPTO_DATA_MBLK: ret = sha2_digest_final_mblk(&sha2_hmac_ctx.hc_ocontext, mac, digest_len, digest); break; default: ret = CRYPTO_ARGUMENTS_BAD; } if (ret == CRYPTO_SUCCESS) { mac->cd_length = digest_len; return (CRYPTO_SUCCESS); } bail: bzero(&sha2_hmac_ctx, sizeof (sha2_hmac_ctx_t)); mac->cd_length = 0; return (ret); }