/*
 * Allocate and initialize a hash descriptor for algorithm \a alg_id.
 *
 * Looks up the algorithm description, allocates the crypto transform,
 * keys it (explicit key if given, else the algorithm's default key),
 * and runs crypto_hash_init() so the caller can immediately feed data.
 * On success the caller owns desc->tfm and must free it with
 * crypto_free_hash(); returns 0 or a negative errno.
 */
static int cfs_crypto_hash_alloc(unsigned char alg_id,
				 const struct cfs_crypto_hash_type **type,
				 struct hash_desc *desc, unsigned char *key,
				 unsigned int key_len)
{
	int err = 0;

	*type = cfs_crypto_hash_type(alg_id);

	if (*type == NULL) {
		CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
		      alg_id, CFS_HASH_ALG_MAX);
		return -EINVAL;
	}
	desc->tfm = crypto_alloc_hash((*type)->cht_name, 0, 0);

	/* NOTE(review): crypto_alloc_hash() reports failure via ERR_PTR,
	 * not NULL — this NULL check looks like defensive dead code. */
	if (desc->tfm == NULL)
		return -EINVAL;

	if (IS_ERR(desc->tfm)) {
		CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
		       (*type)->cht_name);
		return PTR_ERR(desc->tfm);
	}

	desc->flags = 0;

	/* Shash have different logic for initialization then digest
	 * shash: crypto_hash_setkey, crypto_hash_init
	 * digest: crypto_digest_init, crypto_digest_setkey
	 * Skip this function for digest, because we use shash logic at
	 * cfs_crypto_hash_alloc.
	 */
	if (key != NULL) {
		/* caller-supplied key/initial value */
		err = crypto_hash_setkey(desc->tfm, key, key_len);
	} else if ((*type)->cht_key != 0) {
		/* fall back to the algorithm's built-in default key */
		err = crypto_hash_setkey(desc->tfm,
					 (unsigned char *)&((*type)->cht_key),
					 (*type)->cht_size);
	}

	if (err != 0) {
		/* keying failed: release the transform before reporting */
		crypto_free_hash(desc->tfm);
		return err;
	}

	CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
	       (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_name,
	       (crypto_hash_tfm(desc->tfm))->__crt_alg->cra_driver_name,
	       cfs_crypto_hash_speeds[alg_id]);

	return crypto_hash_init(desc);
}
/*
 * Allocate the superblock's hash transform and key it when a key is
 * configured. Also records the digest size in psb->crypto_attached_size.
 * Returns the transform on success, ERR_PTR(-errno) on failure.
 */
static struct crypto_hash *pohmelfs_init_hash(struct pohmelfs_sb *psb)
{
	struct crypto_hash *hash;
	int err;

	hash = crypto_alloc_hash(psb->hash_string, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash)) {
		err = PTR_ERR(hash);
		dprintk("%s: idx: %u: failed to allocate hash '%s', err: %d.\n",
				__func__, psb->idx, psb->hash_string, err);
		return ERR_PTR(err);
	}

	psb->crypto_attached_size = crypto_hash_digestsize(hash);

	/* Keyless configuration: the bare transform is all we need. */
	if (!psb->hash_keysize)
		return hash;

	err = crypto_hash_setkey(hash, psb->hash_key, psb->hash_keysize);
	if (err) {
		dprintk("%s: idx: %u: failed to set key for hash '%s', err: %d.\n",
				__func__, psb->idx, psb->hash_string, err);
		crypto_free_hash(hash);
		return ERR_PTR(err);
	}

	return hash;
}
/*
 * Crypto machinery: hash/cipher support for the given crypto controls.
 *
 * Allocates the hash transform named in @ctl, records its digest size
 * in @ctl->crypto_attached_size, and keys it with @key when a key size
 * is configured. Returns the transform or ERR_PTR(-errno).
 */
static struct crypto_hash *dst_init_hash(struct dst_crypto_ctl *ctl, u8 *key)
{
	struct crypto_hash *hash;
	int err;

	hash = crypto_alloc_hash(ctl->hash_algo, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash)) {
		err = PTR_ERR(hash);
		dprintk("%s: failed to allocate hash '%s', err: %d.\n",
				__func__, ctl->hash_algo, err);
		return ERR_PTR(err);
	}

	ctl->crypto_attached_size = crypto_hash_digestsize(hash);

	/* No key configured — return the unkeyed transform as-is. */
	if (!ctl->hash_keysize)
		return hash;

	err = crypto_hash_setkey(hash, key, ctl->hash_keysize);
	if (err) {
		dprintk("%s: failed to set key for hash '%s', err: %d.\n",
				__func__, ctl->hash_algo, err);
		crypto_free_hash(hash);
		return ERR_PTR(err);
	}

	return hash;
}
/*
 * Compute the Michael MIC over a pseudo-header (DA, SA, priority,
 * 3 zero pad bytes) followed by the frame payload. Writes the MIC to
 * @mic. Returns 0 on success, -1 on any failure (missing tfm or
 * setkey error) or the crypto_hash_digest() result.
 */
int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *da, u8 *sa,
		u8 priority, u8 *data, size_t data_len, u8 *mic)
{
	struct hash_desc desc;
	struct scatterlist sg[2];
	u8 hdr[ETH_HLEN + 2];	/* DA + SA + priority + 3 bytes of zero pad */

	if (tfm_michael == NULL) {
		printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n");
		return -1;
	}

	/* Build the Michael pseudo-header; padding must be zero. */
	memcpy(&hdr[0], da, ETH_ALEN);
	memcpy(&hdr[ETH_ALEN], sa, ETH_ALEN);
	hdr[ETH_ALEN * 2] = priority;
	hdr[ETH_ALEN * 2 + 1] = 0;
	hdr[ETH_ALEN * 2 + 2] = 0;
	hdr[ETH_ALEN * 2 + 3] = 0;

	/* Hash header and payload in a single pass via scatter-gather. */
	sg_init_table(sg, 2);
	sg_set_buf(&sg[0], hdr, sizeof(hdr));
	sg_set_buf(&sg[1], data, data_len);

	if (crypto_hash_setkey(tfm_michael, key, MIC_KEYLEN))
		return -1;

	desc.tfm = tfm_michael;
	desc.flags = 0;
	return crypto_hash_digest(&desc, sg, data_len + sizeof(hdr), mic);
}
/*
 * Compute the Michael MIC for a frame: the MIC covers a pseudo-header
 * (destination, source, priority plus three zero pad bytes) and then
 * the payload. Result is written to @mic; returns 0 on success,
 * -1 on a missing transform or setkey failure.
 */
int orinoco_mic(struct crypto_hash *tfm_michael, u8 *key, u8 *da, u8 *sa,
		u8 priority, u8 *data, size_t data_len, u8 *mic)
{
	struct hash_desc mic_desc;
	struct scatterlist frags[2];
	u8 header[ETH_HLEN + 2]; /* size of header + padding */

	if (tfm_michael == NULL) {
		printk(KERN_WARNING "orinoco_mic: tfm_michael == NULL\n");
		return -1;
	}

	/* Copy header into buffer. We need the padding on the end zeroed */
	memcpy(&header[0], da, ETH_ALEN);
	memcpy(&header[ETH_ALEN], sa, ETH_ALEN);
	header[ETH_ALEN * 2] = priority;
	header[ETH_ALEN * 2 + 1] = 0;
	header[ETH_ALEN * 2 + 2] = 0;
	header[ETH_ALEN * 2 + 3] = 0;

	/* Use scatter gather to MIC header and data in one go */
	sg_init_table(frags, 2);
	sg_set_buf(&frags[0], header, sizeof(header));
	sg_set_buf(&frags[1], data, data_len);

	if (crypto_hash_setkey(tfm_michael, key, MIC_KEYLEN))
		return -1;

	mic_desc.tfm = tfm_michael;
	mic_desc.flags = 0;
	return crypto_hash_digest(&mic_desc, frags,
				  data_len + sizeof(header), mic);
}
static int init_hash (struct hash_desc * desc) { struct crypto_hash * tfm = NULL; int ret = -1; /* Same as build time */ const unsigned char * key = "The quick brown fox jumps over the lazy dog"; tfm = crypto_alloc_hash ("hmac(sha256)", 0, 0); if (IS_ERR(tfm)) { printk(KERN_ERR "FIPS(%s): integ failed to allocate tfm %ld", __FUNCTION__, PTR_ERR(tfm)); return -1; } ret = crypto_hash_setkey (tfm, key, strlen(key)); if (ret) { printk(KERN_ERR "FIPS(%s): fail at crypto_hash_setkey", __FUNCTION__); return -1; } desc->tfm = tfm; desc->flags = 0; ret = crypto_hash_init (desc); if (ret) { printk(KERN_ERR "FIPS(%s): fail at crypto_hash_init", __FUNCTION__); return -1; } return 0; }
/**
 * Initialize the state descriptor for the specified hash algorithm.
 *
 * An internal routine to allocate the hash-specific state in \a hdesc for
 * use with cfs_crypto_hash_digest() to compute the hash of a single message,
 * though possibly in multiple chunks.  The descriptor internal state should
 * be freed with cfs_crypto_hash_final().
 *
 * \param[in]  hash_alg hash algorithm id (CFS_HASH_ALG_*)
 * \param[out] type     pointer to the hash description in hash_types[] array
 * \param[in,out] hdesc hash state descriptor to be initialized
 * \param[in]  key      initial hash value/state, NULL to use default value
 * \param[in]  key_len  length of \a key
 *
 * \retval             0 on success
 * \retval             negative errno on failure
 */
static int cfs_crypto_hash_alloc(enum cfs_crypto_hash_alg hash_alg,
				 const struct cfs_crypto_hash_type **type,
				 struct hash_desc *hdesc, unsigned char *key,
				 unsigned int key_len)
{
	int err = 0;

	*type = cfs_crypto_hash_type(hash_alg);

	if (*type == NULL) {
		CWARN("Unsupported hash algorithm id = %d, max id is %d\n",
		      hash_alg, CFS_HASH_ALG_MAX);
		return -EINVAL;
	}
	hdesc->tfm = crypto_alloc_hash((*type)->cht_name, 0, 0);

	/* NOTE(review): crypto_alloc_hash() signals failure with ERR_PTR,
	 * not NULL — this check appears to be defensive dead code. */
	if (hdesc->tfm == NULL)
		return -EINVAL;

	if (IS_ERR(hdesc->tfm)) {
		CDEBUG(D_INFO, "Failed to alloc crypto hash %s\n",
		       (*type)->cht_name);
		return PTR_ERR(hdesc->tfm);
	}

	hdesc->flags = 0;

	/* Key with the caller's value, or the algorithm's default key. */
	if (key != NULL)
		err = crypto_hash_setkey(hdesc->tfm, key, key_len);
	else if ((*type)->cht_key != 0)
		err = crypto_hash_setkey(hdesc->tfm,
					 (unsigned char *)&((*type)->cht_key),
					 (*type)->cht_size);

	if (err != 0) {
		/* release the transform before propagating the error */
		crypto_free_hash(hdesc->tfm);
		return err;
	}

	CDEBUG(D_INFO, "Using crypto hash: %s (%s) speed %d MB/s\n",
	       (crypto_hash_tfm(hdesc->tfm))->__crt_alg->cra_name,
	       (crypto_hash_tfm(hdesc->tfm))->__crt_alg->cra_driver_name,
	       cfs_crypto_hash_speeds[hash_alg]);

	return crypto_hash_init(hdesc);
}
void do_integrity_check(void) { u8 *rbuf = (u8 *) ZIMAGE_ADDR; u32 len; u8 hmac[SHA256_DIGEST_SIZE]; struct hash_desc desc; struct scatterlist sg; u8 *key = "12345678"; printk(KERN_INFO "FIPS: do kernel integrity check\n"); if (unlikely(!need_integrity_check || in_fips_err())) return; if (*((u32 *) &rbuf[36]) != 0x016F2818) { printk(KERN_ERR "FIPS: invalid zImage magic number."); set_in_fips_err(); goto err1; } if (*(u32 *) &rbuf[44] <= *(u32 *) &rbuf[40]) { printk(KERN_ERR "FIPS: invalid zImage calculated len"); set_in_fips_err(); goto err1; } len = *(u32 *) &rbuf[44] - *(u32 *) &rbuf[40]; desc.tfm = crypto_alloc_hash("hmac(sha256)", 0, 0); if (IS_ERR(desc.tfm)) { printk(KERN_ERR "FIPS: integ failed to allocate tfm %ld\n", PTR_ERR(desc.tfm)); set_in_fips_err(); goto err; } sg_init_one(&sg, rbuf, len); crypto_hash_setkey(desc.tfm, key, strlen(key)); crypto_hash_digest(&desc, &sg, len, hmac); if (!strncmp(hmac, &rbuf[len], SHA256_DIGEST_SIZE)) { printk(KERN_INFO "FIPS: integrity check passed\n"); } else { printk(KERN_ERR "FIPS: integrity check failed\n"); set_in_fips_err(); } err: crypto_free_hash(desc.tfm); err1: need_integrity_check = false; return; }
void do_integrity_check(void) { u8* rbuf=__va(ZIMAGE_START); u32 len; u8 hmac[SHA256_DIGEST_SIZE]; struct hash_desc desc; struct scatterlist sg; u8* key="12345678"; printk(KERN_INFO "do kernel integrity check\n"); if (integrity_checked || in_fips_err()) return; if ( *((u32*) &rbuf[36]) != 0x016F2818) { printk(KERN_ERR "integ: invalid zImage magic number."); set_in_fips_err(); goto err; } len = *(u32*)&rbuf[44] - *(u32*)&rbuf[40]; if (len < 0) { printk(KERN_ERR "integ: invalid zImage calculated len"); set_in_fips_err(); goto err; } desc.tfm = crypto_alloc_hash("hmac(sha256)",0,0); if (IS_ERR(desc.tfm)) { printk(KERN_ERR "integ: failed to allocate tfm %ld\n",PTR_ERR(desc.tfm)); set_in_fips_err(); goto err; } sg_init_one(&sg, rbuf, len); crypto_hash_setkey(desc.tfm,key,strlen(key)); crypto_hash_digest(&desc,&sg,len,hmac); if (!strncmp(hmac,&rbuf[len],SHA256_DIGEST_SIZE)) { printk(KERN_INFO "integrity check passed"); } else { printk(KERN_ERR "integrity check failed"); set_in_fips_err(); } err: integrity_checked=true; crypto_free_hash(desc.tfm); return; }
/*
 * Compute an HMAC of @data with @key using the named algorithm and
 * write the digest to @result. @result_len must be at least the
 * algorithm's digest size. Returns WIFI_ENGINE_SUCCESS, or a
 * WIFI_ENGINE_FAILURE* code on error.
 */
static int DriverEnvironment_HMAC(const char *algo, const void *key,
				  size_t key_len, const void *data,
				  size_t data_len, void *result,
				  size_t result_len)
{
	struct crypto_hash *tfm;
	struct scatterlist sg[1];
	struct hash_desc desc;
	int status = WIFI_ENGINE_FAILURE;
	int ret;

	tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		DE_TRACE_INT(TR_CRYPTO, "failed to allocate hash (%ld)\n",
			     PTR_ERR(tfm));
		return WIFI_ENGINE_FAILURE;
	}

	/* The caller's buffer must be able to hold a full digest. */
	if (crypto_hash_digestsize(tfm) > result_len) {
		status = WIFI_ENGINE_FAILURE_INVALID_LENGTH;
		goto out;
	}

	sg_init_one(&sg[0], data, data_len);
	crypto_hash_clear_flags(tfm, ~0);

	ret = crypto_hash_setkey(tfm, key, key_len);
	if (ret != 0) {
		DE_TRACE_INT(TR_CRYPTO, "failed to set key (%d)\n", ret);
		goto out;
	}

	desc.tfm = tfm;
	desc.flags = 0;
	ret = crypto_hash_digest(&desc, sg, data_len, result);
	if (ret != 0) {
		DE_TRACE_INT(TR_CRYPTO, "faild to digest (%d)\n", ret);
		goto out;
	}

	status = WIFI_ENGINE_SUCCESS;
out:
	/* single cleanup point instead of freeing on every branch */
	crypto_free_hash(tfm);
	return status;
}
static char *calc_hmac(char *plain_text, unsigned int plain_text_size, char *key, unsigned int key_size) { struct scatterlist sg; char *result; struct crypto_hash *tfm; struct hash_desc desc; int ret; tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { printk(KERN_ERR "failed to load transform for hmac(sha1): %ld\n", PTR_ERR(tfm)); return NULL; } desc.tfm = tfm; desc.flags = 0; result = kzalloc(TOSLSM_DIGEST_SIZE, GFP_KERNEL); if (!result) { printk(KERN_ERR "out of memory!\n"); goto out; } sg_set_buf(&sg, plain_text, plain_text_size); ret = crypto_hash_setkey(tfm, key, key_size); if (ret) { printk(KERN_ERR "setkey() failed ret=%d\n", ret); kfree(result); result = NULL; goto out; } ret = crypto_hash_digest(&desc, &sg, plain_text_size, result); if (ret) { printk(KERN_ERR "digest() failed ret=%d\n", ret); kfree(result); result = NULL; goto out; } out: crypto_free_hash(tfm); return result; }
/*
 * Compute hmac(sha1) of @input with @key and write the digest to
 * @output. Silently does nothing if the transform cannot be allocated
 * (void interface preserved).
 *
 * Fixes:
 *  - crypto_alloc_hash() returns ERR_PTR on failure; the original
 *    dereferenced it unchecked.
 *  - the scatterlist was never initialized; use sg_init_one().
 *  - the explicit crypto_hash_init() was redundant: crypto_hash_digest()
 *    performs init/update/final itself, so it is dropped (the key is
 *    set before the digest, as required).
 */
static void HMAC_sha1(const __u8 *key, int keyLength, void *input,
		      int inputLength, __u8 *output)
{
	struct scatterlist sg[1];
	struct hash_desc desc;
	struct crypto_hash *hash_tfm;

	hash_tfm = crypto_alloc_hash("hmac(sha1)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm))	/* fix: don't dereference an ERR_PTR */
		return;

	desc.tfm = hash_tfm;
	desc.flags = 0;

	sg_init_one(&sg[0], input, inputLength);	/* fix: init the sg */

	crypto_hash_setkey(desc.tfm, key, keyLength);
	crypto_hash_digest(&desc, &sg[0], inputLength, output);

	crypto_free_hash(hash_tfm);
}
/*
 * Build an SPKM3 token of the requested type for the given context.
 * Only SPKM_MIC_TOK is supported (hmac-md5 integrity, cast5-cbc
 * confidentiality OIDs are required in the context); SPKM_WRAP_TOK is
 * rejected. On failure the token is zeroed and GSS_S_FAILURE returned.
 */
u32
spkm3_make_token(struct spkm3_ctx *ctx, struct xdr_buf * text,
		 struct xdr_netobj * token, int toktype)
{
	s32 checksum_type;
	char tokhdrbuf[25];
	char cksumdata[16];
	struct xdr_netobj md5cksum = {.len = 0, .data = cksumdata};
	struct xdr_netobj mic_hdr = {.len = 0, .data = tokhdrbuf};
	int tokenlen = 0;
	unsigned char *ptr;
	s32 now;
	int ctxelen = 0, ctxzbit = 0;
	int md5elen = 0, md5zbit = 0;

	now = jiffies;

	/* context identifiers are fixed at 16 bytes */
	if (ctx->ctx_id.len != 16) {
		dprintk("RPC: spkm3_make_token BAD ctx_id.len %d\n",
			ctx->ctx_id.len);
		goto out_err;
	}

	if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) {
		dprintk("RPC: gss_spkm3_seal: unsupported I-ALG "
			"algorithm. only support hmac-md5 I-ALG.\n");
		goto out_err;
	} else
		checksum_type = CKSUMTYPE_HMAC_MD5;

	if (!g_OID_equal(&ctx->conf_alg, &cast5_cbc_oid)) {
		dprintk("RPC: gss_spkm3_seal: unsupported C-ALG "
			"algorithm\n");
		goto out_err;
	}

	if (toktype == SPKM_MIC_TOK) {
		/* Calculate checksum over the mic-header */
		asn1_bitstring_len(&ctx->ctx_id, &ctxelen, &ctxzbit);
		spkm3_mic_header(&mic_hdr.data, &mic_hdr.len,
				 ctx->ctx_id.data, ctxelen, ctxzbit);

		if (make_spkm3_checksum(checksum_type, &ctx->derived_integ_key,
					(char *)mic_hdr.data, mic_hdr.len,
					text, 0, &md5cksum))
			goto out_err;

		asn1_bitstring_len(&md5cksum, &md5elen, &md5zbit);
		tokenlen = 10 + ctxelen + 1 + md5elen + 1;

		/* Create token header using generic routines */
		token->len = g_token_size(&ctx->mech_used, tokenlen);

		ptr = token->data;
		g_make_token_header(&ctx->mech_used, tokenlen, &ptr);

		spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum,
				     md5elen, md5zbit);
	} else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */
		dprintk("RPC: gss_spkm3_seal: SPKM_WRAP_TOK "
			"not supported\n");
		goto out_err;
	}

	/* XXX need to implement sequence numbers, and ctx->expired */

	return GSS_S_COMPLETE;
out_err:
	token->data = NULL;
	token->len = 0;
	return GSS_S_FAILURE;
}

/* xdr_process_buf() callback: feed one scatterlist entry into the
 * running hash in @data (a struct hash_desc). */
static int
spkm3_checksummer(struct scatterlist *sg, void *data)
{
	struct hash_desc *desc = data;

	return crypto_hash_update(desc, sg, sg->length);
}

/* checksum the plaintext data and hdrlen bytes of the token header */
s32
make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header,
		    unsigned int hdrlen, struct xdr_buf *body,
		    unsigned int body_offset, struct xdr_netobj *cksum)
{
	char *cksumname;
	struct hash_desc desc; /* XXX add to ctx? */
	struct scatterlist sg[1];
	int err;

	switch (cksumtype) {
	case CKSUMTYPE_HMAC_MD5:
		cksumname = "hmac(md5)";
		break;
	default:
		dprintk("RPC: spkm3_make_checksum:"
			" unsupported checksum %d", cksumtype);
		return GSS_S_FAILURE;
	}

	if (key->data == NULL || key->len <= 0)
		return GSS_S_FAILURE;

	desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return GSS_S_FAILURE;

	cksum->len = crypto_hash_digestsize(desc.tfm);
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_setkey(desc.tfm, key->data, key->len);
	if (err)
		goto out;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;

	/* hash the token header first, then the body via the callback */
	sg_set_buf(sg, header, hdrlen);
	crypto_hash_update(&desc, sg, sg->length);

	xdr_process_buf(body, body_offset, body->len - body_offset,
			spkm3_checksummer, &desc);
	crypto_hash_final(&desc, cksum->data);

out:
	crypto_free_hash(desc.tfm);

	return err ? GSS_S_FAILURE : 0;
}
/*
 * Verify the driver binary's HMAC-SHA256 against the expected value
 * supplied as a module parameter (hex string). Returns 0 on match,
 * -EINVAL on mismatch, -ENOMEM on allocation failure, and -EIO (or
 * -ENXIO/-ENOENT for missing inputs) on other errors.
 */
static int tf_self_test_integrity(const char *alg_name, struct module *mod)
{
	unsigned char expected[32];
	unsigned char actual[32];
	struct scatterlist *sg = NULL;
	struct hash_desc desc = {NULL, 0};
	size_t digest_length;
	unsigned char *const key = tf_integrity_hmac_sha256_key;
	size_t const key_length = sizeof(tf_integrity_hmac_sha256_key);
	int error;

	if (mod->raw_binary_ptr == NULL)
		return -ENXIO;
	if (tf_integrity_hmac_sha256_expected_value == NULL)
		return -ENOENT;
	INFO("expected=%s", tf_integrity_hmac_sha256_expected_value);
	error = scan_hex(expected, sizeof(expected),
			 tf_integrity_hmac_sha256_expected_value);
	if (error < 0) {
		pr_err("tf_driver: Badly formatted hmac_sha256 parameter "
		       "(should be a hex string)\n");
		return -EIO;
	};
	desc.tfm = crypto_alloc_hash(alg_name, 0, 0);
	if (IS_ERR_OR_NULL(desc.tfm)) {
		ERROR("crypto_alloc_hash(%s) failed", alg_name);
		/* NOTE(review): (int)desc.tfm truncates a pointer — this
		 * looks like it should be PTR_ERR(desc.tfm); confirm. */
		error = (desc.tfm == NULL ? -ENOMEM : (int)desc.tfm);
		goto abort;
	}
	digest_length = crypto_hash_digestsize(desc.tfm);
	INFO("alg_name=%s driver_name=%s digest_length=%u",
	     alg_name,
	     crypto_tfm_alg_driver_name(crypto_hash_tfm(desc.tfm)),
	     digest_length);

	error = crypto_hash_setkey(desc.tfm, key, key_length);
	if (error) {
		ERROR("crypto_hash_setkey(%s) failed: %d", alg_name, error);
		goto abort;
	}

	/* map the (vmalloc'ed) module image into a scatterlist */
	sg = vmalloc_to_sg(mod->raw_binary_ptr, mod->raw_binary_size);
	if (IS_ERR_OR_NULL(sg)) {
		ERROR("vmalloc_to_sg(%lu) failed: %d",
		      mod->raw_binary_size, (int)sg);
		/* NOTE(review): same pointer-to-int truncation as above */
		error = (sg == NULL ? -ENOMEM : (int)sg);
		goto abort;
	}

	error = crypto_hash_digest(&desc, sg, mod->raw_binary_size, actual);
	if (error) {
		ERROR("crypto_hash_digest(%s) failed: %d", alg_name, error);
		goto abort;
	}

	/* NOTE(review): kfree(sg) assumes vmalloc_to_sg() kmalloc'ed the
	 * table — confirm against its definition. */
	kfree(sg);
	crypto_free_hash(desc.tfm);

#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
	if (tf_fault_injection_mask & TF_CRYPTO_ALG_INTEGRITY) {
		pr_warning("TF: injecting fault in integrity check!\n");
		actual[0] = 0xff;
		actual[1] ^= 0xff;
	}
#endif
	TF_TRACE_ARRAY(expected, digest_length);
	TF_TRACE_ARRAY(actual, digest_length);
	if (memcmp(expected, actual, digest_length)) {
		ERROR("wrong %s digest value", alg_name);
		error = -EINVAL;
	} else {
		INFO("%s: digest successful", alg_name);
		error = 0;
	}

	return error;

abort:
	if (!IS_ERR_OR_NULL(sg))
		kfree(sg);
	if (!IS_ERR_OR_NULL(desc.tfm))
		crypto_free_hash(desc.tfm);
	/* everything except OOM is reported as a generic I/O error */
	return error == -ENOMEM ? error : -EIO;
}
/*
 * Run one known-answer digest test vector @tv through @alg_name
 * (keyed when tv->key is set) and compare against tv->digest.
 * Returns 0 on match, -EINVAL on mismatch, negative errno otherwise.
 */
static int tf_self_test_digest(const char *alg_name,
			       const struct digest_test_vector *tv)
{
	unsigned char digest[64];
	unsigned char input[256];
	struct scatterlist sg;
	struct hash_desc desc = {NULL, 0};
	int error;
	size_t digest_length;

	desc.tfm = crypto_alloc_hash(alg_name, 0, 0);
	if (IS_ERR_OR_NULL(desc.tfm)) {
		ERROR("crypto_alloc_hash(%s) failed", alg_name);
		/* NOTE(review): (int)desc.tfm truncates a pointer — looks
		 * like it should be PTR_ERR(desc.tfm); confirm. */
		error = (desc.tfm == NULL ? -ENOMEM : (int)desc.tfm);
		goto abort;
	}
	digest_length = crypto_hash_digestsize(desc.tfm);
	INFO("alg_name=%s driver_name=%s digest_length=%u",
	     alg_name,
	     crypto_tfm_alg_driver_name(crypto_hash_tfm(desc.tfm)),
	     digest_length);
	if (digest_length > sizeof(digest)) {
		ERROR("digest length too large (%zu > %zu)",
		      digest_length, sizeof(digest));
		error = -ENOMEM;
		goto abort;
	}

	if (tv->key != NULL) {
		/* keyed algorithm (e.g. HMAC) — set the vector's key */
		error = crypto_hash_setkey(desc.tfm, tv->key, tv->key_length);
		if (error) {
			ERROR("crypto_hash_setkey(%s) failed: %d",
			      alg_name, error);
			goto abort;
		}
		TF_TRACE_ARRAY(tv->key, tv->key_length);
	}
	error = crypto_hash_init(&desc);
	if (error) {
		ERROR("crypto_hash_init(%s) failed: %d", alg_name, error);
		goto abort;
	}

	/* The test vector data is in vmalloc'ed memory since it's a module
	   global. Copy it to the stack, since the crypto API doesn't
	   support vmalloc'ed memory. */
	if (tv->length > sizeof(input)) {
		ERROR("data too large (%zu > %zu)",
		      tv->length, sizeof(input));
		error = -ENOMEM;
		goto abort;
	}
	memcpy(input, tv->text, tv->length);
	INFO("sg_init_one(%p, %p, %u)", &sg, input, tv->length);
	sg_init_one(&sg, input, tv->length);
	TF_TRACE_ARRAY(input, tv->length);
	error = crypto_hash_update(&desc, &sg, tv->length);
	if (error) {
		ERROR("crypto_hash_update(%s) failed: %d", alg_name, error);
		goto abort;
	}
	error = crypto_hash_final(&desc, digest);
	if (error) {
		ERROR("crypto_hash_final(%s) failed: %d", alg_name, error);
		goto abort;
	}

	crypto_free_hash(desc.tfm);
	desc.tfm = NULL;

	if (memcmp(digest, tv->digest, digest_length)) {
		TF_TRACE_ARRAY(digest, digest_length);
		ERROR("wrong %s digest value", alg_name);
		pr_err("[SMC Driver] error: SMC Driver POST FAILURE (%s)\n",
		       alg_name);
		error = -EINVAL;
	} else {
		INFO("%s: digest successful", alg_name);
		error = 0;
	}

	return error;

abort:
	if (!IS_ERR_OR_NULL(desc.tfm))
		crypto_free_hash(desc.tfm);
	pr_err("[SMC Driver] error: SMC Driver POST FAILURE (%s)\n",
	       alg_name);
	return error;
}
/*
 * Initialize IPv6 ESP state for @x: allocate and key the optional
 * authentication hash and the required block cipher, size the IV
 * buffer, and compute the header length. On any failure everything
 * already attached to x->data is torn down via esp6_destroy().
 * Returns 0 or -ENOMEM/-EINVAL.
 */
static int esp6_init_state(struct xfrm_state *x)
{
	struct esp_data *esp = NULL;
	struct crypto_blkcipher *tfm;

	/* null auth and encryption can have zero length keys */
	if (x->aalg) {
		if (x->aalg->alg_key_len > 512)
			goto error;
	}
	if (x->ealg == NULL)
		goto error;

	/* ESP-in-UDP encapsulation is not supported for IPv6 */
	if (x->encap)
		goto error;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;
		struct crypto_hash *hash;

		esp->auth.key = x->aalg->alg_key;
		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
					 CRYPTO_ALG_ASYNC);
		if (IS_ERR(hash))
			goto error;

		esp->auth.tfm = hash;
		if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
			goto error;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		/* the algorithm table and the transform must agree on
		 * the full ICV size */
		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
		    crypto_hash_digestsize(hash)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_hash_digestsize(hash),
				 aalg_desc->uinfo.auth.icv_fullbits/8);
			goto error;
		}

		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len,
					     GFP_KERNEL);
		if (!esp->auth.work_icv)
			goto error;
	}
	esp->conf.key = x->ealg->alg_key;
	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;
	esp->conf.tfm = tfm;
	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
	esp->conf.padlen = 0;
	if (esp->conf.ivlen) {
		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
		if (unlikely(esp->conf.ivec == NULL))
			goto error;
		/* IV is generated lazily on first use */
		esp->conf.ivinitted = 0;
	}
	if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
		goto error;
	x->props.header_len = sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct ipv6hdr);
	x->data = esp;
	return 0;

error:
	/* esp6_destroy() frees whatever was attached so far */
	x->data = esp;
	esp6_destroy(x);
	x->data = NULL;
	return -EINVAL;
}
/*
 * Initialize AH state for @x: allocate and key the authentication
 * hash, validate it against the xfrm algorithm description, and set
 * the protocol header length. Returns 0, -ENOMEM, or -EINVAL; on
 * error everything allocated so far is freed.
 */
static int ah_init_state(struct xfrm_state *x)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_hash *tfm;

	if (!x->aalg)
		goto error;

	/* AH over UDP encapsulation is not supported */
	if (x->encap)
		goto error;

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (ahp == NULL)
		return -ENOMEM;

	tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;

	ahp->tfm = tfm;
	if (crypto_hash_setkey(tfm, x->aalg->alg_key,
			       (x->aalg->alg_key_len + 7) / 8))
		goto error;

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing.  This lookup cannot fail here
	 * after a successful crypto_alloc_hash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_hash_digestsize(tfm)) {
		printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
		       x->aalg->alg_name, crypto_hash_digestsize(tfm),
		       aalg_desc->uinfo.auth.icv_fullbits/8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

	BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);

	/* scratch buffer for the full (untruncated) ICV */
	ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL);
	if (!ahp->work_icv)
		goto error;

	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
					  ahp->icv_trunc_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		kfree(ahp->work_icv);
		crypto_free_hash(ahp->tfm);
		kfree(ahp);
	}
	return -EINVAL;
}
/*
 * Initialize IPv4 ESP state for @x: allocate and key the optional
 * authentication hash and the required block cipher, size the IV,
 * account for tunnel/BEET and UDP-encapsulation headers, and compute
 * header/trailer lengths. On failure esp_destroy() tears down what
 * was attached. Returns 0 or -ENOMEM/-EINVAL.
 */
static int esp_init_state(struct xfrm_state *x)
{
	struct esp_data *esp = NULL;
	struct crypto_blkcipher *tfm;
	u32 align;

	/* null auth and encryption can have zero length keys */
	if (x->aalg) {
		if (x->aalg->alg_key_len > 512)
			goto error;
	}
	if (x->ealg == NULL)
		goto error;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;
		struct crypto_hash *hash;

		esp->auth.key = x->aalg->alg_key;
		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
					 CRYPTO_ALG_ASYNC);
		if (IS_ERR(hash))
			goto error;

		esp->auth.tfm = hash;
		if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
			goto error;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		/* algorithm table and transform must agree on ICV size */
		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
		    crypto_hash_digestsize(hash)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_hash_digestsize(hash),
				 aalg_desc->uinfo.auth.icv_fullbits/8);
			goto error;
		}

		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len,
					     GFP_KERNEL);
		if (!esp->auth.work_icv)
			goto error;
	}
	esp->conf.key = x->ealg->alg_key;
	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;
	esp->conf.tfm = tfm;
	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
	esp->conf.padlen = 0;
	if (esp->conf.ivlen) {
		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
		if (unlikely(esp->conf.ivec == NULL))
			goto error;
		/* IV is generated lazily on first use */
		esp->conf.ivinitted = 0;
	}
	if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
		goto error;
	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		/* account for the UDP encapsulation header */
		switch (encap->encap_type) {
		default:
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) +
					       2 * sizeof(u32);
			break;
		}
	}
	x->data = esp;
	/* trailer: padding to cipher block (min 4), pad len + next hdr,
	 * then the truncated ICV */
	align = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
	if (esp->conf.padlen)
		align = max_t(u32, align, esp->conf.padlen);
	x->props.trailer_len = align + 1 + esp->auth.icv_trunc_len;
	return 0;

error:
	x->data = esp;
	esp_destroy(x);
	x->data = NULL;
	return -EINVAL;
}
/*
 * Compute a keyed hash of @filp's entire contents into @ibuf (at most
 * @ilen bytes; must hold the algorithm's digest size). The algorithm
 * comes from the file's xattr when EXTRA_CREDIT is enabled, otherwise
 * DEFAULT_ALGO. Returns 0 on success, negative errno / -1 on failure.
 *
 * NOTE(review): several error paths assign `r` but the function
 * returns `ret`, so those specific codes (e.g. r = -2, r = -EINVAL)
 * are never propagated — the caller sees the stale `ret` instead.
 * NOTE(review): crypto_hash_setkey()'s return value is ignored, and
 * `key` is a name defined elsewhere in this file.
 */
int calculate_integrity(struct file *filp, char *ibuf, int ilen)
{
	int r = -1 , ret = -1;
	ssize_t vfs_read_retval = 0;
	loff_t file_offset = 0;
	mm_segment_t oldfs = get_fs();
	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	struct scatterlist sg;
	struct crypto_hash *tfm = NULL;
	struct hash_desc desc;
	char *algo = kmalloc(1024, GFP_KERNEL);

	if (!algo) {
		ret = -ENOMEM;
		goto out;
	}
	__initialize_with_null(algo, 1024);

#ifdef EXTRA_CREDIT
	/* pick the per-file algorithm from its xattr, if present */
	ret = vfs_getxattr(filp->f_path.dentry, INT_TYPE_XATTR, algo, 1024);
	if (ret <= 0)
		__initialize_with_null(algo, 1024);
#endif
	if (*algo == '\0')
		strcpy(algo, DEFAULT_ALGO);

	if (!buf)
		goto out;

	__initialize_with_null(ibuf, ilen);

	if (!filp->f_op->read) {
		r = -2;
		goto out;
	}

	filp->f_pos = 0;
	/* allow vfs_read() into kernel buffers */
	set_fs(KERNEL_DS);

	tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		r = -EINVAL;
		goto out;
	}

	desc.tfm = tfm;
	desc.flags = 0;

	if (crypto_hash_digestsize(tfm) > ilen) {
		r = -EINVAL;
		goto out;
	}

	crypto_hash_setkey(tfm, key, strlen(key));

	ret = crypto_hash_init(&desc);
	if (ret) {
		r = ret;
		goto out;
	}
	sg_init_table(&sg, 1);

	file_offset = 0;
	/* read the file a page at a time and feed it to the hash */
	do {
		vfs_read_retval = vfs_read(filp, buf, PAGE_SIZE,
					   &file_offset);
		if (vfs_read_retval < 0) {
			ret = vfs_read_retval;
			goto out;
		}

		sg_set_buf(&sg, (u8 *)buf, vfs_read_retval);
		ret = crypto_hash_update(&desc, &sg, sg.length);
		if (ret) {
			r = ret;
			goto out;
		}

		/* short read: end of file reached */
		if (vfs_read_retval < ksize(buf))
			break;
	} while (1);

	ret = crypto_hash_final(&desc, ibuf);
	if (ret) {
		r = ret;
		goto out;
	}

out:
	kfree(buf);
	kfree(algo);
	if (!IS_ERR(tfm))
		crypto_free_hash(tfm);
	set_fs(oldfs);
	return ret;
}
/*
 * Compute the VERI_HMAC keyed hash of @file's full contents into
 * @digest, reading the file a page at a time with kernel_read().
 * Keyed with the module-level password_str/password_len. Returns 0 on
 * success, -1 on setkey failure, or a negative errno.
 */
static int verihmac_calc_hash(struct file* file,char* digest)
{
	struct hash_desc desc;
	struct scatterlist sg;
	int rc;
	char *rbuf;
	loff_t i_size, offset = 0;

	rbuf = kzalloc(PAGE_SIZE, GFP_KERNEL);
	if (!rbuf) {
		printk(VERI_ERROR "no free memory\n");
		rc = -ENOMEM;
		return rc;
	}

	desc.tfm = crypto_alloc_hash(VERI_HMAC, 0,CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm)) {
		printk(VERI_ERROR "failed to load %s transform: %ld\n",
		       VERI_HMAC, PTR_ERR(desc.tfm));
		rc = PTR_ERR(desc.tfm);
		kfree(rbuf);
		return rc;
	}
	desc.flags = 0;

	if(crypto_hash_setkey(desc.tfm,password_str,password_len)!=0) {
		printk(VERI_ERROR "setting key error \n");
		kfree(rbuf);
		crypto_free_hash(desc.tfm);
		return -1;
	}

	rc=crypto_hash_init(&desc);
	if(rc) {
		printk(VERI_ERROR "hash_init failed \n");
		crypto_free_hash(desc.tfm);
		kfree(rbuf);
		return rc;
	}

	i_size = i_size_read(file->f_dentry->d_inode);
	/* hash the file page by page up to its inode size */
	while (offset < i_size) {
		int rbuf_len;

		rbuf_len = kernel_read(file, offset, rbuf, PAGE_SIZE);
		if (rbuf_len < 0) {
			rc = rbuf_len;
			break;
		}
		if (rbuf_len == 0)
			break;
		offset += rbuf_len;

		sg_set_buf(&sg, rbuf, rbuf_len);
		rc = crypto_hash_update(&desc, &sg, rbuf_len);
		if (rc)
			break;
	}
	kfree(rbuf);
	/* only produce the digest if all updates succeeded */
	if (!rc)
		rc = crypto_hash_final(&desc, digest);
	crypto_free_hash(desc.tfm);
	return rc;
}
/*
 * FIPS boot-time integrity check: HMAC-SHA256 the copied zImage (in
 * step_len chunks through a bounce buffer) and compare against the
 * value stored right after the image. Sets the FIPS error state on
 * any failure; runs at most once (need_integrity_check).
 *
 * NOTE(review): the digest comparison uses strncmp(), which stops at
 * the first 0x00 byte of the computed HMAC — memcmp() would compare
 * all SHA256_DIGEST_SIZE bytes; confirm against the build-time signer.
 * NOTE(review): crypto_hash_setkey()'s return value is ignored.
 */
void do_integrity_check(void)
{
	u8 *rbuf = 0;
	u32 len;
	u8 hmac[SHA256_DIGEST_SIZE];
	struct hash_desc desc;
	struct scatterlist sg;
	u8 *key = "12345678";
	int i, step_len = PAGE_SIZE, err;
	u8 *pAllocBuf = 0;

	printk(KERN_INFO "FIPS: integrity start\n");

	if (unlikely(!need_integrity_check || in_fips_err())) {
		printk(KERN_INFO "FIPS: integrity check not needed\n");
		return;
	}
	rbuf = (u8*)phys_to_virt((unsigned long)CONFIG_CRYPTO_FIPS_INTEG_COPY_ADDRESS);

	/* zImage magic number lives at offset 36 */
	if (*((u32 *) &rbuf[36]) != 0x016F2818) {
		printk(KERN_ERR "FIPS: invalid zImage magic number.");
		set_in_fips_err();
		goto err1;
	}

	if (*(u32 *) &rbuf[44] <= *(u32 *) &rbuf[40]) {
		printk(KERN_ERR "FIPS: invalid zImage calculated len");
		set_in_fips_err();
		goto err1;
	}

	len = *(u32 *) &rbuf[44] - *(u32 *) &rbuf[40];

	printk(KERN_INFO "FIPS: integrity actual zImageLen = %d\n", len);
	printk(KERN_INFO "FIPS: do kernel integrity check address: %lx \n",
	       (unsigned long)rbuf);

	desc.tfm = crypto_alloc_hash("hmac(sha256)", 0, 0);

	if (IS_ERR(desc.tfm)) {
		printk(KERN_ERR "FIPS: integ failed to allocate tfm %ld\n",
		       PTR_ERR(desc.tfm));
		set_in_fips_err();
		goto err1;
	}

#if FIPS_FUNC_TEST == 2
	/* deliberately corrupt one byte to exercise the failure path */
	rbuf[1024] = rbuf[1024] + 1;
#endif

	crypto_hash_setkey(desc.tfm, key, strlen(key));

	pAllocBuf = kmalloc(step_len,GFP_KERNEL);
	if (!pAllocBuf) {
		printk(KERN_INFO "Fail to alloc memory, length %d\n",
		       step_len);
		set_in_fips_err();
		goto err1;
	}

	err = crypto_hash_init(&desc);
	if (err) {
		printk(KERN_INFO "fail at crypto_hash_init\n");
		set_in_fips_err();
		kfree(pAllocBuf);
		goto err1;
	}

	/* feed the image through the bounce buffer, step_len at a time;
	 * the final (possibly short) chunk also runs crypto_hash_final */
	for (i = 0; i < len; i += step_len) {
		/* last is reached */
		if (i + step_len >= len - 1) {
			memcpy(pAllocBuf, &rbuf[i], len - i);
			sg_init_one(&sg, pAllocBuf, len - i);
			err = crypto_hash_update(&desc, &sg, len - i);
			if (err) {
				printk(KERN_INFO "Fail to crypto_hash_update1\n");
				set_in_fips_err();
				goto err;
			}
			err = crypto_hash_final(&desc, hmac);
			if (err) {
				printk(KERN_INFO "Fail to crypto_hash_final\n");
				set_in_fips_err();
				goto err;
			}
		} else {
			memcpy(pAllocBuf, &rbuf[i], step_len);
			sg_init_one(&sg, pAllocBuf, step_len);
			err = crypto_hash_update(&desc, &sg, step_len);

			if (err) {
				printk(KERN_INFO "Fail to crypto_hash_update\n");
				set_in_fips_err();
				goto err;
			}
		}
	}

#if FIPS_FUNC_TEST == 2
	/* undo the deliberate corruption */
	rbuf[1024] = rbuf[1024] - 1;
#endif

	if (!strncmp(hmac, &rbuf[len], SHA256_DIGEST_SIZE)) {
		printk(KERN_INFO "FIPS: integrity check passed\n");
	} else {
		printk(KERN_ERR "FIPS: integrity check failed. hmac:%lx, buf:%lx.\n",(long) hmac, (long)rbuf[len] );
		set_in_fips_err();
	}

 err:
	kfree(pAllocBuf);
	crypto_free_hash(desc.tfm);
 err1:
	need_integrity_check = false;

	/* if(integrity_mem_reservoir != 0) {
		printk(KERN_NOTICE "FIPS free integrity_mem_reservoir = %ld\n", integrity_mem_reservoir);
		free_bootmem((unsigned long)CONFIG_CRYPTO_FIPS_INTEG_COPY_ADDRESS, integrity_mem_reservoir);
	} */
}
/*
 * Initialize AH state for @x: allocate and key the authentication
 * hash, cross-check it against the xfrm algorithm table, allocate the
 * full-ICV scratch buffer, and set the header length. Returns 0,
 * -ENOMEM, or -EINVAL; on error partial allocations are freed.
 */
static int ah_init_state(struct xfrm_state *x)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_hash *tfm;

	if (!x->aalg)
		goto error;

	/* AH over UDP encapsulation is not supported */
	if (x->encap)
		goto error;

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (ahp == NULL)
		return -ENOMEM;

	tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;

	ahp->tfm = tfm;
	if (crypto_hash_setkey(tfm, x->aalg->alg_key,
			       (x->aalg->alg_key_len + 7) / 8))
		goto error;

	/* cannot fail after a successful crypto_alloc_hash() */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	/* algorithm table and transform must agree on the ICV size */
	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_hash_digestsize(tfm)) {
		printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
		       x->aalg->alg_name, crypto_hash_digestsize(tfm),
		       aalg_desc->uinfo.auth.icv_fullbits/8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

	BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);

	/* scratch buffer for the full (untruncated) ICV */
	ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL);
	if (!ahp->work_icv)
		goto error;

	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
					  ahp->icv_trunc_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		kfree(ahp->work_icv);
		crypto_free_hash(ahp->tfm);
		kfree(ahp);
	}
	return -EINVAL;
}
/*
 * ipsec_ah_output - insert an IPv4 AH header into an outgoing packet
 * and compute its ICV.
 *
 * Mutable IP header fields (tos/ttl/frag_off, and any mutable options)
 * are saved into a scratch union and zeroed before the MAC is computed,
 * then restored afterwards.  When the hardware offload hook
 * (encrypt_hmac) is present and the skb is eligible, the digest is
 * delegated to the driver; otherwise it is computed in software with
 * the per-CPU crypto_hash transform.
 *
 * Returns IPSEC_HARD_OPEATE_OK when the hardware path took the packet,
 * -1 on early failure, otherwise 0 — note that software-path failures
 * after the header rewrite also reach the "error" label and return 0.
 */
int ipsec_ah_output(struct ipsec_state *x, struct sk_buff *skb)
{
	int err = -1;
	struct iphdr *iph, *top_iph;
	struct ip_auth_hdr *ah;
	/* Per-CPU transform set selected by this SA's auth algorithm. */
	struct ipsec_alg_tfms * alg_tfms = g_alg_tfms[smp_processor_id()][x->props.auth_algo][0];
	struct crypto_hash * hash = alg_tfms->hash;
	struct sa_ah_data *ahp;
	/* Scratch copy of the IP header plus up to 40 bytes of options,
	 * used to hold the mutable fields while the ICV is computed. */
	union {
		struct iphdr iph;
		char buf[60];
	} tmp_iph;

	iph = &tmp_iph.iph;
	top_iph = skb->nh.iph;

	/* Save the mutable fields before zeroing them for the MAC. */
	iph->tos = top_iph->tos;
	iph->ttl = top_iph->ttl;
	iph->frag_off = top_iph->frag_off;

	if (top_iph->ihl != 5) {
		/* Options present: save daddr and the option bytes, then
		 * blank any mutable options in place. */
		iph->daddr = top_iph->daddr;
		memcpy(iph+1, top_iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
		err = ip_clear_mutable_options(top_iph, &top_iph->daddr);
		if (err) {
			return -1;
		}
	}

	/* AH header sits immediately after the IP header (+ options). */
	ah = (struct ip_auth_hdr *)((char *)top_iph+top_iph->ihl*4);
	ah->nexthdr = top_iph->protocol;

	/* Zero the mutable fields for ICV computation. */
	top_iph->tos = 0;
	top_iph->tot_len = htons(skb->len);
	top_iph->frag_off = 0;
	top_iph->ttl = 0;
	top_iph->protocol = IPPROTO_AH;
	top_iph->check = 0;

	ahp = &x->u.ah_data;
	/* hdrlen is expressed in 32-bit words minus 2 (RFC 4302). */
	ah->hdrlen  = (XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len) >> 2) - 2;
	ah->reserved = 0;
	ah->spi = x->id.spi;
	/* Sequence number comes from the driver when it maintains one,
	 * otherwise from the SA's software counter. */
	if ((NULL != g_drv_sae_func_s.ipsec_get_seqnum) && (x->drv_content != NULL)) {
		ah->seq_no = g_drv_sae_func_s.ipsec_get_seqnum(x->drv_content);
	} else {
		ah->seq_no = htonl(++x->oseq);
	}

	/* Hardware offload path: hand the packet and the saved header
	 * copy to the driver. */
	if(unlikely(!(skb->service_flags&SKB_SERVICE_FLAG_IPSEC_UNPAR))&&(NULL != g_drv_sae_func_s.encrypt_hmac)&&(x->drv_content != NULL)) {
		drv_sae_fw_s drv_sea_fw_p;
		int tmp = -1;
		drv_sae_platcontent_s *platcontent_s_p = (drv_sae_platcontent_s *)kmalloc(sizeof(drv_sae_platcontent_s)+60,GFP_ATOMIC);

		if (NULL == platcontent_s_p) {
			return -1;
		}
		/* NOTE(review): copies the full 60-byte scratch union even
		 * when no options were saved, so the tail may be
		 * uninitialized stack — confirm the driver only consumes
		 * the first ihl*4 bytes. */
		memcpy(platcontent_s_p->buf,iph,60);
		platcontent_s_p->protono = IPPROTO_AH;
		platcontent_s_p->sa = (unsigned char *)x;
		drv_sea_fw_p.hash_data_len = skb->len;
		drv_sea_fw_p.hash_data_offset = 0;
		drv_sea_fw_p.cipher_data_len = 0;
		drv_sea_fw_p.cipher_data_offset = 0;
		drv_sea_fw_p.iv_len = 0;
		drv_sea_fw_p.iv_offset= 0;
		drv_sea_fw_p.hash_out_len = ahp->icv_full_len;
		drv_sea_fw_p.hash_out_buf = NULL; /* not needed anymore */
		drv_sea_fw_p.plat_content = (u8*)platcontent_s_p;
		drv_sea_fw_p.drv_content = x->drv_content;
		/* ICV field must be zero while the MAC is computed. */
		memset(ah->auth_data, 0, ahp->icv_trunc_len);
		tmp = g_drv_sae_func_s.encrypt_hmac(skb, &drv_sea_fw_p);
		if(0 == tmp)
			/* NOTE(review): platcontent_s_p is not freed on
			 * this path — presumably ownership passes to the
			 * driver on success; confirm, otherwise it leaks. */
			return IPSEC_HARD_OPEATE_OK;
		/* Hardware refused the packet: fall through to software. */
		kfree(platcontent_s_p);
	}

	/* Software path: keyed digest over the whole packet. */
	if (crypto_hash_setkey(hash, x->auth_key.alg_key, (x->auth_key.alg_key_len+7)/8)) {
		IPSEC_INNEC_POLICY_COUNTER(set_auth_key_err);
		goto error;
	}
	err = ipsec_ah_mac_digest(ahp, alg_tfms, skb, ah->auth_data);
	if (err) {
		goto error;
	}
	memcpy(ah->auth_data, alg_tfms->work_icv, ahp->icv_trunc_len);

	/* Restore the saved mutable fields and rebuild the checksum. */
	top_iph->tos = iph->tos;
	top_iph->ttl = iph->ttl;
	top_iph->frag_off = iph->frag_off;
	if (top_iph->ihl != 5) {
		top_iph->daddr = iph->daddr;
		memcpy(top_iph+1, iph+1, top_iph->ihl*4 - sizeof(struct iphdr));
	}
	ip_send_check(top_iph);

error:
	/* NOTE(review): failures that reach this label still return 0,
	 * so callers cannot distinguish them — confirm this is intended
	 * (the packet goes out with a bad/empty ICV in that case). */
	return 0;
}
int ipsec_ah_input(struct ipsec_state *x, struct sk_buff *skb, unsigned int spi) { int ah_hlen; int ihl; int err; struct iphdr *iph; struct ip_auth_hdr *ah; struct sa_ah_data *ahp; struct ipsec_alg_tfms * alg_tfms = g_alg_tfms[smp_processor_id()][x->props.auth_algo][0]; struct crypto_hash * hash = alg_tfms->hash; u8 auth_buf[32]; int temp; if (!pskb_may_pull(skb, sizeof(struct ip_auth_hdr))) { return -1; } ah = (struct ip_auth_hdr*)skb->data; ahp = &x->u.ah_data; ah_hlen = (ah->hdrlen + 2) << 2; if (ah_hlen != XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_full_len) && ah_hlen != XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len)) return -1; if (!pskb_may_pull(skb, ah_hlen)) return -1; /* We are going to _remove_ AH header to keep sockets happy, * so... Later this can change. */ if (skb_cloned(skb) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) return -1; skb->ip_summed = CHECKSUM_NONE; ah = (struct ip_auth_hdr*)skb->data; iph = skb->nh.iph; ihl = skb->data - skb->nh.raw; iph->ttl = 0; iph->tos = 0; iph->frag_off = 0; iph->check = 0; if (ihl > sizeof(*iph)) { __be32 dummy; if (ip_clear_mutable_options(iph, &dummy)) { return -1; } } temp = skb->data - skb->nh.raw; __skb_push(skb,temp ); if ((NULL != g_drv_sae_func_s.decrypt_hmac)&&(x->drv_content != NULL)) { drv_sae_platcontent_s *platcontent_s_p; drv_sae_fw_s drv_sea_fw_p; int tmp = -1; platcontent_s_p = (drv_sae_platcontent_s *)kmalloc(sizeof(drv_sae_platcontent_s)+60+ahp->icv_trunc_len,GFP_ATOMIC); if (NULL == platcontent_s_p) { return -1; } platcontent_s_p->protono = (-1)*(IPPROTO_AH); platcontent_s_p->sa = (unsigned char *)x; memcpy(platcontent_s_p->buf, iph, ihl);//ip header memcpy((platcontent_s_p->buf + 60), ah->auth_data, ahp->icv_trunc_len);//auth data drv_sea_fw_p.hash_data_len = skb->len; drv_sea_fw_p.hash_data_offset = 0; drv_sea_fw_p.cipher_data_len = 0; drv_sea_fw_p.cipher_data_offset = 0; drv_sea_fw_p.iv_len = 0; drv_sea_fw_p.iv_offset = 0; drv_sea_fw_p.hash_out_len = ahp->icv_full_len; 
drv_sea_fw_p.hash_out_buf = NULL;//do not need anymore drv_sea_fw_p.plat_content = (u8*)platcontent_s_p; drv_sea_fw_p.drv_content = x->drv_content; memset(ah->auth_data, 0, ahp->icv_trunc_len); tmp = g_drv_sae_func_s.decrypt_hmac(skb, &drv_sea_fw_p); __skb_pull(skb,temp ); skb_push(skb, ihl); if( 0 == tmp ) return IPSEC_HARD_OPEATE_OK; memcpy(auth_buf, (platcontent_s_p->buf + 60), ahp->icv_trunc_len); kfree(platcontent_s_p); } if (crypto_hash_setkey(hash, x->auth_key.alg_key, (x->auth_key.alg_key_len+7)/8)) { IPSEC_INNEC_POLICY_COUNTER(set_auth_key_err); err = -1; goto out; } err = ipsec_ah_mac_digest(ahp, alg_tfms, skb, ah->auth_data); if (err) { goto out; } err = -EINVAL; if (memcmp(alg_tfms->work_icv, auth_buf, ahp->icv_trunc_len)) { goto out; } iph->protocol = ah->nexthdr; skb->h.raw = memcpy(skb->nh.raw += ah_hlen, iph, ihl); __skb_pull(skb, ah_hlen + ihl); return 0; out: return err; }