/*
 * Release all resources attached to an ESP xfrm_state: the cipher
 * transform and IV buffer, the auth transform and ICV scratch buffer,
 * and finally the esp_data container itself.
 */
void esp_destroy(struct xfrm_state *x)
{
	struct esp_data *esp = x->data;

	if (!esp)
		return;

	/* NOTE(review): guards kept around crypto_free_tfm() — NULL-safety
	 * of this legacy API is not shown here, so don't rely on it. */
	if (esp->conf.tfm) {
		crypto_free_tfm(esp->conf.tfm);
		esp->conf.tfm = NULL;
	}
	/* kfree(NULL) is a no-op; the NULL guards were redundant. */
	kfree(esp->conf.ivec);
	esp->conf.ivec = NULL;
	if (esp->auth.tfm) {
		crypto_free_tfm(esp->auth.tfm);
		esp->auth.tfm = NULL;
	}
	kfree(esp->auth.work_icv);
	esp->auth.work_icv = NULL;
	kfree(esp);
}
/*
 * Tear down a Kerberos GSS context: both crypto transforms, the
 * mech_used netobj buffer, then the context structure itself.
 */
static void gss_delete_sec_context_kerberos(void *internal_ctx)
{
	struct krb5_ctx *ctx = internal_ctx;

	crypto_free_tfm(ctx->seq);
	crypto_free_tfm(ctx->enc);
	kfree(ctx->mech_used.data);
	kfree(ctx);
}
/* * Allocate space for a (de)compressor. */ static void *mppe_alloc(unsigned char *options, int optlen) { struct ppp_mppe_state *state; unsigned int digestsize; if (optlen != CILEN_MPPE + sizeof(state->master_key) || options[0] != CI_MPPE || options[1] != CILEN_MPPE) goto out; state = (struct ppp_mppe_state *) kmalloc(sizeof(*state), GFP_KERNEL); if (state == NULL) goto out; memset(state, 0, sizeof(*state)); state->arc4 = crypto_alloc_tfm("arc4", 0); if (!state->arc4) goto out_free; state->sha1 = crypto_alloc_tfm("sha1", 0); if (!state->sha1) goto out_free; digestsize = crypto_tfm_alg_digestsize(state->sha1); if (digestsize < MPPE_MAX_KEY_LEN) goto out_free; state->sha1_digest = kmalloc(digestsize, GFP_KERNEL); if (!state->sha1_digest) goto out_free; /* Save keys. */ memcpy(state->master_key, &options[CILEN_MPPE], sizeof(state->master_key)); memcpy(state->session_key, state->master_key, sizeof(state->master_key)); /* * We defer initial key generation until mppe_init(), as mppe_alloc() * is called frequently during negotiation. */ return (void *)state; out_free: if (state->sha1_digest) kfree(state->sha1_digest); if (state->sha1) crypto_free_tfm(state->sha1); if (state->arc4) crypto_free_tfm(state->arc4); kfree(state); out: return NULL; }
static void prism2_wep_deinit(void *priv) { struct prism2_wep_data *_priv = priv; if (_priv) { if (_priv->tx_tfm) crypto_free_tfm(_priv->tx_tfm); if (_priv->rx_tfm) crypto_free_tfm(_priv->rx_tfm); } kfree(priv); }
/*
 * Deallocate space for a (de)compressor.
 *
 * Counterpart of mppe_alloc(): releases the digest buffer, both
 * transforms and the state itself.  A NULL @arg is tolerated.
 */
static void mppe_free(void *arg)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;

	if (state) {
		/* kfree(NULL) is a no-op, so no guard is needed;
		 * the crypto_free_tfm() calls remain guarded. */
		kfree(state->sha1_digest);
		if (state->sha1)
			crypto_free_tfm(state->sha1);
		if (state->arc4)
			crypto_free_tfm(state->arc4);
		kfree(state);
	}
}
/*
 * Release the transform and scratch buffer held by an IPComp state.
 * The ipcomp_data structure itself is owned (and freed) by the caller.
 */
static void ipcomp6_free_data(struct ipcomp_data *ipcd)
{
	if (ipcd->tfm)
		crypto_free_tfm(ipcd->tfm);
	/* kfree(NULL) is a no-op; the NULL guard was redundant. */
	kfree(ipcd->scratch);
}
static void * prism2_wep_init(int keyidx) { struct prism2_wep_data *priv; priv = kmalloc(sizeof(*priv), GFP_ATOMIC); if (priv == NULL) goto fail; memset(priv, 0, sizeof(*priv)); priv->key_idx = keyidx; priv->tfm = crypto_alloc_tfm("arc4", 0); if (priv->tfm == NULL) { printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate " "crypto API arc4\n"); goto fail; } /* start WEP IV from a random value */ get_random_bytes(&priv->iv, 4); return priv; fail: if (priv) { if (priv->tfm) crypto_free_tfm(priv->tfm); kfree(priv); } return NULL; }
/*
 * Allocate and initialise per-key WEP state for key slot @keyidx.
 * Returns the new state, or NULL on allocation/transform failure.
 * The crypto API used is selected at build time by kernel version.
 */
static void * prism2_wep_init(int keyidx)
{
	struct prism2_wep_data *priv;

	priv = kmalloc(sizeof(*priv), GFP_ATOMIC);
	if (priv == NULL)
		goto fail;
	memset(priv, 0, sizeof(*priv));
	priv->key_idx = keyidx;
#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
	/* Legacy cipher API: one shared arc4 tfm, NULL on failure. */
	priv->tfm = crypto_alloc_tfm("arc4", 0);
	if (priv->tfm == NULL) {
		printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
		       "crypto API arc4\n");
		goto fail;
	}
#else
	/* Blkcipher API: separate TX/RX tfms, ERR_PTR on failure. */
	priv->tx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(priv->tx_tfm)) {
		printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
		       "crypto API arc4\n");
		/* Clear so the fail path does not free an ERR_PTR. */
		priv->tx_tfm = NULL;
		goto fail;
	}
	priv->rx_tfm = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(priv->rx_tfm)) {
		printk(KERN_DEBUG "ieee80211_crypt_wep: could not allocate "
		       "crypto API arc4\n");
		/* Same ERR_PTR precaution as above. */
		priv->rx_tfm = NULL;
		goto fail;
	}
#endif
	/* start WEP IV from a random value */
	get_random_bytes(&priv->iv, 4);

	return priv;

fail:
#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
	if (priv) {
		if (priv->tfm)
			crypto_free_tfm(priv->tfm);
		kfree(priv);
	}
#else
	if (priv) {
		if (priv->tx_tfm)
			crypto_free_blkcipher(priv->tx_tfm);
		if (priv->rx_tfm)
			crypto_free_blkcipher(priv->rx_tfm);
		kfree(priv);
	}
#endif
	return NULL;
}
/******************************************************************************
Description : Call this only at the end of the whole program when you
              do not want to use signature verification again.
Parameters  : none
Return value: none
******************************************************************************/
void digsig_sign_verify_free(void)
{
	if (sha1_tfm == NULL)
		return;

	crypto_free_tfm(sha1_tfm);
	sha1_tfm = NULL;

	/* this might cause unpredictable behavior if structures are
	   refering to this, their pointer might suddenly become NULL,
	   might need a usage count associated */
}
/*
 * Compute the sha1 digest of the NUL-terminated string @code and hand
 * back a freshly kmalloc'd buffer holding it via @*result (caller
 * frees).  Returns 0 on success, -1 on allocation/transform failure.
 *
 * Fixes vs. the original:
 *  - the allocation check tested the caller's @result pointer instead
 *    of the new buffer @ret, so a failed kmalloc went unnoticed (and
 *    @ret leaked whenever @result was NULL);
 *  - crypto_digest_final() was given @result (a char **) rather than
 *    the digest buffer @ret;
 *  - the digest was never initialised or fed any data — add
 *    crypto_digest_init()/crypto_digest_update() before finalising.
 */
int do_digest(char *code, char **result)
{
	char *ret;
	int len = strlen(code);

	/* NOTE(review): the legacy crypto_alloc_tfm() is checked with
	 * IS_ERR() here as in the original — confirm it cannot also
	 * return NULL in this tree. */
	tfm = crypto_alloc_tfm("sha1", 0);
	if (IS_ERR(tfm))
		return -1;

	sg_init_one(&sg, code, len);

	ret = kmalloc(50, GFP_KERNEL);
	if (!ret) {
		crypto_free_tfm(tfm);
		return -1;
	}
	memset(ret, 0, 50);

	crypto_digest_init(tfm);
	crypto_digest_update(tfm, &sg, 1);
	crypto_digest_final(tfm, ret);

	crypto_free_tfm(tfm);
	*result = ret;
	return 0;
}
/*
 * Drop the cipher transform attached to a loop device.
 * Returns 0 on success, or -EINVAL if no transform was attached.
 */
static int cryptoloop_release(struct loop_device *lo)
{
	struct crypto_tfm *tfm = (struct crypto_tfm *) lo->key_data;

	if (tfm == NULL) {
		printk(KERN_ERR "cryptoloop_release(): tfm == NULL?\n");
		return -EINVAL;
	}

	crypto_free_tfm(tfm);
	lo->key_data = NULL;
	return 0;
}
/*
 * Free everything hanging off an IPv6 AH xfrm_state: the ICV scratch
 * buffer, the auth transform, and the ah_data container.
 */
static void ah6_destroy(struct xfrm_state *x)
{
	struct ah_data *ahp = x->data;

	if (ahp == NULL)
		return;

	kfree(ahp->work_icv);
	ahp->work_icv = NULL;
	crypto_free_tfm(ahp->tfm);
	ahp->tfm = NULL;
	kfree(ahp);
}
/*
 * Free the per-key WEP state and whichever transform flavour the
 * kernel version selected at build time.  A NULL @priv is tolerated.
 */
static void prism2_wep_deinit(void *priv)
{
	struct prism2_wep_data *_priv = priv;
#if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED))
	/* Legacy cipher API: a single shared tfm. */
	if (_priv && _priv->tfm)
		crypto_free_tfm(_priv->tfm);
#else
	/* Blkcipher API: separate TX and RX tfms. */
	if (_priv) {
		if (_priv->tx_tfm)
			crypto_free_blkcipher(_priv->tx_tfm);
		if (_priv->rx_tfm)
			crypto_free_blkcipher(_priv->rx_tfm);
	}
#endif
	kfree(priv);
}
static int cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info) { int err = -EINVAL; char cms[LO_NAME_SIZE]; /* cipher-mode string */ char *cipher; char *mode; char *cmsp = cms; /* c-m string pointer */ struct crypto_tfm *tfm = NULL; /* encryption breaks for non sector aligned offsets */ if (info->lo_offset % LOOP_IV_SECTOR_SIZE) goto out; strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE); cms[LO_NAME_SIZE - 1] = 0; cipher = strsep(&cmsp, "-"); mode = strsep(&cmsp, "-"); if (mode == NULL || strcmp(mode, "cbc") == 0) tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_CBC | CRYPTO_TFM_REQ_MAY_SLEEP); else if (strcmp(mode, "ecb") == 0) tfm = crypto_alloc_tfm(cipher, CRYPTO_TFM_MODE_ECB | CRYPTO_TFM_REQ_MAY_SLEEP); if (tfm == NULL) return -EINVAL; err = tfm->crt_u.cipher.cit_setkey(tfm, info->lo_encrypt_key, info->lo_encrypt_key_size); if (err != 0) goto out_free_tfm; lo->key_data = tfm; return 0; out_free_tfm: crypto_free_tfm(tfm); out: return err; }
/*
 * Parse one algorithm-id/key pair out of a serialized context blob
 * spanning [p, end) and set up the matching keyed cipher transform in
 * *res.  Returns the advanced parse pointer on success, or an ERR_PTR
 * on failure.  The raw key material is freed on every exit path; only
 * the keyed tfm survives (freed again if setkey fails).
 */
static inline const void * get_key(const void *p, const void *end, struct crypto_tfm **res)
{
	struct xdr_netobj key;
	int alg, alg_mode;
	char *alg_name;

	p = simple_get_bytes(p, end, &alg, sizeof(alg));
	if (IS_ERR(p))
		goto out_err;
	p = simple_get_netobj(p, end, &key);
	if (IS_ERR(p))
		goto out_err;

	/* Map the wire enctype to a crypto API name + mode. */
	switch (alg) {
	case ENCTYPE_DES_CBC_RAW:
		alg_name = "des";
		alg_mode = CRYPTO_TFM_MODE_CBC;
		break;
	default:
		printk("gss_kerberos_mech: unsupported algorithm %d\n", alg);
		goto out_err_free_key;
	}
	if (!(*res = crypto_alloc_tfm(alg_name, alg_mode))) {
		printk("gss_kerberos_mech: unable to initialize crypto algorithm %s\n", alg_name);
		goto out_err_free_key;
	}
	if (crypto_cipher_setkey(*res, key.data, key.len)) {
		printk("gss_kerberos_mech: error setting key for crypto algorithm %s\n", alg_name);
		goto out_err_free_tfm;
	}

	/* tfm now owns a copy of the key schedule; drop the raw bytes. */
	kfree(key.data);
	return p;

out_err_free_tfm:
	crypto_free_tfm(*res);
out_err_free_key:
	kfree(key.data);
	p = ERR_PTR(-EINVAL);
out_err:
	return p;
}
int esp6_init_state(struct xfrm_state *x, void *args) { struct esp_data *esp = NULL; if (x->aalg) { if (x->aalg->alg_key_len == 0 || x->aalg->alg_key_len > 512) goto error; } if (x->ealg == NULL) goto error; esp = kmalloc(sizeof(*esp), GFP_KERNEL); if (esp == NULL) return -ENOMEM; memset(esp, 0, sizeof(*esp)); if (x->aalg) { struct xfrm_algo_desc *aalg_desc; esp->auth.key = x->aalg->alg_key; esp->auth.key_len = (x->aalg->alg_key_len+7)/8; esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); if (esp->auth.tfm == NULL) goto error; esp->auth.icv = esp_hmac_digest; aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name); BUG_ON(!aalg_desc); if (aalg_desc->uinfo.auth.icv_fullbits/8 != crypto_tfm_alg_digestsize(esp->auth.tfm)) { printk(KERN_INFO "ESP: %s digestsize %u != %hu\n", x->aalg->alg_name, crypto_tfm_alg_digestsize(esp->auth.tfm), aalg_desc->uinfo.auth.icv_fullbits/8); goto error; } esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8; esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL); if (!esp->auth.work_icv) goto error; } esp->conf.key = x->ealg->alg_key; esp->conf.key_len = (x->ealg->alg_key_len+7)/8; if (x->props.ealgo == SADB_EALG_NULL) esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB); else esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC); if (esp->conf.tfm == NULL) goto error; esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm); esp->conf.padlen = 0; if (esp->conf.ivlen) { esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL); get_random_bytes(esp->conf.ivec, esp->conf.ivlen); } crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len); x->props.header_len = sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen; if (x->props.mode) x->props.header_len += sizeof(struct ipv6hdr); x->data = esp; return 0; error: if (esp) { if (esp->auth.tfm) crypto_free_tfm(esp->auth.tfm); if (esp->auth.work_icv) 
kfree(esp->auth.work_icv); if (esp->conf.tfm) crypto_free_tfm(esp->conf.tfm); kfree(esp); } return -EINVAL; }
/*
 * verify a module's signature
 *
 * Locates the ".module_sig" section, canonicalises the section list,
 * digests the signed parts of the module with SHA1 and checks the
 * result against the embedded signature.  Returns >0 on a good
 * signature, 0 for an unsigned module when unsigned loading is
 * permitted, or a negative errno.
 */
int module_verify_signature(struct module_verify_data *mvdata)
{
	const Elf_Shdr *sechdrs = mvdata->sections;
	const char *secstrings = mvdata->secstrings;
	const char *sig;
	unsigned sig_size;
	int i, ret;

	/* find the signature section (index 0 is the NULL section) */
	for (i = 1; i < mvdata->nsects; i++) {
		switch (sechdrs[i].sh_type) {
		case SHT_PROGBITS:
			if (strcmp(mvdata->secstrings + sechdrs[i].sh_name,
				   ".module_sig") == 0) {
				mvdata->sig_index = i;
			}
			break;
		}
	}

	if (mvdata->sig_index <= 0)
		goto no_signature;

	sig = mvdata->buffer + sechdrs[mvdata->sig_index].sh_offset;
	sig_size = sechdrs[mvdata->sig_index].sh_size;

	_debug("sig in section %d (size %d)\n", mvdata->sig_index, sig_size);

	/* produce a canonicalisation map for the sections */
	ret = module_verify_canonicalise(mvdata);
	if (ret < 0)
		return ret;

	/* grab an SHA1 transformation context
	 * - !!! if this tries to load the sha1.ko module, we will deadlock!!!
	 */
	mvdata->digest = crypto_alloc_tfm2("sha1", 0, 1);
	if (!mvdata->digest) {
		printk("Couldn't load module - SHA1 transform unavailable\n");
		return -EPERM;
	}

	crypto_digest_init(mvdata->digest);

#ifdef MODSIGN_DEBUG
	mvdata->xcsum = 0;
#endif

	/* load data from each relevant section into the digest */
	for (i = 1; i < mvdata->nsects; i++) {
		unsigned long sh_type = sechdrs[i].sh_type;
		unsigned long sh_info = sechdrs[i].sh_info;
		unsigned long sh_size = sechdrs[i].sh_size;
		unsigned long sh_flags = sechdrs[i].sh_flags;
		const char *sh_name = secstrings + sechdrs[i].sh_name;
		const void *data = mvdata->buffer + sechdrs[i].sh_offset;

		/* never digest the signature itself */
		if (i == mvdata->sig_index)
			continue;

#ifdef MODSIGN_DEBUG
		mvdata->csum = 0;
#endif

		/* it would be nice to include relocation sections, but the act
		 * of adding a signature to the module seems changes their
		 * contents, because the symtab gets changed when sections are
		 * added or removed */
		if (sh_type == SHT_REL || sh_type == SHT_RELA) {
			if (mvdata->canonlist[sh_info]) {
				uint32_t xsh_info = mvdata->canonmap[sh_info];

				/* digest the section header fields, using the
				 * canonicalised target-section index */
				crypto_digest_update_data(mvdata, sh_name,
							  strlen(sh_name));
				crypto_digest_update_val(mvdata, sechdrs[i].sh_type);
				crypto_digest_update_val(mvdata, sechdrs[i].sh_flags);
				crypto_digest_update_val(mvdata, sechdrs[i].sh_size);
				crypto_digest_update_val(mvdata, sechdrs[i].sh_addralign);
				crypto_digest_update_val(mvdata, xsh_info);

				if (sh_type == SHT_RELA)
					ret = extract_elf_rela(
						mvdata, i, data,
						sh_size / sizeof(Elf_Rela),
						sh_name);
				else
					ret = extract_elf_rel(
						mvdata, i, data,
						sh_size / sizeof(Elf_Rel),
						sh_name);

				if (ret < 0)
					goto format_error;
			}
			continue;
		}

		/* include allocatable loadable sections */
		if (sh_type != SHT_NOBITS && sh_flags & SHF_ALLOC)
			goto include_section;

		continue;

	include_section:
		crypto_digest_update_data(mvdata, sh_name, strlen(sh_name));
		crypto_digest_update_val(mvdata, sechdrs[i].sh_type);
		crypto_digest_update_val(mvdata, sechdrs[i].sh_flags);
		crypto_digest_update_val(mvdata, sechdrs[i].sh_size);
		crypto_digest_update_val(mvdata, sechdrs[i].sh_addralign);
		crypto_digest_update_data(mvdata, data, sh_size);

		_debug("%08zx %02x digested the %s section, size %ld\n",
		       mvdata->signed_size, mvdata->csum, sh_name, sh_size);

		mvdata->canonlist[i] = 1;
	}

	_debug("Contributed %zu bytes to the digest (csum 0x%02x)\n",
	       mvdata->signed_size, mvdata->xcsum);

	/* do the actual signature verification */
	/* NOTE(review): presumably ksign_verify_signature() consumes or
	 * frees mvdata->digest on all paths — confirm, else it leaks. */
	i = ksign_verify_signature(sig, sig_size, mvdata->digest);

	_debug("verify-sig : %d\n", i);

	/* map "verified OK" (0) to a positive success indicator */
	if (i == 0)
		i = 1;
	return i;

format_error:
	crypto_free_tfm(mvdata->digest);
	return -ELIBBAD;

	/* deal with the case of an unsigned module */
no_signature:
	if (!signedonly)
		return 0;
	printk("An attempt to load unsigned module was rejected\n");
	return -EPERM;
} /* end module_verify_signature() */
static int ah6_init_state(struct xfrm_state *x, void *args) { struct ah_data *ahp = NULL; struct xfrm_algo_desc *aalg_desc; if (!x->aalg) goto error; /* null auth can use a zero length key */ if (x->aalg->alg_key_len > 512) goto error; if (x->encap) goto error; ahp = kmalloc(sizeof(*ahp), GFP_KERNEL); if (ahp == NULL) return -ENOMEM; memset(ahp, 0, sizeof(*ahp)); ahp->key = x->aalg->alg_key; ahp->key_len = (x->aalg->alg_key_len+7)/8; ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); if (!ahp->tfm) goto error; ahp->icv = ah_hmac_digest; /* * Lookup the algorithm description maintained by xfrm_algo, * verify crypto transform properties, and store information * we need for AH processing. This lookup cannot fail here * after a successful crypto_alloc_tfm(). */ aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name); BUG_ON(!aalg_desc); if (aalg_desc->uinfo.auth.icv_fullbits/8 != crypto_tfm_alg_digestsize(ahp->tfm)) { printk(KERN_INFO "AH: %s digestsize %u != %hu\n", x->aalg->alg_name, crypto_tfm_alg_digestsize(ahp->tfm), aalg_desc->uinfo.auth.icv_fullbits/8); goto error; } ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8; BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN); ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL); if (!ahp->work_icv) goto error; x->props.header_len = XFRM_ALIGN8(sizeof(struct ipv6_auth_hdr) + ahp->icv_trunc_len); if (x->props.mode) x->props.header_len += sizeof(struct ipv6hdr); x->data = ahp; return 0; error: if (ahp) { if (ahp->work_icv) kfree(ahp->work_icv); if (ahp->tfm) crypto_free_tfm(ahp->tfm); kfree(ahp); } return -EINVAL; }
/*
 * Set up AH state for an IPv4 xfrm_state: allocate the auth transform
 * (optionally in CBC mode for AES-XCBC-MAC), validate it against the
 * xfrm algorithm table and allocate the ICV scratch buffer.  Returns 0
 * on success, -ENOMEM if the container allocation fails, or -EINVAL
 * otherwise (with cleanup of anything allocated so far).
 */
static int ah_init_state(struct xfrm_state *x)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	unsigned int digestsize;
	u32 mode = 0;

	if (!x->aalg)
		goto error;

	/* null auth can use a zero length key */
	if (x->aalg->alg_key_len > 512)
		goto error;

	if (x->encap)
		goto error;

	ahp = kmalloc(sizeof(*ahp), GFP_KERNEL);
	if (ahp == NULL)
		return -ENOMEM;

	memset(ahp, 0, sizeof(*ahp));

	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

#ifdef CONFIG_CRYPTO_XCBC
	/* AES-XCBC-MAC is driven through a CBC-mode cipher tfm */
	mode = aalg_desc->desc.sadb_alg_id == SADB_X_AALG_AES_XCBC_MAC ?
		CRYPTO_TFM_MODE_CBC : 0;
#endif
	ahp->key = x->aalg->alg_key;
	ahp->key_len = (x->aalg->alg_key_len+7)/8;
	ahp->tfm = crypto_alloc_tfm(x->aalg->alg_name, mode);
	if (!ahp->tfm)
		goto error;
#ifdef CONFIG_CRYPTO_XCBC
	ahp->icv = !mode ? ah_hmac_digest : ah_xcbc_digest;
#else
	ahp->icv = ah_hmac_digest;
#endif
	/* XCBC's MAC length is the cipher block size, not a digest size */
	digestsize = !mode ? crypto_tfm_alg_digestsize(ahp->tfm) :
		crypto_tfm_alg_blocksize(ahp->tfm);

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing.  This lookup cannot fail here
	 * after a successful crypto_alloc_tfm().
	 */
	if (aalg_desc->uinfo.auth.icv_fullbits/8 != digestsize) {
		printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
		       x->aalg->alg_name, digestsize,
		       aalg_desc->uinfo.auth.icv_fullbits/8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

	BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);

	ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL);
	if (!ahp->work_icv)
		goto error;

	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) + ahp->icv_trunc_len);
	if (x->props.mode)
		x->props.header_len += sizeof(struct iphdr);
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		/* NOTE(review): unconditional frees here suggest both
		 * kfree() and crypto_free_tfm() are NULL-safe in this
		 * tree — confirm for crypto_free_tfm(). */
		kfree(ahp->work_icv);
		crypto_free_tfm(ahp->tfm);
		kfree(ahp);
	}
	return -EINVAL;
}
/*
 * Compute the SHA1 of @len bytes of @plaintext into @hash, staging the
 * input through a single scratch page so arbitrarily large buffers can
 * be hashed page-at-a-time.  Returns 0 on success, -ENOMEM if the
 * scratch page cannot be allocated, or -EINVAL if the SHA1 transform
 * is unavailable.  The hash API used depends on the kernel version.
 */
int plaintext_to_sha1(unsigned char *hash, const char *plaintext, unsigned int len)
{
	struct page *page = NULL;
	char *data = NULL;
	int offset = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)
	/* crypto_hash API */
	struct crypto_hash *tfm = NULL;
	struct scatterlist sg = {0};
	struct hash_desc desc = {0};

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		return -ENOMEM;
	}
	data = (char *)page_address(page);
	tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		__free_page(page);
		return -EINVAL;
	}
	desc.tfm = tfm;
	desc.flags = 0;
	crypto_hash_init(&desc);
	sg_init_one(&sg, (void *)data, PAGE_SIZE);
	for (offset = 0; offset < len; offset += PAGE_SIZE) {
		memset(data, 0x00, PAGE_SIZE);
		/* Check if the data is page size or part of page */
		if ((len - offset) >= PAGE_SIZE) {
			memcpy(data, plaintext + offset, PAGE_SIZE);
			crypto_hash_update(&desc, &sg, PAGE_SIZE);
		} else {
			/* final partial page: re-init sg to the tail length */
			memcpy(data, plaintext + offset, (len - offset));
			sg_init_one(&sg, (void *)data, (len - offset));
			crypto_hash_update(&desc, &sg, (len - offset));
		}
	}
	crypto_hash_final(&desc, hash);
	crypto_free_hash(tfm);
#else
	/* legacy crypto_digest API */
	struct crypto_tfm *tfm;
	struct scatterlist sg;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		return -ENOMEM;
	}
	data = (char *)page_address(page);
	tfm = crypto_alloc_tfm("sha1", CRYPTO_TFM_REQ_MAY_SLEEP);
	if (tfm == NULL) {
		__free_page(page);
		return -EINVAL;
	}
	crypto_digest_init(tfm);
	sg_init_one(&sg, (u8 *)data, PAGE_SIZE);
	for (offset = 0; offset < len; offset += PAGE_SIZE) {
		memset(data, 0x00, PAGE_SIZE);
		if ((len - offset) >= PAGE_SIZE) {
			memcpy(data, plaintext + offset, PAGE_SIZE);
			crypto_digest_update(tfm, &sg, 1);
		} else {
			/* final partial page: re-init sg to the tail length */
			memcpy(data, plaintext + offset, (len - offset));
			sg_init_one(&sg, (u8 *)data, (len - offset));
			crypto_digest_update(tfm, &sg, 1);
		}
	}
	crypto_digest_final(tfm, hash);
	crypto_free_tfm(tfm);
#endif
	__free_page(page);
	return 0;
}
int esp_init_state(struct xfrm_state *x, void *args) { struct esp_data *esp = NULL; /* null auth and encryption can have zero length keys */ if (x->aalg) { if (x->aalg->alg_key_len > 512) goto error; } if (x->ealg == NULL) goto error; esp = kmalloc(sizeof(*esp), GFP_KERNEL); if (esp == NULL) return -ENOMEM; memset(esp, 0, sizeof(*esp)); if (x->aalg) { struct xfrm_algo_desc *aalg_desc; esp->auth.key = x->aalg->alg_key; esp->auth.key_len = (x->aalg->alg_key_len+7)/8; esp->auth.tfm = crypto_alloc_tfm(x->aalg->alg_name, 0); if (esp->auth.tfm == NULL) goto error; esp->auth.icv = esp_hmac_digest; aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name); BUG_ON(!aalg_desc); if (aalg_desc->uinfo.auth.icv_fullbits/8 != crypto_tfm_alg_digestsize(esp->auth.tfm)) { printk(KERN_INFO "ESP: %s digestsize %u != %hu\n", x->aalg->alg_name, crypto_tfm_alg_digestsize(esp->auth.tfm), aalg_desc->uinfo.auth.icv_fullbits/8); goto error; } esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8; esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8; esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL); if (!esp->auth.work_icv) goto error; } esp->conf.key = x->ealg->alg_key; esp->conf.key_len = (x->ealg->alg_key_len+7)/8; if (x->props.ealgo == SADB_EALG_NULL) esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_ECB); else esp->conf.tfm = crypto_alloc_tfm(x->ealg->alg_name, CRYPTO_TFM_MODE_CBC); if (esp->conf.tfm == NULL) goto error; esp->conf.ivlen = crypto_tfm_alg_ivsize(esp->conf.tfm); esp->conf.padlen = 0; if (esp->conf.ivlen) { esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL); get_random_bytes(esp->conf.ivec, esp->conf.ivlen); } crypto_cipher_setkey(esp->conf.tfm, esp->conf.key, esp->conf.key_len); x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen; if (x->props.mode) x->props.header_len += sizeof(struct iphdr); if (x->encap) { struct xfrm_encap_tmpl *encap = x->encap; if (encap->encap_type) { switch (encap->encap_type) { case 
UDP_ENCAP_ESPINUDP: x->props.header_len += sizeof(struct udphdr); break; default: printk (KERN_INFO "esp_init_state(): Unhandled encap type: %u\n", encap->encap_type); break; } } } x->data = esp; x->props.trailer_len = esp4_get_max_size(x, 0) - x->props.header_len; return 0; error: if (esp) { if (esp->auth.tfm) crypto_free_tfm(esp->auth.tfm); if (esp->auth.work_icv) kfree(esp->auth.work_icv); if (esp->conf.tfm) crypto_free_tfm(esp->conf.tfm); kfree(esp); } return -EINVAL; }
/*
 * Deserialize a Kerberos GSS context from the blob [p, p+len): fixed
 * fields first, then the mech OID, then the enc and seq cipher keys
 * (each becoming a keyed tfm via get_key()).  The whole blob must be
 * consumed exactly.  On success the context is attached to @ctx_id and
 * 0 is returned; on failure everything built so far is unwound and the
 * parse error (a negative errno) is returned.
 */
static int gss_import_sec_context_kerberos(const void *p, size_t len, struct gss_ctx *ctx_id)
{
	const void *end = (const void *)((const char *)p + len);
	struct krb5_ctx *ctx;

	if (!(ctx = kzalloc(sizeof(*ctx), GFP_KERNEL)))
		goto out_err;

	p = simple_get_bytes(p, end, &ctx->initiate, sizeof(ctx->initiate));
	if (IS_ERR(p))
		goto out_err_free_ctx;
	p = simple_get_bytes(p, end, &ctx->seed_init, sizeof(ctx->seed_init));
	if (IS_ERR(p))
		goto out_err_free_ctx;
	p = simple_get_bytes(p, end, ctx->seed, sizeof(ctx->seed));
	if (IS_ERR(p))
		goto out_err_free_ctx;
	p = simple_get_bytes(p, end, &ctx->signalg, sizeof(ctx->signalg));
	if (IS_ERR(p))
		goto out_err_free_ctx;
	p = simple_get_bytes(p, end, &ctx->sealalg, sizeof(ctx->sealalg));
	if (IS_ERR(p))
		goto out_err_free_ctx;
	p = simple_get_bytes(p, end, &ctx->endtime, sizeof(ctx->endtime));
	if (IS_ERR(p))
		goto out_err_free_ctx;
	p = simple_get_bytes(p, end, &ctx->seq_send, sizeof(ctx->seq_send));
	if (IS_ERR(p))
		goto out_err_free_ctx;
	p = simple_get_netobj(p, end, &ctx->mech_used);
	if (IS_ERR(p))
		goto out_err_free_ctx;
	p = get_key(p, end, &ctx->enc);
	if (IS_ERR(p))
		goto out_err_free_mech;
	p = get_key(p, end, &ctx->seq);
	if (IS_ERR(p))
		goto out_err_free_key1;
	/* reject trailing garbage: the blob must be fully consumed */
	if (p != end) {
		p = ERR_PTR(-EFAULT);
		goto out_err_free_key2;
	}

	ctx_id->internal_ctx_id = ctx;

	dprintk("RPC:      Successfully imported new context.\n");
	return 0;

	/* unwind in reverse order of construction */
out_err_free_key2:
	crypto_free_tfm(ctx->seq);
out_err_free_key1:
	crypto_free_tfm(ctx->enc);
out_err_free_mech:
	kfree(ctx->mech_used.data);
out_err_free_ctx:
	kfree(ctx);
out_err:
	return PTR_ERR(p);
}