int calc_seckey(struct cifsSesInfo *ses) { int rc; struct crypto_blkcipher *tfm_arc4; struct scatterlist sgin, sgout; struct blkcipher_desc desc; unsigned char sec_key[CIFS_SESS_KEY_SIZE]; /* a nonce */ get_random_bytes(sec_key, CIFS_SESS_KEY_SIZE); tfm_arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); if (!tfm_arc4 || IS_ERR(tfm_arc4)) { cERROR(1, "could not allocate crypto API arc4\n"); return PTR_ERR(tfm_arc4); } desc.tfm = tfm_arc4; crypto_blkcipher_setkey(tfm_arc4, ses->auth_key.response, CIFS_SESS_KEY_SIZE); sg_init_one(&sgin, sec_key, CIFS_SESS_KEY_SIZE); sg_init_one(&sgout, ses->ntlmssp->ciphertext, CIFS_CPHTXT_SIZE); rc = crypto_blkcipher_encrypt(&desc, &sgout, &sgin, CIFS_CPHTXT_SIZE); if (rc) { cERROR(1, "could not encrypt session key rc: %d\n", rc); crypto_free_blkcipher(tfm_arc4); return rc; } /* make secondary_key/nonce as session key */ memcpy(ses->auth_key.response, sec_key, CIFS_SESS_KEY_SIZE); /* and make len as that of session key only */ ses->auth_key.len = CIFS_SESS_KEY_SIZE; crypto_free_blkcipher(tfm_arc4); return 0; }
/*
 * smp_e - the SMP crypto function e: one AES-128 ECB encryption of a
 * single 16-byte block.
 *
 * @tfm: AES transform handle (must be non-NULL)
 * @k:   128-bit key, most significant octet first (k[0])
 * @r:   in: 16-byte plaintext, out: 16-byte ciphertext, MSB first
 *
 * The crypto layer wants least-significant-octet-first buffers, so the
 * key and data are byte-swapped on the way in and the result swapped
 * back on the way out.  Returns 0 on success or a negative errno.
 */
static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
{
	struct blkcipher_desc desc;
	struct scatterlist sg;
	uint8_t key_le[16], blk[16];
	int err;

	if (!tfm) {
		BT_ERR("tfm %p", tfm);
		return -EINVAL;
	}

	desc.tfm = tfm;
	desc.flags = 0;

	/* The most significant octet of key corresponds to k[0] */
	swap128(k, key_le);
	err = crypto_blkcipher_setkey(tfm, key_le, 16);
	if (err) {
		BT_ERR("cipher setkey failed: %d", err);
		return err;
	}

	/* Most significant octet of plaintextData corresponds to data[0] */
	swap128(r, blk);
	sg_init_one(&sg, blk, 16);

	/* Encrypt in place; a failure is logged but blk is swapped back
	 * regardless, exactly as before. */
	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
	if (err)
		BT_ERR("Encrypt data error %d", err);

	/* Most significant octet of encryptedData corresponds to data[0] */
	swap128(blk, r);

	return err;
}
/*
 * init_blkcipher_desc - prepare a blkcipher descriptor for use.
 *
 * Allocates the transform named by the module-wide blkcipher_alg,
 * programs @key/@key_len into it and loads @iv.  On success desc->tfm
 * owns the transform (caller frees); on failure the transform is
 * released (except for the alloc-failure case, where desc->tfm holds
 * the ERR_PTR, matching the original behaviour) and a negative errno
 * is returned.
 */
static int init_blkcipher_desc(struct blkcipher_desc *desc, const u8 *key,
			       unsigned int key_len, const u8 *iv,
			       unsigned int ivsize)
{
	int rc;

	desc->tfm = crypto_alloc_blkcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc->tfm)) {
		pr_err("encrypted_key: failed to load %s transform (%ld)\n",
		       blkcipher_alg, PTR_ERR(desc->tfm));
		return PTR_ERR(desc->tfm);
	}

	desc->flags = 0;

	rc = crypto_blkcipher_setkey(desc->tfm, key, key_len);
	if (rc < 0) {
		pr_err("encrypted_key: failed to setkey (%d)\n", rc);
		crypto_free_blkcipher(desc->tfm);
		return rc;
	}

	crypto_blkcipher_set_iv(desc->tfm, iv, ivsize);

	return 0;
}
int wrapfs_encrypt_page(struct page *dst_page, struct page *src_page, char *key) { int ret = 0; struct crypto_blkcipher *tfm = NULL; struct blkcipher_desc desc; const char *algo = "ctr(aes)"; struct scatterlist src_sg, dst_sg; sg_init_table(&src_sg, 1); sg_init_table(&dst_sg, 1); sg_set_page(&src_sg, src_page, PAGE_CACHE_SIZE, 0); sg_set_page(&dst_sg, dst_page, PAGE_CACHE_SIZE, 0); tfm = crypto_alloc_blkcipher(algo,0,CRYPTO_ALG_ASYNC); if(IS_ERR(tfm)){ printk(KERN_ERR "AES: cipher: Failed to load transform for %ld\n",PTR_ERR(tfm)); return PTR_ERR(tfm); } desc.tfm = tfm; desc.flags = 0; ret = crypto_blkcipher_setkey(tfm,key,32); ret = crypto_blkcipher_encrypt(&desc, &dst_sg, &src_sg, PAGE_CACHE_SIZE); if (ret) { printk(KERN_ERR "Error encrypting\n"); goto out; } out: crypto_free_blkcipher(tfm); return ret; }
/*
 * gss_keyblock_init - allocate and key the cipher for a GSS keyblock.
 *
 * Allocates the @alg_name transform in @alg_mode, stores it in
 * kb->kb_tfm and programs the key already held in kb->kb_key.
 * On allocation failure kb->kb_tfm is left NULL.  Returns 0 on
 * success or a negative errno.
 */
int gss_keyblock_init(struct gss_keyblock *kb, char *alg_name,
		      const int alg_mode)
{
	int rc = 0;
	struct crypto_blkcipher *tfm;

	tfm = crypto_alloc_blkcipher(alg_name, alg_mode, 0);
	if (IS_ERR(tfm)) {
		rc = PTR_ERR(tfm);
		kb->kb_tfm = NULL;
		CERROR("failed to alloc tfm: %s, mode %d: rc = %d\n",
		       alg_name, alg_mode, rc);
		return rc;
	}
	kb->kb_tfm = tfm;

	rc = crypto_blkcipher_setkey(tfm, kb->kb_key.data, kb->kb_key.len);
	if (rc)
		CERROR("failed to set %s key, len %d, rc = %d\n",
		       alg_name, kb->kb_key.len, rc);

	return rc;
}
/*
 * cryptoloop_init - configure transparent encryption for a loop device.
 *
 * Converts the user-supplied "cipher-mode" name (e.g. "aes-cbc") into
 * the crypto API template spelling "mode(cipher)", allocates the
 * transform, programs the key from lo_encrypt_key, and stashes the
 * transform handle in lo->key_data for the transfer functions.
 *
 * Returns 0 on success or a negative errno.
 */
static int
cryptoloop_init(struct loop_device *lo, const struct loop_info64 *info)
{
	int err = -EINVAL;
	int cipher_len;
	int mode_len;
	char cms[LO_NAME_SIZE];		/* cipher-mode string */
	char *cipher;
	char *mode;
	char *cmsp = cms;		/* c-m string pointer */
	struct crypto_blkcipher *tfm;

	/* encryption breaks for non sector aligned offsets */
	if (info->lo_offset % LOOP_IV_SECTOR_SIZE)
		goto out;

	strncpy(cms, info->lo_crypt_name, LO_NAME_SIZE);
	cms[LO_NAME_SIZE - 1] = 0;

	/* Split "cipher-mode" at the first '-'. */
	cipher = cmsp;
	cipher_len = strcspn(cmsp, "-");

	mode = cmsp + cipher_len;
	mode_len = 0;
	if (*mode) {
		mode++;
		mode_len = strcspn(mode, "-");
	}

	/* No mode supplied: default to CBC. */
	if (!mode_len) {
		mode = "cbc";
		mode_len = 3;
	}

	/* "mode(cipher)" plus NUL must fit in the buffer: the extra 3
	 * accounts for '(' , ')' and the terminator. */
	if (cipher_len + mode_len + 3 > LO_NAME_SIZE)
		return -EINVAL;

	/* Rebuild cms in place as "mode(cipher)".  The cipher name is
	 * re-read from info->lo_crypt_name because the memmove below
	 * overwrites the front of cms where it used to live. */
	memmove(cms, mode, mode_len);
	cmsp = cms + mode_len;
	*cmsp++ = '(';
	memcpy(cmsp, info->lo_crypt_name, cipher_len);
	cmsp += cipher_len;
	*cmsp++ = ')';
	*cmsp = 0;

	tfm = crypto_alloc_blkcipher(cms, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_blkcipher_setkey(tfm, info->lo_encrypt_key,
				      info->lo_encrypt_key_size);

	if (err != 0)
		goto out_free_tfm;

	lo->key_data = tfm;
	return 0;

 out_free_tfm:
	crypto_free_blkcipher(tfm);

 out:
	return err;
}
/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
 * the frame: IV (4 bytes), encrypted payload (including SNAP header),
 * ICV (4 bytes). len includes both IV and ICV.
 *
 * Returns 0 if frame was decrypted successfully and ICV was correct and -1 on
 * failure. If frame is OK, IV and ICV will be removed.
 */
static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct prism2_wep_data *wep = priv;
#if(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21))
	struct blkcipher_desc desc = {.tfm = wep->rx_tfm};
#endif
	u32 klen, plen;
	u8 key[WEP_KEY_LEN + 3];
	u8 keyidx, *pos;
#ifndef JOHN_HWSEC
	u32 crc;
	u8 icv[4];
	struct scatterlist sg;
#endif

	/* Need at least IV (4) + ICV (4) beyond the 802.11 header. */
	if (skb->len < hdr_len + 8)
		return -1;

	pos = skb->data + hdr_len;
	/* First 3 IV octets double as the first 3 RC4 key octets. */
	key[0] = *pos++;
	key[1] = *pos++;
	key[2] = *pos++;
	keyidx = *pos++ >> 6;	/* key index sits in the top 2 bits */
	if (keyidx != wep->key_idx)
		return -1;

	klen = 3 + wep->key_len;

	/* Copy rest of the WEP key (the secret part) */
	memcpy(key + 3, wep->key, wep->key_len);

	/* Apply RC4 to data and compute CRC32 over decrypted data */
	plen = skb->len - hdr_len - 8;
#ifndef JOHN_HWSEC
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
	/* Pre-2.6.21 cipher API: fill the scatterlist by hand. */
	crypto_cipher_setkey(wep->tfm, key, klen);
	sg.page = virt_to_page(pos);
	sg.offset = offset_in_page(pos);
	sg.length = plen + 4;
	crypto_cipher_decrypt(wep->tfm, &sg, &sg, plen + 4);
#else
	crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
	sg.page = virt_to_page(pos);
	sg.offset = offset_in_page(pos);
	sg.length = plen + 4;
#else
	sg_init_one(&sg, pos, plen + 4);
#endif
	/* Decrypt payload + ICV in place. */
	if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
		return -7;
#endif
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
	crc = ~crc32_le(~0, pos, plen);
#else
	crc = ~ether_crc_le(plen, pos);
#endif
	/* Serialize the CRC little-endian and compare with the decrypted
	 * ICV that trails the payload. */
	icv[0] = crc;
	icv[1] = crc >> 8;
	icv[2] = crc >> 16;
	icv[3] = crc >> 24;
	if (memcmp(icv, pos + plen, 4) != 0) {
		/* ICV mismatch - drop frame */
		return -2;
	}
#endif	/* JOHN_HWSEC */

	/* Remove IV and ICV */
	memmove(skb->data + 4, skb->data, hdr_len);
	skb_pull(skb, 4);
	skb_trim(skb, skb->len - 4);

	return 0;
}

/* Install a WEP key of up to WEP_KEY_LEN bytes; @seq is unused.
 * Returns 0 on success, -1 on bad length. */
static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
{
	struct prism2_wep_data *wep = priv;

	if (len < 0 || len > WEP_KEY_LEN)
		return -1;

	memcpy(wep->key, key, len);
	wep->key_len = len;

	return 0;
}

/* Copy the current WEP key into @key (caller's buffer of @len bytes).
 * Returns the key length, or -1 if the buffer is too small. */
static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
{
	struct prism2_wep_data *wep = priv;

	if (len < wep->key_len)
		return -1;

	memcpy(key, wep->key, wep->key_len);

	return wep->key_len;
}

/* Append a one-line key summary to @p for procfs stats; returns the
 * advanced buffer pointer. */
static char * prism2_wep_print_stats(char *p, void *priv)
{
	struct prism2_wep_data *wep = priv;
	p += sprintf(p, "key[%d] alg=WEP len=%d\n",
		     wep->key_idx, wep->key_len);
	return p;
}

/* WEP crypto ops registered with the ieee80211 layer; MSDU-level hooks
 * are not needed for WEP (MPDU-level only). */
static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
	.name			= "WEP",
	.init			= prism2_wep_init,
	.deinit			= prism2_wep_deinit,
	.encrypt_mpdu		= prism2_wep_encrypt,
	.decrypt_mpdu		= prism2_wep_decrypt,
	.encrypt_msdu		= NULL,
	.decrypt_msdu		= NULL,
	.set_key		= prism2_wep_set_key,
	.get_key		= prism2_wep_get_key,
	.print_stats		= prism2_wep_print_stats,
	.extra_prefix_len	= 4,	/* IV */
	.extra_postfix_len	= 4,	/* ICV */
	.owner			= THIS_MODULE,
};

int __init ieee80211_crypto_wep_init(void)
{
	return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
}

void __exit ieee80211_crypto_wep_exit(void)
{
	ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
}

/* Dummy symbol used to force this object to be linked in. */
void ieee80211_wep_null(void)
{
//	printk("============>%s()\n", __FUNCTION__);
	return;
}

/* Historic export/module boilerplate, compiled out.
 * NOTE(review): no matching #endif for this "#if 0" is visible in this
 * chunk - presumably it closes further down the file; confirm. */
#if 0
#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
EXPORT_SYMBOL(ieee80211_wep_null);
#else
EXPORT_SYMBOL_NOVERS(ieee80211_wep_null);
#endif

module_init(ieee80211_crypto_wep_init);
module_exit(ieee80211_crypto_wep_exit);
/*
 * gcm_aes_nx_crypt - run one AES-GCM encrypt or decrypt operation
 * through the Power NX coprocessor.
 *
 * @req: the AEAD request (source/destination scatterlists, assoc data)
 * @enc: non-zero to encrypt, zero to decrypt/verify
 *
 * Returns 0 on success, -EBADMSG on authentication-tag mismatch, or a
 * negative errno on other failures.
 */
static int gcm_aes_nx_crypt(struct aead_request *req, int enc)
{
	struct nx_crypto_ctx *nx_ctx = crypto_tfm_ctx(req->base.tfm);
	struct nx_csbcpb *csbcpb = nx_ctx->csbcpb;
	struct blkcipher_desc desc;
	unsigned int nbytes = req->cryptlen;
	int rc = -EINVAL;

	if (nbytes > nx_ctx->ap->databytelen)
		goto out;

	desc.info = nx_ctx->priv.gcm.iv;
	/* initialize the counter */
	*(u32 *)(desc.info + NX_GCM_CTR_OFFSET) = 1;

	/* For scenarios where the input message is zero length, AES CTR mode
	 * may be used. Set the source data to be a single block (16B) of all
	 * zeros, and set the input IV value to be the same as the GMAC IV
	 * value. - nx_wb 4.8.1.3 */
	if (nbytes == 0) {
		char src[AES_BLOCK_SIZE] = {};
		struct scatterlist sg;

		desc.tfm = crypto_alloc_blkcipher("ctr(aes)", 0, 0);
		if (IS_ERR(desc.tfm)) {
			/* NOTE(review): this collapses any allocation error
			 * to -ENOMEM; PTR_ERR(desc.tfm) would preserve the
			 * real cause - confirm before changing. */
			rc = -ENOMEM;
			goto out;
		}

		/* Key length is derived from the key-size field of the
		 * coprocessor parameter block. */
		crypto_blkcipher_setkey(desc.tfm, csbcpb->cpb.aes_gcm.key,
			NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_128 ? 16 :
			NX_CPB_KEY_SIZE(csbcpb) == NX_KS_AES_192 ? 24 : 32);

		sg_init_one(&sg, src, AES_BLOCK_SIZE);
		if (enc)
			crypto_blkcipher_encrypt_iv(&desc, req->dst, &sg,
						    AES_BLOCK_SIZE);
		else
			crypto_blkcipher_decrypt_iv(&desc, req->dst, &sg,
						    AES_BLOCK_SIZE);
		crypto_free_blkcipher(desc.tfm);

		rc = 0;
		goto out;
	}

	desc.tfm = (struct crypto_blkcipher *)req->base.tfm;

	csbcpb->cpb.aes_gcm.bit_length_aad = req->assoclen * 8;

	if (req->assoclen) {
		rc = nx_gca(nx_ctx, req, csbcpb->cpb.aes_gcm.in_pat_or_aad);
		if (rc)
			goto out;
	}

	if (enc)
		NX_CPB_FDM(csbcpb) |= NX_FDM_ENDE_ENCRYPT;
	else
		/* on decrypt, cryptlen includes the trailing auth tag -
		 * only the ciphertext itself is processed */
		nbytes -= crypto_aead_authsize(crypto_aead_reqtfm(req));

	csbcpb->cpb.aes_gcm.bit_length_data = nbytes * 8;

	rc = nx_build_sg_lists(nx_ctx, &desc, req->dst, req->src, nbytes,
			       csbcpb->cpb.aes_gcm.iv_or_cnt);
	if (rc)
		goto out;

	rc = nx_hcall_sync(nx_ctx, &nx_ctx->op,
			   req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP);
	if (rc)
		goto out;

	atomic_inc(&(nx_ctx->stats->aes_ops));
	atomic64_add(csbcpb->csb.processed_byte_count,
		     &(nx_ctx->stats->aes_bytes));

	if (enc) {
		/* copy out the auth tag */
		scatterwalk_map_and_copy(csbcpb->cpb.aes_gcm.out_pat_or_mac,
				req->dst, nbytes,
				crypto_aead_authsize(crypto_aead_reqtfm(req)),
				SCATTERWALK_TO_SG);
	} else if (req->assoclen) {
		u8 *itag = nx_ctx->priv.gcm.iauth_tag;
		u8 *otag = csbcpb->cpb.aes_gcm.out_pat_or_mac;

		/* Compare the tag carried in the request against the tag
		 * computed by the coprocessor.
		 * NOTE(review): memcmp() is not constant-time; a
		 * timing-safe compare would be preferable for tag checks -
		 * confirm what this tree provides. */
		scatterwalk_map_and_copy(itag, req->dst, nbytes,
				crypto_aead_authsize(crypto_aead_reqtfm(req)),
				SCATTERWALK_FROM_SG);
		rc = memcmp(itag, otag,
			    crypto_aead_authsize(crypto_aead_reqtfm(req))) ?
		     -EBADMSG : 0;
	}
out:
	return rc;
}
/*
 * esp_init_state - initialize IPsec ESP state for an xfrm_state.
 *
 * Allocates the esp_data, sets up the optional authentication hash and
 * the mandatory encryption blkcipher, and computes the header/trailer
 * space the transform will need.  On any failure everything allocated
 * so far is torn down through esp_destroy() and -EINVAL is returned
 * (allocation of esp itself failing returns -ENOMEM).
 */
static int esp_init_state(struct xfrm_state *x)
{
	struct esp_data *esp = NULL;
	struct crypto_blkcipher *tfm;
	u32 align;

	/* null auth and encryption can have zero length keys */
	if (x->aalg) {
		if (x->aalg->alg_key_len > 512)
			goto error;
	}
	if (x->ealg == NULL)
		goto error;

	/* NOTE(review): the two "goto error" paths above run esp_destroy()
	 * with x->data == NULL - presumably esp_destroy tolerates that;
	 * confirm. */
	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;
		struct crypto_hash *hash;

		/* Auth key is borrowed from the xfrm_algo, not copied;
		 * key_len rounds the bit count up to bytes. */
		esp->auth.key = x->aalg->alg_key;
		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
					 CRYPTO_ALG_ASYNC);
		if (IS_ERR(hash))
			goto error;

		esp->auth.tfm = hash;
		if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
			goto error;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		/* Sanity-check that the algorithm table and the actual
		 * transform agree on the digest size. */
		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
		    crypto_hash_digestsize(hash)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_hash_digestsize(hash),
				 aalg_desc->uinfo.auth.icv_fullbits/8);
			goto error;
		}

		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

		/* Scratch buffer for computing the full ICV before
		 * truncation. */
		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len,
					     GFP_KERNEL);
		if (!esp->auth.work_icv)
			goto error;
	}

	esp->conf.key = x->ealg->alg_key;
	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;
	esp->conf.tfm = tfm;
	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
	esp->conf.padlen = 0;
	if (esp->conf.ivlen) {
		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
		if (unlikely(esp->conf.ivec == NULL))
			goto error;
		/* IV is generated lazily on first output packet. */
		esp->conf.ivinitted = 0;
	}
	if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
		goto error;

	/* Header space: ESP header + IV, plus outer header for tunnel /
	 * BEET modes. */
	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) +
					       2 * sizeof(u32);
			break;
		}
	}
	x->data = esp;

	/* Trailer: block-aligned padding (min 4) + pad-length/next-header
	 * byte + truncated ICV. */
	align = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
	if (esp->conf.padlen)
		align = max_t(u32, align, esp->conf.padlen);
	x->props.trailer_len = align + 1 + esp->auth.icv_trunc_len;
	return 0;

error:
	/* Hand the partially built esp to esp_destroy() via x->data so a
	 * single teardown path frees whatever was allocated. */
	x->data = esp;
	esp_destroy(x);
	x->data = NULL;
	return -EINVAL;
}
/* Perform WEP encryption on given skb that has at least 4 bytes of headroom * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted, * so the payload length increases with 8 bytes. * * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data)) */ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct prism2_wep_data *wep = priv; u32 klen, len; u8 key[WEP_KEY_LEN + 3]; u8 *pos; cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); #if((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) || (OPENSUSE_SLED)) struct blkcipher_desc desc = {.tfm = wep->tx_tfm}; #endif u32 crc; u8 *icv; struct scatterlist sg; if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 || skb->len < hdr_len) return -1; len = skb->len - hdr_len; pos = skb_push(skb, 4); memmove(pos, pos + 4, hdr_len); pos += hdr_len; klen = 3 + wep->key_len; wep->iv++; /* Fluhrer, Mantin, and Shamir have reported weaknesses in the key * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N) * can be used to speedup attacks, so avoid using them. 
*/ if ((wep->iv & 0xff00) == 0xff00) { u8 B = (wep->iv >> 16) & 0xff; if (B >= 3 && B < klen) wep->iv += 0x0100; } /* Prepend 24-bit IV to RC4 key and TX frame */ *pos++ = key[0] = (wep->iv >> 16) & 0xff; *pos++ = key[1] = (wep->iv >> 8) & 0xff; *pos++ = key[2] = wep->iv & 0xff; *pos++ = wep->key_idx << 6; /* Copy rest of the WEP key (the secret part) */ memcpy(key + 3, wep->key, wep->key_len); if (!tcb_desc->bHwSec) { /* Append little-endian CRC32 and encrypt it to produce ICV */ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) crc = ~crc32_le(~0, pos, len); #else crc = ~ether_crc_le(len, pos); #endif icv = skb_put(skb, 4); icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; #if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED)) crypto_cipher_setkey(wep->tfm, key, klen); sg.page = virt_to_page(pos); sg.offset = offset_in_page(pos); sg.length = len + 4; crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4); return 0; #else crypto_blkcipher_setkey(wep->tx_tfm, key, klen); #if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) sg.page = virt_to_page(pos); sg.offset = offset_in_page(pos); sg.length = len + 4; #else sg_init_one(&sg, pos, len+4); #endif return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); #endif }
static int aml_keybox_aes_encrypt(const void *key, int key_len, const u8 *aes_iv, void *dst, size_t *dst_len, const void *src, size_t src_len) { struct scatterlist sg_in[1], sg_out[1]; struct crypto_blkcipher *tfm = aml_keybox_crypto_alloc_cipher(); struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; int ret; void *iv; int ivsize; if(src_len & 0x0f){ printk("%s:%d,src_len %d is not 16byte align",__func__,__LINE__,src_len); return -1; } if (IS_ERR(tfm)){ printk("%s:%d,crypto_alloc fail\n",__func__,__LINE__); return PTR_ERR(tfm); } *dst_len = src_len ; crypto_blkcipher_setkey((void *) tfm, key, key_len); sg_init_table(sg_in, 1); sg_set_buf(&sg_in[0], src, src_len); sg_init_table(sg_out, 1); sg_set_buf(sg_out, dst, *dst_len); iv = crypto_blkcipher_crt(tfm)->iv; ivsize = crypto_blkcipher_ivsize(tfm); //printk("key_len:%d,ivsize:%d\n",key_len,ivsize); memcpy(iv, aes_iv, ivsize); ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,src_len); crypto_free_blkcipher(tfm); if (ret < 0){ printk("%s:%d,ceph_aes_crypt failed %d\n", __func__,__LINE__,ret); return ret; } return 0; } static int aml_keybox_aes_decrypt(const void *key, int key_len, const u8 *aes_iv, void *dst, size_t *dst_len, const void *src, size_t src_len) { struct scatterlist sg_in[1], sg_out[1]; struct crypto_blkcipher *tfm = aml_keybox_crypto_alloc_cipher(); struct blkcipher_desc desc = { .tfm = tfm }; void *iv; int ivsize; int ret; // int last_byte; if(src_len &0x0f){ printk("%s:%d,src_len %d is not 16byte align",__func__,__LINE__,src_len); return -1; } if (IS_ERR(tfm)){ printk("%s:%d,crypto_alloc fail\n",__func__,__LINE__); return PTR_ERR(tfm); } crypto_blkcipher_setkey((void *) tfm, key, key_len); sg_init_table(sg_in, 1); sg_init_table(sg_out, 1); sg_set_buf(sg_in, src, src_len); sg_set_buf(&sg_out[0], dst, *dst_len); iv = crypto_blkcipher_crt(tfm)->iv; ivsize = crypto_blkcipher_ivsize(tfm); //printk("key_len:%d,ivsize:%d\n",key_len,ivsize); memcpy(iv, aes_iv, ivsize); ret = 
crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len); crypto_free_blkcipher(tfm); if (ret < 0){ printk("%s:%d,ceph_aes_decrypt failed %d\n", __func__,__LINE__,ret); return ret; } *dst_len = src_len; return 0; } int aes_crypto_encrypt(void *dst,size_t *dst_len,const void *src,size_t src_len) { int ret; unsigned char iv_aes[16]; unsigned char key_aes[32]; memcpy(iv_aes,&default_AESkey[0],16); memcpy(key_aes,&default_AESkey[16],32); ret = aml_keybox_aes_encrypt(key_aes,sizeof(key_aes),iv_aes,dst,dst_len,src,src_len); return ret; }
static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct prism2_wep_data *wep = priv; u32 klen, len; u8 key[WEP_KEY_LEN + 3]; u8 *pos; cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); #if((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) || (OPENSUSE_SLED)) struct blkcipher_desc desc = {.tfm = wep->tx_tfm}; #endif u32 crc; u8 *icv; struct scatterlist sg; if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 || skb->len < hdr_len) return -1; len = skb->len - hdr_len; pos = skb_push(skb, 4); memmove(pos, pos + 4, hdr_len); pos += hdr_len; klen = 3 + wep->key_len; wep->iv++; if ((wep->iv & 0xff00) == 0xff00) { u8 B = (wep->iv >> 16) & 0xff; if (B >= 3 && B < klen) wep->iv += 0x0100; } *pos++ = key[0] = (wep->iv >> 16) & 0xff; *pos++ = key[1] = (wep->iv >> 8) & 0xff; *pos++ = key[2] = wep->iv & 0xff; *pos++ = wep->key_idx << 6; memcpy(key + 3, wep->key, wep->key_len); if (!tcb_desc->bHwSec) { #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) crc = ~crc32_le(~0, pos, len); #else crc = ~ether_crc_le(len, pos); #endif icv = skb_put(skb, 4); icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; #if((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED)) crypto_cipher_setkey(wep->tfm, key, klen); sg.page = virt_to_page(pos); sg.offset = offset_in_page(pos); sg.length = len + 4; crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4); return 0; #else crypto_blkcipher_setkey(wep->tx_tfm, key, klen); #if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) sg.page = virt_to_page(pos); sg.offset = offset_in_page(pos); sg.length = len + 4; #else sg_init_one(&sg, pos, len+4); #endif return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); #endif }
char * decrypt_filename(const char *c, const unsigned char *k, int kl) { struct crypto_blkcipher *tfm = 0; struct scatterlist sgi[1], sgo[1]; unsigned char *work; unsigned char ivec[16]; int outlen = 0, rlen; unsigned int sum1, sum2; char *ret = 0, *rp = 0; struct blkcipher_desc desc[1]; int rc; tfm = crypto_alloc_blkcipher(ALG, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { printk(KERN_ERR "decrypt_filename: crypto_alloc_blkcipher (%s) failed\n", ALG); ret = (void*)tfm; /* ERR_PTR(PTR_ERR(tfm)) */ tfm = 0; goto Done; } if (crypto_blkcipher_setkey(tfm, k, kl) < 0) { printk(KERN_ERR "decrypt_filename: crypto_blkcipher_setkey failed (bad key?)\n"); ret = ERR_PTR(-EIO); goto Done; } work = b92_decode(c, &outlen); if (!work || outlen < 5) { printk(KERN_ERR "decrypt_filename: bad filename %d/<%s> %d\n", (int)strlen(c), c, outlen); ret = ERR_PTR(-ENOMEM); goto Done; } if (!work) { printk(KERN_ERR "decrypt_filename: b92_decode failed!\n"); ret = ERR_PTR(-ENOMEM); goto Done; } memcpy(ivec, work, 4); memset(ivec+4, 0, sizeof ivec-5); ivec[sizeof ivec-1]=1; rlen = NAME_ALLOC_LEN(outlen-4); rp = kmalloc(rlen, GFP_KERNEL); if (!rp) { printk(KERN_ERR "decrypt_filename: can't allocate %d bytes\n", rlen-3); ret = ERR_PTR(-ENOMEM); goto Done; } rp[outlen-4] = 0; sg_init_table(sgi, 1); sg_init_table(sgo, 1); sg_set_buf(sgi, work+4, outlen-4); sg_set_buf(sgo, rp, outlen-4); memset(desc, 0, sizeof *desc); desc->info = ivec; desc->tfm = tfm; rc = crypto_blkcipher_encrypt_iv(desc, sgo, sgi, outlen-4); if (rc) { printk(KERN_ERR "decrypt_filename: encrypt failed code=%d\n", rc); ret = ERR_PTR(rc); goto Done; } sum1 = ~crc32_le(~0, rp, outlen-4); sum2 = (((unsigned int)work[0])<<24) | (((unsigned int)work[1])<<16) | (((unsigned int)work[2])<<8) | ((unsigned int)work[3]); if (sum1 != sum2) { printk(KERN_ERR "decrypt_filename: crc didn't match! 
(on %x) (got %x)\n", be32_to_cpu(sum1), be32_to_cpu(sum2)); ret = ERR_PTR(-EIO); goto Done; } ret = rp; rp = 0; Done: if (work) kfree(work); if (tfm) crypto_free_blkcipher(tfm); if (rp) kfree(rp); return ret; }
char * encrypt_filename(const char *c, const unsigned char *k, int kl) { struct crypto_blkcipher *tfm = 0; struct scatterlist sg[1]; unsigned char ivec[16]; int l, l2; unsigned int sum; char *ret = 0; char *work; struct blkcipher_desc desc[1]; int rc; tfm = crypto_alloc_blkcipher(ALG, 0, CRYPTO_ALG_ASYNC); if (IS_ERR(tfm)) { printk(KERN_ERR "encrypt_filename: crypto_alloc_blkcipher (%s) failed\n", ALG); ret = (void*)tfm; /* ERR_PTR(PTR_ERR(tfm)) */ tfm = 0; goto Failed; } if (crypto_blkcipher_setkey(tfm, k, kl) < 0) { printk(KERN_ERR "encrypt_filename: crypto_blkcipher_setkey failed (bad key?)\n"); ret = ERR_PTR(-EIO); goto Failed; } l = strlen(c); /* NOTE: bitrev(crc32_le != crc32_be( */ sum = ~crc32_le(~0, c, l); l2 = l + 4; work = kmalloc(l2, GFP_KERNEL); if (!work) { printk(KERN_ERR "encrypt_filename: can't allocate %d bytes\n", l2); ret = ERR_PTR(-ENOMEM); goto Failed; } work[0] = sum>>24; work[1] = sum>>16; work[2] = sum>>8; work[3] = sum; memcpy(work+4, c, l); memcpy(ivec, work, 4); memset(ivec+4, 0, sizeof ivec-5); ivec[sizeof ivec-1]=1; sg_init_table(sg, 1); sg_set_buf(sg, work+4, l); memset(desc, 0, sizeof *desc); desc->info = ivec; desc->tfm = tfm; rc = crypto_blkcipher_encrypt_iv(desc, sg, sg, l); if (rc) { printk(KERN_ERR "encrypt_filename: encrypt failed code=%d\n", rc); ret = ERR_PTR(rc); goto Failed; } ret = b92_encode(work, l2); Failed: if (work) kfree(work); if (tfm) crypto_free_blkcipher(tfm); return ret; }
/** * handle_enc_dec * @f: pointer to file_struct struct which has all the information about input, temporary and output files * @buf: data which has to be encrypted or decrypted * @n_bytes: number of bytes to be encrypted or decrypted * @key: key used for encryption or decryption * @flags: encrypt or decrypt to indicate the desired operation * * Encrypts or decrypts the data as per the flags parameter. Resultant encrypted or decrypted data is stored in buf.This data is written * to temporary file. For encryption, the encrypted data is written to temporary file. For decryption, the decrypted data is written to * temporary file. * * Returns zero on success; non-zero otherwise; */ int handle_enc_dec(struct file_struct *f, unsigned char *buf, int n_bytes, char *key, int flags) { int err = 0, i, temp; struct crypto_blkcipher *blkcipher = NULL; unsigned char aes_key[AES_KEY_SIZE]; unsigned char iv[AES_KEY_SIZE] = "\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11\x11"; struct scatterlist sg; struct blkcipher_desc desc; if (n_bytes%AES_KEY_SIZE != 0) { printk(KERN_ALERT"size not multiple of 16 for encryption\n"); err = -EINVAL; goto ERR; } for (i = 0 ; i < AES_KEY_SIZE ; i++) aes_key[i] = key[i]; blkcipher = crypto_alloc_blkcipher("cbc(aes)", 0, 0); if (IS_ERR(blkcipher)) { printk(KERN_ALERT"could not allocate blkcipher handle for %s\n", "cbsaes"); err = PTR_ERR(blkcipher); goto ERR; } if (crypto_blkcipher_setkey(blkcipher, aes_key, AES_KEY_SIZE)) { printk(KERN_ALERT"key could not be set\n"); err = -EAGAIN; goto ERR; } crypto_blkcipher_set_iv(blkcipher, iv, AES_KEY_SIZE); desc.flags = 0; desc.tfm = blkcipher; sg_init_one(&sg, buf, n_bytes); printk(KERN_ALERT"sg iinited\n"); /* encrypt data in place */ if (flags == 1) { crypto_blkcipher_encrypt(&desc, &sg, &sg, n_bytes); printk(KERN_ALERT"encryption done\n"); } else { crypto_blkcipher_decrypt(&desc, &sg, &sg, n_bytes); printk(KERN_ALERT"Decryption done\n"); } printk(KERN_ALERT"printing enc/dec data\n"); 
printk(KERN_ALERT"Cipher operation completed\n"); temp = write_to_file(f->filp_temp, buf, n_bytes); if (blkcipher) crypto_free_blkcipher(blkcipher); err = 0; ERR: return err; }
/*
 * AES-128-CBC helpers for ceph authentication.
 *
 * All variants pad the input up to a whole 16-byte block (pad bytes
 * each hold the pad length, PKCS#5/7 style; a full extra block is
 * added when the input is already aligned) and load the module-wide
 * constant aes_iv into the transform's IV register before operating.
 *
 * NOTE(review): crypto_blkcipher_setkey() results are still unchecked
 * throughout, as in the original - confirm before tightening.
 */
static int ceph_aes_encrypt(const void *key, int key_len, void *dst,
			    size_t *dst_len, const void *src, size_t src_len)
{
	struct scatterlist sg_in[2], sg_out[1];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
	int ret;
	void *iv;
	int ivsize;
	size_t zero_padding = (0x10 - (src_len & 0x0f));
	char pad[16];

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	memset(pad, zero_padding, zero_padding);

	*dst_len = src_len + zero_padding;

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	sg_init_table(sg_in, 2);
	sg_set_buf(&sg_in[0], src, src_len);
	sg_set_buf(&sg_in[1], pad, zero_padding);
	sg_init_table(sg_out, 1);
	sg_set_buf(sg_out, dst, *dst_len);
	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, aes_iv, ivsize);

	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
				       src_len + zero_padding);
	crypto_free_blkcipher(tfm);
	if (ret < 0) {
		pr_err("ceph_aes_crypt failed %d\n", ret);
		/* BUG FIX: the error used to be logged and then
		 * swallowed - the function always returned 0. */
		return ret;
	}
	return 0;
}

/* Like ceph_aes_encrypt() but gathers two input buffers (src1 then
 * src2) into a single padded ciphertext. */
static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
			     size_t *dst_len,
			     const void *src1, size_t src1_len,
			     const void *src2, size_t src2_len)
{
	struct scatterlist sg_in[3], sg_out[1];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
	int ret;
	void *iv;
	int ivsize;
	size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
	char pad[16];

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	memset(pad, zero_padding, zero_padding);

	*dst_len = src1_len + src2_len + zero_padding;

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	sg_init_table(sg_in, 3);
	sg_set_buf(&sg_in[0], src1, src1_len);
	sg_set_buf(&sg_in[1], src2, src2_len);
	sg_set_buf(&sg_in[2], pad, zero_padding);
	sg_init_table(sg_out, 1);
	sg_set_buf(sg_out, dst, *dst_len);
	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, aes_iv, ivsize);

	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
				       src1_len + src2_len + zero_padding);
	crypto_free_blkcipher(tfm);
	if (ret < 0) {
		pr_err("ceph_aes_crypt2 failed %d\n", ret);
		/* BUG FIX: error was swallowed (always returned 0). */
		return ret;
	}
	return 0;
}

/* Decrypt into dst (spill-over pad bytes land in a scratch buffer),
 * then strip the PKCS#5/7-style padding; *dst_len is set to the
 * unpadded plaintext length. */
static int ceph_aes_decrypt(const void *key, int key_len, void *dst,
			    size_t *dst_len, const void *src, size_t src_len)
{
	struct scatterlist sg_in[1], sg_out[2];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm };
	char pad[16];
	void *iv;
	int ivsize;
	int ret;
	int last_byte;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 2);
	sg_set_buf(sg_in, src, src_len);
	sg_set_buf(&sg_out[0], dst, *dst_len);
	sg_set_buf(&sg_out[1], pad, sizeof(pad));

	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, aes_iv, ivsize);

	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
	crypto_free_blkcipher(tfm);
	if (ret < 0) {
		pr_err("ceph_aes_decrypt failed %d\n", ret);
		return ret;
	}

	/* The final plaintext byte is the pad length; it may have landed
	 * in dst or in the pad overflow buffer. */
	if (src_len <= *dst_len)
		last_byte = ((char *)dst)[src_len - 1];
	else
		last_byte = pad[src_len - *dst_len - 1];
	if (last_byte <= 16 && src_len >= last_byte) {
		*dst_len = src_len - last_byte;
	} else {
		pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
		       last_byte, (int)src_len);
		return -EPERM;  /* bad padding */
	}
	return 0;
}

/* Like ceph_aes_decrypt() but scatters the plaintext across two output
 * buffers; *dst1_len/*dst2_len are updated to the bytes placed in each. */
static int ceph_aes_decrypt2(const void *key, int key_len,
			     void *dst1, size_t *dst1_len,
			     void *dst2, size_t *dst2_len,
			     const void *src, size_t src_len)
{
	struct scatterlist sg_in[1], sg_out[3];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm };
	char pad[16];
	void *iv;
	int ivsize;
	int ret;
	int last_byte;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	sg_init_table(sg_in, 1);
	sg_set_buf(sg_in, src, src_len);
	sg_init_table(sg_out, 3);
	sg_set_buf(&sg_out[0], dst1, *dst1_len);
	sg_set_buf(&sg_out[1], dst2, *dst2_len);
	sg_set_buf(&sg_out[2], pad, sizeof(pad));

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);
	memcpy(iv, aes_iv, ivsize);

	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
	crypto_free_blkcipher(tfm);
	if (ret < 0) {
		pr_err("ceph_aes_decrypt failed %d\n", ret);
		return ret;
	}

	/* Locate the pad-length byte in whichever buffer it landed in. */
	if (src_len <= *dst1_len)
		last_byte = ((char *)dst1)[src_len - 1];
	else if (src_len <= *dst1_len + *dst2_len)
		last_byte = ((char *)dst2)[src_len - *dst1_len - 1];
	else
		last_byte = pad[src_len - *dst1_len - *dst2_len - 1];
	if (last_byte <= 16 && src_len >= last_byte) {
		src_len -= last_byte;
	} else {
		pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
		       last_byte, (int)src_len);
		return -EPERM;  /* bad padding */
	}

	/* Split the unpadded length across the two destinations. */
	if (src_len < *dst1_len) {
		*dst1_len = src_len;
		*dst2_len = 0;
	} else {
		*dst2_len = src_len - *dst1_len;
	}
	return 0;
}

/* Dispatch decryption by secret type; CEPH_CRYPTO_NONE is a bounded
 * memcpy. */
int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		 const void *src, size_t src_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src_len)
			return -ERANGE;
		memcpy(dst, src, src_len);
		*dst_len = src_len;
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_decrypt(secret->key, secret->len, dst,
					dst_len, src, src_len);

	default:
		return -EINVAL;
	}
}

/* Two-destination variant of ceph_decrypt(). */
int ceph_decrypt2(struct ceph_crypto_key *secret,
		  void *dst1, size_t *dst1_len,
		  void *dst2, size_t *dst2_len,
		  const void *src, size_t src_len)
{
	size_t t;

	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst1_len + *dst2_len < src_len)
			return -ERANGE;
		t = min(*dst1_len, src_len);
		memcpy(dst1, src, t);
		*dst1_len = t;
		src += t;
		src_len -= t;
		if (src_len) {
			t = min(*dst2_len, src_len);
			memcpy(dst2, src, t);
			*dst2_len = t;
		}
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_decrypt2(secret->key, secret->len,
					 dst1, dst1_len, dst2, dst2_len,
					 src, src_len);

	default:
		return -EINVAL;
	}
}

/* Dispatch encryption by secret type. */
int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		 const void *src, size_t src_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src_len)
			return -ERANGE;
		memcpy(dst, src, src_len);
		*dst_len = src_len;
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_encrypt(secret->key, secret->len, dst,
					dst_len, src, src_len);

	default:
		return -EINVAL;
	}
}

/* Two-source variant of ceph_encrypt(). */
int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		  const void *src1, size_t src1_len,
		  const void *src2, size_t src2_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src1_len + src2_len)
			return -ERANGE;
		memcpy(dst, src1, src1_len);
		memcpy(dst + src1_len, src2, src2_len);
		*dst_len = src1_len + src2_len;
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_encrypt2(secret->key, secret->len, dst,
					 dst_len, src1, src1_len,
					 src2, src2_len);

	default:
		return -EINVAL;
	}
}
static int ceph_aes_encrypt(const void *key, int key_len, void *dst, size_t *dst_len, const void *src, size_t src_len) { struct scatterlist sg_in[2], sg_out[1]; struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; int ret; void *iv; int ivsize; size_t zero_padding = (0x10 - (src_len & 0x0f)); char pad[16]; if (IS_ERR(tfm)) return PTR_ERR(tfm); memset(pad, zero_padding, zero_padding); *dst_len = src_len + zero_padding; crypto_blkcipher_setkey((void *)tfm, key, key_len); sg_init_table(sg_in, 2); sg_set_buf(&sg_in[0], src, src_len); sg_set_buf(&sg_in[1], pad, zero_padding); sg_init_table(sg_out, 1); sg_set_buf(sg_out, dst, *dst_len); iv = crypto_blkcipher_crt(tfm)->iv; ivsize = crypto_blkcipher_ivsize(tfm); memcpy(iv, aes_iv, ivsize); ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, src_len + zero_padding); crypto_free_blkcipher(tfm); if (ret < 0) pr_err("ceph_aes_crypt failed %d\n", ret); return 0; } static int ceph_aes_encrypt2(const void *key, int key_len, void *dst, size_t *dst_len, const void *src1, size_t src1_len, const void *src2, size_t src2_len) { struct scatterlist sg_in[3], sg_out[1]; struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 }; int ret; void *iv; int ivsize; size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f)); char pad[16]; if (IS_ERR(tfm)) return PTR_ERR(tfm); memset(pad, zero_padding, zero_padding); *dst_len = src1_len + src2_len + zero_padding; crypto_blkcipher_setkey((void *)tfm, key, key_len); sg_init_table(sg_in, 3); sg_set_buf(&sg_in[0], src1, src1_len); sg_set_buf(&sg_in[1], src2, src2_len); sg_set_buf(&sg_in[2], pad, zero_padding); sg_init_table(sg_out, 1); sg_set_buf(sg_out, dst, *dst_len); iv = crypto_blkcipher_crt(tfm)->iv; ivsize = crypto_blkcipher_ivsize(tfm); memcpy(iv, aes_iv, ivsize); ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, src1_len + src2_len + zero_padding); 
crypto_free_blkcipher(tfm); if (ret < 0) pr_err("ceph_aes_crypt2 failed %d\n", ret); return 0; } static int ceph_aes_decrypt(const void *key, int key_len, void *dst, size_t *dst_len, const void *src, size_t src_len) { struct scatterlist sg_in[1], sg_out[2]; struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); struct blkcipher_desc desc = { .tfm = tfm }; char pad[16]; void *iv; int ivsize; int ret; int last_byte; if (IS_ERR(tfm)) return PTR_ERR(tfm); crypto_blkcipher_setkey((void *)tfm, key, key_len); sg_init_table(sg_in, 1); sg_init_table(sg_out, 2); sg_set_buf(sg_in, src, src_len); sg_set_buf(&sg_out[0], dst, *dst_len); sg_set_buf(&sg_out[1], pad, sizeof(pad)); iv = crypto_blkcipher_crt(tfm)->iv; ivsize = crypto_blkcipher_ivsize(tfm); memcpy(iv, aes_iv, ivsize); ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len); crypto_free_blkcipher(tfm); if (ret < 0) { pr_err("ceph_aes_decrypt failed %d\n", ret); return ret; } if (src_len <= *dst_len) last_byte = ((char *)dst)[src_len - 1]; else last_byte = pad[src_len - *dst_len - 1]; if (last_byte <= 16 && src_len >= last_byte) { *dst_len = src_len - last_byte; } else { pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n", last_byte, (int)src_len); return -EPERM; } return 0; } static int ceph_aes_decrypt2(const void *key, int key_len, void *dst1, size_t *dst1_len, void *dst2, size_t *dst2_len, const void *src, size_t src_len) { struct scatterlist sg_in[1], sg_out[3]; struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher(); struct blkcipher_desc desc = { .tfm = tfm }; char pad[16]; void *iv; int ivsize; int ret; int last_byte; if (IS_ERR(tfm)) return PTR_ERR(tfm); sg_init_table(sg_in, 1); sg_set_buf(sg_in, src, src_len); sg_init_table(sg_out, 3); sg_set_buf(&sg_out[0], dst1, *dst1_len); sg_set_buf(&sg_out[1], dst2, *dst2_len); sg_set_buf(&sg_out[2], pad, sizeof(pad)); crypto_blkcipher_setkey((void *)tfm, key, key_len); iv = crypto_blkcipher_crt(tfm)->iv; ivsize = crypto_blkcipher_ivsize(tfm); 
memcpy(iv, aes_iv, ivsize); ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len); crypto_free_blkcipher(tfm); if (ret < 0) { pr_err("ceph_aes_decrypt failed %d\n", ret); return ret; } if (src_len <= *dst1_len) last_byte = ((char *)dst1)[src_len - 1]; else if (src_len <= *dst1_len + *dst2_len) last_byte = ((char *)dst2)[src_len - *dst1_len - 1]; else last_byte = pad[src_len - *dst1_len - *dst2_len - 1]; if (last_byte <= 16 && src_len >= last_byte) { src_len -= last_byte; } else { pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n", last_byte, (int)src_len); return -EPERM; } if (src_len < *dst1_len) { *dst1_len = src_len; *dst2_len = 0; } else { *dst2_len = src_len - *dst1_len; } return 0; } int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len, const void *src, size_t src_len) { switch (secret->type) { case CEPH_CRYPTO_NONE: if (*dst_len < src_len) return -ERANGE; memcpy(dst, src, src_len); *dst_len = src_len; return 0; case CEPH_CRYPTO_AES: return ceph_aes_decrypt(secret->key, secret->len, dst, dst_len, src, src_len); default: return -EINVAL; } } int ceph_decrypt2(struct ceph_crypto_key *secret, void *dst1, size_t *dst1_len, void *dst2, size_t *dst2_len, const void *src, size_t src_len) { size_t t; switch (secret->type) { case CEPH_CRYPTO_NONE: if (*dst1_len + *dst2_len < src_len) return -ERANGE; t = min(*dst1_len, src_len); memcpy(dst1, src, t); *dst1_len = t; src += t; src_len -= t; if (src_len) { t = min(*dst2_len, src_len); memcpy(dst2, src, t); *dst2_len = t; } return 0; case CEPH_CRYPTO_AES: return ceph_aes_decrypt2(secret->key, secret->len, dst1, dst1_len, dst2, dst2_len, src, src_len); default: return -EINVAL; } } int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len, const void *src, size_t src_len) { switch (secret->type) { case CEPH_CRYPTO_NONE: if (*dst_len < src_len) return -ERANGE; memcpy(dst, src, src_len); *dst_len = src_len; return 0; case CEPH_CRYPTO_AES: return 
ceph_aes_encrypt(secret->key, secret->len, dst, dst_len, src, src_len); default: return -EINVAL; } } int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len, const void *src1, size_t src1_len, const void *src2, size_t src2_len) { switch (secret->type) { case CEPH_CRYPTO_NONE: if (*dst_len < src1_len + src2_len) return -ERANGE; memcpy(dst, src1, src1_len); memcpy(dst + src1_len, src2, src2_len); *dst_len = src1_len + src2_len; return 0; case CEPH_CRYPTO_AES: return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len, src1, src1_len, src2, src2_len); default: return -EINVAL; } } int ceph_key_instantiate(struct key *key, const void *data, size_t datalen) { struct ceph_crypto_key *ckey; int ret; void *p; ret = -EINVAL; if (datalen <= 0 || datalen > 32767 || !data) goto err; ret = key_payload_reserve(key, datalen); if (ret < 0) goto err; ret = -ENOMEM; ckey = kmalloc(sizeof(*ckey), GFP_KERNEL); if (!ckey) goto err; p = (void *)data; ret = ceph_crypto_key_decode(ckey, &p, (char*)data+datalen); if (ret < 0) goto err_ckey; key->payload.data = ckey; return 0; err_ckey: kfree(ckey); err: return ret; } int ceph_key_match(const struct key *key, const void *description) { return strcmp(key->description, description) == 0; } void ceph_key_destroy(struct key *key) { struct ceph_crypto_key *ckey = key->payload.data; ceph_crypto_key_destroy(ckey); kfree(ckey); } struct key_type key_type_ceph = { .name = "ceph", .instantiate = ceph_key_instantiate, .match = ceph_key_match, .destroy = ceph_key_destroy, }; int ceph_crypto_init(void) { return register_key_type(&key_type_ceph); } void ceph_crypto_shutdown(void) { unregister_key_type(&key_type_ceph); }
/* Perform WEP decryption on given buffer. Buffer includes whole WEP part of
 * the frame: IV (4 bytes), encrypted payload (including SNAP header),
 * ICV (4 bytes). len includes both IV and ICV.
 *
 * Returns 0 if frame was decrypted successfully and ICV was correct and -1 on
 * failure. If frame is OK, IV and ICV will be removed. */
static int prism2_wep_decrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct prism2_wep_data *wep = priv;
	struct blkcipher_desc desc = { .tfm = wep->rx_tfm };
	u32 klen, plen;
	u8 key[WEP_KEY_LEN + 3];
	u8 keyidx, *pos;
	u32 crc;
	u8 icv[4];
	struct scatterlist sg;

	/* Need at least IV (4 bytes) + ICV (4 bytes) past the 802.11 header. */
	if (skb->len < hdr_len + 8)
		return -1;

	/* First 3 bytes after the header are the per-frame IV; the 4th byte's
	 * top two bits select the key index. */
	pos = skb->data + hdr_len;
	key[0] = *pos++;
	key[1] = *pos++;
	key[2] = *pos++;
	keyidx = *pos++ >> 6;
	if (keyidx != wep->key_idx)
		return -1;

	/* RC4 key = 3-byte IV prepended to the secret WEP key. */
	klen = 3 + wep->key_len;

	/* Copy rest of the WEP key (the secret part) */
	memcpy(key + 3, wep->key, wep->key_len);

	/* Apply RC4 to data and compute CRC32 over decrypted data */
	plen = skb->len - hdr_len - 8;

	crypto_blkcipher_setkey(wep->rx_tfm, key, klen);
	/* Decrypt payload + trailing 4-byte ICV in place. */
	sg_init_one(&sg, pos, plen + 4);
	if (crypto_blkcipher_decrypt(&desc, &sg, &sg, plen + 4))
		return -7;

	/* Recompute little-endian CRC32 over the plaintext and compare with
	 * the decrypted ICV. */
	crc = ~crc32_le(~0, pos, plen);
	icv[0] = crc;
	icv[1] = crc >> 8;
	icv[2] = crc >> 16;
	icv[3] = crc >> 24;
	if (memcmp(icv, pos + plen, 4) != 0) {
		/* ICV mismatch - drop frame */
		return -2;
	}

	/* Remove IV and ICV: slide header forward over the IV, then trim
	 * the ICV off the tail. */
	memmove(skb->data + 4, skb->data, hdr_len);
	skb_pull(skb, 4);
	skb_trim(skb, skb->len - 4);

	return 0;
}

/* Install a new WEP key of up to WEP_KEY_LEN bytes; returns 0 on success,
 * -1 if len is out of range.  seq is unused for WEP. */
static int prism2_wep_set_key(void *key, int len, u8 *seq, void *priv)
{
	struct prism2_wep_data *wep = priv;

	if (len < 0 || len > WEP_KEY_LEN)
		return -1;

	memcpy(wep->key, key, len);
	wep->key_len = len;

	return 0;
}

/* Copy the current WEP key into the caller's buffer; returns the key length,
 * or -1 if the buffer (len) is too small.  seq is unused for WEP. */
static int prism2_wep_get_key(void *key, int len, u8 *seq, void *priv)
{
	struct prism2_wep_data *wep = priv;

	if (len < wep->key_len)
		return -1;

	memcpy(key, wep->key, wep->key_len);

	return wep->key_len;
}

/* Append a one-line human-readable key summary to p; returns the advanced
 * write pointer. */
static char * prism2_wep_print_stats(char *p, void *priv)
{
	struct prism2_wep_data *wep = priv;

	p += sprintf(p, "key[%d] alg=WEP len=%d\n", wep->key_idx, wep->key_len);
	return p;
}

/* WEP crypto ops: MPDU-level encrypt/decrypt only (no MSDU handling);
 * frames grow by 4 bytes of IV up front and 4 bytes of ICV at the tail. */
static struct ieee80211_crypto_ops ieee80211_crypt_wep = {
	.name			= "WEP",
	.init			= prism2_wep_init,
	.deinit			= prism2_wep_deinit,
	.encrypt_mpdu		= prism2_wep_encrypt,
	.decrypt_mpdu		= prism2_wep_decrypt,
	.encrypt_msdu		= NULL,
	.decrypt_msdu		= NULL,
	.set_key		= prism2_wep_set_key,
	.get_key		= prism2_wep_get_key,
	.print_stats		= prism2_wep_print_stats,
	.extra_prefix_len	= 4,	/* IV */
	.extra_postfix_len	= 4,	/* ICV */
	.owner			= THIS_MODULE,
};

/* Register the WEP ops with the ieee80211 crypto framework. */
int ieee80211_crypto_wep_init(void)
{
	return ieee80211_register_crypto_ops(&ieee80211_crypt_wep);
}

void ieee80211_crypto_wep_exit(void)
{
	ieee80211_unregister_crypto_ops(&ieee80211_crypt_wep);
}

/* Empty reference to keep this object pulled into the link. */
void ieee80211_wep_null(void)
{
//	printk("============>%s()\n", __func__);
	return;
}
void test_aes(void) { struct crypto_blkcipher *tfm; struct blkcipher_desc desc; u32 bs; int i,j; u32 npages; struct scatterlist *src; struct scatterlist *dst; char *buf; char **ins, **outs; unsigned int ret; u8 key[] = {0x00, 0x01, 0x02, 0x03, 0x05, 0x06, 0x07, 0x08, 0x0A, 0x0B, 0x0C, 0x0D, 0x0F, 0x10, 0x11, 0x12}; npages = MAX_BLK_SIZE/PAGE_SIZE; src = kmalloc(npages*sizeof(struct scatterlist), __GFP_ZERO|GFP_KERNEL); if (!src) { printk("taes ERROR: failed to alloc src\n"); return; } dst = kmalloc(npages*sizeof(struct scatterlist), __GFP_ZERO|GFP_KERNEL); if (!dst) { printk("taes ERROR: failed to alloc dst\n"); kfree(src); return; } ins = kmalloc(npages*sizeof(char*), __GFP_ZERO|GFP_KERNEL); if (!ins) { printk("taes ERROR: failed to alloc ins\n"); kfree(src); kfree(dst); return; } outs = kmalloc(npages*sizeof(char*), __GFP_ZERO|GFP_KERNEL); if (!outs) { printk("taes ERROR: failed to alloc outs\n"); kfree(src); kfree(dst); kfree(ins); return; } tfm = crypto_alloc_blkcipher(CIPHER, 0, 0); if (IS_ERR(tfm)) { printk("failed to load transform for %s: %ld\n", CIPHER, PTR_ERR(tfm)); goto out; } desc.tfm = tfm; desc.flags = 0; ret = crypto_blkcipher_setkey(tfm, key, sizeof(key)); if (ret) { printk("setkey() failed flags=%x\n", crypto_blkcipher_get_flags(tfm)); goto out; } sg_init_table(src, npages); for (i=0; i<npages; i++) { buf = (void *)__get_free_page(GFP_KERNEL); if (!buf) { printk("taes ERROR: alloc free page error\n"); goto free_err_pages; } ins[i] = buf; strcpy(buf, "this is a plain text!"); sg_set_buf(src+i, buf, PAGE_SIZE); buf = (void *)__get_free_page(GFP_KERNEL); if (!buf) { printk("taes ERROR: alloc free page error\n"); goto free_err_pages; } outs[i] = buf; sg_set_buf(dst+i, buf, PAGE_SIZE); } for (bs = MAX_BLK_SIZE; bs >= MIN_BLK_SIZE; bs>>=1) { struct timeval t0, t1; long int enc, dec; do_gettimeofday(&t0); for (j=0; j<TEST_TIMES; j++) { ret = crypto_blkcipher_encrypt(&desc, dst, src, bs); if (ret) { printk("taes ERROR: enc error\n"); goto free_err_pages; 
} } do_gettimeofday(&t1); enc = 1000000*(t1.tv_sec-t0.tv_sec) + ((int)(t1.tv_usec) - (int)(t0.tv_usec)); do_gettimeofday(&t0); for (j=0; j<TEST_TIMES; j++) { ret = crypto_blkcipher_decrypt(&desc, src, dst, bs); if (ret) { printk("taes ERROR: dec error\n"); goto free_err_pages; } } do_gettimeofday(&t1); dec = 1000000*(t1.tv_sec-t0.tv_sec) + ((int)(t1.tv_usec) - (int)(t0.tv_usec)); printk("Size %u, enc %ld, dec %ld\n", bs, enc, dec); } free_err_pages: for (i=0; i<npages && ins[i]; i++){ free_page((unsigned long)ins[i]); } for (i=0; i<npages && outs[i]; i++){ free_page((unsigned long)outs[i]); } out: kfree(src); kfree(dst); kfree(ins); kfree(outs); crypto_free_blkcipher(tfm); }
/*
 * Initialize ESP state for an IPv6 xfrm_state: validate the configured
 * algorithms, allocate the esp_data context, set up the optional auth
 * hash and the encryption blkcipher, and compute the header length.
 *
 * Returns 0 on success, -ENOMEM if the context cannot be allocated,
 * -EINVAL on any other failure.  All partially-initialized resources
 * are torn down through esp6_destroy() on the error path.
 */
static int esp6_init_state(struct xfrm_state *x)
{
	struct esp_data *esp = NULL;
	struct crypto_blkcipher *tfm;

	/* null auth and encryption can have zero length keys */
	if (x->aalg) {
		if (x->aalg->alg_key_len > 512)
			goto error;
	}
	if (x->ealg == NULL)
		goto error;

	/* NAT traversal encapsulation is not supported here */
	if (x->encap)
		goto error;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;
		struct crypto_hash *hash;

		/* key length is given in bits; round up to bytes */
		esp->auth.key = x->aalg->alg_key;
		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
					 CRYPTO_ALG_ASYNC);
		if (IS_ERR(hash))
			goto error;

		esp->auth.tfm = hash;
		if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
			goto error;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		/* the algorithm was just allocated by name, so a missing
		 * descriptor is a programming error */
		BUG_ON(!aalg_desc);

		/* sanity-check that the xfrm descriptor agrees with the
		 * crypto layer about the digest size */
		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
		    crypto_hash_digestsize(hash)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_hash_digestsize(hash),
				 aalg_desc->uinfo.auth.icv_fullbits/8);
			goto error;
		}

		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

		/* scratch buffer for computing the full-size ICV */
		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len,
					     GFP_KERNEL);
		if (!esp->auth.work_icv)
			goto error;
	}
	esp->conf.key = x->ealg->alg_key;
	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;
	esp->conf.tfm = tfm;
	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
	esp->conf.padlen = 0;
	if (esp->conf.ivlen) {
		/* IV buffer is filled lazily on first use (ivinitted = 0) */
		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
		if (unlikely(esp->conf.ivec == NULL))
			goto error;
		esp->conf.ivinitted = 0;
	}
	if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
		goto error;
	x->props.header_len = sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct ipv6hdr);
	x->data = esp;
	return 0;

error:
	/* hand the partial context to esp6_destroy() for unified cleanup */
	x->data = esp;
	esp6_destroy(x);
	x->data = NULL;
	return -EINVAL;
}
/* Main encrypt function
 * referenced from ceph_aes_encrypt() function
 * Encrypts from_buffer into to_buffer with algo_name, padding the input
 * to a 16-byte boundary (pad bytes hold the pad length); *to_length is
 * set to the padded ciphertext size.
 * Returns 0 on success, negative errno on failure.
 */
int encrypt_data(const void *key, int length_key, void *to_buffer,
		 const void *from_buffer, size_t *to_length,
		 size_t from_length, char *algo_name)
{
	struct scatterlist scatter_list_src[2];
	struct scatterlist scatter_list_dest[1];
	struct crypto_blkcipher *tfm =
		crypto_alloc_blkcipher(algo_name, 0, CRYPTO_ALG_ASYNC);
	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
	/* 1..16 pad bytes, each holding the pad length */
	size_t null_padding = (0x10 - (from_length & 0x0f));
	int return_value = 0;
	char padding_array[48];

	printk("algo_name: %s\n", algo_name);

	/* check to see if the cipher struct is set properly */
	if (IS_ERR(tfm)) {
		printk("Error in setting tfm\n");
		return PTR_ERR(tfm);
	}

	memset(padding_array, null_padding, null_padding);
	*to_length = from_length + null_padding;

	/* let's set the key for the cipher -- and actually check the
	 * result: encrypting with an un-set key would silently produce
	 * garbage (the original ignored this return value) */
	return_value = crypto_blkcipher_setkey((void *)tfm, key, length_key);
	if (return_value) {
		printk(KERN_CRIT "crypto_blcipher setkey failed with errno %d.\n",
		       return_value);
		crypto_free_blkcipher(tfm);
		return return_value;
	}

	/* plaintext followed by padding feeds the cipher as one stream */
	sg_init_table(scatter_list_src, 2);
	sg_set_buf(&scatter_list_src[0], from_buffer, from_length);
	sg_set_buf(&scatter_list_src[1], padding_array, null_padding);
	sg_init_table(scatter_list_dest, 1);
	sg_set_buf(scatter_list_dest, to_buffer, *to_length);

	/* let's start encrypting */
	return_value = crypto_blkcipher_encrypt(&desc, scatter_list_dest,
						scatter_list_src,
						from_length + null_padding);

	/* free up the blk cipher */
	crypto_free_blkcipher(tfm);

	if (return_value < 0) {
		printk(KERN_CRIT "crypto_blcipher encryption failed with errno %d.\n",
		       return_value);
	}
	return return_value;
}

/* Main decrypt function, mirror of encrypt_data():
 * decrypts from_buffer into to_buffer and strips the trailing padding;
 * plaintext overflowing *to_length spills into a local pad buffer so the
 * pad-length byte can still be inspected.  On success *to_length is set
 * to the unpadded plaintext size.
 * Returns 0 on success, negative errno on cipher failure, -EPERM on
 * bad padding.
 */
int decrypt_data(const void *key, int length_key, void *to_buffer,
		 const void *from_buffer, size_t *to_length,
		 size_t from_length, char *algo_name)
{
	int return_value = 0;
	int end_element;
	char padding_array[48];
	struct scatterlist scatter_list_src[1];
	struct scatterlist scatter_list_dest[2];
	struct crypto_blkcipher *tfm =
		crypto_alloc_blkcipher(algo_name, 0, CRYPTO_ALG_ASYNC);
	struct blkcipher_desc desc = { .tfm = tfm };

	printk("algo_name: %s\n", algo_name);

	/* check to see if the cipher struct is set properly */
	if (IS_ERR(tfm)) {
		return PTR_ERR(tfm);
	}

	/* Setting the key for Block cipher -- check the result; the
	 * original ignored setkey failures and decrypted anyway */
	return_value = crypto_blkcipher_setkey((void *)tfm, key, length_key);
	if (return_value) {
		printk(KERN_CRIT "crypto_blcipher setkey failed with errno %d.\n",
		       return_value);
		crypto_free_blkcipher(tfm);
		return return_value;
	}

	sg_init_table(scatter_list_src, 1);
	sg_init_table(scatter_list_dest, 2);
	sg_set_buf(scatter_list_src, from_buffer, from_length);
	sg_set_buf(&scatter_list_dest[0], to_buffer, *to_length);
	sg_set_buf(&scatter_list_dest[1], padding_array, sizeof(padding_array));

	/* let's decrypt using crypto_blkcipher */
	return_value = crypto_blkcipher_decrypt(&desc, scatter_list_dest,
						scatter_list_src, from_length);

	/* Free up the blk cipher */
	crypto_free_blkcipher(tfm);

	if (return_value < 0) {
		printk(KERN_CRIT "crypto_blcipher decryption failed 1.\n");
		return return_value;
	}

	/* the last plaintext byte holds the pad length; it may have
	 * landed in padding_array if *to_length was smaller than the
	 * decrypted stream */
	if (from_length <= *to_length)
		end_element = ((char *)to_buffer)[from_length - 1];
	else
		end_element = padding_array[from_length - *to_length - 1];

	if (end_element <= 16 && from_length >= end_element) {
		*to_length = from_length - end_element;
	} else {
		printk(KERN_CRIT "crypto_blcipher decryption failed 2.\n");
		return -EPERM; //bad padding
	}
	return return_value;
}
/*
 * Run one block-cipher self-test vector: allocate the named transform,
 * set key (and IV if provided), run the requested direction over the
 * vector, and compare the output against the expected result.
 *
 * Returns 0 on success, negative errno on any failure.
 */
static int tf_self_test_perform_blkcipher(
	const char *alg_name,
	const struct blkcipher_test_vector *tv,
	bool decrypt)
{
	struct blkcipher_desc desc = {0};
	struct scatterlist sg_in, sg_out;
	unsigned char *in = NULL;
	unsigned char *out = NULL;
	unsigned in_size, out_size;
	int error;

	desc.tfm = crypto_alloc_blkcipher(alg_name, 0, 0);
	if (IS_ERR_OR_NULL(desc.tfm)) {
		ERROR("crypto_alloc_blkcipher(%s) failed", alg_name);
		/* use PTR_ERR, not a raw pointer-to-int cast that
		 * truncates on 64-bit */
		error = (desc.tfm == NULL ? -ENOMEM : (int)PTR_ERR(desc.tfm));
		desc.tfm = NULL;
		goto abort;
	}
	INFO("%s alg_name=%s driver_name=%s key_size=%u block_size=%u",
	     decrypt ? "decrypt" : "encrypt", alg_name,
	     crypto_tfm_alg_driver_name(crypto_blkcipher_tfm(desc.tfm)),
	     tv->key_length * 8,
	     crypto_blkcipher_blocksize(desc.tfm));

	/* working copy of the input so the vector stays untouched */
	in_size = tv->length;
	in = kmalloc(in_size, GFP_KERNEL);
	if (in == NULL) {
		ERROR("kmalloc(%u) failed", in_size);
		error = -ENOMEM;
		goto abort;
	}
	memcpy(in, decrypt ? tv->ciphertext : tv->plaintext, tv->length);

	/* one extra block of slack for the output */
	out_size = tv->length + crypto_blkcipher_blocksize(desc.tfm);
	out = kmalloc(out_size, GFP_KERNEL);
	if (out == NULL) {
		ERROR("kmalloc(%u) failed", out_size);
		error = -ENOMEM;
		goto abort;
	}

	error = crypto_blkcipher_setkey(desc.tfm, tv->key, tv->key_length);
	if (error) {
		ERROR("crypto_alloc_setkey(%s) failed", alg_name);
		goto abort;
	}
	TF_TRACE_ARRAY(tv->key, tv->key_length);

	if (tv->iv != NULL) {
		unsigned iv_length = crypto_blkcipher_ivsize(desc.tfm);
		crypto_blkcipher_set_iv(desc.tfm, tv->iv, iv_length);
		TF_TRACE_ARRAY(tv->iv, iv_length);
	}

	sg_init_one(&sg_in, in, tv->length);
	sg_init_one(&sg_out, out, tv->length);
	TF_TRACE_ARRAY(in, tv->length);

	/* capture the cipher result -- the original discarded it, so the
	 * following check tested the stale value left over from setkey */
	error = (decrypt ? crypto_blkcipher_decrypt : crypto_blkcipher_encrypt)
		(&desc, &sg_out, &sg_in, tv->length);
	if (error) {
		ERROR("crypto_blkcipher_%s(%s) failed",
		      decrypt ? "decrypt" : "encrypt", alg_name);
		goto abort;
	}
	TF_TRACE_ARRAY(out, tv->length);

	crypto_free_blkcipher(desc.tfm);

	if (memcmp((decrypt ? tv->plaintext : tv->ciphertext),
		   out, tv->length)) {
		ERROR("Wrong %s/%u %s result", alg_name, tv->key_length * 8,
		      decrypt ? "decryption" : "encryption");
		error = -EINVAL;
	} else {
		INFO("%s/%u: %s successful", alg_name, tv->key_length * 8,
		     decrypt ? "decryption" : "encryption");
		error = 0;
	}
	kfree(in);
	kfree(out);
	return error;

abort:
	/* desc.tfm was NULLed on alloc failure, so never an ERR_PTR here */
	if (desc.tfm != NULL)
		crypto_free_blkcipher(desc.tfm);
	/* kfree(NULL) is a no-op */
	kfree(out);
	kfree(in);
	return error;
}
/* Perform WEP encryption on given skb that has at least 4 bytes of headroom * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted, * so the payload length increases with 8 bytes. * * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data)) */ static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv) { struct prism2_wep_data *wep = priv; u32 klen, len; u8 key[WEP_KEY_LEN + 3]; u8 *pos; cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE); #if ( !defined(BUILT_IN_CRYPTO) && ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) || (OPENSUSE_SLED)) ) struct blkcipher_desc desc = {.tfm = wep->tx_tfm}; #endif u32 crc; u8 *icv; #ifdef _RTL8192_EXT_PATCH_ u8 broadcastaddr[6] = {0xff,0xff,0xff,0xff,0xff,0xff}; struct rtllib_hdr_3addr* tmp_header = (struct rtllib_hdr_3addr*)(skb->data); u8 is_broadcast_data = 0; u8 is_multicast_data = 0; #endif struct scatterlist sg; if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 || skb->len < hdr_len){ printk("Error!!!headroom=%d tailroom=%d skblen=%d hdr_len=%d\n",skb_headroom(skb),skb_tailroom(skb),skb->len,hdr_len); return -1; } #ifdef _RTL8192_EXT_PATCH_ if(tcb_desc->badhoc==0){ if(memcmp(tmp_header->addr1,broadcastaddr,6) == 0){ is_broadcast_data = 1; tcb_desc->bHwSec = 0; } if(is_multicast_ether_addr(tmp_header->addr1)){ is_multicast_data = 1; tcb_desc->bHwSec = 0; } } #endif len = skb->len - hdr_len; pos = skb_push(skb, 4); memmove(pos, pos + 4, hdr_len); pos += hdr_len; klen = 3 + wep->key_len; wep->iv++; /* Fluhrer, Mantin, and Shamir have reported weaknesses in the key * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N) * can be used to speedup attacks, so avoid using them. 
*/ if ((wep->iv & 0xff00) == 0xff00) { u8 B = (wep->iv >> 16) & 0xff; if (B >= 3 && B < klen) wep->iv += 0x0100; } /* Prepend 24-bit IV to RC4 key and TX frame */ *pos++ = key[0] = (wep->iv >> 16) & 0xff; *pos++ = key[1] = (wep->iv >> 8) & 0xff; *pos++ = key[2] = wep->iv & 0xff; *pos++ = wep->key_idx << 6; /* Copy rest of the WEP key (the secret part) */ memcpy(key + 3, wep->key, wep->key_len); if (!tcb_desc->bHwSec) { /* Append little-endian CRC32 and encrypt it to produce ICV */ #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0)) crc = ~crc32_le(~0, pos, len); #else crc = ~ether_crc_le(len, pos); #endif icv = skb_put(skb, 4); icv[0] = crc; icv[1] = crc >> 8; icv[2] = crc >> 16; icv[3] = crc >> 24; #if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24)) sg.page = virt_to_page(pos); sg.offset = offset_in_page(pos); sg.length = len + 4; #else sg_init_one(&sg, pos, len+4); #endif #if ( defined(BUILT_IN_CRYPTO) || ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED)) ) crypto_cipher_setkey(wep->tfm, key, klen); crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4); return 0; #else crypto_blkcipher_setkey(wep->tx_tfm, key, klen); return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4); #endif }
/* * Perform the MPPE rekey algorithm, from RFC 3078, sec. 7.3. * Well, not what's written there, but rather what they meant. */ static void mppe_rekey(struct ppp_mppe_state * state, int initial_key) { struct scatterlist sg_in[1], sg_out[1]; struct blkcipher_desc desc = { .tfm = state->arc4 }; get_new_key_from_sha(state); if (!initial_key) { crypto_blkcipher_setkey(state->arc4, state->sha1_digest, state->keylen); sg_init_table(sg_in, 1); sg_init_table(sg_out, 1); setup_sg(sg_in, state->sha1_digest, state->keylen); setup_sg(sg_out, state->session_key, state->keylen); if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, state->keylen) != 0) { printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n"); } } else { memcpy(state->session_key, state->sha1_digest, state->keylen); } if (state->keylen == 8) { /* See RFC 3078 */ state->session_key[0] = 0xd1; state->session_key[1] = 0x26; state->session_key[2] = 0x9e; } crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen); } /* * Allocate space for a (de)compressor. */ static void *mppe_alloc(unsigned char *options, int optlen) { struct ppp_mppe_state *state; unsigned int digestsize; if (optlen != CILEN_MPPE + sizeof(state->master_key) || options[0] != CI_MPPE || options[1] != CILEN_MPPE) goto out; state = kzalloc(sizeof(*state), GFP_KERNEL); if (state == NULL) goto out; state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(state->arc4)) { state->arc4 = NULL; goto out_free; } state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC); if (IS_ERR(state->sha1)) { state->sha1 = NULL; goto out_free; } digestsize = crypto_hash_digestsize(state->sha1); if (digestsize < MPPE_MAX_KEY_LEN) goto out_free; state->sha1_digest = kmalloc(digestsize, GFP_KERNEL); if (!state->sha1_digest) goto out_free; /* Save keys. 
*/ memcpy(state->master_key, &options[CILEN_MPPE], sizeof(state->master_key)); memcpy(state->session_key, state->master_key, sizeof(state->master_key)); /* * We defer initial key generation until mppe_init(), as mppe_alloc() * is called frequently during negotiation. */ return (void *)state; out_free: if (state->sha1_digest) kfree(state->sha1_digest); if (state->sha1) crypto_free_hash(state->sha1); if (state->arc4) crypto_free_blkcipher(state->arc4); kfree(state); out: return NULL; } /* * Deallocate space for a (de)compressor. */ static void mppe_free(void *arg) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; if (state) { if (state->sha1_digest) kfree(state->sha1_digest); if (state->sha1) crypto_free_hash(state->sha1); if (state->arc4) crypto_free_blkcipher(state->arc4); kfree(state); } } /* * Initialize (de)compressor state. */ static int mppe_init(void *arg, unsigned char *options, int optlen, int unit, int debug, const char *debugstr) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; unsigned char mppe_opts; if (optlen != CILEN_MPPE || options[0] != CI_MPPE || options[1] != CILEN_MPPE) return 0; MPPE_CI_TO_OPTS(&options[2], mppe_opts); if (mppe_opts & MPPE_OPT_128) state->keylen = 16; else if (mppe_opts & MPPE_OPT_40) state->keylen = 8; else { printk(KERN_WARNING "%s[%d]: unknown key length\n", debugstr, unit); return 0; } if (mppe_opts & MPPE_OPT_STATEFUL) state->stateful = 1; /* Generate the initial session key. */ mppe_rekey(state, 1); if (debug) { int i; char mkey[sizeof(state->master_key) * 2 + 1]; char skey[sizeof(state->session_key) * 2 + 1]; printk(KERN_DEBUG "%s[%d]: initialized with %d-bit %s mode\n", debugstr, unit, (state->keylen == 16) ? 128 : 40, (state->stateful) ? 
"stateful" : "stateless"); for (i = 0; i < sizeof(state->master_key); i++) sprintf(mkey + i * 2, "%02x", state->master_key[i]); for (i = 0; i < sizeof(state->session_key); i++) sprintf(skey + i * 2, "%02x", state->session_key[i]); printk(KERN_DEBUG "%s[%d]: keys: master: %s initial session: %s\n", debugstr, unit, mkey, skey); } /* * Initialize the coherency count. The initial value is not specified * in RFC 3078, but we can make a reasonable assumption that it will * start at 0. Setting it to the max here makes the comp/decomp code * do the right thing (determined through experiment). */ state->ccount = MPPE_CCOUNT_SPACE - 1; /* * Note that even though we have initialized the key table, we don't * set the FLUSHED bit. This is contrary to RFC 3078, sec. 3.1. */ state->bits = MPPE_BIT_ENCRYPTED; state->unit = unit; state->debug = debug; return 1; } static int mppe_comp_init(void *arg, unsigned char *options, int optlen, int unit, int hdrlen, int debug) { /* ARGSUSED */ return mppe_init(arg, options, optlen, unit, debug, "mppe_comp_init"); } /* * We received a CCP Reset-Request (actually, we are sending a Reset-Ack), * tell the compressor to rekey. Note that we MUST NOT rekey for * every CCP Reset-Request; we only rekey on the next xmit packet. * We might get multiple CCP Reset-Requests if our CCP Reset-Ack is lost. * So, rekeying for every CCP Reset-Request is broken as the peer will not * know how many times we've rekeyed. (If we rekey and THEN get another * CCP Reset-Request, we must rekey again.) */ static void mppe_comp_reset(void *arg) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; state->bits |= MPPE_BIT_FLUSHED; } /* * Compress (encrypt) a packet. * It's strange to call this a compressor, since the output is always * MPPE_OVHD + 2 bytes larger than the input. 
*/ static int mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf, int isize, int osize) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; struct blkcipher_desc desc = { .tfm = state->arc4 }; int proto; struct scatterlist sg_in[1], sg_out[1]; /* * Check that the protocol is in the range we handle. */ proto = PPP_PROTOCOL(ibuf); if (proto < 0x0021 || proto > 0x00fa) return 0; /* Make sure we have enough room to generate an encrypted packet. */ if (osize < isize + MPPE_OVHD + 2) { /* Drop the packet if we should encrypt it, but can't. */ printk(KERN_DEBUG "mppe_compress[%d]: osize too small! " "(have: %d need: %d)\n", state->unit, osize, osize + MPPE_OVHD + 2); return -1; } osize = isize + MPPE_OVHD + 2; /* * Copy over the PPP header and set control bits. */ obuf[0] = PPP_ADDRESS(ibuf); obuf[1] = PPP_CONTROL(ibuf); obuf[2] = PPP_COMP >> 8; /* isize + MPPE_OVHD + 1 */ obuf[3] = PPP_COMP; /* isize + MPPE_OVHD + 2 */ obuf += PPP_HDRLEN; state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; if (state->debug >= 7) printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit, state->ccount); obuf[0] = state->ccount >> 8; obuf[1] = state->ccount & 0xff; if (!state->stateful || /* stateless mode */ ((state->ccount & 0xff) == 0xff) || /* "flag" packet */ (state->bits & MPPE_BIT_FLUSHED)) { /* CCP Reset-Request */ /* We must rekey */ if (state->debug && state->stateful) printk(KERN_DEBUG "mppe_compress[%d]: rekeying\n", state->unit); mppe_rekey(state, 0); state->bits |= MPPE_BIT_FLUSHED; } obuf[0] |= state->bits; state->bits &= ~MPPE_BIT_FLUSHED; /* reset for next xmit */ obuf += MPPE_OVHD; ibuf += 2; /* skip to proto field */ isize -= 2; /* Encrypt packet */ sg_init_table(sg_in, 1); sg_init_table(sg_out, 1); setup_sg(sg_in, ibuf, isize); setup_sg(sg_out, obuf, osize); if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) { printk(KERN_DEBUG "crypto_cypher_encrypt failed\n"); return -1; } state->stats.unc_bytes += isize; 
state->stats.unc_packets++; state->stats.comp_bytes += osize; state->stats.comp_packets++; return osize; } /* * Since every frame grows by MPPE_OVHD + 2 bytes, this is always going * to look bad ... and the longer the link is up the worse it will get. */ static void mppe_comp_stats(void *arg, struct compstat *stats) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; *stats = state->stats; } static int mppe_decomp_init(void *arg, unsigned char *options, int optlen, int unit, int hdrlen, int mru, int debug) { /* ARGSUSED */ return mppe_init(arg, options, optlen, unit, debug, "mppe_decomp_init"); } /* * We received a CCP Reset-Ack. Just ignore it. */ static void mppe_decomp_reset(void *arg) { /* ARGSUSED */ return; } /* * Decompress (decrypt) an MPPE packet. */ static int mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf, int osize) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; struct blkcipher_desc desc = { .tfm = state->arc4 }; unsigned ccount; int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED; int sanity = 0; struct scatterlist sg_in[1], sg_out[1]; if (isize <= PPP_HDRLEN + MPPE_OVHD) { if (state->debug) printk(KERN_DEBUG "mppe_decompress[%d]: short pkt (%d)\n", state->unit, isize); return DECOMP_ERROR; } /* * Make sure we have enough room to decrypt the packet. * Note that for our test we only subtract 1 byte whereas in * mppe_compress() we added 2 bytes (+MPPE_OVHD); * this is to account for possible PFC. */ if (osize < isize - MPPE_OVHD - 1) { printk(KERN_DEBUG "mppe_decompress[%d]: osize too small! 
" "(have: %d need: %d)\n", state->unit, osize, isize - MPPE_OVHD - 1); return DECOMP_ERROR; } osize = isize - MPPE_OVHD - 2; /* assume no PFC */ ccount = MPPE_CCOUNT(ibuf); if (state->debug >= 7) printk(KERN_DEBUG "mppe_decompress[%d]: ccount %d\n", state->unit, ccount); /* sanity checks -- terminate with extreme prejudice */ if (!(MPPE_BITS(ibuf) & MPPE_BIT_ENCRYPTED)) { printk(KERN_DEBUG "mppe_decompress[%d]: ENCRYPTED bit not set!\n", state->unit); state->sanity_errors += 100; sanity = 1; } if (!state->stateful && !flushed) { printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in " "stateless mode!\n", state->unit); state->sanity_errors += 100; sanity = 1; } if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) { printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on " "flag packet!\n", state->unit); state->sanity_errors += 100; sanity = 1; } if (sanity) { if (state->sanity_errors < SANITY_MAX) return DECOMP_ERROR; else /* * Take LCP down if the peer is sending too many bogons. * We don't want to do this for a single or just a few * instances since it could just be due to packet corruption. */ return DECOMP_FATALERROR; } /* * Check the coherency count. */ if (!state->stateful) { /* RFC 3078, sec 8.1. Rekey for every packet. */ while (state->ccount != ccount) { mppe_rekey(state, 0); state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; } } else { /* RFC 3078, sec 8.2. */ if (!state->discard) { /* normal state */ state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE; if (ccount != state->ccount) { /* * (ccount > state->ccount) * Packet loss detected, enter the discard state. * Signal the peer to rekey (by sending a CCP Reset-Request). */ state->discard = 1; return DECOMP_ERROR; } } else { /* discard state */ if (!flushed) { /* ccp.c will be silent (no additional CCP Reset-Requests). */ return DECOMP_ERROR; } else { /* Rekey for every missed "flag" packet. 
*/ while ((ccount & ~0xff) != (state->ccount & ~0xff)) { mppe_rekey(state, 0); state->ccount = (state->ccount + 256) % MPPE_CCOUNT_SPACE; } /* reset */ state->discard = 0; state->ccount = ccount; /* * Another problem with RFC 3078 here. It implies that the * peer need not send a Reset-Ack packet. But RFC 1962 * requires it. Hopefully, M$ does send a Reset-Ack; even * though it isn't required for MPPE synchronization, it is * required to reset CCP state. */ } } if (flushed) mppe_rekey(state, 0); } /* * Fill in the first part of the PPP header. The protocol field * comes from the decrypted data. */ obuf[0] = PPP_ADDRESS(ibuf); /* +1 */ obuf[1] = PPP_CONTROL(ibuf); /* +1 */ obuf += 2; ibuf += PPP_HDRLEN + MPPE_OVHD; isize -= PPP_HDRLEN + MPPE_OVHD; /* -6 */ /* net osize: isize-4 */ /* * Decrypt the first byte in order to check if it is * a compressed or uncompressed protocol field. */ sg_init_table(sg_in, 1); sg_init_table(sg_out, 1); setup_sg(sg_in, ibuf, 1); setup_sg(sg_out, obuf, 1); if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) { printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); return DECOMP_ERROR; } /* * Do PFC decompression. * This would be nicer if we were given the actual sk_buff * instead of a char *. */ if ((obuf[0] & 0x01) != 0) { obuf[1] = obuf[0]; obuf[0] = 0; obuf++; osize++; } /* And finally, decrypt the rest of the packet. */ setup_sg(sg_in, ibuf + 1, isize - 1); setup_sg(sg_out, obuf + 1, osize - 1); if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) { printk(KERN_DEBUG "crypto_cypher_decrypt failed\n"); return DECOMP_ERROR; } state->stats.unc_bytes += osize; state->stats.unc_packets++; state->stats.comp_bytes += isize; state->stats.comp_packets++; /* good packet credit */ state->sanity_errors >>= 1; return osize; } /* * Incompressible data has arrived (this should never happen!). * We should probably drop the link if the protocol is in the range * of what should be encrypted. At the least, we should drop this * packet. 
(How to do this?) */ static void mppe_incomp(void *arg, unsigned char *ibuf, int icnt) { struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg; if (state->debug && (PPP_PROTOCOL(ibuf) >= 0x0021 && PPP_PROTOCOL(ibuf) <= 0x00fa)) printk(KERN_DEBUG "mppe_incomp[%d]: incompressible (unencrypted) data! " "(proto %04x)\n", state->unit, PPP_PROTOCOL(ibuf)); state->stats.inc_bytes += icnt; state->stats.inc_packets++; state->stats.unc_bytes += icnt; state->stats.unc_packets++; } /************************************************************* * Module interface table *************************************************************/ /* * Procedures exported to if_ppp.c. */ static struct compressor ppp_mppe = { .compress_proto = CI_MPPE, .comp_alloc = mppe_alloc, .comp_free = mppe_free, .comp_init = mppe_comp_init, .comp_reset = mppe_comp_reset, .compress = mppe_compress, .comp_stat = mppe_comp_stats, .decomp_alloc = mppe_alloc, .decomp_free = mppe_free, .decomp_init = mppe_decomp_init, .decomp_reset = mppe_decomp_reset, .decompress = mppe_decompress, .incomp = mppe_incomp, .decomp_stat = mppe_comp_stats, .owner = THIS_MODULE, .comp_extra = MPPE_PAD, }; /* * ppp_mppe_init() * * Prior to allowing load, try to load the arc4 and sha1 crypto * libraries. The actual use will be allocated later, but * this way the module will fail to insmod if they aren't available. */ static int __init ppp_mppe_init(void) { int answer; if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) && crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC))) return -ENODEV; sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL); if (!sha_pad) return -ENOMEM; sha_pad_init(sha_pad); answer = ppp_register_compressor(&ppp_mppe); if (answer == 0) printk(KERN_INFO "PPP MPPE Compression module registered\n"); else kfree(sha_pad); return answer; } static void __exit ppp_mppe_cleanup(void) { ppp_unregister_compressor(&ppp_mppe); kfree(sha_pad); } module_init(ppp_mppe_init); module_exit(ppp_mppe_cleanup);
/* * Function definition for sys_xcrypt. * This function encrypts/decrypts files using AES Block Cipher algorithm using CBC mode. */ asmlinkage int sys_xcrypt( const char * const infile, const char * const opfile, const char * const keybuf, const int keylen, const short int flags ) { const char algo[] = "cbc(aes)"; char *ipBuf = NULL, *opBuf = NULL, *iv = NULL, *inFile = NULL, *opFile = NULL, *keyBuf = NULL; int errno = 0, ret = 0; int actReadLen = 0, actWriteLen = 0, padLen = 0, blkSiz = 0, ipFileLen = 0, opFileLen = 0, keyLen = 0; int delOpFile = 0, prmbLen = 0, idx = 0; unsigned int fileSize = 0, factor = 1; struct file *inFilePtr = NULL, *opFilePtr = NULL; struct crypto_blkcipher *tfm = NULL; struct blkcipher_desc desc; struct scatterlist sg[2]; struct dentry *tmpDentry; struct inode *tmpInode = NULL; mm_segment_t oldfs; /* Check for NULL pointers or invalid values */ if( ( NULL == infile ) || ( NULL == opfile ) || ( NULL == keybuf ) || ( ( _FLAG_ENCRYPT_ != flags ) && ( _FLAG_DECRYPT_ != flags ) ) ) { printk( KERN_ALERT "Invalid I/P" ); errno = -EINVAL; goto OUT_OK; } /* Verify if all the pointers belong to the user's own address space */ ret = access_ok( VERIFY_READ, infile, 0 ); if( !ret ) { printk( KERN_ALERT "Invalid pointer to I/P file passed as argument" ); errno = -EFAULT; goto OUT_OK; } ret = access_ok( VERIFY_READ, opfile, 0 ); if( !ret ) { printk( KERN_ALERT "Invalid pointer to O/P file passed as argument" ); errno = -EFAULT; goto OUT_OK; } ret = access_ok( VERIFY_READ, keybuf, 0 ); if( !ret ) { printk( KERN_ALERT "Invalid pointer to Password passed as argument" ); errno = -EFAULT; goto OUT_OK; } /* Find out the length of the i/p buffers */ ipFileLen = strlen_user( infile ); opFileLen = strlen_user( opfile ); keyLen = strlen_user( keybuf ); /* Allocate buffers to copy i/p arguments from user space to kernel space */ inFile = kmalloc( ipFileLen, GFP_KERNEL ); if( NULL == inFile ) { errno = -ENOMEM; goto OUT_OK; } else { ret = strncpy_from_user( 
inFile, infile, ipFileLen ); if( ret < 0 ) { errno = ret; goto OUT_IP; } } opFile = kmalloc( opFileLen, GFP_KERNEL ); if( NULL == opFile ) { errno = -ENOMEM; goto OUT_IP; } else { ret = strncpy_from_user( opFile, opfile, opFileLen ); if( ret < 0 ) { errno = ret; goto OUT_IP; } } keyBuf = kmalloc( keyLen, GFP_KERNEL ); if( NULL == keyBuf ) { errno = -ENOMEM; goto OUT_IP; } else { ret = strncpy_from_user( keyBuf, keybuf, keyLen ); if( ret < 0 ) { errno = ret; goto OUT_IP; } } /* Open I/P file. It will report error in case of non-existing file and bad permissions but not bad owner */ inFilePtr = filp_open( inFile, O_RDONLY, 0 ); if ( !inFilePtr || IS_ERR( inFilePtr ) ) { errno = (int)PTR_ERR( inFilePtr ); printk( KERN_ALERT "Error opening i/p file: %d\n", errno ); inFilePtr = NULL; goto OUT_IP; } /* Check if the file is a regular file or not */ if( !S_ISREG( inFilePtr->f_path.dentry->d_inode->i_mode ) ) { printk( KERN_ALERT "Error as file is not a regular one" ); errno = -EBADF; goto OUT_FILE; } /* Check if the I/p file and the process owner match */ if( ( current->real_cred->uid != inFilePtr->f_path.dentry->d_inode->i_uid ) && ( current->real_cred->uid != 0 ) ) { printk( KERN_ALERT "Error as owner of file and process does not match" ); errno = -EACCES; goto OUT_FILE; } /* Open O/P file with error handling */ opFilePtr = filp_open( opFile, O_WRONLY | O_CREAT | O_EXCL, 0 ); if ( !opFilePtr || IS_ERR( opFilePtr ) ) { errno = (int)PTR_ERR( opFilePtr ); printk( KERN_ALERT "Error opening o/p file: %d\n", errno ); opFilePtr = NULL; goto OUT_FILE; } /* * Check if the infile and opfile point to the same file * If they reside on the different file partition and have same name then it should be allowed else not */ if( ( inFilePtr->f_path.dentry->d_inode->i_sb == opFilePtr->f_path.dentry->d_inode->i_sb ) && ( inFilePtr->f_path.dentry->d_inode->i_ino == opFilePtr->f_path.dentry->d_inode->i_ino ) ) { printk( KERN_ALERT "I/p and O/p file cannot be same" ); errno = -EINVAL; goto 
OUT_FILE; } /* Set the o/p file permission to i/p file */ opFilePtr->f_path.dentry->d_inode->i_mode = inFilePtr->f_path.dentry->d_inode->i_mode; /* Set the file position to the beginning of the file */ inFilePtr->f_pos = 0; opFilePtr->f_pos = 0; /* Allocate buffer to read data into and to write data to. For performance reasons, set its size equal to PAGE_SIZE */ ipBuf = kmalloc( PAGE_SIZE, GFP_KERNEL ); if( NULL == ipBuf ) { errno = -ENOMEM; goto OUT_FILE; } memset( ipBuf, _NULL_CHAR_, PAGE_SIZE ); opBuf = kmalloc( PAGE_SIZE, GFP_KERNEL ); if( NULL == opBuf ) { errno = -ENOMEM; goto OUT_DATA_PAGE; } memset( opBuf, _NULL_CHAR_, PAGE_SIZE ); /* Allocate tfm */ tfm = crypto_alloc_blkcipher( algo, 0, CRYPTO_ALG_ASYNC ); if ( NULL == tfm ) { printk( KERN_ALERT "Failed to load transform for %s: %ld\n", algo, PTR_ERR( tfm ) ); errno = -EINVAL; goto OUT_DATA_PAGE; } /* Initialize desc */ desc.tfm = tfm; desc.flags = 0; ret = crypto_blkcipher_setkey( tfm, keybuf, keylen ); if( ret ) { printk( "Setkey() failed. 
Flags=%x\n", crypto_blkcipher_get_flags( tfm ) ); errno = -EINVAL; goto OUT_CIPHER; } /* Initialize sg structure */ FILL_SG( &sg[0], ipBuf, PAGE_SIZE ); FILL_SG( &sg[1], opBuf, PAGE_SIZE ); /* Get the block size */ blkSiz = ((tfm->base).__crt_alg)->cra_blocksize; /* Initialize IV */ iv = kmalloc( blkSiz, GFP_KERNEL ); if( NULL == iv ) { errno = -ENOMEM; goto OUT_CIPHER; } memset( iv, _NULL_CHAR_, blkSiz ); crypto_blkcipher_set_iv( tfm, iv, crypto_blkcipher_ivsize( tfm ) ); /* Store the key and file size in encrypted form in the preamble */ switch( flags ) { case _FLAG_ENCRYPT_: memcpy( ipBuf, keybuf, keylen ); prmbLen = keylen; fileSize = (unsigned int)inFilePtr->f_path.dentry->d_inode->i_size; while( fileSize ) { ipBuf[ prmbLen + idx ] = fileSize % 10; fileSize /= 10; ++idx; } prmbLen += idx; #ifdef _DEBUG_ printk( KERN_ALERT "idx=%d prmbLen=%d\n", idx, prmbLen ); #endif memset( ipBuf + prmbLen, _ETX_, _UL_MAX_SIZE_ - idx ); prmbLen += ( _UL_MAX_SIZE_ - idx ); #ifdef _DEBUG_ printk( KERN_ALERT "prmbLen=%d\n", prmbLen ); #endif padLen = blkSiz - ( prmbLen % blkSiz ); memset( ipBuf + prmbLen, _ETX_, padLen ); prmbLen += padLen; #ifdef _DEBUG_ printk( KERN_ALERT "padLen=%d prmbLen=%d\n", padLen, prmbLen ); #endif ret = crypto_blkcipher_encrypt( &desc, &sg[1], &sg[0], prmbLen ); if (ret) { printk( KERN_ALERT "Encryption failed. 
Flags=0x%x\n", tfm->base.crt_flags ); delOpFile = 1; goto OUT_IV; } oldfs = get_fs(); set_fs( KERNEL_DS ); opFilePtr->f_op->write( opFilePtr, opBuf, prmbLen, &opFilePtr->f_pos ); /* Reset the address space to user one */ set_fs( oldfs ); break; case _FLAG_DECRYPT_: /* Set the address space to kernel one */ oldfs = get_fs(); set_fs( KERNEL_DS ); prmbLen = keylen + _UL_MAX_SIZE_; padLen = blkSiz - ( prmbLen % blkSiz ); prmbLen += padLen; #ifdef _DEBUG_ printk( KERN_ALERT "padLen=%d prmbLen=%d\n", padLen, prmbLen ); #endif actReadLen = inFilePtr->f_op->read( inFilePtr, ipBuf, prmbLen, &inFilePtr->f_pos ); if( actReadLen != prmbLen ) { printk( KERN_ALERT "Requested number of bytes for preamble are lesser" ); delOpFile = 1; goto OUT_IV; } #ifdef _DEBUG_ printk( KERN_ALERT "actReadLen=%d\n", actReadLen ); #endif /* Reset the address space to user one */ set_fs( oldfs ); ret = crypto_blkcipher_decrypt( &desc, &sg[1], &sg[0], prmbLen ); if (ret) { printk( KERN_ALERT "Decryption failed. Flags=0x%x\n", tfm->base.crt_flags ); delOpFile = 1; goto OUT_IV; } ret = memcmp( keybuf, opBuf, keylen ); if( ret ) { printk( "Wrong password entered." 
); errno = -EKEYREJECTED; goto OUT_IV; } idx = 0; fileSize = 0; while( opBuf[ keylen + idx ] != _ETX_ ) { fileSize += opBuf[ keylen + idx ] * factor; factor *= 10; ++idx; } #ifdef _DEBUG_ printk( KERN_ALERT "idx=%d fileSize=%u\n", idx, fileSize ); #endif break; } /* Read file till the file pointer reaches to the EOF */ while( inFilePtr->f_pos < inFilePtr->f_path.dentry->d_inode->i_size ) { /* Initialize it to NULL char */ memset( ipBuf, _NULL_CHAR_, PAGE_SIZE ); memset( opBuf, _NULL_CHAR_, PAGE_SIZE ); /* Set the address space to kernel one */ oldfs = get_fs(); set_fs( KERNEL_DS ); actReadLen = inFilePtr->f_op->read( inFilePtr, ipBuf, PAGE_SIZE, &inFilePtr->f_pos ); /* Reset the address space to user one */ set_fs( oldfs ); /* As per the i/p flag, do encryption/decryption */ switch( flags ) { case _FLAG_ENCRYPT_: /* For encryption ensure padding as per the block size */ #ifdef _DEBUG_ printk( KERN_ALERT "Bytes read from I/P file ::%d::\n", actReadLen ); #endif if( actReadLen % blkSiz ) { padLen = blkSiz - ( actReadLen % blkSiz ); memset( ipBuf + actReadLen, _ETX_, padLen ); actReadLen += padLen; } #ifdef _DEBUG_ printk( KERN_ALERT "Pad Length ::%d::\n", padLen ); printk( KERN_ALERT "Data read from I/P file ::%s::\n", ipBuf ); #endif /* Encrypt the data */ ret = crypto_blkcipher_encrypt( &desc, &sg[1], &sg[0], PAGE_SIZE ); if (ret) { printk( KERN_ALERT "Encryption failed. Flags=0x%x\n", tfm->base.crt_flags ); delOpFile = 1; goto OUT_IV; } break; case _FLAG_DECRYPT_: /* Decrypt the data */ ret = crypto_blkcipher_decrypt( &desc, &sg[1], &sg[0], PAGE_SIZE ); if (ret) { printk( KERN_ALERT "Decryption failed. 
Flags=0x%x\n", tfm->base.crt_flags ); delOpFile = 1; goto OUT_IV; } #ifdef _DEBUG_ printk( KERN_ALERT "Bytes read from I/P file ::%d::\n", actReadLen ); #endif while( _ETX_ == opBuf[ actReadLen - 1 ] ) { opBuf[ actReadLen - 1 ] = _NULL_CHAR_; --actReadLen; } #ifdef _DEBUG_ printk( KERN_ALERT "Bytes read from I/P file ::%d::\n", actReadLen ); printk( KERN_ALERT "Data read from I/P file ::%s::\n", opBuf ); #endif break; } /* * Start writing to the o/p file * Set the address space to kernel one */ oldfs = get_fs(); set_fs( KERNEL_DS ); actWriteLen = opFilePtr->f_op->write( opFilePtr, opBuf, actReadLen, &opFilePtr->f_pos ); /* Reset the address space to user one */ set_fs( oldfs ); #ifdef _DEBUG_ printk( KERN_ALERT "Bytes written to O/P file ::%d::\n", actWriteLen ); #endif } /* Free iv */ OUT_IV: kfree( iv ); iv = NULL; printk( KERN_ALERT "Memory for IV freed ..." ); /* Free tfm */ OUT_CIPHER: crypto_free_blkcipher( tfm ); printk( KERN_ALERT "Encryption Transform freed ..." ); /* Free i/p and o/p buffers */ OUT_DATA_PAGE: if( ipBuf ) { kfree( ipBuf ); ipBuf = NULL; } if( opBuf ) { kfree( opBuf ); opBuf = NULL; } printk( KERN_ALERT "Memory for encrption/decryption freed ..." ); /* Close any open files */ OUT_FILE: if( inFilePtr ) { filp_close( inFilePtr, NULL ); inFilePtr = NULL; printk( KERN_ALERT "I/p file closed ..." ); } if( opFilePtr ) { filp_close( opFilePtr, NULL ); opFilePtr = NULL; printk( KERN_ALERT "O/p file closed ..." ); } if( delOpFile ) { opFilePtr = filp_open( opFile, O_WRONLY , 0 ); if ( !opFilePtr || IS_ERR( opFilePtr ) ) { opFilePtr = NULL; goto OUT_IP; } tmpDentry = opFilePtr->f_path.dentry; tmpInode = tmpDentry->d_parent->d_inode; filp_close( opFilePtr, NULL ); vfs_unlink( tmpInode, tmpDentry ); printk( KERN_ALERT "O/p file deleted ..." 
); } OUT_IP: if( inFile ) { kfree( inFile ); inFile = NULL; } if( opFile ) { kfree( opFile ); opFile = NULL; } if( keyBuf ) { kfree( keyBuf ); keyBuf = NULL; } printk( KERN_ALERT "Memory for I/P parameters freed ..." ); /* Return final status */ OUT_OK: printk( KERN_ALERT "Exiting function sys_xcrypt ..." ); return errno; }