u32
krb5_encrypt(
	struct crypto_blkcipher *tfm,
	void * iv,
	void * in,
	void * out,
	int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[GSS_KRB5_MAX_BLOCKSIZE] = {0};
	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

	if (length % crypto_blkcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_blkcipher_ivsize(tfm) > GSS_KRB5_MAX_BLOCKSIZE) {
		dprintk("RPC:       gss_k5encrypt: tfm iv size too large %d\n",
			crypto_blkcipher_ivsize(tfm));
		goto out;
	}

	if (iv)
		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	ret = crypto_blkcipher_encrypt_iv(&desc, sg, sg, length);
out:
	dprintk("RPC:       krb5_encrypt returns %d\n", ret);
	return ret;
}
Example #2
u32
krb5_decrypt(
     struct crypto_blkcipher *tfm,
     void * iv,
     void * in,
     void * out,
     int length)
{
	u32 ret = -EINVAL;
	struct scatterlist sg[1];
	u8 local_iv[16] = {0};
	struct blkcipher_desc desc = { .tfm = tfm, .info = local_iv };

	if (length % crypto_blkcipher_blocksize(tfm) != 0)
		goto out;

	if (crypto_blkcipher_ivsize(tfm) > 16) {
		dprintk("RPC:       gss_k5decrypt: tfm iv size to large %d\n",
			crypto_blkcipher_ivsize(tfm));
		goto out;
	}
	if (iv)
		memcpy(local_iv, iv, crypto_blkcipher_ivsize(tfm));

	memcpy(out, in, length);
	sg_init_one(sg, out, length);

	ret = crypto_blkcipher_decrypt_iv(&desc, sg, sg, length);
out:
	dprintk("RPC:       gss_k5decrypt returns %d\n",ret);
	return ret;
}
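Both krb5 helpers above work in place on a caller-supplied output buffer and substitute an all-zero IV when none is passed in. A minimal, hypothetical caller sketch follows; the "cbc(des)" algorithm name and the wrapper function are illustrative assumptions, not part of the original code.

/* Hypothetical caller of krb5_encrypt(); wrapper name and algorithm are assumed. */
static u32 example_krb5_seal(const u8 *key, unsigned int keylen,
			     void *in, void *out, int len)
{
	struct crypto_blkcipher *tfm;
	u32 err;

	tfm = crypto_alloc_blkcipher("cbc(des)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	if (crypto_blkcipher_setkey(tfm, key, keylen)) {
		crypto_free_blkcipher(tfm);
		return -EINVAL;
	}

	/* NULL iv: krb5_encrypt() falls back to an all-zero IV */
	err = krb5_encrypt(tfm, NULL, in, out, len);
	crypto_free_blkcipher(tfm);
	return err;
}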
Example #3
static int encrypt_Cipher(char *key, char *src, char *dest, unsigned int len, int *written) {
    struct crypto_blkcipher *blkcipher = NULL;
    char *cipher = "cbc(aes)";
    struct scatterlist sg_in[2];
    struct scatterlist sg_out[1];
    struct blkcipher_desc desc;
    unsigned int encrypted_datalen;
    unsigned int padlen;
    char pad[16];
    char *iv=NULL;
    int ret = -EFAULT;
    encrypted_datalen = nearestRoundup(len);
    padlen = encrypted_datalen - len;
    blkcipher = crypto_alloc_blkcipher(cipher, 0, 0);
    if (IS_ERR(blkcipher)) {
        printk("could not allocate blkcipher handle for %s\n", cipher);
        return PTR_ERR(blkcipher);
    }
   
    if (crypto_blkcipher_setkey(blkcipher, key, strlen(key))) {
        printk("key could not be set\n");
        ret = -EAGAIN;
        goto out;
    } 
    desc.flags = 0;
    desc.tfm = blkcipher;

    iv = (char *)kmalloc(crypto_blkcipher_ivsize(blkcipher) , GFP_KERNEL);
    if(iv==NULL) {
	printk("Initialisation vector not initialised\n");
        ret = -ENOMEM;
        goto out;
    }
    memset(iv, 0, crypto_blkcipher_ivsize(blkcipher));
    memset(pad, 0, sizeof pad);
    sg_init_table(sg_in, 2);
    sg_set_buf(&sg_in[0], src, len);
    sg_set_buf(&sg_in[1], pad, padlen);
    
    sg_init_table(sg_out, 1);
    sg_set_buf(sg_out, dest, encrypted_datalen);
    crypto_blkcipher_set_iv(blkcipher, iv, crypto_blkcipher_ivsize(blkcipher)); 
    ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in, encrypted_datalen);

    (*written) = encrypted_datalen;
    printk("Cipher Encryption operation completed\n");
    kfree(iv);
    crypto_free_blkcipher(blkcipher);
    return ret;
 
out:
    if (blkcipher)
	crypto_free_blkcipher(blkcipher);
    if (iv)
	kfree(iv);
    return ret;
}
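encrypt_Cipher() relies on a nearestRoundup() helper that is not part of the listing; judging by how padlen is computed, it rounds the plaintext length up to the next multiple of the 16-byte AES block. A plausible definition, given here only as an assumption:

/* Assumed helper: round len up to a multiple of the AES block size (16). */
static unsigned int nearestRoundup(unsigned int len)
{
    return (len + 15) & ~15U;
}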
Example #4
static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
{
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int err, iv_len;
	unsigned char iv[128];

	if (tfm == NULL) {
		BT_ERR("tfm %p", tfm);
		return -EINVAL;
	}

	desc.tfm = tfm;
	desc.flags = 0;

	err = crypto_blkcipher_setkey(tfm, k, 16);
	if (err) {
		BT_ERR("cipher setkey failed: %d", err);
		return err;
	}

	sg_init_one(&sg, r, 16);

	iv_len = crypto_blkcipher_ivsize(tfm);
	if (iv_len) {
		memset(&iv, 0xff, iv_len);
		crypto_blkcipher_set_iv(tfm, iv, iv_len);
	}

	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
	if (err)
		BT_ERR("Encrypt data error %d", err);

	return err;
}
int blkcipher_walk_phys(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk)
{
	walk->flags |= BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->cipher_blocksize = walk->walk_blocksize;
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
int blkcipher_walk_virt_block(struct blkcipher_desc *desc,
			      struct blkcipher_walk *walk,
			      unsigned int blocksize)
{
	walk->flags &= ~BLKCIPHER_WALK_PHYS;
	walk->walk_blocksize = blocksize;
	walk->cipher_blocksize = crypto_blkcipher_blocksize(desc->tfm);
	walk->ivsize = crypto_blkcipher_ivsize(desc->tfm);
	walk->alignmask = crypto_blkcipher_alignmask(desc->tfm);
	return blkcipher_walk_first(desc, walk);
}
Example #7
/*
 * CC-MAC function WUSB1.0[6.5]
 *
 * Take a data string and produce the encrypted CBC Counter-mode MIC
 *
 * Note the names for most function arguments are made to (more or
 * less) match those used in the pseudo-function definition given in
 * WUSB1.0[6.5].
 *
 * @tfm_cbc: CBC(AES) blkcipher handle (initialized)
 *
 * @tfm_aes: AES cipher handle (initialized)
 *
 * @mic: buffer for placing the computed MIC (Message Integrity
 *       Code). This is exactly 8 bytes, and we expect the buffer to
 *       be at least eight bytes in length.
 *
 * @key: 128 bit symmetric key
 *
 * @n: CCM nonce
 *
 * @a: ASCII string, 14 bytes long (I guess zero padded if needed;
 *     we use exactly 14 bytes).
 *
 * @b: data stream to be processed; cannot be a global or const local
 *     (will confuse the scatterlists)
 *
 * @blen: size of b...
 *
 * Still not very clear how this is done, but looks like this: we
 * create block B0 (as WUSB1.0[6.5] says), then we AES-crypt it with
 * @key. We bytewise xor B0 with B1 (1) and AES-crypt that. Then we
 * take the payload and divide it in blocks (16 bytes), xor them with
 * the previous crypto result (16 bytes) and crypt it, repeat the next
 * block with the output of the previous one, rinse wash (I guess this
 * is what AES CBC mode means...but I truly have no idea). So we use
 * the CBC(AES) blkcipher, that does precisely that. The IV (Initial
 * Vector) is 16 bytes and is set to zero, so
 *
 * See rfc3610. Linux crypto has a CBC implementation, but the
 * documentation is scarce, to say the least, and the example code is
 * so intricated that is difficult to understand how things work. Most
 * of this is guess work -- bite me.
 *
 * (1) Created as 6.5 says, again, using as l(a) 'Blen + 14', and
 *     using the 14 bytes of @a to fill up
 *     b1.{mac_header,e0,security_reserved,padding}.
 *
 * NOTE: The definition of l(a) in WUSB1.0[6.5] vs the definition of
 *       l(m) is orthogonal, they bear no relationship, so it is not
 *       in conflict with the parameter's relation that
 *       WUSB1.0[6.4.2]) defines.
 *
 * NOTE: WUSB1.0[A.1]: Host Nonce is missing a nibble? (1e); fixed in
 *       first errata released on 2005/07.
 *
 * NOTE: we need to clean IV to zero at each invocation to make sure
 *       we start with a fresh empty Initial Vector, so that the CBC
 *       works ok.
 *
 * NOTE: blen is not aligned to a block size, we'll pad zeros, that's
 *       what sg[4] is for. Maybe there is a smarter way to do this.
 */
static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
			struct crypto_cipher *tfm_aes, void *mic,
			const struct aes_ccm_nonce *n,
			const struct aes_ccm_label *a, const void *b,
			size_t blen)
{
	int result = 0;
	struct blkcipher_desc desc;
	struct aes_ccm_b0 b0;
	struct aes_ccm_b1 b1;
	struct aes_ccm_a ax;
	struct scatterlist sg[4], sg_dst;
	void *iv, *dst_buf;
	size_t ivsize, dst_size;
	const u8 bzero[16] = { 0 };
	size_t zero_padding;

	/*
	 * These checks should be compile time optimized out
	 * ensure @a fills b1's mac_header and following fields
	 */
	WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la));
	WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block));
	WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block));
	WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block));

	result = -ENOMEM;
	zero_padding = blen % sizeof(struct aes_ccm_block);
	if (zero_padding)
		zero_padding = sizeof(struct aes_ccm_block) - zero_padding;
	dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding;
	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (dst_buf == NULL) {
		printk(KERN_ERR "E: can't alloc destination buffer\n");
		goto error_dst_buf;
	}

	iv = crypto_blkcipher_crt(tfm_cbc)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm_cbc);
	memset(iv, 0, ivsize);

	/* Setup B0 */
	b0.flags = 0x59;	/* Format B0 */
	b0.ccm_nonce = *n;
	b0.lm = cpu_to_be16(0);	/* WUSB1.0[6.5] sez l(m) is 0 */

	/* Setup B1
	 *
	 * The WUSB spec is anything but clear! WUSB1.0[6.5]
	 * says that to initialize B1 from A with 'l(a) = blen +
	 * 14'--after clarification, it means to use A's contents
	 * for MAC Header, EO, sec reserved and padding.
	 */
	b1.la = cpu_to_be16(blen + 14);
	memcpy(&b1.mac_header, a, sizeof(*a));

	sg_init_table(sg, ARRAY_SIZE(sg));
	sg_set_buf(&sg[0], &b0, sizeof(b0));
	sg_set_buf(&sg[1], &b1, sizeof(b1));
	sg_set_buf(&sg[2], b, blen);
	/* 0 if well behaved :) */
	sg_set_buf(&sg[3], bzero, zero_padding);
	sg_init_one(&sg_dst, dst_buf, dst_size);

	desc.tfm = tfm_cbc;
	desc.flags = 0;
	result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size);
	if (result < 0) {
		printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n",
		       result);
		goto error_cbc_crypt;
	}

	/* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5]
	 * The procedure is to AES crypt the A0 block and XOR the MIC
	 * Tag against it; we only do the first 8 bytes and place it
	 * directly in the destination buffer.
	 *
	 * POS Crypto API: size is assumed to be AES's block size.
	 * Thanks for documenting it -- tip taken from airo.c
	 */
	ax.flags = 0x01;		/* as per WUSB 1.0 spec */
	ax.ccm_nonce = *n;
	ax.counter = 0;
	crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax);
	bytewise_xor(mic, &ax, iv, 8);
	result = 8;
error_cbc_crypt:
	kfree(dst_buf);
error_dst_buf:
	return result;
}
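The final step above XORs the AES-encrypted Ax block against the last CBC block (which the transform leaves in its IV) and keeps only the first 8 bytes as the MIC. The bytewise_xor() helper is not reproduced in this listing; a straightforward byte-at-a-time definition along these lines is assumed:

/* Assumed helper: dst = a XOR b over 'size' bytes (size is 8 above). */
static void bytewise_xor(void *_dst, const void *_a, const void *_b,
			 size_t size)
{
	u8 *dst = _dst;
	const u8 *a = _a, *b = _b;
	size_t i;

	for (i = 0; i < size; i++)
		dst[i] = a[i] ^ b[i];
}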
Example #8
static int aes_get_sizes(void)
{
	struct crypto_blkcipher *tfm;

	tfm = crypto_alloc_blkcipher(blkcipher_alg, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		pr_err("encrypted_key: failed to alloc_cipher (%ld)\n",
		       PTR_ERR(tfm));
		return PTR_ERR(tfm);
	}
	ivsize = crypto_blkcipher_ivsize(tfm);
	blksize = crypto_blkcipher_blocksize(tfm);
	crypto_free_blkcipher(tfm);
	return 0;
}
static int __dek_aes_decrypt(struct crypto_blkcipher *tfm, char *src, char *dst, int len) {
	struct blkcipher_desc desc;
	struct scatterlist src_sg, dst_sg;
	int ivsize = crypto_blkcipher_ivsize(tfm);
	u8 iv[ivsize];

	memset(&iv, 0, sizeof(iv));
	desc.tfm = tfm;
	desc.info = iv;
	desc.flags = 0;

	sg_init_one(&src_sg, src, len);
	sg_init_one(&dst_sg, dst, len);

	return crypto_blkcipher_decrypt_iv(&desc, &dst_sg, &src_sg, len);
}
int ceph_aes_decrypt(const void *key, int key_len, void *dst, size_t *dst_len, const void *src,  size_t src_len){

    struct scatterlist sg_in[1], sg_out[2];
    struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
    struct blkcipher_desc desc = { .tfm = tfm };
    char pad[16];
    void *iv;
    int ivsize;
    int ret;
    int last_byte;

    if (IS_ERR(tfm))
        return PTR_ERR(tfm);
                                                            
    crypto_blkcipher_setkey((void *)tfm, key, key_len);
    sg_init_table(sg_in, 1);
    sg_init_table(sg_out, 2);
    sg_set_buf(sg_in, src, src_len);
    sg_set_buf(&sg_out[0], dst, *dst_len);
    sg_set_buf(&sg_out[1], pad, sizeof(pad));

    iv = crypto_blkcipher_crt(tfm)->iv;
    ivsize = crypto_blkcipher_ivsize(tfm);
    memcpy(iv, aes_iv, ivsize); 

    ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
    crypto_free_blkcipher(tfm);
    if (ret < 0) {
        pr_err("ceph_aes_decrypt failed %d\n", ret);
        return ret;
    }
    
    if (src_len <= *dst_len)
        last_byte = ((char*)dst)[src_len - 1];
    else
        last_byte = pad[src_len - *dst_len - 1];

    if (last_byte <= 16 && src_len >= last_byte) {
        *dst_len = src_len - last_byte;
    } 
    else {
        pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
        last_byte, (int)src_len);
        return -EPERM;  /* bad padding */
    }
    return 0;
}
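Every ceph helper in this listing obtains its transform from ceph_crypto_alloc_cipher(), which is not shown; it is presumably a thin wrapper around the CBC(AES) allocation used elsewhere in these examples, along the lines of:

/* Assumed wrapper: the ceph helpers above operate on a cbc(aes) blkcipher. */
static struct crypto_blkcipher *ceph_crypto_alloc_cipher(void)
{
	return crypto_alloc_blkcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
}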
int blkcipher_walk_done(struct blkcipher_desc *desc,
			struct blkcipher_walk *walk, int err)
{
	struct crypto_blkcipher *tfm = desc->tfm;
	unsigned int nbytes = 0;

	if (likely(err >= 0)) {
		unsigned int n = walk->nbytes - err;

		if (likely(!(walk->flags & BLKCIPHER_WALK_SLOW)))
			n = blkcipher_done_fast(walk, n);
		else if (WARN_ON(err)) {
			err = -EINVAL;
			goto err;
		} else
			n = blkcipher_done_slow(tfm, walk, n);

		nbytes = walk->total - n;
		err = 0;
	}

	scatterwalk_done(&walk->in, 0, nbytes);
	scatterwalk_done(&walk->out, 1, nbytes);

err:
	walk->total = nbytes;
	walk->nbytes = nbytes;

	if (nbytes) {
		crypto_yield(desc->flags);
		return blkcipher_walk_next(desc, walk);
	}

	if (walk->iv != desc->info)
		memcpy(desc->info, walk->iv, crypto_blkcipher_ivsize(tfm));
	if (walk->buffer != walk->page)
		kfree(walk->buffer);
	if (walk->page)
		free_page((unsigned long)walk->page);

	return err;
}
Example #12
static inline int blkcipher_copy_iv(struct blkcipher_walk *walk,
				    struct crypto_blkcipher *tfm,
				    unsigned int alignmask)
{
	unsigned bs = crypto_blkcipher_blocksize(tfm);
	unsigned int ivsize = crypto_blkcipher_ivsize(tfm);
	unsigned int size = bs * 2 + ivsize + max(bs, ivsize) - (alignmask + 1);
	u8 *iv;

	size += alignmask & ~(crypto_tfm_ctx_alignment() - 1);
	walk->buffer = kmalloc(size, GFP_ATOMIC);
	if (!walk->buffer)
		return -ENOMEM;

	iv = (u8 *)ALIGN((unsigned long)walk->buffer, alignmask + 1);
	iv = blkcipher_get_spot(iv, bs) + bs;
	iv = blkcipher_get_spot(iv, bs) + bs;
	iv = blkcipher_get_spot(iv, ivsize);

	walk->iv = memcpy(iv, walk->iv, ivsize);
	return 0;
}
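blkcipher_copy_iv() sizes one buffer for two block-aligned scratch areas plus the IV, then uses blkcipher_get_spot() to pick positions that do not straddle a page boundary. That helper is not included above; in crypto/blkcipher.c it is essentially:

/* Return a position at or after start where len bytes stay on one page. */
static inline u8 *blkcipher_get_spot(u8 *start, unsigned int len)
{
	u8 *end_page = (u8 *)(((unsigned long)(start + len - 1)) & PAGE_MASK);
	return max(start, end_page);
}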
Example #13
static int esp6_init_state(struct xfrm_state *x)
{
	struct esp_data *esp = NULL;
	struct crypto_blkcipher *tfm;

	/* null auth and encryption can have zero length keys */
	if (x->aalg) {
		if (x->aalg->alg_key_len > 512)
			goto error;
	}
	if (x->ealg == NULL)
		goto error;

	if (x->encap)
		goto error;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;
		struct crypto_hash *hash;

		esp->auth.key = x->aalg->alg_key;
		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
					 CRYPTO_ALG_ASYNC);
		if (IS_ERR(hash))
			goto error;

		esp->auth.tfm = hash;
		if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
			goto error;
 
		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);
 
		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
		    crypto_hash_digestsize(hash)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_hash_digestsize(hash),
				 aalg_desc->uinfo.auth.icv_fullbits/8);
			goto error;
		}
 
		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
 
		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
		if (!esp->auth.work_icv)
			goto error;
	}
	esp->conf.key = x->ealg->alg_key;
	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;
	esp->conf.tfm = tfm;
	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
	esp->conf.padlen = 0;
	if (esp->conf.ivlen) {
		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
		if (unlikely(esp->conf.ivec == NULL))
			goto error;
		esp->conf.ivinitted = 0;
	}
	if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
		goto error;
	x->props.header_len = sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct ipv6hdr);
	x->data = esp;
	return 0;

error:
	x->data = esp;
	esp6_destroy(x);
	x->data = NULL;
	return -EINVAL;
}
Example #14
static int aml_keybox_aes_encrypt(const void *key, int key_len,
                                  const u8 *aes_iv, void *dst, size_t *dst_len,
                                  const void *src, size_t src_len)
{
    struct scatterlist sg_in[1], sg_out[1];
    struct crypto_blkcipher *tfm = aml_keybox_crypto_alloc_cipher();
    struct blkcipher_desc desc =
        { .tfm = tfm, .flags = 0 };
	int ret;
	void *iv;
	int ivsize;
	if(src_len & 0x0f){
		printk("%s:%d,src_len %d is not 16byte align",__func__,__LINE__,src_len);
		return -1;
	}

	if (IS_ERR(tfm)){
		printk("%s:%d,crypto_alloc fail\n",__func__,__LINE__);
		return PTR_ERR(tfm);
	}

	*dst_len = src_len ;

	crypto_blkcipher_setkey((void *) tfm, key, key_len);
	sg_init_table(sg_in, 1);
	sg_set_buf(&sg_in[0], src, src_len);
	sg_init_table(sg_out, 1);
	sg_set_buf(sg_out, dst, *dst_len);
	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);

	//printk("key_len:%d,ivsize:%d\n",key_len,ivsize);

	memcpy(iv, aes_iv, ivsize);

    ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,src_len);
    crypto_free_blkcipher(tfm);
    if (ret < 0){
        printk("%s:%d,ceph_aes_crypt failed %d\n", __func__,__LINE__,ret);
        return ret;
	}
    return 0;
}
static int aml_keybox_aes_decrypt(const void *key, int key_len,
                                      const u8 *aes_iv, void *dst,
                                      size_t *dst_len, const void *src,
                                      size_t src_len)
{
    struct scatterlist sg_in[1], sg_out[1];
    struct crypto_blkcipher *tfm = aml_keybox_crypto_alloc_cipher();
    struct blkcipher_desc desc =
        { .tfm = tfm };
    void *iv;
    int ivsize;
    int ret;
//    int last_byte;
	if(src_len &0x0f){
		printk("%s:%d,src_len %d is not 16byte align",__func__,__LINE__,src_len);
		return -1;
	}

	if (IS_ERR(tfm)){
		printk("%s:%d,crypto_alloc fail\n",__func__,__LINE__);
		return PTR_ERR(tfm);
	}

	crypto_blkcipher_setkey((void *) tfm, key, key_len);
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 1);
	sg_set_buf(sg_in, src, src_len);
	sg_set_buf(&sg_out[0], dst, *dst_len);

	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);

	//printk("key_len:%d,ivsize:%d\n",key_len,ivsize);

	memcpy(iv, aes_iv, ivsize);

	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
	crypto_free_blkcipher(tfm);
	if (ret < 0){
		printk("%s:%d,ceph_aes_decrypt failed %d\n", __func__,__LINE__,ret);
		return ret;
	}
	*dst_len = src_len;
    return 0;
}

int aes_crypto_encrypt(void *dst,size_t *dst_len,const void *src,size_t src_len)
{
	int ret;
	unsigned char iv_aes[16];
	unsigned char key_aes[32];
	memcpy(iv_aes,&default_AESkey[0],16);
	memcpy(key_aes,&default_AESkey[16],32);

	ret = aml_keybox_aes_encrypt(key_aes,sizeof(key_aes),iv_aes,dst,dst_len,src,src_len);
	return ret;
}
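Only the encrypt wrapper is shown above. A matching decrypt wrapper, written here as a hypothetical mirror of aes_crypto_encrypt() rather than quoted from the original project, would look like:

/* Hypothetical counterpart to aes_crypto_encrypt(); not part of the original listing. */
int aes_crypto_decrypt(void *dst, size_t *dst_len, const void *src, size_t src_len)
{
	unsigned char iv_aes[16];
	unsigned char key_aes[32];

	memcpy(iv_aes, &default_AESkey[0], 16);
	memcpy(key_aes, &default_AESkey[16], 32);

	return aml_keybox_aes_decrypt(key_aes, sizeof(key_aes), iv_aes,
				      dst, dst_len, src, src_len);
}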
Example #15
File: crypto.c Project: 7799/linux
/*
 * CC-MAC function WUSB1.0[6.5]
 *
 * Take a data string and produce the encrypted CBC Counter-mode MIC
 *
 * Note the names for most function arguments are made to (more or
 * less) match those used in the pseudo-function definition given in
 * WUSB1.0[6.5].
 *
 * @tfm_cbc: CBC(AES) blkcipher handle (initialized)
 *
 * @tfm_aes: AES cipher handle (initialized)
 *
 * @mic: buffer for placing the computed MIC (Message Integrity
 *       Code). This is exactly 8 bytes, and we expect the buffer to
 *       be at least eight bytes in length.
 *
 * @key: 128 bit symmetric key
 *
 * @n: CCM nonce
 *
 * @a: ASCII string, 14 bytes long (I guess zero padded if needed;
 *     we use exactly 14 bytes).
 *
 * @b: data stream to be processed; cannot be a global or const local
 *     (will confuse the scatterlists)
 *
 * @blen: size of b...
 *
 * Still not very clear how this is done, but looks like this: we
 * create block B0 (as WUSB1.0[6.5] says), then we AES-crypt it with
 * @key. We bytewise xor B0 with B1 (1) and AES-crypt that. Then we
 * take the payload and divide it in blocks (16 bytes), xor them with
 * the previous crypto result (16 bytes) and crypt it, repeat the next
 * block with the output of the previous one, rinse wash (I guess this
 * is what AES CBC mode means...but I truly have no idea). So we use
 * the CBC(AES) blkcipher, that does precisely that. The IV (Initial
 * Vector) is 16 bytes and is set to zero, so
 *
 * See rfc3610. Linux crypto has a CBC implementation, but the
 * documentation is scarce, to say the least, and the example code is
 * so intricated that is difficult to understand how things work. Most
 * of this is guess work -- bite me.
 *
 * (1) Created as 6.5 says, again, using as l(a) 'Blen + 14', and
 *     using the 14 bytes of @a to fill up
 *     b1.{mac_header,e0,security_reserved,padding}.
 *
 * NOTE: The definition of l(a) in WUSB1.0[6.5] vs the definition of
 *       l(m) is orthogonal, they bear no relationship, so it is not
 *       in conflict with the parameter's relation that
 *       WUSB1.0[6.4.2]) defines.
 *
 * NOTE: WUSB1.0[A.1]: Host Nonce is missing a nibble? (1e); fixed in
 *       first errata released on 2005/07.
 *
 * NOTE: we need to clean IV to zero at each invocation to make sure
 *       we start with a fresh empty Initial Vector, so that the CBC
 *       works ok.
 *
 * NOTE: blen is not aligned to a block size, we'll pad zeros, that's
 *       what sg[4] is for. Maybe there is a smarter way to do this.
 */
static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
			struct crypto_cipher *tfm_aes, void *mic,
			const struct aes_ccm_nonce *n,
			const struct aes_ccm_label *a, const void *b,
			size_t blen)
{
	int result = 0;
	struct blkcipher_desc desc;
	struct aes_ccm_b0 b0;
	struct aes_ccm_b1 b1;
	struct aes_ccm_a ax;
	struct scatterlist sg[4], sg_dst;
	void *iv, *dst_buf;
	size_t ivsize, dst_size;
	const u8 bzero[16] = { 0 };
	size_t zero_padding;

	/*
	 * These checks should be compile time optimized out
	 * ensure @a fills b1's mac_header and following fields
	 */
	WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la));
	WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block));
	WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block));
	WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block));

	result = -ENOMEM;
	zero_padding = blen % sizeof(struct aes_ccm_block);
	if (zero_padding)
		zero_padding = sizeof(struct aes_ccm_block) - zero_padding;
	dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding;
	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (dst_buf == NULL) {
		printk(KERN_ERR "E: can't alloc destination buffer\n");
		goto error_dst_buf;
	}

	iv = crypto_blkcipher_crt(tfm_cbc)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm_cbc);
	memset(iv, 0, ivsize);

	/* Setup B0 */
	b0.flags = 0x59;	/* Format B0 */
	b0.ccm_nonce = *n;
	b0.lm = cpu_to_be16(0);	/* WUSB1.0[6.5] sez l(m) is 0 */

	/* Setup B1
	 *
	 * The WUSB spec is anything but clear! WUSB1.0[6.5]
	 * says that to initialize B1 from A with 'l(a) = blen +
	 * 14'--after clarification, it means to use A's contents
	 * for MAC Header, EO, sec reserved and padding.
	 */
	b1.la = cpu_to_be16(blen + 14);
	memcpy(&b1.mac_header, a, sizeof(*a));

	sg_init_table(sg, ARRAY_SIZE(sg));
	sg_set_buf(&sg[0], &b0, sizeof(b0));
	sg_set_buf(&sg[1], &b1, sizeof(b1));
	sg_set_buf(&sg[2], b, blen);
	/* 0 if well behaved :) */
	sg_set_buf(&sg[3], bzero, zero_padding);
	sg_init_one(&sg_dst, dst_buf, dst_size);

	desc.tfm = tfm_cbc;
	desc.flags = 0;
	result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size);
	if (result < 0) {
		printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n",
		       result);
		goto error_cbc_crypt;
	}

	/* Now we crypt the MIC Tag (*iv) with Ax -- values per WUSB1.0[6.5]
	 * The procedure is to AES crypt the A0 block and XOR the MIC
	 * Tag against it; we only do the first 8 bytes and place it
	 * directly in the destination buffer.
	 *
	 * POS Crypto API: size is assumed to be AES's block size.
	 * Thanks for documenting it -- tip taken from airo.c
	 */
	ax.flags = 0x01;		/* as per WUSB 1.0 spec */
	ax.ccm_nonce = *n;
	ax.counter = 0;
	crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax);
	bytewise_xor(mic, &ax, iv, 8);
	result = 8;
error_cbc_crypt:
	kfree(dst_buf);
error_dst_buf:
	return result;
}
Example #16
static int ceph_aes_encrypt(const void *key, int key_len,
			    void *dst, size_t *dst_len,
			    const void *src, size_t src_len)
{
	struct scatterlist sg_in[2], sg_out[1];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
	int ret;
	void *iv;
	int ivsize;
	size_t zero_padding = (0x10 - (src_len & 0x0f));
	char pad[16];

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	memset(pad, zero_padding, zero_padding);

	*dst_len = src_len + zero_padding;

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	sg_init_table(sg_in, 2);
	sg_set_buf(&sg_in[0], src, src_len);
	sg_set_buf(&sg_in[1], pad, zero_padding);
	sg_init_table(sg_out, 1);
	sg_set_buf(sg_out, dst, *dst_len);
	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);

	memcpy(iv, aes_iv, ivsize);
	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
				     src_len + zero_padding);
	crypto_free_blkcipher(tfm);
	if (ret < 0)
		pr_err("ceph_aes_crypt failed %d\n", ret);
	return 0;
}

static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
			     size_t *dst_len,
			     const void *src1, size_t src1_len,
			     const void *src2, size_t src2_len)
{
	struct scatterlist sg_in[3], sg_out[1];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
	int ret;
	void *iv;
	int ivsize;
	size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
	char pad[16];

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	memset(pad, zero_padding, zero_padding);

	*dst_len = src1_len + src2_len + zero_padding;

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	sg_init_table(sg_in, 3);
	sg_set_buf(&sg_in[0], src1, src1_len);
	sg_set_buf(&sg_in[1], src2, src2_len);
	sg_set_buf(&sg_in[2], pad, zero_padding);
	sg_init_table(sg_out, 1);
	sg_set_buf(sg_out, dst, *dst_len);
	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);

	memcpy(iv, aes_iv, ivsize);
	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
				     src1_len + src2_len + zero_padding);
	crypto_free_blkcipher(tfm);
	if (ret < 0)
		pr_err("ceph_aes_crypt2 failed %d\n", ret);
	return 0;
}

static int ceph_aes_decrypt(const void *key, int key_len,
			    void *dst, size_t *dst_len,
			    const void *src, size_t src_len)
{
	struct scatterlist sg_in[1], sg_out[2];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm };
	char pad[16];
	void *iv;
	int ivsize;
	int ret;
	int last_byte;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 2);
	sg_set_buf(sg_in, src, src_len);
	sg_set_buf(&sg_out[0], dst, *dst_len);
	sg_set_buf(&sg_out[1], pad, sizeof(pad));

	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);

	memcpy(iv, aes_iv, ivsize);


	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
	crypto_free_blkcipher(tfm);
	if (ret < 0) {
		pr_err("ceph_aes_decrypt failed %d\n", ret);
		return ret;
	}

	if (src_len <= *dst_len)
		last_byte = ((char *)dst)[src_len - 1];
	else
		last_byte = pad[src_len - *dst_len - 1];
	if (last_byte <= 16 && src_len >= last_byte) {
		*dst_len = src_len - last_byte;
	} else {
		pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
		       last_byte, (int)src_len);
		return -EPERM;  
	}
	return 0;
}

static int ceph_aes_decrypt2(const void *key, int key_len,
			     void *dst1, size_t *dst1_len,
			     void *dst2, size_t *dst2_len,
			     const void *src, size_t src_len)
{
	struct scatterlist sg_in[1], sg_out[3];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm };
	char pad[16];
	void *iv;
	int ivsize;
	int ret;
	int last_byte;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	sg_init_table(sg_in, 1);
	sg_set_buf(sg_in, src, src_len);
	sg_init_table(sg_out, 3);
	sg_set_buf(&sg_out[0], dst1, *dst1_len);
	sg_set_buf(&sg_out[1], dst2, *dst2_len);
	sg_set_buf(&sg_out[2], pad, sizeof(pad));

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);

	memcpy(iv, aes_iv, ivsize);


	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
	crypto_free_blkcipher(tfm);
	if (ret < 0) {
		pr_err("ceph_aes_decrypt failed %d\n", ret);
		return ret;
	}

	if (src_len <= *dst1_len)
		last_byte = ((char *)dst1)[src_len - 1];
	else if (src_len <= *dst1_len + *dst2_len)
		last_byte = ((char *)dst2)[src_len - *dst1_len - 1];
	else
		last_byte = pad[src_len - *dst1_len - *dst2_len - 1];
	if (last_byte <= 16 && src_len >= last_byte) {
		src_len -= last_byte;
	} else {
		pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
		       last_byte, (int)src_len);
		return -EPERM;  
	}

	if (src_len < *dst1_len) {
		*dst1_len = src_len;
		*dst2_len = 0;
	} else {
		*dst2_len = src_len - *dst1_len;
	}

	return 0;
}


int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		 const void *src, size_t src_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src_len)
			return -ERANGE;
		memcpy(dst, src, src_len);
		*dst_len = src_len;
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_decrypt(secret->key, secret->len, dst,
					dst_len, src, src_len);

	default:
		return -EINVAL;
	}
}

int ceph_decrypt2(struct ceph_crypto_key *secret,
			void *dst1, size_t *dst1_len,
			void *dst2, size_t *dst2_len,
			const void *src, size_t src_len)
{
	size_t t;

	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst1_len + *dst2_len < src_len)
			return -ERANGE;
		t = min(*dst1_len, src_len);
		memcpy(dst1, src, t);
		*dst1_len = t;
		src += t;
		src_len -= t;
		if (src_len) {
			t = min(*dst2_len, src_len);
			memcpy(dst2, src, t);
			*dst2_len = t;
		}
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_decrypt2(secret->key, secret->len,
					 dst1, dst1_len, dst2, dst2_len,
					 src, src_len);

	default:
		return -EINVAL;
	}
}

int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		 const void *src, size_t src_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src_len)
			return -ERANGE;
		memcpy(dst, src, src_len);
		*dst_len = src_len;
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_encrypt(secret->key, secret->len, dst,
					dst_len, src, src_len);

	default:
		return -EINVAL;
	}
}

int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		  const void *src1, size_t src1_len,
		  const void *src2, size_t src2_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src1_len + src2_len)
			return -ERANGE;
		memcpy(dst, src1, src1_len);
		memcpy(dst + src1_len, src2, src2_len);
		*dst_len = src1_len + src2_len;
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len,
					 src1, src1_len, src2, src2_len);

	default:
		return -EINVAL;
	}
}

int ceph_key_instantiate(struct key *key, const void *data, size_t datalen)
{
	struct ceph_crypto_key *ckey;
	int ret;
	void *p;

	ret = -EINVAL;
	if (datalen <= 0 || datalen > 32767 || !data)
		goto err;

	ret = key_payload_reserve(key, datalen);
	if (ret < 0)
		goto err;

	ret = -ENOMEM;
	ckey = kmalloc(sizeof(*ckey), GFP_KERNEL);
	if (!ckey)
		goto err;

	
	p = (void *)data;
	ret = ceph_crypto_key_decode(ckey, &p, (char*)data+datalen);
	if (ret < 0)
		goto err_ckey;

	key->payload.data = ckey;
	return 0;

err_ckey:
	kfree(ckey);
err:
	return ret;
}

int ceph_key_match(const struct key *key, const void *description)
{
	return strcmp(key->description, description) == 0;
}

void ceph_key_destroy(struct key *key) {
	struct ceph_crypto_key *ckey = key->payload.data;

	ceph_crypto_key_destroy(ckey);
	kfree(ckey);
}

struct key_type key_type_ceph = {
	.name		= "ceph",
	.instantiate	= ceph_key_instantiate,
	.match		= ceph_key_match,
	.destroy	= ceph_key_destroy,
};

int ceph_crypto_init(void) {
	return register_key_type(&key_type_ceph);
}

void ceph_crypto_shutdown(void) {
	unregister_key_type(&key_type_ceph);
}
Example #17
static int wusb_ccm_mac(struct crypto_blkcipher *tfm_cbc,
			struct crypto_cipher *tfm_aes, void *mic,
			const struct aes_ccm_nonce *n,
			const struct aes_ccm_label *a, const void *b,
			size_t blen)
{
	int result = 0;
	struct blkcipher_desc desc;
	struct aes_ccm_b0 b0;
	struct aes_ccm_b1 b1;
	struct aes_ccm_a ax;
	struct scatterlist sg[4], sg_dst;
	void *iv, *dst_buf;
	size_t ivsize, dst_size;
	const u8 bzero[16] = { 0 };
	size_t zero_padding;

	WARN_ON(sizeof(*a) != sizeof(b1) - sizeof(b1.la));
	WARN_ON(sizeof(b0) != sizeof(struct aes_ccm_block));
	WARN_ON(sizeof(b1) != sizeof(struct aes_ccm_block));
	WARN_ON(sizeof(ax) != sizeof(struct aes_ccm_block));

	result = -ENOMEM;
	zero_padding = blen % sizeof(struct aes_ccm_block);
	if (zero_padding)
		zero_padding = sizeof(struct aes_ccm_block) - zero_padding;
	dst_size = blen + sizeof(b0) + sizeof(b1) + zero_padding;
	dst_buf = kzalloc(dst_size, GFP_KERNEL);
	if (dst_buf == NULL) {
		printk(KERN_ERR "E: can't alloc destination buffer\n");
		goto error_dst_buf;
	}

	iv = crypto_blkcipher_crt(tfm_cbc)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm_cbc);
	memset(iv, 0, ivsize);

	
	b0.flags = 0x59;	
	b0.ccm_nonce = *n;
	b0.lm = cpu_to_be16(0);	

	b1.la = cpu_to_be16(blen + 14);
	memcpy(&b1.mac_header, a, sizeof(*a));

	sg_init_table(sg, ARRAY_SIZE(sg));
	sg_set_buf(&sg[0], &b0, sizeof(b0));
	sg_set_buf(&sg[1], &b1, sizeof(b1));
	sg_set_buf(&sg[2], b, blen);
	
	sg_set_buf(&sg[3], bzero, zero_padding);
	sg_init_one(&sg_dst, dst_buf, dst_size);

	desc.tfm = tfm_cbc;
	desc.flags = 0;
	result = crypto_blkcipher_encrypt(&desc, &sg_dst, sg, dst_size);
	if (result < 0) {
		printk(KERN_ERR "E: can't compute CBC-MAC tag (MIC): %d\n",
		       result);
		goto error_cbc_crypt;
	}

	ax.flags = 0x01;		
	ax.ccm_nonce = *n;
	ax.counter = 0;
	crypto_cipher_encrypt_one(tfm_aes, (void *)&ax, (void *)&ax);
	bytewise_xor(mic, &ax, iv, 8);
	result = 8;
error_cbc_crypt:
	kfree(dst_buf);
error_dst_buf:
	return result;
}
Example #18
static int skcipher_crypt_blkcipher(struct skcipher_request *req,
				    int (*crypt)(struct blkcipher_desc *,
						 struct scatterlist *,
						 struct scatterlist *,
						 unsigned int))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_blkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct blkcipher_desc desc = {
		.tfm = *ctx,
		.info = req->iv,
		.flags = req->base.flags,
	};


	return crypt(&desc, req->dst, req->src, req->cryptlen);
}

static int skcipher_encrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_blkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct blkcipher_alg *alg = &tfm->__crt_alg->cra_blkcipher;

	return skcipher_crypt_blkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_blkcipher(*ctx);
}

static int crypto_init_skcipher_ops_blkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_blkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_blkcipher *blkcipher;
	struct crypto_tfm *btfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	btfm = __crypto_alloc_tfm(calg, CRYPTO_ALG_TYPE_BLKCIPHER,
					CRYPTO_ALG_TYPE_MASK);
	if (IS_ERR(btfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(btfm);
	}

	blkcipher = __crypto_blkcipher_cast(btfm);
	*ctx = blkcipher;
	tfm->exit = crypto_exit_skcipher_ops_blkcipher;

	skcipher->setkey = skcipher_setkey_blkcipher;
	skcipher->encrypt = skcipher_encrypt_blkcipher;
	skcipher->decrypt = skcipher_decrypt_blkcipher;

	skcipher->ivsize = crypto_blkcipher_ivsize(blkcipher);
	skcipher->keysize = calg->cra_blkcipher.max_keysize;

	return 0;
}

static int skcipher_setkey_ablkcipher(struct crypto_skcipher *tfm,
				      const u8 *key, unsigned int keylen)
{
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher = *ctx;
	int err;

	crypto_ablkcipher_clear_flags(ablkcipher, ~0);
	crypto_ablkcipher_set_flags(ablkcipher,
				    crypto_skcipher_get_flags(tfm) &
				    CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(ablkcipher, key, keylen);
	crypto_skcipher_set_flags(tfm,
				  crypto_ablkcipher_get_flags(ablkcipher) &
				  CRYPTO_TFM_RES_MASK);

	return err;
}

static int skcipher_crypt_ablkcipher(struct skcipher_request *req,
				     int (*crypt)(struct ablkcipher_request *))
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct crypto_ablkcipher **ctx = crypto_skcipher_ctx(tfm);
	struct ablkcipher_request *subreq = skcipher_request_ctx(req);

	ablkcipher_request_set_tfm(subreq, *ctx);
	ablkcipher_request_set_callback(subreq, skcipher_request_flags(req),
					req->base.complete, req->base.data);
	ablkcipher_request_set_crypt(subreq, req->src, req->dst, req->cryptlen,
				     req->iv);

	return crypt(subreq);
}

static int skcipher_encrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->encrypt);
}

static int skcipher_decrypt_ablkcipher(struct skcipher_request *req)
{
	struct crypto_skcipher *skcipher = crypto_skcipher_reqtfm(req);
	struct crypto_tfm *tfm = crypto_skcipher_tfm(skcipher);
	struct ablkcipher_alg *alg = &tfm->__crt_alg->cra_ablkcipher;

	return skcipher_crypt_ablkcipher(req, alg->decrypt);
}

static void crypto_exit_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);

	crypto_free_ablkcipher(*ctx);
}

static int crypto_init_skcipher_ops_ablkcipher(struct crypto_tfm *tfm)
{
	struct crypto_alg *calg = tfm->__crt_alg;
	struct crypto_skcipher *skcipher = __crypto_skcipher_cast(tfm);
	struct crypto_ablkcipher **ctx = crypto_tfm_ctx(tfm);
	struct crypto_ablkcipher *ablkcipher;
	struct crypto_tfm *abtfm;

	if (!crypto_mod_get(calg))
		return -EAGAIN;

	abtfm = __crypto_alloc_tfm(calg, 0, 0);
	if (IS_ERR(abtfm)) {
		crypto_mod_put(calg);
		return PTR_ERR(abtfm);
	}

	ablkcipher = __crypto_ablkcipher_cast(abtfm);
	*ctx = ablkcipher;
	tfm->exit = crypto_exit_skcipher_ops_ablkcipher;

	skcipher->setkey = skcipher_setkey_ablkcipher;
	skcipher->encrypt = skcipher_encrypt_ablkcipher;
	skcipher->decrypt = skcipher_decrypt_ablkcipher;

	skcipher->ivsize = crypto_ablkcipher_ivsize(ablkcipher);
	skcipher->reqsize = crypto_ablkcipher_reqsize(ablkcipher) +
			    sizeof(struct ablkcipher_request);
	skcipher->keysize = calg->cra_ablkcipher.max_keysize;

	return 0;
}

static int crypto_skcipher_init_tfm(struct crypto_tfm *tfm)
{
	if (tfm->__crt_alg->cra_type == &crypto_blkcipher_type)
		return crypto_init_skcipher_ops_blkcipher(tfm);

	BUG_ON(tfm->__crt_alg->cra_type != &crypto_ablkcipher_type &&
	       tfm->__crt_alg->cra_type != &crypto_givcipher_type);

	return crypto_init_skcipher_ops_ablkcipher(tfm);
}

static const struct crypto_type crypto_skcipher_type2 = {
	.extsize = crypto_skcipher_extsize,
	.init_tfm = crypto_skcipher_init_tfm,
	.maskclear = ~CRYPTO_ALG_TYPE_MASK,
	.maskset = CRYPTO_ALG_TYPE_BLKCIPHER_MASK,
	.type = CRYPTO_ALG_TYPE_BLKCIPHER,
	.tfmsize = offsetof(struct crypto_skcipher, base),
};

struct crypto_skcipher *crypto_alloc_skcipher(const char *alg_name,
					      u32 type, u32 mask)
{
	return crypto_alloc_tfm(alg_name, &crypto_skcipher_type2, type, mask);
}
EXPORT_SYMBOL_GPL(crypto_alloc_skcipher);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Symmetric key cipher type");
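The glue above lets a caller use the newer skcipher request API even when the underlying algorithm is still registered through the legacy (a)blkcipher types. A minimal synchronous usage sketch follows; the "cbc(aes)" name, the wrapper function and its arguments are illustrative assumptions.

/* Hypothetical synchronous caller of the skcipher front end. */
static int example_skcipher_encrypt(const u8 *key, unsigned int keylen,
				    struct scatterlist *src,
				    struct scatterlist *dst,
				    unsigned int len, u8 *iv)
{
	struct crypto_skcipher *tfm;
	struct skcipher_request *req;
	int err;

	/* CRYPTO_ALG_ASYNC in the mask requests a synchronous implementation */
	tfm = crypto_alloc_skcipher("cbc(aes)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	err = crypto_skcipher_setkey(tfm, key, keylen);
	if (err)
		goto out_free_tfm;

	req = skcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req) {
		err = -ENOMEM;
		goto out_free_tfm;
	}

	skcipher_request_set_callback(req, 0, NULL, NULL);
	skcipher_request_set_crypt(req, src, dst, len, iv);
	err = crypto_skcipher_encrypt(req);

	skcipher_request_free(req);
out_free_tfm:
	crypto_free_skcipher(tfm);
	return err;
}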
Example #19
static int esp_init_state(struct xfrm_state *x)
{
	struct esp_data *esp = NULL;
	struct crypto_blkcipher *tfm;
	u32 align;

	/* null auth and encryption can have zero length keys */
	if (x->aalg) {
		if (x->aalg->alg_key_len > 512)
			goto error;
	}
	if (x->ealg == NULL)
		goto error;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;
		struct crypto_hash *hash;

		esp->auth.key = x->aalg->alg_key;
		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
					 CRYPTO_ALG_ASYNC);
		if (IS_ERR(hash))
			goto error;

		esp->auth.tfm = hash;
		if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
			goto error;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
		    crypto_hash_digestsize(hash)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_hash_digestsize(hash),
				 aalg_desc->uinfo.auth.icv_fullbits/8);
			goto error;
		}

		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
		if (!esp->auth.work_icv)
			goto error;
	}
	esp->conf.key = x->ealg->alg_key;
	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;
	esp->conf.tfm = tfm;
	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
	esp->conf.padlen = 0;
	if (esp->conf.ivlen) {
		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
		if (unlikely(esp->conf.ivec == NULL))
			goto error;
		esp->conf.ivinitted = 0;
	}
	if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
		goto error;
	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}
	x->data = esp;
	align = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
	if (esp->conf.padlen)
		align = max_t(u32, align, esp->conf.padlen);
	x->props.trailer_len = align + 1 + esp->auth.icv_trunc_len;
	return 0;

error:
	x->data = esp;
	esp_destroy(x);
	x->data = NULL;
	return -EINVAL;
}
Example #20
static int tf_self_test_perform_blkcipher(
	const char *alg_name,
	const struct blkcipher_test_vector *tv,
	bool decrypt)
{
	struct blkcipher_desc desc = {0};
	struct scatterlist sg_in, sg_out;
	unsigned char *in = NULL;
	unsigned char *out = NULL;
	unsigned in_size, out_size;
	int error;

	desc.tfm = crypto_alloc_blkcipher(alg_name, 0, 0);
	if (IS_ERR_OR_NULL(desc.tfm)) {
		ERROR("crypto_alloc_blkcipher(%s) failed", alg_name);
		error = (desc.tfm == NULL ? -ENOMEM : (int)desc.tfm);
		goto abort;
	}
	INFO("%s alg_name=%s driver_name=%s key_size=%u block_size=%u",
	     decrypt ? "decrypt" : "encrypt", alg_name,
	     crypto_tfm_alg_driver_name(crypto_blkcipher_tfm(desc.tfm)),
	     tv->key_length * 8,
	     crypto_blkcipher_blocksize(desc.tfm));

	in_size = tv->length;
	in = kmalloc(in_size, GFP_KERNEL);
	if (IS_ERR_OR_NULL(in)) {
		ERROR("kmalloc(%u) failed: %d", in_size, (int)in);
		error = (in == NULL ? -ENOMEM : (int)in);
		goto abort;
	}
	memcpy(in, decrypt ? tv->ciphertext : tv->plaintext,
	       tv->length);

	out_size = tv->length + crypto_blkcipher_blocksize(desc.tfm);
	out = kmalloc(out_size, GFP_KERNEL);
	if (IS_ERR_OR_NULL(out)) {
		ERROR("kmalloc(%u) failed: %d", out_size, (int)out);
		error = (out == NULL ? -ENOMEM : (int)out);
		goto abort;
	}

	error = crypto_blkcipher_setkey(desc.tfm, tv->key, tv->key_length);
	if (error) {
		ERROR("crypto_alloc_setkey(%s) failed", alg_name);
		goto abort;
	}
	TF_TRACE_ARRAY(tv->key, tv->key_length);
	if (tv->iv != NULL) {
		unsigned iv_length = crypto_blkcipher_ivsize(desc.tfm);
		crypto_blkcipher_set_iv(desc.tfm, tv->iv, iv_length);
		TF_TRACE_ARRAY(tv->iv, iv_length);
	}

	sg_init_one(&sg_in, in, tv->length);
	sg_init_one(&sg_out, out, tv->length);
	TF_TRACE_ARRAY(in, tv->length);
	error = (decrypt ? crypto_blkcipher_decrypt : crypto_blkcipher_encrypt)
		(&desc, &sg_out, &sg_in, tv->length);
	if (error) {
		ERROR("crypto_blkcipher_%s(%s) failed",
		      decrypt ? "decrypt" : "encrypt", alg_name);
		goto abort;
	}
	TF_TRACE_ARRAY(out, tv->length);

	crypto_free_blkcipher(desc.tfm);

	if (memcmp((decrypt ? tv->plaintext : tv->ciphertext),
		   out, tv->length)) {
		ERROR("Wrong %s/%u %s result", alg_name, tv->key_length * 8,
		      decrypt ? "decryption" : "encryption");
		error = -EINVAL;
	} else {
		INFO("%s/%u: %s successful", alg_name, tv->key_length * 8,
		     decrypt ? "decryption" : "encryption");
		error = 0;
	}
	kfree(in);
	kfree(out);
	return error;

abort:
	if (!IS_ERR_OR_NULL(desc.tfm))
		crypto_free_blkcipher(desc.tfm);
	if (!IS_ERR_OR_NULL(out))
		kfree(out);
	if (!IS_ERR_OR_NULL(in))
		kfree(in);
	return error;
}
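The struct blkcipher_test_vector consumed above is defined elsewhere in that driver; reconstructed from the fields the function touches, its layout is assumed to be roughly:

/* Assumed layout, inferred from the accesses in tf_self_test_perform_blkcipher(). */
struct blkcipher_test_vector {
	const unsigned char *key;
	unsigned int key_length;	/* key size in bytes */
	const unsigned char *iv;	/* NULL when the mode takes no IV */
	const unsigned char *plaintext;
	const unsigned char *ciphertext;
	unsigned int length;		/* plaintext/ciphertext size in bytes */
};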
Example #21
/* 	
 *	Function definition for sys_xcrypt.
 *	This function encrypts/decrypts files using AES Block Cipher algorithm using CBC mode.
 */
asmlinkage int sys_xcrypt( const char * const infile, const char * const opfile, const char * const keybuf, const int keylen, const short int flags ) {
	const char algo[] = "cbc(aes)";
	char *ipBuf = NULL, *opBuf = NULL, *iv = NULL, *inFile = NULL, *opFile = NULL, *keyBuf = NULL;
	int errno = 0, ret = 0;
	int actReadLen = 0, actWriteLen = 0, padLen = 0, blkSiz = 0, ipFileLen = 0, opFileLen = 0, keyLen = 0;
	int delOpFile = 0, prmbLen = 0, idx = 0;
	unsigned int fileSize = 0, factor = 1;
	struct file *inFilePtr = NULL, *opFilePtr = NULL;
	struct crypto_blkcipher *tfm = NULL;
	struct blkcipher_desc desc;
	struct scatterlist sg[2];
	struct dentry *tmpDentry;
	struct inode *tmpInode = NULL;
	mm_segment_t oldfs;

	/* Check for NULL pointers or invalid values */
	if( ( NULL == infile ) || ( NULL == opfile ) || ( NULL == keybuf ) || ( ( _FLAG_ENCRYPT_ != flags ) && ( _FLAG_DECRYPT_ != flags ) ) ) {
		printk( KERN_ALERT "Invalid I/P" );
		errno = -EINVAL;
		goto OUT_OK;
	}

	/* Verify if all the pointers belong to the user's own address space */
	ret = access_ok( VERIFY_READ, infile, 0 );
	if( !ret ) {
		printk( KERN_ALERT "Invalid pointer to I/P file passed as argument" );
		errno = -EFAULT;
		goto OUT_OK;
	}

	ret = access_ok( VERIFY_READ, opfile, 0 );
	if( !ret ) {
		printk( KERN_ALERT "Invalid pointer to O/P file passed as argument" );
		errno = -EFAULT;
		goto OUT_OK;
	}

	ret = access_ok( VERIFY_READ, keybuf, 0 );
	if( !ret ) {
		printk( KERN_ALERT "Invalid pointer to Password passed as argument" );
		errno = -EFAULT;
		goto OUT_OK;
	}

	/* Find out the length of the i/p buffers */
	ipFileLen = strlen_user( infile );
	opFileLen = strlen_user( opfile );
	keyLen = strlen_user( keybuf );

	/* Allocate buffers to copy i/p arguments from user space to kernel space */
	inFile = kmalloc( ipFileLen, GFP_KERNEL );
	if( NULL == inFile ) {
		errno = -ENOMEM;
		goto OUT_OK;
	}
	else {
		ret = strncpy_from_user( inFile, infile, ipFileLen );
		if( ret < 0 ) {
			errno = ret;
			goto OUT_IP;
		}
	}
		
	opFile = kmalloc( opFileLen, GFP_KERNEL );
	if( NULL == opFile ) {
		errno = -ENOMEM;
		goto OUT_IP;
	}
	else {
		ret = strncpy_from_user( opFile, opfile, opFileLen );
		if( ret < 0 ) {
			errno = ret;
			goto OUT_IP;
		}
	}
		
	keyBuf = kmalloc( keyLen, GFP_KERNEL );
	if( NULL == keyBuf ) {
		errno = -ENOMEM;
		goto OUT_IP;
	}
	else {
		ret = strncpy_from_user( keyBuf, keybuf, keyLen );
		if( ret < 0 ) {
			errno = ret;
			goto OUT_IP;
		}
	}

	/* Open I/P file. It will report error in case of non-existing file and bad permissions but not bad owner */
	inFilePtr = filp_open( inFile, O_RDONLY, 0 );
	if ( !inFilePtr || IS_ERR( inFilePtr ) ) {
		errno = (int)PTR_ERR( inFilePtr );
		printk( KERN_ALERT "Error opening i/p file: %d\n", errno );
		inFilePtr = NULL;
		goto OUT_IP;
    	}

	/* Check if the file is a regular file or not */
	if( !S_ISREG( inFilePtr->f_path.dentry->d_inode->i_mode ) ) {
		printk( KERN_ALERT "Error as file is not a regular one" );
		errno = -EBADF;
		goto OUT_FILE;
	}

	/* Check if the I/p file and the process owner match */
	if( ( current->real_cred->uid != inFilePtr->f_path.dentry->d_inode->i_uid ) && ( current->real_cred->uid != 0 ) ) {
		printk( KERN_ALERT "Error as owner of file and process does not match" );
		errno = -EACCES;
		goto OUT_FILE;
	}

	/* Open O/P file with error handling */
	opFilePtr = filp_open( opFile, O_WRONLY | O_CREAT | O_EXCL, 0 );
	if ( !opFilePtr || IS_ERR( opFilePtr ) ) {
		errno = (int)PTR_ERR( opFilePtr );
		printk( KERN_ALERT "Error opening o/p file: %d\n", errno );
		opFilePtr = NULL;
		goto OUT_FILE;
	}

	/* 
	 * Check if the infile and opfile point to the same file
	 * If they reside on the different file partition and have same name then it should be allowed else not
	 */
	if( 	( inFilePtr->f_path.dentry->d_inode->i_sb == opFilePtr->f_path.dentry->d_inode->i_sb ) && 
		( inFilePtr->f_path.dentry->d_inode->i_ino ==  opFilePtr->f_path.dentry->d_inode->i_ino ) ) {
		printk( KERN_ALERT "I/p and O/p file cannot be same" );
		errno = -EINVAL;
		goto OUT_FILE;
	}

	/* Set the o/p file permission to i/p file */
	opFilePtr->f_path.dentry->d_inode->i_mode =  inFilePtr->f_path.dentry->d_inode->i_mode;
	
	/* Set the file position to the beginning of the file */
	inFilePtr->f_pos = 0;
	opFilePtr->f_pos = 0;

	/* Allocate buffer to read data into and to write data to. For performance reasons, set its size equal to PAGE_SIZE */
	ipBuf = kmalloc( PAGE_SIZE, GFP_KERNEL );
	if( NULL == ipBuf ) {
		errno = -ENOMEM;
		goto OUT_FILE;
	}
		
	memset( ipBuf, _NULL_CHAR_, PAGE_SIZE );

	opBuf = kmalloc( PAGE_SIZE, GFP_KERNEL );
	if( NULL == opBuf ) {
		errno = -ENOMEM;
		goto OUT_DATA_PAGE;
	}
		
	memset( opBuf, _NULL_CHAR_, PAGE_SIZE );

	/* Allocate tfm */
	tfm = crypto_alloc_blkcipher( algo, 0, CRYPTO_ALG_ASYNC );
	if ( IS_ERR( tfm ) ) {
		printk( KERN_ALERT "Failed to load transform for %s: %ld\n", algo, PTR_ERR( tfm ) );
		errno = -EINVAL;
		goto OUT_DATA_PAGE;
	}

	/* Initialize desc */
	desc.tfm = tfm;
	desc.flags = 0;

	ret = crypto_blkcipher_setkey( tfm, keyBuf, keylen );
	if( ret ) {
		printk( "Setkey() failed. Flags=%x\n", crypto_blkcipher_get_flags( tfm ) );
		errno = -EINVAL;
		goto OUT_CIPHER;
	}

	/* Initialize sg structure */
	FILL_SG( &sg[0], ipBuf, PAGE_SIZE );
	FILL_SG( &sg[1], opBuf, PAGE_SIZE );

	/* Get the block size */
	blkSiz = ((tfm->base).__crt_alg)->cra_blocksize;

	/* Initialize IV */
	iv = kmalloc( blkSiz, GFP_KERNEL );
	if( NULL == iv ) {
		errno = -ENOMEM;
		goto OUT_CIPHER;
	}
		
	memset( iv, _NULL_CHAR_, blkSiz );
	crypto_blkcipher_set_iv( tfm, iv, crypto_blkcipher_ivsize( tfm ) );

	/* Store the key and file size in encrypted form in the preamble */
	switch( flags ) {
		case _FLAG_ENCRYPT_:
			memcpy( ipBuf, keyBuf, keylen );
			prmbLen = keylen;
			fileSize = (unsigned int)inFilePtr->f_path.dentry->d_inode->i_size;

			while( fileSize ) {
				ipBuf[ prmbLen + idx ] = fileSize % 10;
				fileSize /= 10;
				++idx;
			}

			prmbLen += idx;

			#ifdef _DEBUG_
				printk( KERN_ALERT "idx=%d prmbLen=%d\n", idx, prmbLen );
			#endif

			memset( ipBuf + prmbLen, _ETX_, _UL_MAX_SIZE_ - idx );
			prmbLen += ( _UL_MAX_SIZE_ - idx );

			#ifdef _DEBUG_
				printk( KERN_ALERT "prmbLen=%d\n", prmbLen );
			#endif

			padLen = blkSiz - ( prmbLen % blkSiz );
			memset( ipBuf + prmbLen, _ETX_, padLen );
			prmbLen += padLen;
  
			#ifdef _DEBUG_
				printk( KERN_ALERT "padLen=%d prmbLen=%d\n", padLen, prmbLen );
			#endif

			ret = crypto_blkcipher_encrypt( &desc, &sg[1], &sg[0], prmbLen );
			if (ret) {
				printk( KERN_ALERT "Encryption failed. Flags=0x%x\n", tfm->base.crt_flags );
				delOpFile = 1;
				goto OUT_IV;
			}

			oldfs = get_fs();
			set_fs( KERNEL_DS );

			opFilePtr->f_op->write( opFilePtr, opBuf, prmbLen, &opFilePtr->f_pos );

			/* Reset the address space to user one */
			set_fs( oldfs );

			break;

		case _FLAG_DECRYPT_:
			/* Set the address space to kernel one */
			oldfs = get_fs();
			set_fs( KERNEL_DS );

			prmbLen = keylen + _UL_MAX_SIZE_;
			padLen = blkSiz - ( prmbLen % blkSiz );
			prmbLen += padLen;

			#ifdef _DEBUG_
				printk( KERN_ALERT "padLen=%d prmbLen=%d\n", padLen, prmbLen );
			#endif

			actReadLen = inFilePtr->f_op->read( inFilePtr, ipBuf, prmbLen, &inFilePtr->f_pos );
			if( actReadLen != prmbLen ) {
				printk( KERN_ALERT "Requested number of bytes for preamble are lesser" );
				delOpFile = 1;
				goto OUT_IV;
			}

			#ifdef _DEBUG_
				printk( KERN_ALERT "actReadLen=%d\n", actReadLen );
			#endif

			/* Reset the address space to user one */
			set_fs( oldfs );

			ret = crypto_blkcipher_decrypt( &desc, &sg[1], &sg[0], prmbLen );
			if (ret) {
				printk( KERN_ALERT "Decryption failed. Flags=0x%x\n", tfm->base.crt_flags );
				delOpFile = 1;
				goto OUT_IV;
			}

			ret = memcmp( keyBuf, opBuf, keylen );
			if( ret ) {
				printk( "Wrong password entered." );
				errno = -EKEYREJECTED;
				goto OUT_IV;
			}

			idx = 0;
			fileSize = 0;

			while( opBuf[ keylen + idx ] != _ETX_ ) {
				fileSize += opBuf[ keylen + idx ] * factor;
				factor *= 10;
				++idx;
			}

			#ifdef _DEBUG_
				printk( KERN_ALERT "idx=%d fileSize=%u\n", idx, fileSize );
			#endif

			break;
	}

	/* Read file till the file pointer reaches to the EOF */
	while( inFilePtr->f_pos < inFilePtr->f_path.dentry->d_inode->i_size ) {
		/* Initialize it to NULL char */
		memset( ipBuf, _NULL_CHAR_, PAGE_SIZE );
		memset( opBuf, _NULL_CHAR_, PAGE_SIZE );

		/* Set the address space to kernel one */
		oldfs = get_fs();
		set_fs( KERNEL_DS );

		actReadLen = inFilePtr->f_op->read( inFilePtr, ipBuf, PAGE_SIZE, &inFilePtr->f_pos );

		/* Reset the address space to user one */
		set_fs( oldfs );

		/* As per the i/p flag, do encryption/decryption */
		switch( flags ) {
			case _FLAG_ENCRYPT_:
				/* For encryption ensure padding as per the block size */
				#ifdef _DEBUG_
					printk( KERN_ALERT "Bytes read from I/P file ::%d::\n", actReadLen );
				#endif

				if( actReadLen % blkSiz ) {
					padLen = blkSiz - ( actReadLen % blkSiz );
					memset( ipBuf + actReadLen, _ETX_, padLen );
					actReadLen += padLen;
				}

				#ifdef _DEBUG_
					printk( KERN_ALERT "Pad Length ::%d::\n", padLen );
					printk( KERN_ALERT "Data read from I/P file ::%s::\n", ipBuf );
				#endif

				/* Encrypt the data */
				ret = crypto_blkcipher_encrypt( &desc, &sg[1], &sg[0], PAGE_SIZE );
				if (ret) {
					printk( KERN_ALERT "Encryption failed. Flags=0x%x\n", tfm->base.crt_flags );
					delOpFile = 1;
					goto OUT_IV;
				}

				break;

			case _FLAG_DECRYPT_:
				/* Decrypt the data */
				ret = crypto_blkcipher_decrypt( &desc, &sg[1], &sg[0], PAGE_SIZE );
				if (ret) {
					printk( KERN_ALERT "Decryption failed. Flags=0x%x\n", tfm->base.crt_flags );
					delOpFile = 1;
					goto OUT_IV;
				}

				#ifdef _DEBUG_
					printk( KERN_ALERT "Bytes read from I/P file ::%d::\n", actReadLen );
				#endif

				while( _ETX_ == opBuf[ actReadLen - 1 ] ) {
					opBuf[ actReadLen - 1 ] = _NULL_CHAR_;
					--actReadLen;
				}

				#ifdef _DEBUG_
					printk( KERN_ALERT "Bytes read from I/P file ::%d::\n", actReadLen );
					printk( KERN_ALERT "Data read from I/P file ::%s::\n", opBuf );
				#endif


				break;
		}

		/*
		 * Start writing to the o/p file
		 * Set the address space to kernel one
		 */
		oldfs = get_fs();
		set_fs( KERNEL_DS );

		actWriteLen = opFilePtr->f_op->write( opFilePtr, opBuf, actReadLen, &opFilePtr->f_pos );

		/* Reset the address space to user one */
		set_fs( oldfs );

		#ifdef _DEBUG_
			printk( KERN_ALERT "Bytes written to O/P file ::%d::\n", actWriteLen );
		#endif
	}

	/* Free iv */
	OUT_IV:
		kfree( iv );
		iv = NULL;
		printk( KERN_ALERT "Memory for IV freed ..." );

	/* Free tfm */
	OUT_CIPHER:
		crypto_free_blkcipher( tfm );
		printk( KERN_ALERT "Encryption Transform freed ..." );

	/* Free i/p and o/p buffers */
	OUT_DATA_PAGE:
		if( ipBuf ) {
			kfree( ipBuf );
			ipBuf = NULL;
		}

		if( opBuf ) {
			kfree( opBuf );
			opBuf = NULL;
		}

		printk( KERN_ALERT "Memory for encrption/decryption freed ..." );

	/* Close any open files */
	OUT_FILE:
		if( inFilePtr ) {
			filp_close( inFilePtr, NULL );
			inFilePtr = NULL;
			printk( KERN_ALERT "I/p file closed ..." );
		}

		if( opFilePtr ) {
			filp_close( opFilePtr, NULL );
			opFilePtr = NULL;
			printk( KERN_ALERT "O/p file closed ..." );
		}

		if( delOpFile ) {
			opFilePtr = filp_open( opFile, O_WRONLY , 0 );
			if ( !opFilePtr || IS_ERR( opFilePtr ) ) {
				opFilePtr = NULL;
				goto OUT_IP;
			}

			tmpDentry = opFilePtr->f_path.dentry;
			tmpInode = tmpDentry->d_parent->d_inode;

			filp_close( opFilePtr, NULL );
			vfs_unlink( tmpInode, tmpDentry );
			printk( KERN_ALERT "O/p file deleted ..." );
		}

	OUT_IP:
		if( inFile ) {
			kfree( inFile );
			inFile = NULL;
		}

		if( opFile ) {
			kfree( opFile );
			opFile = NULL;
		}

		if( keyBuf ) {
			kfree( keyBuf );
			keyBuf = NULL;
		}

		printk( KERN_ALERT "Memory for I/P parameters freed ..." );

	/* Return final status */
	OUT_OK:
		printk( KERN_ALERT "Exiting function sys_xcrypt ..." );
		return errno;
}
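Note on the padding used above: the module fills the final partial block with the _ETX_ character (presumably ASCII ETX, 0x03, given the macro name) before encrypting, and strips trailing ETX bytes after decrypting. Below is a minimal stand-alone sketch of that convention; it assumes a fixed 16-byte (AES) block size, whereas the module queries crypto_blkcipher_blocksize() at run time, and the helper names are ours, not part of the module.

/* Stand-alone sketch of the ETX padding scheme (user space, plain C). */
#include <stdio.h>
#include <string.h>

#define BLK_SIZ	16	/* AES block size, assumed fixed here        */
#define ETX	0x03	/* assumed value of _ETX_ (ASCII ETX)        */

/* Pad buf (len valid bytes, capacity >= len + BLK_SIZ) up to a block
 * boundary with ETX bytes; returns the padded length. */
static size_t etx_pad(unsigned char *buf, size_t len)
{
	size_t pad = (len % BLK_SIZ) ? BLK_SIZ - (len % BLK_SIZ) : 0;

	memset(buf + len, ETX, pad);
	return len + pad;
}

/* Strip trailing ETX bytes after decryption; returns the new length. */
static size_t etx_unpad(unsigned char *buf, size_t len)
{
	while (len > 0 && buf[len - 1] == ETX)
		buf[--len] = '\0';
	return len;
}

int main(void)
{
	unsigned char buf[32] = "hello";
	size_t padded = etx_pad(buf, 5);		/* 5  -> 16 */
	size_t restored = etx_unpad(buf, padded);	/* 16 -> 5  */

	printf("padded=%zu restored=%zu\n", padded, restored);
	return 0;
}

One consequence of this scheme is that plaintext which itself ends in an ETX byte cannot be distinguished from padding; the PKCS#7-style padding used in the ceph example below avoids that ambiguity by storing the pad length in every pad byte.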
Example #22
0
static int ceph_aes_encrypt(const void *key, int key_len,
			    void *dst, size_t *dst_len,
			    const void *src, size_t src_len)
{
	struct scatterlist sg_in[2], sg_out[1];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
	int ret;
	void *iv;
	int ivsize;
	size_t zero_padding = (0x10 - (src_len & 0x0f));
	char pad[16];

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	memset(pad, zero_padding, zero_padding);

	*dst_len = src_len + zero_padding;

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	sg_init_table(sg_in, 2);
	sg_set_buf(&sg_in[0], src, src_len);
	sg_set_buf(&sg_in[1], pad, zero_padding);
	sg_init_table(sg_out, 1);
	sg_set_buf(sg_out, dst, *dst_len);
	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);

	memcpy(iv, aes_iv, ivsize);
	/*
	print_hex_dump(KERN_ERR, "enc key: ", DUMP_PREFIX_NONE, 16, 1,
		       key, key_len, 1);
	print_hex_dump(KERN_ERR, "enc src: ", DUMP_PREFIX_NONE, 16, 1,
			src, src_len, 1);
	print_hex_dump(KERN_ERR, "enc pad: ", DUMP_PREFIX_NONE, 16, 1,
			pad, zero_padding, 1);
	*/
	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
				     src_len + zero_padding);
	crypto_free_blkcipher(tfm);
	if (ret < 0)
		pr_err("ceph_aes_crypt failed %d\n", ret);
	/*
	print_hex_dump(KERN_ERR, "enc out: ", DUMP_PREFIX_NONE, 16, 1,
		       dst, *dst_len, 1);
	*/
	return ret;
}
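The zero_padding computation in ceph_aes_encrypt() is PKCS#7-style padding for a 16-byte block: between 1 and 16 pad bytes are always appended, each holding the pad length, and a full extra block is added when src_len is already aligned. A small stand-alone check of that arithmetic (plain C, no kernel APIs; the helper name is ours):

#include <assert.h>
#include <stdio.h>
#include <stddef.h>

/* Same expression as the zero_padding line above. */
static size_t ceph_pad_len(size_t src_len)
{
	return 0x10 - (src_len & 0x0f);
}

int main(void)
{
	assert(ceph_pad_len(1)  == 15);	/* 1  + 15 = 16     */
	assert(ceph_pad_len(15) ==  1);	/* 15 +  1 = 16     */
	assert(ceph_pad_len(16) == 16);	/* full extra block */
	assert(ceph_pad_len(17) == 15);	/* 17 + 15 = 32     */
	printf("pad-length arithmetic checks out\n");
	return 0;
}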

static int ceph_aes_encrypt2(const void *key, int key_len, void *dst,
			     size_t *dst_len,
			     const void *src1, size_t src1_len,
			     const void *src2, size_t src2_len)
{
	struct scatterlist sg_in[3], sg_out[1];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
	int ret;
	void *iv;
	int ivsize;
	size_t zero_padding = (0x10 - ((src1_len + src2_len) & 0x0f));
	char pad[16];

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	memset(pad, zero_padding, zero_padding);

	*dst_len = src1_len + src2_len + zero_padding;

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	sg_init_table(sg_in, 3);
	sg_set_buf(&sg_in[0], src1, src1_len);
	sg_set_buf(&sg_in[1], src2, src2_len);
	sg_set_buf(&sg_in[2], pad, zero_padding);
	sg_init_table(sg_out, 1);
	sg_set_buf(sg_out, dst, *dst_len);
	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);

	memcpy(iv, aes_iv, ivsize);
	/*
	print_hex_dump(KERN_ERR, "enc  key: ", DUMP_PREFIX_NONE, 16, 1,
		       key, key_len, 1);
	print_hex_dump(KERN_ERR, "enc src1: ", DUMP_PREFIX_NONE, 16, 1,
			src1, src1_len, 1);
	print_hex_dump(KERN_ERR, "enc src2: ", DUMP_PREFIX_NONE, 16, 1,
			src2, src2_len, 1);
	print_hex_dump(KERN_ERR, "enc  pad: ", DUMP_PREFIX_NONE, 16, 1,
			pad, zero_padding, 1);
	*/
	ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
				     src1_len + src2_len + zero_padding);
	crypto_free_blkcipher(tfm);
	if (ret < 0)
		pr_err("ceph_aes_crypt2 failed %d\n", ret);
	/*
	print_hex_dump(KERN_ERR, "enc  out: ", DUMP_PREFIX_NONE, 16, 1,
		       dst, *dst_len, 1);
	*/
	return ret;
}

static int ceph_aes_decrypt(const void *key, int key_len,
			    void *dst, size_t *dst_len,
			    const void *src, size_t src_len)
{
	struct scatterlist sg_in[1], sg_out[2];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm };
	char pad[16];
	void *iv;
	int ivsize;
	int ret;
	int last_byte;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 2);
	sg_set_buf(sg_in, src, src_len);
	sg_set_buf(&sg_out[0], dst, *dst_len);
	sg_set_buf(&sg_out[1], pad, sizeof(pad));

	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);

	memcpy(iv, aes_iv, ivsize);

	/*
	print_hex_dump(KERN_ERR, "dec key: ", DUMP_PREFIX_NONE, 16, 1,
		       key, key_len, 1);
	print_hex_dump(KERN_ERR, "dec  in: ", DUMP_PREFIX_NONE, 16, 1,
		       src, src_len, 1);
	*/

	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
	crypto_free_blkcipher(tfm);
	if (ret < 0) {
		pr_err("ceph_aes_decrypt failed %d\n", ret);
		return ret;
	}

	if (src_len <= *dst_len)
		last_byte = ((char *)dst)[src_len - 1];
	else
		last_byte = pad[src_len - *dst_len - 1];
	if (last_byte <= 16 && src_len >= last_byte) {
		*dst_len = src_len - last_byte;
	} else {
		pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
		       last_byte, (int)src_len);
		return -EPERM;  /* bad padding */
	}
	/*
	print_hex_dump(KERN_ERR, "dec out: ", DUMP_PREFIX_NONE, 16, 1,
		       dst, *dst_len, 1);
	*/
	return 0;
}
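On the decrypt side the last decrypted byte (which may land in dst or in the overflow pad buffer, depending on *dst_len) is read back as the pad length and used to shorten the output. Below is a minimal user-space sketch of that unpadding rule, assuming the whole plaintext sits in one contiguous buffer; it is slightly stricter than the check above in that it also rejects a pad byte of zero, which the encrypt side can never produce.

#include <stdio.h>
#include <stddef.h>

/* Returns the unpadded plaintext length, or -1 for bad padding. */
static long strip_pad(const unsigned char *buf, size_t len)
{
	unsigned char last;

	if (len == 0)
		return -1;
	last = buf[len - 1];
	if (last == 0 || last > 16 || (size_t)last > len)
		return -1;	/* bad padding */
	return (long)(len - last);
}

int main(void)
{
	unsigned char block[16] = { 'h', 'i' };
	size_t i;

	/* Simulate one decrypted block: 2 data bytes + 14 pad bytes of 14. */
	for (i = 2; i < sizeof(block); i++)
		block[i] = 14;

	printf("plaintext length = %ld\n", strip_pad(block, sizeof(block)));
	return 0;
}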

static int ceph_aes_decrypt2(const void *key, int key_len,
			     void *dst1, size_t *dst1_len,
			     void *dst2, size_t *dst2_len,
			     const void *src, size_t src_len)
{
	struct scatterlist sg_in[1], sg_out[3];
	struct crypto_blkcipher *tfm = ceph_crypto_alloc_cipher();
	struct blkcipher_desc desc = { .tfm = tfm };
	char pad[16];
	void *iv;
	int ivsize;
	int ret;
	int last_byte;

	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	sg_init_table(sg_in, 1);
	sg_set_buf(sg_in, src, src_len);
	sg_init_table(sg_out, 3);
	sg_set_buf(&sg_out[0], dst1, *dst1_len);
	sg_set_buf(&sg_out[1], dst2, *dst2_len);
	sg_set_buf(&sg_out[2], pad, sizeof(pad));

	crypto_blkcipher_setkey((void *)tfm, key, key_len);
	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);

	memcpy(iv, aes_iv, ivsize);

	/*
	print_hex_dump(KERN_ERR, "dec  key: ", DUMP_PREFIX_NONE, 16, 1,
		       key, key_len, 1);
	print_hex_dump(KERN_ERR, "dec   in: ", DUMP_PREFIX_NONE, 16, 1,
		       src, src_len, 1);
	*/

	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
	crypto_free_blkcipher(tfm);
	if (ret < 0) {
		pr_err("ceph_aes_decrypt failed %d\n", ret);
		return ret;
	}

	if (src_len <= *dst1_len)
		last_byte = ((char *)dst1)[src_len - 1];
	else if (src_len <= *dst1_len + *dst2_len)
		last_byte = ((char *)dst2)[src_len - *dst1_len - 1];
	else
		last_byte = pad[src_len - *dst1_len - *dst2_len - 1];
	if (last_byte <= 16 && src_len >= last_byte) {
		src_len -= last_byte;
	} else {
		pr_err("ceph_aes_decrypt got bad padding %d on src len %d\n",
		       last_byte, (int)src_len);
		return -EPERM;  /* bad padding */
	}

	if (src_len < *dst1_len) {
		*dst1_len = src_len;
		*dst2_len = 0;
	} else {
		*dst2_len = src_len - *dst1_len;
	}
	/*
	print_hex_dump(KERN_ERR, "dec  out1: ", DUMP_PREFIX_NONE, 16, 1,
		       dst1, *dst1_len, 1);
	print_hex_dump(KERN_ERR, "dec  out2: ", DUMP_PREFIX_NONE, 16, 1,
		       dst2, *dst2_len, 1);
	*/

	return 0;
}


int ceph_decrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		 const void *src, size_t src_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src_len)
			return -ERANGE;
		memcpy(dst, src, src_len);
		*dst_len = src_len;
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_decrypt(secret->key, secret->len, dst,
					dst_len, src, src_len);

	default:
		return -EINVAL;
	}
}

int ceph_decrypt2(struct ceph_crypto_key *secret,
			void *dst1, size_t *dst1_len,
			void *dst2, size_t *dst2_len,
			const void *src, size_t src_len)
{
	size_t t;

	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst1_len + *dst2_len < src_len)
			return -ERANGE;
		t = min(*dst1_len, src_len);
		memcpy(dst1, src, t);
		*dst1_len = t;
		src += t;
		src_len -= t;
		if (src_len) {
			t = min(*dst2_len, src_len);
			memcpy(dst2, src, t);
			*dst2_len = t;
		}
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_decrypt2(secret->key, secret->len,
					 dst1, dst1_len, dst2, dst2_len,
					 src, src_len);

	default:
		return -EINVAL;
	}
}

int ceph_encrypt(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		 const void *src, size_t src_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src_len)
			return -ERANGE;
		memcpy(dst, src, src_len);
		*dst_len = src_len;
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_encrypt(secret->key, secret->len, dst,
					dst_len, src, src_len);

	default:
		return -EINVAL;
	}
}

int ceph_encrypt2(struct ceph_crypto_key *secret, void *dst, size_t *dst_len,
		  const void *src1, size_t src1_len,
		  const void *src2, size_t src2_len)
{
	switch (secret->type) {
	case CEPH_CRYPTO_NONE:
		if (*dst_len < src1_len + src2_len)
			return -ERANGE;
		memcpy(dst, src1, src1_len);
		memcpy(dst + src1_len, src2, src2_len);
		*dst_len = src1_len + src2_len;
		return 0;

	case CEPH_CRYPTO_AES:
		return ceph_aes_encrypt2(secret->key, secret->len, dst, dst_len,
					 src1, src1_len, src2, src2_len);

	default:
		return -EINVAL;
	}
}
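A caller-side note on these wrappers: in the CEPH_CRYPTO_NONE branches *dst_len is checked as capacity and then set to the copied length, but in the AES encrypt path it is written as output only, with no capacity check, so the destination must already allow for the padded length. A hedged sizing sketch follows (the helper name is ours, not part of the ceph API).

#include <stdio.h>
#include <stddef.h>

/* Worst-case AES output size: input rounded up to the next 16-byte
 * boundary, with a full extra block when it is already aligned. */
static size_t ceph_aes_dst_capacity(size_t src_len)
{
	return src_len + (0x10 - (src_len & 0x0f));
}

int main(void)
{
	printf("src 10  -> dst %zu\n", ceph_aes_dst_capacity(10));	/* 16  */
	printf("src 16  -> dst %zu\n", ceph_aes_dst_capacity(16));	/* 32  */
	printf("src 100 -> dst %zu\n", ceph_aes_dst_capacity(100));	/* 112 */
	return 0;
}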