Example #1
static int
finalize_hash (struct hash_desc *desc, unsigned char * out, unsigned int out_size)
{
	int ret = -1;

	if (!desc || !desc->tfm || !out || !out_size)
	{
		printk(KERN_ERR "FIPS(%s): Invalid args", __FUNCTION__);		
		return ret;
	}

	if (crypto_hash_digestsize(desc->tfm) > out_size)
	{
		printk(KERN_ERR "FIPS(%s): Not enough space for digest", __FUNCTION__);		
		return ret;
	}

	ret = crypto_hash_final (desc, out);

	if (ret)
	{
		printk(KERN_ERR "FIPS(%s): crypto_hash_final failed", __FUNCTION__);		
		return -1;
	}

	return 0;
}
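Most of these snippets share the same basic shape: allocate a hash transform, use crypto_hash_digestsize() to size or validate the output buffer, run the digest, and free the transform. For reference, here is a minimal sketch of that one-shot pattern against the legacy crypto_hash API (kernels before 4.6); the function name and parameters are illustrative and are not taken from any of the examples.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Minimal one-shot digest sketch (illustrative, not from the examples above):
 * allocate the transform, check that the caller's buffer can hold the full
 * digest, hash a single buffer, and free the transform.
 */
static int example_one_shot_digest(const char *alg_name, const void *data,
				   unsigned int len, u8 *out,
				   unsigned int out_size)
{
	struct hash_desc desc;
	struct scatterlist sg;
	int err;

	desc.tfm = crypto_alloc_hash(alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return PTR_ERR(desc.tfm);
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	/* crypto_hash_digestsize() reports the digest length in bytes. */
	if (crypto_hash_digestsize(desc.tfm) > out_size) {
		crypto_free_hash(desc.tfm);
		return -EINVAL;
	}

	sg_init_one(&sg, data, len);
	err = crypto_hash_digest(&desc, &sg, len, out);

	crypto_free_hash(desc.tfm);
	return err;
}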
Example #2
static struct crypto_hash *pohmelfs_init_hash(struct pohmelfs_sb *psb)
{
	int err;
	struct crypto_hash *hash;

	hash = crypto_alloc_hash(psb->hash_string, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash)) {
		err = PTR_ERR(hash);
		dprintk("%s: idx: %u: failed to allocate hash '%s', err: %d.\n",
				__func__, psb->idx, psb->hash_string, err);
		goto err_out_exit;
	}

	psb->crypto_attached_size = crypto_hash_digestsize(hash);

	if (!psb->hash_keysize)
		return hash;

	err = crypto_hash_setkey(hash, psb->hash_key, psb->hash_keysize);
	if (err) {
		dprintk("%s: idx: %u: failed to set key for hash '%s', err: %d.\n",
				__func__, psb->idx, psb->hash_string, err);
		goto err_out_free;
	}

	return hash;

err_out_free:
	crypto_free_hash(hash);
err_out_exit:
	return ERR_PTR(err);
}
Example #3
/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
    struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
    struct hash_desc desc;
    struct scatterlist sg;
    struct crypto_cipher *essiv_tfm;
    int err;

    sg_init_one(&sg, cc->key, cc->key_size);
    desc.tfm = essiv->hash_tfm;
    desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

    err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
    if (err)
        return err;

    essiv_tfm = cc->iv_private;

    err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
                               crypto_hash_digestsize(essiv->hash_tfm));
    if (err)
        return err;

    return 0;
}
Example #4
/*
 * Crypto machinery: hash/cipher support for the given crypto controls.
 */
static struct crypto_hash *dst_init_hash(struct dst_crypto_ctl *ctl, u8 *key)
{
	int err;
	struct crypto_hash *hash;

	hash = crypto_alloc_hash(ctl->hash_algo, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash)) {
		err = PTR_ERR(hash);
		dprintk("%s: failed to allocate hash '%s', err: %d.\n",
				__func__, ctl->hash_algo, err);
		goto err_out_exit;
	}

	ctl->crypto_attached_size = crypto_hash_digestsize(hash);

	if (!ctl->hash_keysize)
		return hash;

	err = crypto_hash_setkey(hash, key, ctl->hash_keysize);
	if (err) {
		dprintk("%s: failed to set key for hash '%s', err: %d.\n",
				__func__, ctl->hash_algo, err);
		goto err_out_free;
	}

	return hash;

err_out_free:
	crypto_free_hash(hash);
err_out_exit:
	return ERR_PTR(err);
}
Example #5
__be32
nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)
{
	struct xdr_netobj cksum;
	struct hash_desc desc;
	struct scatterlist sg;
	__be32 status = nfserr_resource;

	dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
			clname->len, clname->data);
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		goto out_no_tfm;
	cksum.len = crypto_hash_digestsize(desc.tfm);
	cksum.data = kmalloc(cksum.len, GFP_KERNEL);
	if (cksum.data == NULL)
 		goto out;

	sg_init_one(&sg, clname->data, clname->len);

	if (crypto_hash_digest(&desc, &sg, sg.length, cksum.data))
		goto out;

	md5_to_hex(dname, cksum.data);

	kfree(cksum.data);
	status = nfs_ok;
out:
	crypto_free_hash(desc.tfm);
out_no_tfm:
	return status;
}
Example #6
/* checksum the plaintext data and hdrlen bytes of the token header */
s32
make_checksum(char *cksumname, char *header, int hdrlen, struct xdr_buf *body,
		   int body_offset, struct xdr_netobj *cksum)
{
	struct hash_desc                desc; /* XXX add to ctx? */
	struct scatterlist              sg[1];
	int err;

	desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return GSS_S_FAILURE;
	cksum->len = crypto_hash_digestsize(desc.tfm);
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;
	sg_set_buf(sg, header, hdrlen);
	err = crypto_hash_update(&desc, sg, hdrlen);
	if (err)
		goto out;
	err = xdr_process_buf(body, body_offset, body->len - body_offset,
			      checksummer, &desc);
	if (err)
		goto out;
	err = crypto_hash_final(&desc, cksum->data);

out:
	crypto_free_hash(desc.tfm);
	return err ? GSS_S_FAILURE : 0;
}
Example #7
/*      If hash_len pointer is NULL - destroy descriptor. */
int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
			  unsigned char *hash, unsigned int *hash_len)
{
	int     err;
	int     size = crypto_hash_digestsize(((struct hash_desc *)hdesc)->tfm);

	if (hash_len == NULL) {
		crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
		kfree(hdesc);
		return 0;
	}
	if (hash == NULL || *hash_len < size) {
		*hash_len = size;
		return -ENOSPC;
	}
	err = crypto_hash_final((struct hash_desc *) hdesc, hash);

	if (err < 0) {
		/* Maybe the caller can fix the error */
		return err;
	}
	crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
	kfree(hdesc);
	return err;
}
Example #8
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
                              const char *opts)
{
    struct crypto_cipher *essiv_tfm = NULL;
    struct crypto_hash *hash_tfm = NULL;
    u8 *salt = NULL;
    int err;

    if (!opts) {
        ti->error = "Digest algorithm missing for ESSIV mode";
        return -EINVAL;
    }

    /* Allocate hash algorithm */
    hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(hash_tfm)) {
        ti->error = "Error initializing ESSIV hash";
        err = PTR_ERR(hash_tfm);
        goto bad;
    }

    salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
    if (!salt) {
        ti->error = "Error kmallocing salt storage in ESSIV";
        err = -ENOMEM;
        goto bad;
    }

    cc->iv_gen_private.essiv.salt = salt;
    cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

    essiv_tfm = setup_essiv_cpu(cc, ti, salt,
                                crypto_hash_digestsize(hash_tfm));
    if (IS_ERR(essiv_tfm)) {
        crypt_iv_essiv_dtr(cc);
        return PTR_ERR(essiv_tfm);
    }
    cc->iv_private = essiv_tfm;

    return 0;

bad:
    if (hash_tfm && !IS_ERR(hash_tfm))
        crypto_free_hash(hash_tfm);
    kfree(salt);
    return err;
}
Example #9
static int crypt_iv_essiv_ctr(struct crypt_config *cc, struct dm_target *ti,
			      const char *opts)
{
	struct crypto_cipher *essiv_tfm = NULL;
	struct crypto_hash *hash_tfm = NULL;
	u8 *salt = NULL;
	int err;

	if (!opts) {
		ti->error = "Digest algorithm missing for ESSIV mode";
		return -EINVAL;
	}

	/* Allocate hash algorithm */
	hash_tfm = crypto_alloc_hash(opts, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(hash_tfm)) {
		ti->error = "Error initializing ESSIV hash";
		err = PTR_ERR(hash_tfm);
		goto bad;
	}

	salt = kzalloc(crypto_hash_digestsize(hash_tfm), GFP_KERNEL);
	if (!salt) {
		ti->error = "Error kmallocing salt storage in ESSIV";
		err = -ENOMEM;
		goto bad;
	}

	/* Allocate essiv_tfm */
	essiv_tfm = crypto_alloc_cipher(cc->cipher, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(essiv_tfm)) {
		ti->error = "Error allocating crypto tfm for ESSIV";
		err = PTR_ERR(essiv_tfm);
		goto bad;
	}
	if (crypto_cipher_blocksize(essiv_tfm) !=
	    crypto_ablkcipher_ivsize(cc->tfm)) {
		ti->error = "Block size of ESSIV cipher does "
			    "not match IV size of block cipher";
		err = -EINVAL;
		goto bad;
	}

	cc->iv_gen_private.essiv.salt = salt;
	cc->iv_gen_private.essiv.tfm = essiv_tfm;
	cc->iv_gen_private.essiv.hash_tfm = hash_tfm;

	return 0;

bad:
	if (essiv_tfm && !IS_ERR(essiv_tfm))
		crypto_free_cipher(essiv_tfm);
	if (hash_tfm && !IS_ERR(hash_tfm))
		crypto_free_hash(hash_tfm);
	kfree(salt);
	return err;
}
Example #10
/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);

	memset(essiv->salt, 0, salt_size);

	return crypto_cipher_setkey(essiv->tfm, essiv->salt, salt_size);
}
Example #11
static int
DriverEnvironment_HMAC(const char *algo, 
                       const void *key,
                       size_t key_len,
                       const void *data,
                       size_t data_len,
                       void *result,
                       size_t result_len)
{
   struct crypto_hash *tfm;
   struct scatterlist sg[1];
   struct hash_desc desc;
   int ret;

   tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
   if(IS_ERR(tfm)) {
      DE_TRACE_INT(TR_CRYPTO, "failed to allocate hash (%ld)\n", PTR_ERR(tfm));
      return WIFI_ENGINE_FAILURE;
   }

   if(crypto_hash_digestsize(tfm) > result_len) {
      crypto_free_hash(tfm);
      return WIFI_ENGINE_FAILURE_INVALID_LENGTH;
   }

   sg_init_one(&sg[0], data, data_len);

   crypto_hash_clear_flags(tfm, ~0);

   ret = crypto_hash_setkey(tfm, key, key_len);
   if(ret != 0) {
      DE_TRACE_INT(TR_CRYPTO, "failed to set key (%d)\n", ret);
      crypto_free_hash(tfm);
      return WIFI_ENGINE_FAILURE;
   }

   desc.tfm = tfm;
   desc.flags = 0;

   ret = crypto_hash_digest(&desc, sg, data_len, result);
   if(ret != 0) {
      DE_TRACE_INT(TR_CRYPTO, "faild to digest (%d)\n", ret);
      crypto_free_hash(tfm);
      return WIFI_ENGINE_FAILURE;
   }

   crypto_free_hash(tfm);

   return WIFI_ENGINE_SUCCESS;
}
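For keyed (HMAC) use, as in the example above, the only additions are a crypto_hash_setkey() call before digesting and the same crypto_hash_digestsize() bounds check on the output buffer. Below is a minimal sketch with illustrative names (alg_name would be something like "hmac(sha1)"); it is a rough outline under those assumptions, not code taken from any of the examples.

#include <linux/crypto.h>
#include <linux/err.h>
#include <linux/scatterlist.h>

/* Minimal keyed-digest (HMAC) sketch; all errors funnel through one exit. */
static int example_hmac_digest(const char *alg_name,
			       const u8 *key, unsigned int key_len,
			       const void *data, unsigned int data_len,
			       u8 *out, unsigned int out_size)
{
	struct hash_desc desc;
	struct scatterlist sg;
	int err;

	desc.tfm = crypto_alloc_hash(alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return PTR_ERR(desc.tfm);
	desc.flags = 0;

	/* Refuse to truncate: the output buffer must hold the full digest. */
	err = -EINVAL;
	if (crypto_hash_digestsize(desc.tfm) > out_size)
		goto out_free;

	err = crypto_hash_setkey(desc.tfm, key, key_len);
	if (err)
		goto out_free;

	sg_init_one(&sg, data, data_len);
	err = crypto_hash_digest(&desc, &sg, data_len, out);

out_free:
	crypto_free_hash(desc.tfm);
	return err;
}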
Example #12
/* Wipe salt and reset key derived from volume key */
static int crypt_iv_essiv_wipe(struct crypt_config *cc)
{
    struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
    unsigned salt_size = crypto_hash_digestsize(essiv->hash_tfm);
    struct crypto_cipher *essiv_tfm;
    int r, err = 0;

    memset(essiv->salt, 0, salt_size);

    essiv_tfm = cc->iv_private;
    r = crypto_cipher_setkey(essiv_tfm, essiv->salt, salt_size);
    if (r)
        err = r;

    return err;
}
Example #13
int __init init_mars(void)
{
	MARS_INF("init_mars()\n");

	set_fake();

#ifdef MARS_TRACING
	{
		int flags = O_CREAT | O_TRUNC | O_RDWR | O_LARGEFILE;
		int prot = 0600;
		mm_segment_t oldfs;
		oldfs = get_fs();
		set_fs(get_ds());
		mars_log_file = filp_open("/mars/trace.csv", flags, prot);
		set_fs(oldfs);
		if (IS_ERR(mars_log_file)) {
			MARS_ERR("cannot create trace logfile, status = %ld\n", PTR_ERR(mars_log_file));
			mars_log_file = NULL;
		}
	}
#endif

	mars_tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (!mars_tfm) {
		MARS_ERR("cannot alloc crypto hash\n");
		return -ENOMEM;
	}
	if (IS_ERR(mars_tfm)) {
		MARS_ERR("alloc crypto hash failed, status = %d\n", (int)PTR_ERR(mars_tfm));
		return PTR_ERR(mars_tfm);
	}
#if 0
	if (crypto_tfm_alg_type(crypto_hash_tfm(mars_tfm)) != CRYPTO_ALG_TYPE_DIGEST) {
		MARS_ERR("bad crypto hash type\n");
		return -EINVAL;
	}
#endif
	mars_digest_size = crypto_hash_digestsize(mars_tfm);
	MARS_INF("digest_size = %d\n", mars_digest_size);

	return 0;
}
Example #14
void s390_sha_final(struct crypto_tfm *tfm, u8 *out)
{
	struct s390_sha_ctx *ctx = crypto_tfm_ctx(tfm);
	unsigned int bsize = crypto_tfm_alg_blocksize(tfm);
	u64 bits;
	unsigned int index, end, plen;
	int ret;

	/* SHA-512 uses 128 bit padding length */
	plen = (bsize > SHA256_BLOCK_SIZE) ? 16 : 8;

	/* must perform manual padding */
	index = ctx->count & (bsize - 1);
	end = (index < bsize - plen) ? bsize : (2 * bsize);

	/* start pad with 1 */
	ctx->buf[index] = 0x80;
	index++;

	/* pad with zeros */
	memset(ctx->buf + index, 0x00, end - index - 8);

	/*
	 * Append message length. Well, SHA-512 wants a 128 bit length value,
	 * nevertheless we use u64, should be enough for now...
	 */
	bits = ctx->count * 8;
	memcpy(ctx->buf + end - 8, &bits, sizeof(bits));

	ret = crypt_s390_kimd(ctx->func, ctx->state, ctx->buf, end);
	BUG_ON(ret != end);

	/* copy digest to out */
	memcpy(out, ctx->state, crypto_hash_digestsize(crypto_hash_cast(tfm)));
	/* wipe context */
	memset(ctx, 0, sizeof *ctx);
}
Example #15
/**
 * Finish hash calculation, copy hash digest to buffer, clean up hash descriptor
 *
 * \param[in]	hdesc		hash descriptor
 * \param[out]	hash		pointer to hash buffer to store hash digest
 * \param[in,out] hash_len	pointer to hash buffer size, if \a hash == NULL
 *				or hash_len == NULL only free \a hdesc instead
 *				of computing the hash
 *
 * \retval		0 for success
 * \retval		-EOVERFLOW if hash_len is too small for the hash digest
 * \retval		negative errno for other errors from lower layers
 */
int cfs_crypto_hash_final(struct cfs_crypto_hash_desc *hdesc,
			  unsigned char *hash, unsigned int *hash_len)
{
	int     size = crypto_hash_digestsize(((struct hash_desc *)hdesc)->tfm);
	int     err;

	if (hash == NULL || hash_len == NULL) {
		err = 0;
		goto free;
	}
	if (*hash_len < size) {
		err = -EOVERFLOW;
		goto free;
	}

	err = crypto_hash_final((struct hash_desc *)hdesc, hash);
	if (err == 0)
		*hash_len = size;
free:
	crypto_free_hash(((struct hash_desc *)hdesc)->tfm);
	kfree(hdesc);

	return err;
}
Example #16
/*
 * Perform the MPPE rekey algorithm, from RFC 3078, sec. 7.3.
 * Well, not what's written there, but rather what they meant.
 */
static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
{
	struct scatterlist sg_in[1], sg_out[1];
	struct blkcipher_desc desc = { .tfm = state->arc4 };

	get_new_key_from_sha(state);
	if (!initial_key) {
		crypto_blkcipher_setkey(state->arc4, state->sha1_digest,
					state->keylen);
		sg_init_table(sg_in, 1);
		sg_init_table(sg_out, 1);
		setup_sg(sg_in, state->sha1_digest, state->keylen);
		setup_sg(sg_out, state->session_key, state->keylen);
		if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
					     state->keylen) != 0) {
    		    printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
		}
	} else {
		memcpy(state->session_key, state->sha1_digest, state->keylen);
	}
	if (state->keylen == 8) {
		/* See RFC 3078 */
		state->session_key[0] = 0xd1;
		state->session_key[1] = 0x26;
		state->session_key[2] = 0x9e;
	}
	crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen);
}

/*
 * Allocate space for a (de)compressor.
 */
static void *mppe_alloc(unsigned char *options, int optlen)
{
	struct ppp_mppe_state *state;
	unsigned int digestsize;

	if (optlen != CILEN_MPPE + sizeof(state->master_key)
	    || options[0] != CI_MPPE || options[1] != CILEN_MPPE)
		goto out;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		goto out;


	state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(state->arc4)) {
		state->arc4 = NULL;
		goto out_free;
	}

	state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(state->sha1)) {
		state->sha1 = NULL;
		goto out_free;
	}

	digestsize = crypto_hash_digestsize(state->sha1);
	if (digestsize < MPPE_MAX_KEY_LEN)
		goto out_free;

	state->sha1_digest = kmalloc(digestsize, GFP_KERNEL);
	if (!state->sha1_digest)
		goto out_free;

	/* Save keys. */
	memcpy(state->master_key, &options[CILEN_MPPE],
	       sizeof(state->master_key));
	memcpy(state->session_key, state->master_key,
	       sizeof(state->master_key));

	/*
	 * We defer initial key generation until mppe_init(), as mppe_alloc()
	 * is called frequently during negotiation.
	 */

	return (void *)state;

	out_free:
	    if (state->sha1_digest)
		kfree(state->sha1_digest);
	    if (state->sha1)
		crypto_free_hash(state->sha1);
	    if (state->arc4)
		crypto_free_blkcipher(state->arc4);
	    kfree(state);
	out:
	return NULL;
}

/*
 * Deallocate space for a (de)compressor.
 */
static void mppe_free(void *arg)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
	if (state) {
	    if (state->sha1_digest)
		kfree(state->sha1_digest);
	    if (state->sha1)
		crypto_free_hash(state->sha1);
	    if (state->arc4)
		crypto_free_blkcipher(state->arc4);
	    kfree(state);
	}
}

/*
 * Initialize (de)compressor state.
 */
static int
mppe_init(void *arg, unsigned char *options, int optlen, int unit, int debug,
	  const char *debugstr)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
	unsigned char mppe_opts;

	if (optlen != CILEN_MPPE
	    || options[0] != CI_MPPE || options[1] != CILEN_MPPE)
		return 0;

	MPPE_CI_TO_OPTS(&options[2], mppe_opts);
	if (mppe_opts & MPPE_OPT_128)
		state->keylen = 16;
	else if (mppe_opts & MPPE_OPT_40)
		state->keylen = 8;
	else {
		printk(KERN_WARNING "%s[%d]: unknown key length\n", debugstr,
		       unit);
		return 0;
	}
	if (mppe_opts & MPPE_OPT_STATEFUL)
		state->stateful = 1;

	/* Generate the initial session key. */
	mppe_rekey(state, 1);

	if (debug) {
		int i;
		char mkey[sizeof(state->master_key) * 2 + 1];
		char skey[sizeof(state->session_key) * 2 + 1];

		printk(KERN_DEBUG "%s[%d]: initialized with %d-bit %s mode\n",
		       debugstr, unit, (state->keylen == 16) ? 128 : 40,
		       (state->stateful) ? "stateful" : "stateless");

		for (i = 0; i < sizeof(state->master_key); i++)
			sprintf(mkey + i * 2, "%02x", state->master_key[i]);
		for (i = 0; i < sizeof(state->session_key); i++)
			sprintf(skey + i * 2, "%02x", state->session_key[i]);
		printk(KERN_DEBUG
		       "%s[%d]: keys: master: %s initial session: %s\n",
		       debugstr, unit, mkey, skey);
	}

	/*
	 * Initialize the coherency count.  The initial value is not specified
	 * in RFC 3078, but we can make a reasonable assumption that it will
	 * start at 0.  Setting it to the max here makes the comp/decomp code
	 * do the right thing (determined through experiment).
	 */
	state->ccount = MPPE_CCOUNT_SPACE - 1;

	/*
	 * Note that even though we have initialized the key table, we don't
	 * set the FLUSHED bit.  This is contrary to RFC 3078, sec. 3.1.
	 */
	state->bits = MPPE_BIT_ENCRYPTED;

	state->unit = unit;
	state->debug = debug;

	return 1;
}

static int
mppe_comp_init(void *arg, unsigned char *options, int optlen, int unit,
	       int hdrlen, int debug)
{
	/* ARGSUSED */
	return mppe_init(arg, options, optlen, unit, debug, "mppe_comp_init");
}

/*
 * We received a CCP Reset-Request (actually, we are sending a Reset-Ack),
 * tell the compressor to rekey.  Note that we MUST NOT rekey for
 * every CCP Reset-Request; we only rekey on the next xmit packet.
 * We might get multiple CCP Reset-Requests if our CCP Reset-Ack is lost.
 * So, rekeying for every CCP Reset-Request is broken as the peer will not
 * know how many times we've rekeyed.  (If we rekey and THEN get another
 * CCP Reset-Request, we must rekey again.)
 */
static void mppe_comp_reset(void *arg)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;

	state->bits |= MPPE_BIT_FLUSHED;
}

/*
 * Compress (encrypt) a packet.
 * It's strange to call this a compressor, since the output is always
 * MPPE_OVHD + 2 bytes larger than the input.
 */
static int
mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
	      int isize, int osize)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
	struct blkcipher_desc desc = { .tfm = state->arc4 };
	int proto;
	struct scatterlist sg_in[1], sg_out[1];

	/*
	 * Check that the protocol is in the range we handle.
	 */
	proto = PPP_PROTOCOL(ibuf);
	if (proto < 0x0021 || proto > 0x00fa)
		return 0;

	/* Make sure we have enough room to generate an encrypted packet. */
	if (osize < isize + MPPE_OVHD + 2) {
		/* Drop the packet if we should encrypt it, but can't. */
		printk(KERN_DEBUG "mppe_compress[%d]: osize too small! "
		       "(have: %d need: %d)\n", state->unit,
		       osize, osize + MPPE_OVHD + 2);
		return -1;
	}

	osize = isize + MPPE_OVHD + 2;

	/*
	 * Copy over the PPP header and set control bits.
	 */
	obuf[0] = PPP_ADDRESS(ibuf);
	obuf[1] = PPP_CONTROL(ibuf);
	obuf[2] = PPP_COMP >> 8;	/* isize + MPPE_OVHD + 1 */
	obuf[3] = PPP_COMP;	/* isize + MPPE_OVHD + 2 */
	obuf += PPP_HDRLEN;

	state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
	if (state->debug >= 7)
		printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit,
		       state->ccount);
	obuf[0] = state->ccount >> 8;
	obuf[1] = state->ccount & 0xff;

	if (!state->stateful ||	/* stateless mode     */
	    ((state->ccount & 0xff) == 0xff) ||	/* "flag" packet      */
	    (state->bits & MPPE_BIT_FLUSHED)) {	/* CCP Reset-Request  */
		/* We must rekey */
		if (state->debug && state->stateful)
			printk(KERN_DEBUG "mppe_compress[%d]: rekeying\n",
			       state->unit);
		mppe_rekey(state, 0);
		state->bits |= MPPE_BIT_FLUSHED;
	}
	obuf[0] |= state->bits;
	state->bits &= ~MPPE_BIT_FLUSHED;	/* reset for next xmit */

	obuf += MPPE_OVHD;
	ibuf += 2;		/* skip to proto field */
	isize -= 2;

	/* Encrypt packet */
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 1);
	setup_sg(sg_in, ibuf, isize);
	setup_sg(sg_out, obuf, osize);
	if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) {
		printk(KERN_DEBUG "crypto_cypher_encrypt failed\n");
		return -1;
	}

	state->stats.unc_bytes += isize;
	state->stats.unc_packets++;
	state->stats.comp_bytes += osize;
	state->stats.comp_packets++;

	return osize;
}

/*
 * Since every frame grows by MPPE_OVHD + 2 bytes, this is always going
 * to look bad ... and the longer the link is up the worse it will get.
 */
static void mppe_comp_stats(void *arg, struct compstat *stats)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;

	*stats = state->stats;
}

static int
mppe_decomp_init(void *arg, unsigned char *options, int optlen, int unit,
		 int hdrlen, int mru, int debug)
{
	/* ARGSUSED */
	return mppe_init(arg, options, optlen, unit, debug, "mppe_decomp_init");
}

/*
 * We received a CCP Reset-Ack.  Just ignore it.
 */
static void mppe_decomp_reset(void *arg)
{
	/* ARGSUSED */
	return;
}

/*
 * Decompress (decrypt) an MPPE packet.
 */
static int
mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
		int osize)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
	struct blkcipher_desc desc = { .tfm = state->arc4 };
	unsigned ccount;
	int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
	int sanity = 0;
	struct scatterlist sg_in[1], sg_out[1];

	if (isize <= PPP_HDRLEN + MPPE_OVHD) {
		if (state->debug)
			printk(KERN_DEBUG
			       "mppe_decompress[%d]: short pkt (%d)\n",
			       state->unit, isize);
		return DECOMP_ERROR;
	}

	/*
	 * Make sure we have enough room to decrypt the packet.
	 * Note that for our test we only subtract 1 byte whereas in
	 * mppe_compress() we added 2 bytes (+MPPE_OVHD);
	 * this is to account for possible PFC.
	 */
	if (osize < isize - MPPE_OVHD - 1) {
		printk(KERN_DEBUG "mppe_decompress[%d]: osize too small! "
		       "(have: %d need: %d)\n", state->unit,
		       osize, isize - MPPE_OVHD - 1);
		return DECOMP_ERROR;
	}
	osize = isize - MPPE_OVHD - 2;	/* assume no PFC */

	ccount = MPPE_CCOUNT(ibuf);
	if (state->debug >= 7)
		printk(KERN_DEBUG "mppe_decompress[%d]: ccount %d\n",
		       state->unit, ccount);

	/* sanity checks -- terminate with extreme prejudice */
	if (!(MPPE_BITS(ibuf) & MPPE_BIT_ENCRYPTED)) {
		printk(KERN_DEBUG
		       "mppe_decompress[%d]: ENCRYPTED bit not set!\n",
		       state->unit);
		state->sanity_errors += 100;
		sanity = 1;
	}
	if (!state->stateful && !flushed) {
		printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in "
		       "stateless mode!\n", state->unit);
		state->sanity_errors += 100;
		sanity = 1;
	}
	if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) {
		printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on "
		       "flag packet!\n", state->unit);
		state->sanity_errors += 100;
		sanity = 1;
	}

	if (sanity) {
		if (state->sanity_errors < SANITY_MAX)
			return DECOMP_ERROR;
		else
			/*
			 * Take LCP down if the peer is sending too many bogons.
			 * We don't want to do this for a single or just a few
			 * instances since it could just be due to packet corruption.
			 */
			return DECOMP_FATALERROR;
	}

	/*
	 * Check the coherency count.
	 */

	if (!state->stateful) {
		/* RFC 3078, sec 8.1.  Rekey for every packet. */
		while (state->ccount != ccount) {
			mppe_rekey(state, 0);
			state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
		}
	} else {
		/* RFC 3078, sec 8.2. */
		if (!state->discard) {
			/* normal state */
			state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
			if (ccount != state->ccount) {
				/*
				 * (ccount > state->ccount)
				 * Packet loss detected, enter the discard state.
				 * Signal the peer to rekey (by sending a CCP Reset-Request).
				 */
				state->discard = 1;
				return DECOMP_ERROR;
			}
		} else {
			/* discard state */
			if (!flushed) {
				/* ccp.c will be silent (no additional CCP Reset-Requests). */
				return DECOMP_ERROR;
			} else {
				/* Rekey for every missed "flag" packet. */
				while ((ccount & ~0xff) !=
				       (state->ccount & ~0xff)) {
					mppe_rekey(state, 0);
					state->ccount =
					    (state->ccount +
					     256) % MPPE_CCOUNT_SPACE;
				}

				/* reset */
				state->discard = 0;
				state->ccount = ccount;
				/*
				 * Another problem with RFC 3078 here.  It implies that the
				 * peer need not send a Reset-Ack packet.  But RFC 1962
				 * requires it.  Hopefully, M$ does send a Reset-Ack; even
				 * though it isn't required for MPPE synchronization, it is
				 * required to reset CCP state.
				 */
			}
		}
		if (flushed)
			mppe_rekey(state, 0);
	}

	/*
	 * Fill in the first part of the PPP header.  The protocol field
	 * comes from the decrypted data.
	 */
	obuf[0] = PPP_ADDRESS(ibuf);	/* +1 */
	obuf[1] = PPP_CONTROL(ibuf);	/* +1 */
	obuf += 2;
	ibuf += PPP_HDRLEN + MPPE_OVHD;
	isize -= PPP_HDRLEN + MPPE_OVHD;	/* -6 */
	/* net osize: isize-4 */

	/*
	 * Decrypt the first byte in order to check if it is
	 * a compressed or uncompressed protocol field.
	 */
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 1);
	setup_sg(sg_in, ibuf, 1);
	setup_sg(sg_out, obuf, 1);
	if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) {
		printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
		return DECOMP_ERROR;
	}

	/*
	 * Do PFC decompression.
	 * This would be nicer if we were given the actual sk_buff
	 * instead of a char *.
	 */
	if ((obuf[0] & 0x01) != 0) {
		obuf[1] = obuf[0];
		obuf[0] = 0;
		obuf++;
		osize++;
	}

	/* And finally, decrypt the rest of the packet. */
	setup_sg(sg_in, ibuf + 1, isize - 1);
	setup_sg(sg_out, obuf + 1, osize - 1);
	if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) {
		printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
		return DECOMP_ERROR;
	}

	state->stats.unc_bytes += osize;
	state->stats.unc_packets++;
	state->stats.comp_bytes += isize;
	state->stats.comp_packets++;

	/* good packet credit */
	state->sanity_errors >>= 1;

	return osize;
}

/*
 * Incompressible data has arrived (this should never happen!).
 * We should probably drop the link if the protocol is in the range
 * of what should be encrypted.  At the least, we should drop this
 * packet.  (How to do this?)
 */
static void mppe_incomp(void *arg, unsigned char *ibuf, int icnt)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;

	if (state->debug &&
	    (PPP_PROTOCOL(ibuf) >= 0x0021 && PPP_PROTOCOL(ibuf) <= 0x00fa))
		printk(KERN_DEBUG
		       "mppe_incomp[%d]: incompressible (unencrypted) data! "
		       "(proto %04x)\n", state->unit, PPP_PROTOCOL(ibuf));

	state->stats.inc_bytes += icnt;
	state->stats.inc_packets++;
	state->stats.unc_bytes += icnt;
	state->stats.unc_packets++;
}

/*************************************************************
 * Module interface table
 *************************************************************/

/*
 * Procedures exported to if_ppp.c.
 */
static struct compressor ppp_mppe = {
	.compress_proto = CI_MPPE,
	.comp_alloc     = mppe_alloc,
	.comp_free      = mppe_free,
	.comp_init      = mppe_comp_init,
	.comp_reset     = mppe_comp_reset,
	.compress       = mppe_compress,
	.comp_stat      = mppe_comp_stats,
	.decomp_alloc   = mppe_alloc,
	.decomp_free    = mppe_free,
	.decomp_init    = mppe_decomp_init,
	.decomp_reset   = mppe_decomp_reset,
	.decompress     = mppe_decompress,
	.incomp         = mppe_incomp,
	.decomp_stat    = mppe_comp_stats,
	.owner          = THIS_MODULE,
	.comp_extra     = MPPE_PAD,
};

/*
 * ppp_mppe_init()
 *
 * Prior to allowing load, try to load the arc4 and sha1 crypto
 * libraries.  The actual use will be allocated later, but
 * this way the module will fail to insmod if they aren't available.
 */

static int __init ppp_mppe_init(void)
{
	int answer;
	if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
	      crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC)))
		return -ENODEV;

	sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
	if (!sha_pad)
		return -ENOMEM;
	sha_pad_init(sha_pad);

	answer = ppp_register_compressor(&ppp_mppe);

	if (answer == 0)
		printk(KERN_INFO "PPP MPPE Compression module registered\n");
	else
		kfree(sha_pad);

	return answer;
}

static void __exit ppp_mppe_cleanup(void)
{
	ppp_unregister_compressor(&ppp_mppe);
	kfree(sha_pad);
}

module_init(ppp_mppe_init);
module_exit(ppp_mppe_cleanup);
Example #17
static int esp_init_state(struct xfrm_state *x)
{
	struct esp_data *esp = NULL;
	struct crypto_blkcipher *tfm;
	u32 align;

	/* null auth and encryption can have zero length keys */
	if (x->aalg) {
		if (x->aalg->alg_key_len > 512)
			goto error;
	}
	if (x->ealg == NULL)
		goto error;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;
		struct crypto_hash *hash;

		esp->auth.key = x->aalg->alg_key;
		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
					 CRYPTO_ALG_ASYNC);
		if (IS_ERR(hash))
			goto error;

		esp->auth.tfm = hash;
		if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
			goto error;

		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);

		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
		    crypto_hash_digestsize(hash)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_hash_digestsize(hash),
				 aalg_desc->uinfo.auth.icv_fullbits/8);
			goto error;
		}

		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
		if (!esp->auth.work_icv)
			goto error;
	}
	esp->conf.key = x->ealg->alg_key;
	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;
	esp->conf.tfm = tfm;
	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
	esp->conf.padlen = 0;
	if (esp->conf.ivlen) {
		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
		if (unlikely(esp->conf.ivec == NULL))
			goto error;
		esp->conf.ivinitted = 0;
	}
	if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
		goto error;
	x->props.header_len = sizeof(struct ip_esp_hdr) + esp->conf.ivlen;
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	else if (x->props.mode == XFRM_MODE_BEET)
		x->props.header_len += IPV4_BEET_PHMAXLEN;
	if (x->encap) {
		struct xfrm_encap_tmpl *encap = x->encap;

		switch (encap->encap_type) {
		default:
			goto error;
		case UDP_ENCAP_ESPINUDP:
			x->props.header_len += sizeof(struct udphdr);
			break;
		case UDP_ENCAP_ESPINUDP_NON_IKE:
			x->props.header_len += sizeof(struct udphdr) + 2 * sizeof(u32);
			break;
		}
	}
	x->data = esp;
	align = ALIGN(crypto_blkcipher_blocksize(esp->conf.tfm), 4);
	if (esp->conf.padlen)
		align = max_t(u32, align, esp->conf.padlen);
	x->props.trailer_len = align + 1 + esp->auth.icv_trunc_len;
	return 0;

error:
	x->data = esp;
	esp_destroy(x);
	x->data = NULL;
	return -EINVAL;
}
Example #18
int pohmelfs_crypto_process_input_data(struct pohmelfs_crypto_engine *e, u64 cmd_iv,
		void *data, struct page *page, unsigned int size)
{
	int err;
	struct scatterlist sg;

	if (!e->cipher && !e->hash)
		return 0;

	dprintk("%s: eng: %p, iv: %llx, data: %p, page: %p/%lu, size: %u.\n",
		__func__, e, cmd_iv, data, page, (page) ? page->index : 0, size);

	if (data) {
		sg_init_one(&sg, data, size);
	} else {
		sg_init_table(&sg, 1);
		sg_set_page(&sg, page, size, 0);
	}

	if (e->cipher) {
		struct ablkcipher_request *req = e->data + crypto_hash_digestsize(e->hash);
		u8 iv[32];

		memset(iv, 0, sizeof(iv));
		memcpy(iv, &cmd_iv, sizeof(cmd_iv));

		ablkcipher_request_set_tfm(req, e->cipher);

		err = pohmelfs_crypto_process(req, &sg, &sg, iv, 0, e->timeout);
		if (err)
			goto err_out_exit;
	}

	if (e->hash) {
		struct hash_desc desc;
		void *dst = e->data + e->size/2;

		desc.tfm = e->hash;
		desc.flags = 0;

		err = crypto_hash_init(&desc);
		if (err)
			goto err_out_exit;

		err = crypto_hash_update(&desc, &sg, size);
		if (err)
			goto err_out_exit;

		err = crypto_hash_final(&desc, dst);
		if (err)
			goto err_out_exit;

		err = !!memcmp(dst, e->data, crypto_hash_digestsize(e->hash));

		if (err) {
#ifdef CONFIG_POHMELFS_DEBUG
			unsigned int i;
			unsigned char *recv = e->data, *calc = dst;

			dprintk("%s: eng: %p, hash: %p, cipher: %p: iv : %llx, hash mismatch (recv/calc): ",
					__func__, e, e->hash, e->cipher, cmd_iv);
			for (i = 0; i < crypto_hash_digestsize(e->hash); ++i) {
#if 0
				dprintka("%02x ", recv[i]);
				if (recv[i] != calc[i]) {
					dprintka("| calc byte: %02x.\n", calc[i]);
					break;
				}
#else
				dprintka("%02x/%02x ", recv[i], calc[i]);
#endif
			}
			dprintk("\n");
#endif
			goto err_out_exit;
		} else {
			dprintk("%s: eng: %p, hash: %p, cipher: %p: hashes matched.\n",
					__func__, e, e->hash, e->cipher);
		}
	}

	dprintk("%s: eng: %p, size: %u, hash: %p, cipher: %p: completed.\n",
			__func__, e, e->size, e->hash, e->cipher);

	return 0;

err_out_exit:
	dprintk("%s: eng: %p, hash: %p, cipher: %p: err: %d.\n",
			__func__, e, e->hash, e->cipher, err);
	return err;
}
Example #19
static int tf_self_test_integrity(const char *alg_name, struct module *mod)
{
	unsigned char expected[32];
	unsigned char actual[32];
	struct scatterlist *sg = NULL;
	struct hash_desc desc = {NULL, 0};
	size_t digest_length;
	unsigned char *const key = tf_integrity_hmac_sha256_key;
	size_t const key_length = sizeof(tf_integrity_hmac_sha256_key);
	int error;

	if (mod->raw_binary_ptr == NULL)
		return -ENXIO;
	if (tf_integrity_hmac_sha256_expected_value == NULL)
		return -ENOENT;
	INFO("expected=%s", tf_integrity_hmac_sha256_expected_value);
	error = scan_hex(expected, sizeof(expected),
			 tf_integrity_hmac_sha256_expected_value);
	if (error < 0) {
		pr_err("tf_driver: Badly formatted hmac_sha256 parameter "
		       "(should be a hex string)\n");
		return -EIO;
	};

	desc.tfm = crypto_alloc_hash(alg_name, 0, 0);
	if (IS_ERR_OR_NULL(desc.tfm)) {
		ERROR("crypto_alloc_hash(%s) failed", alg_name);
		error = (desc.tfm == NULL ? -ENOMEM : (int)desc.tfm);
		goto abort;
	}
	digest_length = crypto_hash_digestsize(desc.tfm);
	INFO("alg_name=%s driver_name=%s digest_length=%u",
	     alg_name,
	     crypto_tfm_alg_driver_name(crypto_hash_tfm(desc.tfm)),
	     digest_length);

	error = crypto_hash_setkey(desc.tfm, key, key_length);
	if (error) {
		ERROR("crypto_hash_setkey(%s) failed: %d",
		      alg_name, error);
		goto abort;
	}

	sg = vmalloc_to_sg(mod->raw_binary_ptr, mod->raw_binary_size);
	if (IS_ERR_OR_NULL(sg)) {
		ERROR("vmalloc_to_sg(%lu) failed: %d",
		      mod->raw_binary_size, (int)sg);
		error = (sg == NULL ? -ENOMEM : (int)sg);
		goto abort;
	}

	error = crypto_hash_digest(&desc, sg, mod->raw_binary_size, actual);
	if (error) {
		ERROR("crypto_hash_digest(%s) failed: %d",
		      alg_name, error);
		goto abort;
	}

	kfree(sg);
	crypto_free_hash(desc.tfm);

#ifdef CONFIG_TF_DRIVER_FAULT_INJECTION
	if (tf_fault_injection_mask & TF_CRYPTO_ALG_INTEGRITY) {
		pr_warning("TF: injecting fault in integrity check!\n");
		actual[0] = 0xff;
		actual[1] ^= 0xff;
	}
#endif
	TF_TRACE_ARRAY(expected, digest_length);
	TF_TRACE_ARRAY(actual, digest_length);
	if (memcmp(expected, actual, digest_length)) {
		ERROR("wrong %s digest value", alg_name);
		error = -EINVAL;
	} else {
		INFO("%s: digest successful", alg_name);
		error = 0;
	}

	return error;

abort:
	if (!IS_ERR_OR_NULL(sg))
		kfree(sg);
	if (!IS_ERR_OR_NULL(desc.tfm))
		crypto_free_hash(desc.tfm);
	return error == -ENOMEM ? error : -EIO;
}
Example #20
static int tf_self_test_digest(const char *alg_name,
			       const struct digest_test_vector *tv)
{
	unsigned char digest[64];
	unsigned char input[256];
	struct scatterlist sg;
	struct hash_desc desc = {NULL, 0};
	int error;
	size_t digest_length;

	desc.tfm = crypto_alloc_hash(alg_name, 0, 0);
	if (IS_ERR_OR_NULL(desc.tfm)) {
		ERROR("crypto_alloc_hash(%s) failed", alg_name);
		error = (desc.tfm == NULL ? -ENOMEM : (int)desc.tfm);
		goto abort;
	}

	digest_length = crypto_hash_digestsize(desc.tfm);
	INFO("alg_name=%s driver_name=%s digest_length=%u",
	     alg_name,
	     crypto_tfm_alg_driver_name(crypto_hash_tfm(desc.tfm)),
	     digest_length);
	if (digest_length > sizeof(digest)) {
		ERROR("digest length too large (%zu > %zu)",
		      digest_length, sizeof(digest));
		error = -ENOMEM;
		goto abort;
	}

	if (tv->key != NULL) {
		error = crypto_hash_setkey(desc.tfm, tv->key, tv->key_length);
		if (error) {
			ERROR("crypto_hash_setkey(%s) failed: %d",
			      alg_name, error);
			goto abort;
		}
		TF_TRACE_ARRAY(tv->key, tv->key_length);
	}
	error = crypto_hash_init(&desc);
	if (error) {
		ERROR("crypto_hash_init(%s) failed: %d", alg_name, error);
		goto abort;
	}

	/* The test vector data is in vmalloc'ed memory since it's a module
	   global. Copy it to the stack, since the crypto API doesn't support
	   vmalloc'ed memory. */
	if (tv->length > sizeof(input)) {
		ERROR("data too large (%zu > %zu)",
		      tv->length, sizeof(input));
		error = -ENOMEM;
		goto abort;
	}
	memcpy(input, tv->text, tv->length);
	INFO("sg_init_one(%p, %p, %u)", &sg, input, tv->length);
	sg_init_one(&sg, input, tv->length);

	TF_TRACE_ARRAY(input, tv->length);
	error = crypto_hash_update(&desc, &sg, tv->length);
	if (error) {
		ERROR("crypto_hash_update(%s) failed: %d",
		      alg_name, error);
		goto abort;
	}

	error = crypto_hash_final(&desc, digest);
	if (error) {
		ERROR("crypto_hash_final(%s) failed: %d", alg_name, error);
		goto abort;
	}

	crypto_free_hash(desc.tfm);
	desc.tfm = NULL;

	if (memcmp(digest, tv->digest, digest_length)) {
		TF_TRACE_ARRAY(digest, digest_length);
		ERROR("wrong %s digest value", alg_name);
		pr_err("[SMC Driver] error: SMC Driver POST FAILURE (%s)\n",
		       alg_name);
		error = -EINVAL;
	} else {
		INFO("%s: digest successful", alg_name);
		error = 0;
	}
	return error;

abort:
	if (!IS_ERR_OR_NULL(desc.tfm))
		crypto_free_hash(desc.tfm);
	pr_err("[SMC Driver] error: SMC Driver POST FAILURE (%s)\n", alg_name);
	return error;
}
Example #21
static int esp6_init_state(struct xfrm_state *x)
{
	struct esp_data *esp = NULL;
	struct crypto_blkcipher *tfm;

	/* null auth and encryption can have zero length keys */
	if (x->aalg) {
		if (x->aalg->alg_key_len > 512)
			goto error;
	}
	if (x->ealg == NULL)
		goto error;

	if (x->encap)
		goto error;

	esp = kzalloc(sizeof(*esp), GFP_KERNEL);
	if (esp == NULL)
		return -ENOMEM;

	if (x->aalg) {
		struct xfrm_algo_desc *aalg_desc;
		struct crypto_hash *hash;

		esp->auth.key = x->aalg->alg_key;
		esp->auth.key_len = (x->aalg->alg_key_len+7)/8;
		hash = crypto_alloc_hash(x->aalg->alg_name, 0,
					 CRYPTO_ALG_ASYNC);
		if (IS_ERR(hash))
			goto error;

		esp->auth.tfm = hash;
		if (crypto_hash_setkey(hash, esp->auth.key, esp->auth.key_len))
			goto error;
 
		aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
		BUG_ON(!aalg_desc);
 
		if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
		    crypto_hash_digestsize(hash)) {
			NETDEBUG(KERN_INFO "ESP: %s digestsize %u != %hu\n",
				 x->aalg->alg_name,
				 crypto_hash_digestsize(hash),
				 aalg_desc->uinfo.auth.icv_fullbits/8);
			goto error;
		}
 
		esp->auth.icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
		esp->auth.icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;
 
		esp->auth.work_icv = kmalloc(esp->auth.icv_full_len, GFP_KERNEL);
		if (!esp->auth.work_icv)
			goto error;
	}
	esp->conf.key = x->ealg->alg_key;
	esp->conf.key_len = (x->ealg->alg_key_len+7)/8;
	tfm = crypto_alloc_blkcipher(x->ealg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;
	esp->conf.tfm = tfm;
	esp->conf.ivlen = crypto_blkcipher_ivsize(tfm);
	esp->conf.padlen = 0;
	if (esp->conf.ivlen) {
		esp->conf.ivec = kmalloc(esp->conf.ivlen, GFP_KERNEL);
		if (unlikely(esp->conf.ivec == NULL))
			goto error;
		esp->conf.ivinitted = 0;
	}
	if (crypto_blkcipher_setkey(tfm, esp->conf.key, esp->conf.key_len))
		goto error;
	x->props.header_len = sizeof(struct ipv6_esp_hdr) + esp->conf.ivlen;
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct ipv6hdr);
	x->data = esp;
	return 0;

error:
	x->data = esp;
	esp6_destroy(x);
	x->data = NULL;
	return -EINVAL;
}
Example #22
static int ah_init_state(struct xfrm_state *x)
{
	struct ah_data *ahp = NULL;
	struct xfrm_algo_desc *aalg_desc;
	struct crypto_hash *tfm;

	if (!x->aalg)
		goto error;

	if (x->encap)
		goto error;

	ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
	if (ahp == NULL)
		return -ENOMEM;

	tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm))
		goto error;

	ahp->tfm = tfm;
	if (crypto_hash_setkey(tfm, x->aalg->alg_key,
			       (x->aalg->alg_key_len + 7) / 8))
		goto error;

	/*
	 * Lookup the algorithm description maintained by xfrm_algo,
	 * verify crypto transform properties, and store information
	 * we need for AH processing.  This lookup cannot fail here
	 * after a successful crypto_alloc_hash().
	 */
	aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
	BUG_ON(!aalg_desc);

	if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
	    crypto_hash_digestsize(tfm)) {
		printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
		       x->aalg->alg_name, crypto_hash_digestsize(tfm),
		       aalg_desc->uinfo.auth.icv_fullbits/8);
		goto error;
	}

	ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
	ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

	BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);

	ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL);
	if (!ahp->work_icv)
		goto error;

	x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
					  ahp->icv_trunc_len);
	if (x->props.mode == XFRM_MODE_TUNNEL)
		x->props.header_len += sizeof(struct iphdr);
	x->data = ahp;

	return 0;

error:
	if (ahp) {
		kfree(ahp->work_icv);
		crypto_free_hash(ahp->tfm);
		kfree(ahp);
	}
	return -EINVAL;
}
Example #23
static int ah_init_state(struct xfrm_state *x)
{
    struct ah_data *ahp = NULL;
    struct xfrm_algo_desc *aalg_desc;
    struct crypto_hash *tfm;

    if (!x->aalg)
        goto error;

    if (x->encap)
        goto error;

    ahp = kzalloc(sizeof(*ahp), GFP_KERNEL);
    if (ahp == NULL)
        return -ENOMEM;

    tfm = crypto_alloc_hash(x->aalg->alg_name, 0, CRYPTO_ALG_ASYNC);
    if (IS_ERR(tfm))
        goto error;

    ahp->tfm = tfm;
    if (crypto_hash_setkey(tfm, x->aalg->alg_key,
                           (x->aalg->alg_key_len + 7) / 8))
        goto error;


    aalg_desc = xfrm_aalg_get_byname(x->aalg->alg_name, 0);
    BUG_ON(!aalg_desc);

    if (aalg_desc->uinfo.auth.icv_fullbits/8 !=
            crypto_hash_digestsize(tfm)) {
        printk(KERN_INFO "AH: %s digestsize %u != %hu\n",
               x->aalg->alg_name, crypto_hash_digestsize(tfm),
               aalg_desc->uinfo.auth.icv_fullbits/8);
        goto error;
    }

    ahp->icv_full_len = aalg_desc->uinfo.auth.icv_fullbits/8;
    ahp->icv_trunc_len = aalg_desc->uinfo.auth.icv_truncbits/8;

    BUG_ON(ahp->icv_trunc_len > MAX_AH_AUTH_LEN);

    ahp->work_icv = kmalloc(ahp->icv_full_len, GFP_KERNEL);
    if (!ahp->work_icv)
        goto error;

    x->props.header_len = XFRM_ALIGN8(sizeof(struct ip_auth_hdr) +
                                      ahp->icv_trunc_len);
    if (x->props.mode == XFRM_MODE_TUNNEL)
        x->props.header_len += sizeof(struct iphdr);
    x->data = ahp;

    return 0;

error:
    if (ahp) {
        kfree(ahp->work_icv);
        crypto_free_hash(ahp->tfm);
        kfree(ahp);
    }
    return -EINVAL;
}
Example #24
static int cs598_kernel_thread_fn(void *unused)
{
	unsigned long curr_pfn;
	struct page *page;
	void *data, *pagebuf;
	struct crypto_hash *tfm;
	struct scatterlist sg;
	struct hash_desc desc; //Hopefully this can go on the stack
	int err;

	/* Declare a waitqueue */
	DECLARE_WAITQUEUE(wait,current);

	/* Add wait queue to the head */
	add_wait_queue(&cs598_waitqueue,&wait);
	while (1) {
		/* Set current state to interruptible */
		set_current_state(TASK_INTERRUPTIBLE);

		/* give up the control */
		schedule();

		/* coming back to running state, check if it needs to stop */
		if (kthread_should_stop()) {
			printk(KERN_INFO "cs598: thread needs to stop\n");
			break;
		}

		printk(KERN_INFO "cs598: hash invoked by procfs\n");

		/* Loop over all of phys memory */
		printk(KERN_INFO "cs598: number of physical pages %lu\n", num_physpages);
		printk(KERN_INFO "cs598: number of pages that will be hashed %lu\n", NPAGES);

		for(curr_pfn=1;curr_pfn<NPAGES;curr_pfn++) {
			page = pfn_to_page(curr_pfn);  
			data = kmap(page);
			if(!data) {
				printk(KERN_ALERT "cs598: couldn't map page with pfn %lu\n", curr_pfn);
				break;
			}
			tfm = crypto_alloc_hash(HASH_ALG, 0, CRYPTO_ALG_ASYNC);
			if (IS_ERR(tfm)) {
				/* crypto_alloc_hash() returns ERR_PTR() on failure, not NULL */
				printk(KERN_ALERT "cs598: failed to allocate tfm\n");
				kunmap(page);
				break;
			}
			desc.tfm = tfm;
			desc.flags = 0;
			if(curr_pfn == 1)
				printk(KERN_INFO "cs598: hash size %d\n", crypto_hash_digestsize(tfm));
			if(crypto_hash_digestsize(tfm) > HASH_SIZE) {
				printk(KERN_ALERT "cs598: hash alg report size of %d, but HASH_SIZE = %d\n", crypto_hash_digestsize(tfm),HASH_SIZE);
				goto done_error;
			}
			//pagebuf = kmalloc(PAGE_SIZE, GFP_USER);
			//memcpy(pagebuf, data, PAGE_SIZE);
			sg_init_one(&sg, data, PAGE_SIZE);
			crypto_hash_init(&desc);	
			err = crypto_hash_update(&desc, &sg, PAGE_SIZE);
			if(err) {
				printk(KERN_ALERT "cs598: crypto_shash_update returned: %d\n",
						err);
				goto done_error;
			}
            err = crypto_hash_final(&desc, vmalloc_buffer+(curr_pfn*HASH_SIZE));
			if(err) {
				printk(KERN_ALERT "cs598: crypto_shash_digest returned: %d\n",
						err);
				goto done_error;
			}
			kunmap(page);
			crypto_free_hash(tfm);
            //Don't bring everything to an absolute halt
            if(curr_pfn % 10000)
                schedule();
		}
		hash_done_flag = 1;
		printk(KERN_INFO "cs598: Finished computing hashes\n");
	}
	
	/* exiting thread, set it to running state */
	set_current_state(TASK_RUNNING);
	
	/* remove the waitqueue */
	remove_wait_queue(&cs598_waitqueue, &wait);

	printk(KERN_INFO "cs598: thread killed\n");

	return 0;
	done_error:
		kunmap(page);
		crypto_free_hash(tfm);
		printk(KERN_INFO "cs598: aborted computing hashes\n");
		set_current_state(TASK_RUNNING);
		remove_wait_queue(&cs598_waitqueue, &wait);
		return 0;
}
Example #25
int calculate_integrity(struct file *filp, char *ibuf, int ilen)
{
	int r = -1, ret = -1;
	ssize_t vfs_read_retval = 0;
	loff_t file_offset = 0;
	mm_segment_t oldfs = get_fs();

	char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

	struct scatterlist sg;
	struct crypto_hash *tfm = NULL;
	struct hash_desc desc;
	char *algo = kmalloc(1024, GFP_KERNEL);
	if (!algo) {
		ret = -ENOMEM;
		goto out;
	}
	__initialize_with_null(algo, 1024);

#ifdef EXTRA_CREDIT
	ret = vfs_getxattr(filp->f_path.dentry,
				 INT_TYPE_XATTR,
				 algo,
				 1024);
	if (ret <= 0)
		__initialize_with_null(algo, 1024);

#endif
	if (*algo == '\0')
		strcpy(algo, DEFAULT_ALGO);


	if (!buf)
		goto out;
	__initialize_with_null(ibuf, ilen);

	if (!filp->f_op->read) {
		r = -2;
		goto out;
	}
	filp->f_pos = 0;
	set_fs(KERNEL_DS);

	tfm = crypto_alloc_hash(algo, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		r = -EINVAL;
		goto out;
	}
	desc.tfm = tfm;
	desc.flags = 0;

	if (crypto_hash_digestsize(tfm) > ilen) {
		r = -EINVAL;
		goto out;
	}
	crypto_hash_setkey(tfm, key, strlen(key));
	ret = crypto_hash_init(&desc);
	if (ret) {
		r = ret;
		goto out;
	}
	sg_init_table(&sg, 1);
	file_offset = 0;
	do {
		vfs_read_retval = vfs_read(filp, buf, PAGE_SIZE, &file_offset);

		if (vfs_read_retval < 0) {
			ret = vfs_read_retval;
			goto out;
		}
		sg_set_buf(&sg, (u8 *)buf, vfs_read_retval);
		ret = crypto_hash_update(&desc, &sg, sg.length);

		if (ret) {
			r = ret;
			goto out;
		}
		if (vfs_read_retval < ksize(buf))
			break;
	} while (1);
	ret = crypto_hash_final(&desc, ibuf);
	if (ret) {
		r = ret;
		goto out;
	}
out:
	kfree(buf);
	kfree(algo);
	if (tfm && !IS_ERR(tfm))
		crypto_free_hash(tfm);
	set_fs(oldfs);
	return ret;
}
Example #26
u32
spkm3_make_token(struct spkm3_ctx *ctx,
		   struct xdr_buf * text, struct xdr_netobj * token,
		   int toktype)
{
	s32			checksum_type;
	char			tokhdrbuf[25];
	char			cksumdata[16];
	struct xdr_netobj	md5cksum = {.len = 0, .data = cksumdata};
	struct xdr_netobj	mic_hdr = {.len = 0, .data = tokhdrbuf};
	int			tokenlen = 0;
	unsigned char		*ptr;
	s32			now;
	int			ctxelen = 0, ctxzbit = 0;
	int			md5elen = 0, md5zbit = 0;

	now = jiffies;

	if (ctx->ctx_id.len != 16) {
		dprintk("RPC:       spkm3_make_token BAD ctx_id.len %d\n",
				ctx->ctx_id.len);
		goto out_err;
	}

	if (!g_OID_equal(&ctx->intg_alg, &hmac_md5_oid)) {
		dprintk("RPC:       gss_spkm3_seal: unsupported I-ALG "
				"algorithm.  only support hmac-md5 I-ALG.\n");
		goto out_err;
	} else
		checksum_type = CKSUMTYPE_HMAC_MD5;

	if (!g_OID_equal(&ctx->conf_alg, &cast5_cbc_oid)) {
		dprintk("RPC:       gss_spkm3_seal: unsupported C-ALG "
				"algorithm\n");
		goto out_err;
	}

	if (toktype == SPKM_MIC_TOK) {
		/* Calculate checksum over the mic-header */
		asn1_bitstring_len(&ctx->ctx_id, &ctxelen, &ctxzbit);
		spkm3_mic_header(&mic_hdr.data, &mic_hdr.len, ctx->ctx_id.data,
				ctxelen, ctxzbit);
		if (make_spkm3_checksum(checksum_type, &ctx->derived_integ_key,
					(char *)mic_hdr.data, mic_hdr.len,
					text, 0, &md5cksum))
			goto out_err;

		asn1_bitstring_len(&md5cksum, &md5elen, &md5zbit);
		tokenlen = 10 + ctxelen + 1 + md5elen + 1;

		/* Create token header using generic routines */
		token->len = g_token_size(&ctx->mech_used, tokenlen);

		ptr = token->data;
		g_make_token_header(&ctx->mech_used, tokenlen, &ptr);

		spkm3_make_mic_token(&ptr, tokenlen, &mic_hdr, &md5cksum, md5elen, md5zbit);
	} else if (toktype == SPKM_WRAP_TOK) { /* Not Supported */
		dprintk("RPC:       gss_spkm3_seal: SPKM_WRAP_TOK "
				"not supported\n");
		goto out_err;
	}

	/* XXX need to implement sequence numbers, and ctx->expired */

	return  GSS_S_COMPLETE;
out_err:
	token->data = NULL;
	token->len = 0;
	return GSS_S_FAILURE;
}

static int
spkm3_checksummer(struct scatterlist *sg, void *data)
{
	struct hash_desc *desc = data;

	return crypto_hash_update(desc, sg, sg->length);
}

/* checksum the plaintext data and hdrlen bytes of the token header */
s32
make_spkm3_checksum(s32 cksumtype, struct xdr_netobj *key, char *header,
		    unsigned int hdrlen, struct xdr_buf *body,
		    unsigned int body_offset, struct xdr_netobj *cksum)
{
	char				*cksumname;
	struct hash_desc		desc; /* XXX add to ctx? */
	struct scatterlist		sg[1];
	int err;

	switch (cksumtype) {
		case CKSUMTYPE_HMAC_MD5:
			cksumname = "hmac(md5)";
			break;
		default:
			dprintk("RPC:       spkm3_make_checksum:"
					" unsupported checksum %d", cksumtype);
			return GSS_S_FAILURE;
	}

	if (key->data == NULL || key->len <= 0) return GSS_S_FAILURE;

	desc.tfm = crypto_alloc_hash(cksumname, 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		return GSS_S_FAILURE;
	cksum->len = crypto_hash_digestsize(desc.tfm);
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_setkey(desc.tfm, key->data, key->len);
	if (err)
		goto out;

	err = crypto_hash_init(&desc);
	if (err)
		goto out;

	sg_set_buf(sg, header, hdrlen);
	crypto_hash_update(&desc, sg, sg->length);

	xdr_process_buf(body, body_offset, body->len - body_offset,
			spkm3_checksummer, &desc);
	crypto_hash_final(&desc, cksum->data);

out:
	crypto_free_hash(desc.tfm);

	return err ? GSS_S_FAILURE : 0;
}
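All of the examples above target the legacy synchronous hash interface (struct crypto_hash / struct hash_desc), which was removed from mainline around Linux 4.6. On current kernels the same one-shot pattern is written against the shash API, where crypto_shash_digestsize() plays the role of crypto_hash_digestsize(). The following is a minimal sketch, assuming a recent kernel and an illustrative function name, not code from any of the examples.

#include <crypto/hash.h>
#include <linux/err.h>

/* Rough shash equivalent of the one-shot pattern shown in the examples. */
static int example_shash_digest(const char *alg_name, const void *data,
				unsigned int len, u8 *out,
				unsigned int out_size)
{
	struct crypto_shash *tfm;
	int err;

	tfm = crypto_alloc_shash(alg_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	if (crypto_shash_digestsize(tfm) > out_size) {
		crypto_free_shash(tfm);
		return -EINVAL;
	}

	{
		/* On-stack descriptor provided by the shash API. */
		SHASH_DESC_ON_STACK(desc, tfm);

		desc->tfm = tfm;
		err = crypto_shash_digest(desc, data, len, out);
	}

	crypto_free_shash(tfm);
	return err;
}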