Example #1
static int virtblk_add_req_scsi(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, cmd, sense, inhdr, *sgs[6];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;
	sg_init_one(&cmd, vbr->sreq.cmd, vbr->sreq.cmd_len);
	sgs[num_out++] = &cmd;

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&sense, vbr->sense, SCSI_SENSE_BUFFERSIZE);
	sgs[num_out + num_in++] = &sense;
	sg_init_one(&inhdr, &vbr->in_hdr, sizeof(vbr->in_hdr));
	sgs[num_out + num_in++] = &inhdr;
	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
Example #2
/*
 * Encryption operation is performed with the public key.  Hence it is done
 * in software
 */
static int tpm_key_encrypt(struct tpm_key *tk,
			   struct kernel_pkey_params *params,
			   const void *in, void *out)
{
	char alg_name[CRYPTO_MAX_ALG_NAME];
	struct crypto_akcipher *tfm;
	struct akcipher_request *req;
	struct crypto_wait cwait;
	struct scatterlist in_sg, out_sg;
	uint8_t der_pub_key[PUB_KEY_BUF_SIZE];
	uint32_t der_pub_key_len;
	int ret;

	pr_devel("==>%s()\n", __func__);

	ret = determine_akcipher(params->encoding, params->hash_algo, alg_name);
	if (ret < 0)
		return ret;

	tfm = crypto_alloc_akcipher(alg_name, 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	der_pub_key_len = derive_pub_key(tk->pub_key, tk->pub_key_len,
					 der_pub_key);

	ret = crypto_akcipher_set_pub_key(tfm, der_pub_key, der_pub_key_len);
	if (ret < 0)
		goto error_free_tfm;

	ret = -ENOMEM;
	req = akcipher_request_alloc(tfm, GFP_KERNEL);
	if (!req)
		goto error_free_tfm;

	sg_init_one(&in_sg, in, params->in_len);
	sg_init_one(&out_sg, out, params->out_len);
	akcipher_request_set_crypt(req, &in_sg, &out_sg, params->in_len,
				   params->out_len);
	crypto_init_wait(&cwait);
	akcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &cwait);

	ret = crypto_akcipher_encrypt(req);
	ret = crypto_wait_req(ret, &cwait);

	if (ret == 0)
		ret = req->dst_len;

	akcipher_request_free(req);
error_free_tfm:
	crypto_free_akcipher(tfm);
	pr_devel("<==%s() = %d\n", __func__, ret);
	return ret;
}
Example #3
void ide_map_sg(ide_drive_t *drive, struct request *rq)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;

	if (rq->cmd_type == REQ_TYPE_ATA_TASKFILE) {
		sg_init_one(sg, rq->buffer, rq->nr_sectors * SECTOR_SIZE);
		hwif->sg_nents = 1;
	} else if (!rq->bio) {
		sg_init_one(sg, rq->data, rq->data_len);
		hwif->sg_nents = 1;
	} else {
		hwif->sg_nents = blk_rq_map_sg(drive->queue, rq, sg);
	}
}
Example #4
// given a string, generate a 32-byte (256-bit) key
int generate_key(char *pwd, u8 *pkey)
{
	int len_pwd = strlen(pwd);
	struct scatterlist sg;
	struct crypto_hash *tfm;
	struct hash_desc desc;
	int i;

	unsigned char output[SHA1_LENGTH];  // key generated
	char *buf = kmalloc(MAX_PWD, GFP_KERNEL); // password buffer
	if (!buf)
		return -ENOMEM;
	if (len_pwd >= MAX_PWD)
		len_pwd = MAX_PWD - 1; // clamp to buffer size
	memset(buf, 0, MAX_PWD);
	strncpy(buf, pwd, len_pwd);

	tfm = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(tfm)) {
		kfree(buf);
		return PTR_ERR(tfm);
	}
	desc.tfm = tfm;
	desc.flags = 0;

	sg_init_one(&sg, buf, len_pwd);
	crypto_hash_init(&desc);

	crypto_hash_update(&desc, &sg, len_pwd);
	crypto_hash_final(&desc, output);

	// widen the 20-byte SHA-1 digest into a 32-byte key by
	// duplicating the first 16 digest bytes
	for (i = 0; i < 16; i++)
		pkey[i] = output[i];

	for (i = 0; i < 16; i++)
		pkey[i + 16] = output[i];

	crypto_free_hash(tfm);
	kfree(buf);

	return 0;
}
Example #5
__be32
nfs4_make_rec_clidname(char *dname, struct xdr_netobj *clname)
{
	struct xdr_netobj cksum;
	struct hash_desc desc;
	struct scatterlist sg;
	__be32 status = nfserr_resource;

	dprintk("NFSD: nfs4_make_rec_clidname for %.*s\n",
			clname->len, clname->data);
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;
	desc.tfm = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(desc.tfm))
		goto out_no_tfm;
	cksum.len = crypto_hash_digestsize(desc.tfm);
	cksum.data = kmalloc(cksum.len, GFP_KERNEL);
	if (cksum.data == NULL)
		goto out;

	sg_init_one(&sg, clname->data, clname->len);

	if (crypto_hash_digest(&desc, &sg, sg.length, cksum.data)) {
		kfree(cksum.data);
		goto out;
	}

	md5_to_hex(dname, cksum.data);

	kfree(cksum.data);
	status = nfs_ok;
out:
	crypto_free_hash(desc.tfm);
out_no_tfm:
	return status;
}
Example #6
/* Initialise ESSIV - compute salt but no local memory allocations */
static int crypt_iv_essiv_init(struct crypt_config *cc)
{
	struct iv_essiv_private *essiv = &cc->iv_gen_private.essiv;
	struct hash_desc desc;
	struct scatterlist sg;
	struct crypto_cipher *essiv_tfm;
	int err;

	sg_init_one(&sg, cc->key, cc->key_size);
	desc.tfm = essiv->hash_tfm;
	desc.flags = CRYPTO_TFM_REQ_MAY_SLEEP;

	err = crypto_hash_digest(&desc, &sg, cc->key_size, essiv->salt);
	if (err)
		return err;

	essiv_tfm = cc->iv_private;

	err = crypto_cipher_setkey(essiv_tfm, essiv->salt,
			    crypto_hash_digestsize(essiv->hash_tfm));
	if (err)
		return err;

	return 0;
}
Example #7
/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
 * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
 * so the payload length increases with 8 bytes.
 *
 * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
 */
static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
    struct prism2_wep_data *wep = priv;
    u32 klen, len;
    u8 key[WEP_KEY_LEN + 3];
    u8 *pos;
    struct cb_desc *tcb_desc = (struct cb_desc *)(skb->cb +
                               MAX_DEV_ADDR_SIZE);
    struct blkcipher_desc desc = {.tfm = wep->tx_tfm};
    u32 crc;
    u8 *icv;
    struct scatterlist sg;
    if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
            skb->len < hdr_len) {
        printk(KERN_ERR "Error!!! headroom=%d tailroom=%d skblen=%d"
               " hdr_len=%d\n", skb_headroom(skb), skb_tailroom(skb),
               skb->len, hdr_len);
        return -1;
    }
    len = skb->len - hdr_len;
    pos = skb_push(skb, 4);
    memmove(pos, pos + 4, hdr_len);
    pos += hdr_len;

    klen = 3 + wep->key_len;

    wep->iv++;

    /* Fluhrer, Mantin, and Shamir have reported weaknesses in the key
     * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N)
     * can be used to speedup attacks, so avoid using them. */
    if ((wep->iv & 0xff00) == 0xff00) {
        u8 B = (wep->iv >> 16) & 0xff;
        if (B >= 3 && B < klen)
            wep->iv += 0x0100;
    }

    /* Prepend 24-bit IV to RC4 key and TX frame */
    *pos++ = key[0] = (wep->iv >> 16) & 0xff;
    *pos++ = key[1] = (wep->iv >> 8) & 0xff;
    *pos++ = key[2] = wep->iv & 0xff;
    *pos++ = wep->key_idx << 6;

    /* Copy rest of the WEP key (the secret part) */
    memcpy(key + 3, wep->key, wep->key_len);

    if (!tcb_desc->bHwSec) {

        /* Append little-endian CRC32 and encrypt it to produce ICV */
        crc = ~crc32_le(~0, pos, len);
        icv = skb_put(skb, 4);
        icv[0] = crc;
        icv[1] = crc >> 8;
        icv[2] = crc >> 16;
        icv[3] = crc >> 24;

        sg_init_one(&sg, pos, len+4);
        crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
        return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
    }

    /* hardware encryption path: the frame is handed over unencrypted */
    return 0;
}
Example #8
static int smp_e(struct crypto_blkcipher *tfm, const u8 *k, u8 *r)
{
	struct blkcipher_desc desc;
	struct scatterlist sg;
	int err, iv_len;
	unsigned char iv[128];

	if (tfm == NULL) {
		BT_ERR("tfm %p", tfm);
		return -EINVAL;
	}

	desc.tfm = tfm;
	desc.flags = 0;

	err = crypto_blkcipher_setkey(tfm, k, 16);
	if (err) {
		BT_ERR("cipher setkey failed: %d", err);
		return err;
	}

	sg_init_one(&sg, r, 16);

	iv_len = crypto_blkcipher_ivsize(tfm);
	if (iv_len) {
		memset(iv, 0xff, iv_len);
		crypto_blkcipher_set_iv(tfm, iv, iv_len);
	}

	err = crypto_blkcipher_encrypt(&desc, &sg, &sg, 16);
	if (err)
		BT_ERR("Encrypt data error %d", err);

	return err;
}
Example #9
/**
 * Calculate hash digest for the passed buffer.
 *
 * This should be used when computing the hash on a single contiguous buffer.
 * It combines the hash initialization, computation, and cleanup.
 *
 * \param[in] hash_alg	id of hash algorithm (CFS_HASH_ALG_*)
 * \param[in] buf	data buffer on which to compute hash
 * \param[in] buf_len	length of \a buf in bytes
 * \param[in] key	initial value/state for algorithm, if \a key = NULL
 *			use default initial value
 * \param[in] key_len	length of \a key in bytes
 * \param[out] hash	pointer to computed hash value; if \a hash = NULL then
 *			\a hash_len is set to the digest size in bytes and
 *			-ENOSPC is returned
 * \param[in,out] hash_len size of \a hash buffer
 *
 * \retval -EINVAL       \a buf, \a buf_len, \a hash_len, or \a hash_alg invalid
 * \retval -ENOENT       \a hash_alg is unsupported
 * \retval -ENOSPC       \a hash is NULL, or \a hash_len less than digest size
 * \retval		0 for success
 * \retval		negative errno for other errors from lower layers.
 */
int cfs_crypto_hash_digest(enum cfs_crypto_hash_alg hash_alg,
			   const void *buf, unsigned int buf_len,
			   unsigned char *key, unsigned int key_len,
			   unsigned char *hash, unsigned int *hash_len)
{
	struct scatterlist	sl;
	struct hash_desc	hdesc;
	int			err;
	const struct cfs_crypto_hash_type	*type;

	if (buf == NULL || buf_len == 0 || hash_len == NULL)
		return -EINVAL;

	err = cfs_crypto_hash_alloc(hash_alg, &type, &hdesc, key, key_len);
	if (err != 0)
		return err;

	if (hash == NULL || *hash_len < type->cht_size) {
		*hash_len = type->cht_size;
		crypto_free_hash(hdesc.tfm);
		return -ENOSPC;
	}
	sg_init_one(&sl, (void *)buf, buf_len);

	hdesc.flags = 0;
	err = crypto_hash_digest(&hdesc, &sl, sl.length, hash);
	crypto_free_hash(hdesc.tfm);

	return err;
}
Example #10
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
                  u32 opcode, void *buf, unsigned len)
{
    struct mmc_request mrq = {0};
    struct mmc_command cmd = {0};
    struct mmc_data data = {0};
    struct scatterlist sg;
    void *data_buf;

    /* dma onto stack is unsafe/nonportable, but callers to this
     * routine normally provide temporary on-stack buffers ...
     */
    data_buf = kmalloc(len, GFP_KERNEL);
    if (data_buf == NULL)
        return -ENOMEM;

    mrq.cmd = &cmd;
    mrq.data = &data;

    cmd.opcode = opcode;
    cmd.arg = 0;

    /* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
     * rely on callers to never use this with "native" calls for reading
     * CSD or CID.  Native versions of those commands use the R2 type,
     * not R1 plus a data block.
     */
    cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

    data.blksz = len;
    data.blocks = 1;
    data.flags = MMC_DATA_READ;
    data.sg = &sg;
    data.sg_len = 1;

    sg_init_one(&sg, data_buf, len);

    if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
        /*
         * The spec states that CSR and CID accesses have a timeout
         * of 64 clock cycles.
         */
        data.timeout_ns = 0;
        data.timeout_clks = 64;
    } else
        mmc_set_data_timeout(&data, card);

    mmc_wait_for_req(host, &mrq);

    memcpy(buf, data_buf, len);
    kfree(data_buf);

    if (cmd.error)
        return cmd.error;
    if (data.error)
        return data.error;

    return 0;
}
Example #11
static int
cc_set_aead_conf_buf(struct device *dev, struct aead_req_ctx *areq_ctx,
		     u8 *config_data, struct buffer_array *sg_data,
		     unsigned int assoclen)
{
	dev_dbg(dev, " handle additional data config set to DLLI\n");
	/* create sg for the current buffer */
	sg_init_one(&areq_ctx->ccm_adata_sg, config_data,
		    AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size);
	if (dma_map_sg(dev, &areq_ctx->ccm_adata_sg, 1, DMA_TO_DEVICE) != 1) {
		dev_err(dev, "dma_map_sg() config buffer failed\n");
		return -ENOMEM;
	}
	dev_dbg(dev, "Mapped curr_buff: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
		&sg_dma_address(&areq_ctx->ccm_adata_sg),
		sg_page(&areq_ctx->ccm_adata_sg),
		sg_virt(&areq_ctx->ccm_adata_sg),
		areq_ctx->ccm_adata_sg.offset, areq_ctx->ccm_adata_sg.length);
	/* prepare for case of MLLI */
	if (assoclen > 0) {
		cc_add_sg_entry(dev, sg_data, 1, &areq_ctx->ccm_adata_sg,
				(AES_BLOCK_SIZE + areq_ctx->ccm_hdr_size),
				0, false, NULL);
	}
	return 0;
}
Example #12
static int init_vqs(struct virtio_balloon *vb)
{
	struct virtqueue *vqs[3];
	vq_callback_t *callbacks[] = { balloon_ack, balloon_ack, stats_request };
	static const char * const names[] = { "inflate", "deflate", "stats" };
	int err, nvqs;

	/*
	 * We expect two virtqueues: inflate and deflate, and
	 * optionally stat.
	 */
	nvqs = virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ) ? 3 : 2;
	err = vb->vdev->config->find_vqs(vb->vdev, nvqs, vqs, callbacks, names,
			NULL);
	if (err)
		return err;

	vb->inflate_vq = vqs[0];
	vb->deflate_vq = vqs[1];
	if (virtio_has_feature(vb->vdev, VIRTIO_BALLOON_F_STATS_VQ)) {
		struct scatterlist sg;
		vb->stats_vq = vqs[2];

		/*
		 * Prime this virtqueue with one buffer so the hypervisor can
		 * use it to signal us later (it can't be broken yet!).
		 */
		sg_init_one(&sg, vb->stats, sizeof vb->stats);
		if (virtqueue_add_outbuf(vb->stats_vq, &sg, 1, vb, GFP_KERNEL)
		    < 0)
			BUG();
		virtqueue_kick(vb->stats_vq);
	}
	return 0;
}
Example #13
unsigned int mmc_queue_map_sg(struct mmc_queue *mq)
{
	unsigned int sg_len;

	if (!mq->bounce_buf)
		return blk_rq_map_sg(mq->queue, mq->req, mq->sg);

	BUG_ON(!mq->bounce_sg);

	sg_len = blk_rq_map_sg(mq->queue, mq->req, mq->bounce_sg);

	mq->bounce_sg_len = sg_len;

	/*
	 * Shortcut in the event we only get a single entry.
	 */
	if (sg_len == 1) {
		memcpy(mq->sg, mq->bounce_sg, sizeof(struct scatterlist));
		return 1;
	}

	sg_init_one(mq->sg, mq->bounce_buf, 0);

	while (sg_len) {
		mq->sg[0].length += mq->bounce_sg[sg_len - 1].length;
		sg_len--;
	}

	return 1;
}
Example #14
static int ccp_aes_setkey(struct crypto_ablkcipher *tfm, const u8 *key,
			  unsigned int key_len)
{
	struct ccp_ctx *ctx = crypto_tfm_ctx(crypto_ablkcipher_tfm(tfm));
	struct ccp_crypto_ablkcipher_alg *alg =
		ccp_crypto_ablkcipher_alg(crypto_ablkcipher_tfm(tfm));

	switch (key_len) {
	case AES_KEYSIZE_128:
		ctx->u.aes.type = CCP_AES_TYPE_128;
		break;
	case AES_KEYSIZE_192:
		ctx->u.aes.type = CCP_AES_TYPE_192;
		break;
	case AES_KEYSIZE_256:
		ctx->u.aes.type = CCP_AES_TYPE_256;
		break;
	default:
		crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_RES_BAD_KEY_LEN);
		return -EINVAL;
	}
	ctx->u.aes.mode = alg->mode;
	ctx->u.aes.key_len = key_len;

	memcpy(ctx->u.aes.key, key, key_len);
	sg_init_one(&ctx->u.aes.key_sg, ctx->u.aes.key, key_len);

	return 0;
}
Example #15
/**
 * derive_key_aes() - Derive a key using AES-128-ECB
 * @deriving_key: Encryption key used for derivation.
 * @source_key:   Source key to which to apply derivation.
 * @derived_key:  Derived key.
 *
 * Return: Zero on success; non-zero otherwise.
 */
static int derive_key_aes(u8 deriving_key[FS_AES_128_ECB_KEY_SIZE],
				u8 source_key[FS_AES_256_XTS_KEY_SIZE],
				u8 derived_key[FS_AES_256_XTS_KEY_SIZE])
{
	int res = 0;
	struct ablkcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist src_sg, dst_sg;
	struct crypto_ablkcipher *tfm = crypto_alloc_ablkcipher("ecb(aes)", 0,
								0);

	if (IS_ERR(tfm)) {
		res = PTR_ERR(tfm);
		tfm = NULL;
		goto out;
	}
	crypto_ablkcipher_set_flags(tfm, CRYPTO_TFM_REQ_WEAK_KEY);
	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		res = -ENOMEM;
		goto out;
	}
	ablkcipher_request_set_callback(req,
			CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
			derive_crypt_complete, &ecr);
	res = crypto_ablkcipher_setkey(tfm, deriving_key,
					FS_AES_128_ECB_KEY_SIZE);
	if (res < 0)
		goto out;

	sg_init_one(&src_sg, source_key, FS_AES_256_XTS_KEY_SIZE);
	sg_init_one(&dst_sg, derived_key, FS_AES_256_XTS_KEY_SIZE);
	ablkcipher_request_set_crypt(req, &src_sg, &dst_sg,
					FS_AES_256_XTS_KEY_SIZE, NULL);
	res = crypto_ablkcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
out:
	if (req)
		ablkcipher_request_free(req);
	if (tfm)
		crypto_free_ablkcipher(tfm);
	return res;
}
Example #16
int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
{
	int err;
	struct mmc_request mrq = {0};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	void *data_buf;

	BUG_ON(!card);
	BUG_ON(!card->host);
	BUG_ON(!scr);

	/* NOTE: caller guarantees scr is heap-allocated */

	err = mmc_app_cmd(card->host, card);
	if (err)
		return err;

	/* dma onto stack is unsafe/nonportable, but callers to this
	 * routine normally provide temporary on-stack buffers ...
	 */
	data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL);
	if (data_buf == NULL)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = SD_APP_SEND_SCR;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, data_buf, 8);

	mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(card->host, &mrq);

	memcpy(scr, data_buf, sizeof(card->raw_scr));
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	scr[0] = be32_to_cpu(scr[0]);
	scr[1] = be32_to_cpu(scr[1]);

	return 0;
}
Example #17
/**
 * Update hash digest computed on the specified data
 *
 * \param[in] hdesc	hash state descriptor
 * \param[in] buf	data buffer on which to compute the hash
 * \param[in] buf_len	length of \a buf in bytes
 *
 * \retval		0 for success
 * \retval		negative errno on failure
 */
int cfs_crypto_hash_update(struct cfs_crypto_hash_desc *hdesc,
			   const void *buf, unsigned int buf_len)
{
	struct scatterlist sl;

	sg_init_one(&sl, (void *)buf, buf_len);

	return crypto_hash_update((struct hash_desc *)hdesc, &sl, sl.length);
}
Example #18
static int lpc32xx_xmit_dma(struct mtd_info *mtd, dma_addr_t dma,
			    void *mem, int len, enum dma_transfer_direction dir)
{
	struct nand_chip *chip = mtd->priv;
	struct lpc32xx_nand_host *host = chip->priv;
	struct dma_async_tx_descriptor *desc;
	int flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
	int res;

	host->dma_slave_config.direction = dir;
	host->dma_slave_config.src_addr = dma;
	host->dma_slave_config.dst_addr = dma;
	host->dma_slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES;
	host->dma_slave_config.src_maxburst = 4;
	host->dma_slave_config.dst_maxburst = 4;
	/* DMA controller does flow control: */
	host->dma_slave_config.device_fc = false;
	if (dmaengine_slave_config(host->dma_chan, &host->dma_slave_config)) {
		dev_err(mtd->dev.parent, "Failed to setup DMA slave\n");
		return -ENXIO;
	}

	sg_init_one(&host->sgl, mem, len);

	res = dma_map_sg(host->dma_chan->device->dev, &host->sgl, 1,
			 DMA_BIDIRECTIONAL);
	if (res != 1) {
		dev_err(mtd->dev.parent, "Failed to map sg list\n");
		return -ENXIO;
	}
	desc = dmaengine_prep_slave_sg(host->dma_chan, &host->sgl, 1, dir,
				       flags);
	if (!desc) {
		dev_err(mtd->dev.parent, "Failed to prepare slave sg\n");
		goto out1;
	}

	init_completion(&host->comp);
	desc->callback = lpc32xx_dma_complete_func;
	desc->callback_param = &host->comp;

	dmaengine_submit(desc);
	dma_async_issue_pending(host->dma_chan);

	wait_for_completion_timeout(&host->comp, msecs_to_jiffies(1000));

	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);

	return 0;
out1:
	dma_unmap_sg(host->dma_chan->device->dev, &host->sgl, 1,
		     DMA_BIDIRECTIONAL);
	return -ENXIO;
}
Example #19
static void register_buffer(void)
{
	struct scatterlist sg;

	sg_init_one(&sg, random_data+data_left, RANDOM_DATA_SIZE-data_left);
	/* There should always be room for one buffer. */
	if (vq->vq_ops->add_buf(vq, &sg, 0, 1, random_data) < 0)
		BUG();
	vq->vq_ops->kick(vq);
}
Example #20
inline void
iscsi_tcp_dgst_header(struct ahash_request *hash, const void *hdr,
		      size_t hdrlen, unsigned char digest[ISCSI_DIGEST_SIZE])
{
	struct scatterlist sg;

	sg_init_one(&sg, hdr, hdrlen);
	ahash_request_set_crypt(hash, &sg, digest, hdrlen);
	crypto_ahash_digest(hash);
}
Example #21
int mmc_app_send_scr(struct mmc_card *card, u32 *scr)
{
	int err;
	struct mmc_request mrq = {NULL};
	struct mmc_command cmd = {0};
	struct mmc_data data = {0};
	struct scatterlist sg;
	void *data_buf;

	BUG_ON(!card);
	BUG_ON(!card->host);
	BUG_ON(!scr);

	err = mmc_app_cmd(card->host, card);
	if (err)
		return err;

	data_buf = kmalloc(sizeof(card->raw_scr), GFP_KERNEL);
	if (data_buf == NULL)
		return -ENOMEM;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = SD_APP_SEND_SCR;
	cmd.arg = 0;
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = 8;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, data_buf, 8);

	mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(card->host, &mrq);

	memcpy(scr, data_buf, sizeof(card->raw_scr));
	kfree(data_buf);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	scr[0] = be32_to_cpu(scr[0]);
	scr[1] = be32_to_cpu(scr[1]);

	return 0;
}
Example #22
static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct prism2_wep_data *wep = priv;
	struct blkcipher_desc desc = { .tfm = wep->tx_tfm };
	u32 klen, len;
	u8 key[WEP_KEY_LEN + 3];
	u8 *pos;
	u32 crc;
	u8 *icv;
	struct scatterlist sg;

	if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
	    skb->len < hdr_len)
		return -1;

	len = skb->len - hdr_len;
	pos = skb_push(skb, 4);
	memmove(pos, pos + 4, hdr_len);
	pos += hdr_len;

	klen = 3 + wep->key_len;

	wep->iv++;

	/* Fluhrer, Mantin, and Shamir have reported weaknesses in the key
	 * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N)
	 * can be used to speedup attacks, so avoid using them. */
	if ((wep->iv & 0xff00) == 0xff00) {
		u8 B = (wep->iv >> 16) & 0xff;
		if (B >= 3 && B < klen)
			wep->iv += 0x0100;
	}

	/* Prepend 24-bit IV to RC4 key and TX frame */
	*pos++ = key[0] = (wep->iv >> 16) & 0xff;
	*pos++ = key[1] = (wep->iv >> 8) & 0xff;
	*pos++ = key[2] = wep->iv & 0xff;
	*pos++ = wep->key_idx << 6;

	/* Copy rest of the WEP key (the secret part) */
	memcpy(key + 3, wep->key, wep->key_len);

	/* Append little-endian CRC32 and encrypt it to produce ICV */
	crc = ~crc32_le(~0, pos, len);
	icv = skb_put(skb, 4);
	icv[0] = crc;
	icv[1] = crc >> 8;
	icv[2] = crc >> 16;
	icv[3] = crc >> 24;

	crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
	sg_init_one(&sg, pos, len + 4);

	return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
}
Example #23
static int virtblk_add_req(struct virtqueue *vq, struct virtblk_req *vbr,
		struct scatterlist *data_sg, bool have_data)
{
	struct scatterlist hdr, status, *sgs[3];
	unsigned int num_out = 0, num_in = 0;

	sg_init_one(&hdr, &vbr->out_hdr, sizeof(vbr->out_hdr));
	sgs[num_out++] = &hdr;

	if (have_data) {
		if (vbr->out_hdr.type & cpu_to_virtio32(vq->vdev, VIRTIO_BLK_T_OUT))
			sgs[num_out++] = data_sg;
		else
			sgs[num_out + num_in++] = data_sg;
	}

	sg_init_one(&status, &vbr->status, sizeof(vbr->status));
	sgs[num_out + num_in++] = &status;

	return virtqueue_add_sgs(vq, sgs, num_out, num_in, vbr, GFP_ATOMIC);
}
Example #24
static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
						 int enc)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
	struct aead_request *subreq = &rctx->subreq;
	struct scatterlist *dst = req->dst;
	struct scatterlist *cipher = rctx->cipher;
	struct scatterlist *payload = rctx->payload;
	struct scatterlist *assoc = rctx->assoc;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int assoclen = req->assoclen;
	struct page *dstp;
	u8 *vdst;
	u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
			   crypto_aead_alignmask(ctx->child) + 1);

	memcpy(iv, ctx->nonce, 4);
	memcpy(iv + 4, req->iv, 8);

	/* construct cipher/plaintext */
	if (enc)
		memset(rctx->auth_tag, 0, authsize);
	else
		scatterwalk_map_and_copy(rctx->auth_tag, dst,
					 req->cryptlen - authsize,
					 authsize, 0);

	sg_init_one(cipher, rctx->auth_tag, authsize);

	/* construct the aad */
	dstp = sg_page(dst);
	vdst = PageHighMem(dstp) ? NULL : page_address(dstp) + dst->offset;

	sg_init_table(payload, 2);
	sg_set_buf(payload, req->iv, 8);
	scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2);
	assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);

	sg_init_table(assoc, 2);
	sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
		    req->assoc->offset);
	scatterwalk_crypto_chain(assoc, payload, 0, 2);

	aead_request_set_tfm(subreq, ctx->child);
	aead_request_set_callback(subreq, req->base.flags, req->base.complete,
				  req->base.data);
	aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv);
	aead_request_set_assoc(subreq, assoc, assoclen);

	return subreq;
}
Example #25
File: sha.c Project: DenisLug/mptcp
static int qce_ahash_async_req_handle(struct crypto_async_request *async_req)
{
	struct ahash_request *req = ahash_request_cast(async_req);
	struct qce_sha_reqctx *rctx = ahash_request_ctx(req);
	struct qce_sha_ctx *ctx = crypto_tfm_ctx(async_req->tfm);
	struct qce_alg_template *tmpl = to_ahash_tmpl(async_req->tfm);
	struct qce_device *qce = tmpl->qce;
	unsigned long flags = rctx->flags;
	int ret;

	if (IS_SHA_HMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = QCE_SHA_HMAC_KEY_SIZE;
	} else if (IS_CMAC(flags)) {
		rctx->authkey = ctx->authkey;
		rctx->authklen = AES_KEYSIZE_128;
	}

	rctx->src_nents = qce_countsg(req->src, req->nbytes,
				      &rctx->src_chained);
	ret = qce_mapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
			rctx->src_chained);
	if (ret < 0)
		return ret;

	sg_init_one(&rctx->result_sg, qce->dma.result_buf, QCE_RESULT_BUF_SZ);

	ret = qce_mapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
	if (ret < 0)
		goto error_unmap_src;

	ret = qce_dma_prep_sgs(&qce->dma, req->src, rctx->src_nents,
			       &rctx->result_sg, 1, qce_ahash_done, async_req);
	if (ret)
		goto error_unmap_dst;

	qce_dma_issue_pending(&qce->dma);

	ret = qce_start(async_req, tmpl->crypto_alg_type, 0, 0);
	if (ret)
		goto error_terminate;

	return 0;

error_terminate:
	qce_dma_terminate_all(&qce->dma);
error_unmap_dst:
	qce_unmapsg(qce->dev, &rctx->result_sg, 1, DMA_FROM_DEVICE, 0);
error_unmap_src:
	qce_unmapsg(qce->dev, req->src, rctx->src_nents, DMA_TO_DEVICE,
		    rctx->src_chained);
	return ret;
}
Example #26
File: fname.c Project: avagin/linux
/**
 * fname_decrypt() - decrypt a filename
 *
 * The caller must have allocated sufficient memory for the @oname string.
 *
 * Return: 0 on success, -errno on failure
 */
static int fname_decrypt(struct inode *inode,
				const struct fscrypt_str *iname,
				struct fscrypt_str *oname)
{
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist src_sg, dst_sg;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	union fscrypt_iv iv;
	int res;

	/* Allocate request */
	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req)
		return -ENOMEM;
	skcipher_request_set_callback(req,
		CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	/* Initialize IV */
	fscrypt_generate_iv(&iv, 0, ci);

	/* Create decryption request */
	sg_init_one(&src_sg, iname->name, iname->len);
	sg_init_one(&dst_sg, oname->name, oname->len);
	skcipher_request_set_crypt(req, &src_sg, &dst_sg, iname->len, &iv);
	res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	skcipher_request_free(req);
	if (res < 0) {
		fscrypt_err(inode->i_sb,
			    "Filename decryption failed for inode %lu: %d",
			    inode->i_ino, res);
		return res;
	}

	oname->len = strnlen(oname->name, iname->len);
	return 0;
}
Example #27
int crypto4xx_hash_digest(struct ahash_request *req)
{
	struct crypto_ahash *ahash = crypto_ahash_reqtfm(req);
	struct crypto4xx_ctx *ctx = crypto_tfm_ctx(req->base.tfm);
	struct scatterlist dst;
	unsigned int ds = crypto_ahash_digestsize(ahash);

	sg_init_one(&dst, req->result, ds);

	return crypto4xx_build_pd(&req->base, ctx, req->src, &dst,
				  req->nbytes, NULL, 0, ctx->sa_in,
				  ctx->sa_len, 0);
}
Example #28
/*
 * Send a buffer to the output virtqueue of the given crypto_device.
 * If nonblock is false, wait until the host acknowledges receipt of the data.
 */
ssize_t send_buf(struct crypto_device *crdev, void *in_buf, size_t in_count,
                 bool nonblock)
{
	struct scatterlist sg[1];
	struct virtqueue *out_vq;
	ssize_t ret = 0;
	unsigned int len;
	
	debug("Entering\n");
	out_vq = crdev->ovq;
	
	/* Discard any consumed buffers. */
	reclaim_consumed_buffers(crdev);
	
	sg_init_one(sg, in_buf, in_count);
	
	/* add sg list to virtqueue and notify host */
	ret = virtqueue_add_buf(out_vq, sg, 1, 0, in_buf);
	
	if (ret < 0) {
		debug("Oops! Error adding buffer to vqueue\n");
		in_count = 0;
		goto done;
	}
	
	if (ret == 0) {
		printk(KERN_WARNING "ovq_full!!!!\n");
		crdev->ovq_full = true;
	}

	virtqueue_kick(out_vq); 
	
	if (nonblock)
		goto done;
	
	/*
	 * if nonblock is false we wait until the host acknowledges it pushed 
	 * out the data we sent.
	 */
	while (!virtqueue_get_buf(out_vq, &len))
		cpu_relax();
	debug("Leaving\n");

done:
	/*
	 * We're expected to return the amount of data we wrote -- all
	 * of it
	 */
	return in_count;
}
Example #29
static int gcm_hash_remain(struct aead_request *req,
			   struct crypto_gcm_req_priv_ctx *pctx,
			   unsigned int remain,
			   crypto_completion_t complete)
{
	struct ahash_request *ahreq = &pctx->u.ahreq;

	ahash_request_set_callback(ahreq, aead_request_flags(req),
				   complete, req);
	sg_init_one(pctx->src, gcm_zeroes, remain);
	ahash_request_set_crypt(ahreq, pctx->src, NULL, remain);

	return crypto_ahash_update(ahreq);
}
Example #30
/*
 * NOTE: the caller is required to pass a DMA-capable buffer for @buf,
 * or an on-stack buffer (with some overhead in the callee).
 */
static int
mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
		u32 opcode, void *buf, unsigned len)
{
	struct mmc_request mrq = {};
	struct mmc_command cmd = {};
	struct mmc_data data = {};
	struct scatterlist sg;

	mrq.cmd = &cmd;
	mrq.data = &data;

	cmd.opcode = opcode;
	cmd.arg = 0;

	/* NOTE HACK:  the MMC_RSP_SPI_R1 is always correct here, but we
	 * rely on callers to never use this with "native" calls for reading
	 * CSD or CID.  Native versions of those commands use the R2 type,
	 * not R1 plus a data block.
	 */
	cmd.flags = MMC_RSP_SPI_R1 | MMC_RSP_R1 | MMC_CMD_ADTC;

	data.blksz = len;
	data.blocks = 1;
	data.flags = MMC_DATA_READ;
	data.sg = &sg;
	data.sg_len = 1;

	sg_init_one(&sg, buf, len);

	if (opcode == MMC_SEND_CSD || opcode == MMC_SEND_CID) {
		/*
		 * The spec states that CSR and CID accesses have a timeout
		 * of 64 clock cycles.
		 */
		data.timeout_ns = 0;
		data.timeout_clks = 64;
	} else
		mmc_set_data_timeout(&data, card);

	mmc_wait_for_req(host, &mrq);

	if (cmd.error)
		return cmd.error;
	if (data.error)
		return data.error;

	return 0;
}