Example #1
/*
 * Key Derivation, from RFC 3078, RFC 3079.
 * Equivalent to Get_Key() for MS-CHAP as described in RFC 3079.
 */
static void get_new_key_from_sha(struct ppp_mppe_state * state)
{
	struct hash_desc desc;
	struct scatterlist sg[4];
	unsigned int nbytes;

	sg_init_table(sg, 4);

	nbytes = setup_sg(&sg[0], state->master_key, state->keylen);
	nbytes += setup_sg(&sg[1], sha_pad->sha_pad1,
			   sizeof(sha_pad->sha_pad1));
	nbytes += setup_sg(&sg[2], state->session_key, state->keylen);
	nbytes += setup_sg(&sg[3], sha_pad->sha_pad2,
			   sizeof(sha_pad->sha_pad2));

	desc.tfm = state->sha1;
	desc.flags = 0;

	crypto_hash_digest(&desc, sg, nbytes, state->sha1_digest);
}
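The setup_sg() helper used above is defined elsewhere in ppp_mppe.c and is not shown here. A minimal sketch of what it must look like, inferred from the way get_new_key_from_sha() accumulates its return value (an assumption, not the verbatim helper):

static inline unsigned int setup_sg(struct scatterlist *sg,
				    const void *address, unsigned int length)
{
	/* Assumed helper: wrap sg_set_buf() and hand the length back so
	 * the caller can sum the total number of bytes to digest. */
	sg_set_buf(sg, address, length);
	return length;
}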
Example #2
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
				     struct scatterlist *src,
				     unsigned int len)
{
	for (;;) {
		if (!len)
			return src;

		if (src->length > len)
			break;

		len -= src->length;
		src = sg_next(src);
	}

	sg_init_table(dst, 2);
	sg_set_page(dst, sg_page(src), src->length - len, src->offset + len);
	scatterwalk_crypto_chain(dst, sg_next(src), 0, 2);

	return dst;
}
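A typical use of scatterwalk_ffwd() is to skip the associated data at the front of an AEAD request before walking the payload. The sketch below is a hypothetical caller, not taken from the kernel; the two-entry head[] array supplies the storage the function may need for the split entry:

static struct scatterlist *aead_payload_sg(struct aead_request *req,
					   struct scatterlist head[2])
{
	/* Returns either a pointer into req->src (the offset landed on an
	 * entry boundary) or head[], whose first entry covers the tail of
	 * the entry that req->assoclen split. */
	return scatterwalk_ffwd(head, req->src, req->assoclen);
}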
Example #3
static int cx23885_alsa_dma_init(struct cx23885_audio_dev *chip, int nr_pages)
{
	struct cx23885_audio_buffer *buf = chip->buf;
	struct page *pg;
	int i;

	buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
	if (NULL == buf->vaddr) {
		dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages);
		return -ENOMEM;
	}

	dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n",
				(unsigned long)buf->vaddr,
				nr_pages << PAGE_SHIFT);

	memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
	buf->nr_pages = nr_pages;

	buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
	if (NULL == buf->sglist)
		goto vzalloc_err;

	sg_init_table(buf->sglist, buf->nr_pages);
	for (i = 0; i < buf->nr_pages; i++) {
		pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
		if (NULL == pg)
			goto vmalloc_to_page_err;
		sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
	}
	return 0;

vmalloc_to_page_err:
	vfree(buf->sglist);
	buf->sglist = NULL;
vzalloc_err:
	vfree(buf->vaddr);
	buf->vaddr = NULL;
	return -ENOMEM;
}
Example #4
static int samsung_dmadev_prepare(unsigned ch,
			struct samsung_dma_prep_info *info)
{
	struct scatterlist sg;
	struct dma_chan *chan = (struct dma_chan *)ch;
	struct dma_async_tx_descriptor *desc;

	switch (info->cap) {
	case DMA_SLAVE:
		sg_init_table(&sg, 1);
		sg_dma_len(&sg) = info->len;
		sg_set_page(&sg, pfn_to_page(PFN_DOWN(info->buf)),
			    info->len, offset_in_page(info->buf));
		sg_dma_address(&sg) = info->buf;

		desc = chan->device->device_prep_slave_sg(chan,
			&sg, 1, info->direction, DMA_PREP_INTERRUPT);
		break;
	case DMA_CYCLIC:
		desc = chan->device->device_prep_dma_cyclic(chan,
			info->buf, info->len, info->period, info->direction);
		break;
	default:
		dev_err(&chan->dev->device, "unsupported format\n");
		return -EFAULT;
	}

	if (!desc) {
		dev_err(&chan->dev->device, "cannot prepare cyclic dma\n");
		return -EFAULT;
	}

	desc->callback = info->fp;
	desc->callback_param = info->fp_param;

	dmaengine_submit((struct dma_async_tx_descriptor *)desc);

	return 0;
}
Example #5
/*
 * Return a scatterlist for an array of user pages (NULL on errors).
 * Memory for the scatterlist is allocated using vmalloc.  The caller
 * must free the memory.
 */
static struct scatterlist *videobuf_pages_to_sg(struct page **pages,
					int nr_pages, int offset, size_t size)
{
	struct scatterlist *sglist;
	int i;

	if (NULL == pages[0])
		return NULL;
	sglist = vmalloc(nr_pages * sizeof(*sglist));
	if (NULL == sglist)
		return NULL;
	sg_init_table(sglist, nr_pages);

	if (PageHighMem(pages[0]))
		/* DMA to highmem pages might not work */
		goto highmem;
	sg_set_page(&sglist[0], pages[0],
			min_t(size_t, PAGE_SIZE - offset, size), offset);
	size -= min_t(size_t, PAGE_SIZE - offset, size);
	for (i = 1; i < nr_pages; i++) {
		if (NULL == pages[i])
			goto nopage;
		if (PageHighMem(pages[i]))
			goto highmem;
		sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0);
		size -= min_t(size_t, PAGE_SIZE, size);
	}
	return sglist;

nopage:
	dprintk(2, "sgl: oops - no page\n");
	vfree(sglist);
	return NULL;

highmem:
	dprintk(2, "sgl: oops - highmem page\n");
	vfree(sglist);
	return NULL;
}
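For context, videobuf-dma-sg builds this list right after pinning the user pages with get_user_pages(); a sketch of the calling sequence, with error handling trimmed (the field names follow struct videobuf_dmabuf, but treat the wrapper itself as illustrative):

static int sketch_map_user_buffer(struct videobuf_dmabuf *dma, size_t size)
{
	/* dma->pages was filled by get_user_pages(); the resulting
	 * scatterlist is what later gets dma_map_sg()'d. */
	dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages,
					   dma->offset, size);
	if (!dma->sglist)
		return -ENOMEM;
	return 0;
}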
Example #6
static void digest_data(struct hash_desc *hash, struct iscsi_cmnd *cmnd,
			struct tio *tio, u32 offset, u8 *crc)
{
	struct scatterlist *sg = cmnd->conn->hash_sg;
	u32 size, length;
	int i, idx, count;
	unsigned int nbytes;

	size = cmnd->pdu.datasize;
	nbytes = size = (size + 3) & ~3;

	offset += tio->offset;
	idx = offset >> PAGE_CACHE_SHIFT;
	offset &= ~PAGE_CACHE_MASK;
	count = get_pgcnt(size, offset);
	assert(idx + count <= tio->pg_cnt);

	assert(count <= ISCSI_CONN_IOV_MAX);

	sg_init_table(sg, ARRAY_SIZE(cmnd->conn->hash_sg));
	crypto_hash_init(hash);

	for (i = 0; size; i++) {
		if (offset + size > PAGE_CACHE_SIZE)
			length = PAGE_CACHE_SIZE - offset;
		else
			length = size;

		sg_set_page(&sg[i], tio->pvec[idx + i], length, offset);
		size -= length;
		offset = 0;
	}

	sg_mark_end(&sg[i - 1]);

	crypto_hash_update(hash, sg, nbytes);
	crypto_hash_final(hash, crc);
}
Example #7
struct scatterlist*
videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset)
{
	struct scatterlist *sglist;
	int i = 0;

	if (NULL == pages[0])
		return NULL;
	sglist = kcalloc(nr_pages, sizeof(*sglist), GFP_KERNEL);
	if (NULL == sglist)
		return NULL;
	sg_init_table(sglist, nr_pages);

	if (NULL == pages[0])
		goto nopage;
	if (PageHighMem(pages[0]))
		/* DMA to highmem pages might not work */
		goto highmem;
	sg_set_page(&sglist[0], pages[0], PAGE_SIZE - offset, offset);
	for (i = 1; i < nr_pages; i++) {
		if (NULL == pages[i])
			goto nopage;
		if (PageHighMem(pages[i]))
			goto highmem;
		sg_set_page(&sglist[i], pages[i], PAGE_SIZE, 0);
	}
	return sglist;

 nopage:
	dprintk(2,"sgl: oops - no page\n");
	kfree(sglist);
	return NULL;

 highmem:
	dprintk(2,"sgl: oops - highmem page\n");
	kfree(sglist);
	return NULL;
}
Example #8
int msm_iommu_map_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				phys_addr_t phy_addr,
				unsigned long size,
				unsigned long page_size,
				int prot)
{
	int ret = 0;
	int i = 0;
	unsigned long temp_iova = start_iova;
	/* the extra "padding" should never be written to. map it
	 * read-only. */
	prot &= ~IOMMU_WRITE;

	if (msm_iommu_page_size_is_supported(page_size)) {
		struct scatterlist *sglist;
		unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
		struct page *dummy_page = phys_to_page(phy_addr);

		sglist = vmalloc(sizeof(*sglist) * nrpages);
		if (!sglist) {
			ret = -ENOMEM;
			goto out;
		}

		sg_init_table(sglist, nrpages);

		for (i = 0; i < nrpages; i++)
			sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);

		ret = iommu_map_range(domain, temp_iova, sglist, size, prot);
		if (ret) {
			pr_err("%s: could not map extra %lx in domain %p\n",
				__func__, start_iova, domain);
		}

		vfree(sglist);
	} else {
Example #9
static int omap_crypto_copy_sg_lists(int total, int bs,
				     struct scatterlist **sg,
				     struct scatterlist *new_sg, u16 flags)
{
	int n = sg_nents(*sg);
	struct scatterlist *tmp;

	if (!(flags & OMAP_CRYPTO_FORCE_SINGLE_ENTRY)) {
		new_sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
		if (!new_sg)
			return -ENOMEM;

		sg_init_table(new_sg, n);
	}

	tmp = new_sg;

	while (*sg && total) {
		int len = (*sg)->length;

		if (total < len)
			len = total;

		if (len > 0) {
			total -= len;
			sg_set_page(tmp, sg_page(*sg), len, (*sg)->offset);
			if (total <= 0)
				sg_mark_end(tmp);
			tmp = sg_next(tmp);
		}

		*sg = sg_next(*sg);
	}

	*sg = new_sg;

	return 0;
}
Example #10
/* this is videobuf_vmalloc_to_sg() from videobuf-dma-sg.c
   make sure virt has been allocated with vmalloc_32(), otherwise the BUG()
   may be triggered on highmem machines */
static struct scatterlist* vmalloc_to_sg(unsigned char *virt, int nr_pages)
{
    struct scatterlist *sglist;
    struct page *pg;
    int i;

    sglist = kcalloc(nr_pages, sizeof(struct scatterlist), GFP_KERNEL);
    if (NULL == sglist)
        return NULL;
    sg_init_table(sglist, nr_pages);
    for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
        pg = vmalloc_to_page(virt);
        if (NULL == pg)
            goto err;
        BUG_ON(PageHighMem(pg));
        sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
    }
    return sglist;

err:
    kfree(sglist);
    return NULL;
}
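As the comment above insists, the buffer must come from vmalloc_32() so that every page is low memory; a minimal pairing sketch (an illustrative wrapper, not from the original driver):

static struct scatterlist *alloc_dma_buffer(int nr_pages, void **vaddr)
{
	struct scatterlist *sg;

	/* vmalloc_32() only hands out 32-bit-addressable lowmem pages,
	 * so the BUG_ON(PageHighMem(pg)) above cannot trigger. */
	*vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
	if (!*vaddr)
		return NULL;

	sg = vmalloc_to_sg(*vaddr, nr_pages);
	if (!sg) {
		vfree(*vaddr);
		*vaddr = NULL;
	}
	return sg;
}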
Example #11
void ieee80211_aes_ccm_encrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
			       u8 *data, size_t data_len, u8 *mic)
{
	struct scatterlist assoc, pt, ct[2];
	struct {
		struct aead_request	req;
		u8			priv[crypto_aead_reqsize(tfm)];
	} aead_req;

	memset(&aead_req, 0, sizeof(aead_req));

	sg_init_one(&pt, data, data_len);
	sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
	sg_init_table(ct, 2);
	sg_set_buf(&ct[0], data, data_len);
	sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);

	aead_request_set_tfm(&aead_req.req, tfm);
	aead_request_set_assoc(&aead_req.req, &assoc, assoc.length);
	aead_request_set_crypt(&aead_req.req, &pt, ct, data_len, b_0);

	crypto_aead_encrypt(&aead_req.req);
}
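A note on the buffer layout above: ct[0] aliases the same data buffer as the plaintext entry pt, so the cipher runs in place, with the ciphertext overwriting data and the MIC written out through ct[1].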
Example #12
void ieee80211_aes_gcm_encrypt(struct crypto_aead *tfm, u8 *j_0, u8 *aad,
			       u8 *data, size_t data_len, u8 *mic)
{
	struct scatterlist sg[3];

	char aead_req_data[sizeof(struct aead_request) +
			   crypto_aead_reqsize(tfm)]
		__aligned(__alignof__(struct aead_request));
	struct aead_request *aead_req = (void *)aead_req_data;

	memset(aead_req, 0, sizeof(aead_req_data));

	sg_init_table(sg, 3);
	sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
	sg_set_buf(&sg[1], data, data_len);
	sg_set_buf(&sg[2], mic, IEEE80211_GCMP_MIC_LEN);

	aead_request_set_tfm(aead_req, tfm);
	aead_request_set_crypt(aead_req, sg, sg, data_len, j_0);
	aead_request_set_ad(aead_req, sg[0].length);

	crypto_aead_encrypt(aead_req);
}
Example #13
int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, u8 *b_0, u8 *aad,
			      u8 *data, size_t data_len, u8 *mic)
{
	struct scatterlist assoc, pt, ct[2];
	char aead_req_data[sizeof(struct aead_request) +
				crypto_aead_reqsize(tfm)]
				__aligned(__alignof__(struct aead_request));
	struct aead_request *aead_req = (void *) aead_req_data;
	memset(aead_req, 0, sizeof(aead_req_data));

	sg_init_one(&pt, data, data_len);
	sg_init_one(&assoc, &aad[2], be16_to_cpup((__be16 *)aad));
	sg_init_table(ct, 2);
	sg_set_buf(&ct[0], data, data_len);
	sg_set_buf(&ct[1], mic, IEEE80211_CCMP_MIC_LEN);

	aead_request_set_tfm(aead_req, tfm);
	aead_request_set_assoc(aead_req, &assoc, assoc.length);
	aead_request_set_crypt(aead_req, ct, &pt,
			       data_len + IEEE80211_CCMP_MIC_LEN, b_0);

	return crypto_aead_decrypt(aead_req);
}
Example #14
void mars_digest(unsigned char *digest, void *data, int len)
{
	struct hash_desc desc = {
		.tfm = mars_tfm,
		.flags = 0,
	};
	struct scatterlist sg;

	memset(digest, 0, mars_digest_size);

	// TODO: use per-thread instance, omit locking
	down(&tfm_sem);

	crypto_hash_init(&desc);
	sg_init_table(&sg, 1);
	sg_set_buf(&sg, data, len);
	crypto_hash_update(&desc, &sg, sg.length);
	crypto_hash_final(&desc, digest);
	up(&tfm_sem);
}
EXPORT_SYMBOL_GPL(mars_digest);

void mref_checksum(struct mref_object *mref)
{
	unsigned char checksum[mars_digest_size];
	int len;

	if (mref->ref_cs_mode <= 0 || !mref->ref_data)
		return;

	mars_digest(checksum, mref->ref_data, mref->ref_len);

	len = sizeof(mref->ref_checksum);
	if (len > mars_digest_size)
		len = mars_digest_size;
	memcpy(&mref->ref_checksum, checksum, len);
}
Example #15
static struct sg_table *omap_gem_map_dma_buf(
		struct dma_buf_attachment *attachment,
		enum dma_data_direction dir)
{
	struct drm_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *sg;
	dma_addr_t dma_addr;
	int ret;

	sg = kzalloc(sizeof(*sg), GFP_KERNEL);
	if (!sg)
		return ERR_PTR(-ENOMEM);

	/* camera, etc, need physically contiguous.. but we need a
	 * better way to know this..
	 */
	ret = omap_gem_pin(obj, &dma_addr);
	if (ret)
		goto out;

	ret = sg_alloc_table(sg, 1, GFP_KERNEL);
	if (ret)
		goto out;

	sg_init_table(sg->sgl, 1);
	sg_dma_len(sg->sgl) = obj->size;
	sg_set_page(sg->sgl, pfn_to_page(PFN_DOWN(dma_addr)), obj->size, 0);
	sg_dma_address(sg->sgl) = dma_addr;

	/* this must be after omap_gem_pin() to ensure we have pages attached */
	omap_gem_dma_sync_buffer(obj, dir);

	return sg;
out:
	kfree(sg);
	return ERR_PTR(ret);
}
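One detail worth noting: sg_alloc_table() already initializes the entries it allocates (it ends up calling sg_init_table(), as Example #18 below shows), so the explicit sg_init_table(sg->sgl, 1) here is redundant, though harmless.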
Example #16
int ieee80211_aes_ccm_decrypt(struct crypto_aead *tfm, struct sk_buff *skb,
			      const u64 pn, size_t mic_len)
{
	u8 aad[2 * AES_BLOCK_SIZE];
	u8 b_0[AES_BLOCK_SIZE];
	u8 *data, *mic;
	size_t data_len, hdr_len;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct scatterlist sg[3];
	char aead_req_data[sizeof(struct aead_request) +
			   crypto_aead_reqsize(tfm)]
		__aligned(__alignof__(struct aead_request));
	struct aead_request *aead_req = (void *) aead_req_data;

	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	data_len = skb->len - hdr_len - mic_len;

	if (data_len <= 0)
		return -EINVAL;

	ccmp_special_blocks(hdr, hdr_len, pn, b_0, aad);

	memset(aead_req, 0, sizeof(aead_req_data));
	mic = skb->data + skb->len - mic_len;
	data = skb->data + hdr_len;
	sg_init_table(sg, 3);
	sg_set_buf(&sg[0], &aad[2], be16_to_cpup((__be16 *)aad));
	sg_set_buf(&sg[1], data, data_len);
	sg_set_buf(&sg[2], mic, mic_len);

	aead_request_set_tfm(aead_req, tfm);
	aead_request_set_crypt(aead_req, sg, sg, data_len + mic_len, b_0);
	aead_request_set_ad(aead_req, sg[0].length);

	return crypto_aead_decrypt(aead_req);
}
Example #17
static void omap2_mcspi_tx_dma(struct spi_device *spi,
				struct spi_transfer *xfer,
				struct dma_slave_config cfg)
{
	struct omap2_mcspi	*mcspi;
	struct omap2_mcspi_dma  *mcspi_dma;
	unsigned int		count;

	mcspi = spi_master_get_devdata(spi->master);
	mcspi_dma = &mcspi->dma_channels[spi->chip_select];
	count = xfer->len;

	if (mcspi_dma->dma_tx) {
		struct dma_async_tx_descriptor *tx;
		struct scatterlist sg;

		dmaengine_slave_config(mcspi_dma->dma_tx, &cfg);

		sg_init_table(&sg, 1);
		sg_dma_address(&sg) = xfer->tx_dma;
		sg_dma_len(&sg) = xfer->len;

		tx = dmaengine_prep_slave_sg(mcspi_dma->dma_tx, &sg, 1,
		DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
		if (tx) {
			tx->callback = omap2_mcspi_tx_callback;
			tx->callback_param = spi;
			dmaengine_submit(tx);
		} else {
			/* FIXME: fall back to PIO? */
		}
	}
	dma_async_issue_pending(mcspi_dma->dma_tx);
	omap2_mcspi_set_dma_req(spi, 0, 1);

}
Example #18
/**
 * __sg_alloc_table - Allocate and initialize an sg table with given allocator
 * @table:	The sg table header to use
 * @nents:	Number of entries in sg list
 * @max_ents:	The maximum number of entries the allocator returns per call
 * @gfp_mask:	GFP allocation mask
 * @alloc_fn:	Allocator to use
 *
 * Description:
 *   This function returns a @table @nents long. The allocator is
 *   defined to return scatterlist chunks of maximum size @max_ents.
 *   Thus if @nents is bigger than @max_ents, the scatterlists will be
 *   chained in units of @max_ents.
 *
 * Notes:
 *   If this function returns non-0 (eg failure), the caller must call
 *   __sg_free_table() to cleanup any leftover allocations.
 *
 **/
int __sg_alloc_table(struct sg_table *table, unsigned int nents,
		     unsigned int max_ents, gfp_t gfp_mask,
		     sg_alloc_fn *alloc_fn)
{
	struct scatterlist *sg, *prv;
	unsigned int left;
	unsigned int total_alloc = 0;

#ifndef ARCH_HAS_SG_CHAIN
	BUG_ON(nents > max_ents);
#endif

	memset(table, 0, sizeof(*table));

	left = nents;
	prv = NULL;
	do {
		unsigned int sg_size, alloc_size = left;

		if (alloc_size > max_ents) {
			alloc_size = max_ents;
			sg_size = alloc_size - 1;
		} else
			sg_size = alloc_size;

		left -= sg_size;

		sg = alloc_fn(alloc_size, gfp_mask);
		if (unlikely(!sg)) {
			table->orig_nents = total_alloc;
			/* mark the end of previous entry */
			sg_mark_end(&prv[alloc_size - 1]);
			return -ENOMEM;
		}

		total_alloc += alloc_size;

		sg_init_table(sg, alloc_size);
		table->nents = table->orig_nents += sg_size;

		/*
		 * If this is the first mapping, assign the sg table header.
		 * If this is not the first mapping, chain previous part.
		 */
		if (prv)
			sg_chain(prv, max_ents, sg);
		else
			table->sgl = sg;

		/*
		 * If no more entries after this one, mark the end
		 */
		if (!left)
			sg_mark_end(&sg[sg_size - 1]);

		/*
		 * only really needed for mempool backed sg allocations (like
		 * SCSI), a possible improvement here would be to pass the
		 * table pointer into the allocator and let that clear these
		 * flags
		 */
		gfp_mask &= ~__GFP_WAIT;
		gfp_mask |= __GFP_HIGH;
		prv = sg;
	} while (left);

	return 0;
}
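Most code reaches this function through sg_alloc_table(), which plugs in the stock kmalloc-backed allocator and a default chunk size. A caller-side sketch, with the page-filling loop purely illustrative:

static int fill_table_from_pages(struct sg_table *table,
				 struct page **pages, unsigned int nr_pages)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	/* Allocates (and chains, if needed) nr_pages entries. */
	ret = sg_alloc_table(table, nr_pages, GFP_KERNEL);
	if (ret)
		return ret;

	for_each_sg(table->sgl, sg, table->nents, i)
		sg_set_page(sg, pages[i], PAGE_SIZE, 0);

	return 0;
}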
Example #19
/**
 *  @brief This function uses SG mode to read/write data into card memory
 *
 *  @param handle   A Pointer to the moal_handle structure
 *  @param pmbuf_list   Pointer to a linked list of mlan_buffer structure
 *  @param port     Port
 *  @param write    write flag
 *
 *  @return         MLAN_STATUS_SUCCESS or MLAN_STATUS_FAILURE
 */
mlan_status
woal_sdio_rw_mb(moal_handle *handle, pmlan_buffer pmbuf_list, t_u32 port,
		t_u8 write)
{
	struct scatterlist sg_list[SDIO_MP_AGGR_DEF_PKT_LIMIT_MAX];
	int num_sg = pmbuf_list->use_count;
	int i = 0;
	mlan_buffer *pmbuf = NULL;
	struct mmc_request mmc_req;
	struct mmc_command mmc_cmd;
	struct mmc_data mmc_dat;
	struct sdio_func *func = ((struct sdio_mmc_card *)handle->card)->func;
	t_u32 ioport = (port & MLAN_SDIO_IO_PORT_MASK);
	t_u32 blkcnt = pmbuf_list->data_len / MLAN_SDIO_BLOCK_SIZE;
	int status;

	if (num_sg > SDIO_MP_AGGR_DEF_PKT_LIMIT_MAX) {
		PRINTM(MERROR, "ERROR: num_sg=%d", num_sg);
		return MLAN_STATUS_FAILURE;
	}
	sg_init_table(sg_list, num_sg);
	pmbuf = pmbuf_list->pnext;
	for (i = 0; i < num_sg; i++) {
		if (pmbuf == pmbuf_list)
			break;
		sg_set_buf(&sg_list[i], pmbuf->pbuf + pmbuf->data_offset,
			   pmbuf->data_len);
		pmbuf = pmbuf->pnext;
	}
	memset(&mmc_req, 0, sizeof(struct mmc_request));
	memset(&mmc_cmd, 0, sizeof(struct mmc_command));
	memset(&mmc_dat, 0, sizeof(struct mmc_data));

	mmc_dat.sg = sg_list;
	mmc_dat.sg_len = num_sg;
	mmc_dat.blksz = MLAN_SDIO_BLOCK_SIZE;
	mmc_dat.blocks = blkcnt;
	mmc_dat.flags = write ? MMC_DATA_WRITE : MMC_DATA_READ;

	mmc_cmd.opcode = SD_IO_RW_EXTENDED;
	mmc_cmd.arg = write ? 1 << 31 : 0;
	mmc_cmd.arg |= (func->num & 0x7) << 28;
	mmc_cmd.arg |= 1 << 27;	/* block basis */
	mmc_cmd.arg |= 0;	/* fixed address */
	mmc_cmd.arg |= (ioport & 0x1FFFF) << 9;
	mmc_cmd.arg |= blkcnt & 0x1FF;
	mmc_cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;

	mmc_req.cmd = &mmc_cmd;
	mmc_req.data = &mmc_dat;

#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
	sdio_claim_host(((struct sdio_mmc_card *)handle->card)->func);
#endif
	mmc_set_data_timeout(&mmc_dat,
			     ((struct sdio_mmc_card *)handle->card)->func->
			     card);
	mmc_wait_for_req(((struct sdio_mmc_card *)handle->card)->func->card->
			 host, &mmc_req);
	if (mmc_cmd.error || mmc_dat.error) {
		PRINTM(MERROR, "CMD53 %s cmd_error = %d data_error=%d\n",
		       write ? "write" : "read", mmc_cmd.error, mmc_dat.error);
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 0, 0)
        /* issue abort cmd52 command through F0*/
        sdio_f0_writeb(((struct sdio_mmc_card *)handle->card)->func, 0x01, SDIO_CCCR_ABORT, &status);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
	    sdio_release_host(((struct sdio_mmc_card *)handle->card)->func);
#endif
		return MLAN_STATUS_FAILURE;
	}
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32)
    sdio_release_host(((struct sdio_mmc_card *)handle->card)->func);
#endif
	return MLAN_STATUS_SUCCESS;
}
Example #20
/**
 * mmc_init_queue - initialise a queue structure.
 * @mq: mmc queue
 * @card: mmc card to attach this queue
 * @lock: queue lock
 *
 * Initialise a MMC card request queue.
 */
int mmc_init_queue(struct mmc_queue *mq, struct mmc_card *card, spinlock_t *lock)
{
	struct mmc_host *host = card->host;
	u64 limit = BLK_BOUNCE_HIGH;
	int ret;

	if (mmc_dev(host)->dma_mask && *mmc_dev(host)->dma_mask)
		limit = *mmc_dev(host)->dma_mask;

	mq->card = card;
	mq->queue = blk_init_queue(mmc_request, lock);
	if (!mq->queue)
		return -ENOMEM;

	mq->queue->queuedata = mq;
	mq->req = NULL;

	blk_queue_prep_rq(mq->queue, mmc_prep_request);
	blk_queue_ordered(mq->queue, QUEUE_ORDERED_DRAIN, NULL);
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, mq->queue);

#ifdef CONFIG_MMC_BLOCK_BOUNCE
	if (host->max_hw_segs == 1) {
		unsigned int bouncesz;

		bouncesz = MMC_QUEUE_BOUNCESZ;

		if (bouncesz > host->max_req_size)
			bouncesz = host->max_req_size;
		if (bouncesz > host->max_seg_size)
			bouncesz = host->max_seg_size;
		if (bouncesz > (host->max_blk_count * 512))
			bouncesz = host->max_blk_count * 512;

		if (bouncesz > 512) {
			mq->bounce_buf = kmalloc(bouncesz, GFP_KERNEL);
			if (!mq->bounce_buf) {
				printk(KERN_WARNING "%s: unable to "
					"allocate bounce buffer\n",
					mmc_card_name(card));
			}
		}

		if (mq->bounce_buf) {
			blk_queue_bounce_limit(mq->queue, BLK_BOUNCE_ANY);
			blk_queue_max_sectors(mq->queue, bouncesz / 512);
			blk_queue_max_phys_segments(mq->queue, bouncesz / 512);
			blk_queue_max_hw_segments(mq->queue, bouncesz / 512);
			blk_queue_max_segment_size(mq->queue, bouncesz);

			mq->sg = kmalloc(sizeof(struct scatterlist),
				GFP_KERNEL);
			if (!mq->sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->sg, 1);

			mq->bounce_sg = kmalloc(sizeof(struct scatterlist) *
				bouncesz / 512, GFP_KERNEL);
			if (!mq->bounce_sg) {
				ret = -ENOMEM;
				goto cleanup_queue;
			}
			sg_init_table(mq->bounce_sg, bouncesz / 512);
		}
	}
#endif

	if (!mq->bounce_buf) {
		blk_queue_bounce_limit(mq->queue, limit);
		blk_queue_max_sectors(mq->queue,
			min(host->max_blk_count, host->max_req_size / 512));
		blk_queue_max_phys_segments(mq->queue, host->max_phys_segs);
		blk_queue_max_hw_segments(mq->queue, host->max_hw_segs);
		blk_queue_max_segment_size(mq->queue, host->max_seg_size);

		mq->sg = kmalloc(sizeof(struct scatterlist) *
			host->max_phys_segs, GFP_KERNEL);
		if (!mq->sg) {
			ret = -ENOMEM;
			goto cleanup_queue;
		}
		sg_init_table(mq->sg, host->max_phys_segs);
	}

	init_MUTEX(&mq->thread_sem);

	mq->thread = kthread_run(mmc_queue_thread, mq, "mmcqd");
	if (IS_ERR(mq->thread)) {
		ret = PTR_ERR(mq->thread);
		goto free_bounce_sg;
	}

	return 0;
 free_bounce_sg:
 	if (mq->bounce_sg)
 		kfree(mq->bounce_sg);
 	mq->bounce_sg = NULL;
 cleanup_queue:
 	if (mq->sg)
		kfree(mq->sg);
	mq->sg = NULL;
	if (mq->bounce_buf)
		kfree(mq->bounce_buf);
	mq->bounce_buf = NULL;
	blk_cleanup_queue(mq->queue);
	return ret;
}
Example #21
/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_buf(sg, buf, buflen);
}
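Since sg_init_one() is just sg_init_table() plus sg_set_buf(), it is the shorthand several of the examples above could use for their single-entry lists. A minimal digest sketch in the same older crypto_hash style as Examples #1 and #14 (a hypothetical wrapper):

static void digest_buf(struct hash_desc *desc, const void *buf,
		       unsigned int len, u8 *out)
{
	struct scatterlist sg;

	/* One contiguous buffer, one scatterlist entry. */
	sg_init_one(&sg, buf, len);
	crypto_hash_digest(desc, &sg, len, out);
}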
Example #22
static int pohmelfs_trans_iter(struct netfs_trans *t, struct pohmelfs_crypto_engine *e,
		int (*iterator) (struct pohmelfs_crypto_engine *e,
				  struct scatterlist *dst,
				  struct scatterlist *src))
{
	void *data = t->iovec.iov_base + sizeof(struct netfs_cmd) + t->psb->crypto_attached_size;
	unsigned int size = t->iovec.iov_len - sizeof(struct netfs_cmd) - t->psb->crypto_attached_size;
	struct netfs_cmd *cmd = data;
	unsigned int sz, pages = t->attached_pages, i, csize, cmd_cmd, dpage_idx;
	struct scatterlist sg_src, sg_dst;
	int err;

	while (size) {
		cmd = data;
		cmd_cmd = __be16_to_cpu(cmd->cmd);
		csize = __be32_to_cpu(cmd->size);
		cmd->iv = __cpu_to_be64(e->iv);

		if (cmd_cmd == NETFS_READ_PAGES || cmd_cmd == NETFS_READ_PAGE)
			csize = __be16_to_cpu(cmd->ext);

		sz = csize + __be16_to_cpu(cmd->cpad) + sizeof(struct netfs_cmd);

		dprintk("%s: size: %u, sz: %u, cmd_size: %u, cmd_cpad: %u.\n",
				__func__, size, sz, __be32_to_cpu(cmd->size), __be16_to_cpu(cmd->cpad));

		data += sz;
		size -= sz;

		sg_init_one(&sg_src, cmd->data, sz - sizeof(struct netfs_cmd));
		sg_init_one(&sg_dst, cmd->data, sz - sizeof(struct netfs_cmd));

		err = iterator(e, &sg_dst, &sg_src);
		if (err)
			return err;
	}

	if (!pages)
		return 0;

	dpage_idx = 0;
	for (i = 0; i < t->page_num; ++i) {
		struct page *page = t->pages[i];
		struct page *dpage = e->pages[dpage_idx];

		if (!page)
			continue;

		sg_init_table(&sg_src, 1);
		sg_init_table(&sg_dst, 1);
		sg_set_page(&sg_src, page, page_private(page), 0);
		sg_set_page(&sg_dst, dpage, page_private(page), 0);

		err = iterator(e, &sg_dst, &sg_src);
		if (err)
			return err;

		pages--;
		if (!pages)
			break;
		dpage_idx++;
	}

	return 0;
}
Example #23
static int __init example_init(void)
{
	int			i;
	unsigned int		ret;
	unsigned int		nents;
	struct scatterlist	sg[10];

	printk(KERN_INFO "DMA fifo test start\n");

	if (kfifo_alloc(&fifo, FIFO_SIZE, GFP_KERNEL)) {
		printk(KERN_WARNING "error kfifo_alloc\n");
		return -ENOMEM;
	}

	printk(KERN_INFO "queue size: %u\n", kfifo_size(&fifo));

	kfifo_in(&fifo, "test", 4);

	for (i = 0; i != 9; i++)
		kfifo_put(&fifo, i);

	/* kick away first byte */
	kfifo_skip(&fifo);

	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));

	/*
	 * Configure the kfifo buffer to receive data from DMA input.
	 *
	 *  .--------------------------------------.
	 *  | 0 | 1 | 2 | ... | 12 | 13 | ... | 31 |
	 *  |---|------------------|---------------|
	 *   \_/ \________________/ \_____________/
	 *    \          \                  \
	 *     \          \_allocated data   \
	 *      \_*free space*                \_*free space*
	 *
	 * We need two different SG entries: one for the free space area at the
	 * end of the kfifo buffer (19 bytes) and another for the first free
	 * byte at the beginning, after the kfifo_skip().
	 */
	sg_init_table(sg, ARRAY_SIZE(sg));
	nents = kfifo_dma_in_prepare(&fifo, sg, ARRAY_SIZE(sg), FIFO_SIZE);
	printk(KERN_INFO "DMA sgl entries: %d\n", nents);
	if (!nents) {
		/* fifo is full and no sgl was created */
		printk(KERN_WARNING "error kfifo_dma_in_prepare\n");
		return -EIO;
	}

	/* receive data */
	printk(KERN_INFO "scatterlist for receive:\n");
	for (i = 0; i < nents; i++) {
		printk(KERN_INFO
		"sg[%d] -> "
		"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
			i, sg[i].page_link, sg[i].offset, sg[i].length);

		if (sg_is_last(&sg[i]))
			break;
	}

	/* put your code here to set up and execute the DMA operation */
	/* ... */

	/* example: zero bytes received */
	ret = 0;

	/* finish the dma operation and update the received data */
	kfifo_dma_in_finish(&fifo, ret);

	/* Prepare to transmit data, example: 8 bytes */
	nents = kfifo_dma_out_prepare(&fifo, sg, ARRAY_SIZE(sg), 8);
	printk(KERN_INFO "DMA sgl entries: %d\n", nents);
	if (!nents) {
		/* no data was available and no sgl was created */
		printk(KERN_WARNING "error kfifo_dma_out_prepare\n");
		return -EIO;
	}

	printk(KERN_INFO "scatterlist for transmit:\n");
	for (i = 0; i < nents; i++) {
		printk(KERN_INFO
		"sg[%d] -> "
		"page_link 0x%.8lx offset 0x%.8x length 0x%.8x\n",
			i, sg[i].page_link, sg[i].offset, sg[i].length);

		if (sg_is_last(&sg[i]))
			break;
	}

	/* put your code here to set up and execute the DMA operation */
	/* ... */

	/* example: 5 bytes transmitted */
	ret = 5;

	/* finish the dma operation and update the transmitted data */
	kfifo_dma_out_finish(&fifo, ret);

	ret = kfifo_len(&fifo);
	printk(KERN_INFO "queue len: %u\n", kfifo_len(&fifo));

	if (ret != 7) {
		printk(KERN_WARNING "size mismatch: test failed");
		return -EIO;
	}
	printk(KERN_INFO "test passed\n");

	return 0;
}
Example #24
int pohmelfs_crypto_process_input_data(struct pohmelfs_crypto_engine *e, u64 cmd_iv,
		void *data, struct page *page, unsigned int size)
{
	int err;
	struct scatterlist sg;

	if (!e->cipher && !e->hash)
		return 0;

	dprintk("%s: eng: %p, iv: %llx, data: %p, page: %p/%lu, size: %u.\n",
		__func__, e, cmd_iv, data, page, (page) ? page->index : 0, size);

	if (data) {
		sg_init_one(&sg, data, size);
	} else {
		sg_init_table(&sg, 1);
		sg_set_page(&sg, page, size, 0);
	}

	if (e->cipher) {
		struct ablkcipher_request *req = e->data + crypto_hash_digestsize(e->hash);
		u8 iv[32];

		memset(iv, 0, sizeof(iv));
		memcpy(iv, &cmd_iv, sizeof(cmd_iv));

		ablkcipher_request_set_tfm(req, e->cipher);

		err = pohmelfs_crypto_process(req, &sg, &sg, iv, 0, e->timeout);
		if (err)
			goto err_out_exit;
	}

	if (e->hash) {
		struct hash_desc desc;
		void *dst = e->data + e->size/2;

		desc.tfm = e->hash;
		desc.flags = 0;

		err = crypto_hash_init(&desc);
		if (err)
			goto err_out_exit;

		err = crypto_hash_update(&desc, &sg, size);
		if (err)
			goto err_out_exit;

		err = crypto_hash_final(&desc, dst);
		if (err)
			goto err_out_exit;

		err = !!memcmp(dst, e->data, crypto_hash_digestsize(e->hash));

		if (err) {
#ifdef CONFIG_POHMELFS_DEBUG
			unsigned int i;
			unsigned char *recv = e->data, *calc = dst;

			dprintk("%s: eng: %p, hash: %p, cipher: %p: iv : %llx, hash mismatch (recv/calc): ",
					__func__, e, e->hash, e->cipher, cmd_iv);
			for (i = 0; i < crypto_hash_digestsize(e->hash); ++i) {
#if 0
				dprintka("%02x ", recv[i]);
				if (recv[i] != calc[i]) {
					dprintka("| calc byte: %02x.\n", calc[i]);
					break;
				}
#else
				dprintka("%02x/%02x ", recv[i], calc[i]);
#endif
			}
			dprintk("\n");
#endif
			goto err_out_exit;
		} else {
			dprintk("%s: eng: %p, hash: %p, cipher: %p: hashes matched.\n",
					__func__, e, e->hash, e->cipher);
		}
	}

	dprintk("%s: eng: %p, size: %u, hash: %p, cipher: %p: completed.\n",
			__func__, e, e->size, e->hash, e->cipher);

	return 0;

err_out_exit:
	dprintk("%s: eng: %p, hash: %p, cipher: %p: err: %d.\n",
			__func__, e, e->hash, e->cipher, err);
	return err;
}
Example #25
static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req,
        bool enc)
{
    struct crypto_aead *aead = crypto_aead_reqtfm(req);
    struct crypto_rfc4543_ctx *ctx = crypto_aead_ctx(aead);
    struct crypto_rfc4543_req_ctx *rctx = crypto_rfc4543_reqctx(req);
    struct aead_request *subreq = &rctx->subreq;
    struct scatterlist *src = req->src;
    struct scatterlist *cipher = rctx->cipher;
    struct scatterlist *payload = rctx->payload;
    struct scatterlist *assoc = rctx->assoc;
    unsigned int authsize = crypto_aead_authsize(aead);
    unsigned int assoclen = req->assoclen;
    struct page *srcp;
    u8 *vsrc;
    u8 *iv = PTR_ALIGN((u8 *)(rctx + 1) + crypto_aead_reqsize(ctx->child),
                       crypto_aead_alignmask(ctx->child) + 1);

    memcpy(iv, ctx->nonce, 4);
    memcpy(iv + 4, req->iv, 8);

    /* construct cipher/plaintext */
    if (enc)
        memset(rctx->auth_tag, 0, authsize);
    else
        scatterwalk_map_and_copy(rctx->auth_tag, src,
                                 req->cryptlen - authsize,
                                 authsize, 0);

    sg_init_one(cipher, rctx->auth_tag, authsize);

    /* construct the aad */
    srcp = sg_page(src);
    vsrc = PageHighMem(srcp) ? NULL : page_address(srcp) + src->offset;

    sg_init_table(payload, 2);
    sg_set_buf(payload, req->iv, 8);
    scatterwalk_crypto_chain(payload, src, vsrc == req->iv + 8, 2);
    assoclen += 8 + req->cryptlen - (enc ? 0 : authsize);

    if (req->assoc->length == req->assoclen) {
        sg_init_table(assoc, 2);
        sg_set_page(assoc, sg_page(req->assoc), req->assoc->length,
                    req->assoc->offset);
    } else {
        BUG_ON(req->assoclen > sizeof(rctx->assocbuf));

        scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0,
                                 req->assoclen, 0);

        sg_init_table(assoc, 2);
        sg_set_buf(assoc, rctx->assocbuf, req->assoclen);
    }
    scatterwalk_crypto_chain(assoc, payload, 0, 2);

    aead_request_set_tfm(subreq, ctx->child);
    aead_request_set_callback(subreq, req->base.flags, crypto_rfc4543_done,
                              req);
    aead_request_set_crypt(subreq, cipher, cipher, enc ? 0 : authsize, iv);
    aead_request_set_assoc(subreq, assoc, assoclen);

    return subreq;
}
Example #26
/**
 * iscsi_tcp_segment_done - check whether the segment is complete
 * @tcp_conn: iscsi tcp connection
 * @segment: iscsi segment to check
 * @recv: set to one if this is called from the recv path
 * @copied: number of bytes copied
 *
 * Check if we're done receiving this segment. If the receive
 * buffer is full but we expect more data, move on to the
 * next entry in the scatterlist.
 *
 * If the amount of data we received isn't a multiple of 4,
 * we will transparently receive the pad bytes, too.
 *
 * This function must be re-entrant.
 */
int iscsi_tcp_segment_done(struct iscsi_tcp_conn *tcp_conn,
			   struct iscsi_segment *segment, int recv,
			   unsigned copied)
{
	struct scatterlist sg;
	unsigned int pad;

	ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "copied %u %u size %u %s\n",
		      segment->copied, copied, segment->size,
		      recv ? "recv" : "xmit");
	if (segment->hash && copied) {
		/*
		 * If a segment is kmapd we must unmap it before sending
		 * to the crypto layer since that will try to kmap it again.
		 */
		iscsi_tcp_segment_unmap(segment);

		if (!segment->data) {
			sg_init_table(&sg, 1);
			sg_set_page(&sg, sg_page(segment->sg), copied,
				    segment->copied + segment->sg_offset +
							segment->sg->offset);
		} else
			sg_init_one(&sg, segment->data + segment->copied,
				    copied);
		crypto_hash_update(segment->hash, &sg, copied);
	}

	segment->copied += copied;
	if (segment->copied < segment->size) {
		iscsi_tcp_segment_map(segment, recv);
		return 0;
	}

	segment->total_copied += segment->copied;
	segment->copied = 0;
	segment->size = 0;

	/* Unmap the current scatterlist page, if there is one. */
	iscsi_tcp_segment_unmap(segment);

	/* Do we have more scatterlist entries? */
	ISCSI_DBG_TCP(tcp_conn->iscsi_conn, "total copied %u total size %u\n",
		      segment->total_copied, segment->total_size);
	if (segment->total_copied < segment->total_size) {
		/* Proceed to the next entry in the scatterlist. */
		iscsi_tcp_segment_init_sg(segment, sg_next(segment->sg),
					  0);
		iscsi_tcp_segment_map(segment, recv);
		BUG_ON(segment->size == 0);
		return 0;
	}

	/* Do we need to handle padding? */
	if (!(tcp_conn->iscsi_conn->session->tt->caps & CAP_PADDING_OFFLOAD)) {
		pad = iscsi_padding(segment->total_copied);
		if (pad != 0) {
			ISCSI_DBG_TCP(tcp_conn->iscsi_conn,
				      "consume %d pad bytes\n", pad);
			segment->total_size += pad;
			segment->size = pad;
			segment->data = segment->padbuf;
			return 0;
		}
	}

	/*
	 * Set us up for transferring the data digest. hdr digest
	 * is completely handled in hdr done function.
	 */
	if (segment->hash) {
		crypto_hash_final(segment->hash, segment->digest);
		iscsi_tcp_segment_splice_digest(segment,
				 recv ? segment->recv_digest : segment->digest);
		return 0;
	}

	return 1;
}
Example #27
static int aml_keybox_aes_encrypt(const void *key, int key_len,
                                  const u8 *aes_iv, void *dst, size_t *dst_len,
                                  const void *src, size_t src_len)
{
    struct scatterlist sg_in[1], sg_out[1];
    struct crypto_blkcipher *tfm = aml_keybox_crypto_alloc_cipher();
    struct blkcipher_desc desc =
        { .tfm = tfm, .flags = 0 };
	int ret;
	void *iv;
	int ivsize;
	if (src_len & 0x0f) {
		printk("%s:%d,src_len %zu is not 16-byte aligned\n",
		       __func__, __LINE__, src_len);
		return -1;
	}

	if (IS_ERR(tfm)){
		printk("%s:%d,crypto_alloc fail\n",__func__,__LINE__);
		return PTR_ERR(tfm);
	}

	*dst_len = src_len ;

	crypto_blkcipher_setkey((void *) tfm, key, key_len);
	sg_init_table(sg_in, 1);
	sg_set_buf(&sg_in[0], src, src_len);
	sg_init_table(sg_out, 1);
	sg_set_buf(sg_out, dst, *dst_len);
	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);

	//printk("key_len:%d,ivsize:%d\n",key_len,ivsize);

	memcpy(iv, aes_iv, ivsize);

    ret = crypto_blkcipher_encrypt(&desc, sg_out, sg_in,src_len);
    crypto_free_blkcipher(tfm);
    if (ret < 0){
        printk("%s:%d,ceph_aes_crypt failed %d\n", __func__,__LINE__,ret);
        return ret;
	}
    return 0;
}
static int aml_keybox_aes_decrypt(const void *key, int key_len,
                                      const u8 *aes_iv, void *dst,
                                      size_t *dst_len, const void *src,
                                      size_t src_len)
{
    struct scatterlist sg_in[1], sg_out[1];
    struct crypto_blkcipher *tfm = aml_keybox_crypto_alloc_cipher();
    struct blkcipher_desc desc =
        { .tfm = tfm };
    void *iv;
    int ivsize;
    int ret;
//    int last_byte;
	if (src_len & 0x0f) {
		printk("%s:%d,src_len %zu is not 16-byte aligned\n",
		       __func__, __LINE__, src_len);
		return -1;
	}

	if (IS_ERR(tfm)){
		printk("%s:%d,crypto_alloc fail\n",__func__,__LINE__);
		return PTR_ERR(tfm);
	}

	crypto_blkcipher_setkey((void *) tfm, key, key_len);
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 1);
	sg_set_buf(sg_in, src, src_len);
	sg_set_buf(&sg_out[0], dst, *dst_len);

	iv = crypto_blkcipher_crt(tfm)->iv;
	ivsize = crypto_blkcipher_ivsize(tfm);

	//printk("key_len:%d,ivsize:%d\n",key_len,ivsize);

	memcpy(iv, aes_iv, ivsize);

	ret = crypto_blkcipher_decrypt(&desc, sg_out, sg_in, src_len);
	crypto_free_blkcipher(tfm);
	if (ret < 0){
		printk("%s:%d,ceph_aes_decrypt failed %d\n", __func__,__LINE__,ret);
		return ret;
	}
	*dst_len = src_len;
    return 0;
}

int aes_crypto_encrypt(void *dst,size_t *dst_len,const void *src,size_t src_len)
{
	int ret;
	unsigned char iv_aes[16];
	unsigned char key_aes[32];
	memcpy(iv_aes,&default_AESkey[0],16);
	memcpy(key_aes,&default_AESkey[16],32);

	ret = aml_keybox_aes_encrypt(key_aes,sizeof(key_aes),iv_aes,dst,dst_len,src,src_len);
	return ret;
}
Example #28
/* Main encrypt function
 * referenced from ceph_aes_encrypt() function
 * returns length encrypted
 */
int encrypt_data(const void *key, int length_key, void *to_buffer, const void *from_buffer, size_t *to_length, 
    size_t from_length, char *algo_name){
    
    struct scatterlist scatter_list_src[2];
    struct scatterlist scatter_list_dest[1];
    struct crypto_blkcipher *tfm = crypto_alloc_blkcipher(algo_name, 0, CRYPTO_ALG_ASYNC);
    struct blkcipher_desc desc = { .tfm = tfm, .flags = 0 };
    size_t null_padding = (0x10 - (from_length & 0x0f));
    int return_value = 0;
    char padding_array[48];
    
    printk("algo_name: %s\n", algo_name);
    /* check to see if the cipher struct is set properly */
    if(IS_ERR(tfm))
    {
        printk("Error in setting tfm\n");
        return PTR_ERR(tfm);
    }
    memset(padding_array, null_padding, null_padding);
    
    *to_length = from_length + null_padding;
    
    /* let's set the key for the cipher */
    crypto_blkcipher_setkey((void *)tfm, key, length_key);
	sg_init_table(scatter_list_src, 2);
	sg_set_buf(&scatter_list_src[0], from_buffer, from_length);
	sg_set_buf(&scatter_list_src[1], padding_array, null_padding);
	sg_init_table(scatter_list_dest, 1);
	sg_set_buf(scatter_list_dest, to_buffer,*to_length);
    
    /* let's start encrypting */
    return_value = crypto_blkcipher_encrypt(&desc, scatter_list_dest, scatter_list_src, from_length + null_padding);
    
    /* free up the blk cipher */
    crypto_free_blkcipher(tfm);
    
    if (return_value < 0)
    {
        printk(KERN_CRIT "crypto_blcipher encryption failed with errno %d.\n",return_value);
    }
    
    return return_value;
    
}

int decrypt_data(const void *key, int length_key, void *to_buffer, const void *from_buffer, 
    size_t *to_length, size_t from_length, char *algo_name)
{

	int return_value =0;
	int end_element;
	char padding_array[48];
	struct scatterlist scatter_list_src[1];
    struct scatterlist scatter_list_dest[2];
	struct crypto_blkcipher *tfm = crypto_alloc_blkcipher(algo_name, 0, CRYPTO_ALG_ASYNC);
	struct blkcipher_desc desc = { .tfm = tfm };
    
    printk("algo_name: %s\n", algo_name);
    
	/* check to see if the cipher struct is set properly */
    if(IS_ERR(tfm))
    {
        return PTR_ERR(tfm);
    }
    
	/* Setting the key for Block cipher */
	crypto_blkcipher_setkey((void *)tfm, key, length_key);
	sg_init_table(scatter_list_src, 1);
	sg_init_table(scatter_list_dest, 2);
	sg_set_buf(scatter_list_src, from_buffer, from_length);
	sg_set_buf(&scatter_list_dest[0], to_buffer, *to_length);
	sg_set_buf(&scatter_list_dest[1], padding_array, sizeof(padding_array));
    
	/* let's decrypt using crypto_blkcipher */
	return_value = crypto_blkcipher_decrypt(&desc, scatter_list_dest, scatter_list_src, from_length);
    /* Free up the blk cipher */
	crypto_free_blkcipher(tfm);
    
	if (return_value < 0) {
        printk(KERN_CRIT "crypto_blcipher decryption failed 1.\n");
		return return_value;
	}
    
	if (from_length <= *to_length)
		end_element = ((char *)to_buffer)[from_length - 1];
	else
		end_element = padding_array[from_length - *to_length - 1];
    
	if (end_element <= 16 && from_length >= end_element) {
        
		*to_length = from_length - end_element;
	}
	else 
    {
        printk(KERN_CRIT "crypto_blcipher decryption failed 2.\n");
        return -EPERM;  //bad padding
    }
    return return_value;
}
Example #29
/*
 * Perform the MPPE rekey algorithm, from RFC 3078, sec. 7.3.
 * Well, not what's written there, but rather what they meant.
 */
static void mppe_rekey(struct ppp_mppe_state * state, int initial_key)
{
	struct scatterlist sg_in[1], sg_out[1];
	struct blkcipher_desc desc = { .tfm = state->arc4 };

	get_new_key_from_sha(state);
	if (!initial_key) {
		crypto_blkcipher_setkey(state->arc4, state->sha1_digest,
					state->keylen);
		sg_init_table(sg_in, 1);
		sg_init_table(sg_out, 1);
		setup_sg(sg_in, state->sha1_digest, state->keylen);
		setup_sg(sg_out, state->session_key, state->keylen);
		if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in,
					     state->keylen) != 0) {
    		    printk(KERN_WARNING "mppe_rekey: cipher_encrypt failed\n");
		}
	} else {
		memcpy(state->session_key, state->sha1_digest, state->keylen);
	}
	if (state->keylen == 8) {
		/* See RFC 3078 */
		state->session_key[0] = 0xd1;
		state->session_key[1] = 0x26;
		state->session_key[2] = 0x9e;
	}
	crypto_blkcipher_setkey(state->arc4, state->session_key, state->keylen);
}

/*
 * Allocate space for a (de)compressor.
 */
static void *mppe_alloc(unsigned char *options, int optlen)
{
	struct ppp_mppe_state *state;
	unsigned int digestsize;

	if (optlen != CILEN_MPPE + sizeof(state->master_key)
	    || options[0] != CI_MPPE || options[1] != CILEN_MPPE)
		goto out;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state == NULL)
		goto out;


	state->arc4 = crypto_alloc_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(state->arc4)) {
		state->arc4 = NULL;
		goto out_free;
	}

	state->sha1 = crypto_alloc_hash("sha1", 0, CRYPTO_ALG_ASYNC);
	if (IS_ERR(state->sha1)) {
		state->sha1 = NULL;
		goto out_free;
	}

	digestsize = crypto_hash_digestsize(state->sha1);
	if (digestsize < MPPE_MAX_KEY_LEN)
		goto out_free;

	state->sha1_digest = kmalloc(digestsize, GFP_KERNEL);
	if (!state->sha1_digest)
		goto out_free;

	/* Save keys. */
	memcpy(state->master_key, &options[CILEN_MPPE],
	       sizeof(state->master_key));
	memcpy(state->session_key, state->master_key,
	       sizeof(state->master_key));

	/*
	 * We defer initial key generation until mppe_init(), as mppe_alloc()
	 * is called frequently during negotiation.
	 */

	return (void *)state;

	out_free:
	    if (state->sha1_digest)
		kfree(state->sha1_digest);
	    if (state->sha1)
		crypto_free_hash(state->sha1);
	    if (state->arc4)
		crypto_free_blkcipher(state->arc4);
	    kfree(state);
	out:
	return NULL;
}

/*
 * Deallocate space for a (de)compressor.
 */
static void mppe_free(void *arg)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
	if (state) {
	    if (state->sha1_digest)
		kfree(state->sha1_digest);
	    if (state->sha1)
		crypto_free_hash(state->sha1);
	    if (state->arc4)
		crypto_free_blkcipher(state->arc4);
	    kfree(state);
	}
}

/*
 * Initialize (de)compressor state.
 */
static int
mppe_init(void *arg, unsigned char *options, int optlen, int unit, int debug,
	  const char *debugstr)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
	unsigned char mppe_opts;

	if (optlen != CILEN_MPPE
	    || options[0] != CI_MPPE || options[1] != CILEN_MPPE)
		return 0;

	MPPE_CI_TO_OPTS(&options[2], mppe_opts);
	if (mppe_opts & MPPE_OPT_128)
		state->keylen = 16;
	else if (mppe_opts & MPPE_OPT_40)
		state->keylen = 8;
	else {
		printk(KERN_WARNING "%s[%d]: unknown key length\n", debugstr,
		       unit);
		return 0;
	}
	if (mppe_opts & MPPE_OPT_STATEFUL)
		state->stateful = 1;

	/* Generate the initial session key. */
	mppe_rekey(state, 1);

	if (debug) {
		int i;
		char mkey[sizeof(state->master_key) * 2 + 1];
		char skey[sizeof(state->session_key) * 2 + 1];

		printk(KERN_DEBUG "%s[%d]: initialized with %d-bit %s mode\n",
		       debugstr, unit, (state->keylen == 16) ? 128 : 40,
		       (state->stateful) ? "stateful" : "stateless");

		for (i = 0; i < sizeof(state->master_key); i++)
			sprintf(mkey + i * 2, "%02x", state->master_key[i]);
		for (i = 0; i < sizeof(state->session_key); i++)
			sprintf(skey + i * 2, "%02x", state->session_key[i]);
		printk(KERN_DEBUG
		       "%s[%d]: keys: master: %s initial session: %s\n",
		       debugstr, unit, mkey, skey);
	}

	/*
	 * Initialize the coherency count.  The initial value is not specified
	 * in RFC 3078, but we can make a reasonable assumption that it will
	 * start at 0.  Setting it to the max here makes the comp/decomp code
	 * do the right thing (determined through experiment).
	 */
	state->ccount = MPPE_CCOUNT_SPACE - 1;

	/*
	 * Note that even though we have initialized the key table, we don't
	 * set the FLUSHED bit.  This is contrary to RFC 3078, sec. 3.1.
	 */
	state->bits = MPPE_BIT_ENCRYPTED;

	state->unit = unit;
	state->debug = debug;

	return 1;
}

static int
mppe_comp_init(void *arg, unsigned char *options, int optlen, int unit,
	       int hdrlen, int debug)
{
	/* ARGSUSED */
	return mppe_init(arg, options, optlen, unit, debug, "mppe_comp_init");
}

/*
 * We received a CCP Reset-Request (actually, we are sending a Reset-Ack),
 * tell the compressor to rekey.  Note that we MUST NOT rekey for
 * every CCP Reset-Request; we only rekey on the next xmit packet.
 * We might get multiple CCP Reset-Requests if our CCP Reset-Ack is lost.
 * So, rekeying for every CCP Reset-Request is broken as the peer will not
 * know how many times we've rekeyed.  (If we rekey and THEN get another
 * CCP Reset-Request, we must rekey again.)
 */
static void mppe_comp_reset(void *arg)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;

	state->bits |= MPPE_BIT_FLUSHED;
}

/*
 * Compress (encrypt) a packet.
 * It's strange to call this a compressor, since the output is always
 * MPPE_OVHD + 2 bytes larger than the input.
 */
static int
mppe_compress(void *arg, unsigned char *ibuf, unsigned char *obuf,
	      int isize, int osize)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
	struct blkcipher_desc desc = { .tfm = state->arc4 };
	int proto;
	struct scatterlist sg_in[1], sg_out[1];

	/*
	 * Check that the protocol is in the range we handle.
	 */
	proto = PPP_PROTOCOL(ibuf);
	if (proto < 0x0021 || proto > 0x00fa)
		return 0;

	/* Make sure we have enough room to generate an encrypted packet. */
	if (osize < isize + MPPE_OVHD + 2) {
		/* Drop the packet if we should encrypt it, but can't. */
		printk(KERN_DEBUG "mppe_compress[%d]: osize too small! "
		       "(have: %d need: %d)\n", state->unit,
		       osize, osize + MPPE_OVHD + 2);
		return -1;
	}

	osize = isize + MPPE_OVHD + 2;

	/*
	 * Copy over the PPP header and set control bits.
	 */
	obuf[0] = PPP_ADDRESS(ibuf);
	obuf[1] = PPP_CONTROL(ibuf);
	obuf[2] = PPP_COMP >> 8;	/* isize + MPPE_OVHD + 1 */
	obuf[3] = PPP_COMP;	/* isize + MPPE_OVHD + 2 */
	obuf += PPP_HDRLEN;

	state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
	if (state->debug >= 7)
		printk(KERN_DEBUG "mppe_compress[%d]: ccount %d\n", state->unit,
		       state->ccount);
	obuf[0] = state->ccount >> 8;
	obuf[1] = state->ccount & 0xff;

	if (!state->stateful ||	/* stateless mode     */
	    ((state->ccount & 0xff) == 0xff) ||	/* "flag" packet      */
	    (state->bits & MPPE_BIT_FLUSHED)) {	/* CCP Reset-Request  */
		/* We must rekey */
		if (state->debug && state->stateful)
			printk(KERN_DEBUG "mppe_compress[%d]: rekeying\n",
			       state->unit);
		mppe_rekey(state, 0);
		state->bits |= MPPE_BIT_FLUSHED;
	}
	obuf[0] |= state->bits;
	state->bits &= ~MPPE_BIT_FLUSHED;	/* reset for next xmit */

	obuf += MPPE_OVHD;
	ibuf += 2;		/* skip to proto field */
	isize -= 2;

	/* Encrypt packet */
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 1);
	setup_sg(sg_in, ibuf, isize);
	setup_sg(sg_out, obuf, osize);
	if (crypto_blkcipher_encrypt(&desc, sg_out, sg_in, isize) != 0) {
		printk(KERN_DEBUG "crypto_cypher_encrypt failed\n");
		return -1;
	}

	state->stats.unc_bytes += isize;
	state->stats.unc_packets++;
	state->stats.comp_bytes += osize;
	state->stats.comp_packets++;

	return osize;
}

/*
 * Since every frame grows by MPPE_OVHD + 2 bytes, this is always going
 * to look bad ... and the longer the link is up the worse it will get.
 */
static void mppe_comp_stats(void *arg, struct compstat *stats)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;

	*stats = state->stats;
}

static int
mppe_decomp_init(void *arg, unsigned char *options, int optlen, int unit,
		 int hdrlen, int mru, int debug)
{
	/* ARGSUSED */
	return mppe_init(arg, options, optlen, unit, debug, "mppe_decomp_init");
}

/*
 * We received a CCP Reset-Ack.  Just ignore it.
 */
static void mppe_decomp_reset(void *arg)
{
	/* ARGSUSED */
	return;
}

/*
 * Decompress (decrypt) an MPPE packet.
 */
static int
mppe_decompress(void *arg, unsigned char *ibuf, int isize, unsigned char *obuf,
		int osize)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;
	struct blkcipher_desc desc = { .tfm = state->arc4 };
	unsigned ccount;
	int flushed = MPPE_BITS(ibuf) & MPPE_BIT_FLUSHED;
	int sanity = 0;
	struct scatterlist sg_in[1], sg_out[1];

	if (isize <= PPP_HDRLEN + MPPE_OVHD) {
		if (state->debug)
			printk(KERN_DEBUG
			       "mppe_decompress[%d]: short pkt (%d)\n",
			       state->unit, isize);
		return DECOMP_ERROR;
	}

	/*
	 * Make sure we have enough room to decrypt the packet.
	 * Note that for our test we only subtract 1 byte whereas in
	 * mppe_compress() we added 2 bytes (+MPPE_OVHD);
	 * this is to account for possible PFC.
	 */
	if (osize < isize - MPPE_OVHD - 1) {
		printk(KERN_DEBUG "mppe_decompress[%d]: osize too small! "
		       "(have: %d need: %d)\n", state->unit,
		       osize, isize - MPPE_OVHD - 1);
		return DECOMP_ERROR;
	}
	osize = isize - MPPE_OVHD - 2;	/* assume no PFC */

	ccount = MPPE_CCOUNT(ibuf);
	if (state->debug >= 7)
		printk(KERN_DEBUG "mppe_decompress[%d]: ccount %d\n",
		       state->unit, ccount);

	/* sanity checks -- terminate with extreme prejudice */
	if (!(MPPE_BITS(ibuf) & MPPE_BIT_ENCRYPTED)) {
		printk(KERN_DEBUG
		       "mppe_decompress[%d]: ENCRYPTED bit not set!\n",
		       state->unit);
		state->sanity_errors += 100;
		sanity = 1;
	}
	if (!state->stateful && !flushed) {
		printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set in "
		       "stateless mode!\n", state->unit);
		state->sanity_errors += 100;
		sanity = 1;
	}
	if (state->stateful && ((ccount & 0xff) == 0xff) && !flushed) {
		printk(KERN_DEBUG "mppe_decompress[%d]: FLUSHED bit not set on "
		       "flag packet!\n", state->unit);
		state->sanity_errors += 100;
		sanity = 1;
	}

	if (sanity) {
		if (state->sanity_errors < SANITY_MAX)
			return DECOMP_ERROR;
		else
			/*
			 * Take LCP down if the peer is sending too many bogons.
			 * We don't want to do this for a single or just a few
			 * instances since it could just be due to packet corruption.
			 */
			return DECOMP_FATALERROR;
	}

	/*
	 * Check the coherency count.
	 */

	if (!state->stateful) {
		/* RFC 3078, sec 8.1.  Rekey for every packet. */
		while (state->ccount != ccount) {
			mppe_rekey(state, 0);
			state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
		}
	} else {
		/* RFC 3078, sec 8.2. */
		if (!state->discard) {
			/* normal state */
			state->ccount = (state->ccount + 1) % MPPE_CCOUNT_SPACE;
			if (ccount != state->ccount) {
				/*
				 * (ccount > state->ccount)
				 * Packet loss detected, enter the discard state.
				 * Signal the peer to rekey (by sending a CCP Reset-Request).
				 */
				state->discard = 1;
				return DECOMP_ERROR;
			}
		} else {
			/* discard state */
			if (!flushed) {
				/* ccp.c will be silent (no additional CCP Reset-Requests). */
				return DECOMP_ERROR;
			} else {
				/* Rekey for every missed "flag" packet. */
				while ((ccount & ~0xff) !=
				       (state->ccount & ~0xff)) {
					mppe_rekey(state, 0);
					state->ccount =
					    (state->ccount +
					     256) % MPPE_CCOUNT_SPACE;
				}

				/* reset */
				state->discard = 0;
				state->ccount = ccount;
				/*
				 * Another problem with RFC 3078 here.  It implies that the
				 * peer need not send a Reset-Ack packet.  But RFC 1962
				 * requires it.  Hopefully, M$ does send a Reset-Ack; even
				 * though it isn't required for MPPE synchronization, it is
				 * required to reset CCP state.
				 */
			}
		}
		if (flushed)
			mppe_rekey(state, 0);
	}

	/*
	 * Fill in the first part of the PPP header.  The protocol field
	 * comes from the decrypted data.
	 */
	obuf[0] = PPP_ADDRESS(ibuf);	/* +1 */
	obuf[1] = PPP_CONTROL(ibuf);	/* +1 */
	obuf += 2;
	ibuf += PPP_HDRLEN + MPPE_OVHD;
	isize -= PPP_HDRLEN + MPPE_OVHD;	/* -6 */
	/* net osize: isize-4 */

	/*
	 * Decrypt the first byte in order to check if it is
	 * a compressed or uncompressed protocol field.
	 */
	sg_init_table(sg_in, 1);
	sg_init_table(sg_out, 1);
	setup_sg(sg_in, ibuf, 1);
	setup_sg(sg_out, obuf, 1);
	if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, 1) != 0) {
		printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
		return DECOMP_ERROR;
	}

	/*
	 * Do PFC decompression.
	 * This would be nicer if we were given the actual sk_buff
	 * instead of a char *.
	 */
	if ((obuf[0] & 0x01) != 0) {
		obuf[1] = obuf[0];
		obuf[0] = 0;
		obuf++;
		osize++;
	}

	/* And finally, decrypt the rest of the packet. */
	setup_sg(sg_in, ibuf + 1, isize - 1);
	setup_sg(sg_out, obuf + 1, osize - 1);
	if (crypto_blkcipher_decrypt(&desc, sg_out, sg_in, isize - 1)) {
		printk(KERN_DEBUG "crypto_cypher_decrypt failed\n");
		return DECOMP_ERROR;
	}

	state->stats.unc_bytes += osize;
	state->stats.unc_packets++;
	state->stats.comp_bytes += isize;
	state->stats.comp_packets++;

	/* good packet credit */
	state->sanity_errors >>= 1;

	return osize;
}

/*
 * Incompressible data has arrived (this should never happen!).
 * We should probably drop the link if the protocol is in the range
 * of what should be encrypted.  At the least, we should drop this
 * packet.  (How to do this?)
 */
static void mppe_incomp(void *arg, unsigned char *ibuf, int icnt)
{
	struct ppp_mppe_state *state = (struct ppp_mppe_state *) arg;

	if (state->debug &&
	    (PPP_PROTOCOL(ibuf) >= 0x0021 && PPP_PROTOCOL(ibuf) <= 0x00fa))
		printk(KERN_DEBUG
		       "mppe_incomp[%d]: incompressible (unencrypted) data! "
		       "(proto %04x)\n", state->unit, PPP_PROTOCOL(ibuf));

	state->stats.inc_bytes += icnt;
	state->stats.inc_packets++;
	state->stats.unc_bytes += icnt;
	state->stats.unc_packets++;
}

/*************************************************************
 * Module interface table
 *************************************************************/

/*
 * Procedures exported to if_ppp.c.
 */
static struct compressor ppp_mppe = {
	.compress_proto = CI_MPPE,
	.comp_alloc     = mppe_alloc,
	.comp_free      = mppe_free,
	.comp_init      = mppe_comp_init,
	.comp_reset     = mppe_comp_reset,
	.compress       = mppe_compress,
	.comp_stat      = mppe_comp_stats,
	.decomp_alloc   = mppe_alloc,
	.decomp_free    = mppe_free,
	.decomp_init    = mppe_decomp_init,
	.decomp_reset   = mppe_decomp_reset,
	.decompress     = mppe_decompress,
	.incomp         = mppe_incomp,
	.decomp_stat    = mppe_comp_stats,
	.owner          = THIS_MODULE,
	.comp_extra     = MPPE_PAD,
};

/*
 * ppp_mppe_init()
 *
 * Prior to allowing load, try to load the arc4 and sha1 crypto
 * libraries.  The actual use will be allocated later, but
 * this way the module will fail to insmod if they aren't available.
 */

static int __init ppp_mppe_init(void)
{
	int answer;
	if (!(crypto_has_blkcipher("ecb(arc4)", 0, CRYPTO_ALG_ASYNC) &&
	      crypto_has_hash("sha1", 0, CRYPTO_ALG_ASYNC)))
		return -ENODEV;

	sha_pad = kmalloc(sizeof(struct sha_pad), GFP_KERNEL);
	if (!sha_pad)
		return -ENOMEM;
	sha_pad_init(sha_pad);

	answer = ppp_register_compressor(&ppp_mppe);

	if (answer == 0)
		printk(KERN_INFO "PPP MPPE Compression module registered\n");
	else
		kfree(sha_pad);

	return answer;
}

static void __exit ppp_mppe_cleanup(void)
{
	ppp_unregister_compressor(&ppp_mppe);
	kfree(sha_pad);
}

module_init(ppp_mppe_init);
module_exit(ppp_mppe_cleanup);
Example #30
    /* called by async task to perform the operation synchronously using direct MMC APIs  */
A_STATUS DoHifReadWriteScatter(HIF_DEVICE *device, BUS_REQUEST *busrequest)
{
    int                     i;
    A_UINT8                 rw;
    A_UINT8                 opcode;
    struct mmc_request      mmcreq;
    struct mmc_command      cmd;
    struct mmc_data         data;
    HIF_SCATTER_REQ_PRIV   *pReqPriv;   
    HIF_SCATTER_REQ        *pReq;       
    A_STATUS                status = A_OK;
    struct                  scatterlist *pSg;
    
    pReqPriv = busrequest->pScatterReq;
    
    A_ASSERT(pReqPriv != NULL);
    
    pReq = pReqPriv->pHifScatterReq;
    
    memset(&mmcreq, 0, sizeof(struct mmc_request));
    memset(&cmd, 0, sizeof(struct mmc_command));
    memset(&data, 0, sizeof(struct mmc_data));
       
    data.blksz = HIF_MBOX_BLOCK_SIZE;
    data.blocks = pReq->TotalLength / HIF_MBOX_BLOCK_SIZE;
                        
    AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: (%s) Address: 0x%X, (BlockLen: %d, BlockCount: %d) , (tot:%d,sg:%d)\n",
              (pReq->Request & HIF_WRITE) ? "WRITE":"READ", pReq->Address, data.blksz, data.blocks,
              pReq->TotalLength,pReq->ValidScatterEntries));
         
    if (pReq->Request  & HIF_WRITE) {
        rw = _CMD53_ARG_WRITE;
        data.flags = MMC_DATA_WRITE;
    } else {
        rw = _CMD53_ARG_READ;
        data.flags = MMC_DATA_READ;
    }

    if (pReq->Request & HIF_FIXED_ADDRESS) {
        opcode = _CMD53_ARG_FIXED_ADDRESS;
    } else {
        opcode = _CMD53_ARG_INCR_ADDRESS;
    }
    
        /* fill SG entries */
    pSg = pReqPriv->sgentries;   
    sg_init_table(pSg, pReq->ValidScatterEntries); 
          
        /* assemble SG list */   
    for (i = 0 ; i < pReq->ValidScatterEntries ; i++, pSg++) {
            /* setup each sg entry */
        if ((unsigned long)pReq->ScatterList[i].pBuffer & 0x3) {
                /* note some scatter engines can handle unaligned buffers, print this
                 * as informational only */
            AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER,
                            ("HIF: (%s) Scatter Buffer is unaligned 0x%lx\n",
                            pReq->Request & HIF_WRITE ? "WRITE":"READ",
                            (unsigned long)pReq->ScatterList[i].pBuffer)); 
        }
        
        AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("  %d:  Addr:0x%lX, Len:%d \n",
            i,(unsigned long)pReq->ScatterList[i].pBuffer,pReq->ScatterList[i].Length));
            
        sg_set_buf(pSg, pReq->ScatterList[i].pBuffer, pReq->ScatterList[i].Length);
    }
        /* set scatter-gather table for request */
    data.sg = pReqPriv->sgentries;
    data.sg_len = pReq->ValidScatterEntries;
        /* set command argument */    
    SDIO_SET_CMD53_ARG(cmd.arg, 
                       rw, 
                       device->func->num, 
                       _CMD53_ARG_BLOCK_BASIS, 
                       opcode,  
                       pReq->Address,
                       data.blocks);  
                       
    cmd.opcode = SD_IO_RW_EXTENDED;
    cmd.flags = MMC_RSP_SPI_R5 | MMC_RSP_R5 | MMC_CMD_ADTC;
    
    mmcreq.cmd = &cmd;
    mmcreq.data = &data;
    
    mmc_set_data_timeout(&data, device->func->card);    
        /* synchronous call to process request */
    mmc_wait_for_req(device->func->card->host, &mmcreq);
 
    if (cmd.error) {
        status = A_ERROR;   
        AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("HIF-SCATTER: cmd error: %d \n",cmd.error));
    }
               
    if (data.error) {
        status = A_ERROR;
        AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("HIF-SCATTER: data error: %d \n",data.error));   
    }

    if (A_FAILED(status)) {
        AR_DEBUG_PRINTF(ATH_DEBUG_ERROR, ("HIF-SCATTER: FAILED!!! (%s) Address: 0x%X, Block mode (BlockLen: %d, BlockCount: %d)\n",
              (pReq->Request & HIF_WRITE) ? "WRITE":"READ",pReq->Address, data.blksz, data.blocks));        
    }
    
        /* set completion status, fail or success */
    pReq->CompletionStatus = status;
    
    if (pReq->Request & HIF_ASYNCHRONOUS) {
        AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER: async_task completion routine req: 0x%lX (%d)\n",(unsigned long)busrequest, status));
            /* complete the request */
        A_ASSERT(pReq->CompletionRoutine != NULL);
        pReq->CompletionRoutine(pReq);
    } else {
        AR_DEBUG_PRINTF(ATH_DEBUG_SCATTER, ("HIF-SCATTER async_task upping busrequest : 0x%lX (%d)\n", (unsigned long)busrequest,status));
            /* signal wait */
        up(&busrequest->sem_req);
    }
                                                               
    return status;   
}