Example #1
static int
cryptoloop_transfer(struct loop_device *lo, int cmd,
		    struct page *raw_page, unsigned raw_off,
		    struct page *loop_page, unsigned loop_off,
		    int size, sector_t IV)
{
	struct crypto_blkcipher *tfm = lo->key_data;
	struct blkcipher_desc desc = {
		.tfm = tfm,
		.flags = CRYPTO_TFM_REQ_MAY_SLEEP,
	};
	struct scatterlist sg_out;
	struct scatterlist sg_in;

	encdec_cbc_t encdecfunc;
	struct page *in_page, *out_page;
	unsigned in_offs, out_offs;
	int err;

	sg_init_table(&sg_out, 1);
	sg_init_table(&sg_in, 1);

	if (cmd == READ) {
		in_page = raw_page;
		in_offs = raw_off;
		out_page = loop_page;
		out_offs = loop_off;
		encdecfunc = crypto_blkcipher_crt(tfm)->decrypt;
	} else {
		in_page = loop_page;
		in_offs = loop_off;
		out_page = raw_page;
		out_offs = raw_off;
		encdecfunc = crypto_blkcipher_crt(tfm)->encrypt;
	}

	while (size > 0) {
		const int sz = min(size, LOOP_IV_SECTOR_SIZE);
		u32 iv[4] = { 0, };
		iv[0] = cpu_to_le32(IV & 0xffffffff);

		sg_set_page(&sg_in, in_page, sz, in_offs);
		sg_set_page(&sg_out, out_page, sz, out_offs);

		desc.info = iv;
		err = encdecfunc(&desc, &sg_out, &sg_in, sz);
		if (err)
			return err;

		IV++;
		size -= sz;
		in_offs += sz;
		out_offs += sz;
	}

	return 0;
}
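The loop above re-targets a single-entry scatterlist at one sector per iteration. A minimal sketch of that step in isolation (hypothetical helper; assumes the 512-byte sector size that LOOP_IV_SECTOR_SIZE denotes, and that off + 512 stays within the page):

#include <linux/scatterlist.h>

/* Point a one-entry scatterlist at a 512-byte sector inside a page. */
static void map_one_sector(struct scatterlist *sg, struct page *page,
			   unsigned int off)
{
	sg_init_table(sg, 1);		 /* zero the entry, mark it as list end */
	sg_set_page(sg, page, 512, off); /* arguments: page, length, offset */
}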
Example #2
static int do_page_crypto(struct inode *inode,
			fscrypt_direction_t rw, pgoff_t index,
			struct page *src_page, struct page *dest_page,
			gfp_t gfp_flags)
{
	struct {
		__le64 index;
		u8 padding[FS_XTS_TWEAK_SIZE - sizeof(__le64)];
	} xts_tweak;
	struct skcipher_request *req = NULL;
	DECLARE_FS_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req) {
		printk_ratelimited(KERN_ERR
				"%s: crypto_request_alloc() failed\n",
				__func__);
		return -ENOMEM;
	}

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		page_crypt_complete, &ecr);

	BUILD_BUG_ON(sizeof(xts_tweak) != FS_XTS_TWEAK_SIZE);
	xts_tweak.index = cpu_to_le64(index);
	memset(xts_tweak.padding, 0, sizeof(xts_tweak.padding));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_SIZE, 0);
	skcipher_request_set_crypt(req, &src, &dst, PAGE_SIZE, &xts_tweak);
	if (rw == FS_DECRYPT)
		res = crypto_skcipher_decrypt(req);
	else
		res = crypto_skcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		BUG_ON(req->base.data != &ecr);
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	skcipher_request_free(req);
	if (res) {
		printk_ratelimited(KERN_ERR
			"%s: crypto_skcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}
Example #3
/**
 * sgl_map_user_pages() - Pin user pages and put them into a scatter gather list
 * @sgl: Scatter gather list to fill
 * @nr_pages: Number of pages
 * @uaddr: User buffer address
 * @length: Length of user buffer
 * @rw: Direction (0 = read from userspace, 1 = write to userspace)
 * @to_user: 1 - transfer is to/from a user-space buffer, 0 - kernel buffer
 *
 * This function pins the pages of the userspace buffer and fills in the
 * scatter gather list.
 */
static int sgl_map_user_pages(struct scatterlist *sgl,
			      const unsigned int nr_pages, unsigned long uaddr,
			      size_t length, int rw, int to_user)
{
	int rc;
	int i;
	struct page **pages;

	pages = kmalloc_array(nr_pages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	if (to_user) {
		rc = sgl_fill_user_pages(pages, uaddr, nr_pages, rw);
		if (rc >= 0 && rc < nr_pages) {
			/* Some pages were pinned, release these */
			for (i = 0; i < rc; i++)
				page_cache_release(pages[i]);
			rc = -ENOMEM;
			goto out_free;
		}
	} else {
		rc = sgl_fill_kernel_pages(pages, uaddr, nr_pages, rw);
	}

	if (rc < 0)
		/* We completely failed to get the pages */
		goto out_free;

	/* Populate the scatter/gather list */
	sg_init_table(sgl, nr_pages);

	/* Take a shortcut here when we only have a single page transfer */
	if (nr_pages > 1) {
		unsigned int off = offset_in_page(uaddr);
		unsigned int len = PAGE_SIZE - off;

		sg_set_page (&sgl[0], pages[0], len, off);
		length -= len;

		for (i = 1; i < nr_pages; i++) {
			sg_set_page (&sgl[i], pages[i],
				     (length < PAGE_SIZE) ? length : PAGE_SIZE,
				     0);
			length -= PAGE_SIZE;
		}
	} else
		sg_set_page (&sgl[0], pages[0], length, offset_in_page(uaddr));

out_free:
	/* We do not need the pages array anymore */
	kfree(pages);

	return rc < 0 ? rc : nr_pages;
}
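sgl_fill_user_pages() is not shown here; on mainline kernels the pinning it performs is typically done with get_user_pages_fast(). A rough sketch under that assumption (hypothetical helper; the gup_flags-based signature is from newer kernels, older ones take a write flag instead):

#include <linux/mm.h>

/* Pin nr_pages of the user buffer at uaddr. rw != 0 means the pages
 * will be written to. Returns the number of pages pinned or a
 * negative errno. */
static int pin_user_buffer(struct page **pages, unsigned long uaddr,
			   unsigned int nr_pages, int rw)
{
	return get_user_pages_fast(uaddr & PAGE_MASK, nr_pages,
				   rw ? FOLL_WRITE : 0, pages);
}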
Example #4
static int ext4_page_crypto(struct inode *inode,
			    ext4_direction_t rw,
			    pgoff_t index,
			    struct page *src_page,
			    struct page *dest_page)

{
	u8 xts_tweak[EXT4_XTS_TWEAK_SIZE];
	struct ablkcipher_request *req = NULL;
	DECLARE_EXT4_COMPLETION_RESULT(ecr);
	struct scatterlist dst, src;
	struct ext4_crypt_info *ci = EXT4_I(inode)->i_crypt_info;
	struct crypto_ablkcipher *tfm = ci->ci_ctfm;
	int res = 0;

	req = ablkcipher_request_alloc(tfm, GFP_NOFS);
	if (!req) {
		printk_ratelimited(KERN_ERR
				   "%s: crypto_request_alloc() failed\n",
				   __func__);
		return -ENOMEM;
	}
	ablkcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		ext4_crypt_complete, &ecr);

	BUILD_BUG_ON(EXT4_XTS_TWEAK_SIZE < sizeof(index));
	memcpy(xts_tweak, &index, sizeof(index));
	memset(&xts_tweak[sizeof(index)], 0,
	       EXT4_XTS_TWEAK_SIZE - sizeof(index));

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, PAGE_CACHE_SIZE, 0);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, PAGE_CACHE_SIZE, 0);
	ablkcipher_request_set_crypt(req, &src, &dst, PAGE_CACHE_SIZE,
				     xts_tweak);
	if (rw == EXT4_DECRYPT)
		res = crypto_ablkcipher_decrypt(req);
	else
		res = crypto_ablkcipher_encrypt(req);
	if (res == -EINPROGRESS || res == -EBUSY) {
		wait_for_completion(&ecr.completion);
		res = ecr.res;
	}
	ablkcipher_request_free(req);
	if (res) {
		printk_ratelimited(
			KERN_ERR
			"%s: crypto_ablkcipher_encrypt() returned %d\n",
			__func__, res);
		return res;
	}
	return 0;
}
Example #5
static int crypt_convert_block(struct crypt_config *cc,
                               struct convert_context *ctx,
                               struct ablkcipher_request *req)
{
    struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in);
    struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out);
    struct dm_crypt_request *dmreq;
    u8 *iv;
    int r;

    dmreq = dmreq_of_req(cc, req);
    iv = iv_of_dmreq(cc, dmreq);

    dmreq->iv_sector = ctx->cc_sector;
    dmreq->ctx = ctx;
    sg_init_table(&dmreq->sg_in, 1);
    sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT,
                bv_in->bv_offset + ctx->offset_in);

    sg_init_table(&dmreq->sg_out, 1);
    sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT,
                bv_out->bv_offset + ctx->offset_out);

    ctx->offset_in += 1 << SECTOR_SHIFT;
    if (ctx->offset_in >= bv_in->bv_len) {
        ctx->offset_in = 0;
        ctx->idx_in++;
    }

    ctx->offset_out += 1 << SECTOR_SHIFT;
    if (ctx->offset_out >= bv_out->bv_len) {
        ctx->offset_out = 0;
        ctx->idx_out++;
    }

    if (cc->iv_gen_ops) {
        r = cc->iv_gen_ops->generator(cc, iv, dmreq);
        if (r < 0)
            return r;
    }

    ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
                                 1 << SECTOR_SHIFT, iv);

    if (bio_data_dir(ctx->bio_in) == WRITE)
        r = crypto_ablkcipher_encrypt(req);
    else
        r = crypto_ablkcipher_decrypt(req);

    if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
        r = cc->iv_gen_ops->post(cc, iv, dmreq);

    return r;
}
Example #6
static struct sg_table *
huge_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	const unsigned long nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct scatterlist *sg, *src, *end;
	struct sg_table *pages;
	unsigned long n;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(pages, npages, GFP)) {
		kfree(pages);
		return ERR_PTR(-ENOMEM);
	}

	sg = pages->sgl;
	for (n = 0; n < nreal; n++) {
		struct page *page;

		page = alloc_page(GFP | __GFP_HIGHMEM);
		if (!page) {
			sg_mark_end(sg);
			goto err;
		}

		sg_set_page(sg, page, PAGE_SIZE, 0);
		sg = __sg_next(sg);
	}
	if (nreal < npages) {
		for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) {
			sg_set_page(sg, sg_page(src), PAGE_SIZE, 0);
			src = __sg_next(src);
			if (src == end)
				src = pages->sgl;
		}
	}

	if (i915_gem_gtt_prepare_pages(obj, pages))
		goto err;

	return pages;

err:
	huge_free_pages(obj, pages);
	return ERR_PTR(-ENOMEM);
#undef GFP
}
Example #7
/**
 * pack_sg_list_p - Just like pack_sg_list. Instead of taking a buffer,
 * this takes a list of pages.
 * @sg: scatter/gather list to pack into
 * @start: which segment of the sg_list to start at
 * @limit: maximum number of pages in sg list
 * @pdata: a list of pages to add into sg.
 * @nr_pages: number of pages to pack into the scatter/gather list
 * @data: data to pack into scatter/gather list
 * @count: amount of data to pack into the scatter/gather list
 */
static int
pack_sg_list_p(struct scatterlist *sg, int start, int limit,
	       struct page **pdata, int nr_pages, char *data, int count)
{
	int i = 0, s;
	int data_off;
	int index = start;

	BUG_ON(nr_pages > (limit - start));
	/*
	 * if the first page doesn't start at
	 * page boundary find the offset
	 */
	data_off = offset_in_page(data);
	while (nr_pages) {
		s = rest_of_page(data);
		if (s > count)
			s = count;
		/* Make sure we don't terminate early. */
		sg_unmark_end(&sg[index]);
		sg_set_page(&sg[index++], pdata[i++], s, data_off);
		data_off = 0;
		data += s;
		count -= s;
		nr_pages--;
	}

	if (index-start)
		sg_mark_end(&sg[index - 1]);
	return index - start;
}
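The sg_unmark_end()/sg_mark_end() pairing above is what lets pack_sg_list_p() reuse a preallocated table across calls. A condensed sketch of that idiom (hypothetical helper):

#include <linux/scatterlist.h>

/* Append one page at index idx and terminate the list there. */
static void sg_append_page(struct scatterlist *sg, int idx,
			   struct page *page, unsigned int len)
{
	sg_unmark_end(&sg[idx]);	/* clear any stale end marker */
	sg_set_page(&sg[idx], page, len, 0);
	sg_mark_end(&sg[idx]);		/* the list now ends at this entry */
}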
Example #8
/**
 * sg_init_one - Initialize a single entry sg list
 * @sg:		 SG entry
 * @buf:	 Virtual address for IO
 * @buflen:	 IO length
 *
 **/
void sg_init_one(struct scatterlist *sg, const void *buf, unsigned int buflen)
{
	sg_init_table(sg, 1);
	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}
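A typical caller pairs sg_init_one() with a kmalloc()'d buffer; the virt_to_page() in the body above is only valid for such lowmem addresses, not for vmalloc() or highmem memory. A minimal usage sketch (hypothetical function):

#include <linux/scatterlist.h>
#include <linux/slab.h>

static int one_buf_example(void)
{
	struct scatterlist sg;
	void *buf = kmalloc(256, GFP_KERNEL);	/* lowmem, virt_to_page() is valid */

	if (!buf)
		return -ENOMEM;
	sg_init_one(&sg, buf, 256);
	/* ... hand &sg to a driver or crypto API ... */
	kfree(buf);
	return 0;
}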
Example #9
static int ion_mm_heap_allocate(struct ion_heap *heap,
                                struct ion_buffer *buffer,
                                unsigned long size, unsigned long align,
                                unsigned long flags)
{
    ion_mm_buffer_info* pBufferInfo = NULL;
    int ret;
    unsigned int addr;
    struct sg_table *table;
    struct scatterlist *sg;
    void* pVA;
    ION_FUNC_ENTER;
    pVA = vmalloc_user(size);
    buffer->priv_virt = NULL;
    if (IS_ERR_OR_NULL(pVA))
    {
        printk("[ion_mm_heap_allocate]: Error. Allocate buffer failed.\n");
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    pBufferInfo = (ion_mm_buffer_info*) kzalloc(sizeof(ion_mm_buffer_info), GFP_KERNEL);
    if (IS_ERR_OR_NULL(pBufferInfo))
    {
        vfree(pVA);
        printk("[ion_mm_heap_allocate]: Error. Allocate ion_buffer failed.\n");
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
    if (!table)
    {
        vfree(pVA);
        kfree(pBufferInfo);
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE, GFP_KERNEL);
    if (ret)
    {
        vfree(pVA);
        kfree(pBufferInfo);
        kfree(table);
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    sg = table->sgl;
    for (addr=(unsigned int)pVA; addr < (unsigned int) pVA + size; addr += PAGE_SIZE)
    {
        struct page *page = vmalloc_to_page((void*)addr);
        sg_set_page(sg, page, PAGE_SIZE, 0);
        sg = sg_next(sg);
    }
    buffer->sg_table = table;

    pBufferInfo->pVA = pVA;
    pBufferInfo->eModuleID = -1;
    buffer->priv_virt = pBufferInfo;
    ION_FUNC_LEAVE;
    return 0;
}
Example #10
struct scatterlist *ion_system_heap_map_dma(struct ion_heap *heap,
					    struct ion_buffer *buffer)
{
	struct scatterlist *sglist;
	struct page *page;
	int i;
	int npages = PAGE_ALIGN(buffer->size) / PAGE_SIZE;
	void *vaddr = buffer->priv_virt;

	sglist = vmalloc(npages * sizeof(struct scatterlist));
	if (!sglist)
		return ERR_PTR(-ENOMEM);
	memset(sglist, 0, npages * sizeof(struct scatterlist));
	sg_init_table(sglist, npages);
	for (i = 0; i < npages; i++) {
		page = vmalloc_to_page(vaddr);
		if (!page)
			goto end;
		sg_set_page(&sglist[i], page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}
	/* XXX do cache maintenance for dma? */
	return sglist;
end:
	vfree(sglist);
	return NULL;
}
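Examples #9, #10, #12, #15 and #26 all walk a vmalloc() area page by page with vmalloc_to_page(). A condensed sketch of that shared idiom (hypothetical helper; assumes vaddr is page-aligned):

#include <linux/vmalloc.h>
#include <linux/scatterlist.h>

/* Fill a preallocated npages-entry scatterlist from a vmalloc() area. */
static int vbuf_to_sg(struct scatterlist *sgl, void *vaddr,
		      unsigned int npages)
{
	unsigned int i;

	sg_init_table(sgl, npages);
	for (i = 0; i < npages; i++, vaddr += PAGE_SIZE) {
		struct page *pg = vmalloc_to_page(vaddr);

		if (!pg)	/* should not happen for a live vmalloc area */
			return -EFAULT;
		sg_set_page(&sgl[i], pg, PAGE_SIZE, 0);
	}
	return 0;
}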
Example #11
static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
					 enum dma_data_direction dir)
{
	struct mock_dmabuf *mock = to_mock(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *sg;
	int i, err;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(st, mock->npages, GFP_KERNEL);
	if (err)
		goto err_free;

	sg = st->sgl;
	for (i = 0; i < mock->npages; i++) {
		sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0);
		sg = sg_next(sg);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		err = -ENOMEM;
		goto err_st;
	}

	return st;

err_st:
	sg_free_table(st);
err_free:
	kfree(st);
	return ERR_PTR(err);
}
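This example and Example #25 both treat a dma_map_sg() return of 0 as failure. A sketch of the map/use/unmap pairing on its own (struct device and a filled sg_table assumed to be provided by the caller):

#include <linux/dma-mapping.h>

static int dma_map_use_unmap(struct device *dev, struct sg_table *st,
			     enum dma_data_direction dir)
{
	/* dma_map_sg() returns the number of DMA segments (entries may
	 * be coalesced by an IOMMU), or 0 on failure */
	int nents = dma_map_sg(dev, st->sgl, st->nents, dir);

	if (!nents)
		return -ENOMEM;
	/* ... program the device with the nents mapped segments ... */
	dma_unmap_sg(dev, st->sgl, st->nents, dir); /* original nents, not the return value */
	return 0;
}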
Example #12
/* Allocate a scatterlist for a vmalloc block. The scatterlist is allocated
   with kmalloc. Buffers of arbitrary alignment are supported.
   This function is derived from other vmalloc_to_sg functions in the kernel
   tree, but note that its second argument is a size in bytes, not in pages.
 */
static struct scatterlist *vmalloc_to_sg(unsigned char *const buf,
					 size_t const bytes)
{
	struct scatterlist *sg_array = NULL;
	struct page *pg;
	/* Allow non-page-aligned pointers, so the first and last page may
	   both be partial. */
	unsigned const page_count = bytes / PAGE_SIZE + 2;
	unsigned char *ptr;
	unsigned i;

	sg_array = kcalloc(page_count, sizeof(*sg_array), GFP_KERNEL);
	if (sg_array == NULL)
		goto abort;
	sg_init_table(sg_array, page_count);
	for (i = 0, ptr = (void *)((unsigned long)buf & PAGE_MASK);
	     ptr < buf + bytes;
	     i++, ptr += PAGE_SIZE) {
		pg = vmalloc_to_page(ptr);
		if (pg == NULL)
			goto abort;
		sg_set_page(&sg_array[i], pg, PAGE_SIZE, 0);
	}
	/* Rectify the first page which may be partial. The last page may
	   also be partial but its offset is correct so it doesn't matter. */
	sg_array[0].offset = offset_in_page(buf);
	sg_array[0].length = PAGE_SIZE - offset_in_page(buf);
	return sg_array;
abort:
	if (sg_array != NULL)
		kfree(sg_array);
	return NULL;
}
Example #13
int msm_iommu_map_extra(struct iommu_domain *domain,
				unsigned long start_iova,
				unsigned long size,
				unsigned long page_size,
				int cached)
{
	int ret = 0;
	int i = 0;
	unsigned long phy_addr = ALIGN(virt_to_phys(iommu_dummy), page_size);
	unsigned long temp_iova = start_iova;
	if (page_size == SZ_4K) {
		struct scatterlist *sglist;
		unsigned int nrpages = PFN_ALIGN(size) >> PAGE_SHIFT;
		struct page *dummy_page = phys_to_page(phy_addr);

		sglist = kmalloc(sizeof(*sglist) * nrpages, GFP_KERNEL);
		if (!sglist) {
			ret = -ENOMEM;
			goto out;
		}

		sg_init_table(sglist, nrpages);

		for (i = 0; i < nrpages; i++)
			sg_set_page(&sglist[i], dummy_page, PAGE_SIZE, 0);

		ret = iommu_map_range(domain, temp_iova, sglist, size, cached);
		if (ret) {
			pr_err("%s: could not map extra %lx in domain %p\n",
				__func__, start_iova, domain);
		}

		kfree(sglist);
	} else {
Example #14
struct rds_message *rds_message_map_pages(unsigned long *page_addrs, unsigned int total_len)
{
	struct rds_message *rm;
	unsigned int i;
	int num_sgs = ceil(total_len, PAGE_SIZE);
	int extra_bytes = num_sgs * sizeof(struct scatterlist);

	rm = rds_message_alloc(extra_bytes, GFP_NOWAIT);
	if (!rm)
		return ERR_PTR(-ENOMEM);

	set_bit(RDS_MSG_PAGEVEC, &rm->m_flags);
	rm->m_inc.i_hdr.h_len = cpu_to_be32(total_len);
	rm->data.op_nents = ceil(total_len, PAGE_SIZE);
	rm->data.op_sg = rds_message_alloc_sgs(rm, num_sgs);
	if (!rm->data.op_sg) {
		rds_message_put(rm);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < rm->data.op_nents; ++i) {
		sg_set_page(&rm->data.op_sg[i],
				virt_to_page(page_addrs[i]),
				PAGE_SIZE, 0);
	}

	return rm;
}
Example #15
/*
 * Return a scatterlist for some page-aligned vmalloc()'ed memory
 * block (NULL on errors).  Memory for the scatterlist is allocated
 * using kmalloc.  The caller must free the memory.
 */
static struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt,
						  int nr_pages)
{
	struct scatterlist *sglist;
	struct page *pg;
	int i;

	sglist = vmalloc(nr_pages * sizeof(*sglist));
	if (NULL == sglist)
		return NULL;
	memset(sglist, 0, nr_pages * sizeof(*sglist));
	sg_init_table(sglist, nr_pages);
	for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) {
		pg = vmalloc_to_page(virt);
		if (NULL == pg)
			goto err;
		BUG_ON(PageHighMem(pg));
		sg_set_page(&sglist[i], pg, PAGE_SIZE, 0);
	}
	return sglist;

err:
	vfree(sglist);
	return NULL;
}
Example #16
static int mmc_test_multi_read_high(struct mmc_test_card *test)
{
	int ret;
	unsigned int size;
	struct scatterlist sg;

	if (test->card->host->max_blk_count == 1)
		return RESULT_UNSUP_HOST;

	size = PAGE_SIZE * 2;
	size = min(size, test->card->host->max_req_size);
	size = min(size, test->card->host->max_seg_size);
	size = min(size, test->card->host->max_blk_count * 512);

	if (size < 1024)
		return RESULT_UNSUP_HOST;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, test->highmem, size, 0);

	ret = mmc_test_transfer(test, &sg, 1, 0, size/512, 512, 0);
	if (ret)
		return ret;

	return 0;
}
Example #17
static void *ion_page_pool_alloc_pages(struct ion_page_pool *pool)
{
	struct page *page;
	struct scatterlist sg;

	page = alloc_pages(pool->gfp_mask & ~__GFP_ZERO, pool->order);

	if (!page)
		return NULL;

	if (pool->gfp_mask & __GFP_ZERO)
		if (ion_heap_high_order_page_zero(
				page, pool->order, pool->should_invalidate))
			goto error_free_pages;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, PAGE_SIZE << pool->order, 0);
	sg_dma_address(&sg) = sg_phys(&sg);
	dma_sync_sg_for_device(NULL, &sg, 1, DMA_BIDIRECTIONAL);

	return page;
error_free_pages:
	__free_pages(page, pool->order);
	return NULL;
}
Example #18
static struct sg_table *exynos_pages_to_sg(struct page **pages, int nr_pages,
		unsigned int page_size)
{
	struct sg_table *sgt = NULL;
	struct scatterlist *sgl;
	int i, ret;

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt)
		goto out;

	ret = sg_alloc_table(sgt, nr_pages, GFP_KERNEL);
	if (ret)
		goto err_free_sgt;

	if (page_size < PAGE_SIZE)
		page_size = PAGE_SIZE;

	for_each_sg(sgt->sgl, sgl, nr_pages, i)
		sg_set_page(sgl, pages[i], page_size, 0);

	return sgt;

err_free_sgt:
	kfree(sgt);
out:
	return NULL;
}
Example #19
/*---------------------------------------------------------------------------*/
static inline void xio_sg_set_addr(struct scatterlist *sg, void *addr)
{
	/* keep the length */
#ifdef XIO_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	sg_set_page(sg, virt_to_page(addr), sg->length, offset_in_page(addr));
}
Example #20
/*---------------------------------------------------------------------------*/
static inline void xio_sg_set_buf(struct scatterlist *sg, const void *buf,
				  uint32_t buflen, void *mr)
{
#ifdef XIO_DEBUG_SG
	BUG_ON(sg->sg_magic != SG_MAGIC);
#endif
	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
}
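Examples #19 and #20 open-code what mainline exposes as sg_set_buf(); the two forms below are equivalent for lowmem kernel addresses (and neither is safe for vmalloc() or highmem buffers). A sketch of the equivalence (hypothetical function):

#include <linux/scatterlist.h>

static void set_buf_two_ways(struct scatterlist *sg, const void *buf,
			     unsigned int buflen)
{
	/* open-coded form, as in the examples above */
	sg_set_page(sg, virt_to_page(buf), buflen, offset_in_page(buf));
	/* equivalent helper from <linux/scatterlist.h> */
	sg_set_buf(sg, buf, buflen);
}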
Example #21
static bool ux500_configure_channel(struct dma_channel *channel,
                                    u16 packet_sz, u8 mode,
                                    dma_addr_t dma_addr, u32 len)
{
    struct ux500_dma_channel *ux500_channel = channel->private_data;
    struct musb_hw_ep *hw_ep = ux500_channel->hw_ep;
    struct dma_chan *dma_chan = ux500_channel->dma_chan;
    struct dma_async_tx_descriptor *dma_desc;
    enum dma_transfer_direction direction;
    struct scatterlist sg;
    struct dma_slave_config slave_conf;
    enum dma_slave_buswidth addr_width;
    dma_addr_t usb_fifo_addr = (MUSB_FIFO_OFFSET(hw_ep->epnum) +
                                ux500_channel->controller->phy_base);
    struct musb *musb = ux500_channel->controller->private_data;

    dev_dbg(musb->controller,
            "packet_sz=%d, mode=%d, dma_addr=0x%llu, len=%d is_tx=%d\n",
            packet_sz, mode, (unsigned long long) dma_addr,
            len, ux500_channel->is_tx);

    ux500_channel->cur_len = len;

    sg_init_table(&sg, 1);
    sg_set_page(&sg, pfn_to_page(PFN_DOWN(dma_addr)), len,
                offset_in_page(dma_addr));
    sg_dma_address(&sg) = dma_addr;
    sg_dma_len(&sg) = len;

    direction = ux500_channel->is_tx ? DMA_MEM_TO_DEV : DMA_DEV_TO_MEM;
    addr_width = (len & 0x3) ? DMA_SLAVE_BUSWIDTH_1_BYTE :
                 DMA_SLAVE_BUSWIDTH_4_BYTES;

    slave_conf.direction = direction;
    slave_conf.src_addr = usb_fifo_addr;
    slave_conf.src_addr_width = addr_width;
    slave_conf.src_maxburst = 16;
    slave_conf.dst_addr = usb_fifo_addr;
    slave_conf.dst_addr_width = addr_width;
    slave_conf.dst_maxburst = 16;
    slave_conf.device_fc = false;

    dma_chan->device->device_control(dma_chan, DMA_SLAVE_CONFIG,
                                     (unsigned long) &slave_conf);

    dma_desc = dmaengine_prep_slave_sg(dma_chan, &sg, 1, direction,
                                       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
    if (!dma_desc)
        return false;

    dma_desc->callback = ux500_dma_callback;
    dma_desc->callback_param = channel;
    ux500_channel->cookie = dma_desc->tx_submit(dma_desc);

    dma_async_issue_pending(dma_chan);

    return true;
}
Example #22
static int crypt_convert_block(struct crypt_config *cc,
			       struct convert_context *ctx,
			       struct ablkcipher_request *req)
{
	struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
	struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
	struct dm_crypt_request *dmreq;
	u8 *iv;
	int r;

	dmreq = dmreq_of_req(cc, req);
	iv = iv_of_dmreq(cc, dmreq);

	dmreq->iv_sector = ctx->cc_sector;
	dmreq->ctx = ctx;
	sg_init_table(&dmreq->sg_in, 1);
	sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
		    bv_in.bv_offset);

	sg_init_table(&dmreq->sg_out, 1);
	sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
		    bv_out.bv_offset);

	bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
	bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);

	if (cc->iv_gen_ops) {
		r = cc->iv_gen_ops->generator(cc, iv, dmreq);
		if (r < 0)
			return r;
	}

	ablkcipher_request_set_crypt(req, &dmreq->sg_in, &dmreq->sg_out,
				     1 << SECTOR_SHIFT, iv);

	if (bio_data_dir(ctx->bio_in) == WRITE)
		r = crypto_ablkcipher_encrypt(req);
	else
		r = crypto_ablkcipher_decrypt(req);

	if (!r && cc->iv_gen_ops && cc->iv_gen_ops->post)
		r = cc->iv_gen_ops->post(cc, iv, dmreq);

	return r;
}
Example #23
int ivtv_udma_fill_sg_list (struct ivtv_user_dma *dma, struct ivtv_dma_page_info *dma_page, int map_offset)
{
	int i, offset;
	unsigned long flags;

	if (map_offset < 0)
		return map_offset;

	offset = dma_page->offset;

	/* Fill SG Array with new values */
	for (i = 0; i < dma_page->page_count; i++) {
		unsigned int len = (i == dma_page->page_count - 1) ?
			dma_page->tail : PAGE_SIZE - offset;

		if (PageHighMem(dma->map[map_offset])) {
			void *src;

			if (dma->bouncemap[map_offset] == NULL)
				dma->bouncemap[map_offset] = alloc_page(GFP_KERNEL);
			if (dma->bouncemap[map_offset] == NULL)
				return -1;
			local_irq_save(flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0)
			src = kmap_atomic(dma->map[map_offset], KM_BOUNCE_READ) + offset;
#else
			src = kmap_atomic(dma->map[map_offset]) + offset;
#endif
			memcpy(page_address(dma->bouncemap[map_offset]) + offset, src, len);
#if LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0)
			kunmap_atomic(src, KM_BOUNCE_READ);
#else
			kunmap_atomic(src);
#endif
			local_irq_restore(flags);
			sg_set_page(&dma->SGlist[map_offset], dma->bouncemap[map_offset], len, offset);
		}
		else {
			sg_set_page(&dma->SGlist[map_offset], dma->map[map_offset], len, offset);
		}
		offset = 0;
		map_offset++;
	}
	return map_offset;
}
Example #24
int fscrypt_do_page_crypto(const struct inode *inode, fscrypt_direction_t rw,
			   u64 lblk_num, struct page *src_page,
			   struct page *dest_page, unsigned int len,
			   unsigned int offs, gfp_t gfp_flags)
{
	union fscrypt_iv iv;
	struct skcipher_request *req = NULL;
	DECLARE_CRYPTO_WAIT(wait);
	struct scatterlist dst, src;
	struct fscrypt_info *ci = inode->i_crypt_info;
	struct crypto_skcipher *tfm = ci->ci_ctfm;
	int res = 0;

	BUG_ON(len == 0);

	fscrypt_generate_iv(&iv, lblk_num, ci);

	req = skcipher_request_alloc(tfm, gfp_flags);
	if (!req)
		return -ENOMEM;

	skcipher_request_set_callback(
		req, CRYPTO_TFM_REQ_MAY_BACKLOG | CRYPTO_TFM_REQ_MAY_SLEEP,
		crypto_req_done, &wait);

	sg_init_table(&dst, 1);
	sg_set_page(&dst, dest_page, len, offs);
	sg_init_table(&src, 1);
	sg_set_page(&src, src_page, len, offs);
	skcipher_request_set_crypt(req, &src, &dst, len, &iv);
	if (rw == FS_DECRYPT)
		res = crypto_wait_req(crypto_skcipher_decrypt(req), &wait);
	else
		res = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	if (res) {
		fscrypt_err(inode->i_sb,
			    "%scryption failed for inode %lu, block %llu: %d",
			    (rw == FS_DECRYPT ? "de" : "en"),
			    inode->i_ino, lblk_num, res);
		return res;
	}
	return 0;
}
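This example shows the modern replacement for the hand-rolled completions of Examples #2 and #4: DECLARE_CRYPTO_WAIT() plus crypto_wait_req(). A reduced sketch of just that synchronous-wait pattern (hypothetical helper; tfm, iv, page and len are assumed to be set up by the caller, and encryption is done in-place for brevity):

#include <crypto/skcipher.h>
#include <linux/crypto.h>
#include <linux/scatterlist.h>

static int encrypt_one_page(struct crypto_skcipher *tfm, u8 *iv,
			    struct page *page, unsigned int len)
{
	DECLARE_CRYPTO_WAIT(wait);
	struct skcipher_request *req;
	struct scatterlist sg;
	int err;

	req = skcipher_request_alloc(tfm, GFP_NOFS);
	if (!req)
		return -ENOMEM;

	sg_init_table(&sg, 1);
	sg_set_page(&sg, page, len, 0);
	skcipher_request_set_callback(req,
				      CRYPTO_TFM_REQ_MAY_BACKLOG |
				      CRYPTO_TFM_REQ_MAY_SLEEP,
				      crypto_req_done, &wait);
	skcipher_request_set_crypt(req, &sg, &sg, len, iv); /* in-place */
	err = crypto_wait_req(crypto_skcipher_encrypt(req), &wait);
	skcipher_request_free(req);
	return err;
}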
Example #25
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}
Example #26
/**
 * dma_field_alloc - allocate a buffer and map it to the IOMMU
 */
int dma_field_alloc(struct dma_region *dma, unsigned long n_bytes,
		    struct pci_dev *dev, int direction)
{
	unsigned int i;

	/* Round up to page size: PAGE_ALIGN() aligns the value to the
	 * next page boundary, i.e. ((addr) + PAGE_SIZE - 1) & PAGE_MASK. */

	n_bytes = PAGE_ALIGN(n_bytes);

	dma->n_pages = n_bytes >> PAGE_SHIFT;

	dma->kvirt = vmalloc_32(n_bytes);
	if (!dma->kvirt) {
		goto err;
	}

	memset(dma->kvirt, 0, n_bytes);

	/* allocate scatter/gather list */
	dma->sglist = vmalloc(dma->n_pages * sizeof(*dma->sglist));
	if (!dma->sglist) {
		goto err;
	}

	sg_init_table(dma->sglist, dma->n_pages);

	/* fill scatter/gather list with pages */
	for (i = 0; i < dma->n_pages; i++) {
		unsigned long va =
		    (unsigned long)dma->kvirt + (i << PAGE_SHIFT);

		sg_set_page(&dma->sglist[i], vmalloc_to_page((void *)va),
			    PAGE_SIZE, 0);
	}

	/* map sglist to the IOMMU */
	dma->n_dma_pages =
	    pci_map_sg(dev, dma->sglist, dma->n_pages, direction);

	if (dma->n_dma_pages == 0) {
		goto err;
	}

	dma->dev = dev;
	dma->direction = direction;

	return 0;

err:
	dma_field_free(dma);
	return -ENOMEM;
}
Example #27
static int alloc_sg(struct sock *sk, int len, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size,
		    int first_coalesce)
{
	struct page_frag *pfrag;
	unsigned int size = *sg_size;
	int num_elem = *sg_num_elem, use = 0, rc = 0;
	struct scatterlist *sge;
	unsigned int orig_offset;

	len -= size;
	pfrag = sk_page_frag(sk);

	while (len > 0) {
		if (!sk_page_frag_refill(sk, pfrag)) {
			rc = -ENOMEM;
			goto out;
		}

		use = min_t(int, len, pfrag->size - pfrag->offset);

		if (!sk_wmem_schedule(sk, use)) {
			rc = -ENOMEM;
			goto out;
		}

		sk_mem_charge(sk, use);
		size += use;
		orig_offset = pfrag->offset;
		pfrag->offset += use;

		sge = sg + num_elem - 1;
		if (num_elem > first_coalesce && sg_page(sge) == pfrag->page &&
		    sge->offset + sge->length == orig_offset) {
			sge->length += use;
		} else {
			sge++;
			sg_unmark_end(sge);
			sg_set_page(sge, pfrag->page, use, orig_offset);
			get_page(pfrag->page);
			++num_elem;
			if (num_elem == MAX_SKB_FRAGS) {
				rc = -ENOSPC;
				break;
			}
		}

		len -= use;
	}

out:
	*sg_size = size;
	*sg_num_elem = num_elem;
	return rc;
}
Example #28
static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
				      struct page *page, size_t size)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

	return ret;
}
Example #29
/*
 * DST uses a generic iteration approach for data crypto processing.
 * A single block IO request is split into an array of scatterlists,
 * which are submitted to the crypto processing iterator.
 *
 * Input and output iterator initialization differ, since in the
 * output case we cannot encrypt data in-place and need temporary
 * storage, which is then sent to the remote peer.
 */
static int dst_trans_iter_out(struct bio *bio, struct dst_crypto_engine *e,
		int (*iterator) (struct dst_crypto_engine *e,
				  struct scatterlist *dst,
				  struct scatterlist *src))
{
	struct bio_vec *bv;
	int err, i;

	sg_init_table(e->src, bio->bi_vcnt);
	sg_init_table(e->dst, bio->bi_vcnt);

	bio_for_each_segment(bv, bio, i) {
		sg_set_page(&e->src[i], bv->bv_page, bv->bv_len, bv->bv_offset);
		sg_set_page(&e->dst[i], e->pages[i], bv->bv_len, bv->bv_offset);

		err = iterator(e, &e->dst[i], &e->src[i]);
		if (err)
			return err;
	}

	return 0;
}
Example #30
static void chain_to_walk(struct scatterlist *sg, struct scatter_walk *walk)
{
	struct scatterlist *src = walk->sg;
	int diff = walk->offset - src->offset;

	sg_set_page(sg, sg_page(src),
		    src->length - diff, walk->offset);

	scatterwalk_crypto_chain(sg, sg_next(src), 2);
}