Code example #1
File: zc.c Project: gtalusan/cryptodev-linux
/* fetch the pages addr resides in into pg and initialise sg with them */
int __get_userbuf(uint8_t __user *addr, uint32_t len, int write,
		unsigned int pgcount, struct page **pg, struct scatterlist *sg,
		struct task_struct *task, struct mm_struct *mm)
{
	int ret, pglen, i = 0;
	struct scatterlist *sgp;

	if (unlikely(!pgcount || !len || !addr)) {
		sg_mark_end(sg);
		return 0;
	}

	down_read(&mm->mmap_sem);
	ret = get_user_pages(task, mm,
			(unsigned long)addr, pgcount, write, 0, pg, NULL);
	up_read(&mm->mmap_sem);
	if (ret != pgcount)
		return -EINVAL;

	sg_init_table(sg, pgcount);

	pglen = min((ptrdiff_t)(PAGE_SIZE - PAGEOFFSET(addr)), (ptrdiff_t)len);
	sg_set_page(sg, pg[i++], pglen, PAGEOFFSET(addr));

	len -= pglen;
	for (sgp = sg_next(sg); len; sgp = sg_next(sgp)) {
		pglen = min((uint32_t)PAGE_SIZE, len);
		sg_set_page(sgp, pg[i++], pglen, 0);
		len -= pglen;
	}
	sg_mark_end(sg_last(sg, pgcount));
	return 0;
}
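The helper above takes one page reference per entry via get_user_pages(), so its caller must drop those references once the transfer is done. A minimal cleanup sketch, not part of the original file (the release_userbuf name is hypothetical; set_page_dirty_lock() matters only if the device wrote into the pages):

static void release_userbuf(struct page **pg, unsigned int pgcount, int write)
{
	unsigned int i;

	/* Drop the references taken by get_user_pages(); if the pages
	 * were written to, mark them dirty so the data is not lost. */
	for (i = 0; i < pgcount; i++) {
		if (write)
			set_page_dirty_lock(pg[i]);
		put_page(pg[i]);
	}
}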
Code example #2
File: xio_sg_scatter.c Project: xiaom/accelio
/*---------------------------------------------------------------------------*/
static inline void xio_tbl_set_nents(struct sg_table *tbl, uint32_t nents)
{
	struct scatterlist *sg;
	int i;

#ifdef XIO_DEBUG_SG
	verify_tbl(tbl);
#endif
	if (!tbl || tbl->orig_nents < nents)
		return;

	sg = tbl->sgl;
	/* tbl->nents is unsigned so if tbl->nents is ZERO then tbl->nents - 1
	 * is a huge number, so check this.
	 */
	if (tbl->nents && (tbl->nents < tbl->orig_nents)) {
		for (i = 0; i < tbl->nents - 1; i++)
			sg = sg_next(sg);
		sg_unmark_end(sg);
	}

	if (!nents) {
		tbl->nents = nents;
		return;
	}

	sg = tbl->sgl;
	for (i = 0; i < nents - 1; i++)
		sg = sg_next(sg);

	sg_mark_end(sg);

	tbl->nents = nents;
}
Code example #3
File: bcm-flexrm-mailbox.c Project: Lyude/linux
static u32 flexrm_spu_estimate_nonheader_desc_count(struct brcm_message *msg)
{
	u32 cnt = 0;
	unsigned int dst_target = 0;
	struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;

	while (src_sg || dst_sg) {
		if (src_sg) {
			cnt++;
			dst_target = src_sg->length;
			src_sg = sg_next(src_sg);
		} else
			dst_target = UINT_MAX;

		while (dst_target && dst_sg) {
			cnt++;
			if (dst_sg->length < dst_target)
				dst_target -= dst_sg->length;
			else
				dst_target = 0;
			dst_sg = sg_next(dst_sg);
		}
	}

	return cnt;
}
Code example #4
File: bcm-flexrm-mailbox.c Project: Lyude/linux
static bool flexrm_spu_sanity_check(struct brcm_message *msg)
{
	struct scatterlist *sg;

	if (!msg->spu.src || !msg->spu.dst)
		return false;
	for (sg = msg->spu.src; sg; sg = sg_next(sg)) {
		if (sg->length & 0xf) {
			if (sg->length > SRC_LENGTH_MASK)
				return false;
		} else {
			if (sg->length > (MSRC_LENGTH_MASK * 16))
				return false;
		}
	}
	for (sg = msg->spu.dst; sg; sg = sg_next(sg)) {
		if (sg->length & 0xf) {
			if (sg->length > DST_LENGTH_MASK)
				return false;
		} else {
			if (sg->length > (MDST_LENGTH_MASK * 16))
				return false;
		}
	}

	return true;
}
Code example #5
File: cc_buffer_mgr.c Project: Anjali05/linux
static int cc_dma_map_sg(struct device *dev, struct scatterlist *sg, u32 nents,
			 enum dma_data_direction direction)
{
	u32 i, j;
	struct scatterlist *l_sg = sg;

	for (i = 0; i < nents; i++) {
		if (!l_sg)
			break;
		if (dma_map_sg(dev, l_sg, 1, direction) != 1) {
		dev_err(dev, "dma_map_sg() sg buffer failed\n");
			goto err;
		}
		l_sg = sg_next(l_sg);
	}
	return nents;

err:
	/* Restore mapped parts */
	for (j = 0; j < i; j++) {
		if (!sg)
			break;
		dma_unmap_sg(dev, sg, 1, direction);
		sg = sg_next(sg);
	}
	return 0;
}
Code example #6
File: sunxi_ss_proc.c Project: alex-deng/a33_linux
/* Make a sg_table based on sg[] of crypto request. */
static int ss_sg_table_init(struct sg_table *sgt, struct scatterlist *sg,
							int len, char *vbase, dma_addr_t pbase)
{
	int i;
	int npages = 0;
	int offset = 0;
	struct scatterlist *src_sg = sg;
	struct scatterlist *dst_sg = NULL;

	npages = ss_sg_cnt(sg, len);
	WARN_ON(npages == 0);

	if (sg_alloc_table(sgt, npages, GFP_KERNEL)) {
		SS_ERR("sg_alloc_table(%d) failed!\n", npages);
		WARN_ON(1);
	}

	dst_sg = sgt->sgl;
	for (i = 0; i < npages; i++) {
		sg_set_buf(dst_sg, vbase + offset, sg_dma_len(src_sg));
		offset += sg_dma_len(src_sg);
		src_sg = sg_next(src_sg);
		dst_sg = sg_next(dst_sg);
	}
	return 0;
}
Code example #7
File: bcm-flexrm-mailbox.c Project: Lyude/linux
static void *flexrm_spu_write_descs(struct brcm_message *msg, u32 nhcnt,
				     u32 reqid, void *desc_ptr, u32 toggle,
				     void *start_desc, void *end_desc)
{
	u64 d;
	u32 nhpos = 0;
	void *orig_desc_ptr = desc_ptr;
	unsigned int dst_target = 0;
	struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;

	while (src_sg || dst_sg) {
		if (src_sg) {
			if (sg_dma_len(src_sg) & 0xf)
				d = flexrm_src_desc(sg_dma_address(src_sg),
						     sg_dma_len(src_sg));
			else
				d = flexrm_msrc_desc(sg_dma_address(src_sg),
						      sg_dma_len(src_sg)/16);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					     d, &desc_ptr, &toggle,
					     start_desc, end_desc);
			nhpos++;
			dst_target = sg_dma_len(src_sg);
			src_sg = sg_next(src_sg);
		} else
			dst_target = UINT_MAX;

		while (dst_target && dst_sg) {
			if (sg_dma_len(dst_sg) & 0xf)
				d = flexrm_dst_desc(sg_dma_address(dst_sg),
						     sg_dma_len(dst_sg));
			else
				d = flexrm_mdst_desc(sg_dma_address(dst_sg),
						      sg_dma_len(dst_sg)/16);
			flexrm_enqueue_desc(nhpos, nhcnt, reqid,
					     d, &desc_ptr, &toggle,
					     start_desc, end_desc);
			nhpos++;
			if (sg_dma_len(dst_sg) < dst_target)
				dst_target -= sg_dma_len(dst_sg);
			else
				dst_target = 0;
			dst_sg = sg_next(dst_sg);
		}
	}

	/* Null descriptor with invalid toggle bit */
	flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));

	/* Ensure that descriptors have been written to memory */
	wmb();

	/* Flip toggle bit in header */
	flexrm_flip_header_toogle(orig_desc_ptr);

	return desc_ptr;
}
Code example #8
File: bfa_ioim.c Project: 3sOx/asuswrt-merlin
static void
bfa_ioim_sgpg_setup(struct bfa_ioim_s *ioim)
{
	int             sgeid, nsges, i;
	struct bfi_sge_s      *sge;
	struct bfa_sgpg_s *sgpg;
	u32        pgcumsz;
	u64        addr;
	struct scatterlist *sg;
	struct scsi_cmnd *cmnd = (struct scsi_cmnd *) ioim->dio;

	sgeid = BFI_SGE_INLINE;
	ioim->sgpg = sgpg = bfa_q_first(&ioim->sgpg_q);

	sg = scsi_sglist(cmnd);
	sg = sg_next(sg);

	do {
		sge = sgpg->sgpg->sges;
		nsges = ioim->nsges - sgeid;
		if (nsges > BFI_SGPG_DATA_SGES)
			nsges = BFI_SGPG_DATA_SGES;

		pgcumsz = 0;
		for (i = 0; i < nsges; i++, sge++, sgeid++, sg = sg_next(sg)) {
			addr = bfa_os_sgaddr(sg_dma_address(sg));
			sge->sga = *(union bfi_addr_u *) &addr;
			sge->sg_len = sg_dma_len(sg);
			pgcumsz += sge->sg_len;

			/**
			 * set flags
			 */
			if (i < (nsges - 1))
				sge->flags = BFI_SGE_DATA;
			else if (sgeid < (ioim->nsges - 1))
				sge->flags = BFI_SGE_DATA_CPL;
			else
				sge->flags = BFI_SGE_DATA_LAST;
		}

		sgpg = (struct bfa_sgpg_s *) bfa_q_next(sgpg);

		/**
		 * set the link element of each page
		 */
		if (sgeid == ioim->nsges) {
			sge->flags = BFI_SGE_PGDLEN;
			sge->sga.a32.addr_lo = 0;
			sge->sga.a32.addr_hi = 0;
		} else {
			sge->flags = BFI_SGE_LINK;
			sge->sga = sgpg->sgpg_pa;
		}
		sge->sg_len = pgcumsz;
	} while (sgeid < ioim->nsges);
}
Code example #9
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = dma_buf_to_obj(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		goto err;

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto err_unlock;

	i915_gem_object_pin_pages(obj);

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		ret = -ENOMEM;
		goto err_unpin;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret)
		goto err_free;

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		ret = -ENOMEM;
		goto err_free_sg;
	}

	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;

err_free_sg:
	sg_free_table(st);
err_free:
	kfree(st);
err_unpin:
	i915_gem_object_unpin_pages(obj);
err_unlock:
	mutex_unlock(&obj->base.dev->struct_mutex);
err:
	return ERR_PTR(ret);
}
Code example #10
File: tee_shm.c Project: flowher/optee_linuxdriver
static struct sg_table *_tee_shm_dma_buf_map_dma_buf(
		struct dma_buf_attachment *attach, enum dma_data_direction dir)
{
	struct tee_shm_attach *tee_shm_attach = attach->priv;
	struct tee_shm *tee_shm = attach->dmabuf->priv;
	struct sg_table *sgt = NULL;
	struct scatterlist *rd, *wr;
	unsigned int i;
	int nents, ret;
	struct tee *tee;

	tee = tee_shm->tee;

	INMSG();

	/* just return current sgt if already requested. */
	if (tee_shm_attach->dir == dir && tee_shm_attach->is_mapped) {
		OUTMSGX(&tee_shm_attach->sgt);
		return &tee_shm_attach->sgt;
	}

	sgt = &tee_shm_attach->sgt;

	ret = sg_alloc_table(sgt, tee_shm->sgt.orig_nents, GFP_KERNEL);
	if (ret) {
		dev_err(_DEV(tee), "failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	rd = tee_shm->sgt.sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			dev_err(_DEV(tee), "failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	tee_shm_attach->is_mapped = true;
	tee_shm_attach->dir = dir;
	attach->priv = tee_shm_attach;

err_unlock:
	OUTMSGX(sgt);
	return sgt;
}
Code example #11
File: sahara.c Project: vikash-g-samsung-com/linux
static int sahara_sha_hw_links_create(struct sahara_dev *dev,
				       struct sahara_sha_reqctx *rctx,
				       int start)
{
	struct scatterlist *sg;
	unsigned int i;
	int ret;

	dev->in_sg = rctx->in_sg;

	dev->nb_in_sg = sahara_sg_length(dev->in_sg, rctx->total);
	if ((dev->nb_in_sg) > SAHARA_MAX_HW_LINK) {
		dev_err(dev->device, "not enough hw links (%d)\n",
			dev->nb_in_sg + dev->nb_out_sg);
		return -EINVAL;
	}

	if (rctx->in_sg_chained) {
		i = start;
		sg = dev->in_sg;
		while (sg) {
			ret = dma_map_sg(dev->device, sg, 1,
					 DMA_TO_DEVICE);
			if (!ret)
				return -EFAULT;

			dev->hw_link[i]->len = sg->length;
			dev->hw_link[i]->p = sg->dma_address;
			dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
			sg = sg_next(sg);
			i += 1;
		}
		dev->hw_link[i-1]->next = 0;
	} else {
		sg = dev->in_sg;
		ret = dma_map_sg(dev->device, dev->in_sg, dev->nb_in_sg,
				 DMA_TO_DEVICE);
		if (!ret)
			return -EFAULT;

		for (i = start; i < dev->nb_in_sg + start; i++) {
			dev->hw_link[i]->len = sg->length;
			dev->hw_link[i]->p = sg->dma_address;
			if (i == (dev->nb_in_sg + start - 1)) {
				dev->hw_link[i]->next = 0;
			} else {
				dev->hw_link[i]->next = dev->hw_phys_link[i + 1];
				sg = sg_next(sg);
			}
		}
	}

	return i;
}
Code example #12
File: cxgbit_ddp.c Project: Anjali05/linux
static void
cxgbit_set_one_ppod(struct cxgbi_pagepod *ppod,
		    struct cxgbi_task_tag_info *ttinfo,
		    struct scatterlist **sg_pp, unsigned int *sg_off)
{
	struct scatterlist *sg = sg_pp ? *sg_pp : NULL;
	unsigned int offset = sg_off ? *sg_off : 0;
	dma_addr_t addr = 0UL;
	unsigned int len = 0;
	int i;

	memcpy(ppod, &ttinfo->hdr, sizeof(struct cxgbi_pagepod_hdr));

	if (sg) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
	}

	for (i = 0; i < PPOD_PAGES_MAX; i++) {
		if (sg) {
			ppod->addr[i] = cpu_to_be64(addr + offset);
			offset += PAGE_SIZE;
			if (offset == (len + sg->offset)) {
				offset = 0;
				sg = sg_next(sg);
				if (sg) {
					addr = sg_dma_address(sg);
					len = sg_dma_len(sg);
				}
			}
		} else {
			ppod->addr[i] = 0ULL;
		}
	}

	/*
	 * the fifth address needs to be repeated in the next ppod, so do
	 * not move sg
	 */
	if (sg_pp) {
		*sg_pp = sg;
		*sg_off = offset;
	}

	if (offset == len) {
		offset = 0;
		if (sg) {
			sg = sg_next(sg);
			if (sg)
				addr = sg_dma_address(sg);
		}
	}
	ppod->addr[i] = sg ? cpu_to_be64(addr + offset) : 0ULL;
}
Code example #13
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		return ERR_PTR(ret);

	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		st = ERR_PTR(ret);
		goto out;
	}

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret) {
		kfree(st);
		st = ERR_PTR(ret);
		goto out;
	}

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), PAGE_SIZE, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		sg_free_table(st);
		kfree(st);
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	i915_gem_object_pin_pages(obj);

out:
	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;
}
Code example #14
static void mci_xfer(struct ak98_mci_host *host)
{
	int sg_remain;
	u32 *tempbuf, xferlen;
	u8 dir;

	PK("%s\n", __func__);

	if (host->data->flags & MMC_DATA_WRITE) {
		dir = MEM2BUF;
	} else {
		//ak98_l2_clr_status(l2_sdio_bufid);
		dir = BUF2MEM;
	}

	tempbuf = sg_virt(host->sg_ptr) + host->sg_off;
	sg_remain = host->sg_ptr->length - host->sg_off;

	if (sg_remain <= 0) {
		host->sg_ptr = sg_next(host->sg_ptr);
		if (host->sg_ptr == NULL)
			return;

		host->sg_off = 0;
		tempbuf = sg_virt(host->sg_ptr) + host->sg_off;
		sg_remain = host->sg_ptr->length - host->sg_off;
	}

	xferlen = (sg_remain > host->data->blksz) ? host->data->blksz : sg_remain;
	ak98_l2_combuf_cpu((unsigned long)tempbuf, l2_sdio_bufid, xferlen, dir);
	host->sg_off += xferlen;
	host->data_xfered += xferlen;
}
Code example #15
File: scatterlist.c Project: inteos/WBSAirback
/**
 * sg_copy - copy one SG vector to another
 * @dst_sg:	destination SG
 * @src_sg:	source SG
 * @nents_to_copy: maximum number of entries to copy (if 0, copy all)
 * @copy_len:	maximum amount of data to copy. If 0, then copy all.
 * @d_km_type:	kmap_atomic type for the destination SG
 * @s_km_type:	kmap_atomic type for the source SG
 *
 * Description:
 *    Data from the source SG vector will be copied to the destination SG
 *    vector. End of the vectors will be determined by sg_next() returning
 *    NULL. Returns number of bytes copied.
 */
int sg_copy(struct scatterlist *dst_sg, struct scatterlist *src_sg,
	    int nents_to_copy, size_t copy_len,
	    enum km_type d_km_type, enum km_type s_km_type)
{
	int res = 0;
	size_t dst_len, dst_offs;

	if (copy_len == 0)
		copy_len = 0x7FFFFFFF; /* copy all */

	if (nents_to_copy == 0)
		nents_to_copy = 0x7FFFFFFF; /* copy all */

	dst_len = dst_sg->length;
	dst_offs = dst_sg->offset;

	do {
		int copied = sg_copy_elem(&dst_sg, &dst_len, &dst_offs,
				src_sg, copy_len, d_km_type, s_km_type);
		copy_len -= copied;
		res += copied;
		if ((copy_len == 0) || (dst_sg == NULL))
			goto out;

		nents_to_copy--;
		if (nents_to_copy == 0)
			goto out;

		src_sg = sg_next(src_sg);
	} while (src_sg != NULL);

out:
	return res;
}
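A hedged usage sketch for this out-of-tree helper: src_tbl and dst_tbl are assumed to be sg_tables already built with sg_alloc_table() and sg_set_buf(), and the legacy km_type constants (KM_USER0/KM_USER1, removed from later mainline kernels) are assumed to be available:

	/* Copy everything from src to dst; nents_to_copy == 0 and
	 * copy_len == 0 both mean "no limit" per the description above. */
	int copied = sg_copy(dst_tbl->sgl, src_tbl->sgl, 0, 0,
			     KM_USER0, KM_USER1);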
Code example #16
static int sg_copy(struct scatterlist **sg, size_t *offset, void *buf,
	size_t buflen, size_t total, int out)
{
	unsigned int count, off = 0;

	while (buflen && total) {
		count = min((*sg)->length - *offset, total);
		count = min(count, buflen);

		if (!count)
			return off;

		sg_copy_buf(buf + off, *sg, *offset, count, out);

		off += count;
		buflen -= count;
		*offset += count;
		total -= count;

		if (*offset == (*sg)->length) {
			*sg = sg_next(*sg);
			if (*sg)
				*offset = 0;
			else
				total = 0;
		}
	}

	return off;
}
Code example #17
File: scatterlist.c Project: 32bitmicro/xvisor
/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter@ to the next mapping.  @miter@ should have been
 *   started using sg_miter_start().  On successful return,
 *   @miter@->page, @miter@->addr and @miter@->length point to the
 *   current mapping.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC.  IRQ must stay disabled till
 *   @miter@ is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	unsigned int off, len;

	/* check for end and drop resources from the last iteration */
	if (!miter->__nents)
		return false;

	sg_miter_stop(miter);

	/* get to the next sg if necessary.  __offset is adjusted by stop */
	while (miter->__offset == miter->__sg->length) {
		if (--miter->__nents) {
			miter->__sg = sg_next(miter->__sg);
			miter->__offset = 0;
		} else
			return false;
	}

	/* map the next page */
	off = miter->__sg->offset + miter->__offset;
	len = miter->__sg->length - miter->__offset;

	miter->page = VMM_PAGE_NTH(sg_page(miter->__sg), off >> VMM_PAGE_SHIFT);
	off &= ~VMM_PAGE_MASK;
	miter->length = min_t(unsigned int, len, VMM_PAGE_SIZE - off);
	miter->consumed = miter->length;

	miter->addr = (void *)(miter->page + off);

	return true;
}
Code example #18
static void scatterwalk_pagedone(struct scatter_walk *walk, int out,
				 unsigned int more)
{
	/* walk->data may be pointing to the first byte of the next page;
	   however, we know we transferred at least one byte.  So,
	   walk->data - 1 will be a virtual address in the mapped page. */

	if (out)
		flush_dcache_page(walk->page);

	if (more) {
		walk->len_this_segment -= walk->len_this_page;

		if (walk->len_this_segment) {
			walk->page++;
			walk->len_this_page = min(walk->len_this_segment,
						  (unsigned)PAGE_CACHE_SIZE);
			walk->offset = 0;
		}
		else
			scatterwalk_start(walk, sg_next(walk->sg));
	}
}
Code example #19
static int ion_mm_heap_allocate(struct ion_heap *heap,
                                struct ion_buffer *buffer,
                                unsigned long size, unsigned long align,
                                unsigned long flags)
{
    ion_mm_buffer_info* pBufferInfo = NULL;
    int ret;
    unsigned int addr;
    struct sg_table *table;
    struct scatterlist *sg;
    void* pVA;
    ION_FUNC_ENTER;
    pVA = vmalloc_user(size);
    buffer->priv_virt = NULL;
    if (IS_ERR_OR_NULL(pVA))
    {
        printk("[ion_mm_heap_allocate]: Error. Allocate buffer failed.\n");
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    pBufferInfo = (ion_mm_buffer_info*) kzalloc(sizeof(ion_mm_buffer_info), GFP_KERNEL);
    if (IS_ERR_OR_NULL(pBufferInfo))
    {
        vfree(pVA);
        printk("[ion_mm_heap_allocate]: Error. Allocate ion_buffer failed.\n");
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
    if (!table)
    {
        vfree(pVA);
        kfree(pBufferInfo);
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    ret = sg_alloc_table(table, PAGE_ALIGN(size) / PAGE_SIZE, GFP_KERNEL);
    if (ret)
    {
        vfree(pVA);
        kfree(pBufferInfo);
        kfree(table);
        ION_FUNC_LEAVE;
        return -ENOMEM;
    }
    sg = table->sgl;
    for (addr=(unsigned int)pVA; addr < (unsigned int) pVA + size; addr += PAGE_SIZE)
    {
        struct page *page = vmalloc_to_page((void*)addr);
        sg_set_page(sg, page, PAGE_SIZE, 0);
        sg = sg_next(sg);
    }
    buffer->sg_table = table;

    pBufferInfo->pVA = pVA;
    pBufferInfo->eModuleID = -1;
    buffer->priv_virt = pBufferInfo;
    ION_FUNC_LEAVE;
    return 0;
}
Code example #20
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	unsigned int off, len;

	/* check for end and drop resources from the last iteration */
	if (!miter->__nents)
		return false;

	sg_miter_stop(miter);

	/* get to the next sg if necessary.  __offset is adjusted by stop */
	while (miter->__offset == miter->__sg->length) {
		if (--miter->__nents) {
			miter->__sg = sg_next(miter->__sg);
			miter->__offset = 0;
		} else
			return false;
	}

	/* map the next page */
	off = miter->__sg->offset + miter->__offset;
	len = miter->__sg->length - miter->__offset;

	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;
	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
	miter->consumed = miter->length;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page) + off;
	else
		miter->addr = kmap(miter->page) + off;

	return true;
}
Code example #21
File: zfcp_qdio.c Project: Addision/LVS
/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_queue_req
 * @sbtype: SBALE flags
 * @sg: scatter-gather list
 * @max_sbals: upper bound for number of SBALs to be used
 * Returns: number of bytes, or error (negative)
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio,
			    struct zfcp_queue_req *q_req,
			    unsigned long sbtype, struct scatterlist *sg,
			    int max_sbals)
{
	struct qdio_buffer_element *sbale;
	int retval, bytes = 0;

	/* figure out last allowed SBAL */
	zfcp_qdio_sbal_limit(qdio, q_req, max_sbals);

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->flags |= sbtype;

	for (; sg; sg = sg_next(sg)) {
		retval = zfcp_qdio_fill_sbals(qdio, q_req, sbtype,
					      sg_virt(sg), sg->length);
		if (retval < 0)
			return retval;
		bytes += sg->length;
	}

	/* assume that no other SBALEs are to follow in the same SBAL */
	sbale = zfcp_qdio_sbale_curr(qdio, q_req);
	sbale->flags |= SBAL_FLAGS_LAST_ENTRY;

	return bytes;
}
Code example #22
File: zfcp_qdio.c Project: CSCLOG/beaglebone
/**
 * zfcp_qdio_sbals_from_sg - fill SBALs from scatter-gather list
 * @qdio: pointer to struct zfcp_qdio
 * @q_req: pointer to struct zfcp_qdio_req
 * @sg: scatter-gather list
 * Returns: number of bytes, or error (negative)
 */
int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req,
			    struct scatterlist *sg)
{
	struct qdio_buffer_element *sbale;
	int bytes = 0;

	/* set storage-block type for this request */
	sbale = zfcp_qdio_sbale_req(qdio, q_req);
	sbale->sflags |= q_req->sbtype;

	for (; sg; sg = sg_next(sg)) {
		sbale = zfcp_qdio_sbale_next(qdio, q_req);
		if (!sbale) {
			atomic_inc(&qdio->req_q_full);
			zfcp_qdio_zero_sbals(qdio->req_q, q_req->sbal_first,
					     q_req->sbal_number);
			return -EINVAL;
		}

		sbale->addr = sg_virt(sg);
		sbale->length = sg->length;

		bytes += sg->length;
	}

	return bytes;
}
Code example #23
static void dump_data(struct mmc_data *data)
{
	struct scatterlist *sg;
	u8 *sg_dat, *sg_end;
	unsigned int blks, blkdat;

	printk("%s\n", __func__);

	sg = data->sg;
	sg_dat = sg_virt(sg);
	sg_end = sg_dat + sg->length;

	for (blks = 0; blks < data->blocks; blks++) {
		for (blkdat = 0; blkdat < data->blksz; blkdat++) {
			printk("%02X ", *sg_dat);
			if ((blkdat % 16) == 15)
				printk("\n");
			sg_dat++;
			if (sg_dat >= sg_end) {
				sg = sg_next(sg);
				if (sg == NULL)
					break;
				sg_dat = sg_virt(sg);
				sg_end = sg_dat + sg->length;
			}
		}
		printk("\n");
	}
}
Code example #24
File: abd.c Project: tuomari/zfs
/*
 * Advance the iterator by offset.
 * Cannot be called when a page is mapped.
 * Returns 0 if exhausted.
 * This can safely be called when the aiter is already exhausted, in which
 * case this does nothing.
 */
static int
abd_miter_advance(struct abd_miter *aiter, int offset)
{
	ASSERT(!aiter->addr);

	if (!aiter->nents)
		return (0);

	aiter->offset += offset;
	if (aiter->is_linear) {
		aiter->length -= offset;
		if (aiter->length <= 0) {
			aiter->nents--;
			aiter->length = 0;
			return (0);
		}
	} else {
		while (aiter->offset >= aiter->sg->length) {
			aiter->offset -= aiter->sg->length;
			aiter->nents--;
			aiter->sg = sg_next(aiter->sg);
			if (!aiter->nents) {
				aiter->length = 0;
				return (0);
			}
		}
		aiter->length = aiter->sg->length - aiter->offset;
	}
	return (1);
}
Code example #25
File: cc_buffer_mgr.c Project: Anjali05/linux
static int cc_render_sg_to_mlli(struct device *dev, struct scatterlist *sgl,
				u32 sgl_data_len, u32 sgl_offset,
				u32 *curr_nents, u32 **mlli_entry_pp)
{
	struct scatterlist *curr_sgl = sgl;
	u32 *mlli_entry_p = *mlli_entry_pp;
	s32 rc = 0;

	for ( ; (curr_sgl && sgl_data_len);
	      curr_sgl = sg_next(curr_sgl)) {
		u32 entry_data_len =
			(sgl_data_len > sg_dma_len(curr_sgl) - sgl_offset) ?
				sg_dma_len(curr_sgl) - sgl_offset :
				sgl_data_len;
		sgl_data_len -= entry_data_len;
		rc = cc_render_buff_to_mlli(dev, sg_dma_address(curr_sgl) +
					    sgl_offset, entry_data_len,
					    curr_nents, &mlli_entry_p);
		if (rc)
			return rc;

		sgl_offset = 0;
	}
	*mlli_entry_pp = mlli_entry_p;
	return 0;
}
Code example #26
/**
 * sg_miter_next - proceed mapping iterator to the next mapping
 * @miter: sg mapping iter to proceed
 *
 * Description:
 *   Proceeds @miter@ to the next mapping.  @miter@ should have been
 *   started using sg_miter_start().  On successful return,
 *   @miter@->page, @miter@->addr and @miter@->length point to the
 *   current mapping.
 *
 * Context:
 *   IRQ disabled if SG_MITER_ATOMIC.  IRQ must stay disabled till
 *   @miter@ is stopped.  May sleep if !SG_MITER_ATOMIC.
 *
 * Returns:
 *   true if @miter contains the next mapping.  false if end of sg
 *   list is reached.
 */
bool sg_miter_next(struct sg_mapping_iter *miter)
{
	unsigned int off, len;

	/* check for end and drop resources from the last iteration */
	if (!miter->__nents)
		return false;

	sg_miter_stop(miter);

	/* get to the next sg if necessary.  __offset is adjusted by stop */
	while (miter->__offset == miter->__sg->length) {
		if (--miter->__nents) {
			miter->__sg = sg_next(miter->__sg);
			miter->__offset = 0;
		} else
			return false;
	}

	/* map the next page */
	off = miter->__sg->offset + miter->__offset;
	len = miter->__sg->length - miter->__offset;

	miter->page = nth_page(sg_page(miter->__sg), off >> PAGE_SHIFT);
	off &= ~PAGE_MASK;
	miter->length = min_t(unsigned int, len, PAGE_SIZE - off);
	miter->consumed = miter->length;

	if (miter->__flags & SG_MITER_ATOMIC)
		miter->addr = kmap_atomic(miter->page, KM_BIO_SRC_IRQ) + off;
	else
		miter->addr = kmap(miter->page) + off;

	return true;
}
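For reference, sg_miter_next() is normally wrapped in a start/next/stop loop. A minimal sketch that drains an sg list into a flat buffer, using mainline flag names (SG_MITER_FROM_SG marks the list as the copy source; this mirrors what lib/scatterlist.c does in sg_copy_buffer()):

static size_t copy_from_sgl(void *buf, struct scatterlist *sgl,
			    unsigned int nents, size_t buflen)
{
	struct sg_mapping_iter miter;
	size_t done = 0;

	sg_miter_start(&miter, sgl, nents, SG_MITER_ATOMIC | SG_MITER_FROM_SG);
	while (done < buflen && sg_miter_next(&miter)) {
		size_t len = min_t(size_t, miter.length, buflen - done);

		/* miter.addr and miter.length describe one mapped chunk */
		memcpy(buf + done, miter.addr, len);
		done += len;
	}
	sg_miter_stop(&miter);

	return done;
}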
Code example #27
File: mock_dmabuf.c Project: grate-driver/linux
static struct sg_table *mock_map_dma_buf(struct dma_buf_attachment *attachment,
					 enum dma_data_direction dir)
{
	struct mock_dmabuf *mock = to_mock(attachment->dmabuf);
	struct sg_table *st;
	struct scatterlist *sg;
	int i, err;

	st = kmalloc(sizeof(*st), GFP_KERNEL);
	if (!st)
		return ERR_PTR(-ENOMEM);

	err = sg_alloc_table(st, mock->npages, GFP_KERNEL);
	if (err)
		goto err_free;

	sg = st->sgl;
	for (i = 0; i < mock->npages; i++) {
		sg_set_page(sg, mock->pages[i], PAGE_SIZE, 0);
		sg = sg_next(sg);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		err = -ENOMEM;
		goto err_st;
	}

	return st;

err_st:
	sg_free_table(st);
err_free:
	kfree(st);
	return ERR_PTR(err);
}
Code example #28
File: scatterlist.c Project: ashmew2/kolibriosSVN
/**
 * sg_nents - return total count of entries in scatterlist
 * @sg:		The scatterlist
 *
 * Description:
 * Allows the caller to know how many entries are in sg, taking
 * chaining into account as well.
 *
 **/
int sg_nents(struct scatterlist *sg)
{
	int nents;
	for (nents = 0; sg; sg = sg_next(sg))
		nents++;
	return nents;
}
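A short hedged example of where such a count is typically consumed, e.g. sizing a dma_map_sg() call for a possibly chained list (dev, sgl and the DMA direction are assumptions standing in for the surrounding driver):

	int nents = sg_nents(sgl);
	int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);

	if (!mapped)
		return -ENOMEM;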
Code example #29
File: esas2r_main.c Project: Anjali05/linux
/* Callback for building a request scatter/gather list */
static u32 get_physaddr_from_sgc(struct esas2r_sg_context *sgc, u64 *addr)
{
	u32 len;

	if (likely(sgc->cur_offset == sgc->exp_offset)) {
		/*
		 * the normal case: caller used all bytes from previous call, so
		 * expected offset is the same as the current offset.
		 */

		if (sgc->sgel_count < sgc->num_sgel) {
			/* retrieve next segment, except for first time */
			if (sgc->exp_offset > (u8 *)0) {
				/* advance current segment */
				sgc->cur_sgel = sg_next(sgc->cur_sgel);
				++(sgc->sgel_count);
			}


			len = sg_dma_len(sgc->cur_sgel);
			(*addr) = sg_dma_address(sgc->cur_sgel);

			/* save the total # bytes returned to caller so far */
			sgc->exp_offset += len;

		} else {
			len = 0;
		}
	} else if (sgc->cur_offset < sgc->exp_offset) {
		/*
		 * caller did not use all bytes from previous call. need to
		 * compute the address based on current segment.
		 */

		len = sg_dma_len(sgc->cur_sgel);
		(*addr) = sg_dma_address(sgc->cur_sgel);

		sgc->exp_offset -= len;

		/* calculate PA based on prev segment address and offsets */
		*addr = *addr +
			(sgc->cur_offset - sgc->exp_offset);

		sgc->exp_offset += len;

		/* re-calculate length based on offset */
		len = lower_32_bits(
			sgc->exp_offset - sgc->cur_offset);
	} else {   /* if ( sgc->cur_offset > sgc->exp_offset ) */
		   /*
		    * we don't expect the caller to skip ahead.
		    * cur_offset will never exceed the len we return
		    */
		len = 0;
	}

	return len;
}
Code example #30
static int crypto_ccm_decrypt(struct aead_request *req)
{
	struct crypto_aead *aead = crypto_aead_reqtfm(req);
	struct crypto_ccm_ctx *ctx = crypto_aead_ctx(aead);
	struct crypto_ccm_req_priv_ctx *pctx = crypto_ccm_reqctx(req);
	struct ablkcipher_request *abreq = &pctx->abreq;
	struct scatterlist *dst;
	unsigned int authsize = crypto_aead_authsize(aead);
	unsigned int cryptlen = req->cryptlen;
	u8 *authtag = pctx->auth_tag;
	u8 *odata = pctx->odata;
	u8 *iv = req->iv;
	int err;

	cryptlen -= authsize;

	err = crypto_ccm_init_crypt(req, authtag);
	if (err)
		return err;

	scatterwalk_map_and_copy(authtag, sg_next(pctx->src), cryptlen,
				 authsize, 0);

	dst = pctx->src;
	if (req->src != req->dst)
		dst = pctx->dst;

	ablkcipher_request_set_tfm(abreq, ctx->ctr);
	ablkcipher_request_set_callback(abreq, pctx->flags,
					crypto_ccm_decrypt_done, req);
	ablkcipher_request_set_crypt(abreq, pctx->src, dst, cryptlen + 16, iv);
	err = crypto_ablkcipher_decrypt(abreq);
	if (err)
		return err;

	err = crypto_ccm_auth(req, sg_next(dst), cryptlen);
	if (err)
		return err;

	/* verify */
	if (crypto_memneq(authtag, odata, authsize))
		return -EBADMSG;

	return err;
}