Example #1
static void rds_message_purge(struct rds_message *rm)
{
	unsigned long i;

	if (unlikely(test_bit(RDS_MSG_PAGEVEC, &rm->m_flags)))
		return;

	for (i = 0; i < rm->m_nents; i++) {
		rdsdebug("putting data page %p\n", (void *)sg_page(&rm->m_sg[i]));
		/* XXX will have to put_page for page refs */
		__free_page(sg_page(&rm->m_sg[i]));
	}
	rm->m_nents = 0;

	if (rm->m_rdma_op)
		rds_rdma_free_op(rm->m_rdma_op);
	if (rm->m_rdma_mr)
		rds_mr_put(rm->m_rdma_mr);
}
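A minimal sketch of the allocation side that the purge above tears down (hypothetical helper, not RDS code, assuming the stock <linux/scatterlist.h> API): each entry owns one freshly allocated page set via sg_set_page(), which is why the purge can use __free_page() rather than put_page().

#include <linux/gfp.h>
#include <linux/scatterlist.h>

/* Hypothetical counterpart to the purge: one fresh page per entry. */
static int example_fill_sg(struct scatterlist *sg, unsigned int nents)
{
	unsigned int i;

	sg_init_table(sg, nents);
	for (i = 0; i < nents; i++) {
		struct page *page = alloc_page(GFP_KERNEL);

		if (!page) {
			while (i--)	/* unwind pages we already own */
				__free_page(sg_page(&sg[i]));
			return -ENOMEM;
		}
		sg_set_page(&sg[i], page, PAGE_SIZE, 0);
	}
	return 0;
}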
Example #2
static int siw_dma_map_sg(struct ib_device *dev, struct scatterlist *sgl,
			  int n_sge, enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(!valid_dma_direction(dir));

	for_each_sg(sgl, sg, n_sge, i) {
		/* This is just a validity check */
		if (unlikely(page_address(sg_page(sg)) == NULL)) {
			n_sge = 0;
			break;
		}
		sg->dma_address = (dma_addr_t) page_address(sg_page(sg));
		sg_dma_len(sg) = sg->length;
	}
	return n_sge;
}
Example #3
void ide_pio_bytes(ide_drive_t *drive, struct ide_cmd *cmd,
		   unsigned int write, unsigned int len)
{
	ide_hwif_t *hwif = drive->hwif;
	struct scatterlist *sg = hwif->sg_table;
	struct scatterlist *cursg = cmd->cursg;
	struct page *page;
	unsigned long flags;
	unsigned int offset;
	u8 *buf;

	if (cursg == NULL)
		cursg = cmd->cursg = sg;

	while (len) {
		unsigned nr_bytes = min(len, cursg->length - cmd->cursg_ofs);

		if (nr_bytes > PAGE_SIZE)
			nr_bytes = PAGE_SIZE;

		page = sg_page(cursg);
		offset = cursg->offset + cmd->cursg_ofs;

		/* get the current page and offset */
		page = nth_page(page, (offset >> PAGE_SHIFT));
		offset %= PAGE_SIZE;

		if (PageHighMem(page))
			local_irq_save_nort(flags);

		buf = kmap_atomic(page, KM_BIO_SRC_IRQ) + offset;

		cmd->nleft -= nr_bytes;
		cmd->cursg_ofs += nr_bytes;

		if (cmd->cursg_ofs == cursg->length) {
			cursg = cmd->cursg = sg_next(cmd->cursg);
			cmd->cursg_ofs = 0;
		}

		/* do the actual data transfer */
		if (write)
			hwif->tp_ops->output_data(drive, cmd, buf, nr_bytes);
		else
			hwif->tp_ops->input_data(drive, cmd, buf, nr_bytes);

		kunmap_atomic(buf, KM_BIO_SRC_IRQ);

		if (PageHighMem(page))
			local_irq_restore_nort(flags);

		len -= nr_bytes;
	}
}
Example #4
static struct sg_table *_tee_shm_dma_buf_map_dma_buf(
		struct dma_buf_attachment *attach, enum dma_data_direction dir)
{
	struct tee_shm_attach *tee_shm_attach = attach->priv;
	struct tee_shm *tee_shm = attach->dmabuf->priv;
	struct sg_table *sgt = NULL;
	struct scatterlist *rd, *wr;
	unsigned int i;
	int nents, ret;
	struct tee *tee;

	tee = tee_shm->tee;

	INMSG();

	/* just return current sgt if already requested. */
	if (tee_shm_attach->dir == dir && tee_shm_attach->is_mapped) {
		OUTMSGX(&tee_shm_attach->sgt);
		return &tee_shm_attach->sgt;
	}

	sgt = &tee_shm_attach->sgt;

	ret = sg_alloc_table(sgt, tee_shm->sgt.orig_nents, GFP_KERNEL);
	if (ret) {
		dev_err(_DEV(tee), "failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	rd = tee_shm->sgt.sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			dev_err(_DEV(tee), "failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto out;
		}
	}

	tee_shm_attach->is_mapped = true;
	tee_shm_attach->dir = dir;
	attach->priv = tee_shm_attach;

out:
	OUTMSGX(sgt);
	return sgt;
}
Example #5
int rds_message_copy_from_user(struct rds_message *rm, struct iov_iter *from)
{
	unsigned long to_copy, nbytes;
	unsigned long sg_off;
	struct scatterlist *sg;
	int ret = 0;

	rm->m_inc.i_hdr.h_len = cpu_to_be32(iov_iter_count(from));

	/*
	 * now allocate and copy in the data payload.
	 */
	sg = rm->data.op_sg;
	sg_off = 0; /* Dear gcc, sg->page will be null from kzalloc. */

	while (iov_iter_count(from)) {
		if (!sg_page(sg)) {
			ret = rds_page_remainder_alloc(sg, iov_iter_count(from),
						       GFP_HIGHUSER);
			if (ret)
				return ret;
			rm->data.op_nents++;
			sg_off = 0;
		}

		to_copy = min_t(unsigned long, iov_iter_count(from),
				sg->length - sg_off);

		rds_stats_add(s_copy_from_user, to_copy);
		nbytes = copy_page_from_iter(sg_page(sg), sg->offset + sg_off,
					     to_copy, from);
		if (nbytes != to_copy)
			return -EFAULT;

		sg_off += to_copy;

		if (sg_off == sg->length)
			sg++;
	}

	return ret;
}
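For contrast, a hedged sketch of the inverse path over the same kind of sg vector, copying back out to user space with copy_page_to_iter(); the helper name is illustrative, not RDS code.

#include <linux/scatterlist.h>
#include <linux/uio.h>

/* Illustrative inverse of the copy-in loop above: sg pages -> iov_iter. */
static int example_sg_copy_to_iter(struct scatterlist *sg, unsigned int nents,
				   struct iov_iter *to, unsigned long len)
{
	unsigned int i;

	for (i = 0; i < nents && len; i++, sg++) {
		unsigned long n = min_t(unsigned long, len, sg->length);

		if (copy_page_to_iter(sg_page(sg), sg->offset, n, to) != n)
			return -EFAULT;
		len -= n;
	}
	return 0;
}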
Example #6
static int vb2_ion_mmap(void *buf_priv, struct vm_area_struct *vma)
{
    struct vb2_ion_buf *buf = buf_priv;
    unsigned long vm_start  = vma->vm_start;
    unsigned long vm_end    = vma->vm_end;
    struct scatterlist *sg  = buf->cookie.sgt->sgl;
    unsigned long size;
    int ret;

    if (buf->size < (vm_end - vm_start))
        return -EINVAL;

    /* always noncached buffer */
    vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);

    size = min_t(size_t, vm_end - vm_start, sg_dma_len(sg));

    ret = remap_pfn_range(vma, vm_start, page_to_pfn(sg_page(sg)),
            size, vma->vm_page_prot);

    for (sg = sg_next(sg), vm_start += size;
            !ret && sg && (vm_start < vm_end);
            vm_start += size, sg = sg_next(sg)) {
        size = min_t(size_t, vm_end - vm_start, sg_dma_len(sg));
        ret = remap_pfn_range(vma, vm_start, page_to_pfn(sg_page(sg)),
                size, vma->vm_page_prot);
    }

    if (ret)
        return ret;

    if (vm_start < vm_end)
        return -EINVAL;

    vma->vm_flags       |= VM_DONTEXPAND;
    vma->vm_private_data = &buf->handler;
    vma->vm_ops          = &vb2_common_vm_ops;

    vma->vm_ops->open(vma);

    return ret;
}
Example #7
/**
 * sg_copy_buffer - Copy data between a linear buffer and an SG list
 * @sgl:		 The SG list
 * @nents:		 Number of SG entries
 * @buf:		 The linear buffer to copy to/from
 * @buflen:		 The number of bytes to copy
 * @to_buffer:		 transfer direction (non-zero == from an sg list to a
 *			 buffer, 0 == from a buffer to an sg list)
 *
 * Returns the number of copied bytes.
 *
 **/
static size_t sg_copy_buffer(struct scatterlist *sgl, unsigned int nents,
			     void *buf, size_t buflen, int to_buffer)
{
	struct scatterlist *sg;
	size_t buf_off = 0;
	int i;

	WARN_ON(!irqs_disabled());

	for_each_sg(sgl, sg, nents, i) {
		struct page *page;
		int n = 0;
		unsigned int sg_off = sg->offset;
		unsigned int sg_copy = sg->length;

		if (sg_copy > buflen)
			sg_copy = buflen;
		buflen -= sg_copy;

		while (sg_copy > 0) {
			unsigned int page_copy;
			void *p;

			page_copy = PAGE_SIZE - sg_off;
			if (page_copy > sg_copy)
				page_copy = sg_copy;

			page = nth_page(sg_page(sg), n);
			p = kmap_atomic(page, KM_BIO_SRC_IRQ);

			if (to_buffer)
				memcpy(buf + buf_off, p + sg_off, page_copy);
			else {
				memcpy(p + sg_off, buf + buf_off, page_copy);
				flush_kernel_dcache_page(page);
			}

			kunmap_atomic(p, KM_BIO_SRC_IRQ);

			buf_off += page_copy;
			sg_off += page_copy;
			if (sg_off == PAGE_SIZE) {
				sg_off = 0;
				n++;
			}
			sg_copy -= page_copy;
		}

		if (!buflen)
			break;
	}

	return buf_off;
}
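sg_copy_buffer() is the engine behind the exported sg_copy_from_buffer()/sg_copy_to_buffer() wrappers; a minimal round-trip sketch using those wrappers (the SG list is assumed to be initialized and to cover at least 32 bytes):

#include <linux/scatterlist.h>
#include <linux/string.h>

/* Sketch: buf -> sg (to_buffer == 0), then sg -> buf (to_buffer != 0). */
static int example_sg_roundtrip(struct scatterlist *sgl, unsigned int nents)
{
	char src[32] = "hello scatterlist", dst[32];

	if (sg_copy_from_buffer(sgl, nents, src, sizeof(src)) != sizeof(src))
		return -EINVAL;
	if (sg_copy_to_buffer(sgl, nents, dst, sizeof(dst)) != sizeof(dst))
		return -EINVAL;
	return memcmp(src, dst, sizeof(src)) ? -EIO : 0;
}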
Example #8
int or1k_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		struct dma_attrs *attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = or1k_map_page(dev, sg_page(s), s->offset,
					       s->length, dir, NULL);
	}

	return nents;
}
Example #9
void iscsi_tcp_segment_unmap(struct iscsi_segment *segment)
{
	if (segment->sg_mapped) {
		if (segment->atomic_mapped)
			kunmap_atomic(segment->sg_mapped);
		else
			kunmap(sg_page(segment->sg));
		segment->sg_mapped = NULL;
		segment->data = NULL;
	}
}
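A hedged sketch of the mapping side that this unmap pairs with, simplified from the driver's segment-map logic (same struct iscsi_segment fields as above): record whether kmap_atomic() or kmap() was used so teardown can pick the matching unmap.

/* Simplified sketch: choose atomic vs. sleeping kmap and remember it. */
static void example_segment_map(struct iscsi_segment *segment, bool atomic)
{
	struct scatterlist *sg = segment->sg;

	segment->atomic_mapped = atomic;
	segment->sg_mapped = atomic ? kmap_atomic(sg_page(sg))
				    : kmap(sg_page(sg));
	segment->data = segment->sg_mapped + sg->offset;
}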
Example #10
static int xen_map_sg(struct device *hwdev, struct scatterlist *sg,
		      int nents,
		      enum dma_data_direction direction,
		      struct dma_attrs *attrs)
{
	struct scatterlist *s;
	struct page *page;
	int i, rc;

	BUG_ON(direction == DMA_NONE);
	WARN_ON(nents == 0 || sg[0].length == 0);

	for_each_sg(sg, s, nents, i) {
		BUG_ON(!sg_page(s));
		page = sg_page(s);
		s->dma_address = xen_dma_map_page(page) + s->offset;
		s->dma_length = s->length;
		IOMMU_BUG_ON(range_straddles_page_boundary(
				page_to_phys(page), s->length));
	}

	return nents;
}
Example #11
/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int BCMFASTPATH_HOST dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset, s->length, dir);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
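Typical caller-side use of this API, sketched under the usual contract: loop over the mapped count that dma_map_sg() returns (adjacent entries may be merged), but pass the original nents back to dma_unmap_sg().

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Sketch: map, program the device per segment, then unmap. */
static int example_do_dma(struct device *dev, struct scatterlist *sgl,
			  int nents)
{
	struct scatterlist *s;
	int i, count;

	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
	if (!count)
		return -ENOMEM;

	for_each_sg(sgl, s, count, i)
		dev_dbg(dev, "seg %d: %pad + %u\n", i,
			&sg_dma_address(s), sg_dma_len(s));

	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);	/* original nents */
	return 0;
}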
Example #12
/**
 * rdma_rw_ctx_init - initialize a RDMA READ/WRITE context
 * @ctx:	context to initialize
 * @qp:		queue pair to operate on
 * @port_num:	port num to which the connection is bound
 * @sg:		scatterlist to READ/WRITE from/to
 * @sg_cnt:	number of entries in @sg
 * @sg_offset:	current byte offset into @sg
 * @remote_addr:remote address to read/write (relative to @rkey)
 * @rkey:	remote key to operate on
 * @dir:	%DMA_TO_DEVICE for RDMA WRITE, %DMA_FROM_DEVICE for RDMA READ
 *
 * Returns the number of WQEs that will be needed on the workqueue if
 * successful, or a negative error code.
 */
int rdma_rw_ctx_init(struct rdma_rw_ctx *ctx, struct ib_qp *qp, u8 port_num,
		struct scatterlist *sg, u32 sg_cnt, u32 sg_offset,
		u64 remote_addr, u32 rkey, enum dma_data_direction dir)
{
	struct ib_device *dev = qp->pd->device;
	int ret;

	if (is_pci_p2pdma_page(sg_page(sg)))
		ret = pci_p2pdma_map_sg(dev->dma_device, sg, sg_cnt, dir);
	else
		ret = ib_dma_map_sg(dev, sg, sg_cnt, dir);

	if (!ret)
		return -ENOMEM;
	sg_cnt = ret;

	/*
	 * Skip to the S/G entry that sg_offset falls into:
	 */
	for (;;) {
		u32 len = sg_dma_len(sg);

		if (sg_offset < len)
			break;

		sg = sg_next(sg);
		sg_offset -= len;
		sg_cnt--;
	}

	ret = -EIO;
	if (WARN_ON_ONCE(sg_cnt == 0))
		goto out_unmap_sg;

	if (rdma_rw_io_needs_mr(qp->device, port_num, dir, sg_cnt)) {
		ret = rdma_rw_init_mr_wrs(ctx, qp, port_num, sg, sg_cnt,
				sg_offset, remote_addr, rkey, dir);
	} else if (sg_cnt > 1) {
		ret = rdma_rw_init_map_wrs(ctx, qp, sg, sg_cnt, sg_offset,
				remote_addr, rkey, dir);
	} else {
		ret = rdma_rw_init_single_wr(ctx, qp, sg, sg_offset,
				remote_addr, rkey, dir);
	}

	if (ret < 0)
		goto out_unmap_sg;
	return ret;

out_unmap_sg:
	ib_dma_unmap_sg(dev, sg, sg_cnt, dir);
	return ret;
}
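A sketch of the surrounding rdma_rw_ctx life cycle as a caller might drive it, assuming the companion rdma_rw_ctx_post() and rdma_rw_ctx_destroy() from the same rw.c (completion handling abbreviated):

/* Sketch: init builds the WRs, post submits them, destroy unmaps on
 * failure (and again after the read completes). */
static int example_rdma_read(struct rdma_rw_ctx *ctx, struct ib_qp *qp,
			     u8 port_num, struct scatterlist *sg, u32 sg_cnt,
			     u64 remote_addr, u32 rkey, struct ib_cqe *cqe)
{
	int ret;

	ret = rdma_rw_ctx_init(ctx, qp, port_num, sg, sg_cnt, 0,
			       remote_addr, rkey, DMA_FROM_DEVICE);
	if (ret < 0)
		return ret;

	ret = rdma_rw_ctx_post(ctx, qp, port_num, cqe, NULL);
	if (ret < 0)
		rdma_rw_ctx_destroy(ctx, qp, port_num, sg, sg_cnt,
				    DMA_FROM_DEVICE);
	return ret;
}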
Example #13
static int cc_map_sg(struct device *dev, struct scatterlist *sg,
		     unsigned int nbytes, int direction, u32 *nents,
		     u32 max_sg_nents, u32 *lbytes, u32 *mapped_nents)
{
	bool is_chained = false;

	if (sg_is_last(sg)) {
		/* One entry only case -set to DLLI */
		if (dma_map_sg(dev, sg, 1, direction) != 1) {
			dev_err(dev, "dma_map_sg() single buffer failed\n");
			return -ENOMEM;
		}
		dev_dbg(dev, "Mapped sg: dma_address=%pad page=%p addr=%pK offset=%u length=%u\n",
			&sg_dma_address(sg), sg_page(sg), sg_virt(sg),
			sg->offset, sg->length);
		*lbytes = nbytes;
		*nents = 1;
		*mapped_nents = 1;
	} else { /* !sg_is_last() */
		*nents = cc_get_sgl_nents(dev, sg, nbytes, lbytes,
					  &is_chained);
		if (*nents > max_sg_nents) {
			dev_err(dev, "Too many fragments. current %d max %d\n",
				*nents, max_sg_nents);
			*nents = 0;
			return -ENOMEM;
		}
		if (!is_chained) {
			/* In case of mmu the number of mapped nents might
			 * be changed from the original sgl nents
			 */
			*mapped_nents = dma_map_sg(dev, sg, *nents, direction);
			if (*mapped_nents == 0) {
				*nents = 0;
				dev_err(dev, "dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		} else {
			/*In this case the driver maps entry by entry so it
			 * must have the same nents before and after map
			 */
			*mapped_nents = cc_dma_map_sg(dev, sg, *nents,
						      direction);
			if (*mapped_nents != *nents) {
				*nents = *mapped_nents;
				dev_err(dev, "dma_map_sg() sg buffer failed\n");
				return -ENOMEM;
			}
		}
	}

	return 0;
}
Example #14
static int dma_virt_map_sg(struct device *dev, struct scatterlist *sgl,
			   int nents, enum dma_data_direction dir,
			   unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));
		sg_dma_address(sg) = (uintptr_t)sg_virt(sg);
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}
Example #15
static int mn10300_dma_map_sg(struct device *dev, struct scatterlist *sglist,
		int nents, enum dma_data_direction direction,
		unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sglist, sg, nents, i) {
		BUG_ON(!sg_page(sg));

		sg->dma_address = sg_phys(sg);
	}

	return nents;
}
Example #16
static void huge_free_pages(struct drm_i915_gem_object *obj,
			    struct sg_table *pages)
{
	unsigned long nreal = obj->scratch / PAGE_SIZE;
	struct scatterlist *sg;

	for (sg = pages->sgl; sg && nreal--; sg = __sg_next(sg))
		__free_page(sg_page(sg));

	sg_free_table(pages);
	kfree(pages);
}
Example #17
static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
			           sg->length, direction);
	}
}
Example #18
static struct sg_table *i915_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					     enum dma_data_direction dir)
{
	struct drm_i915_gem_object *obj = attachment->dmabuf->priv;
	struct sg_table *st;
	struct scatterlist *src, *dst;
	int ret, i;

	ret = i915_mutex_lock_interruptible(obj->base.dev);
	if (ret)
		return ERR_PTR(ret);

	ret = i915_gem_object_get_pages(obj);
	if (ret) {
		st = ERR_PTR(ret);
		goto out;
	}

	/* Copy sg so that we make an independent mapping */
	st = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (st == NULL) {
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	ret = sg_alloc_table(st, obj->pages->nents, GFP_KERNEL);
	if (ret) {
		kfree(st);
		st = ERR_PTR(ret);
		goto out;
	}

	src = obj->pages->sgl;
	dst = st->sgl;
	for (i = 0; i < obj->pages->nents; i++) {
		sg_set_page(dst, sg_page(src), src->length, 0);
		dst = sg_next(dst);
		src = sg_next(src);
	}

	if (!dma_map_sg(attachment->dev, st->sgl, st->nents, dir)) {
		sg_free_table(st);
		kfree(st);
		st = ERR_PTR(-ENOMEM);
		goto out;
	}

	i915_gem_object_pin_pages(obj);

out:
	mutex_unlock(&obj->base.dev->struct_mutex);
	return st;
}
Example #19
static int arc_dma_map_sg(struct device *dev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir, unsigned long attrs)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
					       s->length, dir);

	return nents;
}
Example #20
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_offset(dev);
		sg->dma_length = sg->length;
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
	}

	return nents;
}
Example #21
static void free_sg(struct sock *sk, struct scatterlist *sg,
		    int *sg_num_elem, unsigned int *sg_size)
{
	int i, n = *sg_num_elem;

	for (i = 0; i < n; ++i) {
		sk_mem_uncharge(sk, sg[i].length);
		put_page(sg_page(&sg[i]));
	}
	*sg_num_elem = 0;
	*sg_size = 0;
}
Example #22
/*
 * Copy from sg to a dma block - used for transfers
 */
static inline void at91_mci_sg_to_dma(struct at91mci_host *host, struct mmc_data *data)
{
	unsigned int len, i, size;
	unsigned *dmabuf = host->buffer;

	size = data->blksz * data->blocks;
	len = data->sg_len;

	/* MCI1 rev2xx Data Write Operation and number of bytes erratum */
	if (at91mci_is_mci1rev2xx())
		if (host->total_length == 12)
			memset(dmabuf, 0, 12);

	/*
	 * Just loop through all entries. Size might not
	 * be the entire list though so make sure that
	 * we do not transfer too much.
	 */
	for (i = 0; i < len; i++) {
		struct scatterlist *sg;
		int amount;
		unsigned int *sgbuffer;

		sg = &data->sg[i];

		sgbuffer = kmap_atomic(sg_page(sg), KM_BIO_SRC_IRQ) + sg->offset;
		amount = min(size, sg->length);
		size -= amount;

		if (cpu_is_at91rm9200()) {	/* AT91RM9200 errata */
			int index;

			for (index = 0; index < (amount / 4); index++)
				*dmabuf++ = swab32(sgbuffer[index]);
		} else {
			char *tmpv = (char *)dmabuf;
			memcpy(tmpv, sgbuffer, amount);
			tmpv += amount;
			dmabuf = (unsigned *)tmpv;
		}

		kunmap_atomic(sgbuffer, KM_BIO_SRC_IRQ);

		if (size == 0)
			break;
	}

	/*
	 * Check that we didn't get a request to transfer
	 * more data than can fit into the SG list.
	 */
	BUG_ON(size != 0);
}
Example #23
/*
 *	PIO data transfer routine using the scatter gather table.
 */
static void ide_scsi_io_buffers(ide_drive_t *drive, struct ide_atapi_pc *pc,
				unsigned int bcount, int write)
{
	ide_hwif_t *hwif = drive->hwif;
	const struct ide_tp_ops *tp_ops = hwif->tp_ops;
	xfer_func_t *xf = write ? tp_ops->output_data : tp_ops->input_data;
	char *buf;
	int count;

	while (bcount) {
		count = min(pc->sg->length - pc->b_count, bcount);
		if (PageHighMem(sg_page(pc->sg))) {
			unsigned long flags;

			local_irq_save(flags);
			buf = kmap_atomic(sg_page(pc->sg), KM_IRQ0) +
					  pc->sg->offset;
			xf(drive, NULL, buf + pc->b_count, count);
			kunmap_atomic(buf - pc->sg->offset, KM_IRQ0);
			local_irq_restore(flags);
		} else {
			buf = sg_virt(pc->sg);
			xf(drive, NULL, buf + pc->b_count, count);
		}
		bcount -= count; pc->b_count += count;
		if (pc->b_count == pc->sg->length) {
			if (!--pc->sg_cnt)
				break;
			pc->sg = sg_next(pc->sg);
			pc->b_count = 0;
		}
	}

	if (bcount) {
		printk(KERN_ERR "%s: scatter gather table too small, %s\n",
				drive->name, write ? "padding with zeros"
						   : "discarding data");
		ide_pad_transfer(drive, write, bcount);
	}
}
Example #24
static void iser_data_buf_dump(struct iser_data_buf *data,
			       struct ib_device *ibdev)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(data->sg, sg, data->dma_nents, i)
		iser_dbg("sg[%d] dma_addr:0x%lX page:0x%p "
			 "off:0x%x sz:0x%x dma_len:0x%x\n",
			 i, (unsigned long)ib_sg_dma_address(ibdev, sg),
			 sg_page(sg), sg->offset,
			 sg->length, ib_sg_dma_len(ibdev, sg));
}
Example #25
static int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl,
			     int nents, enum dma_data_direction direction,
			     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	/* FIXME this part of code is untested */
	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = sg_phys(sg) + get_dma_direct_offset(dev);
		__dma_sync_page(page_to_phys(sg_page(sg)), sg->offset,
							sg->length, direction);
	}

	return nents;
}
Example #26
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		sg->dma_address = dma_direct_map_page(dev, sg_page(sg),
				sg->offset, sg->length, dir, attrs);
		if (sg->dma_address == DMA_MAPPING_ERROR)
			goto out_unmap;
		sg_dma_len(sg) = sg->length;
	}

	return nents;

out_unmap:
	dma_direct_unmap_sg(dev, sgl, i, dir, attrs | DMA_ATTR_SKIP_CPU_SYNC);
	return 0;
}
Example #27
static void free_sglist(struct scatterlist *sg, int nents)
{
	unsigned		i;

	if (!sg)
		return;
	for (i = 0; i < nents; i++) {
		if (!sg_page(&sg[i]))
			continue;
		kfree(sg_virt(&sg[i]));
	}
	kfree(sg);
}
Example #28
static struct sg_table *
huge_get_pages(struct drm_i915_gem_object *obj)
{
#define GFP (GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY)
	const unsigned long nreal = obj->scratch / PAGE_SIZE;
	const unsigned long npages = obj->base.size / PAGE_SIZE;
	struct scatterlist *sg, *src, *end;
	struct sg_table *pages;
	unsigned long n;

	pages = kmalloc(sizeof(*pages), GFP);
	if (!pages)
		return ERR_PTR(-ENOMEM);

	if (sg_alloc_table(pages, npages, GFP)) {
		kfree(pages);
		return ERR_PTR(-ENOMEM);
	}

	sg = pages->sgl;
	for (n = 0; n < nreal; n++) {
		struct page *page;

		page = alloc_page(GFP | __GFP_HIGHMEM);
		if (!page) {
			sg_mark_end(sg);
			goto err;
		}

		sg_set_page(sg, page, PAGE_SIZE, 0);
		sg = __sg_next(sg);
	}
	if (nreal < npages) {
		for (end = sg, src = pages->sgl; sg; sg = __sg_next(sg)) {
			sg_set_page(sg, sg_page(src), PAGE_SIZE, 0);
			src = __sg_next(src);
			if (src == end)
				src = pages->sgl;
		}
	}

	if (i915_gem_gtt_prepare_pages(obj, pages))
		goto err;

	return pages;

err:
	huge_free_pages(obj, pages);
	return ERR_PTR(-ENOMEM);
#undef GFP
}
Example #29
static int update2(struct hash_desc *desc,
		   struct scatterlist *sg, unsigned int nbytes)
{
	struct crypto_tfm *tfm = crypto_hash_tfm(desc->tfm);
	unsigned int alignmask = crypto_tfm_alg_alignmask(tfm);

	if (!nbytes)
		return 0;

	for (;;) {
		struct page *pg = sg_page(sg);
		unsigned int offset = sg->offset;
		unsigned int l = sg->length;

		if (unlikely(l > nbytes))
			l = nbytes;
		nbytes -= l;

		do {
			unsigned int bytes_from_page =
				min(l, (unsigned int)PAGE_SIZE - offset);
			char *src = crypto_kmap(pg, 0);
			char *p = src + offset;

			if (unlikely(offset & alignmask)) {
				unsigned int bytes =
					alignmask + 1 - (offset & alignmask);
				bytes = min(bytes, bytes_from_page);
				tfm->__crt_alg->cra_digest.dia_update(tfm, p,
								      bytes);
				p += bytes;
				bytes_from_page -= bytes;
				l -= bytes;
			}
			tfm->__crt_alg->cra_digest.dia_update(tfm, p,
							      bytes_from_page);
			crypto_kunmap(src, 0);
			crypto_yield(desc->flags);
			offset = 0;
			pg++;
			l -= bytes_from_page;
		} while (l > 0);

		if (!nbytes)
			break;
		sg = scatterwalk_sg_next(sg);
	}

	return 0;
}
Example #30
static int alpha_noop_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
			     enum dma_data_direction dir, struct dma_attrs *attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sgl, sg, nents, i) {
		void *va;

		BUG_ON(!sg_page(sg));
		va = sg_virt(sg);
		sg_dma_address(sg) = (dma_addr_t)virt_to_phys(va);
		sg_dma_len(sg) = sg->length;
	}

	return nents;
}