Example #1
/* Creates TCEs for a user provided buffer.  The user buffer must be
 * contiguous real kernel storage (not vmalloc).  The address of the buffer
 * passed here is the kernel (virtual) address of the buffer.  The buffer
 * need not be page aligned, the dma_addr_t returned will point to the same
 * byte within the page as vaddr.
 */
dma_addr_t iommu_map_single(struct iommu_table *tbl, void *vaddr,
		size_t size, unsigned long mask,
		enum dma_data_direction direction)
{
	dma_addr_t dma_handle = DMA_ERROR_CODE;
	unsigned long uaddr;
	unsigned int npages, align;

	BUG_ON(direction == DMA_NONE);

	uaddr = (unsigned long)vaddr;
	npages = iommu_num_pages(uaddr, size);

	if (tbl) {
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT &&
		    ((unsigned long)vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;

		dma_handle = iommu_alloc(tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, align);
		if (dma_handle == DMA_ERROR_CODE) {
			if (printk_ratelimit()) {
				printk(KERN_INFO "iommu_alloc failed, "
						"tbl %p vaddr %p npages %d\n",
						tbl, vaddr, npages);
			}
		} else
			dma_handle |= (uaddr & ~IOMMU_PAGE_MASK);
	}

	return dma_handle;
}
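
A minimal usage sketch (not taken from the kernel tree; the wrapper name example_map_buffer and the 32-bit DMA mask are assumptions for illustration): a caller passes a physically contiguous kernel buffer and must check the result against DMA_ERROR_CODE before programming the device.

/* Hypothetical caller of iommu_map_single() from Example #1. */
static dma_addr_t example_map_buffer(struct iommu_table *tbl, void *buf,
				     size_t len)
{
	dma_addr_t dma;

	/* Assume the device can address 32 bits of DMA space. */
	dma = iommu_map_single(tbl, buf, len, 0xffffffffUL, DMA_FROM_DEVICE);
	if (dma == DMA_ERROR_CODE)
		return DMA_ERROR_CODE;	/* mapping failed, caller backs out */

	/* The byte offset within the page is preserved:
	 * (dma & ~IOMMU_PAGE_MASK) == ((unsigned long)buf & ~IOMMU_PAGE_MASK)
	 */
	return dma;
}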
Example #2
void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction)
{
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sglist->dma_address;

		if (sglist->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sglist->dma_length);
		__iommu_free(tbl, dma_handle, npages);
		sglist++;
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
Example #3
void iommu_unmap_single(struct iommu_table *tbl, dma_addr_t dma_handle,
		size_t size, enum dma_data_direction direction)
{
	unsigned int npages;

	BUG_ON(direction == DMA_NONE);

	if (tbl) {
		npages = iommu_num_pages(dma_handle, size);
		iommu_free(tbl, dma_handle, npages);
	}
}
Example #4
File: pci_dma.c    Project: 03199618/linux
static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
				 size_t size, enum dma_data_direction direction,
				 struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	unsigned long iommu_page_index;
	int npages;

	npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
	dma_addr = dma_addr & PAGE_MASK;
	if (dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
			     ZPCI_TABLE_PROTECTED | ZPCI_PTE_INVALID))
		dev_err(dev, "Failed to unmap addr: %Lx\n", dma_addr);

	atomic64_add(npages, (atomic64_t *) &zdev->fmb->unmapped_pages);
	iommu_page_index = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
	dma_free_iommu(zdev, iommu_page_index, npages);
}
Example #5
File: pci_dma.c    Project: 03199618/linux
static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction direction,
				     struct dma_attrs *attrs)
{
	struct zpci_dev *zdev = get_zdev(to_pci_dev(dev));
	unsigned long nr_pages, iommu_page_index;
	unsigned long pa = page_to_phys(page) + offset;
	int flags = ZPCI_PTE_VALID;
	dma_addr_t dma_addr;

	/* This rounds up number of pages based on size and offset */
	nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
	iommu_page_index = dma_alloc_iommu(zdev, nr_pages);
	if (iommu_page_index == -1)
		goto out_err;

	/* Use rounded up size */
	size = nr_pages * PAGE_SIZE;

	dma_addr = zdev->start_dma + iommu_page_index * PAGE_SIZE;
	if (dma_addr + size > zdev->end_dma) {
		dev_err(dev, "(dma_addr: 0x%16.16LX + size: 0x%16.16lx) > end_dma: 0x%16.16Lx\n",
			 dma_addr, size, zdev->end_dma);
		goto out_free;
	}

	if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
		flags |= ZPCI_TABLE_PROTECTED;

	if (!dma_update_trans(zdev, pa, dma_addr, size, flags)) {
		atomic64_add(nr_pages, (atomic64_t *) &zdev->fmb->mapped_pages);
		return dma_addr + (offset & ~PAGE_MASK);
	}

out_free:
	dma_free_iommu(zdev, iommu_page_index, nr_pages);
out_err:
	dev_err(dev, "Failed to map addr: %lx\n", pa);
	return DMA_ERROR_CODE;
}
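
The comment in Example #5 notes that the page count is rounded up based on both the size and the intra-page offset. A sketch of that arithmetic, consistent with the three-argument iommu_num_pages() helper used above (the local name num_io_pages is only for illustration):

/* Fold the offset within the first I/O page into the length, then round
 * up to whole I/O pages.  E.g. offset 0xf00, len 0x200 with 4 KiB pages
 * spans two pages even though len is well under one page.
 */
static unsigned long num_io_pages(unsigned long addr, unsigned long len,
				  unsigned long io_page_size)
{
	unsigned long size = (addr & (io_page_size - 1)) + len;

	return DIV_ROUND_UP(size, io_page_size);	/* <linux/kernel.h> */
}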
Example #6
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		 struct scatterlist *sglist, int nelems,
		 unsigned long mask, enum dma_data_direction direction,
		 struct dma_attrs *attrs)
{
	dma_addr_t dma_next = 0, dma_addr;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount, i, build_fail = 0;
	unsigned int align;
	unsigned long handle;
	unsigned int max_seg_size;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	max_seg_size = dma_get_max_seg_size(dev);
	for_each_sg(sglist, s, nelems, i) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long) sg_virt(s);
		npages = iommu_num_pages(vaddr, slen, IOMMU_PAGE_SIZE(tbl));
		align = 0;
		if (tbl->it_page_shift < PAGE_SHIFT && slen >= PAGE_SIZE &&
		    (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - tbl->it_page_shift;
		entry = iommu_range_alloc(dev, tbl, npages, &handle,
					  mask >> tbl->it_page_shift, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				dev_info(dev, "iommu_alloc failed, tbl %p "
					 "vaddr %lx npages %lu\n", tbl, vaddr,
					 npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << tbl->it_page_shift;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK(tbl));

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
		build_fail = ppc_md.tce_build(tbl, entry, npages,
					      vaddr & IOMMU_PAGE_MASK(tbl),
					      direction, attrs);
		if (unlikely(build_fail))
			goto failure;

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next) ||
			    (outs->dma_length + s->length > max_seg_size)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs = sg_next(outs);
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}
Example #7
static int __iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		struct scatterlist *sglist, int nelems,
		unsigned long mask, enum dma_data_direction direction,
		int weak)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount;
	unsigned int align;
	unsigned long handle;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("sg mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	for (s = outs; nelems; nelems--, s++) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long)page_address(s->page) + s->offset;
		npages = iommu_num_pages(vaddr, slen);
		align = 0;
		if (IOMMU_PAGE_SHIFT < PAGE_SHIFT && (vaddr & ~PAGE_MASK) == 0)
			align = PAGE_SHIFT - IOMMU_PAGE_SHIFT;
		entry = iommu_range_alloc(tbl, npages, &handle,
					  mask >> IOMMU_PAGE_SHIFT, align);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << IOMMU_PAGE_SHIFT;
		dma_addr |= (s->offset & ~IOMMU_PAGE_MASK);

		DBG("  - %lu pages, entry: %lx, dma_addr: %lx\n",
			    npages, entry, dma_addr);

		/* Insert into HW table */
#ifdef CONFIG_HAVE_DMA_ATTRS /* Hack */
		if (weak && machine_is(cell))
			tce_build_cell_weak(tbl, entry, npages,
					vaddr & IOMMU_PAGE_MASK, direction);
		else
#endif
			ppc_md.tce_build(tbl, entry, npages,
					vaddr & IOMMU_PAGE_MASK, direction);

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++; outs++;
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %ux\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs++;
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for (s = &sglist[0]; s <= outs; s++) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & IOMMU_PAGE_MASK;
			npages = iommu_num_pages(s->dma_address, s->dma_length);
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}
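
The merge test in Examples #6 and #7 reduces to a small predicate: a new segment can only be folded into the previous one if virtual merging is enabled at all, its DMA address starts exactly where the previous segment ended, and (in the newer code of Example #6) the combined length stays within the device's maximum segment size. A restatement of that check, for illustration only (can_merge_segment is not a kernel function):

/* novmerge mirrors the global flag used above; dma_next is the end of the
 * segment being built; max_seg_size comes from dma_get_max_seg_size(dev).
 */
static bool can_merge_segment(int novmerge, dma_addr_t dma_addr,
			      dma_addr_t dma_next, unsigned int cur_len,
			      unsigned int add_len, unsigned int max_seg_size)
{
	if (novmerge)
		return false;		/* virtual merging disabled */
	if (dma_addr != dma_next)
		return false;		/* not contiguous in DMA space */
	if (cur_len + add_len > max_seg_size)
		return false;		/* would exceed the HW segment limit */
	return true;
}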