void iommu_unmap_sg(struct iommu_table *tbl, struct scatterlist *sglist,
		int nelems, enum dma_data_direction direction)
{
	unsigned long flags;

	BUG_ON(direction == DMA_NONE);

	if (!tbl)
		return;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	while (nelems--) {
		unsigned int npages;
		dma_addr_t dma_handle = sglist->dma_address;

		if (sglist->dma_length == 0)
			break;
		npages = iommu_num_pages(dma_handle, sglist->dma_length);
		__iommu_free(tbl, dma_handle, npages);
		sglist++;
	}

	/* Flush/invalidate TLBs if necessary. As for iommu_free(), we
	 * do not do an mb() here, the affected platforms do not need it
	 * when freeing.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
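The iommu_num_pages() helper called above is not part of this excerpt. A minimal sketch of what it computes, assuming the fixed-size IOMMU_PAGE_ALIGN/IOMMU_PAGE_MASK/IOMMU_PAGE_SHIFT macros this era of the code uses elsewhere:

/* Sketch, not from this listing: number of IOMMU pages spanned by
 * the byte range [vaddr, vaddr + slen). The IOMMU_PAGE_* macros are
 * assumed from the surrounding file.
 */
static inline unsigned int iommu_num_pages(unsigned long vaddr,
					   unsigned long slen)
{
	unsigned long npages;

	npages = IOMMU_PAGE_ALIGN(vaddr + slen) - (vaddr & IOMMU_PAGE_MASK);
	npages >>= IOMMU_PAGE_SHIFT;

	return npages;
}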
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);
}
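__iommu_free(), called by both free wrappers and by the error paths in iommu_alloc() and iommu_map_sg(), is also not shown in this listing. A minimal sketch for the ppc_md-based variants, assuming ppc_md.tce_free clears the hardware TCEs and tbl->it_map is the allocation bitmap (the later it_ops-based variant would go through tbl->it_ops->clear instead):

/* Sketch, not from this listing: release npages TCE entries starting
 * at dma_addr. Callers hold tbl->it_lock where the era requires it.
 */
static void __iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
			 unsigned int npages)
{
	unsigned long entry, free_entry;

	entry = dma_addr >> IOMMU_PAGE_SHIFT;
	free_entry = entry - tbl->it_offset;

	/* Reject ranges that fall outside the table */
	if (entry < tbl->it_offset || free_entry + npages > tbl->it_size) {
		if (printk_ratelimit())
			printk(KERN_INFO "iommu_free: invalid entry\n");
		return;
	}

	/* Clear the hardware TCEs, then return the range to the bitmap */
	ppc_md.tce_free(tbl, entry, npages);
	bitmap_clear(tbl->it_map, free_entry, npages);
}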
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      struct dma_attrs *attrs)
{
	unsigned long entry, flags;
	dma_addr_t ret = DMA_ERROR_CODE;
	int build_fail;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == DMA_ERROR_CODE)) {
		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << IOMMU_PAGE_SHIFT;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = ppc_md.tce_build(tbl, entry, npages,
				      (unsigned long)page & IOMMU_PAGE_MASK,
				      direction, attrs);

	/* ppc_md.tce_build() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * DMA_ERROR_CODE. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);

		spin_unlock_irqrestore(&(tbl->it_lock), flags);
		return DMA_ERROR_CODE;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
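For context, a hedged sketch of how a single-buffer mapping wrapper of this era calls iommu_alloc(). The wrapper shown (iommu_map_page()) and its exact details are assumptions, not part of this excerpt; it reuses the iommu_num_pages() sketch above:

/* Sketch, not from this listing: map one buffer through the table.
 * Alignment handling is omitted; treat names and details as
 * assumptions about a typical caller.
 */
dma_addr_t iommu_map_page(struct device *dev, struct iommu_table *tbl,
			  struct page *page, unsigned long offset,
			  size_t size, unsigned long mask,
			  enum dma_data_direction direction,
			  struct dma_attrs *attrs)
{
	void *vaddr = page_address(page) + offset;
	unsigned int npages = iommu_num_pages((unsigned long)vaddr, size);
	dma_addr_t dma_handle = DMA_ERROR_CODE;

	BUG_ON(direction == DMA_NONE);

	if (tbl)
		dma_handle = iommu_alloc(dev, tbl, vaddr, npages, direction,
					 mask >> IOMMU_PAGE_SHIFT, 0, attrs);

	/* Re-add the sub-page offset lost by page-granular mapping */
	if (dma_handle != DMA_ERROR_CODE)
		dma_handle |= (unsigned long)vaddr & ~IOMMU_PAGE_MASK;

	return dma_handle;
}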
static void iommu_free(struct iommu_table *tbl, dma_addr_t dma_addr,
		unsigned int npages)
{
	unsigned long flags;

	spin_lock_irqsave(&(tbl->it_lock), flags);

	__iommu_free(tbl, dma_addr, npages);

	/* Make sure TLB cache is flushed if the HW needs it. We do
	 * not do an mb() here on purpose, it is not needed on any of
	 * the current platforms.
	 */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);
}
static dma_addr_t iommu_alloc(struct device *dev, struct iommu_table *tbl,
			      void *page, unsigned int npages,
			      enum dma_data_direction direction,
			      unsigned long mask, unsigned int align_order,
			      unsigned long attrs)
{
	unsigned long entry;
	dma_addr_t ret = IOMMU_MAPPING_ERROR;
	int build_fail;

	entry = iommu_range_alloc(dev, tbl, npages, NULL, mask, align_order);

	if (unlikely(entry == IOMMU_MAPPING_ERROR))
		return IOMMU_MAPPING_ERROR;

	entry += tbl->it_offset;	/* Offset into real TCE table */
	ret = entry << tbl->it_page_shift;	/* Set the return dma address */

	/* Put the TCEs in the HW table */
	build_fail = tbl->it_ops->set(tbl, entry, npages,
				      (unsigned long)page &
				      IOMMU_PAGE_MASK(tbl), direction, attrs);

	/* tbl->it_ops->set() only returns non-zero for transient errors.
	 * Clean up the table bitmap in this case and return
	 * IOMMU_MAPPING_ERROR. For all other errors the functionality is
	 * not altered.
	 */
	if (unlikely(build_fail)) {
		__iommu_free(tbl, ret, npages);
		return IOMMU_MAPPING_ERROR;
	}

	/* Flush/invalidate TLB caches if necessary */
	if (tbl->it_ops->flush)
		tbl->it_ops->flush(tbl);

	/* Make sure updates are seen by hardware */
	mb();

	return ret;
}
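This later variant dispatches through a per-table ops structure rather than the global ppc_md hooks, which lets each platform supply its own TCE handlers. Sketched below are the struct iommu_table_ops fields this listing actually uses (set/clear/flush); the real structure carries more fields, so treat this as a trimmed assumption:

/* Sketch of the per-table callbacks assumed by the variant above;
 * field list trimmed to what this listing references.
 */
struct iommu_table_ops {
	int (*set)(struct iommu_table *tbl, long index, long npages,
		   unsigned long uaddr, enum dma_data_direction direction,
		   unsigned long attrs);
	void (*clear)(struct iommu_table *tbl, long index, long npages);
	void (*flush)(struct iommu_table *tbl);
};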
int iommu_map_sg(struct device *dev, struct iommu_table *tbl,
		struct scatterlist *sglist, int nelems,
		unsigned long mask, enum dma_data_direction direction)
{
	dma_addr_t dma_next = 0, dma_addr;
	unsigned long flags;
	struct scatterlist *s, *outs, *segstart;
	int outcount, incount;
	unsigned long handle;

	BUG_ON(direction == DMA_NONE);

	if ((nelems == 0) || !tbl)
		return 0;

	outs = s = segstart = &sglist[0];
	outcount = 1;
	incount = nelems;
	handle = 0;

	/* Init first segment length for backout at failure */
	outs->dma_length = 0;

	DBG("mapping %d elements:\n", nelems);

	spin_lock_irqsave(&(tbl->it_lock), flags);

	for (s = outs; nelems; nelems--, s++) {
		unsigned long vaddr, npages, entry, slen;

		slen = s->length;
		/* Sanity check */
		if (slen == 0) {
			dma_next = 0;
			continue;
		}
		/* Allocate iommu entries for that segment */
		vaddr = (unsigned long)page_address(s->page) + s->offset;
		npages = PAGE_ALIGN(vaddr + slen) - (vaddr & PAGE_MASK);
		npages >>= PAGE_SHIFT;
		entry = iommu_range_alloc(tbl, npages, &handle,
					  mask >> PAGE_SHIFT, 0);

		DBG("  - vaddr: %lx, size: %lx\n", vaddr, slen);

		/* Handle failure */
		if (unlikely(entry == DMA_ERROR_CODE)) {
			if (printk_ratelimit())
				printk(KERN_INFO "iommu_alloc failed, tbl %p vaddr %lx"
				       " npages %lx\n", tbl, vaddr, npages);
			goto failure;
		}

		/* Convert entry to a dma_addr_t */
		entry += tbl->it_offset;
		dma_addr = entry << PAGE_SHIFT;
		dma_addr |= s->offset;

		DBG("  - %lx pages, entry: %lx, dma_addr: %lx\n",
		    npages, entry, dma_addr);

		/* Insert into HW table */
		ppc_md.tce_build(tbl, entry, npages, vaddr & PAGE_MASK, direction);

		/* If we are in an open segment, try merging */
		if (segstart != s) {
			DBG("  - trying merge...\n");
			/* We cannot merge if:
			 * - allocated dma_addr isn't contiguous to previous allocation
			 */
			if (novmerge || (dma_addr != dma_next)) {
				/* Can't merge: create a new segment */
				segstart = s;
				outcount++;
				outs++;
				DBG("    can't merge, new segment.\n");
			} else {
				outs->dma_length += s->length;
				DBG("    merged, new len: %lx\n", outs->dma_length);
			}
		}

		if (segstart == s) {
			/* This is a new segment, fill entries */
			DBG("  - filling new segment.\n");
			outs->dma_address = dma_addr;
			outs->dma_length = slen;
		}

		/* Calculate next page pointer for contiguous check */
		dma_next = dma_addr + slen;

		DBG("  - dma next is: %lx\n", dma_next);
	}

	/* Flush/invalidate TLB caches if necessary */
	if (ppc_md.tce_flush)
		ppc_md.tce_flush(tbl);

	spin_unlock_irqrestore(&(tbl->it_lock), flags);

	DBG("mapped %d elements:\n", outcount);

	/* For the sake of iommu_unmap_sg, we clear out the length in the
	 * next entry of the sglist if we didn't fill the list completely
	 */
	if (outcount < incount) {
		outs++;
		outs->dma_address = DMA_ERROR_CODE;
		outs->dma_length = 0;
	}

	/* Make sure updates are seen by hardware */
	mb();

	return outcount;

 failure:
	for (s = &sglist[0]; s <= outs; s++) {
		if (s->dma_length != 0) {
			unsigned long vaddr, npages;

			vaddr = s->dma_address & PAGE_MASK;
			npages = (PAGE_ALIGN(s->dma_address + s->dma_length)
				  - vaddr) >> PAGE_SHIFT;
			__iommu_free(tbl, vaddr, npages);
			s->dma_address = DMA_ERROR_CODE;
			s->dma_length = 0;
		}
	}
	spin_unlock_irqrestore(&(tbl->it_lock), flags);
	return 0;
}
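iommu_map_sg() relies on two file-scope definitions not shown in this excerpt: the DBG() macro and the novmerge flag that disables merging of virtually contiguous segments. A sketch consistent with the call sites above; the CONFIG_IOMMU_VMERGE conditional is an assumption about how novmerge is chosen:

/* Sketch, not from this listing: debug output and the merge switch
 * used by iommu_map_sg().
 */
#ifdef DEBUG
#define DBG(...) printk(__VA_ARGS__)
#else
#define DBG(...)
#endif

#ifdef CONFIG_IOMMU_VMERGE
static int novmerge = 0;	/* allow merging adjacent DMA ranges */
#else
static int novmerge = 1;	/* keep one DMA segment per sg entry */
#endif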