int ion_cma_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer, void *vaddr, unsigned int offset, unsigned int length, unsigned int cmd) { void (*outer_cache_op)(phys_addr_t, phys_addr_t); switch (cmd) { case ION_IOC_CLEAN_CACHES: dmac_clean_range(vaddr, vaddr + length); outer_cache_op = outer_clean_range; break; case ION_IOC_INV_CACHES: dmac_inv_range(vaddr, vaddr + length); outer_cache_op = outer_inv_range; break; case ION_IOC_CLEAN_INV_CACHES: dmac_flush_range(vaddr, vaddr + length); outer_cache_op = outer_flush_range; break; default: return -EINVAL; } if (cma_heap_has_outer_cache) { struct ion_cma_buffer_info *info = buffer->priv_virt; outer_cache_op(info->handle, info->handle + length); } return 0; }
int ion_cp_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer, void *vaddr, unsigned int offset, unsigned int length, unsigned int cmd) { void (*outer_cache_op)(phys_addr_t, phys_addr_t); struct ion_cp_heap *cp_heap = container_of(heap, struct ion_cp_heap, heap); switch (cmd) { case ION_IOC_CLEAN_CACHES: dmac_clean_range(vaddr, vaddr + length); outer_cache_op = outer_clean_range; break; case ION_IOC_INV_CACHES: dmac_inv_range(vaddr, vaddr + length); outer_cache_op = outer_inv_range; break; case ION_IOC_CLEAN_INV_CACHES: dmac_flush_range(vaddr, vaddr + length); outer_cache_op = outer_flush_range; break; default: return -EINVAL; } if (cp_heap->has_outer_cache) { unsigned long pstart = buffer->priv_phys + offset; outer_cache_op(pstart, pstart + length); } return 0; }
int ion_cma_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer, void *vaddr, unsigned int offset, unsigned int length, unsigned int cmd) { void (*outer_cache_op)(phys_addr_t, phys_addr_t); switch (cmd) { case ION_IOC_CLEAN_CACHES: if (!vaddr) dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, buffer->sg_table->nents, DMA_TO_DEVICE); else dmac_clean_range(vaddr, vaddr + length); outer_cache_op = outer_clean_range; break; case ION_IOC_INV_CACHES: if (!vaddr) dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl, buffer->sg_table->nents, DMA_FROM_DEVICE); else dmac_inv_range(vaddr, vaddr + length); outer_cache_op = outer_inv_range; break; case ION_IOC_CLEAN_INV_CACHES: if (!vaddr) { dma_sync_sg_for_device(NULL, buffer->sg_table->sgl, buffer->sg_table->nents, DMA_TO_DEVICE); dma_sync_sg_for_cpu(NULL, buffer->sg_table->sgl, buffer->sg_table->nents, DMA_FROM_DEVICE); } else { dmac_flush_range(vaddr, vaddr + length); } outer_cache_op = outer_flush_range; break; default: return -EINVAL; } if (cma_heap_has_outer_cache) { struct ion_cma_buffer_info *info = buffer->priv_virt; outer_cache_op(info->handle, info->handle + length); } return 0; }
int ion_system_contig_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer, void *vaddr, unsigned int offset, unsigned int length, unsigned int cmd) { void (*outer_cache_op)(phys_addr_t, phys_addr_t); switch (cmd) { case ION_IOC_CLEAN_CACHES: dmac_clean_range(vaddr, vaddr + length); outer_cache_op = outer_clean_range; break; case ION_IOC_INV_CACHES: dmac_inv_range(vaddr, vaddr + length); outer_cache_op = outer_inv_range; break; case ION_IOC_CLEAN_INV_CACHES: dmac_flush_range(vaddr, vaddr + length); outer_cache_op = outer_flush_range; break; default: return -EINVAL; } if (system_heap_contig_has_outer_cache) { unsigned long pstart; pstart = virt_to_phys(buffer->priv_virt) + offset; if (!pstart) { WARN(1, "Could not do virt to phys translation on %p\n", buffer->priv_virt); return -EINVAL; } outer_cache_op(pstart, pstart + PAGE_SIZE); } return 0; }
/*
 * ion_system_heap_cache_ops - CPU cache maintenance for a vmalloc-backed
 * system heap buffer.
 *
 * Performs the inner (L1) operation selected by @cmd on the kernel
 * mapping [@vaddr, @vaddr + @length).  Because the buffer is only
 * virtually contiguous, the outer (L2) maintenance must walk it one
 * page at a time, translating each vmalloc page to its physical address.
 *
 * Returns 0 on success, -EINVAL for an unrecognized @cmd, an
 * out-of-bounds range, or a failed page/phys translation.
 */
int ion_system_heap_cache_ops(struct ion_heap *heap, struct ion_buffer *buffer,
			void *vaddr, unsigned int offset, unsigned int length,
			unsigned int cmd)
{
	void (*outer_cache_op)(phys_addr_t, phys_addr_t);

	switch (cmd) {
	case ION_IOC_CLEAN_CACHES:
		dmac_clean_range(vaddr, vaddr + length);
		outer_cache_op = outer_clean_range;
		break;
	case ION_IOC_INV_CACHES:
		dmac_inv_range(vaddr, vaddr + length);
		outer_cache_op = outer_inv_range;
		break;
	case ION_IOC_CLEAN_INV_CACHES:
		dmac_flush_range(vaddr, vaddr + length);
		outer_cache_op = outer_flush_range;
		break;
	default:
		return -EINVAL;
	}

	if (system_heap_has_outer_cache) {
		unsigned long pstart;
		void *vend;
		void *vtemp;
		unsigned long ln = 0;

		/* Bounds of the heap's own mapping; the walk below uses
		 * buffer->priv_virt + offset rather than @vaddr, which may
		 * be a different (e.g. per-client) mapping of the buffer. */
		vend = buffer->priv_virt + buffer->size;
		vtemp = buffer->priv_virt + offset;

		/* Reject requests that run past the end of the buffer. */
		if ((vtemp+length) > vend) {
			pr_err("Trying to flush outside of mapped range.\n");
			pr_err("End of mapped range: %p, trying to flush to "
				"address %p\n", vend, vtemp+length);
			WARN(1, "%s: called with heap name %s, buffer size 0x%x, "
				"vaddr 0x%p, offset 0x%x, length: 0x%x\n",
				__func__, heap->name, buffer->size, vaddr,
				offset, length);
			return -EINVAL;
		}

		/* Walk the range page by page: vmalloc pages are not
		 * physically contiguous, so each page needs its own
		 * virt -> page -> phys translation before the L2 op. */
		for (; ln < length && vtemp < vend;
		      vtemp += PAGE_SIZE, ln += PAGE_SIZE) {
			struct page *page = vmalloc_to_page(vtemp);
			if (!page) {
				WARN(1, "Could not find page for virt. address %p\n",
					vtemp);
				return -EINVAL;
			}
			pstart = page_to_phys(page);
			/*
			 * If page -> phys is returning NULL, something
			 * has really gone wrong...
			 */
			if (!pstart) {
				WARN(1, "Could not translate %p to physical address\n",
					vtemp);
				return -EINVAL;
			}
			outer_cache_op(pstart, pstart + PAGE_SIZE);
		}
	}
	return 0;
}