/*
 * omap_tiler_cache_operation() - Perform a cache maintenance operation on a
 * cached TILER 1D (page-mode) ION buffer.
 * @buffer:  ION buffer to operate on; must be non-NULL and mapped cacheable.
 * @len:     number of bytes to maintain, starting at @vaddr; must not exceed
 *           the buffer's allocated size.
 * @vaddr:   user virtual address of the start of the region.
 * @cacheop: CACHE_FLUSH to clean inner and outer caches for the range;
 *           any other value invalidates the range instead.
 *
 * Returns 0 on success or -EINVAL if validation of the buffer, its mapping,
 * its size, or its pixel format fails.
 */
int omap_tiler_cache_operation(struct ion_buffer *buffer, size_t len,
			       unsigned long vaddr,
			       enum cache_operation cacheop)
{
	struct omap_tiler_info *info;
	int n_pages;
	phys_addr_t paddr;

	if (!buffer) {
		pr_err("%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	if (!buffer->cached) {
		pr_err("%s(): buffer not mapped as cacheable\n", __func__);
		return -EINVAL;
	}
	info = buffer->priv_virt;
	if (!info) {
		pr_err("%s(): tiler info of buffer is NULL\n", __func__);
		return -EINVAL;
	}
	n_pages = info->n_tiler_pages;
	/*
	 * Widen before multiplying: n_pages * PAGE_SIZE in plain int can
	 * overflow for large buffers and defeat this bounds check.
	 */
	if (len > ((size_t)n_pages * PAGE_SIZE)) {
		pr_err("%s(): size to flush is greater than allocated size\n",
		       __func__);
		return -EINVAL;
	}
	if (TILER_PIXEL_FMT_PAGE != info->fmt) {
		pr_err("%s(): only TILER 1D buffers can be cached\n",
		       __func__);
		return -EINVAL;
	}

	/* Translate only after the buffer has passed validation. */
	paddr = tiler_virt2phys(vaddr);

	if (cacheop == CACHE_FLUSH) {
		/* Clean+invalidate inner (L1) cache, then outer (L2). */
		flush_cache_user_range(vaddr, vaddr + len);
		outer_flush_range(paddr, paddr + len);
	} else {
		/*
		 * Invalidate outer cache first so stale L2 lines cannot be
		 * refetched after the inner-cache maintenance below.
		 */
		outer_inv_range(paddr, paddr + len);
		dmac_map_area((const void *)vaddr, len, DMA_FROM_DEVICE);
	}
	return 0;
}
/*
 * omap_tiler_cache_operation() - Perform a cache maintenance operation on a
 * cached TILER 1D (page-mode) ION buffer.
 * @buffer:  ION buffer to operate on; must be non-NULL and mapped cacheable.
 * @len:     number of bytes to maintain, starting at @vaddr; must not exceed
 *           the buffer's allocated size.
 * @vaddr:   user virtual address of the start of the region.
 * @cacheop: CACHE_FLUSH to clean the outer cache for the range; any other
 *           value invalidates the outer range instead.
 *
 * For regions larger than FULL_CACHE_FLUSH_THRESHOLD a whole-cache flush on
 * every CPU is performed instead of ranged maintenance.
 *
 * Returns 0 on success or -EINVAL if validation of the buffer, its mapping,
 * its size, or its pixel format fails.
 */
int omap_tiler_cache_operation(struct ion_buffer *buffer, size_t len,
			       unsigned long vaddr,
			       enum cache_operation cacheop)
{
	struct omap_tiler_info *info;
	int n_pages;

	if (!buffer) {
		pr_err("%s(): buffer is NULL\n", __func__);
		return -EINVAL;
	}
	if (!buffer->cached) {
		pr_err("%s(): buffer not mapped as cacheable\n", __func__);
		return -EINVAL;
	}
	info = buffer->priv_virt;
	if (!info) {
		pr_err("%s(): tiler info of buffer is NULL\n", __func__);
		return -EINVAL;
	}
	n_pages = info->n_tiler_pages;
	/*
	 * Widen before multiplying: n_pages * PAGE_SIZE in plain int can
	 * overflow for large buffers and defeat this bounds check.
	 */
	if (len > ((size_t)n_pages * PAGE_SIZE)) {
		pr_err("%s(): size to flush is greater than allocated size\n",
		       __func__);
		return -EINVAL;
	}
	if (TILER_PIXEL_FMT_PAGE != info->fmt) {
		pr_err("%s(): only TILER 1D buffers can be cached\n",
		       __func__);
		return -EINVAL;
	}

	/* For large regions a full cache flush is cheaper than ranged ops. */
	if (len > FULL_CACHE_FLUSH_THRESHOLD) {
		on_each_cpu(per_cpu_cache_flush_arm, NULL, 1);
		outer_flush_all();
		return 0;
	}

	/*
	 * Clean+invalidate the inner (L1) cache for the user range, then do
	 * the requested outer (L2) maintenance. The outer range is addressed
	 * via tiler_addrs[0]; this presumes the 1D buffer is physically
	 * contiguous from that address — TODO confirm against the allocator.
	 */
	flush_cache_user_range(vaddr, vaddr + len);
	if (cacheop == CACHE_FLUSH)
		outer_flush_range(info->tiler_addrs[0],
				  info->tiler_addrs[0] + len);
	else
		outer_inv_range(info->tiler_addrs[0],
				info->tiler_addrs[0] + len);
	return 0;
}
/**
 * previewer_vbq_prepare - Videobuffer is prepared and mmapped.
 * @q: Structure containing the videobuffer queue.
 * @vb: Structure containing the videobuffer used for previewer processing.
 * @field: Type of field to set in videobuffer device.
 *
 * Validates the user-supplied buffer, records the frame geometry from the
 * device parameters, performs videobuf_iolock() + ISP MMU mapping on first
 * use (VIDEOBUF_NEEDS_INIT), then marks the buffer PREPARED and flushes the
 * user cache range. On any failure the buffer is released via
 * previewer_vbq_release().
 *
 * Returns 0 if successful, or -EINVAL if buffer couldn't get allocated, or
 * -EIO if the ISP MMU mapping fails
 **/
static int previewer_vbq_prepare(struct videobuf_queue *q,
					struct videobuf_buffer *vb,
					enum v4l2_field field)
{
	struct prev_fh *fh = q->priv_data;
	struct prev_device *device = fh->device;
	int err = -EINVAL;
	unsigned int isp_addr;
	struct videobuf_dmabuf *dma = videobuf_to_dma(vb);

	dev_dbg(prev_dev, "previewer_vbq_prepare E\n");

	if (q->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		/* Input/output buffer path: size from prev_bufsize. */
		spin_lock(&device->inout_vbq_lock);
		if (vb->baddr) {
			vb->size = prev_bufsize;
			vb->bsize = prev_bufsize;
			DPRINTK_PREVIEWER("[%s] bsize = %d\n",
				__func__, vb->bsize);
		} else {
			/* Userspace gave no buffer address; bail out. */
			spin_unlock(&device->inout_vbq_lock);
			dev_err(prev_dev, "No user buffer allocated\n");
			goto out;
		}
		vb->width = device->params->size_params.hsize;
		vb->height = device->params->size_params.vsize;
		vb->field = field;
		spin_unlock(&device->inout_vbq_lock);

		if (vb->state == VIDEOBUF_NEEDS_INIT) {
			/* First prepare: lock pages and map into ISP MMU. */
			DPRINTK_PREVIEWER("[%s] baddr = %08x\n",
				__func__, (int)vb->baddr);
			err = videobuf_iolock(q, vb, NULL);
			if (!err) {
				isp_addr = ispmmu_map_sg(dma->sglist,
							dma->sglen);
				if (!isp_addr) {
					err = -EIO;
				} else {
					device->isp_addr_read = isp_addr;
					DPRINTK_PREVIEWER("[%s] "
						"isp_addr_read = %08x\n",
						__func__, isp_addr);
				}
			}
		}
		/*
		 * NOTE(review): if the buffer is NOT in VIDEOBUF_NEEDS_INIT,
		 * err keeps its initial -EINVAL and the buffer is released
		 * below — confirm a re-prepare of an already-initialized
		 * buffer is really meant to fail.
		 */
		if (!err) {
			vb->state = VIDEOBUF_PREPARED;
			/*
			 * NOTE(review): 3-argument flush_cache_user_range()
			 * (older ARM signature taking a vma, NULL here) —
			 * other code in this file uses the 2-argument form;
			 * verify against the kernel version being targeted.
			 */
			flush_cache_user_range(NULL, vb->baddr,
				(vb->baddr + vb->bsize));
		} else {
			previewer_vbq_release(q, vb);
		}
	} else if (q->type == V4L2_BUF_TYPE_PRIVATE) {
		/* LSC (lens shading correction) table buffer path. */
		spin_lock(&device->lsc_vbq_lock);
		if (vb->baddr) {
			vb->size = lsc_bufsize;
			vb->bsize = lsc_bufsize;
			DPRINTK_PREVIEWER("[%s] bsize = %d\n",
				__func__, vb->bsize);
		} else {
			spin_unlock(&device->lsc_vbq_lock);
			dev_err(prev_dev, "No user buffer allocated\n");
			goto out;
		}
		vb->width = device->params->size_params.hsize;
		vb->height = device->params->size_params.vsize;
		vb->field = field;
		spin_unlock(&device->lsc_vbq_lock);

		if (vb->state == VIDEOBUF_NEEDS_INIT) {
			/* First prepare: lock pages and map into ISP MMU. */
			DPRINTK_PREVIEWER("[%s] baddr = %08x\n",
				__func__, (int)vb->baddr);
			err = videobuf_iolock(q, vb, NULL);
			if (!err) {
				isp_addr = ispmmu_map_sg(dma->sglist,
							dma->sglen);
				if (!isp_addr) {
					err = -EIO;
				} else {
					device->isp_addr_lsc = isp_addr;
					DPRINTK_PREVIEWER("[%s] isp_addr_lsc ="
						" %08x\n", __func__, isp_addr);
				}
			}
		}
		/* Same release-on-error / flush-on-success handling as the
		 * capture path above. */
		if (!err) {
			vb->state = VIDEOBUF_PREPARED;
			flush_cache_user_range(NULL, vb->baddr,
				(vb->baddr + vb->bsize));
		} else {
			previewer_vbq_release(q, vb);
		}
	} else {
		/* Unsupported queue type. */
		return -EINVAL;
	}

	dev_dbg(prev_dev, "previewer_vbq_prepare L\n");
out:
	return err;
}