/*
 * The device passed to the DMA API depends on whether the statistics block uses
 * ISP DMA, external DMA or PIO to transfer data.
 *
 * The first case (for the AEWB and AF engines) passes the ISP device, resulting
 * in the DMA buffers being mapped through the ISP IOMMU.
 *
 * The second case (for the histogram engine) should pass the DMA engine device.
 * As that device isn't accessible through the OMAP DMA engine API the driver
 * passes NULL instead, resulting in the buffers being mapped directly as
 * physical pages.
 *
 * The third case (for the histogram engine) doesn't require any mapping. The
 * buffers could be allocated with kmalloc/vmalloc, but we still use
 * dma_alloc_coherent() for consistency purpose.
 */
static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
{
	/*
	 * NULL dev maps the buffers as plain physical pages instead of
	 * going through the ISP IOMMU (see the comment block above).
	 */
	struct device *dev = ISP_STAT_USES_DMAENGINE(stat)
			   ? NULL : stat->isp->dev;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&stat->isp->stat_lock, flags);

	/* A locked buffer would be freed from under its user below. */
	BUG_ON(stat->locked_buf != NULL);

	/* Are the old buffers big enough? */
	if (stat->buf_alloc_size >= size) {
		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
		return 0;
	}

	/* Reallocation is only safe while the engine is fully idle. */
	if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) {
		dev_info(stat->isp->dev,
			 "%s: trying to allocate memory when busy\n",
			 stat->subdev.name);
		spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
		return -EBUSY;
	}

	/* Allocation may sleep; drop the lock before touching the buffers. */
	spin_unlock_irqrestore(&stat->isp->stat_lock, flags);

	isp_stat_bufs_free(stat);

	stat->buf_alloc_size = size;

	for (i = 0; i < STAT_MAX_BUFS; i++) {
		struct ispstat_buffer *buf = &stat->buf[i];
		int ret;

		ret = isp_stat_bufs_alloc_one(dev, buf, size);
		if (ret < 0) {
			dev_err(stat->isp->dev,
				"%s: Failed to allocate DMA buffer %u\n",
				stat->subdev.name, i);
			/* Release the buffers allocated so far. */
			isp_stat_bufs_free(stat);
			return ret;
		}

		buf->empty = 1;

		/* Fixed: the debug format string was missing its trailing \n. */
		dev_dbg(stat->isp->dev,
			"%s: buffer[%u] allocated. dma=0x%08lx virt=0x%08lx\n",
			stat->subdev.name, i,
			(unsigned long)buf->dma_addr,
			(unsigned long)buf->virt_addr);
	}

	return 0;
}
/*
 * Make sure the statistics buffers are at least @size bytes large, growing
 * them when needed. Fails with -EBUSY if the module is enabled or currently
 * processing a buffer, since the buffers cannot be swapped out from under
 * the hardware. Dispatches to the coherent-DMA or IOMMU allocator depending
 * on how the module transfers its data.
 */
static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
{
	unsigned long irqflags;
	int ret;

	spin_lock_irqsave(&stat->isp->stat_lock, irqflags);

	/* A locked buffer must never exist at reallocation time. */
	BUG_ON(stat->locked_buf != NULL);

	/* Are the old buffers big enough? */
	if (stat->buf_alloc_size >= size) {
		ret = 0;
		goto out_unlock;
	}

	if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) {
		dev_info(stat->isp->dev,
			 "%s: trying to allocate memory when busy\n",
			 stat->subdev.name);
		ret = -EBUSY;
		goto out_unlock;
	}

	/* Allocators below may sleep; don't hold the spinlock across them. */
	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);

	isp_stat_bufs_free(stat);

	if (!IS_COHERENT_BUF(stat))
		return isp_stat_bufs_alloc_iommu(stat, size);

	return isp_stat_bufs_alloc_dma(stat, size);

out_unlock:
	spin_unlock_irqrestore(&stat->isp->stat_lock, irqflags);
	return ret;
}
static int isp_stat_bufs_alloc_dma(struct ispstat *stat, unsigned int size) { int i; stat->buf_alloc_size = size; for (i = 0; i < STAT_MAX_BUFS; i++) { struct ispstat_buffer *buf = &stat->buf[i]; WARN_ON(buf->iommu_addr); buf->virt_addr = dma_alloc_coherent(stat->isp->dev, size, &buf->dma_addr, GFP_KERNEL | GFP_DMA); if (!buf->virt_addr || !buf->dma_addr) { dev_info(stat->isp->dev, "%s: Can't acquire memory for " "DMA buffer %d\n", stat->subdev.name, i); isp_stat_bufs_free(stat); return -ENOMEM; } buf->empty = 1; dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated." "dma_addr=0x%08lx virt_addr=0x%08lx\n", stat->subdev.name, i, (unsigned long)buf->dma_addr, (unsigned long)buf->virt_addr); } return 0; }
/*
 * omap3isp_stat_cleanup - Release all resources owned by a statistics module.
 * @stat: ISP statistics module
 *
 * Tears down the module in order: unregister the media entity, destroy the
 * ioctl serialization mutex, free the DMA statistics buffers, and finally
 * free the buffer descriptor array itself (allocated elsewhere; paired with
 * the kfree here).
 */
void omap3isp_stat_cleanup(struct ispstat *stat)
{
	media_entity_cleanup(&stat->subdev.entity);
	mutex_destroy(&stat->ioctl_lock);
	isp_stat_bufs_free(stat);
	/* stat->buf is the STAT_MAX_BUFS array of buffer descriptors. */
	kfree(stat->buf);
}
static int isp_stat_bufs_alloc_iommu(struct ispstat *stat, unsigned int size) { struct isp_device *isp = stat->isp; int i; stat->buf_alloc_size = size; for (i = 0; i < STAT_MAX_BUFS; i++) { struct ispstat_buffer *buf = &stat->buf[i]; struct iovm_struct *iovm; WARN_ON(buf->dma_addr); buf->iommu_addr = omap_iommu_vmalloc(isp->domain, isp->dev, 0, size, IOMMU_FLAG); if (IS_ERR((void *)buf->iommu_addr)) { dev_err(stat->isp->dev, "%s: Can't acquire memory for " "buffer %d\n", stat->subdev.name, i); isp_stat_bufs_free(stat); return -ENOMEM; } iovm = omap_find_iovm_area(isp->dev, buf->iommu_addr); if (!iovm || !dma_map_sg(isp->dev, iovm->sgt->sgl, iovm->sgt->nents, DMA_FROM_DEVICE)) { isp_stat_bufs_free(stat); return -ENOMEM; } buf->iovm = iovm; buf->virt_addr = omap_da_to_va(stat->isp->dev, (u32)buf->iommu_addr); buf->empty = 1; dev_dbg(stat->isp->dev, "%s: buffer[%d] allocated." "iommu_addr=0x%08lx virt_addr=0x%08lx", stat->subdev.name, i, buf->iommu_addr, (unsigned long)buf->virt_addr); } return 0; }
/*
 * omap3isp_stat_free - Free the memory owned by a statistics module.
 * @stat: ISP statistics module
 *
 * Releases the DMA statistics buffers and then the buffer descriptor array.
 * Unlike omap3isp_stat_cleanup(), this does not touch the media entity or
 * the ioctl mutex.
 */
void omap3isp_stat_free(struct ispstat *stat)
{
	isp_stat_bufs_free(stat);
	/* stat->buf is the STAT_MAX_BUFS array of buffer descriptors. */
	kfree(stat->buf);
}