static int isp_stat_bufs_alloc(struct ispstat *stat, u32 size)
{
        unsigned long flags;

        spin_lock_irqsave(&stat->isp->stat_lock, flags);

        BUG_ON(stat->locked_buf != NULL);

        /* Are the old buffers big enough? */
        if (stat->buf_alloc_size >= size) {
                spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
                return 0;
        }

        if (stat->state != ISPSTAT_DISABLED || stat->buf_processing) {
                dev_info(stat->isp->dev,
                         "%s: trying to allocate memory when busy\n",
                         stat->subdev.name);
                spin_unlock_irqrestore(&stat->isp->stat_lock, flags);
                return -EBUSY;
        }

        spin_unlock_irqrestore(&stat->isp->stat_lock, flags);

        isp_stat_bufs_free(stat);

        if (IS_COHERENT_BUF(stat))
                return isp_stat_bufs_alloc_dma(stat, size);
        else
                return isp_stat_bufs_alloc_iommu(stat, size);
}
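/*
 * For context, a minimal sketch of the coherent-path allocator called
 * above. The driver's real isp_stat_bufs_alloc_dma() is not part of this
 * excerpt and differs in details (error logging, debug prints, GFP flags);
 * only the dma_alloc_coherent() call per buffer slot and the
 * free-everything-on-failure pattern are assumed here.
 */
static int isp_stat_bufs_alloc_dma(struct ispstat *stat, unsigned int size)
{
        int i;

        stat->buf_alloc_size = size;

        for (i = 0; i < STAT_MAX_BUFS; i++) {
                struct ispstat_buffer *buf = &stat->buf[i];

                buf->virt_addr = dma_alloc_coherent(stat->isp->dev, size,
                                                    &buf->dma_addr,
                                                    GFP_KERNEL);
                if (!buf->virt_addr) {
                        /* Release any buffers allocated so far. */
                        isp_stat_bufs_free(stat);
                        return -ENOMEM;
                }
                buf->empty = 1;
        }

        return 0;
}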
static void isp_stat_bufs_free(struct ispstat *stat)
{
        struct isp_device *isp = dev_get_drvdata(stat->isp->dev);
        int i;

        for (i = 0; i < STAT_MAX_BUFS; i++) {
                struct ispstat_buffer *buf = &stat->buf[i];

                if (!IS_COHERENT_BUF(stat)) {
                        if (IS_ERR_OR_NULL((void *)buf->iommu_addr))
                                continue;
                        if (buf->iovm)
                                dma_unmap_sg(isp->dev, buf->iovm->sgt->sgl,
                                             buf->iovm->sgt->nents,
                                             DMA_FROM_DEVICE);
                        iommu_vfree(isp->iommu, buf->iommu_addr);
                } else {
                        if (!buf->virt_addr)
                                continue;
                        dma_free_coherent(stat->isp->dev,
                                          stat->buf_alloc_size,
                                          buf->virt_addr, buf->dma_addr);
                }
                buf->iommu_addr = 0;
                buf->iovm = NULL;
                buf->dma_addr = 0;
                buf->virt_addr = NULL;
                buf->empty = 1;
        }

        dev_dbg(stat->isp->dev, "%s: all buffers were freed.\n",
                stat->subdev.name);

        stat->buf_alloc_size = 0;
        stat->active_buf = NULL;
}
static void isp_stat_buf_sync_for_cpu(struct ispstat *stat,
                                      struct ispstat_buffer *buf)
{
        if (IS_COHERENT_BUF(stat))
                return;

        dma_sync_sg_for_cpu(stat->isp->dev, buf->iovm->sgt->sgl,
                            buf->iovm->sgt->nents, DMA_FROM_DEVICE);
}
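/*
 * A hedged sketch of the symmetric counterpart, which hands the buffer
 * back to the device before a new capture. The driver's actual
 * isp_stat_buf_sync_for_device() is not shown in this excerpt; this
 * version simply assumes it mirrors the function above, using
 * dma_sync_sg_for_device() on the same scatterlist.
 */
static void isp_stat_buf_sync_for_device(struct ispstat *stat,
                                         struct ispstat_buffer *buf)
{
        if (IS_COHERENT_BUF(stat))
                return;

        dma_sync_sg_for_device(stat->isp->dev, buf->iovm->sgt->sgl,
                               buf->iovm->sgt->nents, DMA_FROM_DEVICE);
}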
static void isp_stat_buf_sync_magic_for_device(struct ispstat *stat,
                                               struct ispstat_buffer *buf,
                                               u32 buf_size,
                                               enum dma_data_direction dir)
{
        if (IS_COHERENT_BUF(stat))
                return;

        __isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
                                  dma_sync_single_range_for_device);
}
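/*
 * Likewise, a sketch of the CPU-side counterpart: the same magic-word
 * sync helper, parameterized with dma_sync_single_range_for_cpu()
 * instead of the device variant. The driver's real
 * isp_stat_buf_sync_magic_for_cpu() is assumed to follow this shape;
 * it is not part of this excerpt.
 */
static void isp_stat_buf_sync_magic_for_cpu(struct ispstat *stat,
                                            struct ispstat_buffer *buf,
                                            u32 buf_size,
                                            enum dma_data_direction dir)
{
        if (IS_COHERENT_BUF(stat))
                return;

        __isp_stat_buf_sync_magic(stat, buf, buf_size, dir,
                                  dma_sync_single_range_for_cpu);
}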