struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) { struct msm_ringbuffer *ring; int ret; size = ALIGN(size, 4); /* size should be dword aligned */ ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) { ret = -ENOMEM; goto fail; } ring->gpu = gpu; ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC); if (IS_ERR(ring->bo)) { ret = PTR_ERR(ring->bo); ring->bo = NULL; goto fail; } ring->start = msm_gem_vaddr_locked(ring->bo); ring->end = ring->start + (size / 4); ring->cur = ring->start; ring->size = size; return ring; fail: if (ring) msm_ringbuffer_destroy(ring); return ERR_PTR(ret); }
/*
 * Tear down GPU state set up at init time.
 *
 * NOTE(review): teardown order matters — the ringbuffer's GPU-address
 * mapping is released before the ringbuffer (and its bo) is destroyed,
 * and the IOMMU domain is freed last, after the mappings are gone.
 */
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);

	/* all submitted work should have retired by now */
	WARN_ON(!list_empty(&gpu->active_list));

	bs_fini(gpu);	/* presumably bus-scaling teardown — confirm */

	if (gpu->rb) {
		/* drop the iova mapping before freeing the backing bo */
		if (gpu->rb_iova)
			msm_gem_put_iova(gpu->rb->bo, gpu->id);
		msm_ringbuffer_destroy(gpu->rb);
	}

	if (gpu->iommu)
		iommu_domain_free(gpu->iommu);
}
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size) { struct msm_ringbuffer *ring; int ret; if (WARN_ON(!is_power_of_2(size))) return ERR_PTR(-EINVAL); ring = kzalloc(sizeof(*ring), GFP_KERNEL); if (!ring) { ret = -ENOMEM; goto fail; } ring->gpu = gpu; ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC); if (IS_ERR(ring->bo)) { ret = PTR_ERR(ring->bo); ring->bo = NULL; goto fail; } ring->start = msm_gem_get_vaddr(ring->bo); if (IS_ERR(ring->start)) { ret = PTR_ERR(ring->start); goto fail; } ring->end = ring->start + (size / 4); ring->cur = ring->start; ring->size = size; return ring; fail: if (ring) msm_ringbuffer_destroy(ring); return ERR_PTR(ret); }