/* called from IRQ to update cursor related registers (if needed).  The
 * cursor registers, other than x/y position, appear not to be double
 * buffered, and changing them other than from vblank seems to trigger
 * underflow.
 */
static void update_cursor(struct drm_crtc *crtc)
{
        struct mdp4_crtc *mdp4_crtc = to_mdp4_crtc(crtc);
        struct mdp4_kms *mdp4_kms = get_kms(crtc);
        enum mdp4_dma dma = mdp4_crtc->dma;
        unsigned long flags;

        spin_lock_irqsave(&mdp4_crtc->cursor.lock, flags);
        if (mdp4_crtc->cursor.stale) {
                struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
                struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
                uint32_t iova = mdp4_crtc->cursor.next_iova;

                if (next_bo) {
                        /* take an obj ref + iova ref when we start scanning out: */
                        drm_gem_object_reference(next_bo);
                        msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);

                        /* enable cursor: */
                        mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
                                        MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
                                        MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
                        mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
                        mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
                                        MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
                                        MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
                } else {
                        /* disable cursor: */
                        mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
                                        mdp4_kms->blank_cursor_iova);
                }

                /* and drop the iova ref + obj ref when done scanning out: */
                if (prev_bo)
                        drm_flip_work_queue(&mdp4_crtc->unref_cursor_work, prev_bo);

                mdp4_crtc->cursor.scanout_bo = next_bo;
                mdp4_crtc->cursor.stale = false;
        }

        mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_POS(dma),
                        MDP4_DMA_CURSOR_POS_X(mdp4_crtc->cursor.x) |
                        MDP4_DMA_CURSOR_POS_Y(mdp4_crtc->cursor.y));

        spin_unlock_irqrestore(&mdp4_crtc->cursor.lock, flags);
}
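Because update_cursor() runs from IRQ context, it cannot drop the previous scanout bo's references directly (that would need struct_mutex); instead it queues the bo onto unref_cursor_work and lets a worker do the drop later. A minimal sketch of what that deferred callback might look like, assuming the drm_flip_work callback signature and that msm_gem_put_iova() is the put paired with the get above (the body is reconstructed from the ref/iova pairing, not quoted from the driver):

/* Sketch (assumption): runs from the flip-work's worker thread, where
 * it is safe to sleep and take locks, unlike the IRQ context above. */
static void unref_cursor_worker(struct drm_flip_work *work, void *val)
{
        struct mdp4_crtc *mdp4_crtc =
                        container_of(work, struct mdp4_crtc, unref_cursor_work);
        struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);

        /* drop the iova ref + obj ref taken in update_cursor(): */
        msm_gem_put_iova(val, mdp4_kms->id);
        drm_gem_object_unreference_unlocked(val);
}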
/* get iova, taking a reference.  Should have a matching put */
int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);
        int ret;

        /* this is safe right now because we don't unmap until the
         * bo is deleted:
         */
        if (msm_obj->domain[id].iova) {
                *iova = msm_obj->domain[id].iova;
                return 0;
        }

        mutex_lock(&obj->dev->struct_mutex);
        ret = msm_gem_get_iova_locked(obj, id, iova);
        mutex_unlock(&obj->dev->struct_mutex);
        return ret;
}
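The comment promises "a matching put", but note that the lockless fast path above is only safe because mappings are never torn down before the bo is deleted. That suggests the put is bookkeeping rather than an unmap. A sketch of what it could look like under that assumption (hypothetical body, consistent with the fast-path comment rather than copied from the driver):

/* Sketch (assumption): balances msm_gem_get_iova().  Deliberately does
 * not unmap -- the iova stays valid until the bo is deleted, which is
 * exactly what the unlocked fast path in msm_gem_get_iova() relies on. */
void msm_gem_put_iova(struct drm_gem_object *obj, int id)
{
        /* intentionally empty: the mapping is torn down at bo free time */
}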
/* add bo's to gpu's ring, and kick gpu: */
int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
                struct msm_file_private *ctx)
{
        struct drm_device *dev = gpu->dev;
        struct msm_drm_private *priv = dev->dev_private;
        int i, ret;

        mutex_lock(&dev->struct_mutex);

        submit->fence = ++priv->next_fence;

        gpu->submitted_fence = submit->fence;

        ret = gpu->funcs->submit(gpu, submit, ctx);
        priv->lastctx = ctx;

        for (i = 0; i < submit->nr_bos; i++) {
                struct msm_gem_object *msm_obj = submit->bos[i].obj;

                /* can't happen yet.. but when we add 2d support we'll have
                 * to deal w/ cross-ring synchronization:
                 */
                WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));

                if (!is_active(msm_obj)) {
                        uint32_t iova;

                        /* ring takes a reference to the bo and iova: */
                        drm_gem_object_reference(&msm_obj->base);
                        msm_gem_get_iova_locked(&msm_obj->base,
                                        submit->gpu->id, &iova);
                }

                if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
                        msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);

                if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
                        msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
        }
        hangcheck_timer_reset(gpu);
        mutex_unlock(&dev->struct_mutex);

        return ret;
}
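The per-bo loop distinguishes read and write access via msm_gem_move_to_active(), which is how later code can tell when a bo is no longer busy. A sketch of what that helper plausibly does, given how it is called here (the field names write_fence, read_fence, and mm_list are assumptions about struct msm_gem_object, not taken from the excerpt):

/* Sketch (assumption): record which gpu and which fence the bo is busy
 * on, and move it onto the gpu's active_list.  Caller (msm_gpu_submit)
 * holds struct_mutex, so the list manipulation needs no extra locking. */
void msm_gem_move_to_active(struct drm_gem_object *obj,
                struct msm_gpu *gpu, bool write, uint32_t fence)
{
        struct msm_gem_object *msm_obj = to_msm_bo(obj);

        msm_obj->gpu = gpu;
        if (write)
                msm_obj->write_fence = fence;   /* retired once gpu passes this fence */
        else
                msm_obj->read_fence = fence;
        list_del_init(&msm_obj->mm_list);
        list_add_tail(&msm_obj->mm_list, &gpu->active_list);
}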
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
                struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
                const char *name, const char *ioname, const char *irqname, int ringsz)
{
        int i, ret;

        gpu->dev = drm;
        gpu->funcs = funcs;
        gpu->name = name;

        INIT_LIST_HEAD(&gpu->active_list);
        INIT_WORK(&gpu->retire_work, retire_worker);
        INIT_WORK(&gpu->recover_work, recover_worker);
        setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
                        (unsigned long)gpu);

        BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));

        /* Map registers: */
        gpu->mmio = msm_ioremap(pdev, ioname, name);
        if (IS_ERR(gpu->mmio)) {
                ret = PTR_ERR(gpu->mmio);
                goto fail;
        }

        /* Get Interrupt: */
        gpu->irq = platform_get_irq_byname(pdev, irqname);
        if (gpu->irq < 0) {
                ret = gpu->irq;
                dev_err(drm->dev, "failed to get irq: %d\n", ret);
                goto fail;
        }

        ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
                        IRQF_TRIGGER_HIGH, gpu->name, gpu);
        if (ret) {
                dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
                goto fail;
        }

        /* Acquire clocks: */
        for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
                gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
                DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
                if (IS_ERR(gpu->grp_clks[i]))
                        gpu->grp_clks[i] = NULL;
        }

        gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
        DBG("ebi1_clk: %p", gpu->ebi1_clk);
        if (IS_ERR(gpu->ebi1_clk))
                gpu->ebi1_clk = NULL;

        /* Acquire regulators: */
        gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
        DBG("gpu_reg: %p", gpu->gpu_reg);
        if (IS_ERR(gpu->gpu_reg))
                gpu->gpu_reg = NULL;

        gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
        DBG("gpu_cx: %p", gpu->gpu_cx);
        if (IS_ERR(gpu->gpu_cx))
                gpu->gpu_cx = NULL;

        /* Setup IOMMU.. eventually we will (I think) do this once per context
         * and have separate page tables per context.  For now, to keep things
         * simple and to get something working, just use a single address space:
         */
        gpu->iommu = iommu_domain_alloc(&platform_bus_type);
        if (!gpu->iommu) {
                dev_err(drm->dev, "failed to allocate IOMMU\n");
                ret = -ENOMEM;
                goto fail;
        }
        gpu->id = msm_register_iommu(drm, gpu->iommu);

        /* Create ringbuffer: */
        gpu->rb = msm_ringbuffer_new(gpu, ringsz);
        if (IS_ERR(gpu->rb)) {
                ret = PTR_ERR(gpu->rb);
                gpu->rb = NULL;
                dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
                goto fail;
        }

        ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
        if (ret) {
                gpu->rb_iova = 0;
                dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
                goto fail;
        }

        bs_init(gpu, pdev);

        return 0;

fail:
        return ret;
}
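msm_gpu_init() arms hangcheck_handler via the legacy setup_timer() API (callback takes an unsigned long), and msm_gpu_submit() re-arms the timer after each submission via hangcheck_timer_reset(). A sketch of how that pair might fit together, using the submitted_fence written in msm_gpu_submit(); the 500 ms period, the funcs->last_fence() hook, and the hangcheck_fence field are assumptions for illustration:

#define HANGCHECK_PERIOD_MS 500 /* assumed check interval */

/* Sketch (assumption): (re)arm the timer; called after each submit. */
static void hangcheck_timer_reset(struct msm_gpu *gpu)
{
        mod_timer(&gpu->hangcheck_timer,
                        round_jiffies_up(jiffies + msecs_to_jiffies(HANGCHECK_PERIOD_MS)));
}

/* Sketch (assumption): compare retired vs. submitted fences to decide
 * whether the gpu made progress since the last check. */
static void hangcheck_handler(unsigned long data)
{
        struct msm_gpu *gpu = (struct msm_gpu *)data;
        uint32_t fence = gpu->funcs->last_fence(gpu);   /* assumed hook */

        if (fence != gpu->hangcheck_fence) {
                /* progress since last check: remember where we got to */
                gpu->hangcheck_fence = fence;
        } else if (fence < gpu->submitted_fence) {
                /* no progress, but work still outstanding: kick recovery
                 * (recover_work was set up in msm_gpu_init()): */
                schedule_work(&gpu->recover_work);
        }

        /* keep checking while work remains outstanding: */
        if (gpu->submitted_fence > fence)
                hangcheck_timer_reset(gpu);
}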