struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
{
	struct msm_ringbuffer *ring;
	int ret;

	size = ALIGN(size, 4);   /* size should be dword aligned */

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		ret = -ENOMEM;
		goto fail;
	}

	ring->gpu = gpu;
	ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
	if (IS_ERR(ring->bo)) {
		ret = PTR_ERR(ring->bo);
		ring->bo = NULL;
		goto fail;
	}

	ring->start = msm_gem_vaddr_locked(ring->bo);
	ring->end   = ring->start + (size / 4);
	ring->cur   = ring->start;

	ring->size = size;

	return ring;

fail:
	if (ring)
		msm_ringbuffer_destroy(ring);
	return ERR_PTR(ret);
}
struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
{
	struct msm_ringbuffer *ring;
	int ret;

	if (WARN_ON(!is_power_of_2(size)))
		return ERR_PTR(-EINVAL);

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring) {
		ret = -ENOMEM;
		goto fail;
	}

	ring->gpu = gpu;
	ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
	if (IS_ERR(ring->bo)) {
		ret = PTR_ERR(ring->bo);
		ring->bo = NULL;
		goto fail;
	}

	ring->start = msm_gem_get_vaddr(ring->bo);
	if (IS_ERR(ring->start)) {
		ret = PTR_ERR(ring->start);
		goto fail;
	}
	ring->end   = ring->start + (size / 4);
	ring->cur   = ring->start;

	ring->size = size;

	return ring;

fail:
	if (ring)
		msm_ringbuffer_destroy(ring);
	return ERR_PTR(ret);
}
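Both revisions of msm_ringbuffer_new() report failure through the kernel's ERR_PTR convention, so callers must test the result with IS_ERR() rather than comparing against NULL. A minimal caller sketch, assuming only the function shown above; setup_ring() is a hypothetical helper, not part of the driver:

/* Hedged sketch: consuming an ERR_PTR-style constructor.
 * setup_ring() is illustrative; SZ_32K comes from <linux/sizes.h>. */
static int setup_ring(struct msm_gpu *gpu)
{
	struct msm_ringbuffer *ring = msm_ringbuffer_new(gpu, SZ_32K);

	if (IS_ERR(ring))
		return PTR_ERR(ring);	/* propagate the encoded errno */

	gpu->rb = ring;
	return 0;
}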
/* convenience method to construct a GEM buffer object, and userspace handle */
int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
		uint32_t size, uint32_t flags, uint32_t *handle)
{
	struct drm_gem_object *obj;
	int ret;

	ret = mutex_lock_interruptible(&dev->struct_mutex);
	if (ret)
		return ret;

	obj = msm_gem_new(dev, size, flags);

	mutex_unlock(&dev->struct_mutex);

	if (IS_ERR(obj))
		return PTR_ERR(obj);

	ret = drm_gem_handle_create(file, obj, handle);

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(obj);

	return ret;
}
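In the driver this helper sits behind the GEM_NEW ioctl. A thin wrapper in that style might look like the following; the args fields are assumed from the drm_msm_gem_new uapi struct, so treat this as a sketch rather than the exact upstream handler:

static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
		struct drm_file *file)
{
	struct drm_msm_gem_new *args = data;

	/* hand the user-supplied size/flags straight to the helper;
	 * on success args->handle is filled in for userspace */
	return msm_gem_new_handle(dev, file, args->size,
			args->flags, &args->handle);
}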
struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
	struct platform_device *pdev = to_platform_device(dev->dev);
	struct mdp4_platform_config *config = mdp4_get_config(pdev);
	struct mdp4_kms *mdp4_kms;
	struct msm_kms *kms = NULL;
	struct msm_gem_address_space *aspace;
	int irq, ret;

	mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
	if (!mdp4_kms) {
		dev_err(dev->dev, "failed to allocate kms\n");
		ret = -ENOMEM;
		goto fail;
	}

	mdp_kms_init(&mdp4_kms->base, &kms_funcs);

	kms = &mdp4_kms->base.base;

	mdp4_kms->dev = dev;

	mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
	if (IS_ERR(mdp4_kms->mmio)) {
		ret = PTR_ERR(mdp4_kms->mmio);
		goto fail;
	}

	irq = platform_get_irq(pdev, 0);
	if (irq < 0) {
		ret = irq;
		dev_err(dev->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	kms->irq = irq;

	/* NOTE: driver for this regulator still missing upstream.. use
	 * _get_exclusive() and ignore the error if it does not exist
	 * (and hope that the bootloader left it on for us)
	 */
	mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
	if (IS_ERR(mdp4_kms->vdd))
		mdp4_kms->vdd = NULL;

	if (mdp4_kms->vdd) {
		ret = regulator_enable(mdp4_kms->vdd);
		if (ret) {
			dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
			goto fail;
		}
	}

	mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(mdp4_kms->clk)) {
		dev_err(dev->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mdp4_kms->clk);
		goto fail;
	}

	mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(mdp4_kms->pclk))
		mdp4_kms->pclk = NULL;

	// XXX if (rev >= MDP_REV_42) { ???
	mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
	if (IS_ERR(mdp4_kms->lut_clk)) {
		dev_err(dev->dev, "failed to get lut_clk\n");
		ret = PTR_ERR(mdp4_kms->lut_clk);
		goto fail;
	}

	mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "bus_clk");
	if (IS_ERR(mdp4_kms->axi_clk)) {
		dev_err(dev->dev, "failed to get axi_clk\n");
		ret = PTR_ERR(mdp4_kms->axi_clk);
		goto fail;
	}

	clk_set_rate(mdp4_kms->clk, config->max_clk);
	clk_set_rate(mdp4_kms->lut_clk, config->max_clk);

	pm_runtime_enable(dev->dev);
	mdp4_kms->rpm_enabled = true;

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	mdp4_enable(mdp4_kms);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
	mdp4_disable(mdp4_kms);
	mdelay(16);

	if (config->iommu) {
		aspace = msm_gem_address_space_create(&pdev->dev,
				config->iommu, "mdp4");
		if (IS_ERR(aspace)) {
			ret = PTR_ERR(aspace);
			goto fail;
		}

		kms->aspace = aspace;

		ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret)
			goto fail;
	} else {
		dev_info(dev->dev, "no iommu, fallback to phys "
				"contig buffers for scanout\n");
		aspace = NULL;
	}

	ret = modeset_init(mdp4_kms);
	if (ret) {
		dev_err(dev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

	mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
	if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
		ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
		dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
		mdp4_kms->blank_cursor_bo = NULL;
		goto fail;
	}

	ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, kms->aspace,
			&mdp4_kms->blank_cursor_iova);
	if (ret) {
		dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
		goto fail;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = 2048;
	dev->mode_config.max_height = 2048;

	return kms;

fail:
	if (kms)
		mdp4_destroy(kms);
	return ERR_PTR(ret);
}
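The KMS constructor follows the same ERR_PTR convention, and its fail: path already unwinds partial state through mdp4_destroy(), so a caller only needs to propagate the errno. An illustrative consumer, not the exact upstream call site; msm_init_kms() is a hypothetical name, and dev->dev_private is assumed to be the driver's msm_drm_private:

/* Sketch of consuming mdp4_kms_init() during driver load. */
static int msm_init_kms(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = mdp4_kms_init(dev);

	if (IS_ERR(kms))
		return PTR_ERR(kms);	/* init already cleaned up after itself */

	priv->kms = kms;
	return 0;
}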
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
{
	struct adreno_platform_config *config = pdev->dev.platform_data;
	struct msm_gpu *gpu = &adreno_gpu->base;
	int ret;

	adreno_gpu->funcs = funcs;
	adreno_gpu->info = adreno_info(config->rev);
	adreno_gpu->gmem = adreno_gpu->info->gmem;
	adreno_gpu->revn = adreno_gpu->info->revn;
	adreno_gpu->rev = config->rev;

	gpu->fast_rate = config->fast_rate;
	gpu->bus_freq = config->bus_freq;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
	gpu->bus_scale_table = config->bus_scale_table;
#endif

	DBG("fast_rate=%u, slow_rate=27000000, bus_freq=%u",
			gpu->fast_rate, gpu->bus_freq);

	ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
			adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
			RB_SIZE);
	if (ret)
		return ret;

	pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
	pm_runtime_use_autosuspend(&pdev->dev);
	pm_runtime_enable(&pdev->dev);

	ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
				adreno_gpu->info->pm4fw, ret);
		return ret;
	}

	ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
	if (ret) {
		dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
				adreno_gpu->info->pfpfw, ret);
		return ret;
	}

	if (gpu->aspace && gpu->aspace->mmu) {
		struct msm_mmu *mmu = gpu->aspace->mmu;
		ret = mmu->funcs->attach(mmu, iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret)
			return ret;
	}

	mutex_lock(&drm->struct_mutex);
	adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
			MSM_BO_UNCACHED);
	mutex_unlock(&drm->struct_mutex);
	if (IS_ERR(adreno_gpu->memptrs_bo)) {
		ret = PTR_ERR(adreno_gpu->memptrs_bo);
		adreno_gpu->memptrs_bo = NULL;
		dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
		return ret;
	}

	adreno_gpu->memptrs = msm_gem_get_vaddr(adreno_gpu->memptrs_bo);
	if (IS_ERR(adreno_gpu->memptrs)) {
		dev_err(drm->dev, "could not vmap memptrs\n");
		return -ENOMEM;
	}

	ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
			&adreno_gpu->memptrs_iova);
	if (ret) {
		dev_err(drm->dev, "could not map memptrs: %d\n", ret);
		return ret;
	}

	return 0;
}
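Each resource taken in adreno_gpu_init() has a matching put-side call. A teardown sketch under the same era's API, assuming the get/put pairs used above; the real adreno_gpu_cleanup() may order things differently:

/* Hedged sketch of the unwind path for adreno_gpu_init(). */
void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
{
	if (adreno_gpu->memptrs_bo) {
		msm_gem_put_vaddr(adreno_gpu->memptrs_bo);
		drm_gem_object_unreference_unlocked(adreno_gpu->memptrs_bo);
	}
	/* release_firmware() tolerates NULL, so partial init is fine */
	release_firmware(adreno_gpu->pm4);
	release_firmware(adreno_gpu->pfp);
	msm_gpu_cleanup(&adreno_gpu->base);
}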
struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
	struct platform_device *pdev = dev->platformdev;
	struct mdp4_platform_config *config = mdp4_get_config(pdev);
	struct mdp4_kms *mdp4_kms;
	struct msm_kms *kms = NULL;
	struct msm_mmu *mmu;
	int ret;

	mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
	if (!mdp4_kms) {
		dev_err(dev->dev, "failed to allocate kms\n");
		ret = -ENOMEM;
		goto fail;
	}

	mdp_kms_init(&mdp4_kms->base, &kms_funcs);

	kms = &mdp4_kms->base.base;

	mdp4_kms->dev = dev;

	mdp4_kms->mmio = msm_ioremap(pdev, NULL, "MDP4");
	if (IS_ERR(mdp4_kms->mmio)) {
		ret = PTR_ERR(mdp4_kms->mmio);
		goto fail;
	}

	mdp4_kms->dsi_pll_vdda =
			devm_regulator_get_optional(&pdev->dev, "dsi_pll_vdda");
	if (IS_ERR(mdp4_kms->dsi_pll_vdda))
		mdp4_kms->dsi_pll_vdda = NULL;

	mdp4_kms->dsi_pll_vddio =
			devm_regulator_get_optional(&pdev->dev, "dsi_pll_vddio");
	if (IS_ERR(mdp4_kms->dsi_pll_vddio))
		mdp4_kms->dsi_pll_vddio = NULL;

	mdp4_kms->vdd = devm_regulator_get_exclusive(&pdev->dev, "vdd");
	if (IS_ERR(mdp4_kms->vdd))
		mdp4_kms->vdd = NULL;

	if (mdp4_kms->vdd) {
		ret = regulator_enable(mdp4_kms->vdd);
		if (ret) {
			dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
			goto fail;
		}
	}

	mdp4_kms->clk = devm_clk_get(&pdev->dev, "core_clk");
	if (IS_ERR(mdp4_kms->clk)) {
		dev_err(dev->dev, "failed to get core_clk\n");
		ret = PTR_ERR(mdp4_kms->clk);
		goto fail;
	}

	mdp4_kms->pclk = devm_clk_get(&pdev->dev, "iface_clk");
	if (IS_ERR(mdp4_kms->pclk))
		mdp4_kms->pclk = NULL;

	// XXX if (rev >= MDP_REV_42) { ???
	mdp4_kms->lut_clk = devm_clk_get(&pdev->dev, "lut_clk");
	if (IS_ERR(mdp4_kms->lut_clk)) {
		dev_err(dev->dev, "failed to get lut_clk\n");
		ret = PTR_ERR(mdp4_kms->lut_clk);
		goto fail;
	}

	mdp4_kms->axi_clk = devm_clk_get(&pdev->dev, "mdp_axi_clk");
	if (IS_ERR(mdp4_kms->axi_clk)) {
		dev_err(dev->dev, "failed to get axi_clk\n");
		ret = PTR_ERR(mdp4_kms->axi_clk);
		goto fail;
	}

	clk_set_rate(mdp4_kms->clk, config->max_clk);
	clk_set_rate(mdp4_kms->lut_clk, config->max_clk);

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	mdp4_enable(mdp4_kms);
	mdp4_write(mdp4_kms, REG_MDP4_DTV_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_LCDC_ENABLE, 0);
	mdp4_write(mdp4_kms, REG_MDP4_DSI_ENABLE, 0);
	mdp4_disable(mdp4_kms);
	mdelay(16);

	if (config->iommu) {
		mmu = msm_iommu_new(&pdev->dev, config->iommu);
		if (IS_ERR(mmu)) {
			ret = PTR_ERR(mmu);
			goto fail;
		}
		ret = mmu->funcs->attach(mmu, iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret)
			goto fail;
	} else {
		dev_info(dev->dev, "no iommu, fallback to phys "
				"contig buffers for scanout\n");
		mmu = NULL;
	}

	mdp4_kms->id = msm_register_mmu(dev, mmu);
	if (mdp4_kms->id < 0) {
		ret = mdp4_kms->id;
		dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
		goto fail;
	}

	ret = modeset_init(mdp4_kms);
	if (ret) {
		dev_err(dev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

	mutex_lock(&dev->struct_mutex);
	mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
	mutex_unlock(&dev->struct_mutex);
	if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
		ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
		dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
		mdp4_kms->blank_cursor_bo = NULL;
		goto fail;
	}

	ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
			&mdp4_kms->blank_cursor_iova);
	if (ret) {
		dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
		goto fail;
	}

	return kms;

fail:
	if (kms)
		mdp4_destroy(kms);
	return ERR_PTR(ret);
}