/*
 * kgsl_detach_pagetable_iommu_domain - detach the user and priv IOMMU
 * context devices from the domain stored in the MMU's hardware pagetable.
 * @mmu: KGSL MMU whose hwpagetable->priv holds the iommu_domain.
 *
 * Each context device is detached only if its *_attached flag is set, and
 * the flag is cleared afterwards so a later attach starts from a known
 * state. A missing hwpagetable or domain is a driver bug, hence BUG_ON.
 */
static void kgsl_detach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
	struct iommu_domain *domain;
	struct kgsl_iommu *iommu = mmu->priv;

	/* The hardware pagetable and its domain must exist at this point */
	BUG_ON(mmu->hwpagetable == NULL);
	BUG_ON(mmu->hwpagetable->priv == NULL);

	domain = mmu->hwpagetable->priv;

	if (iommu->iommu_user_dev_attached) {
		iommu_detach_device(domain, iommu->iommu_user_dev);
		iommu->iommu_user_dev_attached = 0;
		KGSL_MEM_INFO(mmu->device,
				"iommu %p detached from user dev of MMU: %p\n",
				domain, mmu);
	}
	if (iommu->iommu_priv_dev_attached) {
		iommu_detach_device(domain, iommu->iommu_priv_dev);
		iommu->iommu_priv_dev_attached = 0;
		KGSL_MEM_INFO(mmu->device,
				"iommu %p detached from priv dev of MMU: %p\n",
				domain, mmu);
	}
}
/*
 * vpe_disable - power down the VPE block (LGE variant).
 * @mctl: media controller owning the IOMMU domain the VPE was attached to.
 *
 * State transitions are guarded by vpe_ctrl->lock; the lock is dropped
 * before touching hardware so the teardown does not run with IRQs off.
 * Teardown order: detach IOMMU contexts, quiesce IRQ/tasklet, gate
 * clocks, drop the regulator, then mark the state IDLE. Returns 0.
 */
int vpe_disable(struct msm_cam_media_controller *mctl)
{
	int rc = 0;
	unsigned long flags = 0;

	D("%s", __func__);
	/* Already idle: nothing was enabled, so nothing to tear down */
	spin_lock_irqsave(&vpe_ctrl->lock, flags);
	if (vpe_ctrl->state == VPE_STATE_IDLE) {
		D("%s: VPE already disabled", __func__);
		spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
		return rc;
	}
	spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
#ifdef CONFIG_MSM_IOMMU
	iommu_detach_device(mctl->domain, vpe_ctrl->iommu_ctx_dst);
	iommu_detach_device(mctl->domain, vpe_ctrl->iommu_ctx_src);
/* LGE_CHANGE_S, Patch for ION free, 2013.1.8, gayoung85.lee[Start] */
#if defined(CONFIG_LGE_GK_CAMERA) ||defined(CONFIG_MACH_APQ8064_AWIFI)
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	msm_camera_v4l2_put_ion_client(mctl->pcam_ptr);
#endif
#endif
/* LGE_CHANGE_E, Patch for ION free, 2013.1.8, gayoung85.lee[End] */
#endif
	disable_irq(vpe_ctrl->vpeirq->start);
	tasklet_kill(&vpe_tasklet);
	/* Last argument 0 == disable the listed clocks */
	msm_cam_clk_enable(&vpe_ctrl->pdev->dev, vpe_clk_info,
			vpe_ctrl->vpe_clk, ARRAY_SIZE(vpe_clk_info), 0);
	regulator_disable(vpe_ctrl->fs_vpe);
	spin_lock_irqsave(&vpe_ctrl->lock, flags);
	vpe_ctrl->state = VPE_STATE_IDLE;
	spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
	return rc;
}
/*
 * vpe_disable - power down the VPE block.
 * @mctl: media controller owning the IOMMU domain the VPE was attached to.
 *
 * State transitions are guarded by vpe_ctrl->lock; the lock is dropped
 * before touching hardware so the teardown does not run with IRQs off.
 * Teardown order: detach IOMMU contexts, quiesce IRQ/tasklet, gate
 * clocks, drop the regulator, then mark the state IDLE. Returns 0.
 */
int vpe_disable(struct msm_cam_media_controller *mctl)
{
	int rc = 0;
	unsigned long flags = 0;

	D("%s", __func__);
	/* Already idle: nothing was enabled, so nothing to tear down */
	spin_lock_irqsave(&vpe_ctrl->lock, flags);
	if (vpe_ctrl->state == VPE_STATE_IDLE) {
		D("%s: VPE already disabled", __func__);
		spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
		return rc;
	}
	spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
#ifdef CONFIG_MSM_IOMMU
	iommu_detach_device(mctl->domain, vpe_ctrl->iommu_ctx_dst);
	iommu_detach_device(mctl->domain, vpe_ctrl->iommu_ctx_src);
#endif
	disable_irq(vpe_ctrl->vpeirq->start);
	tasklet_kill(&vpe_tasklet);
	/* Last argument 0 == disable the listed clocks */
	msm_cam_clk_enable(&vpe_ctrl->pdev->dev, vpe_clk_info,
			vpe_ctrl->vpe_clk, ARRAY_SIZE(vpe_clk_info), 0);
	regulator_disable(vpe_ctrl->fs_vpe);
	spin_lock_irqsave(&vpe_ctrl->lock, flags);
	vpe_ctrl->state = VPE_STATE_IDLE;
	spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
	return rc;
}
/*
 * a6xx_gmu_remove - tear down the GMU: stop it, release power domains,
 * MMIO mapping, HFI memory, IOMMU domain, IRQs and the device reference.
 * @a6xx_gpu: GPU whose embedded GMU is being removed.
 *
 * Safe to call when probe never completed: bails out early unless
 * gmu->initialized was set. The initialized flag is cleared last so the
 * function is effectively idempotent.
 */
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	if (!gmu->initialized)
		return;

	a6xx_gmu_stop(a6xx_gpu);

	pm_runtime_disable(gmu->dev);

	/* gxpd is optional; only release it if the attach succeeded */
	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
		pm_runtime_disable(gmu->gxpd);
		dev_pm_domain_detach(gmu->gxpd, false);
	}

	iounmap(gmu->mmio);
	gmu->mmio = NULL;

	a6xx_gmu_memory_free(gmu, gmu->hfi);

	iommu_detach_device(gmu->domain, gmu->dev);

	iommu_domain_free(gmu->domain);

	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	gmu->initialized = false;
}
/*
 * mdss_iommu_dettach - detach every MDSS context bank from its IOMMU
 * domain so the display hardware stops translating through them.
 * @mdata: MDSS driver data holding the per-domain iommu_map table.
 *
 * A domain that cannot be looked up is logged and skipped rather than
 * treated as fatal, so the remaining domains are still detached.
 * Always returns 0.
 */
int mdss_iommu_dettach(struct mdss_data_type *mdata)
{
	struct iommu_domain *domain;
	struct mdss_iommu_map_type *iomap;
	int i;

	/* Idempotent: nothing to do if no attach is outstanding */
	if (!mdata->iommu_attached) {
		pr_debug("mdp iommu already dettached\n");
		return 0;
	}

	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
		iomap = mdata->iommu_map + i;

		domain = msm_get_iommu_domain(iomap->domain_idx);
		if (!domain) {
			pr_err("unable to get iommu domain(%d)\n",
				iomap->domain_idx);
			continue;
		}
		iommu_detach_device(domain, iomap->ctx);
	}

	mdata->iommu_attached = false;

	return 0;
}
/*
 * kgsl_detach_pagetable_iommu_domain - Detach all IOMMU units from the
 * domain held in the MMU's hardware pagetable.
 * @mmu - Pointer to the device mmu structure
 *
 * Walks every IOMMU unit and every context device inside it, detaching
 * each device that is currently attached and clearing its attached flag.
 * After detaching, the IOMMU unit is not in use because the PTBR will
 * not be set after a detach.
 * Return - void
 */
static void kgsl_detach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu_pt *iommu_pt;
	struct kgsl_iommu *iommu = mmu->priv;
	int i, j;

	/* The hardware pagetable and its private data must exist here */
	BUG_ON(mmu->hwpagetable == NULL);
	BUG_ON(mmu->hwpagetable->priv == NULL);

	iommu_pt = mmu->hwpagetable->priv;

	for (i = 0; i < iommu->unit_count; i++) {
		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
		for (j = 0; j < iommu_unit->dev_count; j++) {
			/* Only detach devices that were actually attached */
			if (iommu_unit->dev[j].attached) {
				iommu_detach_device(iommu_pt->domain,
						iommu_unit->dev[j].dev);
				iommu_unit->dev[j].attached = false;
				KGSL_MEM_INFO(mmu->device, "iommu %p detached "
					"from user dev of MMU: %p\n",
					iommu_pt->domain, mmu);
			}
		}
	}
}
/**
 * Unregister a raw PCI device from the VM's IOMMU domain.
 *
 * Detaches the device from the domain only when it was marked as in use,
 * then clears the in-use flag. Missing VM data or a missing domain are
 * reported as errors without touching the device.
 *
 * @param pIns  Raw PCI instance being unregistered.
 * @return VINF_SUCCESS, VERR_INVALID_PARAMETER if the VM data is absent,
 *         VERR_NOT_FOUND if no IOMMU domain exists, or
 *         VERR_NOT_SUPPORTED when built without IOMMU support.
 */
int vboxPciOsDevUnregisterWithIommu(PVBOXRAWPCIINS pIns)
{
#ifdef VBOX_WITH_IOMMU
    PVBOXRAWPCIDRVVM pData = VBOX_DRV_VMDATA(pIns);

    if (!pData)
    {
        printk(KERN_DEBUG "vboxpci: VM data not inited (detach)\n");
        return VERR_INVALID_PARAMETER;
    }

    if (!pData->pIommuDomain)
    {
        printk(KERN_DEBUG "vboxpci: No IOMMU domain (detach)\n");
        return VERR_NOT_FOUND;
    }

    if (pIns->fIommuUsed)
    {
        iommu_detach_device(pData->pIommuDomain, &pIns->pPciDev->dev);
        printk(KERN_DEBUG "vboxpci: iommu_detach_device()\n");
        pIns->fIommuUsed = false;
    }

    return VINF_SUCCESS;
#else
    return VERR_NOT_SUPPORTED;
#endif
}
/*
 * kgsl_detach_pagetable_iommu_domain - Detach all IOMMU units from their
 * pagetable domains.
 * @mmu - Pointer to the device mmu structure
 *
 * For every IOMMU unit, each context device is detached from the domain
 * of the default pagetable — except the private context, which is
 * detached from the second (private bank) pagetable when one exists.
 * After detaching the IOMMU unit is not in use because the PTBR will
 * not be set after a detach.
 * Return - void
 */
static void kgsl_detach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu_pt *iommu_pt;
	struct kgsl_iommu *iommu = mmu->priv;
	int i, j;

	for (i = 0; i < iommu->unit_count; i++) {
		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
		/* Default pagetable domain unless overridden below */
		iommu_pt = mmu->defaultpagetable->priv;
		for (j = 0; j < iommu_unit->dev_count; j++) {
			/*
			 * If there is a 2nd default pagetable then priv domain
			 * is attached with this pagetable
			 */
			if (mmu->priv_bank_table &&
				(KGSL_IOMMU_CONTEXT_PRIV == j))
				iommu_pt = mmu->priv_bank_table->priv;
			if (iommu_unit->dev[j].attached) {
				iommu_detach_device(iommu_pt->domain,
						iommu_unit->dev[j].dev);
				iommu_unit->dev[j].attached = false;
				KGSL_MEM_INFO(mmu->device, "iommu %p detached "
					"from user dev of MMU: %p\n",
					iommu_pt->domain, mmu);
			}
		}
	}
}
/*
 * vpe_enable - bring the VPE block up: IRQ, regulator, clocks, then
 * attach both IOMMU context banks to the media controller's domain.
 * @clk_rate: requested clock rate (not consumed directly here).
 * @mctl: media controller providing the IOMMU domain.
 *
 * On any failure the goto ladder unwinds exactly the resources acquired
 * so far, in reverse order, and resets the state to IDLE.
 * Returns 0 on success or a negative errno.
 */
int vpe_enable(uint32_t clk_rate, struct msm_cam_media_controller *mctl)
{
	int rc = 0;
	unsigned long flags = 0;
	D("%s", __func__);
	/* don't change the order of clock and irq.*/
	spin_lock_irqsave(&vpe_ctrl->lock, flags);
	if (vpe_ctrl->state != VPE_STATE_IDLE) {
		pr_err("%s: VPE already enabled", __func__);
		spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
		return 0;
	}
	vpe_ctrl->state = VPE_STATE_INIT;
	spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
	enable_irq(vpe_ctrl->vpeirq->start);

	if (vpe_ctrl->fs_vpe) {
		rc = regulator_enable(vpe_ctrl->fs_vpe);
		if (rc) {
			pr_err("%s: Regulator enable failed\n", __func__);
			goto vpe_fs_failed;
		}
	}

	rc = msm_cam_clk_enable(&vpe_ctrl->pdev->dev, vpe_clk_info,
			vpe_ctrl->vpe_clk, ARRAY_SIZE(vpe_clk_info), 1);
	if (rc < 0)
		goto vpe_clk_failed;

#ifdef CONFIG_MSM_IOMMU
	rc = iommu_attach_device(mctl->domain, vpe_ctrl->iommu_ctx_src);
	if (rc < 0) {
		pr_err("%s: Device attach failed\n", __func__);
		goto src_attach_failed;
	}
	rc = iommu_attach_device(mctl->domain, vpe_ctrl->iommu_ctx_dst);
	if (rc < 0) {
		pr_err("%s: Device attach failed\n", __func__);
		goto dst_attach_failed;
	}
#endif
	return rc;

/* Error unwind: each label releases what was acquired before it */
#ifdef CONFIG_MSM_IOMMU
dst_attach_failed:
	iommu_detach_device(mctl->domain, vpe_ctrl->iommu_ctx_src);
src_attach_failed:
#endif
	msm_cam_clk_enable(&vpe_ctrl->pdev->dev, vpe_clk_info,
		vpe_ctrl->vpe_clk, ARRAY_SIZE(vpe_clk_info), 0);
vpe_clk_failed:
	if (vpe_ctrl->fs_vpe)
		regulator_disable(vpe_ctrl->fs_vpe);
vpe_fs_failed:
	disable_irq(vpe_ctrl->vpeirq->start);
	vpe_ctrl->state = VPE_STATE_IDLE;
	return rc;
}
/*
 * msm_iommu_detach - detach the GPU device from its IOMMU domain.
 *
 * The device is held powered via runtime PM for the duration of the
 * detach so the SMMU registers can be touched safely. The names/cnt
 * arguments are part of the msm_mmu funcs interface and unused here.
 */
static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
			     int cnt)
{
	struct device *dev = mmu->dev;
	struct msm_iommu *msm_iommu_dev = to_msm_iommu(mmu);

	pm_runtime_get_sync(dev);
	iommu_detach_device(msm_iommu_dev->domain, dev);
	pm_runtime_put_sync(dev);
}
/*
 * rockchip_drm_dma_detach_device - detach a CRTC/encoder device from the
 * DRM driver's shared IOMMU domain.
 *
 * No-op on platforms that run without an IOMMU.
 */
void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
				    struct device *dev)
{
	struct rockchip_drm_private *priv = drm_dev->dev_private;

	if (!is_support_iommu)
		return;

	iommu_detach_device(priv->domain, dev);
}
/*
 * drm_iommu_detach_device - detach device address space mapping from device
 *
 * @drm_dev: DRM device
 * @subdrv_dev: device to be detached
 *
 * This function should be called by sub drivers to detach it from iommu
 * mapping. Does nothing when no ARM DMA IOMMU mapping (or its domain)
 * exists; otherwise detaches the sub-device and releases the mapping.
 */
void drm_iommu_detach_device(struct drm_device *drm_dev,
				struct device *subdrv_dev)
{
	struct dma_iommu_mapping *mapping = drm_dev->dev->archdata.mapping;

	if (!mapping || !mapping->domain)
		return;

	iommu_detach_device(mapping->domain, subdrv_dev);
	drm_release_iommu_mapping(drm_dev);
}
/*
 * drm_iommu_detach_device - detach device address space mapping from device
 *
 * @drm_dev: DRM device
 * @subdrv_dev: device to be detached
 *
 * This function should be called by sub drivers to detach it from iommu
 * mapping. The detach path depends on the kernel's IOMMU/DMA config:
 * ARM DMA-IOMMU uses arm_iommu_detach_device(), plain IOMMU-DMA detaches
 * from the driver-private mapping. The DMA max segment size restriction
 * set at attach time is cleared in both cases.
 */
static void drm_iommu_detach_device(struct drm_device *drm_dev,
				struct device *subdrv_dev)
{
	struct exynos_drm_private *priv = drm_dev->dev_private;

	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
		arm_iommu_detach_device(subdrv_dev);
	else if (IS_ENABLED(CONFIG_IOMMU_DMA))
		iommu_detach_device(priv->mapping, subdrv_dev);

	clear_dma_max_seg_size(subdrv_dev);
}
/*
 * msm_jpeg_detach_iommu - detach every JPEG IOMMU context bank from the
 * device's domain, logging each domain/context pair as it goes.
 *
 * Always returns 0.
 */
static int msm_jpeg_detach_iommu(struct msm_jpeg_device *pgmn_dev)
{
	int idx;

	for (idx = 0; idx < pgmn_dev->iommu_cnt; idx++) {
		JPEG_DBG("%s:%d] dom 0x%lx ctx 0x%lx", __func__, __LINE__,
			(unsigned long)pgmn_dev->domain,
			(unsigned long)pgmn_dev->iommu_ctx_arr[idx]);
		iommu_detach_device(pgmn_dev->domain,
			pgmn_dev->iommu_ctx_arr[idx]);
	}

	return 0;
}
/*
 * a6xx_gmu_remove - tear down the GMU (early driver revision).
 * @a6xx_gpu: GPU whose embedded GMU is being removed.
 *
 * Bails out if the MMIO region was never mapped, i.e. probe did not get
 * far enough to need any teardown. Otherwise stops the GMU, masks its
 * interrupts and releases HFI memory and the IOMMU domain.
 */
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	/* MMIO mapping doubles as the "probe succeeded" marker here */
	if (IS_ERR_OR_NULL(gmu->mmio))
		return;

	pm_runtime_disable(gmu->dev);
	a6xx_gmu_stop(a6xx_gpu);

	a6xx_gmu_irq_disable(gmu);
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	iommu_detach_device(gmu->domain, gmu->dev);

	iommu_domain_free(gmu->domain);
}
/*
 * host1x_remove - driver remove callback: unwind host1x_probe() in
 * reverse order.
 * @pdev: the host1x platform device.
 *
 * Unregisters clients, shuts down interrupt and syncpoint handling,
 * asserts reset and gates the clock, then — if an IOMMU domain was set
 * up at probe time — releases the IOVA allocator and the domain.
 * Always returns 0.
 */
static int host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_unregister(host);
	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	reset_control_assert(host->rst);
	clk_disable_unprepare(host->clk);

	/* Only present when probe found an IOMMU on the platform bus */
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_device(host->domain, &pdev->dev);
		iommu_domain_free(host->domain);
	}

	return 0;
}
/*
 * msm_jpeg_platform_release - undo msm_jpeg_platform_init(): release the
 * IRQ, IOMMU attachments, bus client, clocks, regulator, MMIO mappings,
 * memory region and ION client, then mark the device idle.
 * @mem: memory resource to release.
 * @base: mapped register base to unmap.
 * @irq: IRQ number to free.
 * @context: actually a struct msm_jpeg_device pointer.
 *
 * Returns the result of the regulator disable (0 when no regulator was
 * held or it disabled cleanly).
 */
int msm_jpeg_platform_release(struct resource *mem, void *base, int irq,
	void *context)
{
	int result = 0;
	int i = 0;
	struct msm_jpeg_device *pgmn_dev =
		(struct msm_jpeg_device *) context;

	free_irq(irq, context);

#ifdef CONFIG_MSM_IOMMU
	for (i = 0; i < pgmn_dev->iommu_cnt; i++) {
		iommu_detach_device(pgmn_dev->domain,
				pgmn_dev->iommu_ctx_arr[i]);
		JPEG_DBG("%s:%d]", __func__, __LINE__);
	}
#endif

	if (pgmn_dev->jpeg_bus_client) {
		/* Drop the bandwidth vote before unregistering */
		msm_bus_scale_client_update_request(
			pgmn_dev->jpeg_bus_client, 0);
		msm_bus_scale_unregister_client(pgmn_dev->jpeg_bus_client);
	}

	/* Last argument 0 == disable the listed clocks */
	msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
		pgmn_dev->jpeg_clk, ARRAY_SIZE(jpeg_8x_clk_info), 0);
	JPEG_DBG("%s:%d] clock disbale done", __func__, __LINE__);

	if (pgmn_dev->jpeg_fs) {
		result = regulator_disable(pgmn_dev->jpeg_fs);
		if (!result)
			regulator_put(pgmn_dev->jpeg_fs);
		else
			JPEG_PR_ERR("%s:%d] regulator disable failed %d",
				__func__, __LINE__, result);
		pgmn_dev->jpeg_fs = NULL;
	}
	iounmap(pgmn_dev->jpeg_vbif);
	iounmap(base);
	release_mem_region(mem->start, resource_size(mem));
	ion_client_destroy(pgmn_dev->jpeg_client);
	pgmn_dev->state = MSM_JPEG_IDLE;
	JPEG_DBG("%s:%d] success\n", __func__, __LINE__);
	return result;
}
/*
 * kgsl_detach_pagetable_iommu_domain - detach every IOMMU context device
 * from the domain stored in the MMU's hardware pagetable.
 * @mmu: KGSL MMU whose hwpagetable->priv holds the iommu_domain.
 *
 * NOTE(review): unlike sibling variants, this loop does not test
 * dev[i].attached before detaching — it detaches unconditionally and
 * clears the flag; confirm callers guarantee all devices are attached.
 */
static void kgsl_detach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
	struct iommu_domain *domain;
	struct kgsl_iommu *iommu = mmu->priv;
	int i;

	/* The hardware pagetable and its domain must exist at this point */
	BUG_ON(mmu->hwpagetable == NULL);
	BUG_ON(mmu->hwpagetable->priv == NULL);

	domain = mmu->hwpagetable->priv;

	for (i = 0; i < iommu->dev_count; i++) {
		iommu_detach_device(domain, iommu->dev[i].dev);
		iommu->dev[i].attached = 0;
		KGSL_MEM_INFO(mmu->device,
				"iommu %p detached from user dev of MMU: %p\n",
				domain, mmu);
	}
}
/*
 * kgsl_attach_pagetable_iommu_domain - attach the user and priv IOMMU
 * context devices to the domain held in the MMU's hardware pagetable.
 * @mmu: KGSL MMU whose hwpagetable->priv holds the iommu_domain.
 *
 * Each device is attached only if present and not already attached.
 * If attaching the priv device fails after the user device succeeded,
 * the user device is detached again so no partial attach is left behind.
 * Returns 0 on success or the iommu_attach_device() error.
 */
static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
	struct iommu_domain *domain;
	int ret = 0;
	struct kgsl_iommu *iommu = mmu->priv;

	/* The hardware pagetable and its domain must exist at this point */
	BUG_ON(mmu->hwpagetable == NULL);
	BUG_ON(mmu->hwpagetable->priv == NULL);

	domain = mmu->hwpagetable->priv;

	if (iommu->iommu_user_dev && !iommu->iommu_user_dev_attached) {
		ret = iommu_attach_device(domain, iommu->iommu_user_dev);
		if (ret) {
			KGSL_MEM_ERR(mmu->device,
			"Failed to attach device, err %d\n", ret);
			goto done;
		}
		iommu->iommu_user_dev_attached = 1;
		KGSL_MEM_INFO(mmu->device,
				"iommu %p attached to user dev of MMU: %p\n",
				domain, mmu);
	}
	if (iommu->iommu_priv_dev && !iommu->iommu_priv_dev_attached) {
		ret = iommu_attach_device(domain, iommu->iommu_priv_dev);
		if (ret) {
			KGSL_MEM_ERR(mmu->device,
				"Failed to attach device, err %d\n", ret);
			/* Roll back the user attach to keep state consistent */
			iommu_detach_device(domain, iommu->iommu_user_dev);
			iommu->iommu_user_dev_attached = 0;
			goto done;
		}
		iommu->iommu_priv_dev_attached = 1;
		KGSL_MEM_INFO(mmu->device,
				"iommu %p attached to priv dev of MMU: %p\n",
				domain, mmu);
	}
done:
	return ret;
}
/*
 * msm_fd_release - Fd device release method.
 * @file: Pointer to file struct.
 *
 * Releases everything msm_fd_open() acquired, in reverse order: the vb2
 * queue, statistics buffer, work buffer mapping, IOMMU attachment, ION
 * client and the v4l2 file handle, then frees the context itself.
 * Always returns 0.
 */
static int msm_fd_release(struct file *file)
{
	struct fd_ctx *ctx = msm_fd_ctx_from_fh(file->private_data);

	vb2_queue_release(&ctx->vb2_q);

	vfree(ctx->stats);

	/* Work buffer is only mapped when a handle was allocated */
	if (ctx->work_buf.handle)
		msm_fd_hw_unmap_buffer(&ctx->work_buf);

	iommu_detach_device(ctx->fd_device->iommu_domain,
		ctx->fd_device->iommu_dev);

	ion_client_destroy(ctx->mem_pool.client);

	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);

	kfree(ctx);

	return 0;
}
/**
 * Unregister a raw PCI device from the VM's IOMMU domain (Linux backend).
 *
 * Detaches the device from the domain only when it was marked in use and
 * clears the flag. EFLAGS.AC is saved/restored around the kernel calls
 * (IPRT SMAP discipline).
 *
 * @param pIns  Raw PCI instance being unregistered.
 * @return VINF_SUCCESS, VERR_INVALID_PARAMETER if VM data is absent,
 *         VERR_NOT_FOUND if no IOMMU domain exists, or
 *         VERR_NOT_SUPPORTED when built without IOMMU support.
 */
static int vboxPciLinuxDevUnregisterWithIommu(PVBOXRAWPCIINS pIns)
{
#ifdef VBOX_WITH_IOMMU
    int rc = VINF_SUCCESS;
    struct pci_dev *pPciDev = pIns->pPciDev;
    PVBOXRAWPCIDRVVM pData = VBOX_DRV_VMDATA(pIns);
    IPRT_LINUX_SAVE_EFL_AC();

    if (RT_LIKELY(pData))
    {
        if (RT_LIKELY(pData->pIommuDomain))
        {
            if (pIns->fIommuUsed)
            {
                iommu_detach_device(pData->pIommuDomain, &pIns->pPciDev->dev);
                vbpci_printk(KERN_DEBUG, pPciDev, "detached from IOMMU\n");
                pIns->fIommuUsed = false;
            }
        }
        else
        {
            vbpci_printk(KERN_DEBUG, pPciDev,
                         "cannot detach from IOMMU, no domain\n");
            rc = VERR_NOT_FOUND;
        }
    }
    else
    {
        vbpci_printk(KERN_DEBUG, pPciDev,
                     "cannot detach from IOMMU, no VM data\n");
        rc = VERR_INVALID_PARAMETER;
    }
    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
#else
    return VERR_NOT_SUPPORTED;
#endif
}
int kvm_deassign_device(struct kvm *kvm, struct kvm_assigned_dev_kernel *assigned_dev) { struct iommu_domain *domain = kvm->arch.iommu_domain; struct pci_dev *pdev = NULL; /* check if iommu exists and in use */ if (!domain) return 0; pdev = assigned_dev->dev; if (pdev == NULL) return -ENODEV; iommu_detach_device(domain, &pdev->dev); printk(KERN_DEBUG "deassign device: host bdf = %x:%x:%x\n", assigned_dev->host_busnr, PCI_SLOT(assigned_dev->host_devfn), PCI_FUNC(assigned_dev->host_devfn)); return 0; }
/*
 * mdss_iommu_dettach - detach all MDP context banks from the single MDSS
 * IOMMU domain (legacy single-domain variant).
 *
 * Returns 0 on success or when already detached, -EINVAL if the domain
 * cannot be looked up.
 */
int mdss_iommu_dettach(void)
{
	struct iommu_domain *domain;
	int i, domain_idx;

	/* Idempotent: nothing to do if no attach is outstanding */
	if (!mdss_res->iommu_attached) {
		pr_warn("mdp iommu already dettached\n");
		return 0;
	}

	domain_idx = mdss_get_iommu_domain();
	domain = msm_get_iommu_domain(domain_idx);
	if (!domain) {
		pr_err("unable to get iommu domain(%d)\n", domain_idx);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++)
		iommu_detach_device(domain, mdp_iommu_ctx[i].ctx);

	mdss_res->iommu_attached = false;

	return 0;
}
/*
 * iovmm_deactivate - detach @dev from its exynos IOVMM domain.
 * @dev: device previously activated via the IOVMM.
 *
 * exynos_get_iovmm() can return NULL for a device that never had an
 * IOVMM set up; the original code dereferenced the result
 * unconditionally, which would oops. Treat that case as a no-op.
 */
void iovmm_deactivate(struct device *dev)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);

	/* No IOVMM attached to this device: nothing to detach */
	if (!vmm)
		return;

	iommu_detach_device(vmm->domain, dev);
}
/* * msm_fd_open - Fd device open method. * @file: Pointer to file struct. */ static int msm_fd_open(struct file *file) { struct msm_fd_device *device = video_drvdata(file); struct video_device *video = video_devdata(file); struct fd_ctx *ctx; int ret; ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); if (!ctx) return -ENOMEM; ctx->fd_device = device; /* Initialize work buffer handler */ ctx->work_buf.pool = NULL; ctx->work_buf.fd = -1; /* Set ctx defaults */ ctx->settings.speed = ctx->fd_device->clk_rates_num; ctx->settings.angle_index = MSM_FD_DEF_ANGLE_IDX; ctx->settings.direction_index = MSM_FD_DEF_DIR_IDX; ctx->settings.min_size_index = MSM_FD_DEF_MIN_SIZE_IDX; ctx->settings.threshold = MSM_FD_DEF_THRESHOLD; atomic_set(&ctx->subscribed_for_event, 0); v4l2_fh_init(&ctx->fh, video); file->private_data = &ctx->fh; v4l2_fh_add(&ctx->fh); ctx->vb2_q.drv_priv = ctx; ctx->vb2_q.mem_ops = &msm_fd_vb2_mem_ops; ctx->vb2_q.ops = &msm_fd_vb2_q_ops; ctx->vb2_q.buf_struct_size = sizeof(struct msm_fd_buffer); ctx->vb2_q.type = V4L2_BUF_TYPE_VIDEO_OUTPUT; ctx->vb2_q.io_modes = VB2_USERPTR; ctx->vb2_q.timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY; ret = vb2_queue_init(&ctx->vb2_q); if (ret < 0) { dev_err(device->dev, "Error queue init\n"); goto error_vb2_queue_init; } ctx->mem_pool.client = msm_ion_client_create(MSM_FD_DRV_NAME); if (IS_ERR_OR_NULL(ctx->mem_pool.client)) { dev_err(device->dev, "Error ion client create\n"); goto error_ion_client_create; } ctx->mem_pool.domain_num = ctx->fd_device->iommu_domain_num; ret = iommu_attach_device(ctx->fd_device->iommu_domain, ctx->fd_device->iommu_dev); if (ret) { dev_err(device->dev, "Can not attach iommu domain\n"); goto error_iommu_attach; } ctx->stats = vmalloc(sizeof(*ctx->stats) * MSM_FD_MAX_RESULT_BUFS); if (!ctx->stats) { dev_err(device->dev, "No memory for face statistics\n"); ret = -ENOMEM; goto error_stats_vmalloc; } return 0; error_stats_vmalloc: iommu_detach_device(ctx->fd_device->iommu_domain, ctx->fd_device->iommu_dev); 
error_iommu_attach: ion_client_destroy(ctx->mem_pool.client); error_ion_client_create: vb2_queue_release(&ctx->vb2_q); error_vb2_queue_init: v4l2_fh_del(&ctx->fh); v4l2_fh_exit(&ctx->fh); kfree(ctx); return ret; }
int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node) { struct a6xx_gmu *gmu = &a6xx_gpu->gmu; struct platform_device *pdev = of_find_device_by_node(node); int ret; if (!pdev) return -ENODEV; gmu->dev = &pdev->dev; of_dma_configure(gmu->dev, node, false); /* Fow now, don't do anything fancy until we get our feet under us */ gmu->idle_level = GMU_IDLE_STATE_ACTIVE; pm_runtime_enable(gmu->dev); gmu->gx = devm_regulator_get(gmu->dev, "vdd"); /* Get the list of clocks */ ret = a6xx_gmu_clocks_probe(gmu); if (ret) return ret; /* Set up the IOMMU context bank */ ret = a6xx_gmu_memory_probe(gmu); if (ret) return ret; /* Allocate memory for for the HFI queues */ gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K); if (IS_ERR(gmu->hfi)) goto err; /* Allocate memory for the GMU debug region */ gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K); if (IS_ERR(gmu->debug)) goto err; /* Map the GMU registers */ gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu"); /* Map the GPU power domain controller registers */ gmu->pdc_mmio = a6xx_gmu_get_mmio(pdev, "gmu_pdc"); if (IS_ERR(gmu->mmio) || IS_ERR(gmu->pdc_mmio)) goto err; /* Get the HFI and GMU interrupts */ gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq); gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq); if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0) goto err; /* Set up a tasklet to handle GMU HFI responses */ tasklet_init(&gmu->hfi_tasklet, a6xx_hfi_task, (unsigned long) gmu); /* Get the power levels for the GMU and GPU */ a6xx_gmu_pwrlevels_probe(gmu); /* Set up the HFI queues */ a6xx_hfi_init(gmu); return 0; err: a6xx_gmu_memory_free(gmu, gmu->hfi); if (gmu->domain) { iommu_detach_device(gmu->domain, gmu->dev); iommu_domain_free(gmu->domain); } return -ENODEV; }
/*
 * msm_jpeg_platform_init - bring up the JPEG engine: resources, bus
 * client, MMIO mappings, regulator, clocks, VBIF, IOMMU attachments and
 * the IRQ handler, then create the ION client.
 * @pdev: platform device carrying the MEM/IRQ resources.
 * @mem/@base/@irq: out parameters for the claimed region, mapping, IRQ.
 * @handler: IRQ handler to install.
 * @context: actually a struct msm_jpeg_device pointer.
 *
 * Fix: if iommu_attach_device() failed partway through the attach loop,
 * contexts attached on earlier iterations were never detached (the
 * fail_iommu label skips the detach). Unwind the partial attaches
 * before jumping to the error ladder.
 *
 * Returns 0 on success or a negative errno; the error ladder releases
 * resources in reverse acquisition order.
 */
int msm_jpeg_platform_init(struct platform_device *pdev,
	struct resource **mem,
	void **base,
	int *irq,
	irqreturn_t (*handler) (int, void *),
	void *context)
{
	int rc = -1;
	int i = 0;
	int jpeg_irq;
	struct resource *jpeg_mem, *jpeg_io, *jpeg_irq_res;
	void *jpeg_base;
	struct msm_jpeg_device *pgmn_dev =
		(struct msm_jpeg_device *) context;

	pgmn_dev->state = MSM_JPEG_IDLE;

	jpeg_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!jpeg_mem) {
		JPEG_PR_ERR("%s: no mem resource?\n", __func__);
		return -ENODEV;
	}

	jpeg_irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!jpeg_irq_res) {
		JPEG_PR_ERR("no irq resource?\n");
		return -ENODEV;
	}
	jpeg_irq = jpeg_irq_res->start;
	JPEG_DBG("%s base address: 0x%x, jpeg irq number: %d\n", __func__,
		jpeg_mem->start, jpeg_irq);

	pgmn_dev->jpeg_bus_client =
		msm_bus_scale_register_client(&msm_jpeg_bus_client_pdata);
	if (!pgmn_dev->jpeg_bus_client) {
		JPEG_PR_ERR("%s: Registration Failed!\n", __func__);
		pgmn_dev->jpeg_bus_client = 0;
		return -EINVAL;
	}
	msm_bus_scale_client_update_request(
		pgmn_dev->jpeg_bus_client, 1);

	jpeg_io = request_mem_region(jpeg_mem->start,
		resource_size(jpeg_mem), pdev->name);
	if (!jpeg_io) {
		JPEG_PR_ERR("%s: region already claimed\n", __func__);
		return -EBUSY;
	}

	jpeg_base = ioremap(jpeg_mem->start, resource_size(jpeg_mem));
	if (!jpeg_base) {
		rc = -ENOMEM;
		JPEG_PR_ERR("%s: ioremap failed\n", __func__);
		goto fail_remap;
	}

	pgmn_dev->jpeg_fs = regulator_get(&pgmn_dev->pdev->dev, "vdd");
	rc = regulator_enable(pgmn_dev->jpeg_fs);
	if (rc) {
		JPEG_PR_ERR("%s:%d]jpeg regulator get failed\n",
			__func__, __LINE__);
		goto fail_fs;
	}

	rc = msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
		pgmn_dev->jpeg_clk, ARRAY_SIZE(jpeg_8x_clk_info), 1);
	if (rc < 0) {
		JPEG_PR_ERR("%s: clk failed rc = %d\n", __func__, rc);
		goto fail_clk;
	}

	pgmn_dev->hw_version = readl_relaxed(jpeg_base + JPEG_HW_VERSION);
	JPEG_DBG_HIGH("%s:%d] jpeg HW version 0x%x", __func__, __LINE__,
		pgmn_dev->hw_version);

	pgmn_dev->jpeg_vbif = ioremap(VBIF_BASE_ADDRESS, VBIF_REGION_SIZE);
	if (!pgmn_dev->jpeg_vbif) {
		rc = -ENOMEM;
		JPEG_PR_ERR("%s:%d] ioremap failed\n", __func__, __LINE__);
		goto fail_vbif;
	}
	JPEG_DBG("%s:%d] jpeg_vbif 0x%x", __func__, __LINE__,
		(uint32_t)pgmn_dev->jpeg_vbif);

#ifdef CONFIG_MSM_IOMMU
	for (i = 0; i < pgmn_dev->iommu_cnt; i++) {
		rc = iommu_attach_device(pgmn_dev->domain,
				pgmn_dev->iommu_ctx_arr[i]);
		if (rc < 0) {
			JPEG_PR_ERR("%s: Device attach failed\n", __func__);
			/*
			 * Unwind contexts attached on earlier iterations;
			 * fail_iommu below does not detach anything.
			 */
			while (--i >= 0)
				iommu_detach_device(pgmn_dev->domain,
					pgmn_dev->iommu_ctx_arr[i]);
			rc = -ENODEV;
			goto fail_iommu;
		}
		JPEG_DBG("%s:%d] dom 0x%x ctx 0x%x", __func__, __LINE__,
					(uint32_t)pgmn_dev->domain,
					(uint32_t)pgmn_dev->iommu_ctx_arr[i]);
	}
#endif
	set_vbif_params(pgmn_dev, pgmn_dev->jpeg_vbif);

#ifdef CONFIG_MACH_LGE
	*mem = jpeg_mem;
	*base = jpeg_base;
#endif

	rc = request_irq(jpeg_irq, handler, IRQF_TRIGGER_RISING, "jpeg",
		context);
	if (rc) {
		JPEG_PR_ERR("%s: request_irq failed, %d\n", __func__,
			jpeg_irq);
		goto fail_request_irq;
	}

#ifndef CONFIG_MACH_LGE /* QCT origin */
	*mem = jpeg_mem;
	*base = jpeg_base;
#endif
	*irq = jpeg_irq;

	pgmn_dev->jpeg_client = msm_ion_client_create(-1, "camera/jpeg");
	JPEG_DBG("%s:%d] success\n", __func__, __LINE__);

	pgmn_dev->state = MSM_JPEG_INIT;
	return rc;

/* Error unwind: each label releases what was acquired before it */
fail_request_irq:
#ifdef CONFIG_MACH_LGE
	*mem = NULL;
	*base = NULL;
#endif
#ifdef CONFIG_MSM_IOMMU
	for (i = 0; i < pgmn_dev->iommu_cnt; i++) {
		JPEG_PR_ERR("%s:%d] dom 0x%x ctx 0x%x", __func__, __LINE__,
					(uint32_t)pgmn_dev->domain,
					(uint32_t)pgmn_dev->iommu_ctx_arr[i]);
		iommu_detach_device(pgmn_dev->domain,
					pgmn_dev->iommu_ctx_arr[i]);
	}
#endif

fail_iommu:
	iounmap(pgmn_dev->jpeg_vbif);

fail_vbif:
	msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
	pgmn_dev->jpeg_clk, ARRAY_SIZE(jpeg_8x_clk_info), 0);

fail_clk:
	rc = regulator_disable(pgmn_dev->jpeg_fs);
	if (!rc)
		regulator_put(pgmn_dev->jpeg_fs);
	else
		JPEG_PR_ERR("%s:%d] regulator disable failed %d",
			__func__, __LINE__, rc);
	pgmn_dev->jpeg_fs = NULL;

fail_fs:
	iounmap(jpeg_base);

fail_remap:
	release_mem_region(jpeg_mem->start, resource_size(jpeg_mem));
	JPEG_DBG("%s:%d] fail\n", __func__, __LINE__);
	return rc;
}
/*
 * host1x_probe - driver probe: map registers, acquire clock and reset,
 * optionally set up an IOMMU domain with an IOVA allocator, then bring
 * up channels, syncpoints, interrupts and register the host1x bus.
 * @pdev: the host1x platform device.
 *
 * On any failure the goto ladder unwinds exactly the resources acquired
 * so far, in reverse order. Returns 0 on success or a negative errno.
 */
static int host1x_probe(struct platform_device *pdev)
{
	const struct of_device_id *id;
	struct host1x *host;
	struct resource *regs;
	int syncpt_irq;
	int err;

	id = of_match_device(host1x_of_match, &pdev->dev);
	if (!id)
		return -EINVAL;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "failed to get registers\n");
		return -ENXIO;
	}

	syncpt_irq = platform_get_irq(pdev, 0);
	if (syncpt_irq < 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENXIO;
	}

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;
	host->info = id->data;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	dma_set_mask_and_coherent(host->dev, host->info->dma_mask);

	/* SoC-generation-specific initialization hook */
	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		err = PTR_ERR(host->clk);
		return err;
	}

	host->rst = devm_reset_control_get(&pdev->dev, "host1x");
	if (IS_ERR(host->rst)) {
		err = PTR_ERR(host->rst);
		dev_err(&pdev->dev, "failed to get reset: %d\n", err);
		return err;
	}

	/*
	 * With an IOMMU present, allocate a domain and carve the IOVA
	 * allocator out of the domain's aperture, in units of the
	 * smallest supported page size.
	 */
	if (iommu_present(&platform_bus_type)) {
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain)
			return -ENOMEM;

		err = iommu_attach_device(host->domain, &pdev->dev);
		if (err)
			goto fail_free_domain;

		geometry = &host->domain->geometry;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order,
				 geometry->aperture_start >> order,
				 geometry->aperture_end >> order);
		host->iova_end = geometry->aperture_end;
	}

	err = host1x_channel_list_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto fail_detach_device;
	}

	err = clk_prepare_enable(host->clk);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto fail_detach_device;
	}

	err = reset_control_deassert(host->rst);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
		goto fail_unprepare_disable;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto fail_reset_assert;
	}

	err = host1x_intr_init(host, syncpt_irq);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto fail_deinit_syncpt;
	}

	host1x_debug_init(host);

	err = host1x_register(host);
	if (err < 0)
		goto fail_deinit_intr;

	return 0;

/* Error unwind: each label releases what was acquired before it */
fail_deinit_intr:
	host1x_intr_deinit(host);
fail_deinit_syncpt:
	host1x_syncpt_deinit(host);
fail_reset_assert:
	reset_control_assert(host->rst);
fail_unprepare_disable:
	clk_disable_unprepare(host->clk);
fail_detach_device:
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_device(host->domain, &pdev->dev);
	}
fail_free_domain:
	if (host->domain)
		iommu_domain_free(host->domain);

	return err;
}
/*
 * vpe_enable - bring the VPE block up (LGE variant): IRQ, regulator,
 * clocks, then attach both IOMMU context banks to the media controller's
 * domain and grab the ION client reference.
 * @clk_rate: requested clock rate (not consumed directly here).
 * @mctl: media controller providing the IOMMU domain.
 *
 * On any failure the goto ladder unwinds exactly the resources acquired
 * so far, in reverse order, and resets the state to IDLE.
 * Returns 0 on success or a negative errno.
 */
int vpe_enable(uint32_t clk_rate, struct msm_cam_media_controller *mctl)
{
	int rc = 0;
	unsigned long flags = 0;
	D("%s", __func__);
	/* don't change the order of clock and irq.*/
	spin_lock_irqsave(&vpe_ctrl->lock, flags);
	if (vpe_ctrl->state != VPE_STATE_IDLE) {
		pr_err("%s: VPE already enabled", __func__);
		spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
		return 0;
	}
	vpe_ctrl->state = VPE_STATE_INIT;
	spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
	enable_irq(vpe_ctrl->vpeirq->start);

	if (vpe_ctrl->fs_vpe) {
		rc = regulator_enable(vpe_ctrl->fs_vpe);
		if (rc) {
			pr_err("%s: Regulator enable failed\n", __func__);
			goto vpe_fs_failed;
		}
	}

	rc = msm_cam_clk_enable(&vpe_ctrl->pdev->dev, vpe_clk_info,
			vpe_ctrl->vpe_clk, ARRAY_SIZE(vpe_clk_info), 1);
	if (rc < 0)
		goto vpe_clk_failed;

#ifdef CONFIG_MSM_IOMMU
	rc = iommu_attach_device(mctl->domain, vpe_ctrl->iommu_ctx_src);
	if (rc < 0) {
		pr_err("%s: Device attach failed\n", __func__);
		goto src_attach_failed;
	}
	rc = iommu_attach_device(mctl->domain, vpe_ctrl->iommu_ctx_dst);
	if (rc < 0) {
		pr_err("%s: Device attach failed\n", __func__);
		goto dst_attach_failed;
	}
/* LGE_CHANGE_S, Patch for ION free, 2013.1.8, gayoung85.lee[Start] */
#if defined(CONFIG_LGE_GK_CAMERA) ||defined(CONFIG_MACH_APQ8064_AWIFI)
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	msm_camera_v4l2_get_ion_client(mctl->pcam_ptr);
#endif
#endif
/* LGE_CHANGE_E, Patch for ION free, 2013.1.8, gayoung85.lee[End] */
#endif
	return rc;

/* Error unwind: each label releases what was acquired before it */
#ifdef CONFIG_MSM_IOMMU
dst_attach_failed:
	iommu_detach_device(mctl->domain, vpe_ctrl->iommu_ctx_src);
src_attach_failed:
#endif
	msm_cam_clk_enable(&vpe_ctrl->pdev->dev, vpe_clk_info,
		vpe_ctrl->vpe_clk, ARRAY_SIZE(vpe_clk_info), 0);
vpe_clk_failed:
	if (vpe_ctrl->fs_vpe)
		regulator_disable(vpe_ctrl->fs_vpe);
vpe_fs_failed:
	disable_irq(vpe_ctrl->vpeirq->start);
	vpe_ctrl->state = VPE_STATE_IDLE;
	return rc;
}
/*
 * a6xx_gmu_init - locate and initialize the GMU sub-device.
 * @a6xx_gpu: GPU the GMU belongs to.
 * @node: device-tree node of the GMU.
 *
 * of_find_device_by_node() takes a reference on the platform device;
 * every error path funnels through err_put_device so that reference is
 * always dropped. Returns 0 on success or a negative errno.
 */
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* Fow now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		goto err_put_device;

	/* Set up the IOMMU context bank */
	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		goto err_put_device;

	/* Allocate memory for for the HFI queues */
	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->hfi))
		goto err_memory;

	/* Allocate memory for the GMU debug region */
	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->debug))
		goto err_memory;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio))
		goto err_memory;

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err_mmio;

	/*
	 * Get a link to the GX power domain to reset the GPU in case of GMU
	 * crash
	 */
	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	gmu->initialized = true;

	return 0;

err_mmio:
	iounmap(gmu->mmio);
	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);
err_memory:
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	if (gmu->domain) {
		iommu_detach_device(gmu->domain, gmu->dev);

		iommu_domain_free(gmu->domain);
	}
	ret = -ENODEV;

err_put_device:
	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);
	return ret;
}