Example #1
int vpe_enable(uint32_t clk_rate, struct msm_cam_media_controller *mctl)
{
	int rc = 0;
	unsigned long flags = 0;
	D("%s", __func__);
	/* don't change the order of clock and irq.*/
	spin_lock_irqsave(&vpe_ctrl->lock, flags);
	if (vpe_ctrl->state != VPE_STATE_IDLE) {
		pr_err("%s: VPE already enabled", __func__);
		spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
		return 0;
	}
	vpe_ctrl->state = VPE_STATE_INIT;
	spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
	enable_irq(vpe_ctrl->vpeirq->start);

	if (vpe_ctrl->fs_vpe) {
		rc = regulator_enable(vpe_ctrl->fs_vpe);
		if (rc) {
			pr_err("%s: Regulator enable failed\n", __func__);
			goto vpe_fs_failed;
		}
	}

	rc = msm_cam_clk_enable(&vpe_ctrl->pdev->dev, vpe_clk_info,
			vpe_ctrl->vpe_clk, ARRAY_SIZE(vpe_clk_info), 1);
	if (rc < 0)
		goto vpe_clk_failed;

#ifdef CONFIG_MSM_IOMMU
	rc = iommu_attach_device(mctl->domain, vpe_ctrl->iommu_ctx_src);
	if (rc < 0) {
		pr_err("%s: Device attach failed\n", __func__);
		goto src_attach_failed;
	}
	rc = iommu_attach_device(mctl->domain, vpe_ctrl->iommu_ctx_dst);
	if (rc < 0) {
		pr_err("%s: Device attach failed\n", __func__);
		goto dst_attach_failed;
	}
#endif
	return rc;

#ifdef CONFIG_MSM_IOMMU
dst_attach_failed:
	iommu_detach_device(mctl->domain, vpe_ctrl->iommu_ctx_src);
src_attach_failed:
#endif
	msm_cam_clk_enable(&vpe_ctrl->pdev->dev, vpe_clk_info,
		vpe_ctrl->vpe_clk, ARRAY_SIZE(vpe_clk_info), 0);
vpe_clk_failed:
	if (vpe_ctrl->fs_vpe)
		regulator_disable(vpe_ctrl->fs_vpe);
vpe_fs_failed:
	disable_irq(vpe_ctrl->vpeirq->start);
	vpe_ctrl->state = VPE_STATE_IDLE;
	return rc;
}
Example #2
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	int ret;

	/*
	 * The GMU address space is hardcoded to treat the range
	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
	 * between the GMU and the CPU will live in this space
	 */
	gmu->uncached_iova_base = 0x60000000;


	gmu->domain = iommu_domain_alloc(&platform_bus_type);
	if (!gmu->domain)
		return -ENODEV;

	ret = iommu_attach_device(gmu->domain, gmu->dev);

	if (ret) {
		iommu_domain_free(gmu->domain);
		gmu->domain = NULL;
	}

	return ret;
}
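The probe above leaves gmu->dev attached on success. A matching teardown reverses the two steps in order: detach the device, then free the domain. A minimal sketch (the name a6xx_gmu_memory_remove is assumed here, not taken from the source):

static void a6xx_gmu_memory_remove(struct a6xx_gmu *gmu)
{
	/* Sketch only: mirrors a6xx_gmu_memory_probe() above. */
	if (!gmu->domain)
		return;

	iommu_detach_device(gmu->domain, gmu->dev);	/* undo attach */
	iommu_domain_free(gmu->domain);			/* undo alloc */
	gmu->domain = NULL;
}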
Example #3
static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
	struct iommu_domain *domain;
	struct kgsl_iommu *iommu = mmu->priv;
	int i, ret = 0;

	BUG_ON(mmu->hwpagetable == NULL);
	BUG_ON(mmu->hwpagetable->priv == NULL);

	domain = mmu->hwpagetable->priv;

	for (i = 0; i < iommu->dev_count; i++) {
		if (iommu->dev[i].attached == 0) {
			ret = iommu_attach_device(domain, iommu->dev[i].dev);
			if (ret) {
				KGSL_MEM_ERR(mmu->device,
					"Failed to attach device, err %d\n",
						ret);
				goto done;
			}

			iommu->dev[i].attached = 1;
			KGSL_MEM_INFO(mmu->device,
				"iommu %p attached to user dev of MMU: %p\n",
				domain, mmu);
		}
	}

done:
	return ret;
}
Example #4
int mdss_iommu_attach(void)
{
	struct iommu_domain *domain;
	int i, domain_idx;

	if (mdss_res->iommu_attached) {
		pr_warn("mdp iommu already attached\n");
		return 0;
	}

	domain_idx = mdss_get_iommu_domain();
	domain = msm_get_iommu_domain(domain_idx);
	if (!domain) {
		pr_err("unable to get iommu domain(%d)\n", domain_idx);
		return -EINVAL;
	}

	for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++) {
		if (iommu_attach_device(domain, mdp_iommu_ctx[i].ctx)) {
			WARN(1, "could not attach iommu domain %d to ctx %s\n",
				domain_idx, mdp_iommu_ctx[i].name);
			/* roll back the contexts attached so far */
			while (--i >= 0)
				iommu_detach_device(domain,
					mdp_iommu_ctx[i].ctx);
			return -EINVAL;
		}
	}
	mdss_res->iommu_attached = true;

	return 0;
}
Example #5
void
gckIOMMU_Destory(
    IN gckOS Os,
    IN gckIOMMU Iommu
    )
{
    gcmkHEADER();

    if (Iommu)
    {
        if (Iommu->domain && Iommu->device)
        {
            /* Detach the device before freeing its domain. */
            iommu_detach_device(Iommu->domain, Iommu->device);
        }

        if (Iommu->domain)
        {
            iommu_domain_free(Iommu->domain);
        }

        gcmkOS_SAFE_FREE(Os, Iommu);
    }

    gcmkFOOTER_NO();
}
Example #6
int mdss_iommu_attach(struct mdss_data_type *mdata)
{
	struct iommu_domain *domain;
	struct mdss_iommu_map_type *iomap;
	int i;

	if (mdata->iommu_attached) {
		pr_debug("mdp iommu already attached\n");
		return 0;
	}

	for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
		iomap = mdata->iommu_map + i;

		domain = msm_get_iommu_domain(iomap->domain_idx);
		if (!domain) {
			WARN(1, "could not get iommu domain %d for client %s\n",
				iomap->domain_idx, iomap->client_name);
			continue;
		}
		if (iommu_attach_device(domain, iomap->ctx))
			WARN(1, "could not attach iommu client %s to ctx %s\n",
				iomap->client_name, iomap->ctx_name);
	}

	mdata->iommu_attached = true;
	complete_all(&mdata->iommu_attach_done);
	return 0;
}
Example #7
/*
 * drm_iommu_attach_device- attach device to iommu mapping
 *
 * @drm_dev: DRM device
 * @subdrv_dev: device to be attach
 *
 * This function should be called by sub drivers to attach it to iommu
 * mapping.
 */
static int drm_iommu_attach_device(struct drm_device *drm_dev,
				struct device *subdrv_dev)
{
	struct exynos_drm_private *priv = drm_dev->dev_private;
	int ret = 0;

	if (get_dma_ops(priv->dma_dev) != get_dma_ops(subdrv_dev)) {
		DRM_DEV_ERROR(subdrv_dev, "Device %s lacks support for IOMMU\n",
			  dev_name(subdrv_dev));
		return -EINVAL;
	}

	ret = configure_dma_max_seg_size(subdrv_dev);
	if (ret)
		return ret;

	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
		if (to_dma_iommu_mapping(subdrv_dev))
			arm_iommu_detach_device(subdrv_dev);

		ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
	} else if (IS_ENABLED(CONFIG_IOMMU_DMA)) {
		ret = iommu_attach_device(priv->mapping, subdrv_dev);
	}

	if (ret)
		clear_dma_max_seg_size(subdrv_dev);

	return ret;
}
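A detach helper would undo the same two steps under the same config switches. A hedged sketch (drm_iommu_detach_device is modeled on the attach path above; treat the exact body as an assumption rather than the driver's verbatim code):

static void drm_iommu_detach_device(struct drm_device *drm_dev,
				struct device *subdrv_dev)
{
	struct exynos_drm_private *priv = drm_dev->dev_private;

	/* Sketch: mirror of drm_iommu_attach_device() above. */
	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
		arm_iommu_detach_device(subdrv_dev);
	else if (IS_ENABLED(CONFIG_IOMMU_DMA))
		iommu_detach_device(priv->mapping, subdrv_dev);

	clear_dma_max_seg_size(subdrv_dev);
}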
Example #8
static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu_pt *iommu_pt;
	struct kgsl_iommu *iommu = mmu->priv;
	int i, j, ret = 0;

	for (i = 0; i < iommu->unit_count; i++) {
		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
		iommu_pt = mmu->defaultpagetable->priv;
		for (j = 0; j < iommu_unit->dev_count; j++) {
			if (mmu->priv_bank_table &&
				(KGSL_IOMMU_CONTEXT_PRIV == j))
				iommu_pt = mmu->priv_bank_table->priv;
			if (!iommu_unit->dev[j].attached) {
				ret = iommu_attach_device(iommu_pt->domain,
							iommu_unit->dev[j].dev);
				if (ret) {
					KGSL_MEM_ERR(mmu->device,
						"Failed to attach device, err %d\n",
						ret);
					goto done;
				}
				iommu_unit->dev[j].attached = true;
				KGSL_MEM_INFO(mmu->device,
				"iommu pt %p attached to dev %p, ctx_id %d\n",
				iommu_pt->domain, iommu_unit->dev[j].dev,
				iommu_unit->dev[j].ctx_id);
			}
		}
	}
done:
	return ret;
}
Example #9
gceSTATUS
gckIOMMU_Construct(
    IN gckOS Os,
    OUT gckIOMMU * Iommu
    )
{
    gceSTATUS status;
    gckIOMMU iommu = gcvNULL;
    struct device *dev;
    int ret;

    gcmkHEADER();

    dev = &Os->device->platform->device->dev;

    gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsIOMMU), (gctPOINTER *)&iommu));

    gckOS_ZeroMemory(iommu, gcmSIZEOF(gcsIOMMU));

    iommu->domain = iommu_domain_alloc(&platform_bus_type);

    if (!iommu->domain)
    {
        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "iommu_domain_alloc() fail");

        gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
    }

    iommu_set_fault_handler(iommu->domain, _IOMMU_Fault_Handler, dev);

    ret = iommu_attach_device(iommu->domain, dev);

    if (ret)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_INFO, gcvZONE_OS, "iommu_attach_device() fail %d", ret);

        gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
    }

    iommu->device = dev;

    _FlatMapping(iommu);

    *Iommu = iommu;

    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:

    gckIOMMU_Destory(Os, iommu);

    gcmkFOOTER();
    return status;
}
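gckIOMMU_Construct installs a fault handler before attaching, but _IOMMU_Fault_Handler's body is not part of this excerpt. A sketch with the signature iommu_set_fault_handler() expects:

static int _IOMMU_Fault_Handler(struct iommu_domain *domain,
				struct device *dev, unsigned long iova,
				int flags, void *token)
{
	/* Sketch only: the real handler body is not shown in the source. */
	dev_err(dev, "iommu fault at iova 0x%lx (flags 0x%x)\n", iova, flags);

	/* A non-zero return reports the fault as unhandled. */
	return -ENOSYS;
}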
Example #10
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

		arm_iommu_detach_device(dev);
		arm_iommu_release_mapping(mapping);
	}
#endif

	if (!tdev->func->iommu_bit)
		return;

	mutex_init(&tdev->iommu.mutex);

	if (iommu_present(&platform_bus_type)) {
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;

		/*
		 * An IOMMU is only usable if it supports page sizes smaller
		 * or equal to the system's PAGE_SIZE, with a preference if
		 * both are equal.
		 */
		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
				   (1ULL << tdev->func->iommu_bit) >>
				   tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}

	return;

detach_device:
	iommu_detach_device(tdev->iommu.domain, dev);
free_domain:
	iommu_domain_free(tdev->iommu.domain);
error:
	tdev->iommu.domain = NULL;
	tdev->iommu.pgshift = 0;
	dev_err(dev, "cannot initialize IOMMU MM\n");
#endif
}
Example #11
static int msm_iommu_attach(struct msm_mmu *mmu, const char * const *names,
			    int cnt)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);
	int ret;

	pm_runtime_get_sync(mmu->dev);
	ret = iommu_attach_device(iommu->domain, mmu->dev);
	pm_runtime_put_sync(mmu->dev);

	return ret;
}
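msm_iommu_attach brackets the attach with pm_runtime calls so the SMMU is powered while its context bank is programmed. A detach path would plausibly mirror that bracketing; this sketch of msm_iommu_detach is inferred from the attach above, not quoted from the driver:

static void msm_iommu_detach(struct msm_mmu *mmu, const char * const *names,
			     int cnt)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	/* Sketch: keep the SMMU powered while the context bank is torn down. */
	pm_runtime_get_sync(mmu->dev);
	iommu_detach_device(iommu->domain, mmu->dev);
	pm_runtime_put_sync(mmu->dev);
}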
Example #12
int hisi_ion_enable_iommu(struct platform_device *pdev)
{
	int ret;
	struct iommu_domain_capablity data;
	struct device *dev = &pdev->dev;
	struct hisi_iommu_domain *hisi_domain;
	struct device_node *np = pdev->dev.of_node;

	printk(KERN_ERR"in %s start \n",__func__);
	hisi_domain = kzalloc(sizeof(*hisi_domain), GFP_KERNEL);
	if (!hisi_domain) {
		dbg("alloc hisi_domain object fail \n");
		return -ENOMEM;
	}
	if (!iommu_present(dev->bus)) {
		dbg("iommu not found\n");
		kfree(hisi_domain);
		return 0;
	}

	/* create iommu domain */
	hisi_domain->domain = iommu_domain_alloc(dev->bus);
	if (!hisi_domain->domain) {
		ret = -EINVAL;
		goto error;
	}
	ret = iommu_attach_device(hisi_domain->domain, dev);
	if (ret)
		goto error;
	get_range_info(np, hisi_domain, &data);
	/* align mean in this pool allocation buffer is aligned by iommu align request*/
	hisi_domain->iova_pool = iova_pool_setup(data.iova_start,
			data.iova_end, data.iova_align);
	if (!hisi_domain->iova_pool) {
		ret = -EINVAL;
		goto error;
	}

	/* this is a global pointer */
	hisi_iommu_domain_p = hisi_domain;

	dbg("in %s end \n",__func__);
	return 0;

error:
	WARN(1, "hisi_iommu_domain_init failed!\n");
	if (hisi_domain->iova_pool)
		iova_pool_destory(hisi_domain->iova_pool);
	if (hisi_domain->domain)
		iommu_domain_free(hisi_domain->domain);
	kfree(hisi_domain);

	return ret;
}
Example #13
static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
	struct iommu_domain *domain;
	int ret = 0;
	struct kgsl_iommu *iommu = mmu->priv;

	BUG_ON(mmu->hwpagetable == NULL);
	BUG_ON(mmu->hwpagetable->priv == NULL);

	domain = mmu->hwpagetable->priv;

	if (iommu->iommu_user_dev && !iommu->iommu_user_dev_attached) {
		ret = iommu_attach_device(domain, iommu->iommu_user_dev);
		if (ret) {
			KGSL_MEM_ERR(mmu->device,
			"Failed to attach device, err %d\n", ret);
			goto done;
		}
		iommu->iommu_user_dev_attached = 1;
		KGSL_MEM_INFO(mmu->device,
				"iommu %p attached to user dev of MMU: %p\n",
				domain, mmu);
	}
	if (iommu->iommu_priv_dev && !iommu->iommu_priv_dev_attached) {
		ret = iommu_attach_device(domain, iommu->iommu_priv_dev);
		if (ret) {
			KGSL_MEM_ERR(mmu->device,
				"Failed to attach device, err %d\n", ret);
			iommu_detach_device(domain, iommu->iommu_user_dev);
			iommu->iommu_user_dev_attached = 0;
			goto done;
		}
		iommu->iommu_priv_dev_attached = 1;
		KGSL_MEM_INFO(mmu->device,
				"iommu %p attached to priv dev of MMU: %p\n",
				domain, mmu);
	}
done:
	return ret;
}
Example #14
int kvm_assign_device(struct kvm *kvm,
                      struct kvm_assigned_dev_kernel *assigned_dev)
{
    struct pci_dev *pdev = NULL;
    struct iommu_domain *domain = kvm->arch.iommu_domain;
    int r, last_flags;

    /* check if iommu exists and in use */
    if (!domain)
        return 0;

    pdev = assigned_dev->dev;
    if (pdev == NULL)
        return -ENODEV;

    r = iommu_attach_device(domain, &pdev->dev);
    if (r) {
        printk(KERN_ERR "assign device %x:%x:%x.%x failed",
               pci_domain_nr(pdev->bus),
               pdev->bus->number,
               PCI_SLOT(pdev->devfn),
               PCI_FUNC(pdev->devfn));
        return r;
    }

    last_flags = kvm->arch.iommu_flags;
    if (iommu_domain_has_cap(kvm->arch.iommu_domain,
                             IOMMU_CAP_CACHE_COHERENCY))
        kvm->arch.iommu_flags |= KVM_IOMMU_CACHE_COHERENCY;

    /* Check if need to update IOMMU page table for guest memory */
    if ((last_flags ^ kvm->arch.iommu_flags) ==
            KVM_IOMMU_CACHE_COHERENCY) {
        kvm_iommu_unmap_memslots(kvm);
        r = kvm_iommu_map_memslots(kvm);
        if (r)
            goto out_unmap;
    }

    printk(KERN_DEBUG "assign device %x:%x:%x.%x\n",
           assigned_dev->host_segnr,
           assigned_dev->host_busnr,
           PCI_SLOT(assigned_dev->host_devfn),
           PCI_FUNC(assigned_dev->host_devfn));

    return 0;
out_unmap:
    kvm_iommu_unmap_memslots(kvm);
    return r;
}
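Device assignment has a symmetric deassignment step. A minimal sketch of the detach half, modeled on the legacy KVM device-assignment API (the memslot unmapping a full implementation performs is elided):

int kvm_deassign_device(struct kvm *kvm,
			struct kvm_assigned_dev_kernel *assigned_dev)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;
	struct pci_dev *pdev = assigned_dev->dev;

	/* Sketch: reverse of kvm_assign_device() above. */
	if (!domain)
		return 0;
	if (!pdev)
		return -ENODEV;

	iommu_detach_device(domain, &pdev->dev);
	return 0;
}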
Example #15
static int vboxPciLinuxDevRegisterWithIommu(PVBOXRAWPCIINS pIns)
{
#ifdef VBOX_WITH_IOMMU
    int rc = VINF_SUCCESS;
    struct pci_dev *pPciDev = pIns->pPciDev;
    PVBOXRAWPCIDRVVM pData = VBOX_DRV_VMDATA(pIns);
    IPRT_LINUX_SAVE_EFL_AC();

    if (RT_LIKELY(pData))
    {
        if (RT_LIKELY(pData->pIommuDomain))
        {
            /** @todo: KVM checks IOMMU_CAP_CACHE_COHERENCY and sets
             *  flag IOMMU_CACHE later used when mapping physical
             *  addresses, which could improve performance.
             */
            int rcLnx = iommu_attach_device(pData->pIommuDomain, &pPciDev->dev);
            if (!rcLnx)
            {
                vbpci_printk(KERN_DEBUG, pPciDev, "attached to IOMMU\n");
                pIns->fIommuUsed = true;
                rc = VINF_SUCCESS;
            }
            else
            {
                vbpci_printk(KERN_DEBUG, pPciDev, "failed to attach to IOMMU, error %d\n", rcLnx);
                rc = VERR_INTERNAL_ERROR;
            }
        }
        else
        {
            vbpci_printk(KERN_DEBUG, pIns->pPciDev, "cannot attach to IOMMU, no domain\n");
            rc = VERR_NOT_FOUND;
        }
    }
    else
    {
        vbpci_printk(KERN_DEBUG, pPciDev, "cannot attach to IOMMU, no VM data\n");
        rc = VERR_INVALID_PARAMETER;
    }

    IPRT_LINUX_RESTORE_EFL_AC();
    return rc;
#else
    return VERR_NOT_SUPPORTED;
#endif
}
Example #16
static int msm_jpeg_attach_iommu(struct msm_jpeg_device *pgmn_dev)
{
	int i;

	for (i = 0; i < pgmn_dev->iommu_cnt; i++) {
		int rc = iommu_attach_device(pgmn_dev->domain,
				pgmn_dev->iommu_ctx_arr[i]);
		if (rc < 0) {
			JPEG_PR_ERR("%s: Device attach failed\n", __func__);
			/* detach the contexts attached so far */
			while (--i >= 0)
				iommu_detach_device(pgmn_dev->domain,
						pgmn_dev->iommu_ctx_arr[i]);
			return -ENODEV;
		}
		JPEG_DBG("%s:%d] dom 0x%lx ctx 0x%lx", __func__, __LINE__,
				(unsigned long)pgmn_dev->domain,
				(unsigned long)pgmn_dev->iommu_ctx_arr[i]);
	}
	return 0;
}
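The matching teardown detaches every context the loop above attached. A short sketch (the name msm_jpeg_detach_iommu follows from the attach above and is assumed):

static void msm_jpeg_detach_iommu(struct msm_jpeg_device *pgmn_dev)
{
	int i;

	/* Sketch: undo msm_jpeg_attach_iommu() for every context bank. */
	for (i = 0; i < pgmn_dev->iommu_cnt; i++)
		iommu_detach_device(pgmn_dev->domain,
				pgmn_dev->iommu_ctx_arr[i]);
}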
Example #17
/*
 * kgsl_attach_pagetable_iommu_domain - Attach the IOMMU unit to a
 * pagetable, i.e., set the IOMMU's PTBR to the pagetable address and
 * setup other IOMMU registers for the device so that it becomes
 * active
 * @mmu - Pointer to the device mmu structure
 *
 * Attach the IOMMU unit with the domain that is contained in the
 * hwpagetable of the given mmu.
 * Return - 0 on success else error code
 */
static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu_pt *iommu_pt;
	struct kgsl_iommu *iommu = mmu->priv;
	int i, j, ret = 0;

	/*
	 * Loop through all the iommu devices under all iommu units and
	 * attach the domain
	 */
	for (i = 0; i < iommu->unit_count; i++) {
		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
		iommu_pt = mmu->defaultpagetable->priv;
		for (j = 0; j < iommu_unit->dev_count; j++) {
			/*
			 * If there is a 2nd default pagetable then priv domain
			 * is attached to this pagetable
			 */
			if (mmu->priv_bank_table &&
				(KGSL_IOMMU_CONTEXT_PRIV == j))
				iommu_pt = mmu->priv_bank_table->priv;
			if (!iommu_unit->dev[j].attached) {
				ret = iommu_attach_device(iommu_pt->domain,
							iommu_unit->dev[j].dev);
				if (ret) {
					KGSL_MEM_ERR(mmu->device,
						"Failed to attach device, err %d\n",
						ret);
					goto done;
				}
				iommu_unit->dev[j].attached = true;
				KGSL_MEM_INFO(mmu->device,
				"iommu pt %p attached to dev %p, ctx_id %d\n",
				iommu_pt->domain, iommu_unit->dev[j].dev,
				iommu_unit->dev[j].ctx_id);
			}
		}
	}
done:
	return ret;
}
Example #18
int vboxPciOsDevRegisterWithIommu(PVBOXRAWPCIINS pIns)
{
#ifdef VBOX_WITH_IOMMU
    int rc;
    int status;
    PVBOXRAWPCIDRVVM pData = VBOX_DRV_VMDATA(pIns);

    if (!pData)
    {
        printk(KERN_DEBUG "vboxpci: VM data not initialized (attach)\n");
        return VERR_INVALID_PARAMETER;
    }

    if (!pData->pIommuDomain)
    {
        printk(KERN_DEBUG "vboxpci: No IOMMU domain (attach)\n");
        return VERR_NOT_FOUND;
    }

    status = iommu_attach_device(pData->pIommuDomain, &pIns->pPciDev->dev);
    if (status == 0)
    {
        printk(KERN_DEBUG "vboxpci: iommu_attach_device() success\n");
        pIns->fIommuUsed = true;
        rc = VINF_SUCCESS;
    }
    else
    {
        printk(KERN_DEBUG "vboxpci: iommu_attach_device() failed\n");
        rc = VERR_INTERNAL_ERROR;
    }

    /* @todo: KVM checks IOMMU_CAP_CACHE_COHERENCY and sets
       flag IOMMU_CACHE later used when mapping physical
       addresses, which could improve performance. */

    return rc;
#else
    return VERR_NOT_SUPPORTED;
#endif
}
Example #19
/*
 * kgsl_attach_pagetable_iommu_domain - Attach the IOMMU unit to a
 * pagetable, i.e., set the IOMMU's PTBR to the pagetable address and
 * setup other IOMMU registers for the device so that it becomes
 * active
 * @mmu - Pointer to the device mmu structure
 *
 * Attach the IOMMU unit with the domain that is contained in the
 * hwpagetable of the given mmu.
 * Return - 0 on success else error code
 */
static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
	struct kgsl_iommu_pt *iommu_pt;
	struct kgsl_iommu *iommu = mmu->priv;
	int i, j, ret = 0;

	BUG_ON(mmu->hwpagetable == NULL);
	BUG_ON(mmu->hwpagetable->priv == NULL);

	iommu_pt = mmu->hwpagetable->priv;

	/*
	 * Loop through all the iommu devices under all iommu units and
	 * attach the domain
	 */
	for (i = 0; i < iommu->unit_count; i++) {
		struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[i];
		for (j = 0; j < iommu_unit->dev_count; j++) {
			if (!iommu_unit->dev[j].attached) {
				ret = iommu_attach_device(iommu_pt->domain,
							iommu_unit->dev[j].dev);
				if (ret) {
					KGSL_MEM_ERR(mmu->device,
						"Failed to attach device, err %d\n",
						ret);
					goto done;
				}
				iommu_unit->dev[j].attached = true;
				KGSL_MEM_INFO(mmu->device,
				"iommu pt %p attached to dev %p, ctx_id %d\n",
				iommu_pt->domain, iommu_unit->dev[j].dev,
				iommu_unit->dev[j].ctx_id);
			}
		}
	}
done:
	return ret;
}
Example #20
/*
 * msm_fd_open - Fd device open method.
 * @file: Pointer to file struct.
 */
static int msm_fd_open(struct file *file)
{
	struct msm_fd_device *device = video_drvdata(file);
	struct video_device *video = video_devdata(file);
	struct fd_ctx *ctx;
	int ret;

	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->fd_device = device;

	/* Initialize work buffer handler */
	ctx->work_buf.pool = NULL;
	ctx->work_buf.fd = -1;

	/* Set ctx defaults */
	ctx->settings.speed = ctx->fd_device->clk_rates_num;
	ctx->settings.angle_index = MSM_FD_DEF_ANGLE_IDX;
	ctx->settings.direction_index = MSM_FD_DEF_DIR_IDX;
	ctx->settings.min_size_index = MSM_FD_DEF_MIN_SIZE_IDX;
	ctx->settings.threshold = MSM_FD_DEF_THRESHOLD;

	atomic_set(&ctx->subscribed_for_event, 0);

	v4l2_fh_init(&ctx->fh, video);

	file->private_data = &ctx->fh;
	v4l2_fh_add(&ctx->fh);

	ctx->vb2_q.drv_priv = ctx;
	ctx->vb2_q.mem_ops = &msm_fd_vb2_mem_ops;
	ctx->vb2_q.ops = &msm_fd_vb2_q_ops;
	ctx->vb2_q.buf_struct_size = sizeof(struct msm_fd_buffer);
	ctx->vb2_q.type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
	ctx->vb2_q.io_modes = VB2_USERPTR;
	ctx->vb2_q.timestamp_type = V4L2_BUF_FLAG_TIMESTAMP_COPY;
	ret = vb2_queue_init(&ctx->vb2_q);
	if (ret < 0) {
		dev_err(device->dev, "Error queue init\n");
		goto error_vb2_queue_init;
	}

	ctx->mem_pool.client = msm_ion_client_create(MSM_FD_DRV_NAME);
	if (IS_ERR_OR_NULL(ctx->mem_pool.client)) {
		dev_err(device->dev, "Error ion client create\n");
		ret = -ENODEV;
		goto error_ion_client_create;
	}
	ctx->mem_pool.domain_num = ctx->fd_device->iommu_domain_num;

	ret = iommu_attach_device(ctx->fd_device->iommu_domain,
		ctx->fd_device->iommu_dev);
	if (ret) {
		dev_err(device->dev, "Can not attach iommu domain\n");
		goto error_iommu_attach;
	}

	ctx->stats = vmalloc(sizeof(*ctx->stats) * MSM_FD_MAX_RESULT_BUFS);
	if (!ctx->stats) {
		dev_err(device->dev, "No memory for face statistics\n");
		ret = -ENOMEM;
		goto error_stats_vmalloc;
	}

	return 0;

error_stats_vmalloc:
	iommu_detach_device(ctx->fd_device->iommu_domain,
			ctx->fd_device->iommu_dev);
error_iommu_attach:
	ion_client_destroy(ctx->mem_pool.client);
error_ion_client_create:
	vb2_queue_release(&ctx->vb2_q);
error_vb2_queue_init:
	v4l2_fh_del(&ctx->fh);
	v4l2_fh_exit(&ctx->fh);
	kfree(ctx);
	return ret;
}
Example #21
int msm_jpeg_platform_init(struct platform_device *pdev,
	struct resource **mem,
	void **base,
	int *irq,
	irqreturn_t (*handler) (int, void *),
	void *context)
{
	int rc = -1;
	int i = 0;
	int jpeg_irq;
	struct resource *jpeg_mem, *jpeg_io, *jpeg_irq_res;
	void *jpeg_base;
	struct msm_jpeg_device *pgmn_dev =
		(struct msm_jpeg_device *) context;

	pgmn_dev->state = MSM_JPEG_IDLE;

	jpeg_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!jpeg_mem) {
		JPEG_PR_ERR("%s: no mem resource?\n", __func__);
		return -ENODEV;
	}

	jpeg_irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!jpeg_irq_res) {
		JPEG_PR_ERR("no irq resource?\n");
		return -ENODEV;
	}
	jpeg_irq = jpeg_irq_res->start;
	JPEG_DBG("%s base address: 0x%x, jpeg irq number: %d\n", __func__,
		jpeg_mem->start, jpeg_irq);

	pgmn_dev->jpeg_bus_client =
		msm_bus_scale_register_client(&msm_jpeg_bus_client_pdata);
	if (!pgmn_dev->jpeg_bus_client) {
		JPEG_PR_ERR("%s: Registration Failed!\n", __func__);
		pgmn_dev->jpeg_bus_client = 0;
		return -EINVAL;
	}
	msm_bus_scale_client_update_request(
		pgmn_dev->jpeg_bus_client, 1);

	jpeg_io = request_mem_region(jpeg_mem->start,
		resource_size(jpeg_mem), pdev->name);
	if (!jpeg_io) {
		JPEG_PR_ERR("%s: region already claimed\n", __func__);
		return -EBUSY;
	}

	jpeg_base = ioremap(jpeg_mem->start, resource_size(jpeg_mem));
	if (!jpeg_base) {
		rc = -ENOMEM;
		JPEG_PR_ERR("%s: ioremap failed\n", __func__);
		goto fail_remap;
	}

	pgmn_dev->jpeg_fs = regulator_get(&pgmn_dev->pdev->dev, "vdd");
	if (IS_ERR(pgmn_dev->jpeg_fs)) {
		rc = PTR_ERR(pgmn_dev->jpeg_fs);
		pgmn_dev->jpeg_fs = NULL;
		JPEG_PR_ERR("%s:%d] jpeg regulator get failed\n",
				__func__, __LINE__);
		goto fail_fs;
	}
	rc = regulator_enable(pgmn_dev->jpeg_fs);
	if (rc) {
		JPEG_PR_ERR("%s:%d] jpeg regulator enable failed\n",
				__func__, __LINE__);
		regulator_put(pgmn_dev->jpeg_fs);
		pgmn_dev->jpeg_fs = NULL;
		goto fail_fs;
	}

	rc = msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
	 pgmn_dev->jpeg_clk, ARRAY_SIZE(jpeg_8x_clk_info), 1);
	if (rc < 0) {
		JPEG_PR_ERR("%s: clk failed rc = %d\n", __func__, rc);
		goto fail_clk;
	}

	pgmn_dev->hw_version = readl_relaxed(jpeg_base +
		JPEG_HW_VERSION);
	JPEG_DBG_HIGH("%s:%d] jpeg HW version 0x%x", __func__, __LINE__,
		pgmn_dev->hw_version);

	pgmn_dev->jpeg_vbif = ioremap(VBIF_BASE_ADDRESS, VBIF_REGION_SIZE);
	if (!pgmn_dev->jpeg_vbif) {
		rc = -ENOMEM;
		JPEG_PR_ERR("%s:%d] ioremap failed\n", __func__, __LINE__);
		goto fail_vbif;
	}
	JPEG_DBG("%s:%d] jpeg_vbif 0x%x", __func__, __LINE__,
		(uint32_t)pgmn_dev->jpeg_vbif);

#ifdef CONFIG_MSM_IOMMU
	for (i = 0; i < pgmn_dev->iommu_cnt; i++) {
		rc = iommu_attach_device(pgmn_dev->domain,
				pgmn_dev->iommu_ctx_arr[i]);
		if (rc < 0) {
			rc = -ENODEV;
			JPEG_PR_ERR("%s: Device attach failed\n", __func__);
			/* detach the contexts attached so far */
			while (--i >= 0)
				iommu_detach_device(pgmn_dev->domain,
						pgmn_dev->iommu_ctx_arr[i]);
			goto fail_iommu;
		}
		JPEG_DBG("%s:%d] dom 0x%x ctx 0x%x", __func__, __LINE__,
					(uint32_t)pgmn_dev->domain,
					(uint32_t)pgmn_dev->iommu_ctx_arr[i]);
	}
#endif
	set_vbif_params(pgmn_dev, pgmn_dev->jpeg_vbif);

#ifdef CONFIG_MACH_LGE
	*mem  = jpeg_mem;
	*base = jpeg_base;
#endif

	rc = request_irq(jpeg_irq, handler, IRQF_TRIGGER_RISING, "jpeg",
		context);
	if (rc) {
		JPEG_PR_ERR("%s: request_irq failed, %d\n", __func__,
			jpeg_irq);
		goto fail_request_irq;
	}

#ifndef CONFIG_MACH_LGE /* QCT origin */
	*mem  = jpeg_mem;
	*base = jpeg_base;
#endif
	*irq  = jpeg_irq;

	pgmn_dev->jpeg_client = msm_ion_client_create(-1, "camera/jpeg");
	JPEG_DBG("%s:%d] success\n", __func__, __LINE__);

	pgmn_dev->state = MSM_JPEG_INIT;
	return rc;

fail_request_irq:
#ifdef CONFIG_MACH_LGE
	*mem  = NULL;
	*base = NULL;
#endif

#ifdef CONFIG_MSM_IOMMU
	for (i = 0; i < pgmn_dev->iommu_cnt; i++) {
		JPEG_PR_ERR("%s:%d] dom 0x%x ctx 0x%x", __func__, __LINE__,
					(uint32_t)pgmn_dev->domain,
					(uint32_t)pgmn_dev->iommu_ctx_arr[i]);
		iommu_detach_device(pgmn_dev->domain,
					pgmn_dev->iommu_ctx_arr[i]);
	}
#endif

fail_iommu:
	iounmap(pgmn_dev->jpeg_vbif);

fail_vbif:
	msm_cam_clk_enable(&pgmn_dev->pdev->dev, jpeg_8x_clk_info,
	pgmn_dev->jpeg_clk, ARRAY_SIZE(jpeg_8x_clk_info), 0);

fail_clk:
	/* don't clobber rc: the original error code is returned below */
	if (regulator_disable(pgmn_dev->jpeg_fs))
		JPEG_PR_ERR("%s:%d] regulator disable failed",
			__func__, __LINE__);
	else
		regulator_put(pgmn_dev->jpeg_fs);
	pgmn_dev->jpeg_fs = NULL;

fail_fs:
	iounmap(jpeg_base);

fail_remap:
	release_mem_region(jpeg_mem->start, resource_size(jpeg_mem));
	JPEG_DBG("%s:%d] fail\n", __func__, __LINE__);
	return rc;
}
Example #22
static int ivp_smmu_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ivp_smmu_dev *smmu_dev = NULL;
	struct iommu_domain_data *domain_info = NULL;
	struct resource *res = NULL;
	unsigned int *ver = NULL;
	int ret = 0;

	pr_info("%s: smmu driver start\n", __func__);

	smmu_dev = devm_kzalloc(&pdev->dev, sizeof(*smmu_dev), GFP_KERNEL);
	if (!smmu_dev) {
		pr_err("%s: devm_kzalloc failed\n", __func__);
		return -ENOMEM;
	}
	smmu_dev->dev = &pdev->dev;
	smmu_dev->state = SMMU_STATE_DISABLE;

	/* get smmu version */
	ver = (unsigned int *)of_get_property(np, "hisi,smmu-version", NULL);
	if (ver) {
		smmu_dev->version = be32_to_cpu(*ver);
		pr_info("%s: smmu version is %u\n", __func__, be32_to_cpu(*ver));
	}

	/* get IOMEM resource */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		pr_err("%s: platform_get_resource err\n", __func__);
		return -ENOENT;
	}
	smmu_dev->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(smmu_dev->reg_base)) {
		pr_err("%s: remap resource err\n", __func__);
		return PTR_ERR(smmu_dev->reg_base);
	}
	smmu_dev->reg_size = resource_size(res);

	/* get IRQ resource */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		pr_err("%s: get IRQ failed\n", __func__);
		return -ENOENT;
	}
	smmu_dev->irq = (unsigned int)res->start;
	smmu_dev->isr = ivp_smmu_isr;

	/* get domain and physical pgd base address */
	smmu_dev->domain = iommu_domain_alloc(pdev->dev.bus);
	if (!smmu_dev->domain) {
		pr_err("%s: get domain failed\n", __func__);
		return -ENODEV;
	}

	ret = iommu_attach_device(smmu_dev->domain, &pdev->dev);
	if (ret) {
		iommu_domain_free(smmu_dev->domain);
		pr_err("%s: iommu attach failed ret[0x%x]\n", __func__, ret);
		return -ENODEV;
	}

	domain_info = (struct iommu_domain_data *)smmu_dev->domain->priv;
	smmu_dev->pgd_base = (unsigned long)domain_info->phy_pgd_base;

	/*
	 * for the ivp subsys, only support:
	 * Context Bank:0; Virtual Machine ID:0; CB attribute:S1_TRANS_S2_BYPASS
	 */
	smmu_dev->cbidx = SMMU_CB_IDX_IVP;
	smmu_dev->vmid = SMMU_CB_VMID_IVP;
	smmu_dev->cbar = SMMU_CBAR_TYPE_S1_TRANS_S2_BYPASS;

	spin_lock_init(&smmu_dev->spinlock);
	g_smmu_dev = smmu_dev;

	pr_info("%s: smmu driver probe finished\n", __func__);

	if (client_ivp == NULL)
		client_ivp = dsm_register_client(&dev_ivp);

	return 0;
}
Example #23
static int host1x_probe(struct platform_device *pdev)
{
	const struct of_device_id *id;
	struct host1x *host;
	struct resource *regs;
	int syncpt_irq;
	int err;

	id = of_match_device(host1x_of_match, &pdev->dev);
	if (!id)
		return -EINVAL;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "failed to get registers\n");
		return -ENXIO;
	}

	syncpt_irq = platform_get_irq(pdev, 0);
	if (syncpt_irq < 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENXIO;
	}

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;
	host->info = id->data;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	dma_set_mask_and_coherent(host->dev, host->info->dma_mask);

	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		err = PTR_ERR(host->clk);
		return err;
	}

	host->rst = devm_reset_control_get(&pdev->dev, "host1x");
	if (IS_ERR(host->rst)) {
		err = PTR_ERR(host->rst);
		dev_err(&pdev->dev, "failed to get reset: %d\n", err);
		return err;
	}

	if (iommu_present(&platform_bus_type)) {
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain)
			return -ENOMEM;

		err = iommu_attach_device(host->domain, &pdev->dev);
		if (err)
			goto fail_free_domain;

		geometry = &host->domain->geometry;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order,
				 geometry->aperture_start >> order,
				 geometry->aperture_end >> order);
		host->iova_end = geometry->aperture_end;
	}

	err = host1x_channel_list_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto fail_detach_device;
	}

	err = clk_prepare_enable(host->clk);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto fail_detach_device;
	}

	err = reset_control_deassert(host->rst);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
		goto fail_unprepare_disable;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto fail_reset_assert;
	}

	err = host1x_intr_init(host, syncpt_irq);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto fail_deinit_syncpt;
	}

	host1x_debug_init(host);

	err = host1x_register(host);
	if (err < 0)
		goto fail_deinit_intr;

	return 0;

fail_deinit_intr:
	host1x_intr_deinit(host);
fail_deinit_syncpt:
	host1x_syncpt_deinit(host);
fail_reset_assert:
	reset_control_assert(host->rst);
fail_unprepare_disable:
	clk_disable_unprepare(host->clk);
fail_detach_device:
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_device(host->domain, &pdev->dev);
	}
fail_free_domain:
	if (host->domain)
		iommu_domain_free(host->domain);

	return err;
}
Example #24
int iovmm_activate(struct device *dev)
{
	struct exynos_iovmm *vmm = exynos_get_iovmm(dev);
	return iommu_attach_device(vmm->domain, dev);
}
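iovmm_activate is the thinnest wrapper on this page. For reference, the lifecycle every example here repeats is alloc, attach, map, then the same steps in reverse; a generic sketch against the classic IOMMU API used throughout these excerpts (all names are illustrative, not from the source):

static int demo_iommu_lifecycle(struct device *dev, dma_addr_t iova,
				phys_addr_t paddr, size_t size)
{
	struct iommu_domain *domain;
	int ret;

	domain = iommu_domain_alloc(dev->bus);		/* 1. allocate */
	if (!domain)
		return -ENODEV;

	ret = iommu_attach_device(domain, dev);		/* 2. attach */
	if (ret)
		goto free_domain;

	ret = iommu_map(domain, iova, paddr, size,	/* 3. map */
			IOMMU_READ | IOMMU_WRITE);
	if (ret)
		goto detach;

	/* ... the device may now DMA through iova ... */

	iommu_unmap(domain, iova, size);		/* 4. unwind */
detach:
	iommu_detach_device(domain, dev);
free_domain:
	iommu_domain_free(domain);
	return ret;
}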
Example #25
int vpe_enable(uint32_t clk_rate, struct msm_cam_media_controller *mctl)
{
	int rc = 0;
	unsigned long flags = 0;
	D("%s", __func__);
	/* don't change the order of clock and irq.*/
	spin_lock_irqsave(&vpe_ctrl->lock, flags);
	if (vpe_ctrl->state != VPE_STATE_IDLE) {
		pr_err("%s: VPE already enabled", __func__);
		spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
		return 0;
	}
	vpe_ctrl->state = VPE_STATE_INIT;
	spin_unlock_irqrestore(&vpe_ctrl->lock, flags);
	enable_irq(vpe_ctrl->vpeirq->start);

	if (vpe_ctrl->fs_vpe) {
		rc = regulator_enable(vpe_ctrl->fs_vpe);
		if (rc) {
			pr_err("%s: Regulator enable failed\n", __func__);
			goto vpe_fs_failed;
		}
	}

	rc = msm_cam_clk_enable(&vpe_ctrl->pdev->dev, vpe_clk_info,
			vpe_ctrl->vpe_clk, ARRAY_SIZE(vpe_clk_info), 1);
	if (rc < 0)
		goto vpe_clk_failed;

#ifdef CONFIG_MSM_IOMMU
	rc = iommu_attach_device(mctl->domain, vpe_ctrl->iommu_ctx_src);
	if (rc < 0) {
		pr_err("%s: Device attach failed\n", __func__);
		goto src_attach_failed;
	}
	rc = iommu_attach_device(mctl->domain, vpe_ctrl->iommu_ctx_dst);
	if (rc < 0) {
		pr_err("%s: Device attach failed\n", __func__);
		goto dst_attach_failed;
	}
/* LGE_CHANGE_S, Patch for ION free, 2013.1.8, gayoung85.lee[Start] */
#if defined(CONFIG_LGE_GK_CAMERA) ||defined(CONFIG_MACH_APQ8064_AWIFI)
#ifdef CONFIG_MSM_MULTIMEDIA_USE_ION
	msm_camera_v4l2_get_ion_client(mctl->pcam_ptr);
#endif
#endif
/* LGE_CHANGE_E, Patch for ION free, 2013.1.8, gayoung85.lee[End] */
#endif
	return rc;

#ifdef CONFIG_MSM_IOMMU
dst_attach_failed:
	iommu_detach_device(mctl->domain, vpe_ctrl->iommu_ctx_src);
src_attach_failed:
#endif
	msm_cam_clk_enable(&vpe_ctrl->pdev->dev, vpe_clk_info,
		vpe_ctrl->vpe_clk, ARRAY_SIZE(vpe_clk_info), 0);
vpe_clk_failed:
	if (vpe_ctrl->fs_vpe)
		regulator_disable(vpe_ctrl->fs_vpe);
vpe_fs_failed:
	disable_irq(vpe_ctrl->vpeirq->start);
	vpe_ctrl->state = VPE_STATE_IDLE;
	return rc;
}