Example #1
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	if (!gmu->initialized)
		return;

	a6xx_gmu_stop(a6xx_gpu);

	pm_runtime_disable(gmu->dev);

	if (!IS_ERR_OR_NULL(gmu->gxpd)) {
		pm_runtime_disable(gmu->gxpd);
		dev_pm_domain_detach(gmu->gxpd, false);
	}

	iounmap(gmu->mmio);
	gmu->mmio = NULL;

	a6xx_gmu_memory_free(gmu, gmu->hfi);

	iommu_detach_device(gmu->domain, gmu->dev);

	iommu_domain_free(gmu->domain);

	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);

	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	gmu->initialized = false;
}
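Every example in this collection revolves around the same pairing: a domain allocated with iommu_domain_alloc() is released with iommu_domain_free(), and any device attached in between must be detached first, exactly as a6xx_gmu_remove() does above. A minimal sketch of that lifecycle (hypothetical driver code; the signature of iommu_domain_alloc() varies by kernel version, and the bus-pointer form used here matches Examples #3 and #14):

static int demo_iommu_lifecycle(struct device *dev)
{
	struct iommu_domain *domain;
	int ret;

	/* Returns NULL on failure, not an ERR_PTR */
	domain = iommu_domain_alloc(dev->bus);
	if (!domain)
		return -ENOMEM;

	ret = iommu_attach_device(domain, dev);
	if (ret) {
		/* Nothing attached yet, so the domain can be freed directly */
		iommu_domain_free(domain);
		return ret;
	}

	/* ... iommu_map()/iommu_unmap() traffic would go here ... */

	iommu_detach_device(domain, dev);
	iommu_domain_free(domain);
	return 0;
}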
Example #2
File: iommu.c Project: Addision/LVS
int kvm_iommu_map_guest(struct kvm *kvm)
{
	int r;

	if (!iommu_found()) {
		printk(KERN_ERR "%s: iommu not found\n", __func__);
		return -ENODEV;
	}

	kvm->arch.iommu_domain = iommu_domain_alloc();
	if (!kvm->arch.iommu_domain)
		return -ENOMEM;

	if (!allow_unsafe_assigned_interrupts &&
	    !iommu_domain_has_cap(kvm->arch.iommu_domain,
				  IOMMU_CAP_INTR_REMAP)) {
		printk(KERN_WARNING "%s: No interrupt remapping support, disallowing device assignment.  Re-enable with \"allow_unsafe_assigned_interrupts=1\" module option.\n", __func__);
		iommu_domain_free(kvm->arch.iommu_domain);
		kvm->arch.iommu_domain = NULL;
		return -EPERM;
	}

	r = kvm_iommu_map_memslots(kvm);
	if (r)
		goto out_unmap;

	return 0;

out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;
}
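Note the asymmetry between the two failure paths: the interrupt-remapping check frees the domain and clears kvm->arch.iommu_domain on the spot, while a kvm_iommu_map_memslots() failure only unmaps the slots and leaves the domain allocated, to be released later by kvm_iommu_unmap_guest() (Example #6), which tests that same pointer.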
Example #3
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	int ret;

	/*
	 * The GMU address space is hardcoded to treat the range
	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
	 * between the GMU and the CPU will live in this space
	 */
	gmu->uncached_iova_base = 0x60000000;

	gmu->domain = iommu_domain_alloc(&platform_bus_type);
	if (!gmu->domain)
		return -ENODEV;

	ret = iommu_attach_device(gmu->domain, gmu->dev);
	if (ret) {
		iommu_domain_free(gmu->domain);
		gmu->domain = NULL;
	}

	return ret;
}
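Because the error path clears gmu->domain, later teardown can use the pointer itself as the "did probe succeed" flag. A hypothetical remove-side counterpart (Example #1 performs the equivalent steps inline, guarded by gmu->initialized):

static void demo_gmu_memory_remove(struct a6xx_gmu *gmu)
{
	/* A failed probe left gmu->domain NULL, so there is nothing to undo */
	if (!gmu->domain)
		return;

	iommu_detach_device(gmu->domain, gmu->dev);
	iommu_domain_free(gmu->domain);
	gmu->domain = NULL;
}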
Example #4
void
gckIOMMU_Destory(
    IN gckOS Os,
    IN gckIOMMU Iommu
    )
{
    gcmkHEADER();

    if (Iommu->domain && Iommu->device)
    {
        /* Undo the attach performed at construction time */
        iommu_detach_device(Iommu->domain, Iommu->device);
    }

    if (Iommu->domain)
    {
        iommu_domain_free(Iommu->domain);
    }

    if (Iommu)
    {
        gcmkOS_SAFE_FREE(Os, Iommu);
    }

    gcmkFOOTER_NO();
}
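The ordering matters here: the device is detached before the domain is freed. Freeing a domain that still has a live attachment would leave the hardware referencing freed page tables, which is why every teardown in these examples either detaches first or knows nothing is attached.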
Example #5
/*
 * kgsl_iommu_destroy_pagetable - Free up resources held by a pagetable
 * @mmu_specific_pt - Pointer to pagetable which is to be freed
 *
 * Return - void
 */
static void kgsl_iommu_destroy_pagetable(void *mmu_specific_pt)
{
	struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
	if (iommu_pt->domain)
		iommu_domain_free(iommu_pt->domain);
	kfree(iommu_pt);
}
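The NULL guard is not cosmetic: unlike kfree(), iommu_domain_free() has historically not accepted a NULL argument, so any teardown that can run after a partial setup tests the pointer first (Example #6 does the same with an early return).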
Example #6
int kvm_iommu_unmap_guest(struct kvm *kvm)
{
	struct iommu_domain *domain = kvm->arch.iommu_domain;

	/* check if iommu exists and in use */
	if (!domain)
		return 0;

	kvm_iommu_unmap_memslots(kvm);
	iommu_domain_free(domain);
	return 0;
}
Example #7
int hisi_ion_enable_iommu(struct platform_device *pdev)
{
	int ret;
	struct iommu_domain_capablity data;
	struct device *dev = &pdev->dev;
	struct hisi_iommu_domain *hisi_domain;
	struct device_node *np = pdev->dev.of_node;

	printk(KERN_ERR"in %s start \n",__func__);
	hisi_domain = kzalloc(sizeof(*hisi_domain), GFP_KERNEL);
	if (!hisi_domain) {
		dbg("alloc hisi_domain object fail \n");
		return -ENOMEM;
	}
	if (!iommu_present(dev->bus)) {
		dbg("iommu not found\n");
		kfree(hisi_domain);
		return 0;
	}

	/* create iommu domain */
	hisi_domain->domain = iommu_domain_alloc(dev->bus);
	if (!hisi_domain->domain) {
		ret = -EINVAL;
		goto error;
	}
	ret = iommu_attach_device(hisi_domain->domain, dev);
	if (ret)
		goto error;
	get_range_info(np, hisi_domain, &data);
	/* "align" here means buffers allocated from this pool are aligned to the IOMMU's alignment requirement */
	hisi_domain->iova_pool = iova_pool_setup(data.iova_start,
			data.iova_end, data.iova_align);
	if (!hisi_domain->iova_pool) {
		ret = -EINVAL;
		iommu_detach_device(hisi_domain->domain, dev);
		goto error;
	}

	/* this is a global pointer */
	hisi_iommu_domain_p = hisi_domain;

	dbg("in %s end \n",__func__);
	return 0;

error:
	WARN(1, "hisi_iommu_domain_init failed!\n");
	if (hisi_domain->iova_pool)
		iova_pool_destory(hisi_domain->iova_pool);
	if (hisi_domain->domain)
		iommu_domain_free(hisi_domain->domain);
	kfree(hisi_domain);

	return ret;
}
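Note that the error label unwinds in reverse order of construction: the IOVA pool is destroyed before the domain is freed, and the containing structure is freed last, so each step only has to undo what was already set up.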
Example #8
/*
 * kgsl_iommu_destroy_pagetable - Free up resources held by a pagetable
 * @mmu_specific_pt - Pointer to pagetable which is to be freed
 *
 * Return - void
 */
static void kgsl_iommu_destroy_pagetable(void *mmu_specific_pt)
{
	struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;
	if (iommu_pt->domain)
		iommu_domain_free(iommu_pt->domain);
	if (iommu_pt->iommu) {
		if ((KGSL_IOMMU_ASID_REUSE == iommu_pt->asid) &&
			iommu_pt->iommu->asid_reuse)
			iommu_pt->iommu->asid_reuse--;
		if (!iommu_pt->iommu->asid_reuse ||
			(KGSL_IOMMU_ASID_REUSE != iommu_pt->asid))
			clear_bit(iommu_pt->asid, iommu_pt->iommu->asids);
	}
	kfree(iommu_pt);
}
Example #9
File: a6xx_gmu.c Project: Lyude/linux
void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;

	if (IS_ERR_OR_NULL(gmu->mmio))
		return;

	pm_runtime_disable(gmu->dev);
	a6xx_gmu_stop(a6xx_gpu);

	a6xx_gmu_irq_disable(gmu);
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	iommu_detach_device(gmu->domain, gmu->dev);

	iommu_domain_free(gmu->domain);
}
Example #10
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	DBG("%s", gpu->name);

	WARN_ON(!list_empty(&gpu->active_list));

	bs_fini(gpu);

	if (gpu->rb) {
		if (gpu->rb_iova)
			msm_gem_put_iova(gpu->rb->bo, gpu->id);
		msm_ringbuffer_destroy(gpu->rb);
	}

	if (gpu->iommu)
		iommu_domain_free(gpu->iommu);
}
Example #11
File: dev.c Project: asmalldev/linux
static int host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_unregister(host);
	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	reset_control_assert(host->rst);
	clk_disable_unprepare(host->clk);

	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_device(host->domain, &pdev->dev);
		iommu_domain_free(host->domain);
	}

	return 0;
}
Example #12
File: iommu.c Project: ramlaxman/linux
static void iommu_group_release(struct kobject *kobj)
{
	struct iommu_group *group = to_iommu_group(kobj);

	pr_debug("Releasing group %d\n", group->id);

	if (group->iommu_data_release)
		group->iommu_data_release(group->iommu_data);

	mutex_lock(&iommu_group_mutex);
	ida_remove(&iommu_group_ida, group->id);
	mutex_unlock(&iommu_group_mutex);

	if (group->default_domain)
		iommu_domain_free(group->default_domain);

	kfree(group->name);
	kfree(group);
}
Example #13
static int ivp_smmu_remove(struct platform_device *pdev)
{
	struct ivp_smmu_dev *smmu_dev = g_smmu_dev;

	if (!smmu_dev) {
		pr_err("%s: smmu_dev invalid\n", __func__);
		return -ENODEV;
	}

	/*
	 * clientpd = 0b1 (bit 0): transaction disable (switch to bypass mode)
	 */
	writel(0x200037, (smmu_dev->reg_base + SMMU_NS_CR0));

	if (smmu_dev->domain) {
		iommu_domain_free(smmu_dev->domain);
		smmu_dev->domain = NULL;
	}

	g_smmu_dev = NULL;

	return 0;
}
Example #14
File: dev.c Project: asmalldev/linux
static int host1x_probe(struct platform_device *pdev)
{
	const struct of_device_id *id;
	struct host1x *host;
	struct resource *regs;
	int syncpt_irq;
	int err;

	id = of_match_device(host1x_of_match, &pdev->dev);
	if (!id)
		return -EINVAL;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "failed to get registers\n");
		return -ENXIO;
	}

	syncpt_irq = platform_get_irq(pdev, 0);
	if (syncpt_irq < 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENXIO;
	}

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;
	host->info = id->data;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	dma_set_mask_and_coherent(host->dev, host->info->dma_mask);

	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		err = PTR_ERR(host->clk);
		return err;
	}

	host->rst = devm_reset_control_get(&pdev->dev, "host1x");
	if (IS_ERR(host->rst)) {
		err = PTR_ERR(host->rst);
		dev_err(&pdev->dev, "failed to get reset: %d\n", err);
		return err;
	}

	if (iommu_present(&platform_bus_type)) {
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain)
			return -ENOMEM;

		err = iommu_attach_device(host->domain, &pdev->dev);
		if (err)
			goto fail_free_domain;

		geometry = &host->domain->geometry;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order,
				 geometry->aperture_start >> order,
				 geometry->aperture_end >> order);
		host->iova_end = geometry->aperture_end;
	}

	err = host1x_channel_list_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto fail_detach_device;
	}

	err = clk_prepare_enable(host->clk);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto fail_detach_device;
	}

	err = reset_control_deassert(host->rst);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
		goto fail_unprepare_disable;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto fail_reset_assert;
	}

	err = host1x_intr_init(host, syncpt_irq);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto fail_deinit_syncpt;
	}

	host1x_debug_init(host);

	err = host1x_register(host);
	if (err < 0)
		goto fail_deinit_intr;

	return 0;

fail_deinit_intr:
	host1x_intr_deinit(host);
fail_deinit_syncpt:
	host1x_syncpt_deinit(host);
fail_reset_assert:
	reset_control_assert(host->rst);
fail_unprepare_disable:
	clk_disable_unprepare(host->clk);
fail_detach_device:
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_device(host->domain, &pdev->dev);
	}
fail_free_domain:
	if (host->domain)
		iommu_domain_free(host->domain);

	return err;
}
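The two IOMMU unwind labels mirror the setup sequence: a failure after iommu_attach_device() succeeded jumps to fail_detach_device, which detaches and then falls through into fail_free_domain, while a failed attach jumps straight to fail_free_domain and only releases the allocation. Splitting the labels keeps every failure site down to a single goto.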
Example #15
int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
{
	int ret, i;
	struct iommu_domain *iommu;
	enum etnaviv_iommu_version version;
	bool mmuv2;

	ret = pm_runtime_get_sync(gpu->dev);
	if (ret < 0)
		return ret;

	etnaviv_hw_identify(gpu);

	if (gpu->identity.model == 0) {
		dev_err(gpu->dev, "Unknown GPU model\n");
		ret = -ENXIO;
		goto fail;
	}

	/* Exclude VG cores with FE2.0 */
	if (gpu->identity.features & chipFeatures_PIPE_VG &&
	    gpu->identity.features & chipFeatures_FE20) {
		dev_info(gpu->dev, "Ignoring GPU with VG and FE2.0\n");
		ret = -ENXIO;
		goto fail;
	}

	/*
	 * Set the GPU linear window to be at the end of the DMA window, where
	 * the CMA area is likely to reside. This ensures that we are able to
	 * map the command buffers while having the linear window overlap as
	 * much RAM as possible, so we can optimize mappings for other buffers.
	 *
	 * For 3D cores only do this if MC2.0 is present, as with MC1.0 it leads
	 * to different views of the memory on the individual engines.
	 */
	if (!(gpu->identity.features & chipFeatures_PIPE_3D) ||
	    (gpu->identity.minor_features0 & chipMinorFeatures0_MC20)) {
		u32 dma_mask = (u32)dma_get_required_mask(gpu->dev);
		if (dma_mask < PHYS_OFFSET + SZ_2G)
			gpu->memory_base = PHYS_OFFSET;
		else
			gpu->memory_base = dma_mask - SZ_2G + 1;
	}

	ret = etnaviv_hw_reset(gpu);
	if (ret)
		goto fail;

	/* Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	mmuv2 = gpu->identity.minor_features1 & chipMinorFeatures1_MMU_VERSION;
	dev_dbg(gpu->dev, "mmuv2: %d\n", mmuv2);

	if (!mmuv2) {
		iommu = etnaviv_iommu_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V1;
	} else {
		iommu = etnaviv_iommu_v2_domain_alloc(gpu);
		version = ETNAVIV_IOMMU_V2;
	}

	if (!iommu) {
		ret = -ENOMEM;
		goto fail;
	}

	gpu->mmu = etnaviv_iommu_new(gpu, iommu, version);
	if (!gpu->mmu) {
		iommu_domain_free(iommu);
		ret = -ENOMEM;
		goto fail;
	}

	/* Create buffer: */
	gpu->buffer = etnaviv_gpu_cmdbuf_new(gpu, PAGE_SIZE, 0);
	if (!gpu->buffer) {
		ret = -ENOMEM;
		dev_err(gpu->dev, "could not create command buffer\n");
		goto destroy_iommu;
	}
	if (gpu->buffer->paddr - gpu->memory_base > 0x80000000) {
		ret = -EINVAL;
		dev_err(gpu->dev,
			"command buffer outside valid memory window\n");
		goto free_buffer;
	}

	/* Setup event management */
	spin_lock_init(&gpu->event_spinlock);
	init_completion(&gpu->event_free);
	for (i = 0; i < ARRAY_SIZE(gpu->event); i++) {
		gpu->event[i].used = false;
		complete(&gpu->event_free);
	}

	/* Now program the hardware */
	mutex_lock(&gpu->lock);
	etnaviv_gpu_hw_init(gpu);
	gpu->exec_state = -1;
	mutex_unlock(&gpu->lock);

	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return 0;

free_buffer:
	etnaviv_gpu_cmdbuf_free(gpu->buffer);
	gpu->buffer = NULL;
destroy_iommu:
	etnaviv_iommu_destroy(gpu->mmu);
	gpu->mmu = NULL;
fail:
	pm_runtime_mark_last_busy(gpu->dev);
	pm_runtime_put_autosuspend(gpu->dev);

	return ret;
}
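Ownership of the domain changes hands mid-function: iommu_domain_free() is called directly only when etnaviv_iommu_new() fails. Once gpu->mmu exists, the wrapper owns the domain, and both the later error paths and normal teardown release it through etnaviv_iommu_destroy() instead.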
Example #16
static void kgsl_iommu_destroy_pagetable(void *mmu_specific_pt)
{
	struct iommu_domain *domain = mmu_specific_pt;
	if (domain)
		iommu_domain_free(domain);
}
Example #17
File: mdp5_kms.c Project: 020gzh/linux
struct msm_kms *mdp5_kms_init(struct drm_device *dev)
{
	struct platform_device *pdev = dev->platformdev;
	struct mdp5_cfg *config;
	struct mdp5_kms *mdp5_kms;
	struct msm_kms *kms = NULL;
	struct msm_mmu *mmu;
	uint32_t major, minor;
	int i, ret;

	mdp5_kms = kzalloc(sizeof(*mdp5_kms), GFP_KERNEL);
	if (!mdp5_kms) {
		dev_err(dev->dev, "failed to allocate kms\n");
		ret = -ENOMEM;
		goto fail;
	}

	spin_lock_init(&mdp5_kms->resource_lock);

	mdp_kms_init(&mdp5_kms->base, &kms_funcs);

	kms = &mdp5_kms->base.base;

	mdp5_kms->dev = dev;

	/* mdp5_kms->mmio actually represents the MDSS base address */
	mdp5_kms->mmio = msm_ioremap(pdev, "mdp_phys", "MDP5");
	if (IS_ERR(mdp5_kms->mmio)) {
		ret = PTR_ERR(mdp5_kms->mmio);
		goto fail;
	}

	mdp5_kms->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
	if (IS_ERR(mdp5_kms->vbif)) {
		ret = PTR_ERR(mdp5_kms->vbif);
		goto fail;
	}

	mdp5_kms->vdd = devm_regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(mdp5_kms->vdd)) {
		ret = PTR_ERR(mdp5_kms->vdd);
		goto fail;
	}

	ret = regulator_enable(mdp5_kms->vdd);
	if (ret) {
		dev_err(dev->dev, "failed to enable regulator vdd: %d\n", ret);
		goto fail;
	}

	/* mandatory clocks: */
	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->src_clk, "core_clk_src", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
	if (ret)
		goto fail;
	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
	if (ret)
		goto fail;

	/* optional clocks: */
	get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);

	/* we need to set a default rate before enabling.  Set a safe
	 * rate first, then figure out hw revision, and then set a
	 * more optimal rate:
	 */
	clk_set_rate(mdp5_kms->src_clk, 200000000);

	read_hw_revision(mdp5_kms, &major, &minor);

	mdp5_kms->cfg = mdp5_cfg_init(mdp5_kms, major, minor);
	if (IS_ERR(mdp5_kms->cfg)) {
		ret = PTR_ERR(mdp5_kms->cfg);
		mdp5_kms->cfg = NULL;
		goto fail;
	}

	config = mdp5_cfg_get_config(mdp5_kms->cfg);
	mdp5_kms->caps = config->hw->mdp.caps;

	/* TODO: compute core clock rate at runtime */
	clk_set_rate(mdp5_kms->src_clk, config->hw->max_clk);

	/*
	 * Some chipsets have a Shared Memory Pool (SMP), while others
	 * have dedicated latency buffering per source pipe instead;
	 * this section initializes the SMP:
	 */
	if (mdp5_kms->caps & MDP_CAP_SMP) {
		mdp5_kms->smp = mdp5_smp_init(mdp5_kms->dev, &config->hw->smp);
		if (IS_ERR(mdp5_kms->smp)) {
			ret = PTR_ERR(mdp5_kms->smp);
			mdp5_kms->smp = NULL;
			goto fail;
		}
	}

	mdp5_kms->ctlm = mdp5_ctlm_init(dev, mdp5_kms->mmio, mdp5_kms->cfg);
	if (IS_ERR(mdp5_kms->ctlm)) {
		ret = PTR_ERR(mdp5_kms->ctlm);
		mdp5_kms->ctlm = NULL;
		goto fail;
	}

	/* make sure things are off before attaching iommu (bootloader could
	 * have left things on, in which case we'll start getting faults if
	 * we don't disable):
	 */
	mdp5_enable(mdp5_kms);
	for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
		if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
				!config->hw->intf.base[i])
			continue;
		mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);

		mdp5_write(mdp5_kms, REG_MDP5_INTF_FRAME_LINE_COUNT_EN(i), 0x3);
	}
	mdp5_disable(mdp5_kms);
	mdelay(16);

	if (config->platform.iommu) {
		mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
		if (IS_ERR(mmu)) {
			ret = PTR_ERR(mmu);
			dev_err(dev->dev, "failed to init iommu: %d\n", ret);
			iommu_domain_free(config->platform.iommu);
			goto fail;
		}

		ret = mmu->funcs->attach(mmu, iommu_ports,
				ARRAY_SIZE(iommu_ports));
		if (ret) {
			dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
			mmu->funcs->destroy(mmu);
			goto fail;
		}
	} else {
		dev_info(dev->dev, "no iommu, fallback to phys "
				"contig buffers for scanout\n");
		mmu = NULL;
	}
	mdp5_kms->mmu = mmu;

	mdp5_kms->id = msm_register_mmu(dev, mmu);
	if (mdp5_kms->id < 0) {
		ret = mdp5_kms->id;
		dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
		goto fail;
	}

	ret = modeset_init(mdp5_kms);
	if (ret) {
		dev_err(dev->dev, "modeset_init failed: %d\n", ret);
		goto fail;
	}

	dev->mode_config.min_width = 0;
	dev->mode_config.min_height = 0;
	dev->mode_config.max_width = config->hw->lm.max_width;
	dev->mode_config.max_height = config->hw->lm.max_height;

	dev->driver->get_vblank_timestamp = mdp5_get_vblank_timestamp;
	dev->driver->get_scanout_position = mdp5_get_scanoutpos;
	dev->driver->get_vblank_counter = mdp5_get_vblank_counter;
	dev->max_vblank_count = 0xffffffff;
	dev->vblank_disable_immediate = true;

	return kms;

fail:
	if (kms)
		mdp5_destroy(kms);
	return ERR_PTR(ret);
}
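The same ownership handoff appears here: config->platform.iommu is freed directly only when msm_iommu_new() fails. Once the msm_mmu wrapper exists, cleanup goes through mmu->funcs->destroy(); Example #21 shows such a destroy callback freeing the wrapped domain.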
Example #18
int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, true);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		goto err_put_device;

	/* Set up the IOMMU context bank */
	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		goto err_put_device;

	/* Allocate memory for the HFI queues */
	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->hfi))
		goto err_memory;

	/* Allocate memory for the GMU debug region */
	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->debug))
		goto err_memory;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
	if (IS_ERR(gmu->mmio))
		goto err_memory;

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err_mmio;

	/*
	 * Get a link to the GX power domain to reset the GPU in case of GMU
	 * crash
	 */
	gmu->gxpd = dev_pm_domain_attach_by_name(gmu->dev, "gx");

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	gmu->initialized = true;

	return 0;

err_mmio:
	iounmap(gmu->mmio);
	free_irq(gmu->gmu_irq, gmu);
	free_irq(gmu->hfi_irq, gmu);
err_memory:
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	if (gmu->domain) {
		iommu_detach_device(gmu->domain, gmu->dev);

		iommu_domain_free(gmu->domain);
	}
	ret = -ENODEV;

err_put_device:
	/* Drop reference taken in of_find_device_by_node */
	put_device(gmu->dev);

	return ret;
}
Example #19
File: a6xx_gmu.c Project: Lyude/linux
int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
{
	struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
	struct platform_device *pdev = of_find_device_by_node(node);
	int ret;

	if (!pdev)
		return -ENODEV;

	gmu->dev = &pdev->dev;

	of_dma_configure(gmu->dev, node, false);

	/* For now, don't do anything fancy until we get our feet under us */
	gmu->idle_level = GMU_IDLE_STATE_ACTIVE;

	pm_runtime_enable(gmu->dev);
	gmu->gx = devm_regulator_get(gmu->dev, "vdd");

	/* Get the list of clocks */
	ret = a6xx_gmu_clocks_probe(gmu);
	if (ret)
		return ret;

	/* Set up the IOMMU context bank */
	ret = a6xx_gmu_memory_probe(gmu);
	if (ret)
		return ret;

	/* Allocate memory for the HFI queues */
	gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->hfi))
		goto err;

	/* Allocate memory for the GMU debug region */
	gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
	if (IS_ERR(gmu->debug))
		goto err;

	/* Map the GMU registers */
	gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");

	/* Map the GPU power domain controller registers */
	gmu->pdc_mmio = a6xx_gmu_get_mmio(pdev, "gmu_pdc");

	if (IS_ERR(gmu->mmio) || IS_ERR(gmu->pdc_mmio))
		goto err;

	/* Get the HFI and GMU interrupts */
	gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
	gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);

	if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
		goto err;

	/* Set up a tasklet to handle GMU HFI responses */
	tasklet_init(&gmu->hfi_tasklet, a6xx_hfi_task, (unsigned long) gmu);

	/* Get the power levels for the GMU and GPU */
	a6xx_gmu_pwrlevels_probe(gmu);

	/* Set up the HFI queues */
	a6xx_hfi_init(gmu);

	return 0;
err:
	a6xx_gmu_memory_free(gmu, gmu->hfi);

	if (gmu->domain) {
		iommu_detach_device(gmu->domain, gmu->dev);

		iommu_domain_free(gmu->domain);
	}

	return -ENODEV;
}
Example #20
static int ivp_smmu_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ivp_smmu_dev *smmu_dev = NULL;
	struct iommu_domain_data *domain_info = NULL;
	struct resource *res = NULL;
	unsigned int *ver = NULL;
	int ret = 0;

	pr_info("%s: smmu driver start\n", __func__);

	smmu_dev = devm_kzalloc(&pdev->dev, sizeof(*smmu_dev), GFP_KERNEL);
	if (!smmu_dev) {
		pr_err("%s: devm_kzalloc failed\n", __func__);
		return -ENOMEM;
	}
	smmu_dev->dev = &pdev->dev;
	smmu_dev->state = SMMU_STATE_DISABLE;

	/* get smmu version */
	ver = (unsigned int *)of_get_property(np, "hisi,smmu-version", NULL);
	if (ver) {
		smmu_dev->version = be32_to_cpu(*ver);
		pr_info("%s: smmu version is %u\n", __func__, smmu_dev->version);
	}

	/* get IOMEM resource */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		pr_err("%s:platform_get_resource err\n", __func__);
		return -ENOENT;
	}
	smmu_dev->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(smmu_dev->reg_base)) {
		pr_err("%s: remap resource err\n", __func__);
		return PTR_ERR(smmu_dev->reg_base);
	}
	smmu_dev->reg_size = resource_size(res);

	/* get IRQ resource */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		pr_err("%s: failed to get IRQ\n", __func__);
		return -ENOENT;
	}
	smmu_dev->irq = (unsigned int)res->start;
	smmu_dev->isr = ivp_smmu_isr;

	/**
	 * get domain and physical pgd base address
	 */
	smmu_dev->domain = iommu_domain_alloc(pdev->dev.bus);
	if (!smmu_dev->domain) {
		pr_err("%s: get domain failed\n", __func__);
		return -ENODEV;
	} else {
		ret = iommu_attach_device(smmu_dev->domain, &pdev->dev);
		if (ret) {
			iommu_domain_free(smmu_dev->domain);
			pr_err("%s: iommu attach failed ret[0x%x]\n", __func__, ret);
			return -ENODEV;
		}

		domain_info = (struct iommu_domain_data *)smmu_dev->domain->priv;
		smmu_dev->pgd_base = (unsigned long)domain_info->phy_pgd_base;
	}

	/**
	 * for the ivp subsys, only support:
	 * Context Bank:0; Virtual Machine ID:0; CB attribute:S1_TRANS_S2_BYPASS
	 */
	smmu_dev->cbidx = SMMU_CB_IDX_IVP;
	smmu_dev->vmid  = SMMU_CB_VMID_IVP;
	smmu_dev->cbar  = SMMU_CBAR_TYPE_S1_TRANS_S2_BYPASS;

	spin_lock_init(&smmu_dev->spinlock);
	g_smmu_dev = smmu_dev;

	pr_info("%s: smmu driver probe finished\n", __func__);

	if (client_ivp == NULL)
		client_ivp = dsm_register_client(&dev_ivp);

	return 0;
}
Example #21
static void msm_iommu_destroy(struct msm_mmu *mmu)
{
	struct msm_iommu *iommu = to_msm_iommu(mmu);

	iommu_domain_free(iommu->domain);
	kfree(iommu);
}