Example no. 1
/*
 * kgsl_iommu_create_pagetable - Create an IOMMU pagetable
 *
 * Allocate memory to hold a pagetable and allocate the IOMMU
 * domain, which is the actual IOMMU pagetable.
 * Return - pointer to the new pagetable, or NULL on failure
 */
void *kgsl_iommu_create_pagetable(void)
{
	struct kgsl_iommu_pt *iommu_pt;

	iommu_pt = kzalloc(sizeof(struct kgsl_iommu_pt), GFP_KERNEL);
	if (!iommu_pt) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
				sizeof(struct kgsl_iommu_pt));
		return NULL;
	}
	/* L2 redirect is not stable on IOMMU v2 */
	if (msm_soc_version_supports_iommu_v1())
		iommu_pt->domain = iommu_domain_alloc(&platform_bus_type,
					MSM_IOMMU_DOMAIN_PT_CACHEABLE);
	else
		iommu_pt->domain = iommu_domain_alloc(&platform_bus_type,
					0);
	if (!iommu_pt->domain) {
		KGSL_CORE_ERR("Failed to create iommu domain\n");
		kfree(iommu_pt);
		return NULL;
	}
	iommu_set_fault_handler(iommu_pt->domain,
			kgsl_iommu_fault_handler);

	return iommu_pt;
}
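None of the kgsl variants collected on this page show the matching teardown. Below is a minimal sketch of one, assuming the same kgsl_iommu_pt layout and that no device is still attached to the domain; the name kgsl_iommu_destroy_pagetable is used for illustration only.

/*
 * Sketch: free the domain first, then the wrapper allocated by
 * kgsl_iommu_create_pagetable().
 */
static void kgsl_iommu_destroy_pagetable(void *mmu_specific_pt)
{
	struct kgsl_iommu_pt *iommu_pt = mmu_specific_pt;

	if (!iommu_pt)
		return;
	if (iommu_pt->domain)
		iommu_domain_free(iommu_pt->domain);
	kfree(iommu_pt);
}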
Example no. 2
static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
{
	int ret;

	/*
	 * The GMU address space is hardcoded to treat the range
	 * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
	 * between the GMU and the CPU will live in this space
	 */
	gmu->uncached_iova_base = 0x60000000;

	gmu->domain = iommu_domain_alloc(&platform_bus_type);
	if (!gmu->domain)
		return -ENODEV;

	ret = iommu_attach_device(gmu->domain, gmu->dev);
	if (ret) {
		iommu_domain_free(gmu->domain);
		gmu->domain = NULL;
	}

	return ret;
}
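Once the attach succeeds, buffers shared between the GMU and the CPU are mapped at fixed offsets inside that un-cached window. A minimal sketch against the classic five-argument iommu_map(); the helper name a6xx_gmu_map_buffer is hypothetical.

/* Sketch: map one page into the GMU's un-cached 0x60000000 window. */
static int a6xx_gmu_map_buffer(struct a6xx_gmu *gmu, struct page *page,
			       u64 offset)
{
	u64 iova = gmu->uncached_iova_base + offset;

	/* Readable and writable from both the GMU and the CPU side. */
	return iommu_map(gmu->domain, iova, page_to_phys(page),
			 PAGE_SIZE, IOMMU_READ | IOMMU_WRITE);
}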
Example no. 3
File: iommu.c Project: Addision/LVS
int kvm_iommu_map_guest(struct kvm *kvm)
{
	int r;

	if (!iommu_found()) {
		printk(KERN_ERR "%s: iommu not found\n", __func__);
		return -ENODEV;
	}

	kvm->arch.iommu_domain = iommu_domain_alloc();
	if (!kvm->arch.iommu_domain)
		return -ENOMEM;

	if (!allow_unsafe_assigned_interrupts &&
	    !iommu_domain_has_cap(kvm->arch.iommu_domain,
				  IOMMU_CAP_INTR_REMAP)) {
		printk(KERN_WARNING "%s: No interrupt remapping support, disallowing device assignment.  Re-enble with \"allow_unsafe_assigned_interrupts=1\" module option.\n", __func__);
		iommu_domain_free(kvm->arch.iommu_domain);
		kvm->arch.iommu_domain = NULL;
		return -EPERM;
	}

	r = kvm_iommu_map_memslots(kvm);
	if (r)
		goto out_unmap;

	return 0;

out_unmap:
	kvm_iommu_unmap_memslots(kvm);
	return r;
}
Example no. 4

void *kgsl_iommu_create_pagetable(void)
{
	struct iommu_domain *domain = iommu_domain_alloc(0);
	if (!domain)
		KGSL_CORE_ERR("Failed to create iommu domain\n");

	return domain;
}
Example no. 5
gceSTATUS
gckIOMMU_Construct(
    IN gckOS Os,
    OUT gckIOMMU * Iommu
    )
{
    gceSTATUS status;
    gckIOMMU iommu = gcvNULL;
    struct device *dev;
    int ret;

    gcmkHEADER();

    dev = &Os->device->platform->device->dev;

    gcmkONERROR(gckOS_Allocate(Os, gcmSIZEOF(gcsIOMMU), (gctPOINTER *)&iommu));

    gckOS_ZeroMemory(iommu, gcmSIZEOF(gcsIOMMU));

    iommu->domain = iommu_domain_alloc(&platform_bus_type);

    if (!iommu->domain)
    {
        gcmkTRACE_ZONE(gcvLEVEL_INFO, gcvZONE_OS, "iommu_domain_alloc() fail");

        gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
    }

    iommu_set_fault_handler(iommu->domain, _IOMMU_Fault_Handler, dev);

    ret = iommu_attach_device(iommu->domain, dev);

    if (ret)
    {
        gcmkTRACE_ZONE(
            gcvLEVEL_INFO, gcvZONE_OS, "iommu_attach_device() fail %d", ret);

        gcmkONERROR(gcvSTATUS_NOT_SUPPORTED);
    }

    iommu->device = dev;

    _FlatMapping(iommu);

    *Iommu = iommu;

    gcmkFOOTER_NO();
    return gcvSTATUS_OK;

OnError:

    gckIOMMU_Destory(Os, iommu);

    gcmkFOOTER();
    return status;
}
Example no. 6
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
{
	static struct mdp4_platform_config config = {};

	/* TODO: Chips that aren't apq8064 have a 200 MHz max_clk */
	config.max_clk = 266667000;
	config.iommu = iommu_domain_alloc(&platform_bus_type);

	return &config;
}
Example no. 7
static void
nvkm_device_tegra_probe_iommu(struct nvkm_device_tegra *tdev)
{
#if IS_ENABLED(CONFIG_IOMMU_API)
	struct device *dev = &tdev->pdev->dev;
	unsigned long pgsize_bitmap;
	int ret;

#if IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)
	if (dev->archdata.mapping) {
		struct dma_iommu_mapping *mapping = to_dma_iommu_mapping(dev);

		arm_iommu_detach_device(dev);
		arm_iommu_release_mapping(mapping);
	}
#endif

	if (!tdev->func->iommu_bit)
		return;

	mutex_init(&tdev->iommu.mutex);

	if (iommu_present(&platform_bus_type)) {
		tdev->iommu.domain = iommu_domain_alloc(&platform_bus_type);
		if (!tdev->iommu.domain)
			goto error;

		/*
		 * An IOMMU is only usable if it supports page sizes smaller
		 * or equal to the system's PAGE_SIZE, with a preference if
		 * both are equal.
		 */
		pgsize_bitmap = tdev->iommu.domain->ops->pgsize_bitmap;
		if (pgsize_bitmap & PAGE_SIZE) {
			tdev->iommu.pgshift = PAGE_SHIFT;
		} else {
			tdev->iommu.pgshift = fls(pgsize_bitmap & ~PAGE_MASK);
			if (tdev->iommu.pgshift == 0) {
				dev_warn(dev, "unsupported IOMMU page size\n");
				goto free_domain;
			}
			tdev->iommu.pgshift -= 1;
		}

		ret = iommu_attach_device(tdev->iommu.domain, dev);
		if (ret)
			goto free_domain;

		ret = nvkm_mm_init(&tdev->iommu.mm, 0, 0,
				   (1ULL << tdev->func->iommu_bit) >>
				   tdev->iommu.pgshift, 1);
		if (ret)
			goto detach_device;
	}
Example no. 8

int hisi_ion_enable_iommu(struct platform_device *pdev)
{
	int ret;
	struct iommu_domain_capablity data;
	struct device *dev = &pdev->dev;
	struct hisi_iommu_domain *hisi_domain;
	struct device_node *np = pdev->dev.of_node;

	printk(KERN_ERR"in %s start \n",__func__);
	hisi_domain = kzalloc(sizeof(*hisi_domain), GFP_KERNEL);
	if (!hisi_domain) {
		dbg("alloc hisi_domain object fail \n");
		return -ENOMEM;
	}
	if (!iommu_present(dev->bus)) {
		dbg("iommu not found\n");
		kfree(hisi_domain);
		return 0;
	}

	/* create iommu domain */
	hisi_domain->domain = iommu_domain_alloc(dev->bus);
	if (!hisi_domain->domain) {
		ret = -EINVAL;
		goto error;
	}
	ret = iommu_attach_device(hisi_domain->domain, dev);
	if (ret)
		goto error;
	get_range_info(np, hisi_domain, &data);
	/* buffers allocated from this pool are aligned as the IOMMU requires */
	hisi_domain->iova_pool = iova_pool_setup(data.iova_start,
			data.iova_end, data.iova_align);
	if (!hisi_domain->iova_pool) {
		ret = -EINVAL;
		goto error;
	}

	/* this is a global pointer */
	hisi_iommu_domain_p = hisi_domain;

	dbg("in %s end \n",__func__);
	return 0;

error:
	WARN(1, "hisi_iommu_domain_init failed!\n");
	if (hisi_domain->iova_pool)
		iova_pool_destory(hisi_domain->iova_pool);
	if (hisi_domain->domain)
		iommu_domain_free(hisi_domain->domain);
	kfree(hisi_domain);

	return ret;
}
Example no. 9
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
{
	static struct mdp4_platform_config config = {};

	/* TODO: Chips that aren't apq8064 have a 200 MHz max_clk */
	config.max_clk = 266667000;
	config.iommu = iommu_domain_alloc(&platform_bus_type);
	if (config.iommu) {
		config.iommu->geometry.aperture_start = 0x1000;
		config.iommu->geometry.aperture_end = 0xffffffff;
	}

	return &config;
}
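Setting the aperture by hand like this only helps if the mapping code respects it. A small sketch of a guard a caller might apply before iommu_map(), using only the geometry fields set above; the helper name is hypothetical.

/* Sketch: check an IOVA range against the domain's aperture. */
static bool iova_in_aperture(struct iommu_domain *domain, u64 iova,
			     size_t size)
{
	struct iommu_domain_geometry *geo = &domain->geometry;

	return iova >= geo->aperture_start &&
	       iova + size - 1 <= geo->aperture_end;
}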
Example no. 10
static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
{
	struct host1x_device *device = to_host1x_device(drm->dev);
	struct tegra_drm *tegra;
	int err;

	tegra = kzalloc(sizeof(*tegra), GFP_KERNEL);
	if (!tegra)
		return -ENOMEM;

	if (iommu_present(&platform_bus_type)) {
		u64 carveout_start, carveout_end, gem_start, gem_end;
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		tegra->domain = iommu_domain_alloc(&platform_bus_type);
		if (!tegra->domain) {
			err = -ENOMEM;
			goto free;
		}

		err = iova_cache_get();
		if (err < 0)
			goto domain;

		geometry = &tegra->domain->geometry;
		gem_start = geometry->aperture_start;
		gem_end = geometry->aperture_end - CARVEOUT_SZ;
		carveout_start = gem_end + 1;
		carveout_end = geometry->aperture_end;

		order = __ffs(tegra->domain->pgsize_bitmap);
		init_iova_domain(&tegra->carveout.domain, 1UL << order,
				 carveout_start >> order);

		tegra->carveout.shift = iova_shift(&tegra->carveout.domain);
		tegra->carveout.limit = carveout_end >> tegra->carveout.shift;

		drm_mm_init(&tegra->mm, gem_start, gem_end - gem_start + 1);
		mutex_init(&tegra->mm_lock);

		DRM_DEBUG("IOMMU apertures:\n");
		DRM_DEBUG("  GEM: %#llx-%#llx\n", gem_start, gem_end);
		DRM_DEBUG("  Carveout: %#llx-%#llx\n", carveout_start,
			  carveout_end);
	}
Example no. 11
static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
{
	static struct mdp4_platform_config config = {};
#ifdef CONFIG_OF
	/* TODO */
	config.max_clk = 266667000;
	config.iommu = iommu_domain_alloc(&platform_bus_type);
#else
	if (cpu_is_apq8064())
		config.max_clk = 266667000;
	else
		config.max_clk = 200000000;

	config.iommu = msm_get_iommu_domain(DISPLAY_READ_DOMAIN);
#endif
	return &config;
}
Example no. 12
int exynos_init_iovmm(struct device *sysmmu, struct exynos_iovmm *vmm1)
{
	int ret = 0;
	struct sysmmu_drvdata *data = dev_get_drvdata(sysmmu);
	struct exynos_iovmm *vmm = &data->vmm;

	vmm->vmm_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (!vmm->vmm_pool) {
		ret = -ENOMEM;
		goto err_setup_genalloc;
	}

	/* (1GB - 4KB) addr space from 0xC0000000 */
	ret = gen_pool_add(vmm->vmm_pool, IOVA_START, IOVM_SIZE, -1);
	if (ret)
		goto err_setup_domain;
	vmm->domain = iommu_domain_alloc(&platform_bus_type);
	if (!vmm->domain) {
	printk("exynos_init_iovmm-------line = %d\n",__LINE__);
		ret = -ENOMEM;
		goto err_setup_domain;
	}

	spin_lock_init(&vmm->lock);

	INIT_LIST_HEAD(&vmm->regions_list);

	dev_dbg(sysmmu, "IOVMM: Created %#x B IOVMM from %#x.\n",
						IOVM_SIZE, IOVA_START);
	return 0;
err_setup_domain:
	gen_pool_destroy(vmm->vmm_pool);
err_setup_genalloc:
	dev_dbg(sysmmu, "IOVMM: Failed to create IOVMM (%d)\n", ret);

	return ret;
}
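The pool and the domain are only wired together once callers start allocating. A minimal sketch of how an IOVA might be carved out of vmm_pool and backed through the domain, assuming the classic five-argument iommu_map(); iovmm_map_page is a hypothetical helper.

/* Sketch: allocate an IOVA from the pool and map one physical page. */
static dma_addr_t iovmm_map_page(struct exynos_iovmm *vmm, phys_addr_t phys)
{
	unsigned long iova = gen_pool_alloc(vmm->vmm_pool, PAGE_SIZE);

	if (!iova)
		return 0;
	if (iommu_map(vmm->domain, iova, phys, PAGE_SIZE,
		      IOMMU_READ | IOMMU_WRITE)) {
		gen_pool_free(vmm->vmm_pool, iova, PAGE_SIZE);
		return 0;
	}
	return iova;
}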
Example no. 13
/*
 * kgsl_iommu_create_pagetable - Create an IOMMU pagetable
 *
 * Allocate memory to hold a pagetable and allocate the IOMMU
 * domain, which is the actual IOMMU pagetable.
 * Return - pointer to the new pagetable, or NULL on failure
 */
void *kgsl_iommu_create_pagetable(void)
{
	struct kgsl_iommu_pt *iommu_pt;

	iommu_pt = kzalloc(sizeof(struct kgsl_iommu_pt), GFP_KERNEL);
	if (!iommu_pt) {
		KGSL_CORE_ERR("kzalloc(%d) failed\n",
				sizeof(struct kgsl_iommu_pt));
		return NULL;
	}
	iommu_pt->domain = iommu_domain_alloc(&platform_bus_type,
					MSM_IOMMU_DOMAIN_PT_CACHEABLE);
	if (!iommu_pt->domain) {
		KGSL_CORE_ERR("Failed to create iommu domain\n");
		kfree(iommu_pt);
		return NULL;
	}
	iommu_set_fault_handler(iommu_pt->domain,
			kgsl_iommu_fault_handler);

	return iommu_pt;
}
Example no. 14
int kvm_iommu_map_guest(struct kvm *kvm)
{
    int r;

    if (!iommu_found()) {
        printk(KERN_ERR "%s: iommu not found\n", __func__);
        return -ENODEV;
    }

    kvm->arch.iommu_domain = iommu_domain_alloc();
    if (!kvm->arch.iommu_domain)
        return -ENOMEM;

    r = kvm_iommu_map_memslots(kvm);
    if (r)
        goto out_unmap;

    return 0;

out_unmap:
    kvm_iommu_unmap_memslots(kvm);
    return r;
}
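Examples no. 3 and no. 14 predate the bus-aware API used everywhere else on this page: iommu_domain_alloc() took no arguments and was guarded by iommu_found(), while later kernels take a bus pointer and are guarded by iommu_present(). A sketch of code straddling both eras, with HAVE_BUS_IOMMU_API as a hypothetical config switch:

static struct iommu_domain *compat_domain_alloc(void)
{
#ifdef HAVE_BUS_IOMMU_API	/* hypothetical: bus-aware API available */
	if (!iommu_present(&platform_bus_type))
		return NULL;
	return iommu_domain_alloc(&platform_bus_type);
#else				/* legacy no-argument API */
	if (!iommu_found())
		return NULL;
	return iommu_domain_alloc();
#endif
}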
Example no. 15
static int host1x_probe(struct platform_device *pdev)
{
	const struct of_device_id *id;
	struct host1x *host;
	struct resource *regs;
	int syncpt_irq;
	int err;

	id = of_match_device(host1x_of_match, &pdev->dev);
	if (!id)
		return -EINVAL;

	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!regs) {
		dev_err(&pdev->dev, "failed to get registers\n");
		return -ENXIO;
	}

	syncpt_irq = platform_get_irq(pdev, 0);
	if (syncpt_irq < 0) {
		dev_err(&pdev->dev, "failed to get IRQ\n");
		return -ENXIO;
	}

	host = devm_kzalloc(&pdev->dev, sizeof(*host), GFP_KERNEL);
	if (!host)
		return -ENOMEM;

	mutex_init(&host->devices_lock);
	INIT_LIST_HEAD(&host->devices);
	INIT_LIST_HEAD(&host->list);
	host->dev = &pdev->dev;
	host->info = id->data;

	/* set common host1x device data */
	platform_set_drvdata(pdev, host);

	host->regs = devm_ioremap_resource(&pdev->dev, regs);
	if (IS_ERR(host->regs))
		return PTR_ERR(host->regs);

	dma_set_mask_and_coherent(host->dev, host->info->dma_mask);

	if (host->info->init) {
		err = host->info->init(host);
		if (err)
			return err;
	}

	host->clk = devm_clk_get(&pdev->dev, NULL);
	if (IS_ERR(host->clk)) {
		dev_err(&pdev->dev, "failed to get clock\n");
		err = PTR_ERR(host->clk);
		return err;
	}

	host->rst = devm_reset_control_get(&pdev->dev, "host1x");
	if (IS_ERR(host->rst)) {
		err = PTR_ERR(host->rst);
		dev_err(&pdev->dev, "failed to get reset: %d\n", err);
		return err;
	}

	if (iommu_present(&platform_bus_type)) {
		struct iommu_domain_geometry *geometry;
		unsigned long order;

		host->domain = iommu_domain_alloc(&platform_bus_type);
		if (!host->domain)
			return -ENOMEM;

		err = iommu_attach_device(host->domain, &pdev->dev);
		if (err)
			goto fail_free_domain;

		geometry = &host->domain->geometry;

		order = __ffs(host->domain->pgsize_bitmap);
		init_iova_domain(&host->iova, 1UL << order,
				 geometry->aperture_start >> order,
				 geometry->aperture_end >> order);
		host->iova_end = geometry->aperture_end;
	}

	err = host1x_channel_list_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize channel list\n");
		goto fail_detach_device;
	}

	err = clk_prepare_enable(host->clk);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to enable clock\n");
		goto fail_detach_device;
	}

	err = reset_control_deassert(host->rst);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to deassert reset: %d\n", err);
		goto fail_unprepare_disable;
	}

	err = host1x_syncpt_init(host);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize syncpts\n");
		goto fail_reset_assert;
	}

	err = host1x_intr_init(host, syncpt_irq);
	if (err) {
		dev_err(&pdev->dev, "failed to initialize interrupts\n");
		goto fail_deinit_syncpt;
	}

	host1x_debug_init(host);

	err = host1x_register(host);
	if (err < 0)
		goto fail_deinit_intr;

	return 0;

fail_deinit_intr:
	host1x_intr_deinit(host);
fail_deinit_syncpt:
	host1x_syncpt_deinit(host);
fail_reset_assert:
	reset_control_assert(host->rst);
fail_unprepare_disable:
	clk_disable_unprepare(host->clk);
fail_detach_device:
	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_device(host->domain, &pdev->dev);
	}
fail_free_domain:
	if (host->domain)
		iommu_domain_free(host->domain);

	return err;
}
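The error labels above unwind in strict reverse order of setup, and a remove path would do the same. A sketch, assuming only the fields initialized in the probe above:

/* Sketch: tear down in reverse order of host1x_probe(). */
static int host1x_remove(struct platform_device *pdev)
{
	struct host1x *host = platform_get_drvdata(pdev);

	host1x_intr_deinit(host);
	host1x_syncpt_deinit(host);
	reset_control_assert(host->rst);
	clk_disable_unprepare(host->clk);

	if (host->domain) {
		put_iova_domain(&host->iova);
		iommu_detach_device(host->domain, &pdev->dev);
		iommu_domain_free(host->domain);
	}

	return 0;
}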
Example no. 16
	if (!is_support_iommu)
		return;

	iommu_detach_device(domain, dev);
}

static int rockchip_drm_init_iommu(struct drm_device *drm_dev)
{
	struct rockchip_drm_private *private = drm_dev->dev_private;
	struct iommu_domain_geometry *geometry;
	u64 start, end;

	if (!is_support_iommu)
		return 0;

	private->domain = iommu_domain_alloc(&platform_bus_type);
	if (!private->domain)
		return -ENOMEM;

	geometry = &private->domain->geometry;
	start = geometry->aperture_start;
	end = geometry->aperture_end;

	DRM_DEBUG("IOMMU context initialized (aperture: %#llx-%#llx)\n",
		  start, end);
	drm_mm_init(&private->mm, start, end - start + 1);
	mutex_init(&private->mm_lock);

	return 0;
}
Example no. 17

static int ivp_smmu_probe(struct platform_device *pdev)
{
	struct device_node *np = pdev->dev.of_node;
	struct ivp_smmu_dev *smmu_dev = NULL;
	struct iommu_domain_data *domain_info = NULL;
	struct resource *res = NULL;
	unsigned int *ver = NULL;
	int ret = 0;

	pr_info("%s: smmu driver start\n", __func__);

	smmu_dev = devm_kzalloc(&pdev->dev, sizeof(*smmu_dev), GFP_KERNEL);
	if (!smmu_dev) {
		pr_err("%s: devm_kzalloc failed\n", __func__);
		return -ENOMEM;
	}
	smmu_dev->dev = &pdev->dev;
	smmu_dev->state = SMMU_STATE_DISABLE;

	/* get smmu version */
	ver = (unsigned int *)of_get_property(np, "hisi,smmu-version", NULL);
	if (ver) {
		smmu_dev->version = be32_to_cpu(*ver); 
		pr_info("%s: smmu version is %u\n", __func__, be32_to_cpu(*ver));
	}

	/* get IOMEM resource */
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res) {
		pr_err("%s:platform_get_resource err\n", __func__);
		return -ENOENT;
	}
	smmu_dev->reg_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(smmu_dev->reg_base)) {
		pr_err("%s: remap resource err\n", __func__);
		return PTR_ERR(smmu_dev->reg_base);
	}
	smmu_dev->reg_size = resource_size(res);

	/* get IRQ resource */
	res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!res) {
		pr_err("%s: failed to get IRQ\n", __func__);
		return -ENOENT;
	}
	smmu_dev->irq = (unsigned int)res->start;
	smmu_dev->isr = ivp_smmu_isr;

	/**
	 * get domain and physical pgd base address
	 */
	smmu_dev->domain = iommu_domain_alloc(pdev->dev.bus);
	if (!smmu_dev->domain) {
		pr_err("%s: get domain failed\n", __func__);
		return -ENODEV;
	}

	ret = iommu_attach_device(smmu_dev->domain, &pdev->dev);
	if (ret) {
		iommu_domain_free(smmu_dev->domain);
		pr_err("%s: iommu attach failed ret[0x%x]\n", __func__, ret);
		return -ENODEV;
	}

	domain_info = (struct iommu_domain_data *)smmu_dev->domain->priv;
	smmu_dev->pgd_base = (unsigned long)domain_info->phy_pgd_base;

	/**
	 * for the ivp subsys, only support:
	 * Context Bank:0; Virtual Machine ID:0; CB attribute:S1_TRANS_S2_BYPASS
	 */
	smmu_dev->cbidx = SMMU_CB_IDX_IVP;
	smmu_dev->vmid  = SMMU_CB_VMID_IVP;
	smmu_dev->cbar  = SMMU_CBAR_TYPE_S1_TRANS_S2_BYPASS;

	spin_lock_init(&smmu_dev->spinlock);
	g_smmu_dev = smmu_dev;

	pr_info("%s: smmu driver probes finish\n", __func__);

	if (client_ivp == NULL)
		client_ivp = dsm_register_client(&dev_ivp);

	return 0;
}
Example no. 18

int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
		struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
		const char *name, const char *ioname, const char *irqname, int ringsz)
{
	int i, ret;

	gpu->dev = drm;
	gpu->funcs = funcs;
	gpu->name = name;

	INIT_LIST_HEAD(&gpu->active_list);
	INIT_WORK(&gpu->retire_work, retire_worker);
	INIT_WORK(&gpu->recover_work, recover_worker);

	setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
			(unsigned long)gpu);

	BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));

	/* Map registers: */
	gpu->mmio = msm_ioremap(pdev, ioname, name);
	if (IS_ERR(gpu->mmio)) {
		ret = PTR_ERR(gpu->mmio);
		goto fail;
	}

	/* Get Interrupt: */
	gpu->irq = platform_get_irq_byname(pdev, irqname);
	if (gpu->irq < 0) {
		ret = gpu->irq;
		dev_err(drm->dev, "failed to get irq: %d\n", ret);
		goto fail;
	}

	ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
			IRQF_TRIGGER_HIGH, gpu->name, gpu);
	if (ret) {
		dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
		goto fail;
	}

	/* Acquire clocks: */
	for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
		gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
		DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
		if (IS_ERR(gpu->grp_clks[i]))
			gpu->grp_clks[i] = NULL;
	}

	gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
	DBG("ebi1_clk: %p", gpu->ebi1_clk);
	if (IS_ERR(gpu->ebi1_clk))
		gpu->ebi1_clk = NULL;

	/* Acquire regulators: */
	gpu->gpu_reg = devm_regulator_get(&pdev->dev, "vdd");
	DBG("gpu_reg: %p", gpu->gpu_reg);
	if (IS_ERR(gpu->gpu_reg))
		gpu->gpu_reg = NULL;

	gpu->gpu_cx = devm_regulator_get(&pdev->dev, "vddcx");
	DBG("gpu_cx: %p", gpu->gpu_cx);
	if (IS_ERR(gpu->gpu_cx))
		gpu->gpu_cx = NULL;

	/* Setup IOMMU.. eventually we will (I think) do this once per context
	 * and have separate page tables per context.  For now, to keep things
	 * simple and to get something working, just use a single address space:
	 */
	gpu->iommu = iommu_domain_alloc(&platform_bus_type);
	if (!gpu->iommu) {
		dev_err(drm->dev, "failed to allocate IOMMU\n");
		ret = -ENOMEM;
		goto fail;
	}
	gpu->id = msm_register_iommu(drm, gpu->iommu);

	/* Create ringbuffer: */
	gpu->rb = msm_ringbuffer_new(gpu, ringsz);
	if (IS_ERR(gpu->rb)) {
		ret = PTR_ERR(gpu->rb);
		gpu->rb = NULL;
		dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
		goto fail;
	}

	ret = msm_gem_get_iova_locked(gpu->rb->bo, gpu->id, &gpu->rb_iova);
	if (ret) {
		gpu->rb_iova = 0;
		dev_err(drm->dev, "could not map ringbuffer: %d\n", ret);
		goto fail;
	}

	bs_init(gpu, pdev);

	return 0;

fail:
	return ret;
}
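For symmetry, a sketch of the cleanup this init implies, releasing the ringbuffer IOVA before freeing the domain. msm_gem_put_iova() and msm_ringbuffer_destroy() are assumed counterparts to the calls above, not shown in this snippet.

/* Sketch: undo msm_gpu_init() in reverse order. */
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
	if (gpu->rb) {
		if (gpu->rb_iova)
			msm_gem_put_iova(gpu->rb->bo, gpu->id);
		msm_ringbuffer_destroy(gpu->rb);
	}

	if (gpu->iommu)
		iommu_domain_free(gpu->iommu);
}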