Example #1
/* This gets called before _probe(), so read the DT entries directly */
int bpmp_linear_map_init(void)
{
	struct device_node *node;
	DEFINE_DMA_ATTRS(attrs);
	uint32_t of_start;
	uint32_t of_size;
	int ret;

	node = of_find_node_by_path("/bpmp");
	WARN_ON(!node);
	if (!node)
		return -ENODEV;

	ret = of_property_read_u32(node, "carveout-start", &of_start);
	if (ret)
		return ret;

	ret = of_property_read_u32(node, "carveout-size", &of_size);
	if (ret)
		return ret;

	dma_set_attr(DMA_ATTR_SKIP_IOVA_GAP, &attrs);
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
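	/* Attrs for the linear map below: no CPU cache maintenance and no
	 * trailing IOVA gap page for this static carveout mapping. */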
	ret = dma_map_linear_attrs(device, of_start, of_size, 0, &attrs);
	if (ret == DMA_ERROR_CODE)
		return -ENOMEM;

	return 0;
}
Example #2
void tegra_iommu_zap_vm(struct tegra_iovmm_area *area)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	dma_set_attr(DMA_ATTR_SKIP_FREE_IOVA, &attrs);
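	/* Tear down the mapping without CPU cache maintenance, but keep the
	 * IOVA range reserved so the area can be mapped again later. */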
	dma_unmap_single_attrs(area->dev, area->iovm_start, area->iovm_length,
			       0, &attrs);
}
Example #3
struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
		unsigned long size, bool alloc_kmap)
{
	struct mtk_drm_gem_obj *mtk_gem;
	struct drm_gem_object *obj;
	int ret;

	mtk_gem = mtk_drm_gem_init(dev, size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	obj = &mtk_gem->base;

	init_dma_attrs(&mtk_gem->dma_attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &mtk_gem->dma_attrs);

	if (!alloc_kmap)
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &mtk_gem->dma_attrs);

	mtk_gem->kvaddr = dma_alloc_attrs(dev->dev, obj->size,
			(dma_addr_t *)&mtk_gem->dma_addr, GFP_KERNEL,
			&mtk_gem->dma_attrs);
	if (!mtk_gem->kvaddr) {
		DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size);
		ret = -ENOMEM;
		goto err_dma;
	}

	{
		mtk_gem->sgt = kzalloc(sizeof(*mtk_gem->sgt), GFP_KERNEL);
		if (!mtk_gem->sgt) {
			ret = -ENOMEM;
			goto err_sgt;
		}

		ret = dma_get_sgtable_attrs(dev->dev, mtk_gem->sgt,
			mtk_gem->kvaddr, mtk_gem->dma_addr, obj->size,
			&mtk_gem->dma_attrs);
		if (ret) {
			DRM_ERROR("failed to allocate sgt, %d\n", ret);
			goto err_get_sgtable;
		}
	}

	DRM_INFO("kvaddr = %p dma_addr = %pad\n",
			mtk_gem->kvaddr, &mtk_gem->dma_addr);

	return mtk_gem;
err_get_sgtable:
	kfree(mtk_gem->sgt);
err_sgt:
	dma_free_attrs(dev->dev, obj->size, mtk_gem->kvaddr, mtk_gem->dma_addr,
			&mtk_gem->dma_attrs);
err_dma:
	kfree(mtk_gem);
	return ERR_PTR(ret);
}
Example #4
/**
 * bdisp_hw_alloc_nodes
 * @ctx:        bdisp context
 *
 * Allocate dma memory for nodes
 *
 * RETURNS:
 * 0 on success
 */
int bdisp_hw_alloc_nodes(struct bdisp_ctx *ctx)
{
	struct device *dev = ctx->bdisp_dev->dev;
	unsigned int i, node_size = sizeof(struct bdisp_node);
	void *base;
	dma_addr_t paddr;
	DEFINE_DMA_ATTRS(attrs);

	/* Allocate all the nodes within a single memory page */
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr,
			       GFP_KERNEL | GFP_DMA, &attrs);
	if (!base) {
		dev_err(dev, "%s no mem\n", __func__);
		return -ENOMEM;
	}

	memset(base, 0, node_size * MAX_NB_NODE);

	for (i = 0; i < MAX_NB_NODE; i++) {
		ctx->node[i] = base;
		ctx->node_paddr[i] = paddr;
		dev_dbg(dev, "node[%d]=0x%p (paddr=%pad)\n", i, ctx->node[i],
			&paddr);
		base += node_size;
		paddr += node_size;
	}

	return 0;
}
Example #5
int pas_init_image(enum pas_id id, const u8 *metadata, size_t size)
{
	int ret;
	struct pas_init_image_req {
		u32	proc;
		u32	image_addr;
	} request;
	u32 scm_ret = 0;
	void *mdata_buf;
	dma_addr_t mdata_phys;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	mdata_buf = dma_alloc_attrs(NULL, size, &mdata_phys, GFP_KERNEL,
				    &attrs);

	if (!mdata_buf) {
		pr_err("Allocation for metadata failed.\n");
		return -ENOMEM;
	}

	memcpy(mdata_buf, metadata, size);

	request.proc = id;
	request.image_addr = mdata_phys;

	ret = scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &request,
			sizeof(request), &scm_ret, sizeof(scm_ret));

	dma_free_attrs(NULL, size, mdata_buf, mdata_phys, &attrs);

	if (ret)
		return ret;
	return scm_ret;
}
Example #6
static void vb2_dma_sg_put(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;

	if (atomic_dec_and_test(&buf->refcount)) {
		DEFINE_DMA_ATTRS(attrs);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
		dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
#endif
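		/* Cache maintenance is done by the prepare()/finish() memops,
		 * so the final unmap can skip the CPU sync. */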
		dprintk(1, "%s: Freeing buffer of %d pages\n", __func__,
			buf->num_pages);
		dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				   buf->dma_dir, &attrs);
		if (buf->vaddr)
			vm_unmap_ram(buf->vaddr, buf->num_pages);
		sg_free_table(buf->dma_sgt);
		while (--i >= 0)
			__free_page(buf->pages[i]);
		kfree(buf->pages);
		put_device(buf->dev);
		kfree(buf);
	}
}
Example #7
/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;
	DEFINE_DMA_ATTRS(attrs);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
#endif

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->num_pages);
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
			   &attrs);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	while (--i >= 0) {
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
		if (!vma_is_io(buf->vma))
			put_page(buf->pages[i]);
	}
	kfree(buf->pages);
	vb2_put_vma(buf->vma);
	kfree(buf);
}
Example #8
static int gk20a_ltc_alloc_virt_cbc(struct gk20a *g,
				    size_t compbit_backing_size)
{
	struct device *d = dev_from_gk20a(g);
	struct gr_gk20a *gr = &g->gr;
	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t iova;
	int err;

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
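	/* No kernel mapping is needed; dma_alloc_attrs() then returns the
	 * backing pages, which is all the compbit store tracks. */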

	gr->compbit_store.pages =
		dma_alloc_attrs(d, compbit_backing_size, &iova,
				GFP_KERNEL, &attrs);
	if (!gr->compbit_store.pages) {
		gk20a_err(dev_from_gk20a(g), "failed to allocate backing store for compbit : size %zu",
				  compbit_backing_size);
		return -ENOMEM;
	}

	gr->compbit_store.base_iova = iova;
	gr->compbit_store.size = compbit_backing_size;
	err = gk20a_get_sgtable_from_pages(d,
				   &gr->compbit_store.sgt,
				   gr->compbit_store.pages, iova,
				   compbit_backing_size);
	if (err) {
		gk20a_err(dev_from_gk20a(g), "failed to allocate sgt for backing store");
		return err;
	}

	return 0;
}
Example #9
void nvhost_vic03_deinit(struct platform_device *dev)
{
	struct vic03 *v = get_vic03(dev);
	struct nvhost_device_data *pdata = nvhost_get_devdata(dev);

	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_READ_ONLY, &attrs);

	if (!v)
		return;

	if (pdata->scaling_init)
		nvhost_scale_hw_deinit(dev);

	if (v->ucode.mapped) {
		dma_free_attrs(&dev->dev,
			v->ucode.size, v->ucode.mapped,
			v->ucode.dma_addr, &attrs);
		v->ucode.mapped = NULL;
		v->ucode.dma_addr = 0;
	}

	/* zap, free */
	set_vic03(dev, NULL);
	kfree(v);
}
Example #10
static int qproc_mba_load_mdt(struct qproc *qproc, const struct firmware *fw)
{
	DEFINE_DMA_ATTRS(attrs);
	unsigned long timeout;
	dma_addr_t phys;
	dma_addr_t end;
	void *ptr;
	int ret;
	s32 val;

	dma_set_mask(qproc->dev, DMA_BIT_MASK(32));
	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);
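	/* The metadata address is handed to the MBA as a physical address,
	 * so the buffer must be physically contiguous even with an IOMMU. */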

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mba metadata buffer\n");
		return -ENOMEM;
	}

	end = phys + fw->size;
	dev_info(qproc->dev, "loading mdt header from %pa to %pa\n", &phys, &end);

	memcpy(ptr, fw->data, fw->size);

	writel_relaxed(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH);

	writel_relaxed(phys, qproc->rmb_base + RMB_PMI_META_DATA);
	writel(CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND);

	timeout = jiffies + HZ;
	for (;;) {
		msleep(1);

		val = readl(qproc->rmb_base + RMB_MBA_STATUS);
		if (val == STATUS_META_DATA_AUTH_SUCCESS || val < 0)
			break;

		if (time_after(jiffies, timeout))
			break;
	}
	if (val == 0) {
		dev_err(qproc->dev, "MBA authentication of headers timed out\n");
		ret = -ETIMEDOUT;
		goto out;
	} else if (val < 0) {
		dev_err(qproc->dev, "MBA returned error %d for headers\n", val);
		ret = -EINVAL;
		goto out;
	}

	dev_err(qproc->dev, "mdt authenticated\n");

	ret = 0;
out:
	dma_free_attrs(qproc->dev, fw->size, ptr, phys, &attrs);

	return ret;
}
Example #11
static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
					size_t size)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	void *mdata_virt;
	dma_addr_t mdata_phys;
	s32 status;
	int ret;
	DEFINE_DMA_ATTRS(attrs);

	drv->mba_mem_dev.coherent_dma_mask =
		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	/* Make metadata physically contiguous and 4K aligned. */
	mdata_virt = dma_alloc_attrs(&drv->mba_mem_dev, size, &mdata_phys,
					GFP_KERNEL, &attrs);
	if (!mdata_virt) {
		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
		ret = -ENOMEM;
		goto fail;
	}
	memcpy(mdata_virt, metadata, size);
	/* wmb() ensures copy completes prior to starting authentication. */
	wmb();

	/* Initialize length counter to 0 */
	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	/* Pass address of meta-data to the MBA and perform authentication */
	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
		POLL_INTERVAL_US, modem_auth_timeout_ms * 1000);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of headers timed out\n");
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for headers\n",
				status);
		ret = -EINVAL;
	}

	dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt, mdata_phys, &attrs);

	if (!ret)
		return ret;

fail:
	modem_log_rmb_regs(drv->rmb_base);
	if (drv->q6) {
		pil_mss_shutdown(pil);
		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_size,
				drv->q6->mba_virt, drv->q6->mba_phys,
				&drv->attrs_dma);
		drv->q6->mba_virt = NULL;
	}
	return ret;
}
Example #12
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	DEFINE_DMA_ATTRS(attrs);
	struct frame_vector *vec;

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;
	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE);
	if (IS_ERR(vec))
		goto userptr_fail_pfnvec;
	buf->vec = vec;

	buf->pages = frame_vector_pages(vec);
	if (IS_ERR(buf->pages))
		goto userptr_fail_sgtable;
	buf->num_pages = frame_vector_count(vec);

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_sgtable;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_sgtable:
	vb2_destroy_framevec(vec);
userptr_fail_pfnvec:
	kfree(buf);
	return NULL;
}
Example #13
static int flcn_read_ucode(struct platform_device *dev, const char *fw_name)
{
	struct flcn *v = get_flcn(dev);
	const struct firmware *ucode_fw;
	int err;
	DEFINE_DMA_ATTRS(attrs);

	nvhost_dbg_fn("");

	v->dma_addr = 0;
	v->mapped = NULL;

	ucode_fw = nvhost_client_request_firmware(dev, fw_name);
	if (!ucode_fw) {
		nvhost_dbg_fn("request firmware failed");
		dev_err(&dev->dev, "failed to get firmware\n");
		err = -ENOENT;
		return err;
	}

	v->size = ucode_fw->size;
	dma_set_attr(DMA_ATTR_READ_ONLY, &attrs);
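	/* The engine only reads the ucode, so map the buffer read-only for
	 * the device. */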

	v->mapped = dma_alloc_attrs(&dev->dev,
				v->size, &v->dma_addr,
				GFP_KERNEL, &attrs);
	if (!v->mapped) {
		dev_err(&dev->dev, "dma memory allocation failed");
		err = -ENOMEM;
		goto clean_up;
	}

	err = flcn_setup_ucode_image(dev, v->mapped, ucode_fw);
	if (err) {
		dev_err(&dev->dev, "failed to parse firmware image\n");
		goto clean_up;
	}

	v->valid = true;

	release_firmware(ucode_fw);

	return 0;

 clean_up:
	if (v->mapped) {
		dma_free_attrs(&dev->dev,
			v->size, v->mapped,
			v->dma_addr, &attrs);
		v->mapped = NULL;
		v->dma_addr = 0;
	}
	release_firmware(ucode_fw);
	return err;
}
Example #14
/**
 * bdisp_hw_free_nodes
 * @ctx:        bdisp context
 *
 * Free node memory
 *
 * RETURNS:
 * None
 */
void bdisp_hw_free_nodes(struct bdisp_ctx *ctx)
{
	if (ctx && ctx->node[0]) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
		dma_free_attrs(ctx->bdisp_dev->dev,
			       sizeof(struct bdisp_node) * MAX_NB_NODE,
			       ctx->node[0], ctx->node_paddr[0], &attrs);
	}
}
Example #15
/**
 * bdisp_hw_free_filters
 * @dev:        device
 *
 * Free filters memory
 *
 * RETURNS:
 * None
 */
void bdisp_hw_free_filters(struct device *dev)
{
	int size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);

	if (bdisp_h_filter[0].virt) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
		dma_free_attrs(dev, size, bdisp_h_filter[0].virt,
			       bdisp_h_filter[0].paddr, &attrs);
	}
}
Example #16
static int rockchip_gem_alloc_buf(struct rockchip_gem_object *rk_obj,
                                  bool alloc_kmap)
{
    struct drm_gem_object *obj = &rk_obj->base;
    struct drm_device *drm = obj->dev;

    init_dma_attrs(&rk_obj->dma_attrs);
    dma_set_attr(DMA_ATTR_WRITE_COMBINE, &rk_obj->dma_attrs);

    if (!alloc_kmap)
        dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &rk_obj->dma_attrs);

    rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
                                     &rk_obj->dma_addr, GFP_KERNEL,
                                     &rk_obj->dma_attrs);
    if (!rk_obj->kvaddr) {
        DRM_ERROR("failed to allocate %#x byte dma buffer", obj->size);
        return -ENOMEM;
    }

    return 0;
}
Example #17
int msenc_read_ucode(struct platform_device *dev, const char *fw_name)
{
    struct msenc *m = get_msenc(dev);
    const struct firmware *ucode_fw;
    int err;

    m->phys = 0;
    m->mapped = NULL;
    init_dma_attrs(&m->attrs);

    ucode_fw  = nvhost_client_request_firmware(dev, fw_name);
    if (!ucode_fw) {
        dev_err(&dev->dev, "failed to get msenc firmware\n");
        err = -ENOENT;
        return err;
    }

    m->size = ucode_fw->size;
    dma_set_attr(DMA_ATTR_READ_ONLY, &m->attrs);

    m->mapped = dma_alloc_attrs(&dev->dev,
                                m->size, &m->phys,
                                GFP_KERNEL, &m->attrs);
    if (!m->mapped) {
        dev_err(&dev->dev, "dma memory allocation failed");
        err = -ENOMEM;
        goto clean_up;
    }

    err = msenc_setup_ucode_image(dev, m->mapped, ucode_fw);
    if (err) {
        dev_err(&dev->dev, "failed to parse firmware image\n");
        goto clean_up;
    }

    m->valid = true;

    release_firmware(ucode_fw);

    return 0;

clean_up:
    if (m->mapped) {
        dma_free_attrs(&dev->dev,
                       m->size, m->mapped,
                       m->phys, &m->attrs);
        m->mapped = NULL;
    }
    release_firmware(ucode_fw);
    return err;
}
Example #18
static int msm_unload(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);

	pm_runtime_get_sync(dev->dev);
	drm_irq_uninstall(dev);
	pm_runtime_put_sync(dev->dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	if (kms) {
		pm_runtime_disable(dev->dev);
		kms->funcs->destroy(kms);
	}

	if (gpu) {
		mutex_lock(&dev->struct_mutex);
		gpu->funcs->pm_suspend(gpu);
		gpu->funcs->destroy(gpu);
		mutex_unlock(&dev->struct_mutex);
	}

	if (priv->vram.paddr) {
		DEFINE_DMA_ATTRS(attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
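		/* The VRAM was allocated with NO_KERNEL_MAPPING, so there is
		 * no kernel vaddr to pass to dma_free_attrs() below. */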
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev->dev, priv->vram.size, NULL,
				priv->vram.paddr, &attrs);
	}

	component_unbind_all(dev->dev, dev);

	dev->dev_private = NULL;

	kfree(priv);

	return 0;
}
Example #19
/* ION CMA heap operations functions */
static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate(
			    struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags)
{
	struct device *dev = heap->priv;
	struct ion_secure_cma_buffer_info *info;
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	dev_dbg(dev, "Request buffer allocation len %ld\n", len);

	info = kzalloc(sizeof(struct ion_secure_cma_buffer_info), GFP_KERNEL);
	if (!info) {
		dev_err(dev, "Can't allocate buffer info\n");
		return ION_CMA_ALLOCATE_FAILED;
	}

	info->cpu_addr = dma_alloc_attrs(dev, len, &(info->handle), 0, &attrs);

	if (!info->cpu_addr) {
		dev_err(dev, "Fail to allocate buffer\n");
		goto err;
	}

	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!info->table) {
		dev_err(dev, "Fail to allocate sg table\n");
		goto err;
	}

	ion_secure_cma_get_sgtable(dev,
			info->table, info->cpu_addr, info->handle, len);

	info->secure.buffer = info->handle;

	/* keep this for memory release */
	buffer->priv_virt = info;
	dev_dbg(dev, "Allocate buffer %p\n", buffer);
	return info;

err:
	kfree(info);
	return ION_CMA_ALLOCATE_FAILED;
}
Example #20
void nvhost_flcn_deinit(struct platform_device *dev)
{
	struct flcn *v = get_flcn(dev);

	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_READ_ONLY, &attrs);
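	/* The ucode was allocated read-only for the device; free it with the
	 * same attribute it was mapped with. */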

	if (!v)
		return;

	if (v->mapped) {
		dma_free_attrs(&dev->dev,
			v->size, v->mapped,
			v->dma_addr, &attrs);
		v->mapped = NULL;
		v->dma_addr = 0;
	}

	/* zap, free */
	set_flcn(dev, NULL);
	kfree(v);
}
Example #21
/*
 * @put_userptr: inform the allocator that a USERPTR buffer will no longer
 *		 be used
 */
static void vb2_dma_sg_put_userptr(void *buf_priv)
{
	struct vb2_dma_sg_buf *buf = buf_priv;
	struct sg_table *sgt = &buf->sg_table;
	int i = buf->num_pages;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);

	dprintk(1, "%s: Releasing userspace buffer of %d pages\n",
	       __func__, buf->num_pages);
	dma_unmap_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents, buf->dma_dir,
			   &attrs);
	if (buf->vaddr)
		vm_unmap_ram(buf->vaddr, buf->num_pages);
	sg_free_table(buf->dma_sgt);
	while (--i >= 0) {
		if (buf->dma_dir == DMA_FROM_DEVICE)
			set_page_dirty_lock(buf->pages[i]);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}
Example #22
/**
 * bdisp_hw_alloc_filters
 * @dev:        device
 *
 * Allocate dma memory for filters
 *
 * RETURNS:
 * 0 on success
 */
int bdisp_hw_alloc_filters(struct device *dev)
{
	unsigned int i, size;
	void *base;
	dma_addr_t paddr;
	DEFINE_DMA_ATTRS(attrs);

	/* Allocate all the filters within a single memory page */
	size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA, &attrs);
	if (!base)
		return -ENOMEM;

	/* Setup filter addresses */
	for (i = 0; i < NB_H_FILTER; i++) {
		bdisp_h_filter[i].min = bdisp_h_spec[i].min;
		bdisp_h_filter[i].max = bdisp_h_spec[i].max;
		memcpy(base, bdisp_h_spec[i].coef, BDISP_HF_NB);
		bdisp_h_filter[i].virt = base;
		bdisp_h_filter[i].paddr = paddr;
		base += BDISP_HF_NB;
		paddr += BDISP_HF_NB;
	}

	for (i = 0; i < NB_V_FILTER; i++) {
		bdisp_v_filter[i].min = bdisp_v_spec[i].min;
		bdisp_v_filter[i].max = bdisp_v_spec[i].max;
		memcpy(base, bdisp_v_spec[i].coef, BDISP_VF_NB);
		bdisp_v_filter[i].virt = base;
		bdisp_v_filter[i].paddr = paddr;
		base += BDISP_VF_NB;
		paddr += BDISP_VF_NB;
	}

	return 0;
}
Example #23
static struct resource_table * qproc_find_rsc_table(struct rproc *rproc,
						    const struct firmware *fw,
						    int *tablesz)
{
	static struct resource_table table = { .ver = 1, };

	*tablesz = sizeof(table);
	return &table;
}

static int qproc_load(struct rproc *rproc, const struct firmware *fw)
{
	struct qproc *qproc = rproc->priv;
	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t phys;
	dma_addr_t end;
	void *ptr;

	dma_set_mask(qproc->dev, DMA_BIT_MASK(32));
	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mba metadata buffer\n");
		return -ENOMEM;
	}

	end = phys + fw->size;
	dev_info(qproc->dev, "loading MBA from %pa to %pa\n", &phys, &end);

	memcpy(ptr, fw->data, fw->size);

	qproc->mba_va = ptr;
	qproc->mba_da = phys;
	qproc->mba_size = fw->size;
	qproc->mba_attrs = attrs;

	return 0;
}

static const struct rproc_fw_ops qproc_fw_ops = {
	.find_rsc_table = qproc_find_rsc_table,
	.load = qproc_load,
	.sanity_check = qproc_sanity_check,
};

static void q6v5proc_reset(struct qproc *qproc)
{
	u32 val;

	/* Assert resets, stop core */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_RESET);
	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
	writel_relaxed(val, qproc->reg_base + QDSP6SS_RESET);

	/* Enable power block headswitch, and wait for it to stabilize */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_PWR_CTL);
	val |= QDSS_BHS_ON | QDSS_LDO_BYP;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
	mb();
	udelay(1);

	/*
	 * Turn on memories. L2 banks should be done individually
	 * to minimize inrush current.
	 */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
		Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_L2DATA_SLP_NRET_N_2;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_L2DATA_SLP_NRET_N_1;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_L2DATA_SLP_NRET_N_0;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);

	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);

	/* Bring core out of reset */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_RESET);
	val &= ~Q6SS_CORE_ARES;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_RESET);

	/* Turn on core clock */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_GFMUX_CTL);
	val |= Q6SS_CLK_ENA;

#if 0
	/* Need a different clock source for v5.2.0 */
	if (qproc->qdsp6v5_2_0) {
		val &= ~Q6SS_CLK_SRC_SEL_FIELD;
		val |= Q6SS_CLK_SRC_SEL_C;
	}

#endif
	/* force clock on during source switch */
//	if (qproc->qdsp6v56)
		val |= Q6SS_CLK_SRC_SWITCH_CLK_OVR;

	writel_relaxed(val, qproc->reg_base + QDSP6SS_GFMUX_CTL);

	/* Start core execution */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_RESET);
	val &= ~Q6SS_STOP_CORE;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_RESET);
}
Example #24
static void *vb2_dma_sg_get_userptr(void *alloc_ctx, unsigned long vaddr,
				    unsigned long size,
				    enum dma_data_direction dma_dir)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	unsigned long first, last;
	int num_pages_from_user;
	struct vm_area_struct *vma;
	struct sg_table *sgt;
	DEFINE_DMA_ATTRS(attrs);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
#endif

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dev = conf->dev;
	buf->dma_dir = dma_dir;
	buf->offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	buf->dma_sgt = &buf->sg_table;

	first = (vaddr           & PAGE_MASK) >> PAGE_SHIFT;
	last  = ((vaddr + size - 1) & PAGE_MASK) >> PAGE_SHIFT;
	buf->num_pages = last - first + 1;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto userptr_fail_alloc_pages;

	vma = find_vma(current->mm, vaddr);
	if (!vma) {
		dprintk(1, "no vma for address %lu\n", vaddr);
		goto userptr_fail_find_vma;
	}

	if (vma->vm_end < vaddr + size) {
		dprintk(1, "vma at %lu is too small for %lu bytes\n",
			vaddr, size);
		goto userptr_fail_find_vma;
	}

	buf->vma = vb2_get_vma(vma);
	if (!buf->vma) {
		dprintk(1, "failed to copy vma\n");
		goto userptr_fail_find_vma;
	}

	if (vma_is_io(buf->vma)) {
		for (num_pages_from_user = 0;
		     num_pages_from_user < buf->num_pages;
		     ++num_pages_from_user, vaddr += PAGE_SIZE) {
			unsigned long pfn;

			if (follow_pfn(vma, vaddr, &pfn)) {
				dprintk(1, "no page for address %lu\n", vaddr);
				break;
			}
			buf->pages[num_pages_from_user] = pfn_to_page(pfn);
		}
	} else
		num_pages_from_user = get_user_pages(current, current->mm,
					     vaddr & PAGE_MASK,
					     buf->num_pages,
					     buf->dma_dir == DMA_FROM_DEVICE,
					     1, /* force */
					     buf->pages,
					     NULL);

	if (num_pages_from_user != buf->num_pages)
		goto userptr_fail_get_user_pages;

	if (sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, buf->offset, size, 0))
		goto userptr_fail_alloc_table_from_pages;

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto userptr_fail_map;

	return buf;

userptr_fail_map:
	sg_free_table(&buf->sg_table);
userptr_fail_alloc_table_from_pages:
userptr_fail_get_user_pages:
	dprintk(1, "get_user_pages requested/got: %d/%d]\n",
		buf->num_pages, num_pages_from_user);
	if (!vma_is_io(buf->vma))
		while (--num_pages_from_user >= 0)
			put_page(buf->pages[num_pages_from_user]);
	vb2_put_vma(buf->vma);
userptr_fail_find_vma:
	kfree(buf->pages);
userptr_fail_alloc_pages:
	kfree(buf);
	return NULL;
}
Example #25
int pil_mss_reset_load_mba(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	struct modem_data *md = dev_get_drvdata(pil->dev);
	const struct firmware *fw;
	char fw_name_legacy[10] = "mba.b00";
	char fw_name[10] = "mba.mbn";
	char *fw_name_p;
	void *mba_virt;
	dma_addr_t mba_phys, mba_phys_end;
	int ret, count;
	const u8 *data;

	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
	/* Load and authenticate mba image */
	ret = request_firmware(&fw, fw_name_p, pil->dev);
	if (ret) {
		dev_err(pil->dev, "Failed to locate %s\n",
						fw_name_p);
		return ret;
	}

	drv->mba_size = SZ_1M;
	md->mba_mem_dev.coherent_dma_mask =
		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	init_dma_attrs(&md->attrs_dma);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &md->attrs_dma);
	mba_virt = dma_alloc_attrs(&md->mba_mem_dev, drv->mba_size,
			&mba_phys, GFP_KERNEL, &md->attrs_dma);
	if (!mba_virt) {
		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	drv->mba_phys = mba_phys;
	drv->mba_virt = mba_virt;
	mba_phys_end = mba_phys + drv->mba_size;

	dev_info(pil->dev, "MBA: loading from %pa to %pa\n", &mba_phys,
								&mba_phys_end);
	/* Load the MBA image into memory */
	data = fw ? fw->data : NULL;
	if (!data) {
		dev_err(pil->dev, "MBA data is NULL\n");
		ret = -ENOMEM;
		goto err_mss_reset;
	}
	count = fw->size;
	memcpy(mba_virt, data, count);
	wmb();

	ret = pil_mss_reset(pil);
	if (ret) {
		dev_err(pil->dev, "MBA boot failed.\n");
		goto err_mss_reset;
	}

	release_firmware(fw);

	return 0;

err_mss_reset:
	dma_free_attrs(&md->mba_mem_dev, drv->mba_size, drv->mba_virt,
				drv->mba_phys, &md->attrs_dma);
	drv->mba_virt = NULL;
err_dma_alloc:
	release_firmware(fw);
	return ret;
}
Example #26
static int msm_iommu_sec_ptbl_init(struct device *dev)
{
    int psize[2] = {0, 0};
    unsigned int spare = 0;
    int ret;
    int version;
    void *cpu_addr;
    dma_addr_t paddr;
    DEFINE_DMA_ATTRS(attrs);
    static bool allocated = false;

    if (allocated)
        return 0;

    version = qcom_scm_get_feat_version(SCM_SVC_MP);

    if (version >= MAKE_VERSION(1, 1, 1)) {
        ret = qcom_scm_iommu_set_cp_pool_size(MAXIMUM_VIRT_SIZE, 0);
        if (ret) {
            dev_err(dev, "failed setting max virtual size (%d)\n",
                    ret);
            return ret;
        }
    }

    ret = qcom_scm_iommu_secure_ptbl_size(spare, psize);
    if (ret) {
        dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
                ret);
        return ret;
    }

    if (psize[1]) {
        dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
                ret);
        return psize[1];
    }

    dev_info(dev, "iommu sec: pgtable size: %d\n", psize[0]);

    dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
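    /* Only the secure world touches the page table, so no kernel mapping
     * of the allocation is needed. */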

    cpu_addr = dma_alloc_attrs(dev, psize[0], &paddr, GFP_KERNEL, &attrs);
    if (!cpu_addr) {
        dev_err(dev, "failed to allocate %d bytes for pgtable\n",
                psize[0]);
        return -ENOMEM;
    }

    ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize[0], spare);
    if (ret) {
        dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
        goto free_mem;
    }

    allocated = true;

    return 0;

free_mem:
    dma_free_attrs(dev, psize[0], cpu_addr, paddr, &attrs);
    return ret;
}
Example #27
static void *vb2_dma_sg_alloc(void *alloc_ctx, unsigned long size,
			      enum dma_data_direction dma_dir, gfp_t gfp_flags)
{
	struct vb2_dma_sg_conf *conf = alloc_ctx;
	struct vb2_dma_sg_buf *buf;
	struct sg_table *sgt;
	int ret;
	int num_pages;
	DEFINE_DMA_ATTRS(attrs);

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,6,0)
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &attrs);
#endif

	if (WARN_ON(alloc_ctx == NULL))
		return NULL;
	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return NULL;

	buf->vaddr = NULL;
	buf->dma_dir = dma_dir;
	buf->offset = 0;
	buf->size = size;
	/* size is already page aligned */
	buf->num_pages = size >> PAGE_SHIFT;
	buf->dma_sgt = &buf->sg_table;

	buf->pages = kzalloc(buf->num_pages * sizeof(struct page *),
			     GFP_KERNEL);
	if (!buf->pages)
		goto fail_pages_array_alloc;

	ret = vb2_dma_sg_alloc_compacted(buf, gfp_flags);
	if (ret)
		goto fail_pages_alloc;

	ret = sg_alloc_table_from_pages(buf->dma_sgt, buf->pages,
			buf->num_pages, 0, size, GFP_KERNEL);
	if (ret)
		goto fail_table_alloc;

	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(conf->dev);

	sgt = &buf->sg_table;
	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	sgt->nents = dma_map_sg_attrs(buf->dev, sgt->sgl, sgt->orig_nents,
				      buf->dma_dir, &attrs);
	if (!sgt->nents)
		goto fail_map;

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dma_sg_put;
	buf->handler.arg = buf;

	atomic_inc(&buf->refcount);

	dprintk(1, "%s: Allocated buffer of %d pages\n",
		__func__, buf->num_pages);
	return buf;

fail_map:
	put_device(buf->dev);
	sg_free_table(buf->dma_sgt);
fail_table_alloc:
	num_pages = buf->num_pages;
	while (num_pages--)
		__free_page(buf->pages[num_pages]);
fail_pages_alloc:
	kfree(buf->pages);
fail_pages_array_alloc:
	kfree(buf);
	return NULL;
}
Example #28
/**
 * scsi_dma_set_skip_cpu_sync - set DMA attrs to skip CPU cache sync operations
 */
void scsi_dma_set_skip_cpu_sync(void)
{
	init_dma_attrs(&scsi_direct_attrs);
	dma_set_attr(DMA_ATTR_SKIP_CPU_SYNC, &scsi_direct_attrs);
}
Example #29
static int lowlevel_buffer_allocate(struct drm_device *dev,
		unsigned int flags, struct rockchip_drm_gem_buf *buf)
{
	int ret = 0;
	enum dma_attr attr;
	unsigned int nr_pages;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (buf->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&buf->dma_attrs);

	/*
	 * if ROCKCHIP_BO_CONTIG, fully physically contiguous memory
	 * region will be allocated else physically contiguous
	 * as possible.
	 */
	if (!(flags & ROCKCHIP_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);

	/*
	 * if ROCKCHIP_BO_WC or ROCKCHIP_BO_NONCACHABLE, writecombine mapping
	 * else cachable mapping.
	 */
	if (flags & ROCKCHIP_BO_WC || !(flags & ROCKCHIP_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &buf->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);

	nr_pages = buf->size >> PAGE_SHIFT;

	if (!is_drm_iommu_supported(dev)) {
		dma_addr_t start_addr;
		unsigned int i = 0;

		buf->pages = kzalloc(sizeof(struct page *) * nr_pages,
					GFP_KERNEL);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			return -ENOMEM;
		}

		buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->kvaddr) {
			DRM_ERROR("failed to allocate buffer.\n");
			kfree(buf->pages);
			return -ENOMEM;
		}

		start_addr = buf->dma_addr;
		while (i < nr_pages) {
			buf->pages[i] = phys_to_page(start_addr);
			start_addr += PAGE_SIZE;
			i++;
		}
	} else {
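		/* With an IOMMU and NO_KERNEL_MAPPING, dma_alloc_attrs()
		 * returns the array of backing pages rather than a vaddr. */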

		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate buffer.\n");
			return -ENOMEM;
		}
	}

	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
	if (!buf->sgt) {
		DRM_ERROR("failed to get sg table.\n");
		ret = -ENOMEM;
		goto err_free_attrs;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr,
			buf->size);

	return ret;

err_free_attrs:
	dma_free_attrs(dev->dev, buf->size, buf->pages,
			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
	buf->dma_addr = (dma_addr_t)NULL;

	if (!is_drm_iommu_supported(dev))
		kfree(buf->pages);

	return ret;
}
Example #30
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,8,0)
static int gpu_probe(struct platform_device *pdev)
#else
static int __devinit gpu_probe(struct platform_device *pdev)
#endif
{
    int ret = -ENODEV;
    struct resource* res;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	struct contiguous_mem_pool *pool;
	struct reset_control *rstc;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
	struct device_node *dn = pdev->dev.of_node;
	const u32 *prop;
#else
	struct viv_gpu_platform_data *pdata;
#endif
    gcmkHEADER();

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phys_baseaddr");
    if (res)
        baseAddress = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_3d");
    if (res)
        irqLine = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_3d");
    if (res)
    {
        registerMemBase = res->start;
        registerMemSize = res->end - res->start + 1;
    }

    res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_2d");
    if (res)
        irqLine2D = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_2d");
    if (res)
    {
        registerMemBase2D = res->start;
        registerMemSize2D = res->end - res->start + 1;
    }

    res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_vg");
    if (res)
        irqLineVG = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_vg");
    if (res)
    {
        registerMemBaseVG = res->start;
        registerMemSizeVG = res->end - res->start + 1;
    }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	pool = devm_kzalloc(&pdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return -ENOMEM;
	pool->size = contiguousSize;
	init_dma_attrs(&pool->attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &pool->attrs);
	pool->virt = dma_alloc_attrs(&pdev->dev, pool->size, &pool->phys,
				     GFP_KERNEL, &pool->attrs);
	if (!pool->virt) {
		dev_err(&pdev->dev, "Failed to allocate contiguous memory\n");
		return -ENOMEM;
	}
	contiguousBase = pool->phys;
	dev_set_drvdata(&pdev->dev, pool);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
	prop = of_get_property(dn, "contiguousbase", NULL);
	if(prop)
		contiguousBase = *prop;
	of_property_read_u32(dn,"contiguoussize", (u32 *)&contiguousSize);
#else
    pdata = pdev->dev.platform_data;
    if (pdata) {
        contiguousBase = pdata->reserved_mem_base;
        contiguousSize = pdata->reserved_mem_size;
     }
#endif
    if (contiguousSize == 0)
       gckOS_Print("Warning: No contiguous memory is reserverd for gpu.!\n ");
    ret = drv_init(&pdev->dev);

    if (!ret)
    {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	rstc = devm_reset_control_get(&pdev->dev, "gpu3d");
	galDevice->rstc[gcvCORE_MAJOR] = IS_ERR(rstc) ? NULL : rstc;

	rstc = devm_reset_control_get(&pdev->dev, "gpu2d");
	galDevice->rstc[gcvCORE_2D] = IS_ERR(rstc) ? NULL : rstc;

	rstc = devm_reset_control_get(&pdev->dev, "gpuvg");
	galDevice->rstc[gcvCORE_VG] = IS_ERR(rstc) ? NULL : rstc;
#endif
        platform_set_drvdata(pdev, galDevice);

#if gcdENABLE_FSCALE_VAL_ADJUST
        if (galDevice->kernels[gcvCORE_MAJOR])
            REG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
#endif
        gcmkFOOTER_NO();
        return ret;
    }
#if gcdENABLE_FSCALE_VAL_ADJUST
    UNREG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	dma_free_attrs(&pdev->dev, pool->size, pool->virt, pool->phys,
		       &pool->attrs);
#endif
    gcmkFOOTER_ARG(KERN_INFO "Failed to register gpu driver: %d\n", ret);
    return ret;
}