static void lowlevel_buffer_deallocate(struct drm_device *dev,
		unsigned int flags, struct rockchip_drm_gem_buf *buf)
{
	DRM_DEBUG_KMS("%s.\n", __FILE__);

	if (!buf->dma_addr) {
		DRM_DEBUG_KMS("dma_addr is invalid.\n");
		return;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr,
			buf->size);

	sg_free_table(buf->sgt);

	kfree(buf->sgt);
	buf->sgt = NULL;

	if (!is_drm_iommu_supported(dev)) {
		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
		kfree(buf->pages);
	} else
		dma_free_attrs(dev->dev, buf->size, buf->pages,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);

	buf->dma_addr = (dma_addr_t)NULL;
}
static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
					size_t size)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	void *mdata_virt;
	dma_addr_t mdata_phys;
	s32 status;
	int ret;
	DEFINE_DMA_ATTRS(attrs);

	drv->mba_mem_dev.coherent_dma_mask =
		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	/* Make metadata physically contiguous and 4K aligned. */
	mdata_virt = dma_alloc_attrs(&drv->mba_mem_dev, size, &mdata_phys,
					GFP_KERNEL, &attrs);
	if (!mdata_virt) {
		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
		ret = -ENOMEM;
		goto fail;
	}
	memcpy(mdata_virt, metadata, size);
	/* wmb() ensures copy completes prior to starting authentication. */
	wmb();

	/* Initialize length counter to 0 */
	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	/* Pass address of meta-data to the MBA and perform authentication */
	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
		POLL_INTERVAL_US, modem_auth_timeout_ms * 1000);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of headers timed out\n");
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for headers\n",
				status);
		ret = -EINVAL;
	}

	dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt, mdata_phys, &attrs);

	if (!ret)
		return ret;

fail:
	modem_log_rmb_regs(drv->rmb_base);
	if (drv->q6) {
		pil_mss_shutdown(pil);
		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_size,
				drv->q6->mba_virt, drv->q6->mba_phys,
				&drv->attrs_dma);
		drv->q6->mba_virt = NULL;
	}
	return ret;
}
static int pil_msa_mba_auth(struct pil_desc *pil)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
	int ret;
	s32 status;

	/* Wait for all segments to be authenticated or an error to occur */
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
			status == STATUS_AUTH_COMPLETE || status < 0,
			50, modem_auth_timeout_ms * 1000);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of image timed out\n");
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for image\n", status);
		ret = -EINVAL;
	}

	if (drv->q6 && drv->q6->mba_virt) {
		/* Reclaim MBA memory. */
		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_size,
					drv->q6->mba_virt, drv->q6->mba_phys,
					&drv->attrs_dma);
		drv->q6->mba_virt = NULL;
	}

	if (ret)
		modem_log_rmb_regs(drv->rmb_base);
	if (q6_drv->ahb_clk_vote)
		clk_disable_unprepare(q6_drv->ahb_clk);

	return ret;
}
int pas_init_image(enum pas_id id, const u8 *metadata, size_t size)
{
	int ret;
	struct pas_init_image_req {
		u32	proc;
		u32	image_addr;
	} request;
	u32 scm_ret = 0;
	void *mdata_buf;
	dma_addr_t mdata_phys;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	mdata_buf = dma_alloc_attrs(NULL, size, &mdata_phys, GFP_KERNEL,
				    &attrs);

	if (!mdata_buf) {
		pr_err("Allocation for metadata failed.\n");
		return -ENOMEM;
	}

	memcpy(mdata_buf, metadata, size);

	request.proc = id;
	request.image_addr = mdata_phys;

	ret = scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &request,
			sizeof(request), &scm_ret, sizeof(scm_ret));

	dma_free_attrs(NULL, size, mdata_buf, mdata_phys, &attrs);

	if (ret)
		return ret;
	return scm_ret;
}
void nvhost_vic03_deinit(struct platform_device *dev)
{
	struct vic03 *v = get_vic03(dev);
	struct nvhost_device_data *pdata = nvhost_get_devdata(dev);

	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_READ_ONLY, &attrs);

	if (!v)
		return;

	if (pdata->scaling_init)
		nvhost_scale_hw_deinit(dev);

	if (v->ucode.mapped) {
		dma_free_attrs(&dev->dev,
			v->ucode.size, v->ucode.mapped,
			v->ucode.dma_addr, &attrs);
		v->ucode.mapped = NULL;
		v->ucode.dma_addr = 0;
	}

	/* zap, free */
	set_vic03(dev, NULL);
	kfree(v);
}
void rockchip_vpu_jpeg_enc_exit(struct rockchip_vpu_ctx *ctx)
{
	dma_free_attrs(ctx->dev->dev,
		       ctx->jpeg_enc.bounce_buffer.size,
		       ctx->jpeg_enc.bounce_buffer.cpu,
		       ctx->jpeg_enc.bounce_buffer.dma,
		       DMA_ATTR_ALLOC_SINGLE_PAGES);
}
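This free call uses the newer DMA attributes convention, in which dma_alloc_attrs()/dma_free_attrs() take a plain unsigned long bitmask rather than a struct dma_attrs pointer. A minimal sketch of the matching allocation, assuming the same bounce_buffer fields as above (the helper name and init path are hypothetical):

/* Hypothetical allocation counterpart, assuming the bounce_buffer fields above. */
static int rockchip_vpu_jpeg_enc_alloc_bounce(struct rockchip_vpu_ctx *ctx,
					      size_t size)
{
	ctx->jpeg_enc.bounce_buffer.size = size;
	ctx->jpeg_enc.bounce_buffer.cpu =
		dma_alloc_attrs(ctx->dev->dev, size,
				&ctx->jpeg_enc.bounce_buffer.dma,
				GFP_KERNEL,
				DMA_ATTR_ALLOC_SINGLE_PAGES);
	if (!ctx->jpeg_enc.bounce_buffer.cpu)
		return -ENOMEM;

	return 0;
}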
Example #7
static void rockchip_gem_free_buf(struct rockchip_gem_object *rk_obj)
{
    struct drm_gem_object *obj = &rk_obj->base;
    struct drm_device *drm = obj->dev;

    dma_free_attrs(drm->dev, obj->size, rk_obj->kvaddr, rk_obj->dma_addr,
                   &rk_obj->dma_attrs);
}
Example #8
static int qproc_mba_load_mdt(struct qproc *qproc, const struct firmware *fw)
{
	DEFINE_DMA_ATTRS(attrs);
	unsigned long timeout;
	dma_addr_t phys;
	dma_addr_t end;
	void *ptr;
	int ret;
	s32 val;

	dma_set_mask(qproc->dev, DMA_BIT_MASK(32));
	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mba metadata buffer\n");
		return -ENOMEM;
	}

	end = phys + fw->size;
	dev_info(qproc->dev, "loading mdt header from %pa to %pa\n", &phys, &end);

	memcpy(ptr, fw->data, fw->size);

	writel_relaxed(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH);

	writel_relaxed(phys, qproc->rmb_base + RMB_PMI_META_DATA);
	writel(CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND);

	timeout = jiffies + HZ;
	for (;;) {
		msleep(1);

		val = readl(qproc->rmb_base + RMB_MBA_STATUS);
		if (val == STATUS_META_DATA_AUTH_SUCCESS || val < 0)
			break;

		if (time_after(jiffies, timeout))
			break;
	}
	if (val == 0) {
		dev_err(qproc->dev, "MBA authentication of headers timed out\n");
		ret = -ETIMEDOUT;
		goto out;
	} else if (val < 0) {
		dev_err(qproc->dev, "MBA returned error %d for headers\n", val);
		ret = -EINVAL;
		goto out;
	}

	dev_info(qproc->dev, "mdt authenticated\n");

	ret = 0;
out:
	dma_free_attrs(qproc->dev, fw->size, ptr, phys, &attrs);

	return ret;
}
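The open-coded msleep()/time_after() poll above does the same job as readl_poll_timeout() from <linux/iopoll.h>, which the pil_msa examples earlier rely on. A sketch of the equivalent poll inside qproc_mba_load_mdt(), reusing the function's existing ret and val locals; the 1 ms interval and one second timeout are illustrative assumptions:

	/* Sketch only: the manual polling loop expressed with readl_poll_timeout(). */
	ret = readl_poll_timeout(qproc->rmb_base + RMB_MBA_STATUS, val,
				 val == STATUS_META_DATA_AUTH_SUCCESS || val < 0,
				 1000, 1000000);
	if (ret)
		dev_err(qproc->dev, "MBA authentication of headers timed out\n");
	else if (val < 0)
		dev_err(qproc->dev, "MBA returned error %d for headers\n", val);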
struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
		unsigned long size, bool alloc_kmap)
{
	struct mtk_drm_gem_obj *mtk_gem;
	struct drm_gem_object *obj;
	int ret;

	mtk_gem = mtk_drm_gem_init(dev, size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	obj = &mtk_gem->base;

	init_dma_attrs(&mtk_gem->dma_attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &mtk_gem->dma_attrs);

	if (!alloc_kmap)
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &mtk_gem->dma_attrs);

	mtk_gem->kvaddr = dma_alloc_attrs(dev->dev, obj->size,
			(dma_addr_t *)&mtk_gem->dma_addr, GFP_KERNEL,
			&mtk_gem->dma_attrs);
	if (!mtk_gem->kvaddr) {
		DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size);
		ret = -ENOMEM;
		goto err_dma;
	}

	{
		mtk_gem->sgt = kzalloc(sizeof(*mtk_gem->sgt), GFP_KERNEL);
		if (!mtk_gem->sgt) {
			ret = -ENOMEM;
			goto err_sgt;
		}

		ret = dma_get_sgtable_attrs(dev->dev, mtk_gem->sgt,
			mtk_gem->kvaddr, mtk_gem->dma_addr, obj->size,
			&mtk_gem->dma_attrs);
		if (ret) {
			DRM_ERROR("failed to allocate sgt, %d\n", ret);
			goto err_get_sgtable;
		}
	}

	DRM_INFO("kvaddr = %p dma_addr = %pad\n",
			mtk_gem->kvaddr, &mtk_gem->dma_addr);

	return mtk_gem;
err_get_sgtable:
	kfree(mtk_gem->sgt);
err_sgt:
	dma_free_attrs(dev->dev, obj->size, mtk_gem->kvaddr, mtk_gem->dma_addr,
			&mtk_gem->dma_attrs);
err_dma:
	kfree(mtk_gem);
	return ERR_PTR(ret);
}
static int flcn_read_ucode(struct platform_device *dev, const char *fw_name)
{
	struct flcn *v = get_flcn(dev);
	const struct firmware *ucode_fw;
	int err;
	DEFINE_DMA_ATTRS(attrs);

	nvhost_dbg_fn("");

	v->dma_addr = 0;
	v->mapped = NULL;

	ucode_fw = nvhost_client_request_firmware(dev, fw_name);
	if (!ucode_fw) {
		nvhost_dbg_fn("request firmware failed");
		dev_err(&dev->dev, "failed to get firmware\n");
		err = -ENOENT;
		return err;
	}

	v->size = ucode_fw->size;
	dma_set_attr(DMA_ATTR_READ_ONLY, &attrs);

	v->mapped = dma_alloc_attrs(&dev->dev,
				v->size, &v->dma_addr,
				GFP_KERNEL, &attrs);
	if (!v->mapped) {
		dev_err(&dev->dev, "dma memory allocation failed");
		err = -ENOMEM;
		goto clean_up;
	}

	err = flcn_setup_ucode_image(dev, v->mapped, ucode_fw);
	if (err) {
		dev_err(&dev->dev, "failed to parse firmware image\n");
		goto clean_up;
	}

	v->valid = true;

	release_firmware(ucode_fw);

	return 0;

 clean_up:
	if (v->mapped) {
		dma_free_attrs(&dev->dev,
			v->size, v->mapped,
			v->dma_addr, &attrs);
		v->mapped = NULL;
		v->dma_addr = 0;
	}
	release_firmware(ucode_fw);
	return err;
}
Example #11
/**
 * bdisp_hw_free_nodes
 * @ctx:        bdisp context
 *
 * Free node memory
 *
 * RETURNS:
 * None
 */
void bdisp_hw_free_nodes(struct bdisp_ctx *ctx)
{
	if (ctx && ctx->node[0]) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
		dma_free_attrs(ctx->bdisp_dev->dev,
			       sizeof(struct bdisp_node) * MAX_NB_NODE,
			       ctx->node[0], ctx->node_paddr[0], &attrs);
	}
}
Example #12
void hw_free(struct delta_ctx *ctx, struct delta_buf *buf)
{
	struct delta_dev *delta = ctx->dev;

	dev_dbg(delta->dev,
		"%s     free %d bytes of HW memory @(virt=0x%p, phy=%pad): %s\n",
		ctx->name, buf->size, buf->vaddr, &buf->paddr, buf->name);

	dma_free_attrs(delta->dev, buf->size,
		       buf->vaddr, buf->paddr, buf->attrs);
}
Example #13
int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
	int ret = 0;
	s32 status;

	if (err_path) {
		writel_relaxed(CMD_PILFAIL_NFY_MBA,
				drv->rmb_base + RMB_MBA_COMMAND);
		ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
			status == STATUS_MBA_UNLOCKED || status < 0,
			POLL_INTERVAL_US, pbl_mba_boot_timeout_ms * 1000);
		if (ret)
			dev_err(pil->dev, "MBA region unlock timed out\n");
		else if (status < 0)
			dev_err(pil->dev, "MBA unlock returned err status: %d\n",
						status);
	}

	ret = pil_mss_shutdown(pil);

	if (q6_drv->ahb_clk_vote)
		clk_disable_unprepare(q6_drv->ahb_clk);

	/*
	 * In case of any failure where reclaiming the MBA memory
	 * could not happen, free the memory here.
	 */
	if (drv->q6->mba_virt) {
		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_size,
				drv->q6->mba_virt, drv->q6->mba_phys,
				&drv->attrs_dma);
		drv->q6->mba_virt = NULL;
	}
	if (drv->q6->dp_virt) {
		dma_free_attrs(&drv->mba_mem_dev, drv->q6->dp_size,
				drv->q6->dp_virt, drv->q6->dp_phys,
				&drv->attrs_dma);
		drv->q6->dp_virt = NULL;
	}
	return ret;
}
Example #14
/**
 * bdisp_hw_free_filters
 * @dev:        device
 *
 * Free filters memory
 *
 * RETURNS:
 * None
 */
void bdisp_hw_free_filters(struct device *dev)
{
	int size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);

	if (bdisp_h_filter[0].virt) {
		DEFINE_DMA_ATTRS(attrs);

		dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
		dma_free_attrs(dev, size, bdisp_h_filter[0].virt,
			       bdisp_h_filter[0].paddr, &attrs);
	}
}
Example #15
static int sgiseeq_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct sgiseeq_private *sp = netdev_priv(dev);

	unregister_netdev(dev);
	dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
		       sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
	free_netdev(dev);

	return 0;
}
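DMA_ATTR_NON_CONSISTENT requests memory that the driver synchronizes itself instead of coherent memory. A sketch of what the matching allocation in the probe path could look like, assuming the same srings/srings_dma fields; the helper name is hypothetical:

/* Hypothetical helper: allocate the descriptor rings freed in sgiseeq_remove(). */
static int sgiseeq_alloc_rings(struct platform_device *pdev,
			       struct sgiseeq_private *sp)
{
	sp->srings = dma_alloc_attrs(&pdev->dev, sizeof(*sp->srings),
				     &sp->srings_dma, GFP_KERNEL,
				     DMA_ATTR_NON_CONSISTENT);
	if (!sp->srings)
		return -ENOMEM;

	return 0;
}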
Example #16
int msenc_read_ucode(struct platform_device *dev, const char *fw_name)
{
    struct msenc *m = get_msenc(dev);
    const struct firmware *ucode_fw;
    int err;

    m->phys = 0;
    m->mapped = NULL;
    init_dma_attrs(&m->attrs);

    ucode_fw  = nvhost_client_request_firmware(dev, fw_name);
    if (!ucode_fw) {
        dev_err(&dev->dev, "failed to get msenc firmware\n");
        err = -ENOENT;
        return err;
    }

    m->size = ucode_fw->size;
    dma_set_attr(DMA_ATTR_READ_ONLY, &m->attrs);

    m->mapped = dma_alloc_attrs(&dev->dev,
                                m->size, &m->phys,
                                GFP_KERNEL, &m->attrs);
    if (!m->mapped) {
        dev_err(&dev->dev, "dma memory allocation failed");
        err = -ENOMEM;
        goto clean_up;
    }

    err = msenc_setup_ucode_image(dev, m->mapped, ucode_fw);
    if (err) {
        dev_err(&dev->dev, "failed to parse firmware image\n");
        goto clean_up;
    }

    m->valid = true;

    release_firmware(ucode_fw);

    return 0;

clean_up:
    if (m->mapped) {
        dma_free_attrs(&dev->dev,
                       m->size, m->mapped,
                       m->phys, &m->attrs);
        m->mapped = NULL;
    }
    release_firmware(ucode_fw);
    return err;
}
void mtk_drm_gem_free_object(struct drm_gem_object *obj)
{
	struct mtk_drm_gem_obj *mtk_gem = to_mtk_gem_obj(obj);

	drm_gem_free_mmap_offset(obj);

	/* release file pointer to gem object. */
	drm_gem_object_release(obj);

	dma_free_attrs(obj->dev->dev, obj->size, mtk_gem->kvaddr,
			mtk_gem->dma_addr, &mtk_gem->dma_attrs);

	kfree(mtk_gem);
}
Example #18
static int qproc_stop(struct rproc *rproc)
{
	struct qproc *qproc = (struct qproc *)rproc->priv;

	q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_Q6_HALT_BASE);
	q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_MODEM_HALT_BASE);
	q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_NC_HALT_BASE);

	reset_control_assert(qproc->mss_restart);
	clk_disable_unprepare(qproc->axi_clk);
	clk_disable_unprepare(qproc->ahb_clk);
//	regulator_disable(qproc->vdd);

	dma_free_attrs(qproc->dev, qproc->mba_size, qproc->mba_va, qproc->mba_da, &qproc->mba_attrs);

	return 0;
}
Example #19
static int msm_unload(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct msm_kms *kms = priv->kms;
	struct msm_gpu *gpu = priv->gpu;

	drm_kms_helper_poll_fini(dev);
	drm_mode_config_cleanup(dev);
	drm_vblank_cleanup(dev);

	pm_runtime_get_sync(dev->dev);
	drm_irq_uninstall(dev);
	pm_runtime_put_sync(dev->dev);

	flush_workqueue(priv->wq);
	destroy_workqueue(priv->wq);

	if (kms) {
		pm_runtime_disable(dev->dev);
		kms->funcs->destroy(kms);
	}

	if (gpu) {
		mutex_lock(&dev->struct_mutex);
		gpu->funcs->pm_suspend(gpu);
		gpu->funcs->destroy(gpu);
		mutex_unlock(&dev->struct_mutex);
	}

	if (priv->vram.paddr) {
		DEFINE_DMA_ATTRS(attrs);
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
		drm_mm_takedown(&priv->vram.mm);
		dma_free_attrs(dev->dev, priv->vram.size, NULL,
				priv->vram.paddr, &attrs);
	}

	component_unbind_all(dev->dev, dev);

	dev->dev_private = NULL;

	kfree(priv);

	return 0;
}
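Note the NULL virtual address passed to dma_free_attrs() for the VRAM: with DMA_ATTR_NO_KERNEL_MAPPING the allocation returns an opaque cookie rather than a usable kernel mapping, so only the DMA/physical address needs to be kept. A minimal sketch of the corresponding allocation, assuming the same priv->vram fields; the helper name is hypothetical:

/* Hypothetical VRAM setup matching the free in msm_unload() above. */
static int msm_init_vram_sketch(struct drm_device *dev, size_t size)
{
	struct msm_drm_private *priv = dev->dev_private;
	DEFINE_DMA_ATTRS(attrs);
	void *p;

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	priv->vram.size = size;
	p = dma_alloc_attrs(dev->dev, size, &priv->vram.paddr,
			    GFP_KERNEL, &attrs);
	if (!p) {
		dev_err(dev->dev, "failed to allocate VRAM\n");
		return -ENOMEM;
	}

	/* p is only a cookie; it is never dereferenced or stored. */
	return 0;
}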
static int __devexit gpu_remove(struct platform_device *pdev)
#endif
{
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	gckGALDEVICE device = platform_get_drvdata(pdev);
	struct contiguous_mem_pool *pool = device->pool;
#endif
    gcmkHEADER();
#if gcdENABLE_FSCALE_VAL_ADJUST
    if (galDevice->kernels[gcvCORE_MAJOR])
        UNREG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
#endif
    drv_exit();
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	dma_free_attrs(&pdev->dev, pool->size, pool->virt, pool->phys,
		       &pool->attrs);
#endif
    gcmkFOOTER_NO();
    return 0;
}
Example #21
void nvhost_msenc_deinit(struct platform_device *dev)
{
    struct msenc *m = get_msenc(dev);
    struct nvhost_device_data *pdata = platform_get_drvdata(dev);

    if (pdata->scaling_init)
        nvhost_scale_hw_deinit(dev);

    if (!m)
        return;

    /* unpin, free ucode memory */
    if (m->mapped) {
        dma_free_attrs(&dev->dev,
                       m->size, m->mapped,
                       m->phys, &m->attrs);
        m->mapped = NULL;
    }
    m->valid = false;
    kfree(m);
    set_msenc(dev, NULL);
}
void nvhost_flcn_deinit(struct platform_device *dev)
{
	struct flcn *v = get_flcn(dev);

	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_READ_ONLY, &attrs);

	if (!v)
		return;

	if (v->mapped) {
		dma_free_attrs(&dev->dev,
			v->size, v->mapped,
			v->dma_addr, &attrs);
		v->mapped = NULL;
		v->dma_addr = 0;
	}

	/* zap, free */
	set_flcn(dev, NULL);
	kfree(v);
}
static int __devinit gpu_probe(struct platform_device *pdev)
#endif
{
    int ret = -ENODEV;
    struct resource* res;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	struct contiguous_mem_pool *pool;
	struct reset_control *rstc;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
	struct device_node *dn = pdev->dev.of_node;
	const u32 *prop;
#else
	struct viv_gpu_platform_data *pdata;
#endif
    gcmkHEADER();

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phys_baseaddr");
    if (res)
        baseAddress = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_3d");
    if (res)
        irqLine = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_3d");
    if (res)
    {
        registerMemBase = res->start;
        registerMemSize = res->end - res->start + 1;
    }

    res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_2d");
    if (res)
        irqLine2D = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_2d");
    if (res)
    {
        registerMemBase2D = res->start;
        registerMemSize2D = res->end - res->start + 1;
    }

    res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_vg");
    if (res)
        irqLineVG = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_vg");
    if (res)
    {
        registerMemBaseVG = res->start;
        registerMemSizeVG = res->end - res->start + 1;
    }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	pool = devm_kzalloc(&pdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return -ENOMEM;
	pool->size = contiguousSize;
	init_dma_attrs(&pool->attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &pool->attrs);
	pool->virt = dma_alloc_attrs(&pdev->dev, pool->size, &pool->phys,
				     GFP_KERNEL, &pool->attrs);
	if (!pool->virt) {
		dev_err(&pdev->dev, "Failed to allocate contiguous memory\n");
		return -ENOMEM;
	}
	contiguousBase = pool->phys;
	dev_set_drvdata(&pdev->dev, pool);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
	prop = of_get_property(dn, "contiguousbase", NULL);
	if (prop)
		contiguousBase = *prop;
	of_property_read_u32(dn, "contiguoussize", (u32 *)&contiguousSize);
#else
    pdata = pdev->dev.platform_data;
    if (pdata) {
        contiguousBase = pdata->reserved_mem_base;
        contiguousSize = pdata->reserved_mem_size;
    }
#endif
    if (contiguousSize == 0)
        gckOS_Print("Warning: No contiguous memory is reserved for gpu!\n");
    ret = drv_init(&pdev->dev);

    if (!ret)
    {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	rstc = devm_reset_control_get(&pdev->dev, "gpu3d");
	galDevice->rstc[gcvCORE_MAJOR] = IS_ERR(rstc) ? NULL : rstc;

	rstc = devm_reset_control_get(&pdev->dev, "gpu2d");
	galDevice->rstc[gcvCORE_2D] = IS_ERR(rstc) ? NULL : rstc;

	rstc = devm_reset_control_get(&pdev->dev, "gpuvg");
	galDevice->rstc[gcvCORE_VG] = IS_ERR(rstc) ? NULL : rstc;
#endif
        platform_set_drvdata(pdev, galDevice);

#if gcdENABLE_FSCALE_VAL_ADJUST
        if (galDevice->kernels[gcvCORE_MAJOR])
            REG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
#endif
        gcmkFOOTER_NO();
        return ret;
    }
#if gcdENABLE_FSCALE_VAL_ADJUST
    UNREG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	dma_free_attrs(&pdev->dev, pool->size, pool->virt, pool->phys,
		       &pool->attrs);
#endif
    gcmkFOOTER_ARG(KERN_INFO "Failed to register gpu driver: %d\n", ret);
    return ret;
}
static int lowlevel_buffer_allocate(struct drm_device *dev,
		unsigned int flags, struct rockchip_drm_gem_buf *buf)
{
	int ret = 0;
	enum dma_attr attr;
	unsigned int nr_pages;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (buf->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&buf->dma_attrs);

	/*
	 * Unless ROCKCHIP_BO_NONCONTIG is set, a fully physically
	 * contiguous memory region will be allocated; otherwise the
	 * allocation only needs to be as contiguous as possible.
	 */
	if (!(flags & ROCKCHIP_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);

	/*
	 * If ROCKCHIP_BO_WC is set, or ROCKCHIP_BO_CACHABLE is not, use a
	 * write-combine mapping; otherwise use a cacheable mapping.
	 */
	if (flags & ROCKCHIP_BO_WC || !(flags & ROCKCHIP_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &buf->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);

	nr_pages = buf->size >> PAGE_SHIFT;

	if (!is_drm_iommu_supported(dev)) {
		dma_addr_t start_addr;
		unsigned int i = 0;

		buf->pages = kzalloc(sizeof(struct page *) * nr_pages,
					GFP_KERNEL);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			return -ENOMEM;
		}

		buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->kvaddr) {
			DRM_ERROR("failed to allocate buffer.\n");
			kfree(buf->pages);
			return -ENOMEM;
		}

		start_addr = buf->dma_addr;
		while (i < nr_pages) {
			buf->pages[i] = phys_to_page(start_addr);
			start_addr += PAGE_SIZE;
			i++;
		}
	} else {

		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate buffer.\n");
			return -ENOMEM;
		}
	}

	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
	if (IS_ERR(buf->sgt)) {
		DRM_ERROR("failed to get sg table.\n");
		ret = PTR_ERR(buf->sgt);
		goto err_free_attrs;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr,
			buf->size);

	return ret;

err_free_attrs:
	dma_free_attrs(dev->dev, buf->size, buf->pages,
			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
	buf->dma_addr = (dma_addr_t)NULL;

	if (!is_drm_iommu_supported(dev))
		kfree(buf->pages);

	return ret;
}
static int msm_iommu_sec_ptbl_init(struct device *dev)
{
    int psize[2] = {0, 0};
    unsigned int spare = 0;
    int ret;
    int version;
    void *cpu_addr;
    dma_addr_t paddr;
    DEFINE_DMA_ATTRS(attrs);
    static bool allocated = false;

    if (allocated)
        return 0;

    version = qcom_scm_get_feat_version(SCM_SVC_MP);

    if (version >= MAKE_VERSION(1, 1, 1)) {
        ret = qcom_scm_iommu_set_cp_pool_size(MAXIMUM_VIRT_SIZE, 0);
        if (ret) {
            dev_err(dev, "failed setting max virtual size (%d)\n",
                    ret);
            return ret;
        }
    }

    ret = qcom_scm_iommu_secure_ptbl_size(spare, psize);
    if (ret) {
        dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
                ret);
        return ret;
    }

    if (psize[1]) {
        dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
                psize[1]);
        return psize[1];
    }

    dev_info(dev, "iommu sec: pgtable size: %d\n", psize[0]);

    dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

    cpu_addr = dma_alloc_attrs(dev, psize[0], &paddr, GFP_KERNEL, &attrs);
    if (!cpu_addr) {
        dev_err(dev, "failed to allocate %d bytes for pgtable\n",
                psize[0]);
        return -ENOMEM;
    }

    ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize[0], spare);
    if (ret) {
        dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
        goto free_mem;
    }

    allocated = true;

    return 0;

free_mem:
    dma_free_attrs(dev, psize[0], cpu_addr, paddr, &attrs);
    return ret;
}
Example #26
static int qproc_start(struct rproc *rproc)
{
	struct qproc *qproc = (struct qproc *)rproc->priv;
	unsigned long timeout;
	int ret;
	u32 val;

	dev_dbg(qproc->dev, "%s\n", __func__);
//	ret = regulator_enable(qproc->vdd);
//	if (ret) {
//		dev_err(qproc->dev, "failed to enable mss vdd\n");
//		return ret;
//	}

	ret = regulator_enable(qproc->pll);
	if (ret) {
		dev_err(qproc->dev, "failed to enable mss vdd pll\n");
		return ret;
	}


	ret = pil_mss_restart_reg(qproc, 0);
	//FIXME
	//ret = reset_control_deassert(qproc->mss_restart);
	if (ret) {
		dev_err(qproc->dev, "failed to deassert mss restart\n");
		goto disable_vdd;
	}

	ret = clk_prepare_enable(qproc->ahb_clk);
	if (ret)
		goto assert_reset;

	ret = clk_prepare_enable(qproc->axi_clk);
	if (ret)
		goto disable_ahb_clk;

	ret = clk_prepare_enable(qproc->rom_clk);
	if (ret)
		goto disable_axi_clk;

	writel_relaxed(qproc->mba_da, qproc->rmb_base + RMB_MBA_IMAGE);

	/* Ensure order of data/entry point and the following reset release */
	wmb();

	q6v5proc_reset(qproc);

	timeout = jiffies + HZ;
	for (;;) {
		msleep(1);

		val = readl(qproc->rmb_base + RMB_PBL_STATUS);
		if (val || time_after(jiffies, timeout))
			break;
	}
	if (val == 0) {
		dev_err(qproc->dev, "PBL boot timed out\n");
		ret = -ETIMEDOUT;
		goto halt_axi_ports;
	} else if (val != STATUS_PBL_SUCCESS) {
		dev_err(qproc->dev, "PBL returned unexpected status %d\n", val);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	timeout = jiffies + HZ;
	for (;;) {
		msleep(1);

		val = readl(qproc->rmb_base + RMB_MBA_STATUS);
		if (val || time_after(jiffies, timeout))
			break;
	}
	if (val == 0) {
		dev_err(qproc->dev, "MBA boot timed out\n");
		ret = -ETIMEDOUT;
		goto halt_axi_ports;
	} else if (val != STATUS_XPU_UNLOCKED && val != STATUS_XPU_UNLOCKED_SCRIBBLED) {
		dev_err(qproc->dev, "MBA returned unexpected status %d\n", val);
		ret = -EINVAL;
		goto halt_axi_ports;
	}

	dev_info(qproc->dev, "MBA boot done\n");

	ret = qproc_load_modem(qproc);
	if (ret)
		goto halt_axi_ports;

	return 0;

halt_axi_ports:
	q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_Q6_HALT_BASE);
	q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_MODEM_HALT_BASE);
	q6v5proc_halt_axi_port(qproc, qproc->halt_base + MSS_NC_HALT_BASE);
disable_axi_clk:
	clk_disable_unprepare(qproc->axi_clk);
disable_ahb_clk:
	clk_disable_unprepare(qproc->ahb_clk);
assert_reset:
	reset_control_assert(qproc->mss_restart);
disable_vdd:
//	regulator_disable(qproc->vdd);

	dma_free_attrs(qproc->dev, qproc->mba_size, qproc->mba_va, qproc->mba_da, &qproc->mba_attrs);
	return ret;
}
int pil_mss_reset_load_mba(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	struct modem_data *md = dev_get_drvdata(pil->dev);
	const struct firmware *fw;
	char fw_name_legacy[10] = "mba.b00";
	char fw_name[10] = "mba.mbn";
	char *fw_name_p;
	void *mba_virt;
	dma_addr_t mba_phys, mba_phys_end;
	int ret, count;
	const u8 *data;

	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
	/* Load and authenticate mba image */
	ret = request_firmware(&fw, fw_name_p, pil->dev);
	if (ret) {
		dev_err(pil->dev, "Failed to locate %s\n",
						fw_name_p);
		return ret;
	}

	drv->mba_size = SZ_1M;
	md->mba_mem_dev.coherent_dma_mask =
		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	init_dma_attrs(&md->attrs_dma);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &md->attrs_dma);
	mba_virt = dma_alloc_attrs(&md->mba_mem_dev, drv->mba_size,
			&mba_phys, GFP_KERNEL, &md->attrs_dma);
	if (!mba_virt) {
		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	drv->mba_phys = mba_phys;
	drv->mba_virt = mba_virt;
	mba_phys_end = mba_phys + drv->mba_size;

	dev_info(pil->dev, "MBA: loading from %pa to %pa\n", &mba_phys,
								&mba_phys_end);
	/* Load the MBA image into memory */
	data = fw ? fw->data : NULL;
	if (!data) {
		dev_err(pil->dev, "MBA data is NULL\n");
		ret = -ENOMEM;
		goto err_mss_reset;
	}
	count = fw->size;
	memcpy(mba_virt, data, count);
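	/* Ensure the MBA image copy completes before the modem is brought up. */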
	wmb();

	ret = pil_mss_reset(pil);
	if (ret) {
		dev_err(pil->dev, "MBA boot failed.\n");
		goto err_mss_reset;
	}

	release_firmware(fw);

	return 0;

err_mss_reset:
	dma_free_attrs(&md->mba_mem_dev, drv->mba_size, drv->mba_virt,
				drv->mba_phys, &md->attrs_dma);
	drv->mba_virt = NULL;
err_dma_alloc:
	release_firmware(fw);
	return ret;
}