Example #1
0
static int gk20a_ltc_alloc_virt_cbc(struct gk20a *g,
				    size_t compbit_backing_size)
{
	struct device *d = dev_from_gk20a(g);
	struct gr_gk20a *gr = &g->gr;
	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t iova;
	int err;

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	gr->compbit_store.pages =
		dma_alloc_attrs(d, compbit_backing_size, &iova,
				GFP_KERNEL, &attrs);
	if (!gr->compbit_store.pages) {
		gk20a_err(dev_from_gk20a(g), "failed to allocate backing store for compbit : size %zu",
				  compbit_backing_size);
		return -ENOMEM;
	}

	gr->compbit_store.base_iova = iova;
	gr->compbit_store.size = compbit_backing_size;
	err = gk20a_get_sgtable_from_pages(d,
				   &gr->compbit_store.sgt,
				   gr->compbit_store.pages, iova,
				   compbit_backing_size);
	if (err) {
		gk20a_err(dev_from_gk20a(g), "failed to allocate sgt for backing store");
		return err;
	}

	return 0;
}
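
/*
 * Not part of the original driver: a minimal teardown sketch for the
 * allocation above. Because DMA_ATTR_NO_KERNEL_MAPPING was used,
 * compbit_store.pages is an opaque cookie that must only be handed back to
 * dma_free_attrs() with the same attribute, never dereferenced. Releasing
 * the sg_table with sg_free_table()/kfree() is an assumption about how
 * gk20a_get_sgtable_from_pages() builds it.
 */
static void gk20a_ltc_free_virt_cbc(struct gk20a *g)
{
	struct device *d = dev_from_gk20a(g);
	struct gr_gk20a *gr = &g->gr;
	DEFINE_DMA_ATTRS(attrs);

	if (!gr->compbit_store.pages)
		return;

	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	if (gr->compbit_store.sgt) {
		sg_free_table(gr->compbit_store.sgt);
		kfree(gr->compbit_store.sgt);
		gr->compbit_store.sgt = NULL;
	}

	dma_free_attrs(d, gr->compbit_store.size, gr->compbit_store.pages,
		       gr->compbit_store.base_iova, &attrs);
	gr->compbit_store.pages = NULL;
	gr->compbit_store.size = 0;
}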
Example #2
0
int hw_alloc(struct delta_ctx *ctx, u32 size, const char *name,
	     struct delta_buf *buf)
{
	struct delta_dev *delta = ctx->dev;
	dma_addr_t dma_addr;
	void *addr;
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	addr = dma_alloc_attrs(delta->dev, size, &dma_addr,
			       GFP_KERNEL | __GFP_NOWARN, attrs);
	if (!addr) {
		dev_err(delta->dev,
			"%s hw_alloc:dma_alloc_coherent failed for %s (size=%d)\n",
			ctx->name, name, size);
		ctx->sys_errors++;
		return -ENOMEM;
	}

	buf->size = size;
	buf->paddr = dma_addr;
	buf->vaddr = addr;
	buf->name = name;
	buf->attrs = attrs;

	dev_dbg(delta->dev,
		"%s allocate %d bytes of HW memory @(virt=0x%p, phy=0x%pad): %s\n",
		ctx->name, size, buf->vaddr, &buf->paddr, buf->name);

	return 0;
}
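
/*
 * A sketch of the matching release helper, assuming the delta_buf fields
 * stored above; the driver's real free path may differ. The key point is
 * that dma_free_attrs() must receive the same attrs value that was passed
 * to dma_alloc_attrs().
 */
void hw_free(struct delta_ctx *ctx, struct delta_buf *buf)
{
	struct delta_dev *delta = ctx->dev;

	dev_dbg(delta->dev,
		"%s free %d bytes of HW memory @(virt=0x%p, phy=0x%pad): %s\n",
		ctx->name, buf->size, buf->vaddr, &buf->paddr, buf->name);

	dma_free_attrs(delta->dev, buf->size, buf->vaddr, buf->paddr,
		       buf->attrs);
}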
Example #3
0
/**
 * bdisp_hw_alloc_nodes
 * @ctx:        bdisp context
 *
 * Allocate dma memory for nodes
 *
 * RETURNS:
 * 0 on success
 */
int bdisp_hw_alloc_nodes(struct bdisp_ctx *ctx)
{
	struct device *dev = ctx->bdisp_dev->dev;
	unsigned int i, node_size = sizeof(struct bdisp_node);
	void *base;
	dma_addr_t paddr;
	DEFINE_DMA_ATTRS(attrs);

	/* Allocate all the nodes within a single memory page */
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	base = dma_alloc_attrs(dev, node_size * MAX_NB_NODE, &paddr,
			       GFP_KERNEL | GFP_DMA, &attrs);
	if (!base) {
		dev_err(dev, "%s no mem\n", __func__);
		return -ENOMEM;
	}

	memset(base, 0, node_size * MAX_NB_NODE);

	for (i = 0; i < MAX_NB_NODE; i++) {
		ctx->node[i] = base;
		ctx->node_paddr[i] = paddr;
		dev_dbg(dev, "node[%d]=0x%p (paddr=%pad)\n", i, ctx->node[i],
			&paddr);
		base += node_size;
		paddr += node_size;
	}

	return 0;
}
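
/*
 * Since Linux 4.8 the struct dma_attrs / DEFINE_DMA_ATTRS() / dma_set_attr()
 * interface used above has been replaced by a plain unsigned long bitmask
 * passed directly to dma_alloc_attrs() and dma_free_attrs(). A hypothetical
 * helper sketching the same write-combined allocation in the newer style:
 */
static void *bdisp_hw_alloc_wc(struct device *dev, size_t len,
			       dma_addr_t *paddr)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	return dma_alloc_attrs(dev, len, paddr, GFP_KERNEL | GFP_DMA, attrs);
}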
Example #4
0
int pas_init_image(enum pas_id id, const u8 *metadata, size_t size)
{
	int ret;
	struct pas_init_image_req {
		u32	proc;
		u32	image_addr;
	} request;
	u32 scm_ret = 0;
	void *mdata_buf;
	dma_addr_t mdata_phys;
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	mdata_buf = dma_alloc_attrs(NULL, size, &mdata_phys, GFP_KERNEL,
				    &attrs);

	if (!mdata_buf) {
		pr_err("Allocation for metadata failed.\n");
		return -ENOMEM;
	}

	memcpy(mdata_buf, metadata, size);

	request.proc = id;
	request.image_addr = mdata_phys;

	ret = scm_call(SCM_SVC_PIL, PAS_INIT_IMAGE_CMD, &request,
			sizeof(request), &scm_ret, sizeof(scm_ret));

	dma_free_attrs(NULL, size, mdata_buf, mdata_phys, &attrs);

	if (ret)
		return ret;
	return scm_ret;
}
Example #5
0
static int qproc_mba_load_mdt(struct qproc *qproc, const struct firmware *fw)
{
	DEFINE_DMA_ATTRS(attrs);
	unsigned long timeout;
	dma_addr_t phys;
	dma_addr_t end;
	void *ptr;
	int ret;
	s32 val;

	dma_set_mask(qproc->dev, DMA_BIT_MASK(32));
	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mba metadata buffer\n");
		return -ENOMEM;
	}

	end = phys + fw->size;
	dev_info(qproc->dev, "loading mdt header from %pa to %pa\n", &phys, &end);

	memcpy(ptr, fw->data, fw->size);

	writel_relaxed(0, qproc->rmb_base + RMB_PMI_CODE_LENGTH);

	writel_relaxed(phys, qproc->rmb_base + RMB_PMI_META_DATA);
	writel(CMD_META_DATA_READY, qproc->rmb_base + RMB_MBA_COMMAND);

	timeout = jiffies + HZ;
	for (;;) {
		msleep(1);

		val = readl(qproc->rmb_base + RMB_MBA_STATUS);
		if (val == STATUS_META_DATA_AUTH_SUCCESS || val < 0)
			break;

		if (time_after(jiffies, timeout))
			break;
	}
	if (val == 0) {
		dev_err(qproc->dev, "MBA authentication of headers timed out\n");
		ret = -ETIMEDOUT;
		goto out;
	} else if (val < 0) {
		dev_err(qproc->dev, "MBA returned error %d for headers\n", val);
		ret = -EINVAL;
		goto out;
	}

	dev_err(qproc->dev, "mdt authenticated\n");

	ret = 0;
out:
	dma_free_attrs(qproc->dev, fw->size, ptr, phys, &attrs);

	return ret;
}
Example #6
0
static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
					size_t size)
{
	struct modem_data *drv = dev_get_drvdata(pil->dev);
	void *mdata_virt;
	dma_addr_t mdata_phys;
	s32 status;
	int ret;
	DEFINE_DMA_ATTRS(attrs);

	drv->mba_mem_dev.coherent_dma_mask =
		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	/* Make metadata physically contiguous and 4K aligned. */
	mdata_virt = dma_alloc_attrs(&drv->mba_mem_dev, size, &mdata_phys,
					GFP_KERNEL, &attrs);
	if (!mdata_virt) {
		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
		ret = -ENOMEM;
		goto fail;
	}
	memcpy(mdata_virt, metadata, size);
	/* wmb() ensures copy completes prior to starting authentication. */
	wmb();

	/* Initialize length counter to 0 */
	writel_relaxed(0, drv->rmb_base + RMB_PMI_CODE_LENGTH);

	/* Pass address of meta-data to the MBA and perform authentication */
	writel_relaxed(mdata_phys, drv->rmb_base + RMB_PMI_META_DATA);
	writel_relaxed(CMD_META_DATA_READY, drv->rmb_base + RMB_MBA_COMMAND);
	ret = readl_poll_timeout(drv->rmb_base + RMB_MBA_STATUS, status,
		status == STATUS_META_DATA_AUTH_SUCCESS || status < 0,
		POLL_INTERVAL_US, modem_auth_timeout_ms * 1000);
	if (ret) {
		dev_err(pil->dev, "MBA authentication of headers timed out\n");
	} else if (status < 0) {
		dev_err(pil->dev, "MBA returned error %d for headers\n",
				status);
		ret = -EINVAL;
	}

	dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt, mdata_phys, &attrs);

	if (!ret)
		return ret;

fail:
	modem_log_rmb_regs(drv->rmb_base);
	if (drv->q6) {
		pil_mss_shutdown(pil);
		dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_size,
				drv->q6->mba_virt, drv->q6->mba_phys,
				&drv->attrs_dma);
		drv->q6->mba_virt = NULL;
	}
	return ret;
}
Example #7
0
struct mtk_drm_gem_obj *mtk_drm_gem_create(struct drm_device *dev,
		unsigned long size, bool alloc_kmap)
{
	struct mtk_drm_gem_obj *mtk_gem;
	struct drm_gem_object *obj;
	int ret;

	mtk_gem = mtk_drm_gem_init(dev, size);
	if (IS_ERR(mtk_gem))
		return ERR_CAST(mtk_gem);

	obj = &mtk_gem->base;

	init_dma_attrs(&mtk_gem->dma_attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &mtk_gem->dma_attrs);

	if (!alloc_kmap)
		dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &mtk_gem->dma_attrs);

	mtk_gem->kvaddr = dma_alloc_attrs(dev->dev, obj->size,
			(dma_addr_t *)&mtk_gem->dma_addr, GFP_KERNEL,
			&mtk_gem->dma_attrs);
	if (!mtk_gem->kvaddr) {
		DRM_ERROR("failed to allocate %zx byte dma buffer", obj->size);
		ret = -ENOMEM;
		goto err_dma;
	}

	{
		mtk_gem->sgt = kzalloc(sizeof(*mtk_gem->sgt), GFP_KERNEL);
		if (!mtk_gem->sgt) {
			ret = -ENOMEM;
			goto err_sgt;
		}

		ret = dma_get_sgtable_attrs(dev->dev, mtk_gem->sgt,
			mtk_gem->kvaddr, mtk_gem->dma_addr, obj->size,
			&mtk_gem->dma_attrs);
		if (ret) {
			DRM_ERROR("failed to allocate sgt, %d\n", ret);
			goto err_get_sgtable;
		}
	}

	DRM_INFO("kvaddr = %p dma_addr = %pad\n",
			mtk_gem->kvaddr, &mtk_gem->dma_addr);

	return mtk_gem;
err_get_sgtable:
	kfree(mtk_gem->sgt);
err_sgt:
	dma_free_attrs(dev->dev, obj->size, mtk_gem->kvaddr, mtk_gem->dma_addr,
			&mtk_gem->dma_attrs);
err_dma:
	kfree(mtk_gem);
	return ERR_PTR(ret);
}
Example #8
0
static int flcn_read_ucode(struct platform_device *dev, const char *fw_name)
{
	struct flcn *v = get_flcn(dev);
	const struct firmware *ucode_fw;
	int err;
	DEFINE_DMA_ATTRS(attrs);

	nvhost_dbg_fn("");

	v->dma_addr = 0;
	v->mapped = NULL;

	ucode_fw = nvhost_client_request_firmware(dev, fw_name);
	if (!ucode_fw) {
		nvhost_dbg_fn("request firmware failed");
		dev_err(&dev->dev, "failed to get firmware\n");
		err = -ENOENT;
		return err;
	}

	v->size = ucode_fw->size;
	dma_set_attr(DMA_ATTR_READ_ONLY, &attrs);

	v->mapped = dma_alloc_attrs(&dev->dev,
				v->size, &v->dma_addr,
				GFP_KERNEL, &attrs);
	if (!v->mapped) {
		dev_err(&dev->dev, "dma memory allocation failed");
		err = -ENOMEM;
		goto clean_up;
	}

	err = flcn_setup_ucode_image(dev, v->mapped, ucode_fw);
	if (err) {
		dev_err(&dev->dev, "failed to parse firmware image\n");
		goto clean_up;
	}

	v->valid = true;

	release_firmware(ucode_fw);

	return 0;

 clean_up:
	if (v->mapped) {
		dma_free_attrs(&dev->dev,
			v->size, v->mapped,
			v->dma_addr, &attrs);
		v->mapped = NULL;
		v->dma_addr = 0;
	}
	release_firmware(ucode_fw);
	return err;
}
Example #9
0
int msenc_read_ucode(struct platform_device *dev, const char *fw_name)
{
    struct msenc *m = get_msenc(dev);
    const struct firmware *ucode_fw;
    int err;

    m->phys = 0;
    m->mapped = NULL;
    init_dma_attrs(&m->attrs);

    ucode_fw  = nvhost_client_request_firmware(dev, fw_name);
    if (!ucode_fw) {
        dev_err(&dev->dev, "failed to get msenc firmware\n");
        err = -ENOENT;
        return err;
    }

    m->size = ucode_fw->size;
    dma_set_attr(DMA_ATTR_READ_ONLY, &m->attrs);

    m->mapped = dma_alloc_attrs(&dev->dev,
                                m->size, &m->phys,
                                GFP_KERNEL, &m->attrs);
    if (!m->mapped) {
        dev_err(&dev->dev, "dma memory allocation failed");
        err = -ENOMEM;
        goto clean_up;
    }

    err = msenc_setup_ucode_image(dev, m->mapped, ucode_fw);
    if (err) {
        dev_err(&dev->dev, "failed to parse firmware image\n");
        goto clean_up;
    }

    m->valid = true;

    release_firmware(ucode_fw);

    return 0;

clean_up:
    if (m->mapped) {
        dma_free_attrs(&dev->dev,
                       m->size, m->mapped,
                       m->phys, &m->attrs);
        m->mapped = NULL;
    }
    release_firmware(ucode_fw);
    return err;
}
Example #10
0
int rockchip_vpu_jpeg_enc_init(struct rockchip_vpu_ctx *ctx)
{
	ctx->jpeg_enc.bounce_buffer.size =
		ctx->dst_fmt.plane_fmt[0].sizeimage -
		ctx->vpu_dst_fmt->header_size;

	ctx->jpeg_enc.bounce_buffer.cpu =
		dma_alloc_attrs(ctx->dev->dev,
				ctx->jpeg_enc.bounce_buffer.size,
				&ctx->jpeg_enc.bounce_buffer.dma,
				GFP_KERNEL,
				DMA_ATTR_ALLOC_SINGLE_PAGES);
	if (!ctx->jpeg_enc.bounce_buffer.cpu)
		return -ENOMEM;

	return 0;
}
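
/*
 * A matching release sketch (the exit helper name is assumed): the buffer
 * must be freed with the same DMA_ATTR_ALLOC_SINGLE_PAGES attribute that
 * was used for the allocation above.
 */
void rockchip_vpu_jpeg_enc_exit(struct rockchip_vpu_ctx *ctx)
{
	dma_free_attrs(ctx->dev->dev,
		       ctx->jpeg_enc.bounce_buffer.size,
		       ctx->jpeg_enc.bounce_buffer.cpu,
		       ctx->jpeg_enc.bounce_buffer.dma,
		       DMA_ATTR_ALLOC_SINGLE_PAGES);
}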
Example #11
0
/* ION CMA heap operations functions */
static struct ion_secure_cma_buffer_info *__ion_secure_cma_allocate(
			    struct ion_heap *heap, struct ion_buffer *buffer,
			    unsigned long len, unsigned long align,
			    unsigned long flags)
{
	struct device *dev = heap->priv;
	struct ion_secure_cma_buffer_info *info;
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

	dev_dbg(dev, "Request buffer allocation len %ld\n", len);

	info = kzalloc(sizeof(struct ion_secure_cma_buffer_info), GFP_KERNEL);
	if (!info) {
		dev_err(dev, "Can't allocate buffer info\n");
		return ION_CMA_ALLOCATE_FAILED;
	}

	info->cpu_addr = dma_alloc_attrs(dev, len, &(info->handle), 0, &attrs);

	if (!info->cpu_addr) {
		dev_err(dev, "Fail to allocate buffer\n");
		goto err;
	}

	info->table = kmalloc(sizeof(struct sg_table), GFP_KERNEL);
	if (!info->table) {
		dev_err(dev, "Fail to allocate sg table\n");
		goto err;
	}

	ion_secure_cma_get_sgtable(dev,
			info->table, info->cpu_addr, info->handle, len);

	info->secure.buffer = info->handle;

	/* keep this for memory release */
	buffer->priv_virt = info;
	dev_dbg(dev, "Allocate buffer %p\n", buffer);
	return info;

err:
	kfree(info);
	return ION_CMA_ALLOCATE_FAILED;
}
Example #12
0
static int rockchip_gem_alloc_dma(struct rockchip_gem_object *rk_obj,
				  bool alloc_kmap)
{
	struct drm_gem_object *obj = &rk_obj->base;
	struct drm_device *drm = obj->dev;

	rk_obj->dma_attrs = DMA_ATTR_WRITE_COMBINE;

	if (!alloc_kmap)
		rk_obj->dma_attrs |= DMA_ATTR_NO_KERNEL_MAPPING;

	rk_obj->kvaddr = dma_alloc_attrs(drm->dev, obj->size,
					 &rk_obj->dma_addr, GFP_KERNEL,
					 rk_obj->dma_attrs);
	if (!rk_obj->kvaddr) {
		DRM_ERROR("failed to allocate %zu byte dma buffer", obj->size);
		return -ENOMEM;
	}

	return 0;
}
Example #13
0
/**
 * bdisp_hw_alloc_filters
 * @dev:        device
 *
 * Allocate dma memory for filters
 *
 * RETURNS:
 * 0 on success
 */
int bdisp_hw_alloc_filters(struct device *dev)
{
	unsigned int i, size;
	void *base;
	dma_addr_t paddr;
	DEFINE_DMA_ATTRS(attrs);

	/* Allocate all the filters within a single memory page */
	size = (BDISP_HF_NB * NB_H_FILTER) + (BDISP_VF_NB * NB_V_FILTER);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	base = dma_alloc_attrs(dev, size, &paddr, GFP_KERNEL | GFP_DMA, &attrs);
	if (!base)
		return -ENOMEM;

	/* Setup filter addresses */
	for (i = 0; i < NB_H_FILTER; i++) {
		bdisp_h_filter[i].min = bdisp_h_spec[i].min;
		bdisp_h_filter[i].max = bdisp_h_spec[i].max;
		memcpy(base, bdisp_h_spec[i].coef, BDISP_HF_NB);
		bdisp_h_filter[i].virt = base;
		bdisp_h_filter[i].paddr = paddr;
		base += BDISP_HF_NB;
		paddr += BDISP_HF_NB;
	}

	for (i = 0; i < NB_V_FILTER; i++) {
		bdisp_v_filter[i].min = bdisp_v_spec[i].min;
		bdisp_v_filter[i].max = bdisp_v_spec[i].max;
		memcpy(base, bdisp_v_spec[i].coef, BDISP_VF_NB);
		bdisp_v_filter[i].virt = base;
		bdisp_v_filter[i].paddr = paddr;
		base += BDISP_VF_NB;
		paddr += BDISP_VF_NB;
	}

	return 0;
}
Example #14
0
int pil_mss_reset_load_mba(struct pil_desc *pil)
{
	struct q6v5_data *drv = container_of(pil, struct q6v5_data, desc);
	struct modem_data *md = dev_get_drvdata(pil->dev);
	const struct firmware *fw;
	char fw_name_legacy[10] = "mba.b00";
	char fw_name[10] = "mba.mbn";
	char *fw_name_p;
	void *mba_virt;
	dma_addr_t mba_phys, mba_phys_end;
	int ret, count;
	const u8 *data;

	fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
	/* Load and authenticate mba image */
	ret = request_firmware(&fw, fw_name_p, pil->dev);
	if (ret) {
		dev_err(pil->dev, "Failed to locate %s\n",
						fw_name_p);
		return ret;
	}

	drv->mba_size = SZ_1M;
	md->mba_mem_dev.coherent_dma_mask =
		DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
	init_dma_attrs(&md->attrs_dma);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &md->attrs_dma);
	mba_virt = dma_alloc_attrs(&md->mba_mem_dev, drv->mba_size,
			&mba_phys, GFP_KERNEL, &md->attrs_dma);
	if (!mba_virt) {
		dev_err(pil->dev, "MBA metadata buffer allocation failed\n");
		ret = -ENOMEM;
		goto err_dma_alloc;
	}

	drv->mba_phys = mba_phys;
	drv->mba_virt = mba_virt;
	mba_phys_end = mba_phys + drv->mba_size;

	dev_info(pil->dev, "MBA: loading from %pa to %pa\n", &mba_phys,
								&mba_phys_end);
	/* Load the MBA image into memory */
	data = fw ? fw->data : NULL;
	if (!data) {
		dev_err(pil->dev, "MBA data is NULL\n");
		ret = -ENOMEM;
		goto err_mss_reset;
	}
	count = fw->size;
	memcpy(mba_virt, data, count);
	wmb();

	ret = pil_mss_reset(pil);
	if (ret) {
		dev_err(pil->dev, "MBA boot failed.\n");
		goto err_mss_reset;
	}

	release_firmware(fw);

	return 0;

err_mss_reset:
	dma_free_attrs(&md->mba_mem_dev, drv->mba_size, drv->mba_virt,
				drv->mba_phys, &md->attrs_dma);
	drv->mba_virt = NULL;
err_dma_alloc:
	release_firmware(fw);
	return ret;
}
Example #15
0
static int msm_iommu_sec_ptbl_init(struct device *dev)
{
    int psize[2] = {0, 0};
    unsigned int spare = 0;
    int ret;
    int version;
    void *cpu_addr;
    dma_addr_t paddr;
    DEFINE_DMA_ATTRS(attrs);
    static bool allocated = false;

    if (allocated)
        return 0;

    version = qcom_scm_get_feat_version(SCM_SVC_MP);

    if (version >= MAKE_VERSION(1, 1, 1)) {
        ret = qcom_scm_iommu_set_cp_pool_size(MAXIMUM_VIRT_SIZE, 0);
        if (ret) {
            dev_err(dev, "failed setting max virtual size (%d)\n",
                    ret);
            return ret;
        }
    }

    ret = qcom_scm_iommu_secure_ptbl_size(spare, psize);
    if (ret) {
        dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
                ret);
        return ret;
    }

    if (psize[1]) {
        dev_err(dev, "failed to get iommu secure pgtable size (%d)\n",
                psize[1]);
        return psize[1];
    }

    dev_info(dev, "iommu sec: pgtable size: %d\n", psize[0]);

    dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);

    cpu_addr = dma_alloc_attrs(dev, psize[0], &paddr, GFP_KERNEL, &attrs);
    if (!cpu_addr) {
        dev_err(dev, "failed to allocate %d bytes for pgtable\n",
                psize[0]);
        return -ENOMEM;
    }

    ret = qcom_scm_iommu_secure_ptbl_init(paddr, psize[0], spare);
    if (ret) {
        dev_err(dev, "failed to init iommu pgtable (%d)\n", ret);
        goto free_mem;
    }

    allocated = true;

    return 0;

free_mem:
    dma_free_attrs(dev, psize[0], cpu_addr, paddr, &attrs);
    return ret;
}
Example #16
0
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
static int gpu_probe(struct platform_device *pdev)
#else
static int __devinit gpu_probe(struct platform_device *pdev)
#endif
{
    int ret = -ENODEV;
    struct resource* res;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	struct contiguous_mem_pool *pool;
	struct reset_control *rstc;
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
	struct device_node *dn = pdev->dev.of_node;
	const u32 *prop;
#else
	struct viv_gpu_platform_data *pdata;
#endif
    gcmkHEADER();

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "phys_baseaddr");
    if (res)
        baseAddress = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_3d");
    if (res)
        irqLine = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_3d");
    if (res)
    {
        registerMemBase = res->start;
        registerMemSize = res->end - res->start + 1;
    }

    res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_2d");
    if (res)
        irqLine2D = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_2d");
    if (res)
    {
        registerMemBase2D = res->start;
        registerMemSize2D = res->end - res->start + 1;
    }

    res = platform_get_resource_byname(pdev, IORESOURCE_IRQ, "irq_vg");
    if (res)
        irqLineVG = res->start;

    res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "iobase_vg");
    if (res)
    {
        registerMemBaseVG = res->start;
        registerMemSizeVG = res->end - res->start + 1;
    }

#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	pool = devm_kzalloc(&pdev->dev, sizeof(*pool), GFP_KERNEL);
	if (!pool)
		return -ENOMEM;
	pool->size = contiguousSize;
	init_dma_attrs(&pool->attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &pool->attrs);
	pool->virt = dma_alloc_attrs(&pdev->dev, pool->size, &pool->phys,
				     GFP_KERNEL, &pool->attrs);
	if (!pool->virt) {
		dev_err(&pdev->dev, "Failed to allocate contiguous memory\n");
		return -ENOMEM;
	}
	contiguousBase = pool->phys;
	dev_set_drvdata(&pdev->dev, pool);
#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,5,0)
	prop = of_get_property(dn, "contiguousbase", NULL);
	if (prop)
		contiguousBase = *prop;
	of_property_read_u32(dn, "contiguoussize", (u32 *)&contiguousSize);
#else
    pdata = pdev->dev.platform_data;
    if (pdata) {
        contiguousBase = pdata->reserved_mem_base;
        contiguousSize = pdata->reserved_mem_size;
     }
#endif
    if (contiguousSize == 0)
       gckOS_Print("Warning: No contiguous memory is reserverd for gpu.!\n ");
    ret = drv_init(&pdev->dev);

    if (!ret)
    {
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	rstc = devm_reset_control_get(&pdev->dev, "gpu3d");
	galDevice->rstc[gcvCORE_MAJOR] = IS_ERR(rstc) ? NULL : rstc;

	rstc = devm_reset_control_get(&pdev->dev, "gpu2d");
	galDevice->rstc[gcvCORE_2D] = IS_ERR(rstc) ? NULL : rstc;

	rstc = devm_reset_control_get(&pdev->dev, "gpuvg");
	galDevice->rstc[gcvCORE_VG] = IS_ERR(rstc) ? NULL : rstc;
#endif
        platform_set_drvdata(pdev, galDevice);

#if gcdENABLE_FSCALE_VAL_ADJUST
        if (galDevice->kernels[gcvCORE_MAJOR])
            REG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
#endif
        gcmkFOOTER_NO();
        return ret;
    }
#if gcdENABLE_FSCALE_VAL_ADJUST
    UNREG_THERMAL_NOTIFIER(&thermal_hot_pm_notifier);
#endif
#if LINUX_VERSION_CODE >= KERNEL_VERSION(3,10,0)
	dma_free_attrs(&pdev->dev, pool->size, pool->virt, pool->phys,
		       &pool->attrs);
#endif
    gcmkFOOTER_ARG(KERN_INFO "Failed to register gpu driver: %d\n", ret);
    return ret;
}
Example #17
0
static int lowlevel_buffer_allocate(struct drm_device *dev,
		unsigned int flags, struct rockchip_drm_gem_buf *buf)
{
	int ret = 0;
	enum dma_attr attr;
	unsigned int nr_pages;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (buf->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&buf->dma_attrs);

	/*
	 * If ROCKCHIP_BO_CONTIG, a fully physically contiguous memory
	 * region will be allocated; otherwise the allocation is only as
	 * physically contiguous as possible.
	 */
	if (!(flags & ROCKCHIP_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);

	/*
	 * If ROCKCHIP_BO_WC or ROCKCHIP_BO_NONCACHABLE, use a write-combine
	 * mapping; otherwise use a cacheable mapping.
	 */
	if (flags & ROCKCHIP_BO_WC || !(flags & ROCKCHIP_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &buf->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);

	nr_pages = buf->size >> PAGE_SHIFT;

	if (!is_drm_iommu_supported(dev)) {
		dma_addr_t start_addr;
		unsigned int i = 0;

		buf->pages = kzalloc(sizeof(struct page *) * nr_pages,
					GFP_KERNEL);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			return -ENOMEM;
		}

		buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->kvaddr) {
			DRM_ERROR("failed to allocate buffer.\n");
			kfree(buf->pages);
			return -ENOMEM;
		}

		start_addr = buf->dma_addr;
		while (i < nr_pages) {
			buf->pages[i] = phys_to_page(start_addr);
			start_addr += PAGE_SIZE;
			i++;
		}
	} else {

		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate buffer.\n");
			return -ENOMEM;
		}
	}

	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
	if (!buf->sgt) {
		DRM_ERROR("failed to get sg table.\n");
		ret = -ENOMEM;
		goto err_free_attrs;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr,
			buf->size);

	return ret;

err_free_attrs:
	dma_free_attrs(dev->dev, buf->size, buf->pages,
			(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
	buf->dma_addr = (dma_addr_t)NULL;

	if (!is_drm_iommu_supported(dev))
		kfree(buf->pages);

	return ret;
}
Example #18
0
static int sgiseeq_probe(struct platform_device *pdev)
{
	struct sgiseeq_platform_data *pd = dev_get_platdata(&pdev->dev);
	struct hpc3_regs *hpcregs = pd->hpc;
	struct sgiseeq_init_block *sr;
	unsigned int irq = pd->irq;
	struct sgiseeq_private *sp;
	struct net_device *dev;
	int err;

	dev = alloc_etherdev(sizeof (struct sgiseeq_private));
	if (!dev) {
		err = -ENOMEM;
		goto err_out;
	}

	platform_set_drvdata(pdev, dev);
	sp = netdev_priv(dev);

	/* Make private data page aligned */
	sr = dma_alloc_attrs(&pdev->dev, sizeof(*sp->srings), &sp->srings_dma,
			     GFP_KERNEL, DMA_ATTR_NON_CONSISTENT);
	if (!sr) {
		printk(KERN_ERR "Sgiseeq: Page alloc failed, aborting.\n");
		err = -ENOMEM;
		goto err_out_free_dev;
	}
	sp->srings = sr;
	sp->rx_desc = sp->srings->rxvector;
	sp->tx_desc = sp->srings->txvector;
	spin_lock_init(&sp->tx_lock);

	/* A couple calculations now, saves many cycles later. */
	setup_rx_ring(dev, sp->rx_desc, SEEQ_RX_BUFFERS);
	setup_tx_ring(dev, sp->tx_desc, SEEQ_TX_BUFFERS);

	memcpy(dev->dev_addr, pd->mac, ETH_ALEN);

#ifdef DEBUG
	gpriv = sp;
	gdev = dev;
#endif
	sp->sregs = (struct sgiseeq_regs *) &hpcregs->eth_ext[0];
	sp->hregs = &hpcregs->ethregs;
	sp->name = sgiseeqstr;
	sp->mode = SEEQ_RCMD_RBCAST;

	/* Setup PIO and DMA transfer timing */
	sp->hregs->pconfig = 0x161;
	sp->hregs->dconfig = HPC3_EDCFG_FIRQ | HPC3_EDCFG_FEOP |
			     HPC3_EDCFG_FRXDC | HPC3_EDCFG_PTO | 0x026;

	/* Reset the chip. */
	hpc3_eth_reset(sp->hregs);

	sp->is_edlc = !(sp->sregs->rw.rregs.collision_tx[0] & 0xff);
	if (sp->is_edlc)
		sp->control = SEEQ_CTRL_XCNT | SEEQ_CTRL_ACCNT |
			      SEEQ_CTRL_SFLAG | SEEQ_CTRL_ESHORT |
			      SEEQ_CTRL_ENCARR;

	dev->netdev_ops		= &sgiseeq_netdev_ops;
	dev->watchdog_timeo	= (200 * HZ) / 1000;
	dev->irq		= irq;

	if (register_netdev(dev)) {
		printk(KERN_ERR "Sgiseeq: Cannot register net device, "
		       "aborting.\n");
		err = -ENODEV;
		goto err_out_free_page;
	}

	printk(KERN_INFO "%s: %s %pM\n", dev->name, sgiseeqstr, dev->dev_addr);

	return 0;

err_out_free_page:
	dma_free_attrs(&pdev->dev, sizeof(*sp->srings), sp->srings,
		       sp->srings_dma, DMA_ATTR_NON_CONSISTENT);
err_out_free_dev:
	free_netdev(dev);

err_out:
	return err;
}
Example #19
0
static struct resource_table * qproc_find_rsc_table(struct rproc *rproc,
						    const struct firmware *fw,
						    int *tablesz)
{
	static struct resource_table table = { .ver = 1, };

	*tablesz = sizeof(table);
	return &table;
}

static int qproc_load(struct rproc *rproc, const struct firmware *fw)
{
	struct qproc *qproc = rproc->priv;
	DEFINE_DMA_ATTRS(attrs);
	dma_addr_t phys;
	dma_addr_t end;
	void *ptr;

	dma_set_mask(qproc->dev, DMA_BIT_MASK(32));
	dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &attrs);

	ptr = dma_alloc_attrs(qproc->dev, fw->size, &phys, GFP_KERNEL, &attrs);
	if (!ptr) {
		dev_err(qproc->dev, "failed to allocate mba metadata buffer\n");
		return -ENOMEM;
	}

	end = phys + fw->size;
	dev_info(qproc->dev, "loading MBA from %pa to %pa\n", &phys, &end);

	memcpy(ptr, fw->data, fw->size);

	qproc->mba_va = ptr;
	qproc->mba_da = phys;
	qproc->mba_size = fw->size;
	qproc->mba_attrs = attrs;

	return 0;
}
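
/*
 * The fields stashed above imply a later release step. A minimal sketch,
 * assuming a hypothetical qproc_unload_mba() counterpart that is not part
 * of the original code:
 */
static void qproc_unload_mba(struct qproc *qproc)
{
	if (!qproc->mba_va)
		return;

	/* Free with the attrs saved at load time (DMA_ATTR_FORCE_CONTIGUOUS). */
	dma_free_attrs(qproc->dev, qproc->mba_size, qproc->mba_va,
		       qproc->mba_da, &qproc->mba_attrs);
	qproc->mba_va = NULL;
}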

static const struct rproc_fw_ops qproc_fw_ops = {
	.find_rsc_table = qproc_find_rsc_table,
	.load = qproc_load,
	.sanity_check = qproc_sanity_check,
};

static void q6v5proc_reset(struct qproc *qproc)
{
	u32 val;

	/* Assert resets, stop core */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_RESET);
	val |= (Q6SS_CORE_ARES | Q6SS_BUS_ARES_ENA | Q6SS_STOP_CORE);
	writel_relaxed(val, qproc->reg_base + QDSP6SS_RESET);

	/* Enable power block headswitch, and wait for it to stabilize */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_PWR_CTL);
	val |= QDSS_BHS_ON | QDSS_LDO_BYP;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
	mb();
	udelay(1);

	/*
	 * Turn on memories. L2 banks should be done individually
	 * to minimize inrush current.
	 */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_SLP_RET_N | Q6SS_L2TAG_SLP_NRET_N |
		Q6SS_ETB_SLP_NRET_N | Q6SS_L2DATA_STBY_N;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_L2DATA_SLP_NRET_N_2;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_L2DATA_SLP_NRET_N_1;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);
	val |= Q6SS_L2DATA_SLP_NRET_N_0;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);

	/* Remove IO clamp */
	val &= ~Q6SS_CLAMP_IO;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_PWR_CTL);

	/* Bring core out of reset */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_RESET);
	val &= ~Q6SS_CORE_ARES;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_RESET);

	/* Turn on core clock */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_GFMUX_CTL);
	val |= Q6SS_CLK_ENA;

#if 0
	/* Need a different clock source for v5.2.0 */
	if (qproc->qdsp6v5_2_0) {
		val &= ~Q6SS_CLK_SRC_SEL_FIELD;
		val |= Q6SS_CLK_SRC_SEL_C;
	}

#endif
	/* force clock on during source switch */
//	if (qproc->qdsp6v56)
		val |= Q6SS_CLK_SRC_SWITCH_CLK_OVR;

	writel_relaxed(val, qproc->reg_base + QDSP6SS_GFMUX_CTL);

	/* Start core execution */
	val = readl_relaxed(qproc->reg_base + QDSP6SS_RESET);
	val &= ~Q6SS_STOP_CORE;
	writel_relaxed(val, qproc->reg_base + QDSP6SS_RESET);
}