Example #1
static int _get_iommu_ctxs(struct kgsl_iommu *iommu, struct kgsl_device *device,
	struct kgsl_device_iommu_data *data)
{
	int i;

	for (i = 0; i < data->iommu_ctx_count; i++) {
		if (iommu->dev_count >= KGSL_IOMMU_MAX_DEV) {
			KGSL_CORE_ERR("Tried to attach too many IOMMU "
				"devices\n");
			return -ENOMEM;
		}

		if (!data->iommu_ctx_names[i])
			continue;

		iommu->dev[iommu->dev_count].dev =
			msm_iommu_get_ctx(data->iommu_ctx_names[i]);
		if (iommu->dev[iommu->dev_count].dev == NULL) {
			KGSL_CORE_ERR("Failed to iommu dev handle for "
				"device %s\n", data->iommu_ctx_names[i]);
			return -EINVAL;
		}

		iommu->dev_count++;
	}

	return 0;
}
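
Every example in this list follows the same basic step: look up a named IOMMU context device with msm_iommu_get_ctx() and fail the setup path if the lookup returns nothing. The sketch below distils that pattern on its own; the context name "example_ctx" and the my_drv_data structure are illustrative placeholders, not code from any of the drivers shown here.

/*
 * Minimal sketch of the shared lookup pattern (assumes <mach/iommu.h>
 * for msm_iommu_get_ctx() and <linux/device.h> for struct device).
 * "example_ctx" and struct my_drv_data are hypothetical placeholders.
 */
struct my_drv_data {
	struct device *iommu_ctx;
};

static int my_drv_get_iommu_ctx(struct my_drv_data *drv)
{
	/* Returns the context device registered under this name; the
	 * examples above treat a NULL return as "context not available". */
	drv->iommu_ctx = msm_iommu_get_ctx("example_ctx");
	if (!drv->iommu_ctx) {
		pr_err("unable to get iommu ctx(%s)\n", "example_ctx");
		return -EINVAL;
	}

	return 0;
}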
Example #2
int mdss_iommu_init(void)
{
	struct iommu_domain *domain;
	int domain_idx, i;

	domain_idx = msm_register_domain(&mdp_iommu_layout);
	if (IS_ERR_VALUE(domain_idx))
		return -EINVAL;

	domain = msm_get_iommu_domain(domain_idx);
	if (!domain) {
		pr_err("unable to get iommu domain(%d)\n", domain_idx);
		return -EINVAL;
	}

	iommu_set_fault_handler(domain, mdss_iommu_fault_handler);

	for (i = 0; i < ARRAY_SIZE(mdp_iommu_ctx); i++) {
		mdp_iommu_ctx[i].ctx = msm_iommu_get_ctx(mdp_iommu_ctx[i].name);
		if (!mdp_iommu_ctx[i].ctx) {
			pr_warn("unable to get iommu ctx(%s)\n",
					mdp_iommu_ctx[i].name);
			return -EINVAL;
		}
	}
	mdss_res->iommu_domain = domain_idx;

	return 0;
}
Example #3
static int kgsl_get_iommu_ctxt(struct kgsl_iommu *iommu,
				struct kgsl_device *device)
{
	int status = 0;
	struct platform_device *pdev =
		container_of(device->parentdev, struct platform_device, dev);
	struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
	if (pdata_dev->iommu_user_ctx_name)
		iommu->iommu_user_dev = msm_iommu_get_ctx(
					pdata_dev->iommu_user_ctx_name);
	if (pdata_dev->iommu_priv_ctx_name)
		iommu->iommu_priv_dev = msm_iommu_get_ctx(
					pdata_dev->iommu_priv_ctx_name);
	if (!iommu->iommu_user_dev) {
		KGSL_CORE_ERR("Failed to get user iommu dev handle for "
				"device %s\n",
				pdata_dev->iommu_user_ctx_name);
		status = -EINVAL;
	}
	return status;
}
Example #4
/*
 * _get_iommu_ctxs - Get device pointers to the IOMMU contexts
 * @mmu - Pointer to the mmu device
 * @data - Pointer to the platform data describing the iommu devices
 * of one iommu unit
 * @unit_id - The IOMMU unit number. This is not a specific ID but a
 * serial number; the serial numbers are treated as IDs of the
 * IOMMU units
 *
 * Return - 0 on success, else an error code
 */
static int _get_iommu_ctxs(struct kgsl_mmu *mmu,
	struct kgsl_device_iommu_data *data, unsigned int unit_id)
{
	struct kgsl_iommu *iommu = mmu->priv;
	struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[unit_id];
	int i, j;
	int found_ctx;

	for (j = 0; j < KGSL_IOMMU_MAX_DEVS_PER_UNIT; j++) {
		found_ctx = 0;
		for (i = 0; i < data->iommu_ctx_count; i++) {
			if (j == data->iommu_ctxs[i].ctx_id) {
				found_ctx = 1;
				break;
			}
		}
		if (!found_ctx)
			break;
		if (!data->iommu_ctxs[i].iommu_ctx_name) {
			KGSL_CORE_ERR("Context name invalid\n");
			return -EINVAL;
		}

		iommu_unit->dev[iommu_unit->dev_count].dev =
			msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name);
		if (iommu_unit->dev[iommu_unit->dev_count].dev == NULL) {
			KGSL_CORE_ERR("Failed to get iommu dev handle for "
			"device %s\n", data->iommu_ctxs[i].iommu_ctx_name);
			return -EINVAL;
		}
		iommu_unit->dev[iommu_unit->dev_count].ctx_id =
						data->iommu_ctxs[i].ctx_id;
		iommu_unit->dev[iommu_unit->dev_count].kgsldev = mmu->device;

		KGSL_DRV_INFO(mmu->device,
				"Obtained dev handle %p for iommu context %s\n",
				iommu_unit->dev[iommu_unit->dev_count].dev,
				data->iommu_ctxs[i].iommu_ctx_name);

		iommu_unit->dev_count++;
	}
	if (!j) {
		KGSL_CORE_ERR("No ctxts initialized, user ctxt absent\n ");
		return -EINVAL;
	}

	return 0;
}
Example #5
/*
 * _get_iommu_ctxs - Get device pointers to the IOMMU contexts
 * @mmu - Pointer to the mmu device
 * @data - Pointer to the platform data describing the iommu devices
 * of one iommu unit
 * @unit_id - The IOMMU unit number. This is not a specific ID but a
 * serial number; the serial numbers are treated as IDs of the
 * IOMMU units
 *
 * Return - 0 on success, else an error code
 */
static int _get_iommu_ctxs(struct kgsl_mmu *mmu,
	struct kgsl_device_iommu_data *data, unsigned int unit_id)
{
	struct kgsl_iommu *iommu = mmu->priv;
	struct kgsl_iommu_unit *iommu_unit = &iommu->iommu_units[unit_id];
	int i;

	if (data->iommu_ctx_count > KGSL_IOMMU_MAX_DEVS_PER_UNIT) {
		KGSL_CORE_ERR("Too many iommu devices defined for an "
				"IOMMU unit\n");
		return -EINVAL;
	}

	for (i = 0; i < data->iommu_ctx_count; i++) {
		if (!data->iommu_ctxs[i].iommu_ctx_name)
			continue;

		iommu_unit->dev[iommu_unit->dev_count].dev =
			msm_iommu_get_ctx(data->iommu_ctxs[i].iommu_ctx_name);
		if (iommu_unit->dev[iommu_unit->dev_count].dev == NULL) {
			KGSL_CORE_ERR("Failed to get iommu dev handle for "
			"device %s\n", data->iommu_ctxs[i].iommu_ctx_name);
			return -EINVAL;
		}
		if (KGSL_IOMMU_CONTEXT_USER != data->iommu_ctxs[i].ctx_id &&
			KGSL_IOMMU_CONTEXT_PRIV != data->iommu_ctxs[i].ctx_id) {
			KGSL_CORE_ERR("Invalid context ID defined: %d\n",
					data->iommu_ctxs[i].ctx_id);
			return -EINVAL;
		}
		iommu_unit->dev[iommu_unit->dev_count].ctx_id =
						data->iommu_ctxs[i].ctx_id;
		iommu_unit->dev[iommu_unit->dev_count].kgsldev = mmu->device;

		KGSL_DRV_INFO(mmu->device,
				"Obtained dev handle %p for iommu context %s\n",
				iommu_unit->dev[iommu_unit->dev_count].dev,
				data->iommu_ctxs[i].iommu_ctx_name);

		iommu_unit->dev_count++;
	}

	return 0;
}
Example #6
int mdss_iommu_init(struct mdss_data_type *mdata)
{
    struct msm_iova_layout layout;
    struct iommu_domain *domain;
    struct mdss_iommu_map_type *iomap;
    int i;

    if (mdata->iommu_map) {
        pr_warn("iommu already initialized\n");
        return 0;
    }

    for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
        iomap = &mdss_iommu_map[i];

        layout.client_name = iomap->client_name;
        layout.partitions = iomap->partitions;
        layout.npartitions = iomap->npartitions;
        layout.is_secure = (i == MDSS_IOMMU_DOMAIN_SECURE);

        iomap->domain_idx = msm_register_domain(&layout);
        if (IS_ERR_VALUE(iomap->domain_idx))
            return -EINVAL;

        domain = msm_get_iommu_domain(iomap->domain_idx);
        if (!domain) {
            pr_err("unable to get iommu domain(%d)\n",
                   iomap->domain_idx);
            return -EINVAL;
        }
        iommu_set_fault_handler(domain, mdss_iommu_fault_handler, NULL);

        iomap->ctx = msm_iommu_get_ctx(iomap->ctx_name);
        if (!iomap->ctx) {
            pr_warn("unable to get iommu ctx(%s)\n",
                    iomap->ctx_name);
            return -EINVAL;
        }
    }

    mdata->iommu_map = mdss_iommu_map;

    return 0;
}
Example #7
static int __devinit msm_vpe_probe(struct platform_device *pdev)
{
	int rc = 0;
	struct msm_cam_subdev_info sd_info;

	D("%s: device id = %d\n", __func__, pdev->id);
	vpe_ctrl = kzalloc(sizeof(struct vpe_ctrl_type), GFP_KERNEL);
	if (!vpe_ctrl) {
		pr_err("%s: not enough memory\n", __func__);
		return -ENOMEM;
	}

	v4l2_subdev_init(&vpe_ctrl->subdev, &msm_vpe_subdev_ops);
	v4l2_set_subdevdata(&vpe_ctrl->subdev, vpe_ctrl);
	vpe_ctrl->subdev.internal_ops = &msm_vpe_internal_ops;
	vpe_ctrl->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	snprintf(vpe_ctrl->subdev.name, sizeof(vpe_ctrl->subdev.name), "vpe");
	platform_set_drvdata(pdev, &vpe_ctrl->subdev);

	media_entity_init(&vpe_ctrl->subdev.entity, 0, NULL, 0);
	vpe_ctrl->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
	vpe_ctrl->subdev.entity.group_id = VPE_DEV;
	vpe_ctrl->subdev.entity.name = vpe_ctrl->subdev.name;

	vpe_ctrl->subdev.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;

	vpe_ctrl->vpemem = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "vpe");
	if (!vpe_ctrl->vpemem) {
		pr_err("%s: no mem resource?\n", __func__);
		rc = -ENODEV;
		goto vpe_no_resource;
	}
	vpe_ctrl->vpeirq = platform_get_resource_byname(pdev,
					IORESOURCE_IRQ, "vpe");
	if (!vpe_ctrl->vpeirq) {
		pr_err("%s: no irq resource?\n", __func__);
		rc = -ENODEV;
		goto vpe_no_resource;
	}

	vpe_ctrl->vpeio = request_mem_region(vpe_ctrl->vpemem->start,
		resource_size(vpe_ctrl->vpemem), pdev->name);
	if (!vpe_ctrl->vpeio) {
		pr_err("%s: no valid mem region\n", __func__);
		rc = -EBUSY;
		goto vpe_no_resource;
	}

	rc = request_irq(vpe_ctrl->vpeirq->start, vpe_parse_irq,
		IRQF_TRIGGER_RISING, "vpe", 0);
	if (rc < 0) {
		release_mem_region(vpe_ctrl->vpemem->start,
			resource_size(vpe_ctrl->vpemem));
		pr_err("%s: irq request fail\n", __func__);
		rc = -EBUSY;
		goto vpe_no_resource;
	}

	vpe_ctrl->fs_vpe = regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(vpe_ctrl->fs_vpe)) {
		pr_err("%s: Regulator FS_VPE get failed %ld\n", __func__,
			PTR_ERR(vpe_ctrl->fs_vpe));
		vpe_ctrl->fs_vpe = NULL;
	}

	disable_irq(vpe_ctrl->vpeirq->start);

#ifdef CONFIG_MSM_IOMMU
	/*get device context for IOMMU*/
	vpe_ctrl->iommu_ctx_src = msm_iommu_get_ctx("vpe_src"); /*re-confirm*/
	vpe_ctrl->iommu_ctx_dst = msm_iommu_get_ctx("vpe_dst"); /*re-confirm*/
	if (!vpe_ctrl->iommu_ctx_src || !vpe_ctrl->iommu_ctx_dst) {
		release_mem_region(vpe_ctrl->vpemem->start,
			resource_size(vpe_ctrl->vpemem));
		pr_err("%s: No iommu fw context found\n", __func__);
		rc = -ENODEV;
		goto vpe_no_resource;
	}
#endif

	atomic_set(&vpe_ctrl->active, 0);
	vpe_ctrl->pdev = pdev;
	sd_info.sdev_type = VPE_DEV;
	sd_info.sd_index = pdev->id;
	sd_info.irq_num = vpe_ctrl->vpeirq->start;
	msm_cam_register_subdev_node(&vpe_ctrl->subdev, &sd_info);
	vpe_ctrl->subdev.entity.revision = vpe_ctrl->subdev.devnode->num;
	msm_queue_init(&vpe_ctrl->eventData_q, "ackevents");

	return 0;

vpe_no_resource:
	pr_err("%s: VPE Probe failed.\n", __func__);
	kfree(vpe_ctrl);
	return rc;
}
Example #8
static int venus_register_domain(u32 fw_max_sz)
{
	struct msm_iova_partition venus_fw_partition = {
		.start = 0,
		.size = fw_max_sz,
	};
	struct msm_iova_layout venus_fw_layout = {
		.partitions = &venus_fw_partition,
		.npartitions = 1,
		.client_name = "pil_venus",
		.domain_flags = 0,
	};

	return msm_register_domain(&venus_fw_layout);
}

static int pil_venus_mem_setup(struct platform_device *pdev, size_t size)
{
	int domain;

	venus_data->iommu_fw_ctx  = msm_iommu_get_ctx("venus_fw");
	if (!venus_data->iommu_fw_ctx) {
		dprintk(VIDC_ERR, "No iommu fw context found\n");
		return -ENODEV;
	}

	if (!venus_data->venus_domain_num) {
		size = round_up(size, SZ_4K);
		domain = venus_register_domain(size);
		if (domain < 0) {
			dprintk(VIDC_ERR,
				"Venus fw iommu domain register failed\n");
			return -ENODEV;
		}
		venus_data->iommu_fw_domain = msm_get_iommu_domain(domain);
		if (!venus_data->iommu_fw_domain) {
			dprintk(VIDC_ERR, "No iommu fw domain found\n");
			return -ENODEV;
		}
		venus_data->venus_domain_num = domain;
		venus_data->fw_sz = size;
	}
	return 0;
}

static int pil_venus_auth_and_reset(struct platform_device *pdev)
{
	int rc;
	phys_addr_t fw_bias = venus_data->resources->firmware_base;
	void __iomem *reg_base = venus_data->reg_base;
	u32 ver;
	bool iommu_present = is_iommu_present(venus_data->resources);

	if (!fw_bias) {
		dprintk(VIDC_ERR, "FW bias is not valid\n");
		return -EINVAL;
	}
	/* Get Venus version number */
	if (!venus_data->hw_ver_checked) {
		ver = readl_relaxed(reg_base + VIDC_WRAPPER_HW_VERSION);
		venus_data->hw_ver_minor = (ver & 0x0FFF0000) >> 16;
		venus_data->hw_ver_major = (ver & 0xF0000000) >> 28;
		venus_data->hw_ver_checked = 1;
	}