static int __devinit cpp_probe(struct platform_device *pdev)
{
	struct cpp_device *cpp_dev;
	struct msm_cam_subdev_info sd_info;
	int rc = 0;
	CDBG("%s: device id = %d\n", __func__, pdev->id);
	cpp_dev = kzalloc(sizeof(struct cpp_device), GFP_KERNEL);
	if (!cpp_dev) {
		pr_err("%s: no enough memory\n", __func__);
		return -ENOMEM;
	}
	v4l2_subdev_init(&cpp_dev->subdev, &msm_cpp_subdev_ops);
	cpp_dev->subdev.internal_ops = &msm_cpp_internal_ops;
	snprintf(cpp_dev->subdev.name, ARRAY_SIZE(cpp_dev->subdev.name),
		 "cpp");
	cpp_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	cpp_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
	v4l2_set_subdevdata(&cpp_dev->subdev, cpp_dev);
	platform_set_drvdata(pdev, &cpp_dev->subdev);
	mutex_init(&cpp_dev->mutex);

	cpp_dev->pdev = pdev;

	media_entity_init(&cpp_dev->subdev.entity, 0, NULL, 0);
	cpp_dev->subdev.entity.type = MEDIA_ENT_T_DEVNODE_V4L;
	cpp_dev->subdev.entity.group_id = CPP_DEV;
	cpp_dev->subdev.entity.name = pdev->name;
	sd_info.sdev_type = CPP_DEV;
	sd_info.sd_index = pdev->id;
	msm_cam_register_subdev_node(&cpp_dev->subdev, &sd_info);
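	/*
	 * Build the CPP file_operations from the stock v4l2_subdev_fops,
	 * overriding only unlocked_ioctl; open, release and poll keep the
	 * default subdev behaviour, and the registered device node is then
	 * pointed at this table.
	 */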
	msm_cpp_v4l2_subdev_fops.owner = v4l2_subdev_fops.owner;
	msm_cpp_v4l2_subdev_fops.open = v4l2_subdev_fops.open;
	msm_cpp_v4l2_subdev_fops.unlocked_ioctl = msm_cpp_subdev_fops_ioctl;
	msm_cpp_v4l2_subdev_fops.release = v4l2_subdev_fops.release;
	msm_cpp_v4l2_subdev_fops.poll = v4l2_subdev_fops.poll;

	cpp_dev->subdev.devnode->fops = &msm_cpp_v4l2_subdev_fops;
	cpp_dev->subdev.entity.revision = cpp_dev->subdev.devnode->num;
	msm_cpp_enable_debugfs(cpp_dev);
	msm_queue_init(&cpp_dev->eventData_q, "eventdata");
	msm_queue_init(&cpp_dev->offline_q, "frame");
	msm_queue_init(&cpp_dev->realtime_q, "frame");
	msm_queue_init(&cpp_dev->processing_q, "frame");
	cpp_dev->cpp_open_cnt = 0;

	return rc;
}
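
The probe above only allocates and wires up the cpp_dev context; it does not claim memory regions, IRQs or regulators, so a matching remove callback mostly has to reverse the allocation. The sketch below is illustrative only: the function name is hypothetical, and it assumes the queues are already drained and that unregistering the subdev node is handled elsewhere in the msm_cam framework.

static int __devexit cpp_remove(struct platform_device *pdev)
{
	/* drvdata was set to the subdev in cpp_probe() */
	struct v4l2_subdev *sd = platform_get_drvdata(pdev);
	struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);

	media_entity_cleanup(&cpp_dev->subdev.entity);
	mutex_destroy(&cpp_dev->mutex);
	kfree(cpp_dev);
	return 0;
}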
Example #2
static int __devinit msm_vpe_probe(struct platform_device *pdev)
{
	int rc = 0;
	struct msm_cam_subdev_info sd_info;

	D("%s: device id = %d\n", __func__, pdev->id);
	vpe_ctrl = kzalloc(sizeof(struct vpe_ctrl_type), GFP_KERNEL);
	if (!vpe_ctrl) {
		pr_err("%s: not enough memory\n", __func__);
		return -ENOMEM;
	}

	v4l2_subdev_init(&vpe_ctrl->subdev, &msm_vpe_subdev_ops);
	v4l2_set_subdevdata(&vpe_ctrl->subdev, vpe_ctrl);
	vpe_ctrl->subdev.internal_ops = &msm_vpe_internal_ops;
	vpe_ctrl->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	snprintf(vpe_ctrl->subdev.name, sizeof(vpe_ctrl->subdev.name), "vpe");
	platform_set_drvdata(pdev, &vpe_ctrl->subdev);

	media_entity_init(&vpe_ctrl->subdev.entity, 0, NULL, 0);
	vpe_ctrl->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
	vpe_ctrl->subdev.entity.group_id = VPE_DEV;
	vpe_ctrl->subdev.entity.name = vpe_ctrl->subdev.name;

	vpe_ctrl->subdev.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;

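	/* look up the "vpe" register block and IRQ resources */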
	vpe_ctrl->vpemem = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "vpe");
	if (!vpe_ctrl->vpemem) {
		pr_err("%s: no mem resource?\n", __func__);
		rc = -ENODEV;
		goto vpe_no_resource;
	}
	vpe_ctrl->vpeirq = platform_get_resource_byname(pdev,
					IORESOURCE_IRQ, "vpe");
	if (!vpe_ctrl->vpeirq) {
		pr_err("%s: no irq resource?\n", __func__);
		rc = -ENODEV;
		goto vpe_no_resource;
	}

	vpe_ctrl->vpeio = request_mem_region(vpe_ctrl->vpemem->start,
		resource_size(vpe_ctrl->vpemem), pdev->name);
	if (!vpe_ctrl->vpeio) {
		pr_err("%s: no valid mem region\n", __func__);
		rc = -EBUSY;
		goto vpe_no_resource;
	}

	rc = request_irq(vpe_ctrl->vpeirq->start, vpe_parse_irq,
		IRQF_TRIGGER_RISING, "vpe", NULL);
	if (rc < 0) {
		release_mem_region(vpe_ctrl->vpemem->start,
			resource_size(vpe_ctrl->vpemem));
		pr_err("%s: irq request fail\n", __func__);
		rc = -EBUSY;
		goto vpe_no_resource;
	}

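	/*
	 * A missing VDD regulator is treated as non-fatal; fs_vpe is
	 * cleared so later code can simply skip the regulator calls.
	 */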
	vpe_ctrl->fs_vpe = regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(vpe_ctrl->fs_vpe)) {
		pr_err("%s: Regulator FS_VPE get failed %ld\n", __func__,
			PTR_ERR(vpe_ctrl->fs_vpe));
		vpe_ctrl->fs_vpe = NULL;
	}

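	/* the IRQ was requested above but is not needed yet, so keep it masked */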
	disable_irq(vpe_ctrl->vpeirq->start);

#ifdef CONFIG_MSM_IOMMU
	/*get device context for IOMMU*/
	vpe_ctrl->iommu_ctx_src = msm_iommu_get_ctx("vpe_src"); /*re-confirm*/
	vpe_ctrl->iommu_ctx_dst = msm_iommu_get_ctx("vpe_dst"); /*re-confirm*/
	if (!vpe_ctrl->iommu_ctx_src || !vpe_ctrl->iommu_ctx_dst) {
		pr_err("%s: No iommu fw context found\n", __func__);
		/* undo the IRQ, regulator and mem region acquired above */
		free_irq(vpe_ctrl->vpeirq->start, NULL);
		if (vpe_ctrl->fs_vpe)
			regulator_put(vpe_ctrl->fs_vpe);
		release_mem_region(vpe_ctrl->vpemem->start,
			resource_size(vpe_ctrl->vpemem));
		rc = -ENODEV;
		goto vpe_no_resource;
	}
#endif

	atomic_set(&vpe_ctrl->active, 0);
	vpe_ctrl->pdev = pdev;
	sd_info.sdev_type = VPE_DEV;
	sd_info.sd_index = pdev->id;
	sd_info.irq_num = vpe_ctrl->vpeirq->start;
	msm_cam_register_subdev_node(&vpe_ctrl->subdev, &sd_info);
	vpe_ctrl->subdev.entity.revision = vpe_ctrl->subdev.devnode->num;
	msm_queue_init(&vpe_ctrl->eventData_q, "ackevents");

	return 0;

vpe_no_resource:
	pr_err("%s: VPE Probe failed.\n", __func__);
	kfree(vpe_ctrl);
	return rc;
}
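
By contrast, msm_vpe_probe() claims an IRQ, a memory region and a regulator, so its teardown has more to undo. The following is a rough sketch of a matching remove callback under the same caveats as above (hypothetical function name, hardware assumed idle); it is not the original driver's remove path.

static int __devexit msm_vpe_remove(struct platform_device *pdev)
{
	free_irq(vpe_ctrl->vpeirq->start, NULL);
	release_mem_region(vpe_ctrl->vpemem->start,
		resource_size(vpe_ctrl->vpemem));
	if (vpe_ctrl->fs_vpe)
		regulator_put(vpe_ctrl->fs_vpe);
	media_entity_cleanup(&vpe_ctrl->subdev.entity);
	kfree(vpe_ctrl);
	vpe_ctrl = NULL;
	return 0;
}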