static int msm_gesture_node_register(void) { struct msm_gesture_ctrl *p_gesture_ctrl = &g_gesture_ctrl; struct v4l2_subdev *gesture_subdev = kzalloc(sizeof(struct v4l2_subdev), GFP_KERNEL); D("%s\n", __func__); if (!gesture_subdev) { pr_err("%s: no enough memory\n", __func__); return -ENOMEM; }; v4l2_subdev_init(gesture_subdev, &msm_gesture_subdev_ops); gesture_subdev->internal_ops = &msm_gesture_internal_ops; gesture_subdev->flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; snprintf(gesture_subdev->name, sizeof(gesture_subdev->name), "gesture"); media_entity_init(&gesture_subdev->entity, 0, NULL, 0); gesture_subdev->entity.type = MEDIA_ENT_T_DEVNODE_V4L; gesture_subdev->entity.group_id = GESTURE_DEV; gesture_subdev->entity.name = gesture_subdev->name; /* events */ gesture_subdev->flags |= V4L2_SUBDEV_FL_HAS_EVENTS; msm_cam_register_subdev_node(gesture_subdev, GESTURE_DEV, 0); gesture_subdev->entity.revision = gesture_subdev->devnode->num; atomic_set(&p_gesture_ctrl->active, 0); p_gesture_ctrl->queue_id = -1; p_gesture_ctrl->event.evt_data = NULL; p_gesture_ctrl->event.evt_len = 0; return 0; }
/*
 * vpe_probe - platform probe for the VPE (Video Post-processing Engine).
 *
 * Allocates the driver state, initializes and registers the v4l2 subdev,
 * claims the "vpe" memory region and IRQ, and leaves the IRQ disabled
 * until the device is actually opened.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * Fix vs. original: the vpe_no_resource error path returned 0 (success)
 * after freeing vpe_ctrl, so the core believed the probe succeeded while
 * platform drvdata pointed at freed memory. It now returns the saved rc.
 */
static int __devinit vpe_probe(struct platform_device *pdev)
{
	int rc = 0;
	CDBG("%s: device id = %d\n", __func__, pdev->id);
	vpe_ctrl = kzalloc(sizeof(struct vpe_ctrl_type), GFP_KERNEL);
	if (!vpe_ctrl) {
		pr_err("%s: no enough memory\n", __func__);
		return -ENOMEM;
	}

	v4l2_subdev_init(&vpe_ctrl->subdev, &msm_vpe_subdev_ops);
	v4l2_set_subdevdata(&vpe_ctrl->subdev, vpe_ctrl);
	vpe_ctrl->subdev.internal_ops = &msm_vpe_internal_ops;
	vpe_ctrl->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	snprintf(vpe_ctrl->subdev.name, sizeof(vpe_ctrl->subdev.name), "vpe");
	platform_set_drvdata(pdev, &vpe_ctrl->subdev);

	vpe_ctrl->vpemem = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "vpe");
	if (!vpe_ctrl->vpemem) {
		pr_err("%s: no mem resource?\n", __func__);
		rc = -ENODEV;
		goto vpe_no_resource;
	}
	vpe_ctrl->vpeirq = platform_get_resource_byname(pdev,
					IORESOURCE_IRQ, "vpe");
	if (!vpe_ctrl->vpeirq) {
		pr_err("%s: no irq resource?\n", __func__);
		rc = -ENODEV;
		goto vpe_no_resource;
	}
	vpe_ctrl->vpeio = request_mem_region(vpe_ctrl->vpemem->start,
		resource_size(vpe_ctrl->vpemem), pdev->name);
	if (!vpe_ctrl->vpeio) {
		pr_err("%s: no valid mem region\n", __func__);
		rc = -EBUSY;
		goto vpe_no_resource;
	}

	rc = request_irq(vpe_ctrl->vpeirq->start, vpe_parse_irq,
		IRQF_TRIGGER_RISING, "vpe", 0);
	if (rc < 0) {
		release_mem_region(vpe_ctrl->vpemem->start,
			resource_size(vpe_ctrl->vpemem));
		pr_err("%s: irq request fail\n", __func__);
		rc = -EBUSY;
		goto vpe_no_resource;
	}
	/* Keep the IRQ masked until the subdev is opened. */
	disable_irq(vpe_ctrl->vpeirq->start);

	vpe_ctrl->pdev = pdev;
	msm_cam_register_subdev_node(&vpe_ctrl->subdev, VPE_DEV, pdev->id);
	return 0;

vpe_no_resource:
	kfree(vpe_ctrl);
	return rc;
}
/*
 * irqrouter_probe - platform probe for the camera IRQ router subdev.
 *
 * Allocates the router state, initializes its v4l2 subdev, pushes the
 * default IRQ map to the camera server and registers the subdev node.
 *
 * Returns 0 on success or a negative errno on failure; irqrouter_ctrl
 * is freed on the error path.
 */
static int __devinit irqrouter_probe(struct platform_device *pdev)
{
	int rc = 0;
	struct irqrouter_ctrl_type *irqrouter_ctrl;
	struct msm_cam_subdev_info sd_info;

	D("%s: device id = %d\n", __func__, pdev->id);

	irqrouter_ctrl = kzalloc(sizeof(struct irqrouter_ctrl_type),
				GFP_KERNEL);
	if (!irqrouter_ctrl) {
		pr_err("%s: not enough memory\n", __func__);
		return -ENOMEM;
	}

	v4l2_subdev_init(&irqrouter_ctrl->subdev, &msm_irqrouter_subdev_ops);
	irqrouter_ctrl->subdev.internal_ops = &msm_irqrouter_internal_ops;
	irqrouter_ctrl->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	snprintf(irqrouter_ctrl->subdev.name,
		sizeof(irqrouter_ctrl->subdev.name), "msm_irqrouter");
	v4l2_set_subdevdata(&irqrouter_ctrl->subdev, irqrouter_ctrl);
	irqrouter_ctrl->pdev = pdev;

	/* On DT platforms the instance id comes from the "cell-index"
	 * property rather than the board file.
	 * NOTE(review): pdev->id is an int but of_property_read_u32 writes
	 * a u32 — relies on matching representation; confirm upstream use. */
	if (pdev->dev.of_node)
		of_property_read_u32((&pdev->dev)->of_node,
			"cell-index", &pdev->id);

	msm_irqrouter_send_default_irqmap(irqrouter_ctrl);

	media_entity_init(&irqrouter_ctrl->subdev.entity, 0, NULL, 0);
	irqrouter_ctrl->subdev.entity.type = MEDIA_ENT_T_DEVNODE_V4L;
	irqrouter_ctrl->subdev.entity.group_id = IRQ_ROUTER_DEV;
	irqrouter_ctrl->subdev.entity.name = pdev->name;

	sd_info.sdev_type = IRQ_ROUTER_DEV;
	sd_info.sd_index = 0;
	sd_info.irq_num = 0;
	/* Now register this subdev with the camera server. */
	rc = msm_cam_register_subdev_node(&irqrouter_ctrl->subdev, &sd_info);
	if (rc < 0) {
		pr_err("%s Error registering irqr subdev %d", __func__, rc);
		goto error;
	}
	/* devnode is valid only after successful registration. */
	irqrouter_ctrl->subdev.entity.revision =
		irqrouter_ctrl->subdev.devnode->num;

	atomic_set(&irqrouter_ctrl->active, 0);

	platform_set_drvdata(pdev, &irqrouter_ctrl->subdev);

	return rc;

error:
	kfree(irqrouter_ctrl);
	return rc;
}
/*
 * csid_probe - platform probe for the CSID (CSI decoder) block.
 *
 * Allocates the device state, initializes and registers the v4l2 subdev,
 * and claims the "csid" memory region. The IRQ resource is looked up but
 * not requested here.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * Fix vs. original: the csid_no_resource error path returned 0 (success)
 * after freeing new_csid_dev, leaving platform drvdata dangling and the
 * core believing the probe succeeded. It now returns the saved rc.
 */
static int __devinit csid_probe(struct platform_device *pdev)
{
	struct csid_device *new_csid_dev;
	int rc = 0;
	CDBG("%s: device id = %d\n", __func__, pdev->id);
	new_csid_dev = kzalloc(sizeof(struct csid_device), GFP_KERNEL);
	if (!new_csid_dev) {
		pr_err("%s: no enough memory\n", __func__);
		return -ENOMEM;
	}

	v4l2_subdev_init(&new_csid_dev->subdev, &msm_csid_subdev_ops);
	new_csid_dev->subdev.internal_ops = &msm_csid_internal_ops;
	new_csid_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	snprintf(new_csid_dev->subdev.name,
			ARRAY_SIZE(new_csid_dev->subdev.name), "msm_csid");
	v4l2_set_subdevdata(&new_csid_dev->subdev, new_csid_dev);
	platform_set_drvdata(pdev, &new_csid_dev->subdev);
	mutex_init(&new_csid_dev->mutex);

	new_csid_dev->mem = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "csid");
	if (!new_csid_dev->mem) {
		pr_err("%s: no mem resource?\n", __func__);
		rc = -ENODEV;
		goto csid_no_resource;
	}
	new_csid_dev->irq = platform_get_resource_byname(pdev,
					IORESOURCE_IRQ, "csid");
	if (!new_csid_dev->irq) {
		pr_err("%s: no irq resource?\n", __func__);
		rc = -ENODEV;
		goto csid_no_resource;
	}
	new_csid_dev->io = request_mem_region(new_csid_dev->mem->start,
		resource_size(new_csid_dev->mem), pdev->name);
	if (!new_csid_dev->io) {
		pr_err("%s: no valid mem region\n", __func__);
		rc = -EBUSY;
		goto csid_no_resource;
	}

	new_csid_dev->pdev = pdev;
	msm_cam_register_subdev_node(&new_csid_dev->subdev, CSID_DEV, pdev->id);
	return 0;

csid_no_resource:
	mutex_destroy(&new_csid_dev->mutex);
	kfree(new_csid_dev);
	return rc;
}
void vfe40_axi_probe(struct axi_ctrl_t *axi_ctrl) { struct msm_cam_subdev_info sd_info; v4l2_subdev_init(&axi_ctrl->subdev, &msm_axi_subdev_ops); axi_ctrl->subdev.internal_ops = &msm_axi_internal_ops; axi_ctrl->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; snprintf(axi_ctrl->subdev.name, sizeof(axi_ctrl->subdev.name), "axi"); v4l2_set_subdevdata(&axi_ctrl->subdev, axi_ctrl); sd_info.sdev_type = AXI_DEV; sd_info.sd_index = axi_ctrl->pdev->id; sd_info.irq_num = 0; msm_cam_register_subdev_node(&axi_ctrl->subdev, &sd_info); }
/*
 * cpp_probe - platform probe for the CPP (Camera Post Processor).
 *
 * Allocates the device state, initializes and registers the v4l2 subdev,
 * installs CPP-specific fops on the devnode, and initializes the event
 * and frame queues.
 *
 * Returns 0 on success, -ENOMEM if the device state cannot be allocated.
 *
 * Fix vs. original: sd_info.irq_num was left uninitialized (stack
 * garbage) when passed to msm_cam_register_subdev_node; every sibling
 * probe in this file sets it explicitly, so it is now zeroed here too.
 */
static int __devinit cpp_probe(struct platform_device *pdev)
{
	struct cpp_device *cpp_dev;
	struct msm_cam_subdev_info sd_info;
	int rc = 0;
	CDBG("%s: device id = %d\n", __func__, pdev->id);
	cpp_dev = kzalloc(sizeof(struct cpp_device), GFP_KERNEL);
	if (!cpp_dev) {
		pr_err("%s: no enough memory\n", __func__);
		return -ENOMEM;
	}

	v4l2_subdev_init(&cpp_dev->subdev, &msm_cpp_subdev_ops);
	cpp_dev->subdev.internal_ops = &msm_cpp_internal_ops;
	snprintf(cpp_dev->subdev.name, ARRAY_SIZE(cpp_dev->subdev.name), "cpp");
	cpp_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	cpp_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;
	v4l2_set_subdevdata(&cpp_dev->subdev, cpp_dev);
	platform_set_drvdata(pdev, &cpp_dev->subdev);
	mutex_init(&cpp_dev->mutex);

	cpp_dev->pdev = pdev;

	media_entity_init(&cpp_dev->subdev.entity, 0, NULL, 0);
	cpp_dev->subdev.entity.type = MEDIA_ENT_T_DEVNODE_V4L;
	cpp_dev->subdev.entity.group_id = CPP_DEV;
	cpp_dev->subdev.entity.name = pdev->name;
	sd_info.sdev_type = CPP_DEV;
	sd_info.sd_index = pdev->id;
	sd_info.irq_num = 0;	/* CPP registers no IRQ with the server */
	msm_cam_register_subdev_node(&cpp_dev->subdev, &sd_info);

	/* Clone the default subdev fops, then override the ioctl entry so
	 * CPP can intercept its private ioctls before v4l2 sees them. */
	msm_cpp_v4l2_subdev_fops.owner = v4l2_subdev_fops.owner;
	msm_cpp_v4l2_subdev_fops.open = v4l2_subdev_fops.open;
	msm_cpp_v4l2_subdev_fops.unlocked_ioctl = msm_cpp_subdev_fops_ioctl;
	msm_cpp_v4l2_subdev_fops.release = v4l2_subdev_fops.release;
	msm_cpp_v4l2_subdev_fops.poll = v4l2_subdev_fops.poll;

	/* NOTE(review): devnode is dereferenced without checking the
	 * registration result — matches original behavior; confirm
	 * msm_cam_register_subdev_node cannot fail here. */
	cpp_dev->subdev.devnode->fops = &msm_cpp_v4l2_subdev_fops;
	cpp_dev->subdev.entity.revision = cpp_dev->subdev.devnode->num;
	msm_cpp_enable_debugfs(cpp_dev);
	msm_queue_init(&cpp_dev->eventData_q, "eventdata");
	msm_queue_init(&cpp_dev->offline_q, "frame");
	msm_queue_init(&cpp_dev->realtime_q, "frame");
	msm_queue_init(&cpp_dev->processing_q, "frame");
	cpp_dev->cpp_open_cnt = 0;
	return rc;
}
/*
 * msm_vpe_probe - platform probe for the VPE with media-entity and
 * event support.
 *
 * Allocates the driver state, initializes the v4l2 subdev and media
 * entity, claims the "vpe" memory region and IRQ, acquires the VPE
 * regulator (optional), resolves the IOMMU contexts when
 * CONFIG_MSM_IOMMU is set, and registers the subdev with the camera
 * server.
 *
 * Returns 0 on success or a negative errno on failure; vpe_ctrl is
 * freed on the error path.
 */
static int __devinit msm_vpe_probe(struct platform_device *pdev)
{
	int rc = 0;
	struct msm_cam_subdev_info sd_info;

	D("%s: device id = %d\n", __func__, pdev->id);
	vpe_ctrl = kzalloc(sizeof(struct vpe_ctrl_type), GFP_KERNEL);
	if (!vpe_ctrl) {
		pr_err("%s: not enough memory\n", __func__);
		return -ENOMEM;
	}

	v4l2_subdev_init(&vpe_ctrl->subdev, &msm_vpe_subdev_ops);
	v4l2_set_subdevdata(&vpe_ctrl->subdev, vpe_ctrl);
	vpe_ctrl->subdev.internal_ops = &msm_vpe_internal_ops;
	vpe_ctrl->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	snprintf(vpe_ctrl->subdev.name, sizeof(vpe_ctrl->subdev.name), "vpe");
	platform_set_drvdata(pdev, &vpe_ctrl->subdev);

	media_entity_init(&vpe_ctrl->subdev.entity, 0, NULL, 0);
	vpe_ctrl->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
	vpe_ctrl->subdev.entity.group_id = VPE_DEV;
	vpe_ctrl->subdev.entity.name = vpe_ctrl->subdev.name;
	vpe_ctrl->subdev.flags |= V4L2_SUBDEV_FL_HAS_EVENTS;

	vpe_ctrl->vpemem = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "vpe");
	if (!vpe_ctrl->vpemem) {
		pr_err("%s: no mem resource?\n", __func__);
		rc = -ENODEV;
		goto vpe_no_resource;
	}
	vpe_ctrl->vpeirq = platform_get_resource_byname(pdev,
					IORESOURCE_IRQ, "vpe");
	if (!vpe_ctrl->vpeirq) {
		pr_err("%s: no irq resource?\n", __func__);
		rc = -ENODEV;
		goto vpe_no_resource;
	}
	vpe_ctrl->vpeio = request_mem_region(vpe_ctrl->vpemem->start,
		resource_size(vpe_ctrl->vpemem), pdev->name);
	if (!vpe_ctrl->vpeio) {
		pr_err("%s: no valid mem region\n", __func__);
		rc = -EBUSY;
		goto vpe_no_resource;
	}

	/* NOTE(review): dev_id is 0 (NULL) — the ISR must not rely on the
	 * cookie, and free_irq elsewhere must pass the same value. */
	rc = request_irq(vpe_ctrl->vpeirq->start, vpe_parse_irq,
		IRQF_TRIGGER_RISING, "vpe", 0);
	if (rc < 0) {
		release_mem_region(vpe_ctrl->vpemem->start,
			resource_size(vpe_ctrl->vpemem));
		pr_err("%s: irq request fail\n", __func__);
		rc = -EBUSY;
		goto vpe_no_resource;
	}

	/* Regulator is optional: on failure log and continue without it. */
	vpe_ctrl->fs_vpe = regulator_get(&pdev->dev, "vdd");
	if (IS_ERR(vpe_ctrl->fs_vpe)) {
		pr_err("%s: Regulator FS_VPE get failed %ld\n", __func__,
			PTR_ERR(vpe_ctrl->fs_vpe));
		vpe_ctrl->fs_vpe = NULL;
	}

	/* Keep the IRQ masked until the subdev is opened. */
	disable_irq(vpe_ctrl->vpeirq->start);

#ifdef CONFIG_MSM_IOMMU
	/*get device context for IOMMU*/
	vpe_ctrl->iommu_ctx_src = msm_iommu_get_ctx("vpe_src"); /*re-confirm*/
	vpe_ctrl->iommu_ctx_dst = msm_iommu_get_ctx("vpe_dst"); /*re-confirm*/
	if (!vpe_ctrl->iommu_ctx_src || !vpe_ctrl->iommu_ctx_dst) {
		release_mem_region(vpe_ctrl->vpemem->start,
			resource_size(vpe_ctrl->vpemem));
		pr_err("%s: No iommu fw context found\n", __func__);
		rc = -ENODEV;
		goto vpe_no_resource;
	}
#endif

	atomic_set(&vpe_ctrl->active, 0);
	vpe_ctrl->pdev = pdev;
	sd_info.sdev_type = VPE_DEV;
	sd_info.sd_index = pdev->id;
	sd_info.irq_num = vpe_ctrl->vpeirq->start;
	msm_cam_register_subdev_node(&vpe_ctrl->subdev, &sd_info);
	/* devnode is assumed valid after registration (return unchecked). */
	vpe_ctrl->subdev.entity.revision = vpe_ctrl->subdev.devnode->num;
	msm_queue_init(&vpe_ctrl->eventData_q, "ackevents");

	return 0;

vpe_no_resource:
	pr_err("%s: VPE Probe failed.\n", __func__);
	kfree(vpe_ctrl);
	return rc;
}
/*
 * csid_probe - platform probe for the CSID block (variant that latches
 * the hardware revision at probe time).
 *
 * Allocates the device state, initializes and registers the v4l2 subdev,
 * claims the "csid" memory region and IRQ (left disabled), and reads
 * CSID_HW_VERSION through a temporary mapping.
 *
 * Returns 0 on success or a negative errno on failure.
 *
 * Fix vs. original: the csid_no_resource error path returned 0 (success)
 * after freeing new_csid_dev, leaving platform drvdata dangling and the
 * core believing the probe succeeded. It now returns the saved rc.
 */
static int __devinit csid_probe(struct platform_device *pdev)
{
	struct csid_device *new_csid_dev;
	int rc = 0;
	CDBG("%s: device id = %d\n", __func__, pdev->id);
	new_csid_dev = kzalloc(sizeof(struct csid_device), GFP_KERNEL);
	if (!new_csid_dev) {
		pr_err("%s: no enough memory\n", __func__);
		return -ENOMEM;
	}

	v4l2_subdev_init(&new_csid_dev->subdev, &msm_csid_subdev_ops);
	new_csid_dev->subdev.internal_ops = &msm_csid_internal_ops;
	new_csid_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	snprintf(new_csid_dev->subdev.name,
			ARRAY_SIZE(new_csid_dev->subdev.name), "msm_csid");
	v4l2_set_subdevdata(&new_csid_dev->subdev, new_csid_dev);
	platform_set_drvdata(pdev, &new_csid_dev->subdev);
	mutex_init(&new_csid_dev->mutex);

	new_csid_dev->mem = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "csid");
	if (!new_csid_dev->mem) {
		pr_err("%s: no mem resource?\n", __func__);
		rc = -ENODEV;
		goto csid_no_resource;
	}
	new_csid_dev->irq = platform_get_resource_byname(pdev,
					IORESOURCE_IRQ, "csid");
	if (!new_csid_dev->irq) {
		pr_err("%s: no irq resource?\n", __func__);
		rc = -ENODEV;
		goto csid_no_resource;
	}
	new_csid_dev->io = request_mem_region(new_csid_dev->mem->start,
		resource_size(new_csid_dev->mem), pdev->name);
	if (!new_csid_dev->io) {
		pr_err("%s: no valid mem region\n", __func__);
		rc = -EBUSY;
		goto csid_no_resource;
	}

	rc = request_irq(new_csid_dev->irq->start, msm_csid_irq,
		IRQF_TRIGGER_RISING, "csid", new_csid_dev);
	if (rc < 0) {
		release_mem_region(new_csid_dev->mem->start,
			resource_size(new_csid_dev->mem));
		pr_err("%s: irq request fail\n", __func__);
		rc = -EBUSY;
		goto csid_no_resource;
	}
	/* Keep the IRQ masked until the device is opened. */
	disable_irq(new_csid_dev->irq->start);

	/* Map the registers just long enough to latch the HW revision. */
	new_csid_dev->base = ioremap(new_csid_dev->mem->start,
		resource_size(new_csid_dev->mem));
	if (!new_csid_dev->base) {
		rc = -ENOMEM;
		goto csid_no_resource;
	}
	new_csid_dev->hw_version =
		msm_io_r(new_csid_dev->base + CSID_HW_VERSION_ADDR);
	iounmap(new_csid_dev->base);

	new_csid_dev->pdev = pdev;
	msm_cam_register_subdev_node(&new_csid_dev->subdev, CSID_DEV, pdev->id);
	return 0;

csid_no_resource:
	mutex_destroy(&new_csid_dev->mutex);
	kfree(new_csid_dev);
	return rc;
}
/*
 * csid_probe - platform probe for the CSID block (IRQ-router aware,
 * device-tree capable variant).
 *
 * Allocates the device state, initializes and registers the v4l2 subdev
 * and media entity, claims the "csid" memory region, and routes the CSID
 * interrupt either through the camera server's IRQ router or, when the
 * router hardware is absent (-ENXIO), by requesting the IRQ directly.
 *
 * Returns 0 on success or a negative errno on failure; new_csid_dev is
 * freed on the error path.
 */
static int __devinit csid_probe(struct platform_device *pdev)
{
	struct csid_device *new_csid_dev;
	struct msm_cam_subdev_info sd_info;
	struct intr_table_entry irq_req;
	int rc = 0;
	CDBG("%s:%d called\n", __func__, __LINE__);
	new_csid_dev = kzalloc(sizeof(struct csid_device), GFP_KERNEL);
	if (!new_csid_dev) {
		pr_err("%s: no enough memory\n", __func__);
		return -ENOMEM;
	}

	v4l2_subdev_init(&new_csid_dev->subdev, &msm_csid_subdev_ops);
	new_csid_dev->subdev.internal_ops = &msm_csid_internal_ops;
	new_csid_dev->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE;
	snprintf(new_csid_dev->subdev.name,
			ARRAY_SIZE(new_csid_dev->subdev.name), "msm_csid");
	v4l2_set_subdevdata(&new_csid_dev->subdev, new_csid_dev);
	platform_set_drvdata(pdev, &new_csid_dev->subdev);
	mutex_init(&new_csid_dev->mutex);

	/* On DT platforms the instance id comes from "cell-index".
	 * NOTE(review): pdev->id is an int, of_property_read_u32 writes a
	 * u32 — relies on matching representation; confirm upstream use. */
	if (pdev->dev.of_node)
		of_property_read_u32((&pdev->dev)->of_node,
			"cell-index", &pdev->id);
	CDBG("%s device id %d\n", __func__, pdev->id);

	new_csid_dev->mem = platform_get_resource_byname(pdev,
					IORESOURCE_MEM, "csid");
	if (!new_csid_dev->mem) {
		pr_err("%s: no mem resource?\n", __func__);
		rc = -ENODEV;
		goto csid_no_resource;
	}
	new_csid_dev->irq = platform_get_resource_byname(pdev,
					IORESOURCE_IRQ, "csid");
	if (!new_csid_dev->irq) {
		pr_err("%s: no irq resource?\n", __func__);
		rc = -ENODEV;
		goto csid_no_resource;
	}
	new_csid_dev->io = request_mem_region(new_csid_dev->mem->start,
		resource_size(new_csid_dev->mem), pdev->name);
	if (!new_csid_dev->io) {
		pr_err("%s: no valid mem region\n", __func__);
		rc = -EBUSY;
		goto csid_no_resource;
	}

	new_csid_dev->pdev = pdev;
	sd_info.sdev_type = CSID_DEV;
	sd_info.sd_index = pdev->id;
	sd_info.irq_num = new_csid_dev->irq->start;
	msm_cam_register_subdev_node(&new_csid_dev->subdev, &sd_info);

	media_entity_init(&new_csid_dev->subdev.entity, 0, NULL, 0);
	new_csid_dev->subdev.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
	new_csid_dev->subdev.entity.group_id = CSID_DEV;
	new_csid_dev->subdev.entity.name = pdev->name;
	/* devnode is assumed valid after registration (return unchecked). */
	new_csid_dev->subdev.entity.revision =
		new_csid_dev->subdev.devnode->num;

	/* Request for this device irq from the camera server. If the
	 * IRQ Router is present on this target, the interrupt will be
	 * handled by the camera server and the interrupt service
	 * routine called. If the request_irq call returns ENXIO, then
	 * the IRQ Router hardware is not present on this target. We
	 * have to request for the irq ourselves and register the
	 * appropriate interrupt handler. */
	irq_req.cam_hw_idx      = MSM_CAM_HW_CSI0 + pdev->id;
	irq_req.dev_name        = "csid";
	irq_req.irq_idx         = CAMERA_SS_IRQ_2 + pdev->id;
	irq_req.irq_num         = new_csid_dev->irq->start;
	irq_req.is_composite    = 0;
	irq_req.irq_trigger_type = IRQF_TRIGGER_RISING;
	irq_req.num_hwcore      = 1;
	irq_req.subdev_list[0]  = &new_csid_dev->subdev;
	irq_req.data            = (void *)new_csid_dev;
	rc = msm_cam_server_request_irq(&irq_req);
	if (rc == -ENXIO) {
		/* IRQ Router hardware is not present on this hardware.
		 * Request for the IRQ and register the interrupt handler. */
		rc = request_irq(new_csid_dev->irq->start, msm_csid_irq,
			IRQF_TRIGGER_RISING, "csid", new_csid_dev);
		if (rc < 0) {
			release_mem_region(new_csid_dev->mem->start,
				resource_size(new_csid_dev->mem));
			pr_err("%s: irq request fail\n", __func__);
			rc = -EBUSY;
			goto csid_no_resource;
		}
		/* Keep the IRQ masked until the device is opened. */
		disable_irq(new_csid_dev->irq->start);
	} else if (rc < 0) {
		release_mem_region(new_csid_dev->mem->start,
			resource_size(new_csid_dev->mem));
		pr_err("%s Error registering irq ", __func__);
		goto csid_no_resource;
	}

	new_csid_dev->csid_state = CSID_POWER_DOWN;
	/* Cache the instance in the global lookup table when in range. */
	if (pdev->id >= 0 && pdev->id < MAX_CSID) {
		pr_debug("Init csid %d\n", pdev->id);
		lsh_csid_dev[pdev->id] = new_csid_dev;
	}
	return 0;

csid_no_resource:
	mutex_destroy(&new_csid_dev->mutex);
	kfree(new_csid_dev);
	return rc;
}