static int msm_cpp_notify_frame_done(struct cpp_device *cpp_dev)
{
	struct v4l2_event v4l2_evt;
	struct msm_queue_cmd *frame_qcmd;
	struct msm_queue_cmd *event_qcmd;
	struct msm_cpp_frame_info_t *processed_frame;
	struct msm_device_queue *queue = &cpp_dev->processing_q;

	if (queue->len > 0) {
		frame_qcmd = msm_dequeue(queue, list_frame);
		processed_frame = frame_qcmd->command;

		event_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
		if (!event_qcmd) {
			pr_err("%s Insufficient memory. return", __func__);
			return -ENOMEM;
		}
		atomic_set(&event_qcmd->on_heap, 1);
		event_qcmd->command = processed_frame;
		CPP_DBG("fid %d\n", processed_frame->frame_id);
		msm_enqueue(&cpp_dev->eventData_q, &event_qcmd->list_eventdata);

		v4l2_evt.id = processed_frame->inst_id;
		v4l2_evt.type = V4L2_EVENT_CPP_FRAME_DONE;
		v4l2_event_queue(cpp_dev->subdev.devnode, &v4l2_evt);
	}
	return 0;
}
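/*
 * Minimal userspace sketch of consuming the frame-done notification queued
 * above. Assumptions (not shown in the original code): the CPP subdev is
 * exposed as a /dev/v4l-subdev* node opened in blocking mode, and the vendor
 * UAPI header providing msm_cpp_frame_info_t, msm_camera_v4l2_ioctl_t,
 * V4L2_EVENT_CPP_FRAME_DONE and VIDIOC_MSM_CPP_GET_EVENTPAYLOAD (handled
 * further below in msm_cpp_subdev_ioctl) is visible to userspace.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>
/* #include <media/msm_cpp.h>  -- hypothetical name for the vendor UAPI header */

static int wait_for_cpp_frame_done(int subdev_fd)
{
	struct v4l2_event_subscription sub;
	struct v4l2_event ev;
	struct msm_camera_v4l2_ioctl_t v4l2_ioctl;
	struct msm_cpp_frame_info_t done_frame;

	memset(&sub, 0, sizeof(sub));
	sub.type = V4L2_EVENT_CPP_FRAME_DONE;
	if (ioctl(subdev_fd, VIDIOC_SUBSCRIBE_EVENT, &sub) < 0)
		return -1;

	/* Blocks (blocking fd) until msm_cpp_notify_frame_done() queues the event. */
	if (ioctl(subdev_fd, VIDIOC_DQEVENT, &ev) < 0)
		return -1;

	/* Fetch the processed frame info that was parked on eventData_q. */
	memset(&v4l2_ioctl, 0, sizeof(v4l2_ioctl));
	v4l2_ioctl.ioctl_ptr = &done_frame;
	if (ioctl(subdev_fd, VIDIOC_MSM_CPP_GET_EVENTPAYLOAD, &v4l2_ioctl) < 0)
		return -1;
	return (int)done_frame.frame_id;
}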
static int msm_cpp_send_frame_to_hardware(struct cpp_device *cpp_dev)
{
	struct msm_queue_cmd *frame_qcmd;
	struct msm_cpp_frame_info_t *process_frame;
	struct msm_device_queue *queue;

	if (cpp_dev->processing_q.len < MAX_CPP_PROCESSING_FRAME) {
		while (cpp_dev->processing_q.len < MAX_CPP_PROCESSING_FRAME) {
			if (cpp_dev->realtime_q.len != 0) {
				queue = &cpp_dev->realtime_q;
			} else if (cpp_dev->offline_q.len != 0) {
				queue = &cpp_dev->offline_q;
			} else {
				pr_debug("%s: All frames queued\n", __func__);
				break;
			}
			frame_qcmd = msm_dequeue(queue, list_frame);
			/* TBD: code to actually send the frame to hardware */
			process_frame = frame_qcmd->command;

			msm_enqueue(&cpp_dev->processing_q,
						&frame_qcmd->list_frame);
		}
	}
	return 0;
}
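/*
 * Rough sketch of the queue pattern both functions above rely on. This is an
 * assumption about how msm_device_queue/msm_enqueue/msm_dequeue behave, not
 * the vendor implementation: a spinlock-protected list with a length counter,
 * where enqueue appends a queue command and dequeue pops the oldest entry off
 * the named list member (list_frame or list_eventdata).
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct sketch_device_queue {	/* hypothetical stand-in for msm_device_queue */
	spinlock_t lock;
	struct list_head list;
	int len;
};

struct sketch_queue_cmd {	/* hypothetical stand-in for msm_queue_cmd */
	struct list_head list_frame;
	void *command;
};

static void sketch_enqueue(struct sketch_device_queue *q,
			   struct list_head *entry)
{
	unsigned long flags;

	spin_lock_irqsave(&q->lock, flags);
	list_add_tail(entry, &q->list);
	q->len++;
	spin_unlock_irqrestore(&q->lock, flags);
}

static struct sketch_queue_cmd *sketch_dequeue(struct sketch_device_queue *q)
{
	unsigned long flags;
	struct sketch_queue_cmd *cmd = NULL;

	spin_lock_irqsave(&q->lock, flags);
	if (!list_empty(&q->list)) {
		cmd = list_first_entry(&q->list, struct sketch_queue_cmd,
				       list_frame);
		list_del_init(&cmd->list_frame);
		q->len--;
	}
	spin_unlock_irqrestore(&q->lock, flags);
	return cmd;
}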
Example #3
static long msm_vpe_subdev_ioctl(struct v4l2_subdev *sd,
			unsigned int cmd, void *arg)
{
	struct msm_vpe_cfg_cmd *vpe_cmd;
	int rc = 0;
	struct msm_cam_media_controller *mctl;
	mctl = v4l2_get_subdev_hostdata(sd);
	switch (cmd) {
	case VIDIOC_MSM_VPE_INIT: {
		msm_vpe_subdev_init(sd);
		break;
		}

	case VIDIOC_MSM_VPE_RELEASE:
		msm_vpe_subdev_release(sd);
		break;

	case MSM_CAM_V4L2_IOCTL_CFG_VPE: {
		vpe_cmd = (struct msm_vpe_cfg_cmd *)arg;
		rc = msm_vpe_process_vpe_cmd(vpe_cmd, mctl);
		if (rc < 0) {
			pr_err("%s Error processing VPE cmd %d ",
				__func__, vpe_cmd->cmd_type);
			break;
		}
		break;
		}

	case MSM_CAM_V4L2_IOCTL_GET_EVENT_PAYLOAD: {
		struct msm_device_queue *queue = &vpe_ctrl->eventData_q;
		struct msm_queue_cmd *event_qcmd;
		struct msm_mctl_pp_event_info pp_event_info;
		struct msm_mctl_pp_frame_info *pp_frame_info;
		struct msm_camera_v4l2_ioctl_t *v4l2_ioctl = arg;

		event_qcmd = msm_dequeue(queue, list_eventdata);
		if (!event_qcmd) {
			pr_err("%s No events in the queue", __func__);
			return -EFAULT;
		}
		pp_frame_info = event_qcmd->command;

		D("%s Unmapping source and destination buffers ",
			__func__);
		msm_mctl_unmap_user_frame(&pp_frame_info->src_frame,
			pp_frame_info->p_mctl->client, mctl->domain_num);
		msm_mctl_unmap_user_frame(&pp_frame_info->dest_frame,
			pp_frame_info->p_mctl->client, mctl->domain_num);

		pp_event_info.event = MCTL_PP_EVENT_CMD_ACK;
		pp_event_info.ack.cmd = pp_frame_info->user_cmd;
		pp_event_info.ack.status = 0;
		pp_event_info.ack.cookie = pp_frame_info->pp_frame_cmd.cookie;
		D("%s Sending payload %d %d %d", __func__,
			pp_event_info.ack.cmd, pp_event_info.ack.status,
			pp_event_info.ack.cookie);
		if (copy_to_user((void __user *)v4l2_ioctl->ioctl_ptr,
			&pp_event_info,	sizeof(struct msm_mctl_pp_event_info)))
			pr_err("%s PAYLOAD Copy to user failed ", __func__);

		kfree(pp_frame_info);
		kfree(event_qcmd);
#ifdef CONFIG_PANTECH_CAMERA /* clear pp_frame_info/event_qcmd after kfree */
		pp_frame_info = NULL;
		event_qcmd = NULL;
#endif
		break;
		}

	default:
		break;
	}
	return rc;
}
static long msm_vpe_subdev_ioctl(struct v4l2_subdev *sd,
			unsigned int cmd, void *arg)
{
	struct msm_vpe_cfg_cmd *vpe_cmd;
	int rc = 0;

	switch (cmd) {
	case VIDIOC_MSM_VPE_INIT: {
		msm_vpe_subdev_init(sd);
		break;
		}

	case VIDIOC_MSM_VPE_RELEASE:
		msm_vpe_subdev_release();
		break;

	case MSM_CAM_V4L2_IOCTL_CFG_VPE: {
		vpe_cmd = (struct msm_vpe_cfg_cmd *)arg;
		rc = msm_vpe_process_vpe_cmd(vpe_cmd);
		if (rc < 0) {
			pr_err("%s Error processing VPE cmd %d ",
				__func__, vpe_cmd->cmd_type);
			break;
		}
		break;
		}

	case MSM_CAM_V4L2_IOCTL_GET_EVENT_PAYLOAD: {
		struct msm_device_queue *queue = &vpe_ctrl->eventData_q;
		struct msm_queue_cmd *event_qcmd;
		struct msm_mctl_pp_event_info pp_event_info;
		struct msm_mctl_pp_frame_info *pp_frame_info;
		struct msm_camera_v4l2_ioctl_t *v4l2_ioctl = arg;

		event_qcmd = msm_dequeue(queue, list_eventdata);
		if (!event_qcmd) {
			pr_err("%s No events in the queue", __func__);
			return -EFAULT;
		}
		pp_frame_info = event_qcmd->command;
		pp_event_info.event = MCTL_PP_EVENT_CMD_ACK;
		pp_event_info.ack.cmd = pp_frame_info->user_cmd;
		pp_event_info.ack.status = 0;
		pp_event_info.ack.cookie = pp_frame_info->pp_frame_cmd.cookie;
		D("%s Sending payload %d %d %d", __func__,
			pp_event_info.ack.cmd, pp_event_info.ack.status,
			pp_event_info.ack.cookie);
		if (copy_to_user((void __user *)v4l2_ioctl->ioctl_ptr,
			&pp_event_info,
			sizeof(struct msm_mctl_pp_event_info)))
			pr_err("%s EVENTPAYLOAD Copy to user failed ",
				__func__);
		kfree(pp_frame_info);
		event_qcmd->command = NULL;
		free_qcmd(event_qcmd);
		break;
		}

	default:
		break;
	}
	return rc;
}
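/*
 * Companion userspace sketch for MSM_CAM_V4L2_IOCTL_GET_EVENT_PAYLOAD as
 * handled in both msm_vpe_subdev_ioctl variants above. Assumptions: the ioctl
 * is issued on the camera device node that routes into this subdev, and the
 * vendor UAPI types msm_camera_v4l2_ioctl_t and msm_mctl_pp_event_info are
 * visible to userspace.
 */
#include <string.h>
#include <sys/ioctl.h>

static int get_vpe_cmd_ack(int cam_fd)
{
	struct msm_camera_v4l2_ioctl_t v4l2_ioctl;
	struct msm_mctl_pp_event_info pp_event_info;

	memset(&v4l2_ioctl, 0, sizeof(v4l2_ioctl));
	v4l2_ioctl.ioctl_ptr = &pp_event_info;

	/* The driver copy_to_user()s an MCTL_PP_EVENT_CMD_ACK payload here. */
	if (ioctl(cam_fd, MSM_CAM_V4L2_IOCTL_GET_EVENT_PAYLOAD, &v4l2_ioctl) < 0)
		return -1;

	/* ack.cmd echoes the user command, ack.cookie the frame cmd's cookie. */
	return pp_event_info.ack.status;
}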
long msm_cpp_subdev_ioctl(struct v4l2_subdev *sd,
			unsigned int cmd, void *arg)
{
	struct cpp_device *cpp_dev = v4l2_get_subdevdata(sd);
	struct msm_camera_v4l2_ioctl_t *ioctl_ptr = arg;
	int rc = 0;

	CPP_DBG("%s: %d\n", __func__, __LINE__);
	mutex_lock(&cpp_dev->mutex);
	CPP_DBG("%s cmd: %d\n", __func__, cmd);
	switch (cmd) {
	case VIDIOC_MSM_CPP_CFG: {
		struct msm_queue_cmd *frame_qcmd;
		struct msm_cpp_frame_info_t *new_frame =
			kzalloc(sizeof(struct msm_cpp_frame_info_t),
					GFP_KERNEL);
		if (!new_frame) {
			pr_err("%s Insufficient memory. return", __func__);
			mutex_unlock(&cpp_dev->mutex);
			return -ENOMEM;
		}

		COPY_FROM_USER(rc, new_frame,
			       (void __user *)ioctl_ptr->ioctl_ptr,
			       sizeof(struct msm_cpp_frame_info_t));
		if (rc) {
			ERR_COPY_FROM_USER();
			kfree(new_frame);
			mutex_unlock(&cpp_dev->mutex);
			return -EINVAL;
		}

		frame_qcmd = kzalloc(sizeof(struct msm_queue_cmd), GFP_KERNEL);
		if (!frame_qcmd) {
			pr_err("%s Insufficient memory. return", __func__);
			kfree(new_frame);
			mutex_unlock(&cpp_dev->mutex);
			return -ENOMEM;
		}

		atomic_set(&frame_qcmd->on_heap, 1);
		frame_qcmd->command = new_frame;
		if (new_frame->frame_type == MSM_CPP_REALTIME_FRAME) {
			msm_enqueue(&cpp_dev->realtime_q,
						&frame_qcmd->list_frame);
		} else if (new_frame->frame_type == MSM_CPP_OFFLINE_FRAME) {
			msm_enqueue(&cpp_dev->offline_q,
						&frame_qcmd->list_frame);
		} else {
			pr_err("%s: Invalid frame type\n", __func__);
			kfree(new_frame);
			kfree(frame_qcmd);
			mutex_unlock(&cpp_dev->mutex);
			return -EINVAL;
		}
		break;
	}
	case VIDIOC_MSM_CPP_GET_EVENTPAYLOAD: {
		struct msm_device_queue *queue = &cpp_dev->eventData_q;
		struct msm_queue_cmd *event_qcmd;
		struct msm_cpp_frame_info_t *process_frame;
		event_qcmd = msm_dequeue(queue, list_eventdata);
		if (!event_qcmd) {
			pr_err("%s No events in the queue", __func__);
			mutex_unlock(&cpp_dev->mutex);
			return -EFAULT;
		}
		process_frame = event_qcmd->command;
		CPP_DBG("fid %d\n", process_frame->frame_id);
		if (copy_to_user((void __user *)ioctl_ptr->ioctl_ptr,
				process_frame,
				sizeof(struct msm_cpp_frame_info_t))) {
					mutex_unlock(&cpp_dev->mutex);
					return -EINVAL;
		}
		kfree(process_frame);
		kfree(event_qcmd);
		break;
	}
	}
	mutex_unlock(&cpp_dev->mutex);
	CPP_DBG("%s: %d\n", __func__, __LINE__);
	return 0;
}
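/*
 * Userspace sketch for the VIDIOC_MSM_CPP_CFG path above (same assumptions
 * about the vendor UAPI as the earlier sketches): the frame descriptor is
 * passed through msm_camera_v4l2_ioctl_t.ioctl_ptr, copied in by the driver
 * with COPY_FROM_USER, and queued on realtime_q or offline_q depending on
 * frame_type.
 */
#include <string.h>
#include <sys/ioctl.h>

static int submit_cpp_frame(int subdev_fd, struct msm_cpp_frame_info_t *frame)
{
	struct msm_camera_v4l2_ioctl_t v4l2_ioctl;

	memset(&v4l2_ioctl, 0, sizeof(v4l2_ioctl));
	v4l2_ioctl.ioctl_ptr = frame;

	/*
	 * frame->frame_type must be MSM_CPP_REALTIME_FRAME or
	 * MSM_CPP_OFFLINE_FRAME; anything else makes the driver return -EINVAL.
	 */
	return ioctl(subdev_fd, VIDIOC_MSM_CPP_CFG, &v4l2_ioctl);
}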