int gsc_fill_addr(struct gsc_ctx *ctx) { struct gsc_frame *s_frame, *d_frame; struct vb2_buffer *vb = NULL; int ret = 0; s_frame = &ctx->s_frame; d_frame = &ctx->d_frame; vb = v4l2_m2m_next_src_buf(ctx->m2m_ctx); if (vb->num_planes != s_frame->fmt->num_planes) { gsc_err("gsc(%s): vb(%p) planes=%d s_frame(%p) planes=%d\n", v4l2_m2m_get_src_vq(ctx->m2m_ctx)->name, vb, vb->num_planes, s_frame, s_frame->fmt->num_planes); return -EINVAL; } ret = gsc_prepare_addr(ctx, vb, s_frame, &s_frame->addr); if (ret) return ret; vb = v4l2_m2m_next_dst_buf(ctx->m2m_ctx); if (vb->num_planes != d_frame->fmt->num_planes) { gsc_err("gsc(%s): vb(%p) planes=%d d_frame(%p) planes=%d\n", v4l2_m2m_get_dst_vq(ctx->m2m_ctx)->name, vb, vb->num_planes, d_frame, d_frame->fmt->num_planes); return -EINVAL; } ret = gsc_prepare_addr(ctx, vb, d_frame, &d_frame->addr); return ret; }
/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while read will be
 * returned in case of the destination queue.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		/* Neither queue can ever produce a buffer: report error. */
		rc = POLLERR;
		goto end;
	}

	/*
	 * Release the driver-provided lock across poll_wait() registration so
	 * other contexts are not blocked while we may sleep — NOTE(review):
	 * assumes m2m_ops lock/unlock guard the m2m device; confirm with the
	 * driver that registered these ops.
	 */
	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);

	/* Register on both done-wait queues so either side wakes the poller. */
	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	if (m2m_ctx->m2m_dev->m2m_ops->lock)
		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);

	/*
	 * A finished (DONE or ERROR) buffer at the head of the source
	 * done_list means a source slot can be dequeued, i.e. writable.
	 */
	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	/* Same check on the destination side maps to readable. */
	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}
/*
 * gsc_m2m_job_abort - m2m_ops .job_abort handler for the GScaler.
 * @priv: the gsc_ctx of the job being aborted
 *
 * Drains both vb2 queues, then asks the context to stop and logs an
 * error if the stop request times out.
 */
static void gsc_m2m_job_abort(void *priv)
{
	struct gsc_ctx *ctx = priv;
	struct gsc_dev *gsc = ctx->gsc_dev;

	/* Wait until all buffers on both queues are out of the driver. */
	vb2_wait_for_all_buffers(v4l2_m2m_get_src_vq(ctx->m2m_ctx));
	vb2_wait_for_all_buffers(v4l2_m2m_get_dst_vq(ctx->m2m_ctx));

	/* FIXME: need to add v4l2_m2m_job_finish(fail) if ret is timeout */
	if (gsc_ctx_stop_req(ctx) < 0)
		dev_err(&gsc->pdev->dev, "wait timeout : %s\n", __func__);
}
/** * v4l2_m2m_mmap() - source and destination queues-aware mmap multiplexer * * Call from driver's mmap() function. Will handle mmap() for both queues * seamlessly for videobuffer, which will receive normal per-queue offsets and * proper videobuf queue pointers. The differentiation is made outside videobuf * by adding a predefined offset to buffers from one of the queues and * subtracting it before passing it back to videobuf. Only drivers (and * thus applications) receive modified offsets. */ int v4l2_m2m_mmap(struct file *file, struct v4l2_m2m_ctx *m2m_ctx, struct vm_area_struct *vma) { unsigned long offset = vma->vm_pgoff << PAGE_SHIFT; struct vb2_queue *vq; if (offset < DST_QUEUE_OFF_BASE) { vq = v4l2_m2m_get_src_vq(m2m_ctx); } else { vq = v4l2_m2m_get_dst_vq(m2m_ctx); vma->vm_pgoff -= (DST_QUEUE_OFF_BASE >> PAGE_SHIFT); } return vb2_mmap(vq, vma); }
/**
 * v4l2_m2m_poll() - poll replacement, for destination buffers only
 *
 * Call from the driver's poll() function. Will poll both queues. If a buffer
 * is available to dequeue (with dqbuf) from the source queue, this will
 * indicate that a non-blocking write can be performed, while read will be
 * returned in case of the destination queue.
 */
unsigned int v4l2_m2m_poll(struct file *file, struct v4l2_m2m_ctx *m2m_ctx,
			   struct poll_table_struct *wait)
{
	struct video_device *vfd = video_devdata(file);
	unsigned long req_events = poll_requested_events(wait);
	struct vb2_queue *src_q, *dst_q;
	struct vb2_buffer *src_vb = NULL, *dst_vb = NULL;
	unsigned int rc = 0;
	unsigned long flags;

	/*
	 * When the device uses struct v4l2_fh, handle the V4L2 event side of
	 * poll first: a pending event yields POLLPRI, otherwise register on
	 * the fh wait queue if the caller asked for POLLPRI. If no buffer
	 * events were requested at all, we are done here.
	 */
	if (test_bit(V4L2_FL_USES_V4L2_FH, &vfd->flags)) {
		struct v4l2_fh *fh = file->private_data;

		if (v4l2_event_pending(fh))
			rc = POLLPRI;
		else if (req_events & POLLPRI)
			poll_wait(file, &fh->wait, wait);
		if (!(req_events & (POLLOUT | POLLWRNORM | POLLIN | POLLRDNORM)))
			return rc;
	}

	src_q = v4l2_m2m_get_src_vq(m2m_ctx);
	dst_q = v4l2_m2m_get_dst_vq(m2m_ctx);

	/*
	 * There has to be at least one buffer queued on each queued_list, which
	 * means either in driver already or waiting for driver to claim it
	 * and start processing.
	 */
	if ((!src_q->streaming || list_empty(&src_q->queued_list))
		&& (!dst_q->streaming || list_empty(&dst_q->queued_list))) {
		/* Neither queue can ever produce a buffer: flag an error. */
		rc |= POLLERR;
		goto end;
	}

	/*
	 * Drop the driver-provided lock across poll_wait() registration —
	 * NOTE(review): assumes m2m_ops lock/unlock guard the m2m device;
	 * confirm against the driver that supplies these ops.
	 */
	if (m2m_ctx->m2m_dev->m2m_ops->unlock)
		m2m_ctx->m2m_dev->m2m_ops->unlock(m2m_ctx->priv);

	/* Register on both done-wait queues so either side wakes the poller. */
	poll_wait(file, &src_q->done_wq, wait);
	poll_wait(file, &dst_q->done_wq, wait);

	if (m2m_ctx->m2m_dev->m2m_ops->lock)
		m2m_ctx->m2m_dev->m2m_ops->lock(m2m_ctx->priv);

	/*
	 * A finished (DONE or ERROR) buffer heading the source done_list
	 * means a source slot can be dequeued, i.e. writable.
	 */
	spin_lock_irqsave(&src_q->done_lock, flags);
	if (!list_empty(&src_q->done_list))
		src_vb = list_first_entry(&src_q->done_list, struct vb2_buffer,
						done_entry);
	if (src_vb && (src_vb->state == VB2_BUF_STATE_DONE
			|| src_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLOUT | POLLWRNORM;
	spin_unlock_irqrestore(&src_q->done_lock, flags);

	/* Same check on the destination side maps to readable. */
	spin_lock_irqsave(&dst_q->done_lock, flags);
	if (!list_empty(&dst_q->done_list))
		dst_vb = list_first_entry(&dst_q->done_list, struct vb2_buffer,
						done_entry);
	if (dst_vb && (dst_vb->state == VB2_BUF_STATE_DONE
			|| dst_vb->state == VB2_BUF_STATE_ERROR))
		rc |= POLLIN | POLLRDNORM;
	spin_unlock_irqrestore(&dst_q->done_lock, flags);

end:
	return rc;
}