/*
 * gsc_capture_buf_prepare - validate a capture buffer before queueing.
 *
 * Checks that every plane of the userspace buffer is large enough for the
 * currently configured destination format, records the expected payload on
 * each plane, and performs the ION-specific buffer preparation.
 *
 * Returns 0 on success, -EINVAL when no format is set or a plane is too
 * small.
 */
static int gsc_capture_buf_prepare(struct vb2_buffer *vb)
{
	struct vb2_queue *vq = vb->vb2_queue;
	struct gsc_ctx *ctx = vq->drv_priv;
	struct gsc_frame *frame = &ctx->d_frame;
	int i;

	if (frame->fmt == NULL)
		return -EINVAL;

	for (i = 0; i < frame->fmt->num_planes; i++) {
		unsigned long size = frame->payload[i];

		if (vb2_plane_size(vb, i) < size) {
			/* %lu: both values are unsigned long (was %ld) */
			v4l2_err(ctx->gsc_dev->cap.vfd,
				 "User buffer too small (%lu < %lu)\n",
				 vb2_plane_size(vb, i), size);
			return -EINVAL;
		}
		vb2_set_plane_payload(vb, i, size);
	}

	vb2_ion_buf_prepare(vb);

	return 0;
}
static int isp_video_capture_buffer_prepare(struct vb2_buffer *vb) { struct fimc_isp *isp = vb2_get_drv_priv(vb->vb2_queue); struct fimc_is_video *video = &isp->video_capture; int i; if (video->format == NULL) return -EINVAL; for (i = 0; i < video->format->memplanes; i++) { unsigned long size = video->pixfmt.plane_fmt[i].sizeimage; if (vb2_plane_size(vb, i) < size) { v4l2_err(&video->ve.vdev, "User buffer too small (%ld < %ld)\n", vb2_plane_size(vb, i), size); return -EINVAL; } vb2_set_plane_payload(vb, i, size); } /* Check if we get one of the already known buffers. */ if (test_bit(ST_ISP_VID_CAP_BUF_PREP, &isp->state)) { dma_addr_t dma_addr = vb2_dma_contig_plane_dma_addr(vb, 0); int i; for (i = 0; i < video->buf_count; i++) if (video->buffers[i]->dma_addr[0] == dma_addr) return 0; return -ENXIO; } return 0; }
static int uvc_buffer_prepare(struct vb2_buffer *vb) { struct uvc_video_queue *queue = vb2_get_drv_priv(vb->vb2_queue); struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct uvc_buffer *buf = container_of(vbuf, struct uvc_buffer, buf); if (vb->type == V4L2_BUF_TYPE_VIDEO_OUTPUT && vb2_get_plane_payload(vb, 0) > vb2_plane_size(vb, 0)) { uvc_trace(UVC_TRACE_CAPTURE, "[E] Bytes used out of bounds.\n"); return -EINVAL; } if (unlikely(queue->flags & UVC_QUEUE_DISCONNECTED)) return -ENODEV; buf->state = UVC_BUF_STATE_QUEUED; buf->mem = vb2_plane_vaddr(vb, 0); buf->length = vb2_plane_size(vb, 0); if (vb->type == V4L2_BUF_TYPE_VIDEO_CAPTURE) buf->bytesused = 0; else buf->bytesused = vb2_get_plane_payload(vb, 0); return 0; }
static int unicam_videobuf_prepare(struct vb2_buffer *vb) { struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, icd-> current_fmt->host_fmt); unsigned long size; pr_debug("-enter"); if (bytes_per_line < 0) return bytes_per_line; pr_debug("vb=0x%p buf=0x%p, size=%lu", vb, (void *)vb2_plane_dma_addr(vb, 0), vb2_get_plane_payload(vb, 0)); size = icd->user_height * bytes_per_line; if (vb2_plane_size(vb, 0) < size) { dev_err(icd->dev.parent, "Buffer too small (%lu < %lu)\n", vb2_plane_size(vb, 0), size); return -ENOBUFS; } vb2_set_plane_payload(vb, 0, size); pr_debug("-exit"); return 0; }
/*
 * buffer_queue - vb2 .buf_queue handler for the smi2021 grabber.
 *
 * Initialises per-buffer bookkeeping and appends the buffer to the list of
 * buffers available to the USB completion handler.  A buffer queued after
 * disconnect, or one too small for a full frame, is completed immediately
 * in the ERROR state.
 */
static void buffer_queue(struct vb2_buffer *vb)
{
	unsigned long flags;
	struct smi2021 *smi2021 = vb2_get_drv_priv(vb->vb2_queue);
	struct smi2021_buf *buf = container_of(vb, struct smi2021_buf, vb);

	/* Device gone: return the buffer to userspace right away. */
	if (smi2021->udev == NULL) {
		vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
		return;
	}

	buf->mem = vb2_plane_vaddr(vb, 0);
	buf->length = vb2_plane_size(vb, 0);
	buf->pos = 0;		/* current write offset into the plane */
	buf->trc_av = 0;
	buf->in_blank = true;
	buf->second_field = false;

	/* buf_lock protects the buffer list shared with the URB handler. */
	spin_lock_irqsave(&smi2021->buf_lock, flags);
	if (buf->length < smi2021->cur_height * SMI2021_BYTES_PER_LINE)
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	else
		list_add_tail(&buf->list, &smi2021->bufs);
	spin_unlock_irqrestore(&smi2021->buf_lock, flags);
}
/*
 * buffer_queue - vb2 .buf_queue handler for stk1160.
 *
 * Adds the buffer to the list consumed by the URB completion handler.  The
 * whole operation runs under buf_lock because both the list and the udev
 * check are shared with interrupt context.
 */
static void buffer_queue(struct vb2_buffer *vb)
{
	unsigned long flags;
	struct stk1160 *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct stk1160_buffer *buf =
		container_of(vb, struct stk1160_buffer, vb);

	spin_lock_irqsave(&dev->buf_lock, flags);
	if (!dev->udev) {
		/*
		 * If the device is disconnected return the buffer to userspace
		 * directly. The next QBUF call will fail with -ENODEV.
		 */
		vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
	} else {
		buf->mem = vb2_plane_vaddr(vb, 0);
		buf->length = vb2_plane_size(vb, 0);
		buf->bytesused = 0;
		buf->pos = 0;

		/*
		 * If the buffer length is less than expected (one full
		 * width x height 16bpp frame) return it to userspace
		 * directly.
		 */
		if (buf->length < dev->width * dev->height * 2)
			vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR);
		else
			list_add_tail(&buf->list, &dev->avail_bufs);
	}
	spin_unlock_irqrestore(&dev->buf_lock, flags);
}
/* Check the plane can hold the context's configured buffer size. */
static int _buffer_prepare(struct vb2_buffer *vb)
{
	struct dvb_vb2_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long required = ctx->buf_siz;

	if (vb2_plane_size(vb, 0) < required) {
		dprintk(1, "[%s] data will not fit into plane (%lu < %lu)\n",
			ctx->name, vb2_plane_size(vb, 0), required);
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, required);
	dprintk(3, "[%s]\n", ctx->name);

	return 0;
}
/*
 * buffer_prepare - validate and map a cx23885 VBI buffer for DMA.
 *
 * Sizes the buffer for the VBI line count of the current TV norm, maps the
 * scatter-gather list for device access, and generates the RISC program the
 * DMA engine will execute for this buffer.
 *
 * Returns 0 on success, -EINVAL if the plane is too small, -EIO if the
 * scatterlist could not be mapped.
 */
static int buffer_prepare(struct vb2_buffer *vb)
{
	struct cx23885_dev *dev = vb->vb2_queue->drv_priv;
	struct cx23885_buffer *buf =
		container_of(vb, struct cx23885_buffer, vb);
	struct sg_table *sgt = vb2_dma_sg_plane_desc(vb, 0);
	unsigned lines = VBI_PAL_LINE_COUNT;
	int ret;

	/* 525/60 (NTSC) norms carry fewer VBI lines than PAL. */
	if (dev->tvnorm & V4L2_STD_525_60)
		lines = VBI_NTSC_LINE_COUNT;

	/* Two fields of 'lines' lines each. */
	if (vb2_plane_size(vb, 0) < lines * VBI_LINE_LENGTH * 2)
		return -EINVAL;
	vb2_set_plane_payload(vb, 0, lines * VBI_LINE_LENGTH * 2);

	ret = dma_map_sg(&dev->pci->dev, sgt->sgl, sgt->nents,
			 DMA_FROM_DEVICE);
	if (!ret)
		return -EIO;

	cx23885_risc_vbibuffer(dev->pci, &buf->risc, sgt->sgl,
			       0, VBI_LINE_LENGTH * lines,
			       VBI_LINE_LENGTH, 0, lines);
	return 0;
}
/* Verify the plane can hold the configured image and record the payload. */
static int buf_prepare(struct vb2_buffer *vb)
{
	struct css2600_isys_queue *aq =
		vb2_queue_to_css2600_isys_queue(vb->vb2_queue);
	struct css2600_isys_video *av = css2600_isys_queue_to_video(aq);
	unsigned long plane_len = vb2_plane_size(vb, 0);

	dev_dbg(&av->isys->adev->dev, "configured size %u, buffer size %lu\n",
		av->pix.sizeimage, plane_len);

	if (av->pix.sizeimage > plane_len)
		return -EINVAL;

	vb2_set_plane_payload(vb, 0, av->pix.sizeimage);

	return 0;
}
/*
 * iss_video_buf_prepare - validate a buffer before it reaches the hardware.
 *
 * Checks the plane size against the negotiated format, rejects buffers when
 * the video node is in error state, verifies the DMA address alignment the
 * ISS requires, and caches that address for the queue handler.
 */
static int iss_video_buf_prepare(struct vb2_buffer *vb)
{
	struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue);
	struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb);
	struct iss_video *video = vfh->video;
	unsigned long size = vfh->format.fmt.pix.sizeimage;
	dma_addr_t addr;

	if (vb2_plane_size(vb, 0) < size)
		return -ENOBUFS;

	/* Refuse to prepare the buffer if the video node has registered an
	 * error. We don't need to take any lock here as the operation is
	 * inherently racy. The authoritative check will be performed in the
	 * queue handler, which can't return an error, this check is just a best
	 * effort to notify userspace as early as possible.
	 */
	if (unlikely(video->error))
		return -EIO;

	addr = vb2_dma_contig_plane_dma_addr(vb, 0);
	/* The ISS DMA engine requires 32-byte aligned buffers. */
	if (!IS_ALIGNED(addr, 32)) {
		dev_dbg(video->iss->dev,
			"Buffer address must be aligned to 32 bytes boundary.\n");
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, size);
	buffer->iss_addr = addr;

	return 0;
}
/*
 * rcar_vin_videobuf_queue - vb2 .buf_queue handler for the R-Car VIN.
 *
 * Validates the buffer size, queues it on the capture list, fills a free
 * hardware slot and, once the hardware has enough buffers and is not yet
 * running, programs it and starts capturing.  Any failure completes the
 * buffer in the ERROR state.
 */
static void rcar_vin_videobuf_queue(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct rcar_vin_priv *priv = ici->priv;
	unsigned long size;

	size = icd->sizeimage;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n",
			vb->v4l2_buf.index, vb2_plane_size(vb, 0), size);
		goto error;
	}

	vb2_set_plane_payload(vb, 0, size);

	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
		vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));

	/* priv->lock protects the capture list and the hardware state. */
	spin_lock_irq(&priv->lock);

	list_add_tail(to_buf_list(vb), &priv->capture);
	rcar_vin_fill_hw_slot(priv);

	/* If we weren't running, and have enough buffers, start capturing! */
	if (priv->state != RUNNING && rcar_vin_hw_ready(priv)) {
		if (rcar_vin_setup(priv)) {
			/* Submit error: unqueue and complete with ERROR. */
			list_del_init(to_buf_list(vb));
			spin_unlock_irq(&priv->lock);
			goto error;
		}
		priv->request_to_stop = false;
		init_completion(&priv->capture_stop);
		priv->state = RUNNING;
		rcar_vin_capture(priv);
	}

	spin_unlock_irq(&priv->lock);

	return;

error:
	vb2_buffer_done(vb, VB2_BUF_STATE_ERROR);
}
/*
 * mx3_videobuf_prepare - vb2 .buf_prepare handler for the i.MX3 CSI.
 *
 * Validates the buffer against the current user geometry and, the first
 * time a buffer is prepared, builds the IDMAC slave-sg descriptor that will
 * transfer one frame into it.  The descriptor is cached via buf->state so
 * it is only prepared once per buffer.
 */
static int mx3_videobuf_prepare(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct mx3_camera_dev *mx3_cam = ici->priv;
	struct idmac_channel *ichan = mx3_cam->idmac_channel[0];
	struct scatterlist *sg;
	struct mx3_camera_buffer *buf;
	size_t new_size;
	int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
						icd->current_fmt->host_fmt);

	if (bytes_per_line < 0)
		return bytes_per_line;

	buf = to_mx3_vb(vb);
	sg = &buf->sg;

	new_size = bytes_per_line * icd->user_height;

	if (vb2_plane_size(vb, 0) < new_size) {
		dev_err(icd->dev.parent, "Buffer too small (%lu < %zu)\n",
			vb2_plane_size(vb, 0), new_size);
		return -ENOBUFS;
	}

	/* Prepare the DMA descriptor only once per buffer. */
	if (buf->state == CSI_BUF_NEEDS_INIT) {
		sg_dma_address(sg) = vb2_dma_contig_plane_paddr(vb, 0);
		sg_dma_len(sg) = new_size;
		buf->txd = ichan->dma_chan.device->device_prep_slave_sg(
			&ichan->dma_chan, sg, 1, DMA_FROM_DEVICE,
			DMA_PREP_INTERRUPT);
		if (!buf->txd)
			return -EIO;
		/* NOTE(review): callback_param is the descriptor itself,
		 * not 'buf' — presumably mx3_cam_dma_done recovers the
		 * buffer from the descriptor; confirm against the callback
		 * implementation. */
		buf->txd->callback_param = buf->txd;
		buf->txd->callback = mx3_cam_dma_done;
		buf->state = CSI_BUF_PREPARED;
	}

	vb2_set_plane_payload(vb, 0, new_size);

	return 0;
}
/* Ensure the plane can hold one full VBI frame and record the payload. */
static int vbi_buffer_prepare(struct vb2_buffer *vb)
{
	struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue);
	struct em28xx_v4l2 *v4l2 = dev->v4l2;
	unsigned long required = v4l2->vbi_width * v4l2->vbi_height * 2;

	if (vb2_plane_size(vb, 0) < required) {
		printk(KERN_INFO "%s data will not fit into plane (%lu < %lu)\n",
		       __func__, vb2_plane_size(vb, 0), required);
		return -EINVAL;
	}
	vb2_set_plane_payload(vb, 0, required);

	return 0;
}
/* Called for each 256-byte image chunk.
 * First word identifies the chunk, followed by 240 words of image
 * data and padding.
 */
static void usbtv_image_chunk(struct usbtv *usbtv, __be32 *chunk)
{
	int frame_id, odd, chunk_no;
	u32 *frame;
	struct usbtv_buf *buf;
	unsigned long flags;

	/* Ignore corrupted lines. */
	if (!USBTV_MAGIC_OK(chunk))
		return;
	frame_id = USBTV_FRAME_ID(chunk);
	odd = USBTV_ODD(chunk);
	chunk_no = USBTV_CHUNK_NO(chunk);
	if (chunk_no >= usbtv->n_chunks)
		return;

	/* Beginning of a frame. */
	if (chunk_no == 0) {
		usbtv->frame_id = frame_id;
		usbtv->chunks_done = 0;
	}

	/* Drop chunks belonging to a frame we are not assembling. */
	if (usbtv->frame_id != frame_id)
		return;

	/* buflock protects the buffer list shared with buffer_queue. */
	spin_lock_irqsave(&usbtv->buflock, flags);
	if (list_empty(&usbtv->bufs)) {
		/* No free buffers. Userspace likely too slow. */
		spin_unlock_irqrestore(&usbtv->buflock, flags);
		return;
	}

	/* First available buffer. */
	buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list);
	frame = vb2_plane_vaddr(&buf->vb, 0);

	/* Copy the chunk data. */
	usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
	usbtv->chunks_done++;

	/* Last chunk in a frame, signalling an end */
	if (odd && chunk_no == usbtv->n_chunks-1) {
		int size = vb2_plane_size(&buf->vb, 0);
		/* DONE only if every chunk of the frame arrived. */
		enum vb2_buffer_state state = usbtv->chunks_done ==
						usbtv->n_chunks ?
						VB2_BUF_STATE_DONE :
						VB2_BUF_STATE_ERROR;

		buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
		buf->vb.v4l2_buf.sequence = usbtv->sequence++;
		v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
		vb2_set_plane_payload(&buf->vb, 0, size);
		vb2_buffer_done(&buf->vb, state);
		list_del(&buf->list);
	}

	spin_unlock_irqrestore(&usbtv->buflock, flags);
}
/*
 * vb2ops_venc_buf_prepare - check every plane against the queue's format.
 *
 * Returns 0 when all planes are large enough for the configured sizeimage,
 * -EINVAL otherwise.
 */
static int vb2ops_venc_buf_prepare(struct vb2_buffer *vb)
{
	struct mtk_vcodec_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct mtk_q_data *q_data;
	int i;

	q_data = mtk_venc_get_q_data(ctx, vb->vb2_queue->type);

	for (i = 0; i < q_data->fmt->num_planes; i++) {
		if (vb2_plane_size(vb, i) < q_data->sizeimage[i]) {
			/* %u: sizeimage is unsigned (was printed with %d) */
			mtk_v4l2_err("data will not fit into plane %d (%lu < %u)",
				     i, vb2_plane_size(vb, i),
				     q_data->sizeimage[i]);
			return -EINVAL;
		}
	}

	return 0;
}
/*
 * fimc_is_isp_buf_prepare - validate each plane of an ISP Bayer buffer.
 *
 * Compares every plane of the buffer with the size configured for the
 * Bayer video node and records the payload.
 *
 * Returns 0 on success, -EINVAL when a plane is too small.
 */
static int fimc_is_isp_buf_prepare(struct vb2_buffer *vb)
{
	struct fimc_is_video_dev *video = vb->vb2_queue->drv_priv;
	struct fimc_is_dev *is_dev = video->dev;
	unsigned long size;
	int i;

	for (i = 0; i < is_dev->video[FIMC_IS_VIDEO_NUM_BAYER].num_plane; i++) {
		size = is_dev->video[FIMC_IS_VIDEO_NUM_BAYER].plane_size[i];
		if (vb2_plane_size(vb, i) < size) {
			/* %lu: both values are unsigned long (was %ld) */
			err("User buffer too small(%lu < %lu)\n",
			    vb2_plane_size(vb, i), size);
			return -EINVAL;
		}
		vb2_set_plane_payload(vb, i, size);
	}
	return 0;
}
/* Cache the plane's DMA address and size for the queue handler. */
static int xvip_dma_buffer_prepare(struct vb2_buffer *vb)
{
	struct xvip_dma *dma_dev = vb2_get_drv_priv(vb->vb2_queue);
	struct xvip_dma_buffer *dbuf = to_xvip_dma_buffer(vb);

	dbuf->dma = dma_dev;
	dbuf->addr = vb2_dma_contig_plane_dma_addr(vb, 0);
	dbuf->length = vb2_plane_size(vb, 0);
	dbuf->bytesused = 0;

	return 0;
}
/*
 * hva_buf_prepare - vb2 .buf_prepare handler for the HVA encoder.
 *
 * OUTPUT (frame) buffers: progressive-only content is enforced, and on
 * first prepare the plane's virtual/DMA addresses plus the current frame
 * info are cached.  CAPTURE (stream) buffers: addresses and plane size are
 * cached on first prepare.  The 'prepared' flag makes the caching run only
 * once per buffer.
 */
static int hva_buf_prepare(struct vb2_buffer *vb)
{
	struct hva_ctx *ctx = vb2_get_drv_priv(vb->vb2_queue);
	struct device *dev = ctx_to_dev(ctx);
	struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb);

	if (vb->vb2_queue->type == V4L2_BUF_TYPE_VIDEO_OUTPUT) {
		struct hva_frame *frame = to_hva_frame(vbuf);

		if (vbuf->field == V4L2_FIELD_ANY)
			vbuf->field = V4L2_FIELD_NONE;
		/* Only progressive frames are supported. */
		if (vbuf->field != V4L2_FIELD_NONE) {
			dev_dbg(dev,
				"%s frame[%d] prepare: %d field not supported\n",
				ctx->name, vb->index, vbuf->field);
			return -EINVAL;
		}

		if (!frame->prepared) {
			/* get memory addresses */
			frame->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
			frame->paddr = vb2_dma_contig_plane_dma_addr(
					&vbuf->vb2_buf, 0);
			frame->info = ctx->frameinfo;
			frame->prepared = true;
			dev_dbg(dev,
				"%s frame[%d] prepared; virt=%p, phy=%pad\n",
				ctx->name, vb->index,
				frame->vaddr, &frame->paddr);
		}
	} else {
		struct hva_stream *stream = to_hva_stream(vbuf);

		if (!stream->prepared) {
			/* get memory addresses */
			stream->vaddr = vb2_plane_vaddr(&vbuf->vb2_buf, 0);
			stream->paddr = vb2_dma_contig_plane_dma_addr(
					&vbuf->vb2_buf, 0);
			stream->size = vb2_plane_size(&vbuf->vb2_buf, 0);
			stream->prepared = true;
			dev_dbg(dev,
				"%s stream[%d] prepared; virt=%p, phy=%pad\n",
				ctx->name, vb->index,
				stream->vaddr, &stream->paddr);
		}
	}

	return 0;
}
/*
 * rockchip_vpu_job_finish - complete one mem2mem encode job.
 *
 * Drops the PM/clock references, removes the source and destination buffers
 * from the m2m queues, propagates timestamp/timecode metadata from source
 * to destination, copies the payload out of the bounce buffer (when one is
 * in use) and completes both buffers with @result — downgraded to ERROR if
 * the payload does not fit behind the destination header.
 */
static void rockchip_vpu_job_finish(struct rockchip_vpu_dev *vpu,
				    struct rockchip_vpu_ctx *ctx,
				    unsigned int bytesused,
				    enum vb2_buffer_state result)
{
	struct vb2_v4l2_buffer *src, *dst;
	size_t avail_size;

	pm_runtime_mark_last_busy(vpu->dev);
	pm_runtime_put_autosuspend(vpu->dev);
	clk_bulk_disable(vpu->variant->num_clocks, vpu->clocks);

	src = v4l2_m2m_src_buf_remove(ctx->fh.m2m_ctx);
	dst = v4l2_m2m_dst_buf_remove(ctx->fh.m2m_ctx);

	/* NOTE(review): on these WARN paths the buffer that WAS removed is
	 * neither completed nor requeued — confirm this cannot happen in
	 * practice, otherwise a buffer leaks. */
	if (WARN_ON(!src))
		return;
	if (WARN_ON(!dst))
		return;

	src->sequence = ctx->sequence_out++;
	dst->sequence = ctx->sequence_cap++;

	/* Propagate timing metadata from source to destination. */
	dst->field = src->field;
	if (src->flags & V4L2_BUF_FLAG_TIMECODE)
		dst->timecode = src->timecode;
	dst->vb2_buf.timestamp = src->vb2_buf.timestamp;
	dst->flags &= ~(V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
			V4L2_BUF_FLAG_TIMECODE);
	dst->flags |= src->flags & (V4L2_BUF_FLAG_TSTAMP_SRC_MASK |
				    V4L2_BUF_FLAG_TIMECODE);

	/* Space left in the destination plane after the format's header. */
	avail_size = vb2_plane_size(&dst->vb2_buf, 0) -
		     ctx->vpu_dst_fmt->header_size;
	if (bytesused <= avail_size) {
		if (ctx->bounce_buf) {
			memcpy(vb2_plane_vaddr(&dst->vb2_buf, 0) +
			       ctx->vpu_dst_fmt->header_size,
			       ctx->bounce_buf, bytesused);
		}
		dst->vb2_buf.planes[0].bytesused =
			ctx->vpu_dst_fmt->header_size + bytesused;
	} else {
		result = VB2_BUF_STATE_ERROR;
	}

	v4l2_m2m_buf_done(src, result);
	v4l2_m2m_buf_done(dst, result);

	v4l2_m2m_job_finish(vpu->m2m_dev, ctx->fh.m2m_ctx);
}
/*
 * buffer_prepare - validate every plane of a FIMC-LITE capture buffer.
 *
 * Ensures each memory plane can hold the configured payload and records it.
 * Returns 0 on success, -EINVAL when no format is set or a plane is short.
 */
static int buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_queue *vq = vb->vb2_queue;
	struct fimc_lite *fimc = vq->drv_priv;
	int i;

	if (fimc->fmt == NULL)
		return -EINVAL;

	for (i = 0; i < fimc->fmt->memplanes; i++) {
		unsigned long size = fimc->payload[i];

		if (vb2_plane_size(vb, i) < size) {
			/* %lu: both values are unsigned long (was %ld) */
			v4l2_err(&fimc->vfd,
				 "User buffer too small (%lu < %lu)\n",
				 vb2_plane_size(vb, i), size);
			return -EINVAL;
		}
		vb2_set_plane_payload(vb, i, size);
	}
	return 0;
}
/*
 * buffer_prepare - validate every plane of a FIMC capture buffer.
 *
 * Ensures each memory plane can hold the configured destination-frame
 * payload and records it.  Returns 0 on success, -EINVAL when no format is
 * set or a plane is too small.
 */
static int buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_queue *vq = vb->vb2_queue;
	struct fimc_ctx *ctx = vq->drv_priv;
	int i;

	if (ctx->d_frame.fmt == NULL)
		return -EINVAL;

	for (i = 0; i < ctx->d_frame.fmt->memplanes; i++) {
		unsigned long size = ctx->d_frame.payload[i];

		if (vb2_plane_size(vb, i) < size) {
			/* %lu: both values are unsigned long (was %ld) */
			v4l2_err(ctx->fimc_dev->vid_cap.vfd,
				 "User buffer too small (%lu < %lu)\n",
				 vb2_plane_size(vb, i), size);
			return -EINVAL;
		}
		vb2_set_plane_payload(vb, i, size);
	}
	return 0;
}
static void vbi_buffer_queue(struct vb2_buffer *vb) { struct em28xx *dev = vb2_get_drv_priv(vb->vb2_queue); struct em28xx_buffer *buf = container_of(vb, struct em28xx_buffer, vb); struct em28xx_dmaqueue *vbiq = &dev->vbiq; unsigned long flags = 0; buf->mem = vb2_plane_vaddr(vb, 0); buf->length = vb2_plane_size(vb, 0); spin_lock_irqsave(&dev->slock, flags); list_add_tail(&buf->list, &vbiq->active); spin_unlock_irqrestore(&dev->slock, flags); }
/*
 * buffer_prepare - validate a FIMC m2m capture buffer.
 *
 * Only multi-planar capture buffers with a configured destination format
 * are accepted; each plane must hold the computed plane size, which is
 * recorded as the payload.
 *
 * Returns 0 on success, -EINVAL otherwise.
 */
static int buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_queue *vq = vb->vb2_queue;
	struct fimc_ctx *ctx = vq->drv_priv;
	struct v4l2_device *v4l2_dev = &ctx->fimc_dev->m2m.v4l2_dev;
	int i;

	if (!ctx->d_frame.fmt || vq->type != V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
		return -EINVAL;

	for (i = 0; i < ctx->d_frame.fmt->memplanes; i++) {
		unsigned long size = get_plane_size(&ctx->d_frame, i);

		if (vb2_plane_size(vb, i) < size) {
			/* %lu: both values are unsigned long (was %ld) */
			v4l2_err(v4l2_dev,
				 "User buffer too small (%lu < %lu)\n",
				 vb2_plane_size(vb, i), size);
			return -EINVAL;
		}
		vb2_set_plane_payload(vb, i, size);
	}
	return 0;
}
/*
 * enc_pre_seq_start - program the encoder stream buffer before SEQ_START.
 *
 * Takes the first destination buffer from the context's dst_queue and hands
 * its DMA address and plane size to the MFC hardware as the output stream
 * buffer.  Runs under irqlock because dst_queue is shared with the
 * interrupt handler.
 *
 * NOTE(review): dst_queue is dereferenced via list_entry() without an
 * emptiness check — presumably callers guarantee at least one queued
 * destination buffer here; confirm against the call sites.
 */
static int enc_pre_seq_start(struct s5p_mfc_ctx *ctx)
{
	struct s5p_mfc_dev *dev = ctx->dev;
	struct s5p_mfc_buf *dst_mb;
	unsigned long dst_addr;
	unsigned int dst_size;
	unsigned long flags;

	spin_lock_irqsave(&dev->irqlock, flags);
	dst_mb = list_entry(ctx->dst_queue.next, struct s5p_mfc_buf, list);
	dst_addr = vb2_dma_contig_plane_dma_addr(dst_mb->b, 0);
	dst_size = vb2_plane_size(dst_mb->b, 0);
	s5p_mfc_set_enc_stream_buffer(ctx, dst_addr, dst_size);
	spin_unlock_irqrestore(&dev->irqlock, flags);
	return 0;
}
/* Validate a histogram buffer: exactly one plane, big enough for the data. */
static int histo_buffer_prepare(struct vb2_buffer *vb)
{
	struct vb2_v4l2_buffer *v4l2_buf = to_vb2_v4l2_buffer(vb);
	struct vsp1_histogram *histo = vb2_get_drv_priv(vb->vb2_queue);
	struct vsp1_histogram_buffer *hbuf =
		to_vsp1_histogram_buffer(v4l2_buf);

	if (vb->num_planes != 1 || vb2_plane_size(vb, 0) < histo->data_size)
		return -EINVAL;

	hbuf->addr = vb2_plane_vaddr(vb, 0);

	return 0;
}
/* Check that the plane can hold one output image and set its payload. */
int vc_out_buffer_prepare(struct vb2_buffer *vb)
{
	struct vc_device *dev = vb2_get_drv_priv(vb->vb2_queue);
	unsigned long needed = dev->output_format.sizeimage;

	if (vb2_plane_size(vb, 0) < needed) {
		PRINT_ERROR(KERN_ERR "data will not fit into buffer\n");
		return -EINVAL;
	}

	vb2_set_plane_payload(vb, 0, needed);
	return 0;
}
static int video_buf_prepare(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct camss_video *video = vb2_get_drv_priv(vb->vb2_queue); const struct v4l2_pix_format_mplane *format = &video->active_fmt.fmt.pix_mp; unsigned int i; for (i = 0; i < format->num_planes; i++) { if (format->plane_fmt[i].sizeimage > vb2_plane_size(vb, i)) return -EINVAL; vb2_set_plane_payload(vb, i, format->plane_fmt[i].sizeimage); } vbuf->field = V4L2_FIELD_NONE; return 0; }
/* * The hardware takes care only of ext hdr and dct partition. The software * must take care of frame header. * * Buffer layout as received from hardware: * |<--gap-->|<--ext hdr-->|<-gap->|<---dct part--- * |<-------dct part offset------->| * * Required buffer layout: * |<--hdr-->|<--ext hdr-->|<---dct part--- */ void rk3288_vpu_vp8e_assemble_bitstream(struct rk3288_vpu_ctx *ctx, struct rk3288_vpu_buf *dst_buf) { size_t ext_hdr_size = dst_buf->vp8e.ext_hdr_size; size_t dct_size = dst_buf->vp8e.dct_size; size_t hdr_size = dst_buf->vp8e.hdr_size; size_t dst_size; size_t tag_size; void *dst; u32 *tag; dst_size = vb2_plane_size(&dst_buf->b, 0); dst = vb2_plane_vaddr(&dst_buf->b, 0); tag = dst; /* To access frame tag words. */ if (WARN_ON(hdr_size + ext_hdr_size + dct_size > dst_size)) return; if (WARN_ON(dst_buf->vp8e.dct_offset + dct_size > dst_size)) return; vpu_debug(1, "%s: hdr_size = %u, ext_hdr_size = %u, dct_size = %u\n", __func__, hdr_size, ext_hdr_size, dct_size); memmove(dst + hdr_size + ext_hdr_size, dst + dst_buf->vp8e.dct_offset, dct_size); memcpy(dst, dst_buf->vp8e.header, hdr_size); /* Patch frame tag at first 32-bit word of the frame. */ if (dst_buf->b.v4l2_buf.flags & V4L2_BUF_FLAG_KEYFRAME) { tag_size = VP8_KEY_FRAME_HDR_SIZE; tag[0] &= ~VP8_FRAME_TAG_KEY_FRAME_BIT; } else { tag_size = VP8_INTER_FRAME_HDR_SIZE; tag[0] |= VP8_FRAME_TAG_KEY_FRAME_BIT; } tag[0] &= ~VP8_FRAME_TAG_LENGTH_MASK; tag[0] |= (hdr_size + ext_hdr_size - tag_size) << VP8_FRAME_TAG_LENGTH_SHIFT; vb2_set_plane_payload(&dst_buf->b, 0, hdr_size + ext_hdr_size + dct_size); }
static void buffer_queue(struct vb2_buffer *vb) { unsigned long flags; struct smi2021 *smi2021 = vb2_get_drv_priv(vb->vb2_queue); #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) struct smi2021_buf *buf = container_of(vb, struct smi2021_buf, vb); #else struct smi2021_buf *buf = container_of(vb, struct smi2021_buf, vb.vb2_buf); #endif spin_lock_irqsave(&smi2021->buf_lock, flags); if (!smi2021->udev) { /* * If the device is disconnected return the buffer to userspace * directly. The next QBUF call will fail with -ENODEV. */ #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); #else vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); #endif } else { buf->mem = vb2_plane_vaddr(vb, 0); buf->length = vb2_plane_size(vb, 0); buf->pos = 0; buf->in_blank = true; buf->odd = false; /* * If the buffer length is less than expected, * we return the buffer back to userspace */ if (buf->length < SMI2021_BYTES_PER_LINE * smi2021->cur_height) #if LINUX_VERSION_CODE < KERNEL_VERSION(4, 4, 0) vb2_buffer_done(&buf->vb, VB2_BUF_STATE_ERROR); #else vb2_buffer_done(&buf->vb.vb2_buf, VB2_BUF_STATE_ERROR); #endif else list_add_tail(&buf->list, &smi2021->avail_bufs); }
static int iss_video_buf_prepare(struct vb2_buffer *vb) { struct iss_video_fh *vfh = vb2_get_drv_priv(vb->vb2_queue); struct iss_buffer *buffer = container_of(vb, struct iss_buffer, vb); struct iss_video *video = vfh->video; unsigned long size = vfh->format.fmt.pix.sizeimage; dma_addr_t addr; if (vb2_plane_size(vb, 0) < size) return -ENOBUFS; addr = vb2_dma_contig_plane_dma_addr(vb, 0); if (!IS_ALIGNED(addr, 32)) { dev_dbg(video->iss->dev, "Buffer address must be aligned to 32 bytes boundary.\n"); return -EINVAL; } vb2_set_plane_payload(vb, 0, size); buffer->iss_addr = addr; return 0; }