Code example #1
File: omap24xxcam.c  Project: 03199618/linux
/*
 * This routine is called from interrupt context when a scatter-gather DMA
 * transfer of a videobuf_buffer completes.
 */
static void omap24xxcam_vbq_complete(struct omap24xxcam_sgdma *sgdma,
				     u32 csr, void *arg)
{
	struct omap24xxcam_device *cam =
		container_of(sgdma, struct omap24xxcam_device, sgdma);
	struct omap24xxcam_fh *fh = cam->streaming->private_data;
	struct videobuf_buffer *vb = (struct videobuf_buffer *)arg;
	const u32 csr_error = CAMDMA_CSR_MISALIGNED_ERR
		| CAMDMA_CSR_SUPERVISOR_ERR | CAMDMA_CSR_SECURE_ERR
		| CAMDMA_CSR_TRANS_ERR | CAMDMA_CSR_DROP;
	unsigned long flags;

	spin_lock_irqsave(&cam->core_enable_disable_lock, flags);
	if (--cam->sgdma_in_queue == 0)
		omap24xxcam_core_disable(cam);
	spin_unlock_irqrestore(&cam->core_enable_disable_lock, flags);

	v4l2_get_timestamp(&vb->ts);
	vb->field_count = atomic_add_return(2, &fh->field_count);
	if (csr & csr_error) {
		vb->state = VIDEOBUF_ERROR;
		if (!atomic_read(&fh->cam->in_reset)) {
			dev_dbg(cam->dev, "resetting camera, csr 0x%x\n", csr);
			omap24xxcam_reset(cam);
		}
	} else
		vb->state = VIDEOBUF_DONE;
	wake_up(&vb->done);
}
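
Example #1 above and example #2 below share the same legacy-videobuf completion sequence: timestamp the buffer, bump the field count, set the final state and wake up any waiter. The following is a minimal sketch of that recurring pattern; my_complete() is a hypothetical helper, not part of either driver, and it assumes the pre-vb2 videobuf API used in these excerpts.

#include <linux/wait.h>
#include <media/v4l2-common.h>
#include <media/videobuf-core.h>

/* Hypothetical helper condensing the completion steps seen above. */
static void my_complete(struct videobuf_buffer *vb, bool error)
{
	v4l2_get_timestamp(&vb->ts);	/* monotonic capture time */
	vb->field_count++;		/* drivers use ++ or += 2 per frame */
	vb->state = error ? VIDEOBUF_ERROR : VIDEOBUF_DONE;
	wake_up(&vb->done);		/* unblocks DQBUF / poll() waiters */
}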
Code example #2
File: mx1_camera.c  Project: 03199618/linux
static void mx1_camera_wakeup(struct mx1_camera_dev *pcdev,
			      struct videobuf_buffer *vb,
			      struct mx1_buffer *buf)
{
	/* _init is used to debug races, see comment in mx1_camera_reqbufs() */
	list_del_init(&vb->queue);
	vb->state = VIDEOBUF_DONE;
	v4l2_get_timestamp(&vb->ts);
	vb->field_count++;
	wake_up(&vb->done);

	if (list_empty(&pcdev->capture)) {
		pcdev->active = NULL;
		return;
	}

	pcdev->active = list_entry(pcdev->capture.next,
				   struct mx1_buffer, vb.queue);

	/* setup sg list for future DMA */
	if (likely(!mx1_camera_setup_dma(pcdev))) {
		unsigned int temp;

		/* enable SOF irq */
		temp = __raw_readl(pcdev->base + CSICR1) | CSICR1_SOF_INTEN;
		__raw_writel(temp, pcdev->base + CSICR1);
	}
}
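
For reference, v4l2_get_timestamp() itself simply samples the monotonic clock into a struct timeval, so buffer timestamps are immune to wall-clock jumps. The sketch below is a paraphrase of the v4l2-common helper from this kernel generation, not a verbatim copy, and uses a hypothetical name to avoid clashing with the real symbol.

#include <linux/ktime.h>
#include <linux/time.h>

/* Roughly what v4l2_get_timestamp() does: monotonic clock -> timeval. */
static void my_v4l2_get_timestamp(struct timeval *tv)
{
	struct timespec ts;

	ktime_get_ts(&ts);
	tv->tv_sec = ts.tv_sec;
	tv->tv_usec = ts.tv_nsec / NSEC_PER_USEC;
}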
Code example #3
/* Called for each 256-byte image chunk.
 * First word identifies the chunk, followed by 240 words of image
 * data and padding. */
static void usbtv_image_chunk(struct usbtv *usbtv, __be32 *chunk)
{
	int frame_id, odd, chunk_no;
	u32 *frame;
	struct usbtv_buf *buf;
	unsigned long flags;

	/* Ignore corrupted lines. */
	if (!USBTV_MAGIC_OK(chunk))
		return;
	frame_id = USBTV_FRAME_ID(chunk);
	odd = USBTV_ODD(chunk);
	chunk_no = USBTV_CHUNK_NO(chunk);
	if (chunk_no >= usbtv->n_chunks)
		return;

	/* Beginning of a frame. */
	if (chunk_no == 0) {
		usbtv->frame_id = frame_id;
		usbtv->chunks_done = 0;
	}

	if (usbtv->frame_id != frame_id)
		return;

	spin_lock_irqsave(&usbtv->buflock, flags);
	if (list_empty(&usbtv->bufs)) {
		/* No free buffers. Userspace likely too slow. */
		spin_unlock_irqrestore(&usbtv->buflock, flags);
		return;
	}

	/* First available buffer. */
	buf = list_first_entry(&usbtv->bufs, struct usbtv_buf, list);
	frame = vb2_plane_vaddr(&buf->vb, 0);

	/* Copy the chunk data. */
	usbtv_chunk_to_vbuf(frame, &chunk[1], chunk_no, odd);
	usbtv->chunks_done++;

	/* Last chunk in a frame, signalling an end */
	if (odd && chunk_no == usbtv->n_chunks-1) {
		int size = vb2_plane_size(&buf->vb, 0);
		enum vb2_buffer_state state = usbtv->chunks_done ==
						usbtv->n_chunks ?
						VB2_BUF_STATE_DONE :
						VB2_BUF_STATE_ERROR;

		buf->vb.v4l2_buf.field = V4L2_FIELD_INTERLACED;
		buf->vb.v4l2_buf.sequence = usbtv->sequence++;
		v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
		vb2_set_plane_payload(&buf->vb, 0, size);
		vb2_buffer_done(&buf->vb, state);
		list_del(&buf->list);
	}

	spin_unlock_irqrestore(&usbtv->buflock, flags);
}
Code example #4
File: xilinx-dma.c  Project: bmouring/linux-xlnx
static void xvip_dma_complete(void *param)
{
	struct xvip_dma_buffer *buf = param;
	struct xvip_dma *dma = buf->dma;

	buf->buf.v4l2_buf.sequence = dma->sequence++;
	v4l2_get_timestamp(&buf->buf.v4l2_buf.timestamp);
	vb2_set_plane_payload(&buf->buf, 0, buf->length);
	vb2_buffer_done(&buf->buf, VB2_BUF_STATE_DONE);
}
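
Example #4 above is the minimal vb2 variant of the same idea: sequence number, timestamp, payload size, then vb2_buffer_done(). The condensed sketch below mirrors that flow using the pre-4.4 vb2 field layout (vb->v4l2_buf) seen in most examples on this page; my_dma_complete() and its parameters are assumptions for illustration only.

#include <media/v4l2-common.h>
#include <media/videobuf2-core.h>

/* Hypothetical DMA-completion callback mirroring xvip_dma_complete(). */
static void my_dma_complete(struct vb2_buffer *vb, unsigned int *sequence,
			    unsigned long bytes)
{
	vb->v4l2_buf.sequence = (*sequence)++;		/* frame counter */
	v4l2_get_timestamp(&vb->v4l2_buf.timestamp);	/* completion time */
	vb2_set_plane_payload(vb, 0, bytes);		/* bytes actually filled */
	vb2_buffer_done(vb, VB2_BUF_STATE_DONE);	/* hand back to vb2 */
}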
Code example #5
File: vivid-ctrls.c  Project: 383530895/linux
static int vivid_streaming_s_ctrl(struct v4l2_ctrl *ctrl)
{
	struct vivid_dev *dev = container_of(ctrl->handler, struct vivid_dev, ctrl_hdl_streaming);
	struct timeval tv;

	switch (ctrl->id) {
	case VIVID_CID_DQBUF_ERROR:
		dev->dqbuf_error = true;
		break;
	case VIVID_CID_PERC_DROPPED:
		dev->perc_dropped_buffers = ctrl->val;
		break;
	case VIVID_CID_QUEUE_SETUP_ERROR:
		dev->queue_setup_error = true;
		break;
	case VIVID_CID_BUF_PREPARE_ERROR:
		dev->buf_prepare_error = true;
		break;
	case VIVID_CID_START_STR_ERROR:
		dev->start_streaming_error = true;
		break;
	case VIVID_CID_QUEUE_ERROR:
		if (vb2_start_streaming_called(&dev->vb_vid_cap_q))
			vb2_queue_error(&dev->vb_vid_cap_q);
		if (vb2_start_streaming_called(&dev->vb_vbi_cap_q))
			vb2_queue_error(&dev->vb_vbi_cap_q);
		if (vb2_start_streaming_called(&dev->vb_vid_out_q))
			vb2_queue_error(&dev->vb_vid_out_q);
		if (vb2_start_streaming_called(&dev->vb_vbi_out_q))
			vb2_queue_error(&dev->vb_vbi_out_q);
		if (vb2_start_streaming_called(&dev->vb_sdr_cap_q))
			vb2_queue_error(&dev->vb_sdr_cap_q);
		break;
	case VIVID_CID_SEQ_WRAP:
		dev->seq_wrap = ctrl->val;
		break;
	case VIVID_CID_TIME_WRAP:
		dev->time_wrap = ctrl->val;
		if (ctrl->val == 0) {
			dev->time_wrap_offset = 0;
			break;
		}
		v4l2_get_timestamp(&tv);
		dev->time_wrap_offset = -tv.tv_sec - 16;
		break;
	}
	return 0;
}
Code example #6
File: cx231xx-vbi.c  Project: AiWinters/linux
/*
 * Announces that a buffer was filled and requests the next
 */
static inline void vbi_buffer_filled(struct cx231xx *dev,
				     struct cx231xx_dmaqueue *dma_q,
				     struct cx231xx_buffer *buf)
{
	/* Advise that the buffer was filled */
	/* cx231xx_info(DRIVER_NAME "[%p/%d] wakeup\n", buf, buf->vb.i); */

	buf->vb.state = VIDEOBUF_DONE;
	buf->vb.field_count++;
	v4l2_get_timestamp(&buf->vb.ts);

	dev->vbi_mode.bulk_ctl.buf = NULL;

	list_del(&buf->vb.queue);
	wake_up(&buf->vb.done);
}
Code example #7
File: cx23885-video.c  Project: mikemvk/linux-at91
void cx23885_video_wakeup(struct cx23885_dev *dev,
	struct cx23885_dmaqueue *q, u32 count)
{
	struct cx23885_buffer *buf;

	if (list_empty(&q->active))
		return;
	buf = list_entry(q->active.next,
			struct cx23885_buffer, queue);

	buf->vb.v4l2_buf.sequence = q->count++;
	v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
	dprintk(2, "[%p/%d] wakeup reg=%d buf=%d\n", buf, buf->vb.v4l2_buf.index,
			count, q->count);
	list_del(&buf->queue);
	vb2_buffer_done(&buf->vb, VB2_BUF_STATE_DONE);
}
Code example #8
File: ispstat.c  Project: AshishNamdev/linux
static int isp_stat_buf_queue(struct ispstat *stat)
{
	if (!stat->active_buf)
		return STAT_NO_BUF;

	v4l2_get_timestamp(&stat->active_buf->ts);

	stat->active_buf->buf_size = stat->buf_size;
	if (isp_stat_buf_check_magic(stat, stat->active_buf)) {
		dev_dbg(stat->isp->dev, "%s: data wasn't properly written.\n",
			stat->subdev.name);
		return STAT_NO_BUF;
	}
	stat->active_buf->config_counter = stat->config_counter;
	stat->active_buf->frame_number = stat->frame_number;
	stat->active_buf->empty = 0;
	stat->active_buf = NULL;

	return STAT_BUF_DONE;
}
Code example #9
File: solo6x10-v4l2.c  Project: 168519/linux
static void solo_fillbuf(struct solo_dev *solo_dev,
			 struct vb2_buffer *vb)
{
	dma_addr_t vbuf;
	unsigned int fdma_addr;
	int error = -1;
	int i;

	vbuf = vb2_dma_contig_plane_dma_addr(vb, 0);
	if (!vbuf)
		goto finish_buf;

	if (erase_off(solo_dev)) {
		void *p = vb2_plane_vaddr(vb, 0);
		int image_size = solo_image_size(solo_dev);

		for (i = 0; i < image_size; i += 2) {
			((u8 *)p)[i] = 0x80;
			((u8 *)p)[i + 1] = 0x00;
		}
		error = 0;
	} else {
		fdma_addr = SOLO_DISP_EXT_ADDR + (solo_dev->old_write *
				(SOLO_HW_BPL * solo_vlines(solo_dev)));

		error = solo_p2m_dma_t(solo_dev, 0, vbuf, fdma_addr,
				       solo_bytesperline(solo_dev),
				       solo_vlines(solo_dev), SOLO_HW_BPL);
	}

finish_buf:
	if (!error) {
		vb2_set_plane_payload(vb, 0,
			solo_vlines(solo_dev) * solo_bytesperline(solo_dev));
		vb->v4l2_buf.sequence = solo_dev->sequence++;
		v4l2_get_timestamp(&vb->v4l2_buf.timestamp);
	}

	vb2_buffer_done(vb, error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);
}
Code example #10
File: saa7146_fops.c  Project: AlexShiLucky/linux
void saa7146_buffer_finish(struct saa7146_dev *dev,
			   struct saa7146_dmaqueue *q,
			   int state)
{
	assert_spin_locked(&dev->slock);
	DEB_EE("dev:%p, dmaq:%p, state:%d\n", dev, q, state);
	DEB_EE("q->curr:%p\n", q->curr);

	BUG_ON(!q->curr);

	/* finish current buffer */
	if (NULL == q->curr) {
		DEB_D("aiii. no current buffer\n");
		return;
	}

	q->curr->vb.state = state;
	v4l2_get_timestamp(&q->curr->vb.ts);
	wake_up(&q->curr->vb.done);

	q->curr = NULL;
}
Code example #11
File: dt3155v4l.c  Project: imcek/BEAGLEBONE_BSP
static irqreturn_t
dt3155_irq_handler_even(int irq, void *dev_id)
{
	struct dt3155_priv *ipd = dev_id;
	struct vb2_buffer *ivb;
	dma_addr_t dma_addr;
	u32 tmp;

	tmp = ioread32(ipd->regs + INT_CSR) & (FLD_START | FLD_END_ODD);
	if (!tmp)
		return IRQ_NONE;  /* not our irq */
	if ((tmp & FLD_START) && !(tmp & FLD_END_ODD)) {
		iowrite32(FLD_START_EN | FLD_END_ODD_EN | FLD_START,
							ipd->regs + INT_CSR);
		ipd->field_count++;
		return IRQ_HANDLED; /* start of field irq */
	}
	if ((tmp & FLD_START) && (tmp & FLD_END_ODD))
		ipd->stats.start_before_end++;
	/*	check for corrupted fields     */
/*	write_i2c_reg(ipd->regs, EVEN_CSR, CSR_ERROR | CSR_DONE);	*/
/*	write_i2c_reg(ipd->regs, ODD_CSR, CSR_ERROR | CSR_DONE);	*/
	tmp = ioread32(ipd->regs + CSR1) & (FLD_CRPT_EVEN | FLD_CRPT_ODD);
	if (tmp) {
		ipd->stats.corrupted_fields++;
		iowrite32(FIFO_EN | SRST | FLD_CRPT_ODD | FLD_CRPT_EVEN |
						FLD_DN_ODD | FLD_DN_EVEN |
						CAP_CONT_EVEN | CAP_CONT_ODD,
							ipd->regs + CSR1);
		mmiowb();
	}

	spin_lock(&ipd->lock);
	if (ipd->curr_buf) {
		v4l2_get_timestamp(&ipd->curr_buf->v4l2_buf.timestamp);
		ipd->curr_buf->v4l2_buf.sequence = (ipd->field_count) >> 1;
		vb2_buffer_done(ipd->curr_buf, VB2_BUF_STATE_DONE);
	}
Code example #12
File: cpia2_usb.c  Project: acton393/linux
/******************************************************************************
 *
 *  cpia2_usb_complete
 *
 *  callback when incoming packet is received
 *****************************************************************************/
static void cpia2_usb_complete(struct urb *urb)
{
	int i;
	unsigned char *cdata;
	static bool frame_ready = false;
	struct camera_data *cam = (struct camera_data *) urb->context;

	if (urb->status!=0) {
		if (!(urb->status == -ENOENT ||
		      urb->status == -ECONNRESET ||
		      urb->status == -ESHUTDOWN))
		{
			DBG("urb->status = %d!\n", urb->status);
		}
		DBG("Stopping streaming\n");
		return;
	}

	if (!cam->streaming || !video_is_registered(&cam->vdev)) {
		LOG("Will now stop the streaming: streaming = %d, present=%d\n",
		    cam->streaming, video_is_registered(&cam->vdev));
		return;
	}

	/***
	 * Packet collater
	 ***/
	//DBG("Collating %d packets\n", urb->number_of_packets);
	for (i = 0; i < urb->number_of_packets; i++) {
		u16 checksum, iso_checksum;
		int j;
		int n = urb->iso_frame_desc[i].actual_length;
		int st = urb->iso_frame_desc[i].status;

		if(cam->workbuff->status == FRAME_READY) {
			struct framebuf *ptr;
			/* Try to find an available buffer */
			DBG("workbuff full, searching\n");
			for (ptr = cam->workbuff->next;
			     ptr != cam->workbuff;
			     ptr = ptr->next)
			{
				if (ptr->status == FRAME_EMPTY) {
					ptr->status = FRAME_READING;
					ptr->length = 0;
					break;
				}
			}
			if (ptr == cam->workbuff)
				break; /* No READING or EMPTY buffers left */

			cam->workbuff = ptr;
		}

		if (cam->workbuff->status == FRAME_EMPTY ||
		    cam->workbuff->status == FRAME_ERROR) {
			cam->workbuff->status = FRAME_READING;
			cam->workbuff->length = 0;
		}

		//DBG("   Packet %d length = %d, status = %d\n", i, n, st);
		cdata = urb->transfer_buffer + urb->iso_frame_desc[i].offset;

		if (st) {
			LOG("cpia2 data error: [%d] len=%d, status = %d\n",
			    i, n, st);
			if(!ALLOW_CORRUPT)
				cam->workbuff->status = FRAME_ERROR;
			continue;
		}

		if(n<=2)
			continue;

		checksum = 0;
		for(j=0; j<n-2; ++j)
			checksum += cdata[j];
		iso_checksum = cdata[j] + cdata[j+1]*256;
		if(checksum != iso_checksum) {
			LOG("checksum mismatch: [%d] len=%d, calculated = %x, checksum = %x\n",
			    i, n, (int)checksum, (int)iso_checksum);
			if(!ALLOW_CORRUPT) {
				cam->workbuff->status = FRAME_ERROR;
				continue;
			}
		}
		n -= 2;

		if(cam->workbuff->status != FRAME_READING) {
			if((0xFF == cdata[0] && 0xD8 == cdata[1]) ||
			   (0xD8 == cdata[0] && 0xFF == cdata[1] &&
			    0 != cdata[2])) {
				/* frame is skipped, but increment total
				 * frame count anyway */
				cam->frame_count++;
			}
			DBG("workbuff not reading, status=%d\n",
			    cam->workbuff->status);
			continue;
		}

		if (cam->frame_size < cam->workbuff->length + n) {
			ERR("buffer overflow! length: %d, n: %d\n",
			    cam->workbuff->length, n);
			cam->workbuff->status = FRAME_ERROR;
			if(cam->workbuff->length > cam->workbuff->max_length)
				cam->workbuff->max_length =
					cam->workbuff->length;
			continue;
		}

		if (cam->workbuff->length == 0) {
			int data_offset;
			if ((0xD8 == cdata[0]) && (0xFF == cdata[1])) {
				data_offset = 1;
			} else if((0xFF == cdata[0]) && (0xD8 == cdata[1])
				  && (0xFF == cdata[2])) {
				data_offset = 2;
			} else {
				DBG("Ignoring packet, not beginning!\n");
				continue;
			}
			DBG("Start of frame pattern found\n");
			v4l2_get_timestamp(&cam->workbuff->timestamp);
			cam->workbuff->seq = cam->frame_count++;
			cam->workbuff->data[0] = 0xFF;
			cam->workbuff->data[1] = 0xD8;
			cam->workbuff->length = 2;
			add_APPn(cam);
			add_COM(cam);
			memcpy(cam->workbuff->data+cam->workbuff->length,
			       cdata+data_offset, n-data_offset);
			cam->workbuff->length += n-data_offset;
		} else if (cam->workbuff->length > 0) {
			memcpy(cam->workbuff->data + cam->workbuff->length,
			       cdata, n);
			cam->workbuff->length += n;
		}

		if ((cam->workbuff->length >= 3) &&
		    (cam->workbuff->data[cam->workbuff->length - 3] == 0xFF) &&
		    (cam->workbuff->data[cam->workbuff->length - 2] == 0xD9) &&
		    (cam->workbuff->data[cam->workbuff->length - 1] == 0xFF)) {
			frame_ready = true;
			cam->workbuff->data[cam->workbuff->length - 1] = 0;
			cam->workbuff->length -= 1;
		} else if ((cam->workbuff->length >= 2) &&
		   (cam->workbuff->data[cam->workbuff->length - 2] == 0xFF) &&
		   (cam->workbuff->data[cam->workbuff->length - 1] == 0xD9)) {
			frame_ready = true;
		}

		if (frame_ready) {
			DBG("Workbuff image size = %d\n",cam->workbuff->length);
			process_frame(cam);

			frame_ready = false;

			if (waitqueue_active(&cam->wq_stream))
				wake_up_interruptible(&cam->wq_stream);
		}
	}

	if(cam->streaming) {
		/* resubmit */
		urb->dev = cam->dev;
		if ((i = usb_submit_urb(urb, GFP_ATOMIC)) != 0)
			ERR("%s: usb_submit_urb ret %d!\n", __func__, i);
	}
}
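
The packet collater above validates each ISO packet with a 16-bit sum of the payload, compared against a little-endian trailer in the last two bytes. That check can be isolated into a small helper; the sketch below is a hypothetical refactoring of the checksum loop in the collater, not code from the driver.

#include <linux/types.h>

/* Verify a cpia2-style ISO packet: a 16-bit sum of the first n-2 bytes must
 * match the little-endian checksum stored in the last two bytes. */
static bool iso_packet_checksum_ok(const u8 *cdata, int n)
{
	u16 checksum = 0;
	int j;

	if (n <= 2)
		return false;	/* too short to carry a checksum */
	for (j = 0; j < n - 2; j++)
		checksum += cdata[j];
	return checksum == (u16)(cdata[j] + cdata[j + 1] * 256);
}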
Code example #13
File: videobuf2-v4l2.c  Project: Chong-Li/cse522
/**
 * __vb2_perform_fileio() - perform a single file io (read or write) operation
 * @q:		videobuf2 queue
 * @data:	pointer to the target userspace buffer
 * @count:	number of bytes to read or write
 * @ppos:	file handle position tracking pointer
 * @nonblock:	mode selector (1 means non-blocking calls, 0 means blocking)
 * @read:	access mode selector (1 means read, 0 means write)
 */
static size_t __vb2_perform_fileio(struct vb2_queue *q, char __user *data, size_t count,
		loff_t *ppos, int nonblock, int read)
{
	struct vb2_fileio_data *fileio;
	struct vb2_fileio_buf *buf;
	bool is_multiplanar = q->is_multiplanar;
	/*
	 * When using write() to write data to an output video node the vb2 core
	 * should set timestamps if V4L2_BUF_FLAG_TIMESTAMP_COPY is set. Nobody
	 * else is able to provide this information with the write() operation.
	 */
	bool set_timestamp = !read &&
		(q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
		V4L2_BUF_FLAG_TIMESTAMP_COPY;
	int ret, index;

	dprintk(3, "mode %s, offset %ld, count %zd, %sblocking\n",
		read ? "read" : "write", (long)*ppos, count,
		nonblock ? "non" : "");

	if (!data)
		return -EINVAL;

	/*
	 * Initialize emulator on first call.
	 */
	if (!vb2_fileio_is_active(q)) {
		ret = __vb2_init_fileio(q, read);
		dprintk(3, "vb2_init_fileio result: %d\n", ret);
		if (ret)
			return ret;
	}
	fileio = q->fileio;

	/*
	 * Check if we need to dequeue the buffer.
	 */
	index = fileio->cur_index;
	if (index >= q->num_buffers) {
		/*
		 * Call vb2_dqbuf to get buffer back.
		 */
		memset(&fileio->b, 0, sizeof(fileio->b));
		fileio->b.type = q->type;
		fileio->b.memory = q->memory;
		if (is_multiplanar) {
			memset(&fileio->p, 0, sizeof(fileio->p));
			fileio->b.m.planes = &fileio->p;
			fileio->b.length = 1;
		}
		ret = vb2_internal_dqbuf(q, &fileio->b, nonblock);
		dprintk(5, "vb2_dqbuf result: %d\n", ret);
		if (ret)
			return ret;
		fileio->dq_count += 1;

		fileio->cur_index = index = fileio->b.index;
		buf = &fileio->bufs[index];

		/*
		 * Get number of bytes filled by the driver
		 */
		buf->pos = 0;
		buf->queued = 0;
		buf->size = read ? vb2_get_plane_payload(q->bufs[index], 0)
				 : vb2_plane_size(q->bufs[index], 0);
		/* Compensate for data_offset on read in the multiplanar case. */
		if (is_multiplanar && read &&
		    fileio->b.m.planes[0].data_offset < buf->size) {
			buf->pos = fileio->b.m.planes[0].data_offset;
			buf->size -= buf->pos;
		}
	} else {
		buf = &fileio->bufs[index];
	}

	/*
	 * Limit count on last few bytes of the buffer.
	 */
	if (buf->pos + count > buf->size) {
		count = buf->size - buf->pos;
		dprintk(5, "reducing read count: %zd\n", count);
	}

	/*
	 * Transfer data to userspace.
	 */
	dprintk(3, "copying %zd bytes - buffer %d, offset %u\n",
		count, index, buf->pos);
	if (read)
		ret = copy_to_user(data, buf->vaddr + buf->pos, count);
	else
		ret = copy_from_user(buf->vaddr + buf->pos, data, count);
	if (ret) {
		dprintk(3, "error copying data\n");
		return -EFAULT;
	}

	/*
	 * Update counters.
	 */
	buf->pos += count;
	*ppos += count;

	/*
	 * Queue next buffer if required.
	 */
	if (buf->pos == buf->size || (!read && fileio->write_immediately)) {
		/*
		 * Check if this is the last buffer to read.
		 */
		if (read && fileio->read_once && fileio->dq_count == 1) {
			dprintk(3, "read limit reached\n");
			return __vb2_cleanup_fileio(q);
		}

		/*
		 * Call vb2_qbuf and give buffer to the driver.
		 */
		memset(&fileio->b, 0, sizeof(fileio->b));
		fileio->b.type = q->type;
		fileio->b.memory = q->memory;
		fileio->b.index = index;
		fileio->b.bytesused = buf->pos;
		if (is_multiplanar) {
			memset(&fileio->p, 0, sizeof(fileio->p));
			fileio->p.bytesused = buf->pos;
			fileio->b.m.planes = &fileio->p;
			fileio->b.length = 1;
		}
		if (set_timestamp)
			v4l2_get_timestamp(&fileio->b.timestamp);
		ret = vb2_internal_qbuf(q, &fileio->b);
		dprintk(5, "vb2_dbuf result: %d\n", ret);
		if (ret)
			return ret;

		/*
		 * Buffer has been queued, update the status
		 */
		buf->pos = 0;
		buf->queued = 1;
		buf->size = vb2_plane_size(q->bufs[index], 0);
		fileio->q_count += 1;
		/*
		 * If we are queuing up buffers for the first time, then
		 * increase initial_index by one.
		 */
		if (fileio->initial_index < q->num_buffers)
			fileio->initial_index++;
		/*
		 * The next buffer to use is either a buffer that's going to be
		 * queued for the first time (initial_index < q->num_buffers)
		 * or, once initial_index equals q->num_buffers, a buffer that
		 * first has to be dequeued, since all the 'first time' buffers
		 * have been queued up by then.
		 */
		fileio->cur_index = fileio->initial_index;
	}

	/*
	 * Return proper number of bytes processed.
	 */
	if (ret == 0)
		ret = count;
	return ret;
}
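
From userspace, this fileio emulator is what makes plain read()/write() work on a video node. Below is a minimal, hypothetical capture example; the device path and frame size are assumptions, and error handling is reduced to the bare minimum.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	size_t frame_size = 640 * 480 * 2;		/* assumed 640x480 YUYV */
	unsigned char *frame = malloc(frame_size);
	int fd = open("/dev/video0", O_RDONLY);		/* assumed device node */
	ssize_t n;

	if (fd < 0 || !frame)
		return 1;
	/* Each read() makes __vb2_perform_fileio() dequeue a filled buffer,
	 * copy it out and queue it again. */
	n = read(fd, frame, frame_size);
	printf("read %zd bytes\n", n);
	free(frame);
	close(fd);
	return 0;
}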
Code example #14
File: vivid-kthread-cap.c  Project: 168519/linux
static void vivid_fillbuff(struct vivid_dev *dev, struct vivid_buffer *buf)
{
	unsigned factor = V4L2_FIELD_HAS_T_OR_B(dev->field_cap) ? 2 : 1;
	unsigned line_height = 16 / factor;
	bool is_tv = vivid_is_sdtv_cap(dev);
	bool is_60hz = is_tv && (dev->std_cap & V4L2_STD_525_60);
	unsigned p;
	int line = 1;
	u8 *basep[TPG_MAX_PLANES][2];
	unsigned ms;
	char str[100];
	s32 gain;
	bool is_loop = false;

	if (dev->loop_video && dev->can_loop_video &&
	    ((vivid_is_svid_cap(dev) && !VIVID_INVALID_SIGNAL(dev->std_signal_mode)) ||
	     (vivid_is_hdmi_cap(dev) && !VIVID_INVALID_SIGNAL(dev->dv_timings_signal_mode))))
		is_loop = true;

	buf->vb.v4l2_buf.sequence = dev->vid_cap_seq_count;
	/*
	 * Take the timestamp now if the timestamp source is set to
	 * "Start of Exposure".
	 */
	if (dev->tstamp_src_is_soe)
		v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
	if (dev->field_cap == V4L2_FIELD_ALTERNATE) {
		/*
		 * 60 Hz standards start with the bottom field, 50 Hz standards
		 * with the top field. So if the 0-based seq_count is even,
		 * then the field is TOP for 50 Hz and BOTTOM for 60 Hz
		 * standards.
		 */
		buf->vb.v4l2_buf.field = ((dev->vid_cap_seq_count & 1) ^ is_60hz) ?
			V4L2_FIELD_TOP : V4L2_FIELD_BOTTOM;
		/*
		 * The sequence counter counts frames, not fields. So divide
		 * by two.
		 */
		buf->vb.v4l2_buf.sequence /= 2;
	} else {
		buf->vb.v4l2_buf.field = dev->field_cap;
	}
	tpg_s_field(&dev->tpg, buf->vb.v4l2_buf.field);
	tpg_s_perc_fill_blank(&dev->tpg, dev->must_blank[buf->vb.v4l2_buf.index]);

	vivid_precalc_copy_rects(dev);

	for (p = 0; p < tpg_g_planes(&dev->tpg); p++) {
		void *vbuf = vb2_plane_vaddr(&buf->vb, p);

		/*
		 * The first plane of a multiplanar format has a non-zero
		 * data_offset. This helps testing whether the application
		 * correctly supports non-zero data offsets.
		 */
		if (dev->fmt_cap->data_offset[p]) {
			memset(vbuf, dev->fmt_cap->data_offset[p] & 0xff,
			       dev->fmt_cap->data_offset[p]);
			vbuf += dev->fmt_cap->data_offset[p];
		}
		tpg_calc_text_basep(&dev->tpg, basep, p, vbuf);
		if (!is_loop || vivid_copy_buffer(dev, p, vbuf, buf))
			tpg_fillbuffer(&dev->tpg, vivid_get_std_cap(dev), p, vbuf);
	}
	dev->must_blank[buf->vb.v4l2_buf.index] = false;

	/* Update the stream time, but only at the start of a new frame. */
	if (dev->field_cap != V4L2_FIELD_ALTERNATE || (buf->vb.v4l2_buf.sequence & 1) == 0)
		dev->ms_vid_cap = jiffies_to_msecs(jiffies - dev->jiffies_vid_cap);

	ms = dev->ms_vid_cap;
	if (dev->osd_mode <= 1) {
		snprintf(str, sizeof(str), " %02d:%02d:%02d:%03d %u%s",
				(ms / (60 * 60 * 1000)) % 24,
				(ms / (60 * 1000)) % 60,
				(ms / 1000) % 60,
				ms % 1000,
				buf->vb.v4l2_buf.sequence,
				(dev->field_cap == V4L2_FIELD_ALTERNATE) ?
					(buf->vb.v4l2_buf.field == V4L2_FIELD_TOP ?
					 " top" : " bottom") : "");
		tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
	}
	if (dev->osd_mode == 0) {
		snprintf(str, sizeof(str), " %dx%d, input %d ",
				dev->src_rect.width, dev->src_rect.height, dev->input);
		tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);

		gain = v4l2_ctrl_g_ctrl(dev->gain);
		mutex_lock(dev->ctrl_hdl_user_vid.lock);
		snprintf(str, sizeof(str),
			" brightness %3d, contrast %3d, saturation %3d, hue %d ",
			dev->brightness->cur.val,
			dev->contrast->cur.val,
			dev->saturation->cur.val,
			dev->hue->cur.val);
		tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
		snprintf(str, sizeof(str),
			" autogain %d, gain %3d, alpha 0x%02x ",
			dev->autogain->cur.val, gain, dev->alpha->cur.val);
		mutex_unlock(dev->ctrl_hdl_user_vid.lock);
		tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
		mutex_lock(dev->ctrl_hdl_user_aud.lock);
		snprintf(str, sizeof(str),
			" volume %3d, mute %d ",
			dev->volume->cur.val, dev->mute->cur.val);
		mutex_unlock(dev->ctrl_hdl_user_aud.lock);
		tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
		mutex_lock(dev->ctrl_hdl_user_gen.lock);
		snprintf(str, sizeof(str), " int32 %d, int64 %lld, bitmask %08x ",
			dev->int32->cur.val,
			*dev->int64->p_cur.p_s64,
			dev->bitmask->cur.val);
		tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
		snprintf(str, sizeof(str), " boolean %d, menu %s, string \"%s\" ",
			dev->boolean->cur.val,
			dev->menu->qmenu[dev->menu->cur.val],
			dev->string->p_cur.p_char);
		tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
		snprintf(str, sizeof(str), " integer_menu %lld, value %d ",
			dev->int_menu->qmenu_int[dev->int_menu->cur.val],
			dev->int_menu->cur.val);
		mutex_unlock(dev->ctrl_hdl_user_gen.lock);
		tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
		if (dev->button_pressed) {
			dev->button_pressed--;
			snprintf(str, sizeof(str), " button pressed!");
			tpg_gen_text(&dev->tpg, basep, line++ * line_height, 16, str);
		}
	}

	/*
	 * If "End of Frame" is specified at the timestamp source, then take
	 * the timestamp now.
	 */
	if (!dev->tstamp_src_is_soe)
		v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
	buf->vb.v4l2_buf.timestamp.tv_sec += dev->time_wrap_offset;
}
Code example #15
File: videobuf2-v4l2.c  Project: Chong-Li/cse522
static int vb2_thread(void *data)
{
	struct vb2_queue *q = data;
	struct vb2_threadio_data *threadio = q->threadio;
	struct vb2_fileio_data *fileio = q->fileio;
	bool set_timestamp = false;
	int prequeue = 0;
	int index = 0;
	int ret = 0;

	if (q->is_output) {
		prequeue = q->num_buffers;
		set_timestamp =
			(q->timestamp_flags & V4L2_BUF_FLAG_TIMESTAMP_MASK) ==
			V4L2_BUF_FLAG_TIMESTAMP_COPY;
	}

	set_freezable();

	for (;;) {
		struct vb2_buffer *vb;

		/*
		 * Call vb2_dqbuf to get buffer back.
		 */
		memset(&fileio->b, 0, sizeof(fileio->b));
		fileio->b.type = q->type;
		fileio->b.memory = q->memory;
		if (prequeue) {
			fileio->b.index = index++;
			prequeue--;
		} else {
			call_void_qop(q, wait_finish, q);
			if (!threadio->stop)
				ret = vb2_internal_dqbuf(q, &fileio->b, 0);
			call_void_qop(q, wait_prepare, q);
			dprintk(5, "file io: vb2_dqbuf result: %d\n", ret);
		}
		if (ret || threadio->stop)
			break;
		try_to_freeze();

		vb = q->bufs[fileio->b.index];
		if (!(fileio->b.flags & V4L2_BUF_FLAG_ERROR))
			if (threadio->fnc(vb, threadio->priv))
				break;
		call_void_qop(q, wait_finish, q);
		if (set_timestamp)
			v4l2_get_timestamp(&fileio->b.timestamp);
		if (!threadio->stop)
			ret = vb2_internal_qbuf(q, &fileio->b);
		call_void_qop(q, wait_prepare, q);
		if (ret || threadio->stop)
			break;
	}

	/* Hmm, linux becomes *very* unhappy without this ... */
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}
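
vb2_thread() is the worker behind the in-kernel thread helpers; as far as I recall it is started and stopped with vb2_thread_start()/vb2_thread_stop(), and the per-buffer callback returns non-zero to end the loop. The sketch below assumes those signatures and the videobuf2-v4l2.h header location of this kernel generation; my_frame_cb() and the two wrappers are hypothetical.

#include <media/videobuf2-v4l2.h>

/* Hypothetical per-buffer callback invoked by vb2_thread() for every
 * dequeued buffer; returning non-zero stops the thread loop. */
static int my_frame_cb(struct vb2_buffer *vb, void *priv)
{
	/* consume vb2_plane_vaddr(vb, 0) here */
	return 0;
}

static int my_start(struct vb2_queue *q, void *priv)
{
	/* Spawns the kthread that runs vb2_thread() shown above. */
	return vb2_thread_start(q, my_frame_cb, priv, "my-vb2-thread");
}

static void my_stop(struct vb2_queue *q)
{
	vb2_thread_stop(q);
}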
Code example #16
/*
 * omap3isp_video_buffer_next - Complete the current buffer and return the next
 * @video: ISP video object
 *
 * Remove the current video buffer from the DMA queue and fill its timestamp and
 * field count before handing it back to videobuf2.
 *
 * For capture video nodes the buffer state is set to VB2_BUF_STATE_DONE if no
 * error has been flagged in the pipeline, or to VB2_BUF_STATE_ERROR otherwise.
 * For video output nodes the buffer state is always set to VB2_BUF_STATE_DONE.
 *
 * The DMA queue is expected to contain at least one buffer.
 *
 * Return a pointer to the next buffer in the DMA queue, or NULL if the queue is
 * empty.
 */
struct isp_buffer *omap3isp_video_buffer_next(struct isp_video *video)
{
	struct isp_pipeline *pipe = to_isp_pipeline(&video->video.entity);
	enum isp_pipeline_state state;
	struct isp_buffer *buf;
	unsigned long flags;

	spin_lock_irqsave(&video->irqlock, flags);
	if (WARN_ON(list_empty(&video->dmaqueue))) {
		spin_unlock_irqrestore(&video->irqlock, flags);
		return NULL;
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
			       irqlist);
	list_del(&buf->irqlist);
	spin_unlock_irqrestore(&video->irqlock, flags);

	v4l2_get_timestamp(&buf->vb.timestamp);

	/* Do frame number propagation only if this is the output video node.
	 * Frame number either comes from the CSI receivers or it gets
	 * incremented here if H3A is not active.
	 * Note: There is no guarantee that the output buffer will finish
	 * first, so the input number might lag behind by 1 in some cases.
	 */
	if (video == pipe->output && !pipe->do_propagation)
		buf->vb.sequence =
			atomic_inc_return(&pipe->frame_number);
	else
		buf->vb.sequence = atomic_read(&pipe->frame_number);

	if (pipe->field != V4L2_FIELD_NONE)
		buf->vb.sequence /= 2;

	buf->vb.field = pipe->field;

	/* Report pipeline errors to userspace on the capture device side. */
	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->error) {
		state = VB2_BUF_STATE_ERROR;
		pipe->error = false;
	} else {
		state = VB2_BUF_STATE_DONE;
	}

	vb2_buffer_done(&buf->vb.vb2_buf, state);

	spin_lock_irqsave(&video->irqlock, flags);

	if (list_empty(&video->dmaqueue)) {
		spin_unlock_irqrestore(&video->irqlock, flags);

		if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE)
			state = ISP_PIPELINE_QUEUE_OUTPUT
			      | ISP_PIPELINE_STREAM;
		else
			state = ISP_PIPELINE_QUEUE_INPUT
			      | ISP_PIPELINE_STREAM;

		spin_lock_irqsave(&pipe->lock, flags);
		pipe->state &= ~state;
		if (video->pipe.stream_state == ISP_PIPELINE_STREAM_CONTINUOUS)
			video->dmaqueue_flags |= ISP_VIDEO_DMAQUEUE_UNDERRUN;
		spin_unlock_irqrestore(&pipe->lock, flags);
		return NULL;
	}

	if (video->type == V4L2_BUF_TYPE_VIDEO_CAPTURE && pipe->input != NULL) {
		spin_lock(&pipe->lock);
		pipe->state &= ~ISP_PIPELINE_STREAM;
		spin_unlock(&pipe->lock);
	}

	buf = list_first_entry(&video->dmaqueue, struct isp_buffer,
			       irqlist);

	spin_unlock_irqrestore(&video->irqlock, flags);

	return buf;
}
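
Example #16 also illustrates the broader "complete the head of the DMA queue and pick the next buffer" pattern that many interrupt handlers on this page follow. Below is a condensed sketch of that flow, written against the older vb2 layout (vb.v4l2_buf) used by most of the other examples; my_dev, my_buffer and my_buffer_next() are assumed names, not omap3isp code.

#include <linux/list.h>
#include <linux/spinlock.h>
#include <media/v4l2-common.h>
#include <media/videobuf2-core.h>

struct my_buffer {
	struct vb2_buffer vb;
	struct list_head list;
};

struct my_dev {
	spinlock_t irqlock;
	struct list_head dmaqueue;
	unsigned int sequence;
};

/* Complete the buffer at the head of the DMA queue and return the next
 * queued buffer, or NULL if the queue is now empty. */
static struct my_buffer *my_buffer_next(struct my_dev *dev, bool error)
{
	struct my_buffer *buf, *next = NULL;
	unsigned long flags;

	spin_lock_irqsave(&dev->irqlock, flags);
	if (list_empty(&dev->dmaqueue)) {
		spin_unlock_irqrestore(&dev->irqlock, flags);
		return NULL;
	}
	buf = list_first_entry(&dev->dmaqueue, struct my_buffer, list);
	list_del(&buf->list);
	if (!list_empty(&dev->dmaqueue))
		next = list_first_entry(&dev->dmaqueue, struct my_buffer, list);
	spin_unlock_irqrestore(&dev->irqlock, flags);

	/* Timestamp and sequence number are filled right before completion. */
	v4l2_get_timestamp(&buf->vb.v4l2_buf.timestamp);
	buf->vb.v4l2_buf.sequence = dev->sequence++;
	vb2_buffer_done(&buf->vb, error ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);

	return next;
}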
Code example #17
File: pwc-if.c  Project: imcek/BEAGLEBONE_BSP
/* This gets called for the Isochronous pipe (video). This is done in
 * interrupt time, so it has to be fast, not crash, and not stall. Neat.
 */
static void pwc_isoc_handler(struct urb *urb)
{
	struct pwc_device *pdev = (struct pwc_device *)urb->context;
	int i, fst, flen;
	unsigned char *iso_buf = NULL;

	if (urb->status == -ENOENT || urb->status == -ECONNRESET ||
	    urb->status == -ESHUTDOWN) {
		PWC_DEBUG_OPEN("URB (%p) unlinked %ssynchronuously.\n", urb, urb->status == -ENOENT ? "" : "a");
		return;
	}

	if (pdev->fill_buf == NULL)
		pdev->fill_buf = pwc_get_next_fill_buf(pdev);

	if (urb->status != 0) {
		const char *errmsg;

		errmsg = "Unknown";
		switch(urb->status) {
			case -ENOSR:		errmsg = "Buffer error (overrun)"; break;
			case -EPIPE:		errmsg = "Stalled (device not responding)"; break;
			case -EOVERFLOW:	errmsg = "Babble (bad cable?)"; break;
			case -EPROTO:		errmsg = "Bit-stuff error (bad cable?)"; break;
			case -EILSEQ:		errmsg = "CRC/Timeout (could be anything)"; break;
			case -ETIME:		errmsg = "Device does not respond"; break;
		}
		PWC_ERROR("pwc_isoc_handler() called with status %d [%s].\n",
			  urb->status, errmsg);
		/* Give up after a number of contiguous errors */
		if (++pdev->visoc_errors > MAX_ISOC_ERRORS)
		{
			PWC_ERROR("Too many ISOC errors, bailing out.\n");
			if (pdev->fill_buf) {
				vb2_buffer_done(&pdev->fill_buf->vb,
						VB2_BUF_STATE_ERROR);
				pdev->fill_buf = NULL;
			}
		}
		pdev->vsync = 0; /* Drop the current frame */
		goto handler_end;
	}

	/* Reset ISOC error counter. We did get here, after all. */
	pdev->visoc_errors = 0;

	/* vsync: 0 = don't copy data
		  1 = sync-hunt
		  2 = synched
	 */
	/* Compact data */
	for (i = 0; i < urb->number_of_packets; i++) {
		fst  = urb->iso_frame_desc[i].status;
		flen = urb->iso_frame_desc[i].actual_length;
		iso_buf = urb->transfer_buffer + urb->iso_frame_desc[i].offset;
		if (fst != 0) {
			PWC_ERROR("Iso frame %d has error %d\n", i, fst);
			continue;
		}
		if (flen > 0 && pdev->vsync) {
			struct pwc_frame_buf *fbuf = pdev->fill_buf;

			if (pdev->vsync == 1) {
				v4l2_get_timestamp(
					&fbuf->vb.v4l2_buf.timestamp);
				pdev->vsync = 2;
			}

			if (flen + fbuf->filled > pdev->frame_total_size) {
				PWC_ERROR("Frame overflow (%d > %d)\n",
					  flen + fbuf->filled,
					  pdev->frame_total_size);
				pdev->vsync = 0; /* Let's wait for an EOF */
			} else {
				memcpy(fbuf->data + fbuf->filled, iso_buf,
				       flen);
				fbuf->filled += flen;
			}
		}
		if (flen < pdev->vlast_packet_size) {
			/* Shorter packet... end of frame */
			if (pdev->vsync == 2)
				pwc_frame_complete(pdev);
			if (pdev->fill_buf == NULL)
				pdev->fill_buf = pwc_get_next_fill_buf(pdev);
			if (pdev->fill_buf) {
				pdev->fill_buf->filled = 0;
				pdev->vsync = 1;
			}
		}
		pdev->vlast_packet_size = flen;
	}

handler_end:
	i = usb_submit_urb(urb, GFP_ATOMIC);
	if (i != 0)
		PWC_ERROR("Error (%d) re-submitting urb in pwc_isoc_handler.\n", i);
}
Code example #18
static void cobalt_dma_stream_queue_handler(struct cobalt_stream *s)
{
	struct cobalt *cobalt = s->cobalt;
	int rx = s->video_channel;
	struct m00473_freewheel_regmap __iomem *fw =
		COBALT_CVI_FREEWHEEL(s->cobalt, rx);
	struct m00233_video_measure_regmap __iomem *vmr =
		COBALT_CVI_VMR(s->cobalt, rx);
	struct m00389_cvi_regmap __iomem *cvi =
		COBALT_CVI(s->cobalt, rx);
	struct m00479_clk_loss_detector_regmap __iomem *clkloss =
		COBALT_CVI_CLK_LOSS(s->cobalt, rx);
	struct cobalt_buffer *cb;
	bool skip = false;

	spin_lock(&s->irqlock);

	if (list_empty(&s->bufs)) {
		pr_err("no buffers!\n");
		spin_unlock(&s->irqlock);
		return;
	}

	/* Give the fresh filled up buffer to the user.
	 * Note that the interrupt is only sent if the DMA can continue
	 * with a new buffer, so it is always safe to return this buffer
	 * to userspace. */
	cb = list_first_entry(&s->bufs, struct cobalt_buffer, list);
	list_del(&cb->list);
	spin_unlock(&s->irqlock);

	if (s->is_audio || s->is_output)
		goto done;

	if (s->unstable_frame) {
		uint32_t stat = ioread32(&vmr->irq_status);

		iowrite32(stat, &vmr->irq_status);
		if (!(ioread32(&vmr->status) &
		      M00233_STATUS_BITMAP_INIT_DONE_MSK)) {
			cobalt_dbg(1, "!init_done\n");
			if (s->enable_freewheel)
				goto restart_fw;
			goto done;
		}

		if (ioread32(&clkloss->status) &
		    M00479_STATUS_BITMAP_CLOCK_MISSING_MSK) {
			iowrite32(0, &clkloss->ctrl);
			iowrite32(M00479_CTRL_BITMAP_ENABLE_MSK, &clkloss->ctrl);
			cobalt_dbg(1, "no clock\n");
			if (s->enable_freewheel)
				goto restart_fw;
			goto done;
		}
		if ((stat & (M00233_IRQ_STATUS_BITMAP_VACTIVE_AREA_MSK |
			     M00233_IRQ_STATUS_BITMAP_HACTIVE_AREA_MSK)) ||
				ioread32(&vmr->vactive_area) != s->timings.bt.height ||
				ioread32(&vmr->hactive_area) != s->timings.bt.width) {
			cobalt_dbg(1, "unstable\n");
			if (s->enable_freewheel)
				goto restart_fw;
			goto done;
		}
		if (!s->enable_cvi) {
			s->enable_cvi = true;
			iowrite32(M00389_CONTROL_BITMAP_ENABLE_MSK, &cvi->control);
			goto done;
		}
		if (!(ioread32(&cvi->status) & M00389_STATUS_BITMAP_LOCK_MSK)) {
			cobalt_dbg(1, "cvi no lock\n");
			if (s->enable_freewheel)
				goto restart_fw;
			goto done;
		}
		if (!s->enable_freewheel) {
			cobalt_dbg(1, "stable\n");
			s->enable_freewheel = true;
			iowrite32(0, &fw->ctrl);
			goto done;
		}
		cobalt_dbg(1, "enabled fw\n");
		iowrite32(M00233_CONTROL_BITMAP_ENABLE_MEASURE_MSK |
			  M00233_CONTROL_BITMAP_ENABLE_INTERRUPT_MSK,
			  &vmr->control);
		iowrite32(M00473_CTRL_BITMAP_ENABLE_MSK, &fw->ctrl);
		s->enable_freewheel = false;
		s->unstable_frame = false;
		s->skip_first_frames = 2;
		skip = true;
		goto done;
	}
	if (ioread32(&fw->status) & M00473_STATUS_BITMAP_FREEWHEEL_MODE_MSK) {
restart_fw:
		cobalt_dbg(1, "lost lock\n");
		iowrite32(M00233_CONTROL_BITMAP_ENABLE_MEASURE_MSK,
			  &vmr->control);
		iowrite32(M00473_CTRL_BITMAP_ENABLE_MSK |
			  M00473_CTRL_BITMAP_FORCE_FREEWHEEL_MODE_MSK,
			  &fw->ctrl);
		iowrite32(0, &cvi->control);
		s->unstable_frame = true;
		s->enable_freewheel = false;
		s->enable_cvi = false;
	}
done:
	if (s->skip_first_frames) {
		skip = true;
		s->skip_first_frames--;
	}
	v4l2_get_timestamp(&cb->vb.v4l2_buf.timestamp);
	/* TODO: the sequence number should be read from the FPGA so we
	   also know about dropped frames. */
	cb->vb.v4l2_buf.sequence = s->sequence++;
	vb2_buffer_done(&cb->vb, (skip || s->unstable_frame) ?
			VB2_BUF_STATE_QUEUED : VB2_BUF_STATE_DONE);
}
Code example #19
File: omap1_camera.c  Project: 3null/linux
static void videobuf_done(struct omap1_cam_dev *pcdev,
		enum videobuf_state result)
{
	struct omap1_cam_buf *buf = pcdev->active;
	struct videobuf_buffer *vb;
	struct device *dev = pcdev->soc_host.icd->parent;

	if (WARN_ON(!buf)) {
		suspend_capture(pcdev);
		disable_capture(pcdev);
		return;
	}

	if (result == VIDEOBUF_ERROR)
		suspend_capture(pcdev);

	vb = &buf->vb;
	if (waitqueue_active(&vb->done)) {
		if (!pcdev->ready && result != VIDEOBUF_ERROR) {
			/*
			 * No next buffer has been entered into the DMA
			 * programming register set on time (could be done only
			 * while the previous DMA interrupt was processed, not
			 * later), so the last DMA block, be it a whole buffer
			 * if in CONTIG or its last sgbuf if in SG mode, is
			 * about to be reused by the just autoreinitialized DMA
			 * engine, and overwritten with next frame data. Best we
			 * can do is stopping the capture as soon as possible,
			 * hopefully before the next frame start.
			 */
			suspend_capture(pcdev);
		}
		vb->state = result;
		v4l2_get_timestamp(&vb->ts);
		if (result != VIDEOBUF_ERROR)
			vb->field_count++;
		wake_up(&vb->done);

		/* shift in next buffer */
		buf = pcdev->ready;
		pcdev->active = buf;
		pcdev->ready = NULL;

		if (!buf) {
			/*
			 * No next buffer was ready on time (see above), so
			 * indicate an error condition to force a capture restart or
			 * stop, depending on whether a next buffer is already queued.
			 */
			result = VIDEOBUF_ERROR;
			prepare_next_vb(pcdev);

			buf = pcdev->ready;
			pcdev->active = buf;
			pcdev->ready = NULL;
		}
	} else if (pcdev->ready) {
		/*
		 * In both CONTIG and SG mode, the DMA engine has possibly
		 * been already autoreinitialized with the preprogrammed
		 * pcdev->ready buffer.  We can either accept this fact
		 * and just swap the buffers, or provoke an error condition
		 * and restart capture.  The former seems less intrusive.
		 */
		dev_dbg(dev, "%s: nobody waiting on videobuf, swap with next\n",
				__func__);
		pcdev->active = pcdev->ready;

		if (pcdev->vb_mode == OMAP1_CAM_DMA_SG) {
			/*
			 * In SG mode, we have to make sure that the buffer we
			 * are putting back into the pcdev->ready is marked
			 * fresh.
			 */
			buf->sgbuf = NULL;
		}
		pcdev->ready = buf;

		buf = pcdev->active;
	} else {
		/*
		 * No next buffer has been entered into
		 * the DMA programming register set on time.
		 */
		if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
			/*
			 * In CONTIG mode, the DMA engine has already been
			 * reinitialized with the current buffer. Best we can do
			 * is not touching it.
			 */
			dev_dbg(dev,
				"%s: nobody waiting on videobuf, reuse it\n",
				__func__);
		} else {
			/*
			 * In SG mode, the DMA engine has just been
			 * autoreinitialized with the last sgbuf from the
			 * current list. Restart capture in order to transfer
			 * next frame start into the first sgbuf, not the last
			 * one.
			 */
			if (result != VIDEOBUF_ERROR) {
				suspend_capture(pcdev);
				result = VIDEOBUF_ERROR;
			}
		}
	}

	if (!buf) {
		dev_dbg(dev, "%s: no more videobufs, stop capture\n", __func__);
		disable_capture(pcdev);
		return;
	}

	if (pcdev->vb_mode == OMAP1_CAM_DMA_CONTIG) {
		/*
		 * In CONTIG mode, the current buffer parameters had already
		 * been entered into the DMA programming register set while the
		 * buffer was fetched with prepare_next_vb(), they may have also
		 * been transferred into the runtime set and already active if
		 * the DMA still running.
		 */
	} else {
		/* In SG mode, extra steps are required */
		if (result == VIDEOBUF_ERROR)
			/* make sure we (re)use sglist from start on error */
			buf->sgbuf = NULL;

		/*
		 * In any case, enter the next sgbuf parameters into the DMA
		 * programming register set.  They will be used either during
		 * nearest DMA autoreinitialization or, in case of an error,
		 * on DMA startup below.
		 */
		try_next_sgbuf(pcdev->dma_ch, buf);
	}

	if (result == VIDEOBUF_ERROR) {
		dev_dbg(dev, "%s: videobuf error; reset FIFO, restart DMA\n",
				__func__);
		start_capture(pcdev);
		/*
		 * In SG mode, the above also resulted in the next sgbuf
		 * parameters being entered into the DMA programming register
		 * set, making them ready for next DMA autoreinitialization.
		 */
	}

	/*
	 * Finally, try fetching next buffer.
	 * In CONTIG mode, it will also enter it into the DMA programming
	 * register set, making it ready for next DMA autoreinitialization.
	 */
	prepare_next_vb(pcdev);
}