Code example #1
static void socle_camera_capture(struct socle_camera_dev *pcdev)
{
	if (pcdev->active) {
		vip_write(pcdev, SOCLE_VIP_CAPTURE_F1SA_Y, (u32)videobuf_to_dma_contig(pcdev->active));
		vip_write(pcdev, SOCLE_VIP_CAPTURE_F1SA_Cb, (u32)videobuf_to_dma_contig(pcdev->active)+pcdev->cb_off);
		vip_write(pcdev, SOCLE_VIP_CAPTURE_F1SA_Cr, (u32)videobuf_to_dma_contig(pcdev->active)+pcdev->cr_off);
		vip_write(pcdev, SOCLE_VIP_FB_SR, vip_read(pcdev, SOCLE_VIP_FB_SR) & ~0x1);
	}
}
Code example #2
File: vpbe_display.c Project: AllenDou/linux
static void vpbe_isr_odd_field(struct vpbe_display *disp_obj,
				struct vpbe_layer *layer)
{
	struct osd_state *osd_device = disp_obj->osd_device;
	unsigned long addr;

	spin_lock(&disp_obj->dma_queue_lock);
	if (list_empty(&layer->dma_queue) ||
		(layer->cur_frm != layer->next_frm)) {
		spin_unlock(&disp_obj->dma_queue_lock);
		return;
	}
	/*
	 * One field is displayed; configure the next frame if it is
	 * available, otherwise hold on to the current frame.
	 * Get the next buffer from the buffer queue.
	 */
	layer->next_frm = list_entry(
				layer->dma_queue.next,
				struct  videobuf_buffer,
				queue);
	/* Remove that from the buffer queue */
	list_del(&layer->next_frm->queue);
	spin_unlock(&disp_obj->dma_queue_lock);
	/* Mark state of the frame to active */
	layer->next_frm->state = VIDEOBUF_ACTIVE;
	addr = videobuf_to_dma_contig(layer->next_frm);
	osd_device->ops.start_layer(osd_device,
			layer->layer_info.id,
			addr,
			disp_obj->cbcr_ofst);
}
Code example #3
File: mx2_camera.c Project: 125radheyshyam/linux
static void mx27_camera_frame_done_emma(struct mx2_camera_dev *pcdev,
		int bufnum, int state)
{
	struct mx2_buffer *buf;
	struct videobuf_buffer *vb;
	unsigned long phys;

	if (!list_empty(&pcdev->active_bufs)) {
		buf = list_entry(pcdev->active_bufs.next,
			struct mx2_buffer, vb.queue);

		BUG_ON(buf->bufnum != bufnum);

		vb = &buf->vb;
#ifdef DEBUG
		phys = videobuf_to_dma_contig(vb);
		if (readl(pcdev->base_emma + PRP_DEST_RGB1_PTR + 4 * bufnum)
				!= phys) {
			dev_err(pcdev->dev, "0x%08lx != 0x%08x\n", phys,
					readl(pcdev->base_emma +
						PRP_DEST_RGB1_PTR +
						4 * bufnum));
		}
#endif
		dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, vb,
				vb->baddr, vb->bsize);

		list_del(&vb->queue);
		vb->state = state;
		do_gettimeofday(&vb->ts);
		vb->field_count++;

		wake_up(&vb->done);
	}
}
Code example #4
static void vpbe_isr_odd_field(struct vpbe_display *disp_obj,
				struct vpbe_layer *layer)
{
	struct osd_state *osd_device = disp_obj->osd_device;
	unsigned long addr;

	spin_lock(&disp_obj->dma_queue_lock);
	if (list_empty(&layer->dma_queue) ||
		(layer->cur_frm != layer->next_frm)) {
		spin_unlock(&disp_obj->dma_queue_lock);
		return;
	}
	layer->next_frm = list_entry(
				layer->dma_queue.next,
				struct  videobuf_buffer,
				queue);
	
	list_del(&layer->next_frm->queue);
	spin_unlock(&disp_obj->dma_queue_lock);
	
	layer->next_frm->state = VIDEOBUF_ACTIVE;
	addr = videobuf_to_dma_contig(layer->next_frm);
	osd_device->ops.start_layer(osd_device,
			layer->layer_info.id,
			addr,
			disp_obj->cbcr_ofst);
}
Code example #5
File: ak_camera.c Project: Lamobo/Lamobo-D1
/**
 * @brief: IRQ handler helper; sets up the next DMA addresses so the camera continues data collection
 * 
 * @author: caolianming
 * @date: 2014-01-06
 * @param [in] *pcdev: ak camera driver structure, including the soc camera structure
 */
static int ak_camera_setup_dma(struct ak_camera_dev *pcdev)
{
	struct videobuf_buffer *vb_active = &pcdev->active->vb;
	struct videobuf_buffer *vb;
	struct list_head *next;
	unsigned long yaddr_chl1_active, yaddr_chl2_active; 
	unsigned long yaddr_chl1_next, yaddr_chl2_next;
	int size;

	size = vb_active->width * vb_active->height;
	yaddr_chl1_active = videobuf_to_dma_contig(vb_active);
	yaddr_chl2_active = yaddr_chl1_active + size * 3 / 2;

	/* for single mode */
	if (!isp_is_continuous(&pcdev->isp)) {
		isp_set_even_frame(&pcdev->isp, yaddr_chl1_active, yaddr_chl2_active);
		isp_update_regtable(&pcdev->isp, 1);
		isp_start_capturing(&pcdev->isp);
		return 0;
	}

	/* ISP is in the continuous mode */
	next = pcdev->capture.next;
	next = next->next;
	if (next == &pcdev->capture) {
		isp_dbg("irq: the next vbuf is empty.\n");
		//isp_stop_capturing(&pcdev->isp);
		irq_buf_empty_flag = 1;
		irq_need_baffle = 1;
		pcdev->dma_running = 0;
		goto out;
	} else 
		irq_buf_empty_flag = 0;
	
	vb = list_entry(next, struct videobuf_buffer, queue);

	/* setup the DMA address for transferring */
	yaddr_chl1_next = videobuf_to_dma_contig(vb);
	yaddr_chl2_next = yaddr_chl1_next + size * 3 / 2;
	if (isp_is_capturing_odd(&pcdev->isp))
		isp_set_even_frame(&pcdev->isp, yaddr_chl1_next, yaddr_chl2_next);
	else 
		isp_set_odd_frame(&pcdev->isp, yaddr_chl1_next, yaddr_chl2_next);
out:	
	isp_update_regtable(&pcdev->isp, 0);
	return 0;
}
Code example #6
static void solo_fillbuf(struct solo_filehandle *fh,
			 struct videobuf_buffer *vb)
{
	struct solo6010_dev *solo_dev = fh->solo_dev;
	dma_addr_t vbuf;
	unsigned int fdma_addr;
	int frame_size;
	int error = 1;
	int i;

	if (!(vbuf = videobuf_to_dma_contig(vb)))
		goto finish_buf;

	if (erase_off(solo_dev)) {
		void *p = videobuf_queue_to_vaddr(&fh->vidq, vb);
		int image_size = solo_image_size(solo_dev);
		for (i = 0; i < image_size; i += 2) {
			((u8 *)p)[i] = 0x80;
			((u8 *)p)[i + 1] = 0x00;
		}
		error = 0;
		goto finish_buf;
	}

	frame_size = SOLO_HW_BPL * solo_vlines(solo_dev);
	fdma_addr = SOLO_DISP_EXT_ADDR(solo_dev) + (fh->old_write * frame_size);

	for (i = 0; i < frame_size / SOLO_DISP_BUF_SIZE; i++) {
		int j;
		for (j = 0; j < (SOLO_DISP_BUF_SIZE / SOLO_HW_BPL); j++) {
			if (solo_p2m_dma_t(solo_dev, SOLO_P2M_DMA_ID_DISP, 0,
					   vbuf, fdma_addr + (j * SOLO_HW_BPL),
					   solo_bytesperline(solo_dev)))
				goto finish_buf;
			vbuf += solo_bytesperline(solo_dev);
		}
		fdma_addr += SOLO_DISP_BUF_SIZE;
	}
	error = 0;

finish_buf:
	if (error) {
		vb->state = VIDEOBUF_ERROR;
	} else {
		vb->state = VIDEOBUF_DONE;
		vb->field_count++;
		do_gettimeofday(&vb->ts);
	}

	wake_up(&vb->done);

	return;
}
Code example #7
File: mx2_camera.c Project: 125radheyshyam/linux
static void mx27_camera_frame_done(struct mx2_camera_dev *pcdev, int state)
{
	struct videobuf_buffer *vb;
	struct mx2_buffer *buf;
	unsigned long flags;
	int ret;

	spin_lock_irqsave(&pcdev->lock, flags);

	if (!pcdev->active) {
		dev_err(pcdev->dev, "%s called with no active buffer!\n",
				__func__);
		goto out;
	}

	vb = &pcdev->active->vb;
	buf = container_of(vb, struct mx2_buffer, vb);
	WARN_ON(list_empty(&vb->queue));
	dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
		vb, vb->baddr, vb->bsize);

	/* _init is used to debug races, see comment in pxa_camera_reqbufs() */
	list_del_init(&vb->queue);
	vb->state = state;
	do_gettimeofday(&vb->ts);
	vb->field_count++;

	wake_up(&vb->done);

	if (list_empty(&pcdev->capture)) {
		pcdev->active = NULL;
		goto out;
	}

	pcdev->active = list_entry(pcdev->capture.next,
			struct mx2_buffer, vb.queue);

	vb = &pcdev->active->vb;
	vb->state = VIDEOBUF_ACTIVE;

	ret = imx_dma_setup_single(pcdev->dma, videobuf_to_dma_contig(vb),
			vb->size, (u32)pcdev->base_dma + 0x10, DMA_MODE_READ);

	if (ret) {
		vb->state = VIDEOBUF_ERROR;
		pcdev->active = NULL;
		wake_up(&vb->done);
	}

out:
	spin_unlock_irqrestore(&pcdev->lock, flags);
}
Code example #8
static void sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
{
	ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~1);
	ceu_write(pcdev, CETCR, ~ceu_read(pcdev, CETCR) & 0x0317f313);
	ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | 1);

	ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~0x10000);

	ceu_write(pcdev, CETCR, 0x0317f313 ^ 0x10);

	if (pcdev->active) {
		pcdev->active->state = VIDEOBUF_ACTIVE;
		ceu_write(pcdev, CDAYR, videobuf_to_dma_contig(pcdev->active));
		ceu_write(pcdev, CAPSR, 0x1); /* start capture */
	}
}
Code example #9
File: fimc-core.c Project: jeehong/kernel
/* The color format (planes_cnt, buff_cnt) must already be configured. */
static int fimc_prepare_addr(struct fimc_ctx *ctx,
                             struct fimc_vid_buffer *buf, enum v4l2_buf_type type)
{
    struct fimc_frame *frame;
    struct fimc_addr *paddr;
    u32 pix_size;
    int ret = 0;

    frame = ctx_m2m_get_frame(ctx, type);
    if (IS_ERR(frame))
        return PTR_ERR(frame);
    paddr = &frame->paddr;

    if (!buf)
        return -EINVAL;

    pix_size = frame->width * frame->height;

    dbg("buff_cnt= %d, planes_cnt= %d, frame->size= %d, pix_size= %d",
        frame->fmt->buff_cnt, frame->fmt->planes_cnt,
        frame->size, pix_size);

    if (frame->fmt->buff_cnt == 1) {
        paddr->y = videobuf_to_dma_contig(&buf->vb);
        switch (frame->fmt->planes_cnt) {
        case 1:
            paddr->cb = 0;
            paddr->cr = 0;
            break;
        case 2:
            /* decompose Y into Y/Cb */
            paddr->cb = (u32)(paddr->y + pix_size);
            paddr->cr = 0;
            break;
        case 3:
            paddr->cb = (u32)(paddr->y + pix_size);
            /* decompose Y into Y/Cb/Cr */
            if (S5P_FIMC_YCBCR420 == frame->fmt->color)
                paddr->cr = (u32)(paddr->cb
                                  + (pix_size >> 2));
            else /* 422 */
                paddr->cr = (u32)(paddr->cb
                                  + (pix_size >> 1));
            break;
        default:
            return -EINVAL;
        }
Code example #10
File: omap1_camera.c Project: andi34/Dhollmen_Kernel
static void set_dma_dest_params(int dma_ch, struct omap1_cam_buf *buf,
		enum omap1_cam_vb_mode vb_mode)
{
	dma_addr_t dma_addr;
	unsigned int block_size;

	if (vb_mode == OMAP1_CAM_DMA_CONTIG) {
		dma_addr = videobuf_to_dma_contig(&buf->vb);
		block_size = buf->vb.size;
	} else {
		if (WARN_ON(!buf->sgbuf)) {
			buf->result = VIDEOBUF_ERROR;
			return;
		}
		dma_addr = sg_dma_address(buf->sgbuf);
		if (WARN_ON(!dma_addr)) {
			buf->sgbuf = NULL;
			buf->result = VIDEOBUF_ERROR;
			return;
		}
		block_size = sg_dma_len(buf->sgbuf);
		if (WARN_ON(!block_size)) {
			buf->sgbuf = NULL;
			buf->result = VIDEOBUF_ERROR;
			return;
		}
		if (unlikely(buf->bytes_left < block_size))
			block_size = buf->bytes_left;
		if (WARN_ON(dma_addr & (DMA_FRAME_SIZE(vb_mode) *
				DMA_ELEMENT_SIZE - 1))) {
			dma_addr = ALIGN(dma_addr, DMA_FRAME_SIZE(vb_mode) *
					DMA_ELEMENT_SIZE);
			block_size &= ~(DMA_FRAME_SIZE(vb_mode) *
					DMA_ELEMENT_SIZE - 1);
		}
		buf->bytes_left -= block_size;
		buf->sgcount++;
	}

	omap_set_dma_dest_params(dma_ch,
		OMAP_DMA_PORT_EMIFF, OMAP_DMA_AMODE_POST_INC, dma_addr, 0, 0);
	omap_set_dma_transfer_params(dma_ch,
		OMAP_DMA_DATA_TYPE_S32, DMA_FRAME_SIZE(vb_mode),
		block_size >> (DMA_FRAME_SHIFT(vb_mode) + DMA_ELEMENT_SHIFT),
		DMA_SYNC, 0, 0);
}
Code example #11
File: vpif_display.c Project: 119-org/hi3518-osdrv
static void process_progressive_mode(struct common_obj *common)
{
	unsigned long addr = 0;

	/* Get the next buffer from buffer queue */
	common->next_frm = list_entry(common->dma_queue.next,
				struct videobuf_buffer, queue);
	/* Remove that buffer from the buffer queue */
	list_del(&common->next_frm->queue);
	/* Mark status of the buffer as active */
	common->next_frm->state = VIDEOBUF_ACTIVE;

	/* Set top and bottom field addrs in VPIF registers */
	addr = videobuf_to_dma_contig(common->next_frm);
	common->set_addr(addr + common->ytop_off,
				 addr + common->ybtm_off,
				 addr + common->ctop_off,
				 addr + common->cbtm_off);
}
Code example #12
File: mx2_camera.c Project: 125radheyshyam/linux
static void mx25_camera_frame_done(struct mx2_camera_dev *pcdev, int fb,
		int state)
{
	struct videobuf_buffer *vb;
	struct mx2_buffer *buf;
	struct mx2_buffer **fb_active = fb == 1 ? &pcdev->fb1_active :
		&pcdev->fb2_active;
	u32 fb_reg = fb == 1 ? CSIDMASA_FB1 : CSIDMASA_FB2;
	unsigned long flags;

	spin_lock_irqsave(&pcdev->lock, flags);

	if (*fb_active == NULL)
		goto out;

	vb = &(*fb_active)->vb;
	dev_dbg(pcdev->dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
		vb, vb->baddr, vb->bsize);

	vb->state = state;
	do_gettimeofday(&vb->ts);
	vb->field_count++;

	wake_up(&vb->done);

	if (list_empty(&pcdev->capture)) {
		buf = NULL;
		writel(0, pcdev->base_csi + fb_reg);
	} else {
		buf = list_entry(pcdev->capture.next, struct mx2_buffer,
				vb.queue);
		vb = &buf->vb;
		list_del(&vb->queue);
		vb->state = VIDEOBUF_ACTIVE;
		writel(videobuf_to_dma_contig(vb), pcdev->base_csi + fb_reg);
	}

	*fb_active = buf;

out:
	spin_unlock_irqrestore(&pcdev->lock, flags);
}
Code example #13
/**
 * vpif_schedule_next_buffer: set next buffer address for capture
 * @common : ptr to common channel object
 *
 * This function will get next buffer from the dma queue and
 * set the buffer address in the vpif register for capture.
 * the buffer is marked active
 */
static void vpif_schedule_next_buffer(struct common_obj *common)
{
	unsigned long addr = 0;

	common->next_frm = list_entry(common->dma_queue.next,
				     struct videobuf_buffer, queue);
	/* Remove that buffer from the buffer queue */
	list_del(&common->next_frm->queue);
	common->next_frm->state = VIDEOBUF_ACTIVE;
	if (V4L2_MEMORY_USERPTR == common->memory)
		addr = common->next_frm->boff;
	else
		addr = videobuf_to_dma_contig(common->next_frm);

	/* Set top and bottom field addresses in VPIF registers */
	common->set_addr(addr + common->ytop_off,
			 addr + common->ybtm_off,
			 addr + common->ctop_off,
			 addr + common->cbtm_off);
}
Code example #14
static void vpif_schedule_next_buffer(struct common_obj *common)
{
	unsigned long addr = 0;

	common->next_frm = list_entry(common->dma_queue.next,
				     struct videobuf_buffer, queue);
	
	list_del(&common->next_frm->queue);
	common->next_frm->state = VIDEOBUF_ACTIVE;
	if (V4L2_MEMORY_USERPTR == common->memory)
		addr = common->next_frm->boff;
	else
		addr = videobuf_to_dma_contig(common->next_frm);

	
	common->set_addr(addr + common->ytop_off,
			 addr + common->ybtm_off,
			 addr + common->ctop_off,
			 addr + common->cbtm_off);
}
Code example #15
static void sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
{
	struct soc_camera_device *icd = pcdev->icd;
	dma_addr_t phys_addr_top, phys_addr_bottom;

	/* The hardware is _very_ picky about this sequence. Especially
	 * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge
	 * several not-so-well documented interrupt sources in CETCR.
	 */
	ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_CPEIE);
	ceu_write(pcdev, CETCR, ~ceu_read(pcdev, CETCR) & CEU_CETCR_MAGIC);
	ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_CPEIE);
	ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP);
	ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW);

	if (!pcdev->active)
		return;

	phys_addr_top = videobuf_to_dma_contig(pcdev->active);
	ceu_write(pcdev, CDAYR, phys_addr_top);
	if (pcdev->is_interlaced) {
		phys_addr_bottom = phys_addr_top + icd->width;
		ceu_write(pcdev, CDBYR, phys_addr_bottom);
	}

	switch (icd->current_fmt->fourcc) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		phys_addr_top += icd->width * icd->height;
		ceu_write(pcdev, CDACR, phys_addr_top);
		if (pcdev->is_interlaced) {
			phys_addr_bottom = phys_addr_top + icd->width;
			ceu_write(pcdev, CDBCR, phys_addr_bottom);
		}
	}

	pcdev->active->state = VIDEOBUF_ACTIVE;
	ceu_write(pcdev, CAPSR, 0x1); /* start capture */
}
Code example #16
File: mx1_camera.c Project: 03199618/linux
static int mx1_camera_setup_dma(struct mx1_camera_dev *pcdev)
{
	struct videobuf_buffer *vbuf = &pcdev->active->vb;
	struct device *dev = pcdev->soc_host.icd->parent;
	int ret;

	if (unlikely(!pcdev->active)) {
		dev_err(dev, "DMA End IRQ with no active buffer\n");
		return -EFAULT;
	}

	/* setup sg list for future DMA */
	ret = imx_dma_setup_single(pcdev->dma_chan,
		videobuf_to_dma_contig(vbuf),
		vbuf->size, pcdev->res->start +
		CSIRXR, DMA_MODE_READ);
	if (unlikely(ret))
		dev_err(dev, "Failed to setup DMA sg list\n");

	return ret;
}
Code example #17
static void sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
{
	struct soc_camera_device *icd = pcdev->icd;
	dma_addr_t phys_addr_top, phys_addr_bottom;

	
	ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_CPEIE);
	ceu_write(pcdev, CETCR, ~ceu_read(pcdev, CETCR) & CEU_CETCR_MAGIC);
	ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_CPEIE);
	ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP);
	ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW);

	if (!pcdev->active)
		return;

	phys_addr_top = videobuf_to_dma_contig(pcdev->active);
	ceu_write(pcdev, CDAYR, phys_addr_top);
	if (pcdev->is_interlaced) {
		phys_addr_bottom = phys_addr_top + icd->user_width;
		ceu_write(pcdev, CDBYR, phys_addr_bottom);
	}

	switch (icd->current_fmt->fourcc) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		phys_addr_top += icd->user_width *
			icd->user_height;
		ceu_write(pcdev, CDACR, phys_addr_top);
		if (pcdev->is_interlaced) {
			phys_addr_bottom = phys_addr_top +
				icd->user_width;
			ceu_write(pcdev, CDBCR, phys_addr_bottom);
		}
	}

	pcdev->active->state = VIDEOBUF_ACTIVE;
	ceu_write(pcdev, CAPSR, 0x1); 
}
Code example #18
File: v4l2.c Project: alexxxwork/solo6x10
static void solo_fillbuf(struct solo_filehandle *fh,
			 struct videobuf_buffer *vb)
{
	struct solo_dev *solo_dev = fh->solo_dev;
	dma_addr_t vbuf;
	unsigned int fdma_addr;
	int error = -1;
	int i;

	vbuf = videobuf_to_dma_contig(vb);
	if (!vbuf)
		goto finish_buf;

	if (erase_off(solo_dev)) {
		void *p = videobuf_queue_to_vaddr(&fh->vidq, vb);
		int image_size = solo_image_size(solo_dev);
		for (i = 0; i < image_size; i += 2) {
			((u8 *)p)[i] = 0x80;
			((u8 *)p)[i + 1] = 0x00;
		}
		error = 0;
	} else {
		fdma_addr = SOLO_DISP_EXT_ADDR + (fh->old_write *
				(SOLO_HW_BPL * solo_vlines(solo_dev)));

		error = solo_p2m_dma_t(solo_dev, 0, vbuf, fdma_addr,
				       solo_bytesperline(solo_dev),
				       solo_vlines(solo_dev), SOLO_HW_BPL);
	}

finish_buf:
	if (error) {
		vb->state = VIDEOBUF_ERROR;
	} else {
		vb->state = VIDEOBUF_DONE;
		vb->field_count++;
	}

	wake_up(&vb->done);
}
Code example #19
static int vpif_streamon(struct file *file, void *priv,
				enum v4l2_buf_type buftype)
{

	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct channel_obj *oth_ch = vpif_obj.dev[!ch->channel_id];
	struct vpif_params *vpif;
	unsigned long addr = 0;
	int ret = 0;

	vpif_dbg(2, debug, "vpif_streamon\n");

	vpif = &ch->vpifparams;

	if (buftype != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		vpif_dbg(1, debug, "buffer type not supported\n");
		return -EINVAL;
	}

	
	if (!fh->io_allowed[VPIF_VIDEO_INDEX]) {
		vpif_dbg(1, debug, "io not allowed\n");
		return -EACCES;
	}

	
	if (common->started) {
		vpif_dbg(1, debug, "channel->started\n");
		return -EBUSY;
	}

	if ((ch->channel_id == VPIF_CHANNEL0_VIDEO &&
	    oth_ch->common[VPIF_VIDEO_INDEX].started &&
	    vpif->std_info.ycmux_mode == 0) ||
	   ((ch->channel_id == VPIF_CHANNEL1_VIDEO) &&
	    (2 == oth_ch->common[VPIF_VIDEO_INDEX].started))) {
		vpif_dbg(1, debug, "other channel is being used\n");
		return -EBUSY;
	}

	ret = vpif_check_format(ch, &common->fmt.fmt.pix, 0);
	if (ret)
		return ret;

	
	ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video,
				s_stream, 1);

	if (ret && (ret != -ENOIOCTLCMD)) {
		vpif_dbg(1, debug, "stream on failed in subdev\n");
		return ret;
	}

	
	ret = videobuf_streamon(&common->buffer_queue);
	if (ret) {
		vpif_dbg(1, debug, "videobuf_streamon\n");
		return ret;
	}

	
	if (list_empty(&common->dma_queue)) {
		vpif_dbg(1, debug, "buffer queue is empty\n");
		ret = -EIO;
		goto exit;
	}

	
	common->cur_frm = list_entry(common->dma_queue.next,
				    struct videobuf_buffer, queue);
	common->next_frm = common->cur_frm;

	
	list_del(&common->cur_frm->queue);
	
	common->cur_frm->state = VIDEOBUF_ACTIVE;
	
	ch->field_id = 0;
	common->started = 1;

	if (V4L2_MEMORY_USERPTR == common->memory)
		addr = common->cur_frm->boff;
	else
		addr = videobuf_to_dma_contig(common->cur_frm);

	
	vpif_calculate_offsets(ch);

	if ((vpif->std_info.frm_fmt &&
	    ((common->fmt.fmt.pix.field != V4L2_FIELD_NONE) &&
	     (common->fmt.fmt.pix.field != V4L2_FIELD_ANY))) ||
	    (!vpif->std_info.frm_fmt &&
	     (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) {
		vpif_dbg(1, debug, "conflict in field format and std format\n");
		ret = -EINVAL;
		goto exit;
	}

	
	ret = config->setup_input_channel_mode(vpif->std_info.ycmux_mode);

	if (ret < 0) {
		vpif_dbg(1, debug, "can't set vpif channel mode\n");
		goto exit;
	}

	
	ret = vpif_set_video_params(vpif, ch->channel_id);

	if (ret < 0) {
		vpif_dbg(1, debug, "can't set video params\n");
		goto exit;
	}

	common->started = ret;
	vpif_config_addr(ch, ret);

	common->set_addr(addr + common->ytop_off,
			 addr + common->ybtm_off,
			 addr + common->ctop_off,
			 addr + common->cbtm_off);

	if ((VPIF_CHANNEL0_VIDEO == ch->channel_id)) {
		channel0_intr_assert();
		channel0_intr_enable(1);
		enable_channel0(1);
	}
	if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
	    (common->started == 2)) {
		channel1_intr_assert();
		channel1_intr_enable(1);
		enable_channel1(1);
	}
	channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
	return ret;

exit:
	videobuf_streamoff(&common->buffer_queue);
	return ret;
}
Code example #20
	
	if (VIDEOBUF_NEEDS_INIT == vb->state) {
		vb->width = layer->pix_fmt.width;
		vb->height = layer->pix_fmt.height;
		vb->size = layer->pix_fmt.sizeimage;
		vb->field = field;

		ret = videobuf_iolock(q, vb, NULL);
		if (ret < 0) {
			v4l2_err(&vpbe_dev->v4l2_dev, "Failed to map \
				user address\n");
			return -EINVAL;
		}

		addr = videobuf_to_dma_contig(vb);

		if (q->streaming) {
			if (!IS_ALIGNED(addr, 8)) {
				v4l2_err(&vpbe_dev->v4l2_dev,
					"buffer_prepare:offset is \
					not aligned to 32 bytes\n");
				return -EINVAL;
			}
		}
		vb->state = VIDEOBUF_PREPARED;
	}
	return 0;
}

static int vpbe_buffer_setup(struct videobuf_queue *q,
Code example #21
/**
 * vpif_qbuf() - queue buffer handler
 * @file: file ptr
 * @priv: file handle
 * @buf: v4l2 buffer structure ptr
 */
static int vpif_qbuf(struct file *file, void *priv, struct v4l2_buffer *buf)
{

	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct v4l2_buffer tbuf = *buf;
	struct videobuf_buffer *buf1;
	unsigned long addr = 0;
	unsigned long flags;
	int ret = 0;

	vpif_dbg(2, debug, "vpif_qbuf\n");

	if (common->fmt.type != tbuf.type) {
		vpif_err("invalid buffer type\n");
		return -EINVAL;
	}

	if (!fh->io_allowed[VPIF_VIDEO_INDEX]) {
		vpif_err("fh io not allowed \n");
		return -EACCES;
	}

	if (!(list_empty(&common->dma_queue)) ||
	    (common->cur_frm != common->next_frm) ||
	    !common->started ||
	    (common->started && (0 == ch->field_id)))
		return videobuf_qbuf(&common->buffer_queue, buf);

	/* buffer queue is empty; store the buffer address in the VPIF registers */
	mutex_lock(&common->buffer_queue.vb_lock);
	buf1 = common->buffer_queue.bufs[tbuf.index];

	if ((buf1->state == VIDEOBUF_QUEUED) ||
	    (buf1->state == VIDEOBUF_ACTIVE)) {
		vpif_err("invalid state\n");
		goto qbuf_exit;
	}

	switch (buf1->memory) {
	case V4L2_MEMORY_MMAP:
		if (buf1->baddr == 0)
			goto qbuf_exit;
		break;

	case V4L2_MEMORY_USERPTR:
		if (tbuf.length < buf1->bsize)
			goto qbuf_exit;

		if ((VIDEOBUF_NEEDS_INIT != buf1->state)
			    && (buf1->baddr != tbuf.m.userptr)) {
			vpif_buffer_release(&common->buffer_queue, buf1);
			buf1->baddr = tbuf.m.userptr;
		}
		break;

	default:
		goto qbuf_exit;
	}

	local_irq_save(flags);
	ret = vpif_buffer_prepare(&common->buffer_queue, buf1,
					common->buffer_queue.field);
	if (ret < 0) {
		local_irq_restore(flags);
		goto qbuf_exit;
	}

	buf1->state = VIDEOBUF_ACTIVE;

	if (V4L2_MEMORY_USERPTR == common->memory)
		addr = buf1->boff;
	else
		addr = videobuf_to_dma_contig(buf1);

	common->next_frm = buf1;
	common->set_addr(addr + common->ytop_off,
			 addr + common->ybtm_off,
			 addr + common->ctop_off,
			 addr + common->cbtm_off);

	local_irq_restore(flags);
	list_add_tail(&buf1->stream, &common->buffer_queue.stream);
	mutex_unlock(&common->buffer_queue.vb_lock);
	return 0;

qbuf_exit:
	mutex_unlock(&common->buffer_queue.vb_lock);
	return -EINVAL;
}
Code example #22
File: ak_camera.c Project: Lamobo/Lamobo-D1
/**
 * @brief: Called when the application queues buffers; the camera starts data collection
 * 
 * @author: caolianming
 * @date: 2014-01-06
 * @param [in] *vq: V4L2 buffer queue information structure
 * @param [in] *vb: V4L2 buffer information structure
 */
static void ak_videobuf_queue(struct videobuf_queue *vq, 
								struct videobuf_buffer *vb)
{
	struct soc_camera_device *icd = vq->priv_data;
	struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
	struct ak_camera_dev *pcdev = ici->priv;
	struct ak_buffer *buf = container_of(vb, struct ak_buffer, vb);
	u32 yaddr_chl1, yaddr_chl2, size;
	static int ch2_sync = 0;
	
	isp_dbg("%s (vb=0x%p) buf[%d] baddr = 0x%08lx, bsize = %d\n",
			__func__,  vb, vb->i, vb->baddr, vb->bsize);

	list_add_tail(&vb->queue, &pcdev->capture);

	vb->state = VIDEOBUF_ACTIVE;
	size = vb->width * vb->height;
	yaddr_chl1 = videobuf_to_dma_contig(vb); /* for master channel */
	yaddr_chl2 = yaddr_chl1 + size * 3 / 2; /* for secondary channel */
	
	switch(pcdev->isp.cur_mode) {
	case ISP_YUV_OUT:
	case ISP_YUV_BYPASS:
	case ISP_RGB_OUT:	
		/* for single mode */
		if (!pcdev->active) {
			pcdev->active = buf;
			pcdev->dma_running = 1;	
			
			isp_set_even_frame(&pcdev->isp, yaddr_chl1, yaddr_chl2);
			isp_apply_mode(&pcdev->isp);
			isp_start_capturing(&pcdev->isp);
			
			isp_dbg("queue[single]: vbuf[%d] start run.\n", vb->i);
		}
		break;

	case ISP_YUV_VIDEO_OUT:
	case ISP_YUV_VIDEO_BYPASS:
	case ISP_RGB_VIDEO_OUT:
		/* for continuous mode */
		if (!pcdev->active) {
			pcdev->active = buf;
			pcdev->dma_running = 0;
			ch2_sync = 1;
			
			isp_set_even_frame(&pcdev->isp, yaddr_chl1, yaddr_chl2);
			isp_dbg("queue[continue]: vbuf1[%d]\n", vb->i);
			return; 		
		}

		if (!pcdev->dma_running) {
			pcdev->dma_running = 1;

			if (ch2_sync) {
				ch2_sync = 0;
				irq_buf_empty_flag = 0;
				
				isp_set_odd_frame(&pcdev->isp, yaddr_chl1, yaddr_chl2);
				isp_apply_mode(&pcdev->isp);
				isp_start_capturing(&pcdev->isp);
				
				isp_dbg("queue[continue]: vbuf2[%d] start.\n", vb->i);
				return;
			}

			// ensure that can update yaddr immediately
			if (isp_is_capturing_odd(&pcdev->isp)) 
				isp_set_even_frame(&pcdev->isp, yaddr_chl1, yaddr_chl2);
			else 
				isp_set_odd_frame(&pcdev->isp, yaddr_chl1, yaddr_chl2);
			
		}
		break;
	default:
		printk("The working mode of ISP hasn't been initialized.\n");
	}
	if (pcdev->pdata->rf_led.pin > 0) {
		rfled_timer(pcdev);
	}
}
Code example #23
/**
 * vpif_streamon() - streamon handler
 * @file: file ptr
 * @priv: file handle
 * @buftype: v4l2 buffer type
 */
static int vpif_streamon(struct file *file, void *priv,
				enum v4l2_buf_type buftype)
{

	struct vpif_capture_config *config = vpif_dev->platform_data;
	struct vpif_fh *fh = priv;
	struct channel_obj *ch = fh->channel;
	struct common_obj *common = &ch->common[VPIF_VIDEO_INDEX];
	struct channel_obj *oth_ch = vpif_obj.dev[!ch->channel_id];
	struct vpif_params *vpif;
	unsigned long addr = 0;
	int ret = 0;

	vpif_dbg(2, debug, "vpif_streamon\n");

	vpif = &ch->vpifparams;

	if (buftype != V4L2_BUF_TYPE_VIDEO_CAPTURE) {
		vpif_dbg(1, debug, "buffer type not supported\n");
		return -EINVAL;
	}

	/* If file handle is not allowed IO, return error */
	if (!fh->io_allowed[VPIF_VIDEO_INDEX]) {
		vpif_dbg(1, debug, "io not allowed\n");
		return -EACCES;
	}

	/* If Streaming is already started, return error */
	if (common->started) {
		vpif_dbg(1, debug, "channel->started\n");
		return -EBUSY;
	}

	if ((ch->channel_id == VPIF_CHANNEL0_VIDEO &&
	    oth_ch->common[VPIF_VIDEO_INDEX].started &&
	    vpif->std_info.ycmux_mode == 0) ||
	   ((ch->channel_id == VPIF_CHANNEL1_VIDEO) &&
	    (2 == oth_ch->common[VPIF_VIDEO_INDEX].started))) {
		vpif_dbg(1, debug, "other channel is being used\n");
		return -EBUSY;
	}

	ret = vpif_check_format(ch, &common->fmt.fmt.pix, 0);
	if (ret)
		return ret;

	/* Enable streamon on the sub device */
	ret = v4l2_subdev_call(vpif_obj.sd[ch->curr_sd_index], video,
				s_stream, 1);

	if (ret && (ret != -ENOIOCTLCMD)) {
		vpif_dbg(1, debug, "stream on failed in subdev\n");
		return ret;
	}

	/* Call videobuf_streamon to start streaming in videobuf */
	ret = videobuf_streamon(&common->buffer_queue);
	if (ret) {
		vpif_dbg(1, debug, "videobuf_streamon\n");
		return ret;
	}

	if (mutex_lock_interruptible(&common->lock)) {
		ret = -ERESTARTSYS;
		goto streamoff_exit;
	}

	/* If buffer queue is empty, return error */
	if (list_empty(&common->dma_queue)) {
		vpif_dbg(1, debug, "buffer queue is empty\n");
		ret = -EIO;
		goto exit;
	}

	/* Get the next frame from the buffer queue */
	common->cur_frm = list_entry(common->dma_queue.next,
				    struct videobuf_buffer, queue);
	common->next_frm = common->cur_frm;

	/* Remove buffer from the buffer queue */
	list_del(&common->cur_frm->queue);
	/* Mark state of the current frame to active */
	common->cur_frm->state = VIDEOBUF_ACTIVE;
	/* Initialize field_id and started member */
	ch->field_id = 0;
	common->started = 1;

	if (V4L2_MEMORY_USERPTR == common->memory)
		addr = common->cur_frm->boff;
	else
		addr = videobuf_to_dma_contig(common->cur_frm);

	/* Calculate the offset for Y and C data in the buffer */
	vpif_calculate_offsets(ch);

	if ((vpif->std_info.frm_fmt &&
	    ((common->fmt.fmt.pix.field != V4L2_FIELD_NONE) &&
	     (common->fmt.fmt.pix.field != V4L2_FIELD_ANY))) ||
	    (!vpif->std_info.frm_fmt &&
	     (common->fmt.fmt.pix.field == V4L2_FIELD_NONE))) {
		vpif_dbg(1, debug, "conflict in field format and std format\n");
		ret = -EINVAL;
		goto exit;
	}

	/* configure 1 or 2 channel mode */
	ret = config->setup_input_channel_mode(vpif->std_info.ycmux_mode);

	if (ret < 0) {
		vpif_dbg(1, debug, "can't set vpif channel mode\n");
		goto exit;
	}

	/* Call vpif_set_params function to set the parameters and addresses */
	ret = vpif_set_video_params(vpif, ch->channel_id);

	if (ret < 0) {
		vpif_dbg(1, debug, "can't set video params\n");
		goto exit;
	}

	common->started = ret;
	vpif_config_addr(ch, ret);

	common->set_addr(addr + common->ytop_off,
			 addr + common->ybtm_off,
			 addr + common->ctop_off,
			 addr + common->cbtm_off);

	/**
	 * Set interrupt for both the fields in VPIF Register enable channel in
	 * VPIF register
	 */
	if ((VPIF_CHANNEL0_VIDEO == ch->channel_id)) {
		channel0_intr_assert();
		channel0_intr_enable(1);
		enable_channel0(1);
	}
	if ((VPIF_CHANNEL1_VIDEO == ch->channel_id) ||
	    (common->started == 2)) {
		channel1_intr_assert();
		channel1_intr_enable(1);
		enable_channel1(1);
	}
	channel_first_int[VPIF_VIDEO_INDEX][ch->channel_id] = 1;
	mutex_unlock(&common->lock);
	return ret;

exit:
	mutex_unlock(&common->lock);
streamoff_exit:
	ret = videobuf_streamoff(&common->buffer_queue);
	return ret;
}
Code example #24
static int jz4780_camera_setup_dma(struct jz4780_camera_dev *pcdev,
		unsigned char dev_num)
{
	struct videobuf_buffer *vbuf = &pcdev->active->vb;
	struct soc_camera_device *icd = pcdev->icd[dev_num];
	struct jz4780_camera_dma_desc *dma_desc;
	dma_addr_t dma_address;
	unsigned int i, regval;


	dma_desc = (struct jz4780_camera_dma_desc *) pcdev->desc_vaddr;

	if (unlikely(!pcdev->active)) {
		dprintk(3, "setup dma error with no active buffer\n");
		return -EFAULT;
	}

	if(pcdev->is_tlb_enabled == 0) {
		dma_address = videobuf_to_dma_contig(vbuf);

		/* disable tlb error interrupt */
		regval = readl(pcdev->base + CIM_IMR);
		regval |= CIM_IMR_TLBEM;
		writel(regval, pcdev->base + CIM_IMR);

		/* disable tlb */
		regval = readl(pcdev->base + CIM_TC);
		regval &= ~CIM_TC_ENA;
		writel(regval, pcdev->base + CIM_TC);
	} else {
		dma_address = icd->vb_vidq.bufs[0]->baddr;

		/* enable tlb error interrupt */
		regval = readl(pcdev->base + CIM_IMR);
		regval &= ~CIM_IMR_TLBEM;
		writel(regval, pcdev->base + CIM_IMR);

		/* enable tlb */
		regval = readl(pcdev->base + CIM_TC);
		regval |= CIM_TC_ENA;
		writel(regval, pcdev->base + CIM_TC);
	}

	if(!dma_address) {
		dprintk(3, "Failed to setup DMA address\n");
		return -ENOMEM;
	}

	regval = (unsigned int) (pcdev->dma_desc);
	writel(regval, pcdev->base + CIM_DA);

	for(i = 0; i < (pcdev->buf_cnt); i++) {
		dma_desc[i].id = i;
		dma_desc[i].buf = dma_address + (icd->user_width * icd->user_height << 1) * i;

		dprintk(7, "cim dma desc[i] address is: 0x%x\n", dma_desc[i].buf);

		if(icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_YUYV) {
			dma_desc[i].cmd = icd->sizeimage >> 2 |
					CIM_CMD_EOFINT | CIM_CMD_OFRCV;
		} else {
Code example #25
File: mx2_camera.c Project: 125radheyshyam/linux
static void mx2_videobuf_queue(struct videobuf_queue *vq,
			       struct videobuf_buffer *vb)
{
	struct soc_camera_device *icd = vq->priv_data;
	struct soc_camera_host *ici =
		to_soc_camera_host(icd->parent);
	struct mx2_camera_dev *pcdev = ici->priv;
	struct mx2_buffer *buf = container_of(vb, struct mx2_buffer, vb);
	unsigned long flags;

	dev_dbg(icd->parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__,
		vb, vb->baddr, vb->bsize);

	spin_lock_irqsave(&pcdev->lock, flags);

	vb->state = VIDEOBUF_QUEUED;
	list_add_tail(&vb->queue, &pcdev->capture);

	if (mx27_camera_emma(pcdev)) {
		goto out;
#ifdef CONFIG_MACH_MX27
	} else if (cpu_is_mx27()) {
		int ret;

		if (pcdev->active == NULL) {
			ret = imx_dma_setup_single(pcdev->dma,
					videobuf_to_dma_contig(vb), vb->size,
					(u32)pcdev->base_dma + 0x10,
					DMA_MODE_READ);
			if (ret) {
				vb->state = VIDEOBUF_ERROR;
				wake_up(&vb->done);
				goto out;
			}

			vb->state = VIDEOBUF_ACTIVE;
			pcdev->active = buf;
		}
#endif
	} else { /* cpu_is_mx25() */
		u32 csicr3, dma_inten = 0;

		if (pcdev->fb1_active == NULL) {
			writel(videobuf_to_dma_contig(vb),
					pcdev->base_csi + CSIDMASA_FB1);
			pcdev->fb1_active = buf;
			dma_inten = CSICR1_FB1_DMA_INTEN;
		} else if (pcdev->fb2_active == NULL) {
			writel(videobuf_to_dma_contig(vb),
					pcdev->base_csi + CSIDMASA_FB2);
			pcdev->fb2_active = buf;
			dma_inten = CSICR1_FB2_DMA_INTEN;
		}

		if (dma_inten) {
			list_del(&vb->queue);
			vb->state = VIDEOBUF_ACTIVE;

			csicr3 = readl(pcdev->base_csi + CSICR3);

			/* Reflash DMA */
			writel(csicr3 | CSICR3_DMA_REFLASH_RFF,
					pcdev->base_csi + CSICR3);

			/* clear & enable interrupts */
			writel(dma_inten, pcdev->base_csi + CSISR);
			pcdev->csicr1 |= dma_inten;
			writel(pcdev->csicr1, pcdev->base_csi + CSICR1);

			/* enable DMA */
			csicr3 |= CSICR3_DMA_REQ_EN_RFF | CSICR3_RXFF_LEVEL(1);
			writel(csicr3, pcdev->base_csi + CSICR3);
		}
	}

out:
	spin_unlock_irqrestore(&pcdev->lock, flags);
}
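
All of the snippets above follow the same basic pattern: take the buffer at the head of the driver's queue, resolve its physical address with videobuf_to_dma_contig(), and program that address (plus per-plane offsets for Cb/Cr) into the capture engine's frame-address registers before starting or restarting the transfer. The code below is a minimal, self-contained sketch of that pattern only; my_videobuf, my_capture_dev, my_setup_capture() and the register layout are invented for illustration and do not correspond to any of the drivers quoted above.

#include <stddef.h>
#include <stdint.h>

/* Stand-in for struct videobuf_buffer: only the fields the sketch needs. */
struct my_videobuf {
	uint32_t dma_addr;	/* physical address of the contiguous buffer */
	size_t width, height;	/* frame geometry in pixels */
};

/* Stand-in for videobuf_to_dma_contig(): returns the buffer's DMA address. */
static uint32_t my_videobuf_to_dma_contig(const struct my_videobuf *vb)
{
	return vb->dma_addr;
}

/* Hypothetical capture controller with memory-mapped frame-address registers. */
struct my_capture_dev {
	volatile uint32_t *regs;	/* mapped register window */
	struct my_videobuf *active;	/* buffer currently owned by the hardware */
};

enum { REG_Y_ADDR, REG_CB_ADDR, REG_CR_ADDR, REG_START };

/*
 * Program the active buffer into the capture engine: Y base address first,
 * Cb/Cr derived from the Y base (a YUV 4:2:0 layout is assumed), then kick
 * off the capture, mirroring what the drivers above do in their IRQ and
 * queue handlers.
 */
static void my_setup_capture(struct my_capture_dev *dev)
{
	uint32_t y;
	size_t pix;

	if (!dev->active)
		return;

	y = my_videobuf_to_dma_contig(dev->active);
	pix = dev->active->width * dev->active->height;

	dev->regs[REG_Y_ADDR]  = y;			/* Y plane */
	dev->regs[REG_CB_ADDR] = y + pix;		/* Cb plane */
	dev->regs[REG_CR_ADDR] = y + pix + pix / 4;	/* Cr plane (4:2:0) */
	dev->regs[REG_START]   = 1;			/* start capture */
}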