Example #1
static void sdc_enable_channel(struct mx3fb_info *mx3_fbi)
{
	struct mx3fb_data *mx3fb = mx3_fbi->mx3fb;
	struct idmac_channel *ichan = mx3_fbi->idmac_channel;
	struct dma_chan *dma_chan = &ichan->dma_chan;
	unsigned long flags;
	dma_cookie_t cookie;

	if (mx3_fbi->txd)
		dev_dbg(mx3fb->dev, "mx3fbi %p, desc %p, sg %p\n", mx3_fbi,
			to_tx_desc(mx3_fbi->txd), to_tx_desc(mx3_fbi->txd)->sg);
	else
		dev_dbg(mx3fb->dev, "mx3fbi %p, txd = NULL\n", mx3_fbi);

	/* This enables the channel */
	if (mx3_fbi->cookie < 0) {
		mx3_fbi->txd = dma_chan->device->device_prep_slave_sg(dma_chan,
		      &mx3_fbi->sg[0], 1, DMA_TO_DEVICE, DMA_PREP_INTERRUPT);
		if (!mx3_fbi->txd) {
			dev_err(mx3fb->dev, "Cannot allocate descriptor on %d\n",
				dma_chan->chan_id);
			return;
		}

		mx3_fbi->txd->callback_param	= mx3_fbi->txd;
		mx3_fbi->txd->callback		= mx3fb_dma_done;

		cookie = mx3_fbi->txd->tx_submit(mx3_fbi->txd);
		dev_dbg(mx3fb->dev, "%d: Submit %p #%d [%c]\n", __LINE__,
		       mx3_fbi->txd, cookie, list_empty(&ichan->queue) ? '-' : '+');
	} else {
		if (!mx3_fbi->txd || !mx3_fbi->txd->tx_submit) {
			dev_err(mx3fb->dev, "Cannot enable channel %d\n",
				dma_chan->chan_id);
			return;
		}

		/* Just re-activate the same buffer */
		dma_async_issue_pending(dma_chan);
		cookie = mx3_fbi->cookie;
		dev_dbg(mx3fb->dev, "%d: Re-submit %p #%d [%c]\n", __LINE__,
		       mx3_fbi->txd, cookie, list_empty(&ichan->queue) ? '-' : '+');
	}

	if (cookie >= 0) {
		spin_lock_irqsave(&mx3fb->lock, flags);
		sdc_fb_init(mx3_fbi);
		mx3_fbi->cookie = cookie;
		spin_unlock_irqrestore(&mx3fb->lock, flags);
	}

	/*
	 * Attention! Without this msleep the channel keeps generating
	 * interrupts. Next sdc_set_brightness() is going to be called
	 * from mx3fb_blank().
	 */
	msleep(2);
}
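sdc_enable_channel() above follows the usual dmaengine slave flow: prepare a scatter-gather descriptor, attach a completion callback, submit it, then issue pending transfers. Below is a minimal sketch of the same sequence using the current dmaengine wrapper helpers; my_start_transfer(), its parameters, and the error codes are illustrative and not part of the driver above.

#include <linux/dmaengine.h>

static int my_start_transfer(struct dma_chan *chan, struct scatterlist *sg,
			     unsigned int sg_len, void *ctx,
			     dma_async_tx_callback done_cb)
{
	struct dma_async_tx_descriptor *txd;
	dma_cookie_t cookie;

	/* Prepare a slave scatter-gather transaction towards the device */
	txd = dmaengine_prep_slave_sg(chan, sg, sg_len, DMA_MEM_TO_DEV,
				      DMA_PREP_INTERRUPT);
	if (!txd)
		return -ENOMEM;

	/* Run done_cb(ctx) once the transfer has completed */
	txd->callback = done_cb;
	txd->callback_param = ctx;

	/* Queue the descriptor on the channel... */
	cookie = dmaengine_submit(txd);
	if (dma_submit_error(cookie))
		return -EIO;

	/* ...and kick the engine so the queued work actually starts */
	dma_async_issue_pending(chan);
	return 0;
}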
/* Callback function triggered after PxP receives an EOF interrupt */
static void pxp_dma_done(void *arg)
{
	struct pxp_tx_desc *tx_desc = to_tx_desc(arg);
	struct dma_chan *chan = tx_desc->txd.chan;
	struct pxp_channel *pxp_chan = to_pxp_channel(chan);
	cam_data *cam = pxp_chan->client;

	/* This call will signal wait_for_completion_timeout() */
	complete(&cam->pxp_tx_cmpl);
}
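The comment in pxp_dma_done() points at the other half of this handshake: the submitting path is assumed to block on the same completion. A hypothetical sketch of that waiting side follows; the helper name, the one-second timeout, and the error handling are illustrative only.

static int pxp_wait_done(cam_data *cam)
{
	/* Sleep until pxp_dma_done() calls complete(), or give up */
	if (!wait_for_completion_timeout(&cam->pxp_tx_cmpl,
					 msecs_to_jiffies(1000)))
		return -ETIMEDOUT;

	return 0;
}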
Example #3
static void mx3fb_dma_done(void *arg)
{
	struct idmac_tx_desc *tx_desc = to_tx_desc(arg);
	struct dma_chan *chan = tx_desc->txd.chan;
	struct idmac_channel *ichannel = to_idmac_chan(chan);
	struct mx3fb_data *mx3fb = ichannel->client;
	struct mx3fb_info *mx3_fbi = mx3fb->fbi->par;

	dev_dbg(mx3fb->dev, "irq %d callback\n", ichannel->eof_irq);

	/* We only need one interrupt; it will be re-enabled as needed */
	disable_irq_nosync(ichannel->eof_irq);

	complete(&mx3_fbi->flip_cmpl);
}
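A hypothetical sketch of the "re-enabled as needed" counterpart: before queueing the next flip, the display path is assumed to re-arm the EOF interrupt that mx3fb_dma_done() disables and then wait for the completion it signals. The helper name and the HZ / 10 timeout are illustrative.

static int mx3fb_wait_for_flip(struct mx3fb_info *mx3_fbi)
{
	init_completion(&mx3_fbi->flip_cmpl);

	/* Re-enable the EOF interrupt that mx3fb_dma_done() disabled */
	enable_irq(mx3_fbi->idmac_channel->eof_irq);

	/* Wait for mx3fb_dma_done() to signal end of frame */
	if (!wait_for_completion_timeout(&mx3_fbi->flip_cmpl, HZ / 10))
		return -ETIMEDOUT;

	return 0;
}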
/* Called from the IPU IDMAC ISR */
static void mx3_cam_dma_done(void *arg)
{
	struct idmac_tx_desc *desc = to_tx_desc(arg);
	struct dma_chan *chan = desc->txd.chan;
	struct idmac_channel *ichannel = to_idmac_chan(chan);
	struct mx3_camera_dev *mx3_cam = ichannel->client;

	dev_dbg(chan->device->dev, "callback cookie %d, active DMA 0x%08x\n",
		desc->txd.cookie, mx3_cam->active ? sg_dma_address(&mx3_cam->active->sg) : 0);

	spin_lock(&mx3_cam->lock);
	if (mx3_cam->active) {
		struct vb2_buffer *vb = &mx3_cam->active->vb;
		struct mx3_camera_buffer *buf = to_mx3_vb(vb);

		list_del_init(&buf->queue);
		do_gettimeofday(&vb->v4l2_buf.timestamp);
		vb->v4l2_buf.field = mx3_cam->field;
		vb->v4l2_buf.sequence = mx3_cam->sequence++;
		vb2_buffer_done(vb, VB2_BUF_STATE_DONE);
	}

	if (list_empty(&mx3_cam->capture)) {
		mx3_cam->active = NULL;
		spin_unlock(&mx3_cam->lock);

		/*
		 * stop capture - without further buffers IPU_CHA_BUF0_RDY will
		 * not get updated
		 */
		return;
	}

	mx3_cam->active = list_entry(mx3_cam->capture.next,
				     struct mx3_camera_buffer, queue);
	spin_unlock(&mx3_cam->lock);
}
/*
 * Send the new V4L2 buffer to the PxP DMA driver; the PxP processes the
 * buffer in place.
 * Note: the transaction completes asynchronously; pxp_dma_done() signals
 * cam->pxp_tx_cmpl once the PxP has finished processing.
 */
static int pxp_process_update(cam_data *cam)
{
	dma_cookie_t cookie;
	struct scatterlist *sg = cam->sg;
	struct dma_chan *dma_chan;
	struct pxp_tx_desc *desc;
	struct dma_async_tx_descriptor *txd;
	struct pxp_config_data *pxp_conf = &cam->pxp_conf;
	struct pxp_proc_data *proc_data = &cam->pxp_conf.proc_data;
	int i, ret;
	int length;

	pr_debug("Starting PxP Send Buffer\n");

	/* First, check to see that we have acquired a PxP Channel object */
	if (cam->pxp_chan == NULL) {
		/*
		 * PxP Channel has not yet been created and initialized,
		 * so let's go ahead and try
		 */
		ret = pxp_chan_init(cam);
		if (ret) {
			/*
			 * PxP channel init failed, and we can't use the
			 * PxP until the PxP DMA driver has loaded, so we abort
			 */
			pr_err("PxP chan init failed\n");
			return -ENODEV;
		}
	}

	/*
	 * Initialize the completion so we are notified when the PxP task
	 * finishes.
	 */
	init_completion(&cam->pxp_tx_cmpl);

	dma_chan = &cam->pxp_chan->dma_chan;

	txd = dma_chan->device->device_prep_slave_sg(dma_chan, sg, 2,
						     DMA_TO_DEVICE,
						     DMA_PREP_INTERRUPT);
	if (!txd) {
		pr_err("Error preparing a DMA transaction descriptor.\n");
		return -EIO;
	}

	txd->callback_param = txd;
	txd->callback = pxp_dma_done;

	/*
	 * Configure PxP for processing of new v4l2 buf
	 */
	pxp_conf->s0_param.pixel_fmt = PXP_PIX_FMT_UYVY;
	pxp_conf->s0_param.color_key = -1;
	pxp_conf->s0_param.color_key_enable = false;
	pxp_conf->s0_param.width = cam->v2f.fmt.pix.width;
	pxp_conf->s0_param.height = cam->v2f.fmt.pix.height;

	pxp_conf->ol_param[0].combine_enable = false;

	proc_data->srect.top = 0;
	proc_data->srect.left = 0;
	proc_data->srect.width = pxp_conf->s0_param.width;
	proc_data->srect.height = pxp_conf->s0_param.height;

	proc_data->drect.top = 0;
	proc_data->drect.left = 0;
	proc_data->drect.width = proc_data->srect.width;
	proc_data->drect.height = proc_data->srect.height;
	proc_data->scaling = 0;
	proc_data->hflip = 0;
	proc_data->vflip = 0;
	proc_data->rotate = 0;
	proc_data->bgcolor = 0;

	pxp_conf->out_param.pixel_fmt = PXP_PIX_FMT_RGB565;
	pxp_conf->out_param.width = proc_data->drect.width;
	pxp_conf->out_param.height = proc_data->drect.height;

	if (cam->rotation >= IPU_ROTATE_90_RIGHT)
		pxp_conf->out_param.stride = pxp_conf->out_param.height;
	else
		pxp_conf->out_param.stride = pxp_conf->out_param.width;

	desc = to_tx_desc(txd);
	length = desc->len;
	for (i = 0; i < length; i++) {
		if (i == 0) {/* S0 */
			memcpy(&desc->proc_data, proc_data,
				sizeof(struct pxp_proc_data));
			pxp_conf->s0_param.paddr = sg_dma_address(&sg[0]);
			memcpy(&desc->layer_param.s0_param, &pxp_conf->s0_param,
				sizeof(struct pxp_layer_param));
		} else if (i == 1) {
			pxp_conf->out_param.paddr = sg_dma_address(&sg[1]);
			memcpy(&desc->layer_param.out_param,
				&pxp_conf->out_param,
				sizeof(struct pxp_layer_param));
		}

		desc = desc->next;
	}

	/* Submitting our TX starts the PxP processing task */
	cookie = txd->tx_submit(txd);
	if (cookie < 0) {
		pr_err("Error sending FB through PxP\n");
		return -EIO;
	}

	cam->txd = txd;

	/* trigger PxP */
	dma_async_issue_pending(dma_chan);

	return 0;
}
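pxp_process_update() hands the PxP a two-entry scatterlist: entry 0 carries the S0 input buffer and entry 1 the output buffer. A hypothetical sketch of how cam->sg might be populated before the call is shown below; the helper, its parameters, and the buffer addresses are assumptions, not taken from the driver.

static void pxp_fill_sg(cam_data *cam, dma_addr_t in_paddr, size_t in_len,
			dma_addr_t out_paddr, size_t out_len)
{
	sg_init_table(cam->sg, 2);

	/* Entry 0: S0 (input) buffer read by the PxP */
	sg_dma_address(&cam->sg[0]) = in_paddr;
	sg_dma_len(&cam->sg[0]) = in_len;

	/* Entry 1: output buffer written by the PxP */
	sg_dma_address(&cam->sg[1]) = out_paddr;
	sg_dma_len(&cam->sg[1]) = out_len;
}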