Example #1
/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 * 	pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = depend_tx->chan;
	struct dma_device *device = chan->device;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	/* first check to see if we can still append to depend_tx */
	txd_lock(depend_tx);
	if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
		txd_chain(depend_tx, tx);
		intr_tx = NULL;
	}
	txd_unlock(depend_tx);

	/* attached dependency, flush the parent channel */
	if (!intr_tx) {
		device->device_issue_pending(chan);
		return;
	}

	/* see if we can schedule an interrupt
	 * otherwise poll for completion
	 */
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		/* safe to chain outside the lock since we know we are
		 * not submitted yet
		 */
		txd_chain(intr_tx, tx);

		/* check if we need to append */
		txd_lock(depend_tx);
		if (txd_parent(depend_tx)) {
			txd_chain(depend_tx, intr_tx);
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		txd_unlock(depend_tx);

		if (intr_tx) {
			txd_clear_parent(intr_tx);
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
		device->device_issue_pending(chan);
	} else {
		if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
			panic("%s: DMA error waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}
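For context, a hedged sketch of how this path gets exercised: when two operations are chained through the async_tx API and the channel allocator places them on different channels, async_tx_submit() resolves the dependency by calling async_tx_channel_switch(). The function below is illustrative, not part of the API; only init_async_submit() and async_memcpy() are real calls.

#include <linux/async_tx.h>

/* Illustrative caller: chain two copies.  If async_tx_find_channel()
 * picks different DMA channels for them, async_tx_submit() falls back
 * to async_tx_channel_switch() to bridge the dependency.
 */
static struct dma_async_tx_descriptor *
copy_then_copy(struct page *a, struct page *b, struct page *c, size_t len)
{
	struct async_submit_ctl submit;
	struct dma_async_tx_descriptor *tx;

	init_async_submit(&submit, 0, NULL, NULL, NULL, NULL);
	tx = async_memcpy(b, a, 0, 0, len, &submit);	/* b = a */

	/* the second copy must wait for the first to complete */
	init_async_submit(&submit, ASYNC_TX_ACK, tx, NULL, NULL, NULL);
	return async_memcpy(c, b, 0, 0, len, &submit);	/* c = b */
}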
Example #4
static void s3c24xx_serial_rx_dma_complete(void *args)
{
	struct s3c24xx_uart_port *ourport = args;
	struct uart_port *port = &ourport->port;

	struct s3c24xx_uart_dma *dma = ourport->dma;
	struct tty_port *t = &port->state->port;
	struct tty_struct *tty = tty_port_tty_get(&ourport->port.state->port);

	struct dma_tx_state state;
	unsigned long flags;
	int received;

	dmaengine_tx_status(dma->rx_chan, dma->rx_cookie, &state);
	received = dma->rx_bytes_requested - state.residue;
	async_tx_ack(dma->rx_desc);

	spin_lock_irqsave(&port->lock, flags);

	if (received)
		s3c24xx_uart_copy_rx_to_tty(ourport, t, received);

	if (tty) {
		tty_flip_buffer_push(t);
		tty_kref_put(tty);
	}

	s3c64xx_start_rx_dma(ourport);

	spin_unlock_irqrestore(&port->lock, flags);
}
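For reference, a hedged sketch of the submission side that pairs with a completion handler like the one above. The dmaengine calls are real; my_start_rx_dma() and its parameters are illustrative stand-ins.

#include <linux/dmaengine.h>

/* Illustrative RX setup: prepare a slave transfer, attach the
 * completion callback, submit, and kick the channel.
 */
static int my_start_rx_dma(struct dma_chan *chan, dma_addr_t buf,
			   size_t len, dma_async_tx_callback cb, void *arg)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_slave_single(chan, buf, len,
					   DMA_DEV_TO_MEM,
					   DMA_PREP_INTERRUPT);
	if (!desc)
		return -EIO;

	desc->callback = cb;
	desc->callback_param = arg;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);
	return 0;
}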
Example #5
static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup,
		unsigned long *flags)
{
	struct dma_tx_state state;
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &tup->uport.state->port;
	struct uart_port *u = &tup->uport;
	int count;

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	async_tx_ack(tup->rx_dma_desc);
	count = tup->rx_bytes_requested - state.residue;

	/* If we are here, DMA is stopped */
	if (count)
		tegra_uart_copy_rx_to_tty(tup, port, count);

	tegra_uart_handle_rx_pio(tup, port);
	if (tty) {
		spin_unlock_irqrestore(&u->lock, *flags);
		tty_flip_buffer_push(port);
		spin_lock_irqsave(&u->lock, *flags);
		tty_kref_put(tty);
	}
	tegra_uart_start_rx_dma(tup);

	if (tup->rts_active)
		set_rts(tup, true);
}
Example #6
static void mx3_videobuf_release(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue);
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct mx3_camera_dev *mx3_cam = ici->priv;
	struct mx3_camera_buffer *buf = to_mx3_vb(vb);
	struct dma_async_tx_descriptor *txd = buf->txd;
	unsigned long flags;

	dev_dbg(icd->dev.parent,
		"Release%s DMA 0x%08x, queue %sempty\n",
		mx3_cam->active == buf ? " active" : "", sg_dma_address(&buf->sg),
		list_empty(&buf->queue) ? "" : "not ");

	spin_lock_irqsave(&mx3_cam->lock, flags);

	if (mx3_cam->active == buf)
		mx3_cam->active = NULL;

	/* Doesn't hurt also if the list is empty */
	list_del_init(&buf->queue);
	buf->state = CSI_BUF_NEEDS_INIT;

	if (txd) {
		buf->txd = NULL;
		if (mx3_cam->idmac_channel[0])
			async_tx_ack(txd);
	}

	spin_unlock_irqrestore(&mx3_cam->lock, flags);
}
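For orientation, async_tx_ack() as used throughout these examples is just a flag operation: it marks the descriptor acknowledged so the dmaengine core is free to recycle it. A sketch of the definitions, essentially as they appear in include/linux/dmaengine.h:

static inline void async_tx_ack(struct dma_async_tx_descriptor *tx)
{
	tx->flags |= DMA_CTRL_ACK;
}

static inline bool async_tx_test_ack(struct dma_async_tx_descriptor *tx)
{
	return tx->flags & DMA_CTRL_ACK;
}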
Example #7
static void tegra_uart_rx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct uart_port *u = &tup->uport;
	int count = tup->rx_bytes_requested;
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &u->state->port;
	unsigned long flags;

	async_tx_ack(tup->rx_dma_desc);
	spin_lock_irqsave(&u->lock, flags);

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	/* If we are here, DMA is stopped */
	if (count)
		tegra_uart_copy_rx_to_tty(tup, port, count);

	tegra_uart_handle_rx_pio(tup, port);
	if (tty) {
		spin_unlock_irqrestore(&u->lock, flags);
		tty_flip_buffer_push(port);
		spin_lock_irqsave(&u->lock, flags);
		tty_kref_put(tty);
	}
	tegra_uart_start_rx_dma(tup);

	/* Activate flow control to start transfer */
	if (tup->rts_active)
		set_rts(tup, true);

	spin_unlock_irqrestore(&u->lock, flags);
}
Example #8
static void s3c24xx_serial_tx_dma_complete(void *args)
{
	struct s3c24xx_uart_port *ourport = args;
	struct uart_port *port = &ourport->port;
	struct circ_buf *xmit = &port->state->xmit;
	struct s3c24xx_uart_dma *dma = ourport->dma;
	struct dma_tx_state state;
	unsigned long flags;
	int count;

	dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
	count = dma->tx_bytes_requested - state.residue;
	async_tx_ack(dma->tx_desc);

	dma_sync_single_for_cpu(ourport->port.dev, dma->tx_transfer_addr,
				dma->tx_size, DMA_TO_DEVICE);

	spin_lock_irqsave(&port->lock, flags);

	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	port->icount.tx += count;
	ourport->tx_in_progress = 0;

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(port);

	s3c24xx_serial_start_next_tx(ourport);
	spin_unlock_irqrestore(&port->lock, flags);
}
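The tail update above depends on UART_XMIT_SIZE being a power of two, which makes the mask equivalent to a modulo. A minimal standalone illustration (UART_XMIT_SIZE is PAGE_SIZE in serial_core.h; 4096 is assumed here):

#include <assert.h>

#define UART_XMIT_SIZE 4096	/* power of two; mask == modulo */

/* Advance a circular-buffer index by count, wrapping with a mask.
 * (x & (N - 1)) == (x % N) only holds when N is a power of two.
 */
static unsigned int advance_tail(unsigned int tail, unsigned int count)
{
	return (tail + count) & (UART_XMIT_SIZE - 1);
}

int main(void)
{
	assert(advance_tail(4090, 10) == 4);	/* wraps past the end */
	return 0;
}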
static void tegra_uart_rx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct uart_port *u = &tup->uport;
	int count = tup->rx_bytes_requested;
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &u->state->port;
	unsigned long flags;
	int rx_level = 0;
	struct dma_tx_state state;
	enum dma_status status;

	spin_lock_irqsave(&u->lock, flags);
	async_tx_ack(tup->rx_dma_desc);

	status = dmaengine_tx_status(tup->rx_dma_chan,
			tup->rx_cookie, &state);
	if (status == DMA_IN_PROGRESS) {
		dev_info(tup->uport.dev, "RX DMA is in progress\n");
		goto done;
	}

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	/* If we are here, DMA is stopped */
	if (count)
		tegra_uart_copy_rx_to_tty(tup, port, count);

	tegra_uart_handle_rx_pio(tup, port);

	if (tup->enable_rx_buffer_throttle) {
		rx_level = tty_buffer_get_level(port);
		if (rx_level > 70)
			mod_timer(&tup->timer,
					jiffies + tup->timer_timeout_jiffies);
	}

	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
	tegra_uart_start_rx_dma(tup);

	/* Activate flow control to start transfer */
	if (tup->enable_rx_buffer_throttle) {
		if ((rx_level <= 70) && tup->rts_active)
			set_rts(tup, true);
	} else if (tup->rts_active)
		set_rts(tup, true);
done:
	spin_unlock_irqrestore(&u->lock, flags);
}
Example #11
/**
 * async_tx_quiesce - ensure tx is complete and freeable upon return
 * @tx: transaction to quiesce
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
	if (*tx) {
		/* if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 */
		BUG_ON(async_tx_test_ack(*tx));
		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
			panic("DMA_ERROR waiting for transaction\n");
		async_tx_ack(*tx);
		*tx = NULL;
	}
}
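A hedged sketch of the typical caller pattern: the synchronous fallback paths in the async_tx helpers quiesce any dependency before doing the work on the CPU. my_sync_fallback() is illustrative; async_tx_quiesce() and async_tx_sync_epilog() are the real entry points.

/* Illustrative sync fallback: wait out (and ack) the dependency,
 * do the operation on the CPU, then run the completion machinery.
 */
static void my_sync_fallback(struct async_submit_ctl *submit)
{
	async_tx_quiesce(&submit->depend_tx);

	/* ... perform the operation synchronously here ... */

	async_tx_sync_epilog(submit);
}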
Example #12
static void tegra_uart_stop_tx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	int count;

	dmaengine_terminate_all(tup->tx_dma_chan);
	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
}
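A note on the terminate call above: dmaengine_terminate_all() is deprecated in newer kernels in favor of an explicit split. A hedged sketch of the replacement pair, with illustrative wrapper names:

/* dmaengine_terminate_async() only requests termination and is safe
 * under a spinlock; dmaengine_terminate_sync() additionally waits for
 * in-flight completion callbacks and therefore may sleep.
 */
static void my_stop_tx_atomic(struct dma_chan *chan)
{
	dmaengine_terminate_async(chan);
}

static void my_stop_tx_process_ctx(struct dma_chan *chan)
{
	dmaengine_terminate_sync(chan);
}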
Example #13
static void tegra_uart_rx_buffer_push(struct tegra_uart_port *tup,
				      unsigned int residue)
{
	struct tty_port *port = &tup->uport.state->port;
	struct tty_struct *tty = tty_port_tty_get(port);
	unsigned int count;

	async_tx_ack(tup->rx_dma_desc);
	count = tup->rx_bytes_requested - residue;

	/* If we are here, DMA is stopped */
	tegra_uart_copy_rx_to_tty(tup, port, count);

	tegra_uart_handle_rx_pio(tup, port);
	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
}
Example #14
static void tegra_uart_tx_dma_complete(void *args)
{
	struct tegra_uart_port *tup = args;
	struct circ_buf *xmit = &tup->uport.state->xmit;
	struct dma_tx_state state;
	unsigned long flags;
	unsigned int count;

	dmaengine_tx_status(tup->tx_dma_chan, tup->tx_cookie, &state);
	count = tup->tx_bytes_requested - state.residue;
	async_tx_ack(tup->tx_dma_desc);
	spin_lock_irqsave(&tup->uport.lock, flags);
	xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
	tup->tx_in_progress = 0;
	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&tup->uport);
	tegra_uart_start_next_tx(tup);
	spin_unlock_irqrestore(&tup->uport.lock, flags);
}
static void tegra_uart_handle_rx_dma(struct tegra_uart_port *tup)
{
	struct dma_tx_state state;
	struct tty_struct *tty = tty_port_tty_get(&tup->uport.state->port);
	struct tty_port *port = &tup->uport.state->port;
	int count;
	int rx_level = 0;

	/* Deactivate flow control to stop sender */
	if (tup->rts_active)
		set_rts(tup, false);

	dmaengine_terminate_all(tup->rx_dma_chan);
	dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
	async_tx_ack(tup->rx_dma_desc);
	count = tup->rx_bytes_requested - state.residue;

	/* If we are here, DMA is stopped */
	if (count)
		tegra_uart_copy_rx_to_tty(tup, port, count);

	tegra_uart_handle_rx_pio(tup, port);

	if (tup->enable_rx_buffer_throttle) {
		rx_level = tty_buffer_get_level(port);
		if (rx_level > 70)
			mod_timer(&tup->timer,
					jiffies + tup->timer_timeout_jiffies);
	}

	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
	tegra_uart_start_rx_dma(tup);

	if (tup->enable_rx_buffer_throttle) {
		if ((rx_level <= 70) && tup->rts_active)
			set_rts(tup, true);
	} else if (tup->rts_active)
		set_rts(tup, true);
}
static void tegra_uart_stop_rx(struct uart_port *u)
{
	struct tegra_uart_port *tup = to_tegra_uport(u);
	struct tty_struct *tty;
	struct tty_port *port = &u->state->port;
	struct dma_tx_state state;
	unsigned long ier;
	int count;

	if (tup->rts_active)
		set_rts(tup, false);

	if (!tup->rx_in_progress)
		return;

	tty = tty_port_tty_get(&tup->uport.state->port);

	tegra_uart_wait_sym_time(tup, 1); /* wait a character interval */

	ier = tup->ier_shadow;
	ier &= ~(UART_IER_RDI | UART_IER_RLSI | UART_IER_RTOIE |
					TEGRA_UART_IER_EORD);
	tup->ier_shadow = ier;
	tegra_uart_write(tup, ier, UART_IER);
	tup->rx_in_progress = 0;
	if (tup->rx_dma_chan && !tup->use_rx_pio) {
		dmaengine_terminate_all(tup->rx_dma_chan);
		dmaengine_tx_status(tup->rx_dma_chan, tup->rx_cookie, &state);
		async_tx_ack(tup->rx_dma_desc);
		count = tup->rx_bytes_requested - state.residue;
		if (count)
			tegra_uart_copy_rx_to_tty(tup, port, count);
		tegra_uart_handle_rx_pio(tup, port);
	} else {
		tegra_uart_handle_rx_pio(tup, port);
	}
	if (tty) {
		tty_flip_buffer_push(port);
		tty_kref_put(tty);
	}
}
Example #17
static void s3c24xx_serial_stop_tx(struct uart_port *port)
{
	struct s3c24xx_uart_port *ourport = to_ourport(port);
	struct s3c24xx_uart_dma *dma = ourport->dma;
	struct circ_buf *xmit = &port->state->xmit;
	struct dma_tx_state state;
	int count;

	if (!tx_enabled(port))
		return;

	if (s3c24xx_serial_has_interrupt_mask(port))
		s3c24xx_set_bit(port, S3C64XX_UINTM_TXD, S3C64XX_UINTM);
	else
		disable_irq_nosync(ourport->tx_irq);

	if (dma && dma->tx_chan && ourport->tx_in_progress == S3C24XX_TX_DMA) {
		dmaengine_pause(dma->tx_chan);
		dmaengine_tx_status(dma->tx_chan, dma->tx_cookie, &state);
		dmaengine_terminate_all(dma->tx_chan);
		dma_sync_single_for_cpu(ourport->port.dev,
			dma->tx_transfer_addr, dma->tx_size, DMA_TO_DEVICE);
		async_tx_ack(dma->tx_desc);
		count = dma->tx_bytes_requested - state.residue;
		xmit->tail = (xmit->tail + count) & (UART_XMIT_SIZE - 1);
		port->icount.tx += count;
	}

	tx_enabled(port) = 0;
	ourport->tx_in_progress = 0;

	if (port->flags & UPF_CONS_FLOW)
		s3c24xx_serial_rx_enable(port);

	ourport->tx_mode = 0;
}
Example #18
/**
 * async_xor_zero_sum - attempt a xor parity check with a dma engine.
 * @dest: destination page used if the xor is performed synchronously
 * @src_list: array of source pages.  The dest page must be listed as a source
 * 	at index zero.  The contents of this array may be overwritten.
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @result: 0 if sum == 0 else non-zero
 * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_xor_zero_sum(struct page *dest, struct page **src_list,
	unsigned int offset, int src_cnt, size_t len,
	u32 *result, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM,
						      &dest, 1, src_list,
						      src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;

	BUG_ON(src_cnt <= 1);

	if (device && src_cnt <= device->max_xor) {
		dma_addr_t *dma_src = (dma_addr_t *) src_list;
		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
		int i;

		pr_debug("%s: (async) len: %zu\n", __func__, len);

		for (i = 0; i < src_cnt; i++)
			dma_src[i] = dma_map_page(device->dev, src_list[i],
						  offset, len, DMA_TO_DEVICE);

		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
						      len, result,
						      dma_prep_flags);
		if (!tx) {
			if (depend_tx)
				dma_wait_for_async_tx(depend_tx);

			while (!tx)
				tx = device->device_prep_dma_zero_sum(chan,
					dma_src, src_cnt, len, result,
					dma_prep_flags);
		}

		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else {
		unsigned long xor_flags = flags;

		pr_debug("%s: (sync) len: %zu\n", __func__, len);

		xor_flags |= ASYNC_TX_XOR_DROP_DST;
		xor_flags &= ~ASYNC_TX_ACK;

		tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
			depend_tx, NULL, NULL);

		if (tx) {
			if (dma_wait_for_async_tx(tx) == DMA_ERROR)
				panic("%s: DMA_ERROR waiting for tx\n",
					__func__);
			async_tx_ack(tx);
		}

		*result = page_is_zero(dest, offset, len) ? 0 : 1;

		tx = NULL;

		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
	}

	return tx;
}
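A hedged sketch of a caller, using the same old-style argument list as the function above (flags, depend_tx, and callback passed directly); check_parity() and its names are illustrative.

/* Illustrative RAID5-style parity check.  Per the contract above, the
 * parity page must also sit at index 0 of the source list.
 */
static void check_parity(struct page *parity, struct page **srcs, int src_cnt,
			 size_t len, dma_async_tx_callback done, void *arg)
{
	static u32 result;	/* must stay valid until the callback runs */

	/* srcs[0] == parity is assumed here */
	async_xor_zero_sum(parity, srcs, 0, src_cnt, len, &result,
			   ASYNC_TX_ACK, NULL, done, arg);
}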
Example #19
void
async_tx_submit(struct dma_chan *chan, struct dma_async_tx_descriptor *tx,
		struct async_submit_ctl *submit)
{
	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

	tx->callback = submit->cb_fn;
	tx->callback_param = submit->cb_param;

	if (depend_tx) {
		enum submit_disposition s;

		/* sanity check the dependency chain:
		 * 1/ if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 * 2/ dependencies are 1:1 i.e. two transactions can
		 * not depend on the same parent
		 */
		BUG_ON(async_tx_test_ack(depend_tx) || txd_next(depend_tx) ||
		       txd_parent(tx));

		/* the lock prevents async_tx_run_dependencies from missing
		 * the setting of ->next when ->parent != NULL
		 */
		txd_lock(depend_tx);
		if (txd_parent(depend_tx)) {
			/* we have a parent so we can not submit directly
			 * if we are staying on the same channel: append
			 * else: channel switch
			 */
			if (depend_tx->chan == chan) {
				txd_chain(depend_tx, tx);
				s = ASYNC_TX_SUBMITTED;
			} else
				s = ASYNC_TX_CHANNEL_SWITCH;
		} else {
			/* we do not have a parent so we may be able to submit
			 * directly if we are staying on the same channel
			 */
			if (depend_tx->chan == chan)
				s = ASYNC_TX_DIRECT_SUBMIT;
			else
				s = ASYNC_TX_CHANNEL_SWITCH;
		}
		txd_unlock(depend_tx);

		switch (s) {
		case ASYNC_TX_SUBMITTED:
			break;
		case ASYNC_TX_CHANNEL_SWITCH:
			async_tx_channel_switch(depend_tx, tx);
			break;
		case ASYNC_TX_DIRECT_SUBMIT:
			txd_clear_parent(tx);
			tx->tx_submit(tx);
			break;
		}
	} else {
		txd_clear_parent(tx);
		tx->tx_submit(tx);
	}

	if (submit->flags & ASYNC_TX_ACK)
		async_tx_ack(tx);

	if (depend_tx)
		async_tx_ack(depend_tx);
}
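A hedged sketch of a caller that exercises the disposition logic above: queue a bare callback behind a prior transaction and let async_tx_submit() append, channel-switch, or submit directly as appropriate. notify_after() is illustrative; init_async_submit() and async_trigger_callback() are the real API.

#include <linux/async_tx.h>

/* Illustrative: run done(arg) once prev completes. */
static struct dma_async_tx_descriptor *
notify_after(struct dma_async_tx_descriptor *prev,
	     dma_async_tx_callback done, void *arg)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, prev, done, arg, NULL);
	return async_trigger_callback(&submit);
}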
Example #20
/**
 * async_xor - attempt to xor a set of blocks with a dma engine.
 *	xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST
 *	flag must be set to not include dest data in the calculation.  The
 *	assumption with dma engines is that they only use the destination
 *	buffer as a source when it is explicitly specified in the source list.
 * @dest: destination page
 * @src_list: array of source pages (if the dest is also a source it must be
 *	at index zero).  The contents of this array may be overwritten.
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST,
 *	ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
	int src_cnt, size_t len, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
						      &dest, 1, src_list,
						      src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;
	dma_async_tx_callback _cb_fn;
	void *_cb_param;
	unsigned long local_flags;
	int xor_src_cnt;
	int i = 0, src_off = 0;

	BUG_ON(src_cnt <= 1);

	while (src_cnt) {
		local_flags = flags;
		if (device) { /* run the xor asynchronously */
			xor_src_cnt = min(src_cnt, device->max_xor);
			/* if we are submitting additional xors
			 * only set the callback on the last transaction
			 */
			if (src_cnt > xor_src_cnt) {
				local_flags &= ~ASYNC_TX_ACK;
				_cb_fn = NULL;
				_cb_param = NULL;
			} else {
				_cb_fn = cb_fn;
				_cb_param = cb_param;
			}

			tx = do_async_xor(device, chan, dest,
					  &src_list[src_off], offset,
					  xor_src_cnt, len, local_flags,
					  depend_tx, _cb_fn, _cb_param);
		} else { /* run the xor synchronously */
			/* in the sync case the dest is an implied source
			 * (assumes the dest is at the src_off index)
			 */
			if (flags & ASYNC_TX_XOR_DROP_DST) {
				src_cnt--;
				src_off++;
			}

			/* process up to 'MAX_XOR_BLOCKS' sources */
			xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);

			/* if we are submitting additional xors
			 * only set the callback on the last transaction
			 */
			if (src_cnt > xor_src_cnt) {
				local_flags &= ~ASYNC_TX_ACK;
				_cb_fn = NULL;
				_cb_param = NULL;
			} else {
				_cb_fn = cb_fn;
				_cb_param = cb_param;
			}

			/* wait for any prerequisite operations */
			if (depend_tx) {
				/* if ack is already set then we cannot be sure
				 * we are referring to the correct operation
				 */
				BUG_ON(async_tx_test_ack(depend_tx));
				if (dma_wait_for_async_tx(depend_tx) ==
					DMA_ERROR)
					panic("%s: DMA_ERROR waiting for "
						"depend_tx\n",
						__func__);
			}

			do_sync_xor(dest, &src_list[src_off], offset,
				xor_src_cnt, len, local_flags, depend_tx,
				_cb_fn, _cb_param);
		}

		/* the previous tx is hidden from the client,
		 * so ack it
		 */
		if (i && depend_tx)
			async_tx_ack(depend_tx);

		depend_tx = tx;

		if (src_cnt > xor_src_cnt) {
			/* drop completed sources */
			src_cnt -= xor_src_cnt;
			src_off += xor_src_cnt;

			/* unconditionally preserve the destination */
			flags &= ~ASYNC_TX_XOR_ZERO_DST;

			/* use the intermediate result as a source, but remember
			 * it's dropped, because it's implied, in the sync case
			 */
			src_list[--src_off] = dest;
			src_cnt++;
			flags |= ASYNC_TX_XOR_DROP_DST;
		} else
			src_cnt = 0;
		i++;
	}

	return tx;
}
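To make the splitting loop concrete: with src_cnt = 5 and device->max_xor = 4, the first pass xors four sources into dest, then dest is folded back in as a source, leaving two inputs for a second, final pass. A minimal sketch of that bookkeeping, kernel specifics stripped away:

/* Illustrative pass accounting for the loop above: after a partial
 * xor, dest rejoins the source list, so src_cnt shrinks by
 * (xor_src_cnt - 1) per pass until everything fits in one shot.
 */
static int count_xor_passes(int src_cnt, int max_xor)
{
	int passes = 0;

	while (src_cnt) {
		int xor_src_cnt = src_cnt < max_xor ? src_cnt : max_xor;

		if (src_cnt > xor_src_cnt)
			src_cnt -= xor_src_cnt - 1;	/* dest added back */
		else
			src_cnt = 0;
		passes++;
	}
	return passes;	/* count_xor_passes(5, 4) == 2 */
}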
Example #21
0
/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 * 	pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	/* first check to see if we can still append to depend_tx */
	spin_lock_bh(&depend_tx->lock);
	if (depend_tx->parent && depend_tx->chan == tx->chan) {
		tx->parent = depend_tx;
		depend_tx->next = tx;
		intr_tx = NULL;
	}
	spin_unlock_bh(&depend_tx->lock);

	if (!intr_tx)
		return;

	chan = depend_tx->chan;
	device = chan->device;

	/* see if we can schedule an interrupt
	 * otherwise poll for completion
	 */
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		tx->parent = intr_tx;
		/* safe to set ->next outside the lock since we know we are
		 * not submitted yet
		 */
		intr_tx->next = tx;

		/* check if we need to append */
		spin_lock_bh(&depend_tx->lock);
		if (depend_tx->parent) {
			intr_tx->parent = depend_tx;
			depend_tx->next = intr_tx;
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		spin_unlock_bh(&depend_tx->lock);

		if (intr_tx) {
			intr_tx->parent = NULL;
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
	} else {
		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
			panic("%s: DMA_ERROR waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}