Example #1
static int sst_dsp_dma_copy(struct sst_dsp *sst, dma_addr_t dest_addr,
	dma_addr_t src_addr, size_t size)
{
	struct dma_async_tx_descriptor *desc;
	struct sst_dma *dma = sst->dma;

	if (dma->ch == NULL) {
		dev_err(sst->dev, "error: no DMA channel\n");
		return -ENODEV;
	}

	dev_dbg(sst->dev, "DMA: src: 0x%lx dest 0x%lx size %zu\n",
		(unsigned long)src_addr, (unsigned long)dest_addr, size);

	desc = dma->ch->device->device_prep_dma_memcpy(dma->ch, dest_addr,
		src_addr, size, DMA_CTRL_ACK);
	if (!desc) {
		dev_err(sst->dev, "error: dma prep memcpy failed\n");
		return -EINVAL;
	}

	desc->callback = sst_dma_transfer_complete;
	desc->callback_param = sst;

	desc->tx_submit(desc);
	dma_wait_for_async_tx(desc);

	return 0;
}
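For comparison, the same blocking-copy pattern can be expressed with the generic dmaengine wrapper helpers instead of calling the device_prep_dma_memcpy hook directly. The sketch below is illustrative only: it assumes a channel has already been requested elsewhere (e.g. via dma_request_chan()) and that src/dest are already DMA-mapped; the helper name is made up.

#include <linux/dmaengine.h>

/* Hypothetical helper: copy len bytes and block until the engine reports
 * completion.  Not taken from any driver; a minimal sketch of the pattern.
 */
static int blocking_dma_memcpy(struct dma_chan *chan, dma_addr_t dest,
			       dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *desc;
	dma_cookie_t cookie;

	desc = dmaengine_prep_dma_memcpy(chan, dest, src, len, DMA_CTRL_ACK);
	if (!desc)
		return -ENOMEM;

	cookie = dmaengine_submit(desc);
	if (dma_submit_error(cookie))
		return -EIO;

	/* start the transfer, then poll the cookie until it completes */
	dma_async_issue_pending(chan);
	if (dma_wait_for_async_tx(desc) != DMA_COMPLETE)
		return -EIO;

	return 0;
}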
Example #2
/* NB this function blocks until the transfer is complete */
static void
smi_dma_read_sgl(struct bcm2835_smi_instance *inst,
	struct scatterlist *sgl, size_t sg_len, size_t n_bytes)
{
	struct dma_async_tx_descriptor *desc;

	/* Disable SMI and set to read before dispatching DMA - if SMI is in
	 * write mode and TX fifo is empty, it will generate a DREQ which may
	 * cause the read DMA to complete before the SMI read command is even
	 * dispatched! We want to dispatch DMA before SMI read so that reading
	 * is gapless, for logic analyser.
	 */

	smi_disable(inst, DMA_DEV_TO_MEM);

	desc = smi_dma_submit_sgl(inst, sgl, sg_len, DMA_DEV_TO_MEM, NULL);
	dma_async_issue_pending(inst->dma_chan);

	if (inst->settings.data_width == SMI_WIDTH_8BIT)
		smi_init_programmed_read(inst, n_bytes);
	else
		smi_init_programmed_read(inst, n_bytes / 2);

	if (dma_wait_for_async_tx(desc) == DMA_ERROR)
		smi_dump_context_labelled(inst, "DMA timeout!");
}
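Stripped of the SMI-specific ordering above, the generic dmaengine sequence for a blocking scatter-gather receive is: prepare a slave descriptor, submit it, issue pending work, then poll with dma_wait_for_async_tx(). A minimal hypothetical sketch, assuming the slave channel has already been configured with dmaengine_slave_config() and the scatterlist DMA-mapped; the function name is invented for illustration.

#include <linux/dmaengine.h>
#include <linux/scatterlist.h>

/* Hypothetical sketch, not the bcm2835_smi code: start a DEV_TO_MEM
 * transfer and block until it finishes.
 */
static int blocking_slave_read(struct dma_chan *chan, struct scatterlist *sgl,
			       unsigned int sg_len)
{
	struct dma_async_tx_descriptor *desc;

	desc = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc)
		return -EINVAL;

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);

	/* dma_wait_for_async_tx() polls the descriptor's cookie */
	return dma_wait_for_async_tx(desc) == DMA_COMPLETE ? 0 : -EIO;
}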
Example #3
/**
 * do_async_pqxor - asynchronously calculate P and/or Q
 */
static struct dma_async_tx_descriptor *
do_async_pqxor(struct dma_device *device,
	struct dma_chan *chan,
	struct page *pdest, struct page *qdest,
	struct page **src_list, unsigned char *scoef_list,
	unsigned int offset, unsigned int src_cnt, size_t len,
	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct page *dest;
	dma_addr_t dma_dest[2];
	dma_addr_t *dma_src = (dma_addr_t *) src_list;
	unsigned char *scf = qdest ? scoef_list : NULL;
	struct dma_async_tx_descriptor *tx;
	int i, dst_cnt = 0, zdst = flags & ASYNC_TX_XOR_ZERO_DST ? 1 : 0;
	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;

	if (flags & ASYNC_TX_XOR_ZERO_DST)
		dma_prep_flags |= DMA_PREP_ZERO_DST;

	/* One parity (P or Q) calculation is always initiated;
	 * always try Q first
	 */
	dest = qdest ? qdest : pdest;
	dma_dest[dst_cnt++] = dma_map_page(device->dev, dest, offset, len,
					    DMA_FROM_DEVICE);

	/* Switch to the next destination */
	if (qdest && pdest) {
		/* Both destinations are set, thus here we deal with P */
		dma_dest[dst_cnt++] = dma_map_page(device->dev, pdest, offset,
						len, DMA_FROM_DEVICE);
	}

	for (i = 0; i < src_cnt; i++)
		dma_src[i] = dma_map_page(device->dev, src_list[i],
			offset, len, DMA_TO_DEVICE);

	/* Since we have clobbered the src_list we are committed
	 * to doing this asynchronously.  Drivers force forward progress
	 * in case they can not provide a descriptor
	 */
	tx = device->device_prep_dma_pqxor(chan, dma_dest, dst_cnt, dma_src,
					   src_cnt, scf, len, dma_prep_flags);
	if (!tx) {
		if (depend_tx)
			dma_wait_for_async_tx(depend_tx);

		while (!tx)
			tx = device->device_prep_dma_pqxor(chan,
							   dma_dest, dst_cnt,
							   dma_src, src_cnt,
							   scf, len,
							   dma_prep_flags);
	}

	async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);

	return tx;
}
Example #4
/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 * 	pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = depend_tx->chan;
	struct dma_device *device = chan->device;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	/* first check to see if we can still append to depend_tx */
	txd_lock(depend_tx);
	if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
		txd_chain(depend_tx, tx);
		intr_tx = NULL;
	}
	txd_unlock(depend_tx);

	/* attached dependency, flush the parent channel */
	if (!intr_tx) {
		device->device_issue_pending(chan);
		return;
	}

	/* see if we can schedule an interrupt
	 * otherwise poll for completion
	 */
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		/* safe to chain outside the lock since we know we are
		 * not submitted yet
		 */
		txd_chain(intr_tx, tx);

		/* check if we need to append */
		txd_lock(depend_tx);
		if (txd_parent(depend_tx)) {
			txd_chain(depend_tx, intr_tx);
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		txd_unlock(depend_tx);

		if (intr_tx) {
			txd_clear_parent(intr_tx);
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
		device->device_issue_pending(chan);
	} else {
		if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
			panic("%s: DMA error waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}
Example #5
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
	if (*tx) {
		BUG_ON(async_tx_test_ack(*tx));
		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
			panic("DMA_ERROR waiting for transaction\n");
		async_tx_ack(*tx);
		*tx = NULL;
	}
}
Example #6
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = depend_tx->chan;
	struct dma_device *device = chan->device;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	
	txd_lock(depend_tx);
	if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
		txd_chain(depend_tx, tx);
		intr_tx = NULL;
	}
	txd_unlock(depend_tx);

	
	if (!intr_tx) {
		device->device_issue_pending(chan);
		return;
	}

	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		txd_chain(intr_tx, tx);

		
		txd_lock(depend_tx);
		if (txd_parent(depend_tx)) {
			txd_chain(depend_tx, intr_tx);
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		txd_unlock(depend_tx);

		if (intr_tx) {
			txd_clear_parent(intr_tx);
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
		device->device_issue_pending(chan);
	} else {
		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
			panic("%s: DMA_ERROR waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}
Example #7
/**
 * async_tx_quiesce - ensure tx is complete and freeable upon return
 * @tx: transaction to quiesce
 */
void async_tx_quiesce(struct dma_async_tx_descriptor **tx)
{
	if (*tx) {
		/* if ack is already set then we cannot be sure
		 * we are referring to the correct operation
		 */
		BUG_ON(async_tx_test_ack(*tx));
		if (dma_wait_for_async_tx(*tx) == DMA_ERROR)
			panic("DMA_ERROR waiting for transaction\n");
		async_tx_ack(*tx);
		*tx = NULL;
	}
}
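Callers typically use async_tx_quiesce() to turn a possibly-asynchronous chain into a hard synchronization point, since it copes with both a real descriptor and the NULL returned by the synchronous fallback paths. A hypothetical caller, written against the older async_xor() signature shown in Example #13 below:

/* Hypothetical example: XOR src_cnt pages (src_cnt >= 2) into dest and
 * block until the result is stable in memory.
 */
static void xor_pages_sync(struct page *dest, struct page **srcs,
			   int src_cnt, size_t len)
{
	struct dma_async_tx_descriptor *tx;

	tx = async_xor(dest, srcs, 0, src_cnt, len,
		       ASYNC_TX_XOR_ZERO_DST, NULL, NULL, NULL);

	/* handles tx == NULL, i.e. the synchronous fallback path */
	async_tx_quiesce(&tx);
}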
Example #8
/**
 * async_memset - attempt to fill memory with a dma engine.
 * @dest: destination page
 * @val: fill value
 * @offset: offset in pages to start transaction
 * @len: length in bytes
 * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: memset depends on the result of this transaction
 * @cb_fn: function to call when the memset completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_memset(struct page *dest, int val, unsigned int offset,
	size_t len, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMSET,
						      &dest, 1, NULL, 0, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;

	if (device) {
		dma_addr_t dma_dest;
		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;

		dma_dest = dma_map_page(device->dev, dest, offset, len,
					DMA_FROM_DEVICE);

		tx = device->device_prep_dma_memset(chan, dma_dest, val, len,
						    dma_prep_flags);
	}

	if (tx) {
		pr_debug("%s: (async) len: %zu\n", __func__, len);
		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else { /* run the memset synchronously */
		void *dest_buf;

		pr_debug("%s: (sync) len: %zu\n", __func__, len);

		dest_buf = (void *) (((char *) page_address(dest)) + offset);

		/* wait for any prerequisite operations */
		if (depend_tx) {
			/* if ack is already set then we cannot be sure
			 * we are referring to the correct operation
			 */
			BUG_ON(depend_tx->ack);
			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
				panic("%s: DMA_ERROR waiting for depend_tx\n",
				      __func__);
		}

		memset(dest_buf, val, len);

		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
	}

	return tx;
}
Example #9
/**
 * async_pqxor - attempt to calculate RS-syndrome and XOR in parallel using
 *	a dma engine.
 * @pdest: destination page for P-parity (XOR)
 * @qdest: destination page for Q-parity (GF-XOR)
 * @src_list: array of source pages
 * @src_coef_list: array of source coefficients used in GF-multiplication
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_ASSUME_COHERENT,
 *	ASYNC_TX_ACK, ASYNC_TX_DEP_ACK, ASYNC_TX_ASYNC_ONLY
 * @depend_tx: depends on the result of this transaction.
 * @callback: function to call when the operation completes
 * @callback_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_pqxor(struct page *pdest, struct page *qdest,
	struct page **src_list, unsigned char *scoef_list,
	unsigned int offset, int src_cnt, size_t len, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback callback, void *callback_param)
{
	struct page *dest[] = {pdest, qdest};
	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_PQ_XOR,
						      dest, 2, src_list,
						      src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;

	if (!device && (flags & ASYNC_TX_ASYNC_ONLY))
		return NULL;

	if (device) { /* run the xor asynchronously */
		tx = do_async_pqxor(device, chan, pdest, qdest, src_list,
			       scoef_list, offset, src_cnt, len, flags,
			       depend_tx, callback, callback_param);
	} else { /* run the pqxor synchronously */
		/* may do synchronous PQ only when both destinations exist */
		if (!pdest || !qdest)
			return NULL;

		/* wait for any prerequisite operations */
		if (depend_tx) {
			/* if ack is already set then we cannot be sure
			 * we are referring to the correct operation
			 */
			BUG_ON(depend_tx->ack);
			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
				panic("%s: DMA_ERROR waiting for depend_tx\n",
					__func__);
		}

		do_sync_pqxor(pdest, qdest, src_list,
			offset,	src_cnt, len, flags, depend_tx,
			callback, callback_param);
	}

	return tx;
}
Example #10
/* do_async_xor - dma map the pages and perform the xor with an engine.
 * 	This routine is marked __always_inline so it can be compiled away
 * 	when CONFIG_DMA_ENGINE=n
 */
static __always_inline struct dma_async_tx_descriptor *
do_async_xor(struct dma_device *device,
	struct dma_chan *chan, struct page *dest, struct page **src_list,
	unsigned int offset, unsigned int src_cnt, size_t len,
	enum async_tx_flags flags, struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	dma_addr_t dma_dest;
	dma_addr_t *dma_src = (dma_addr_t *) src_list;
	struct dma_async_tx_descriptor *tx;
	int i;
	unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;

	pr_debug("%s: len: %zu\n", __func__, len);

	dma_dest = dma_map_page(device->dev, dest, offset, len,
				DMA_FROM_DEVICE);

	for (i = 0; i < src_cnt; i++)
		dma_src[i] = dma_map_page(device->dev, src_list[i], offset,
					  len, DMA_TO_DEVICE);

	/* Since we have clobbered the src_list we are committed
	 * to doing this asynchronously.  Drivers force forward progress
	 * in case they can not provide a descriptor
	 */
	tx = device->device_prep_dma_xor(chan, dma_dest, dma_src, src_cnt, len,
					 dma_prep_flags);
	if (!tx) {
		if (depend_tx)
			dma_wait_for_async_tx(depend_tx);

		while (!tx)
			tx = device->device_prep_dma_xor(chan, dma_dest,
							 dma_src, src_cnt, len,
							 dma_prep_flags);
	}

	async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);

	return tx;
}
Example #11
static void
smi_dma_write_sgl(struct bcm2835_smi_instance *inst,
	struct scatterlist *sgl, size_t sg_len, size_t n_bytes)
{
	struct dma_async_tx_descriptor *desc;

	if (inst->settings.data_width == SMI_WIDTH_8BIT)
		smi_init_programmed_write(inst, n_bytes);
	else
		smi_init_programmed_write(inst, n_bytes / 2);

	desc = smi_dma_submit_sgl(inst, sgl, sg_len, DMA_MEM_TO_DEV, NULL);
	dma_async_issue_pending(inst->dma_chan);

	if (dma_wait_for_async_tx(desc) == DMA_ERROR)
		smi_dump_context_labelled(inst, "DMA timeout!");
	else
		/* Wait for SMI to finish our writes */
		while (!(read_smi_reg(inst, SMICS) & SMICS_DONE))
			cpu_relax();
}
Example #12
/**
 * async_xor_zero_sum - attempt a xor parity check with a dma engine.
 * @dest: destination page used if the xor is performed synchronously
 * @src_list: array of source pages.  The dest page must be listed as a source
 * 	at index zero.  The contents of this array may be overwritten.
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @result: 0 if sum == 0 else non-zero
 * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_xor_zero_sum(struct page *dest, struct page **src_list,
	unsigned int offset, int src_cnt, size_t len,
	u32 *result, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_ZERO_SUM,
						      &dest, 1, src_list,
						      src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;

	BUG_ON(src_cnt <= 1);

	if (device && src_cnt <= device->max_xor) {
		dma_addr_t *dma_src = (dma_addr_t *) src_list;
		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
		int i;

		pr_debug("%s: (async) len: %zu\n", __func__, len);

		for (i = 0; i < src_cnt; i++)
			dma_src[i] = dma_map_page(device->dev, src_list[i],
						  offset, len, DMA_TO_DEVICE);

		tx = device->device_prep_dma_zero_sum(chan, dma_src, src_cnt,
						      len, result,
						      dma_prep_flags);
		if (!tx) {
			if (depend_tx)
				dma_wait_for_async_tx(depend_tx);

			while (!tx)
				tx = device->device_prep_dma_zero_sum(chan,
					dma_src, src_cnt, len, result,
					dma_prep_flags);
		}

		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else {
		unsigned long xor_flags = flags;

		pr_debug("%s: (sync) len: %zu\n", __func__, len);

		xor_flags |= ASYNC_TX_XOR_DROP_DST;
		xor_flags &= ~ASYNC_TX_ACK;

		tx = async_xor(dest, src_list, offset, src_cnt, len, xor_flags,
			depend_tx, NULL, NULL);

		if (tx) {
			if (dma_wait_for_async_tx(tx) == DMA_ERROR)
				panic("%s: DMA_ERROR waiting for tx\n",
					__func__);
			async_tx_ack(tx);
		}

		*result = page_is_zero(dest, offset, len) ? 0 : 1;

		tx = NULL;

		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
	}

	return tx;
}
Example #13
/**
 * async_xor - attempt to xor a set of blocks with a dma engine.
 *	xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST
 *	flag must be set to not include dest data in the calculation.  The
 *	assumption with dma engines is that they only use the destination
 *	buffer as a source when it is explicitly specified in the source list.
 * @dest: destination page
 * @src_list: array of source pages (if the dest is also a source it must be
 *	at index zero).  The contents of this array may be overwritten.
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST,
 *	ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
	int src_cnt, size_t len, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
						      &dest, 1, src_list,
						      src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;
	dma_async_tx_callback _cb_fn;
	void *_cb_param;
	unsigned long local_flags;
	int xor_src_cnt;
	int i = 0, src_off = 0;

	BUG_ON(src_cnt <= 1);

	while (src_cnt) {
		local_flags = flags;
		if (device) { /* run the xor asynchronously */
			xor_src_cnt = min(src_cnt, device->max_xor);
			/* if we are submitting additional xors
			 * only set the callback on the last transaction
			 */
			if (src_cnt > xor_src_cnt) {
				local_flags &= ~ASYNC_TX_ACK;
				_cb_fn = NULL;
				_cb_param = NULL;
			} else {
				_cb_fn = cb_fn;
				_cb_param = cb_param;
			}

			tx = do_async_xor(device, chan, dest,
					  &src_list[src_off], offset,
					  xor_src_cnt, len, local_flags,
					  depend_tx, _cb_fn, _cb_param);
		} else { /* run the xor synchronously */
			/* in the sync case the dest is an implied source
			 * (assumes the dest is at the src_off index)
			 */
			if (flags & ASYNC_TX_XOR_DROP_DST) {
				src_cnt--;
				src_off++;
			}

			/* process up to 'MAX_XOR_BLOCKS' sources */
			xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);

			/* if we are submitting additional xors
			 * only set the callback on the last transaction
			 */
			if (src_cnt > xor_src_cnt) {
				local_flags &= ~ASYNC_TX_ACK;
				_cb_fn = NULL;
				_cb_param = NULL;
			} else {
				_cb_fn = cb_fn;
				_cb_param = cb_param;
			}

			/* wait for any prerequisite operations */
			if (depend_tx) {
				/* if ack is already set then we cannot be sure
				 * we are referring to the correct operation
				 */
				BUG_ON(async_tx_test_ack(depend_tx));
				if (dma_wait_for_async_tx(depend_tx) ==
					DMA_ERROR)
					panic("%s: DMA_ERROR waiting for "
						"depend_tx\n",
						__func__);
			}

			do_sync_xor(dest, &src_list[src_off], offset,
				xor_src_cnt, len, local_flags, depend_tx,
				_cb_fn, _cb_param);
		}

		/* the previous tx is hidden from the client,
		 * so ack it
		 */
		if (i && depend_tx)
			async_tx_ack(depend_tx);

		depend_tx = tx;

		if (src_cnt > xor_src_cnt) {
			/* drop completed sources */
			src_cnt -= xor_src_cnt;
			src_off += xor_src_cnt;

			/* unconditionally preserve the destination */
			flags &= ~ASYNC_TX_XOR_ZERO_DST;

			/* use the intermediate result as a source, but remember
			 * it's dropped, because it's implied, in the sync case
			 */
			src_list[--src_off] = dest;
			src_cnt++;
			flags |= ASYNC_TX_XOR_DROP_DST;
		} else
			src_cnt = 0;
		i++;
	}

	return tx;
}
Example #14
/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 * 	pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	/* first check to see if we can still append to depend_tx */
	spin_lock_bh(&depend_tx->lock);
	if (depend_tx->parent && depend_tx->chan == tx->chan) {
		tx->parent = depend_tx;
		depend_tx->next = tx;
		intr_tx = NULL;
	}
	spin_unlock_bh(&depend_tx->lock);

	if (!intr_tx)
		return;

	chan = depend_tx->chan;
	device = chan->device;

	/* see if we can schedule an interrupt
	 * otherwise poll for completion
	 */
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		tx->parent = intr_tx;
		/* safe to set ->next outside the lock since we know we are
		 * not submitted yet
		 */
		intr_tx->next = tx;

		/* check if we need to append */
		spin_lock_bh(&depend_tx->lock);
		if (depend_tx->parent) {
			intr_tx->parent = depend_tx;
			depend_tx->next = intr_tx;
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		spin_unlock_bh(&depend_tx->lock);

		if (intr_tx) {
			intr_tx->parent = NULL;
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
	} else {
		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
			panic("%s: DMA_ERROR waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}
Example #15
/**
 * async_pqxor_zero_sum - attempt a P/Q parity check with a dma engine.
 * @pdest: P-parity destination to check
 * @qdest: Q-parity destination to check
 * @src_list: array of source pages; the 1st pointer is qdest, the 2nd is pdest
 * @scf: coefficients to use in GF-multiplications
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @presult: 0 if P parity is OK else non-zero
 * @qresult: 0 if Q parity is OK else non-zero
 * @flags: ASYNC_TX_ASSUME_COHERENT, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: depends on the result of this transaction.
 * @cb_fn: function to call when the check completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_pqxor_zero_sum(struct page *pdest, struct page *qdest,
	struct page **src_list, unsigned char *scf,
	unsigned int offset, int src_cnt, size_t len,
	u32 *presult, u32 *qresult, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan = async_tx_find_channel(depend_tx,
						      DMA_PQ_ZERO_SUM,
						      src_list, 2, &src_list[2],
						      src_cnt, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;

	BUG_ON(src_cnt <= 1);
	BUG_ON(!qdest || qdest != src_list[0] || pdest != src_list[1]);

	if (device) {
		dma_addr_t *dma_src = (dma_addr_t *)src_list;
		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;
		int i;

		for (i = 0; i < src_cnt; i++)
			dma_src[i] = dma_map_page(device->dev, src_list[i],
						  offset, len, DMA_TO_DEVICE);

		tx = device->device_prep_dma_pqzero_sum(chan, dma_src, src_cnt,
						      scf, len,
						      presult, qresult,
						      dma_prep_flags);

		if (!tx) {
			if (depend_tx)
				dma_wait_for_async_tx(depend_tx);

			while (!tx)
				tx = device->device_prep_dma_pqzero_sum(chan,
						dma_src, src_cnt, scf, len,
						presult, qresult,
						dma_prep_flags);
		}

		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else {
		unsigned long lflags = flags;

		/* TBD: support lengths greater than PAGE_SIZE */

		lflags &= ~ASYNC_TX_ACK;
		spin_lock(&spare_lock);
		do_sync_pqxor(spare_pages[0], spare_pages[1],
			&src_list[2], offset,
			src_cnt - 2, len, lflags,
			depend_tx, NULL, NULL);

		if (presult && pdest)
			*presult = memcmp(page_address(pdest),
					   page_address(spare_pages[0]),
					   len) == 0 ? 0 : 1;
		if (qresult && qdest)
			*qresult = memcmp(page_address(qdest),
					   page_address(spare_pages[1]),
					   len) == 0 ? 0 : 1;
		spin_unlock(&spare_lock);
	}

	return tx;
}
Example #16
/**
 * async_memcpy - attempt to copy memory with a dma engine.
 * @dest: destination page
 * @src: src page
 * @dest_offset: offset into the destination page to start the transaction
 * @src_offset: offset into the source page to start the transaction
 * @len: length in bytes
 * @flags: ASYNC_TX_KMAP_SRC, ASYNC_TX_KMAP_DST, ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: memcpy depends on the result of this transaction
 * @cb_fn: function to call when the memcpy completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_memcpy(struct page *dest, struct page *src, unsigned int dest_offset,
	unsigned int src_offset, size_t len, enum async_tx_flags flags,
	struct dma_async_tx_descriptor *depend_tx,
	dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_MEMCPY,
						      &dest, 1, &src, 1, len);
	struct dma_device *device = chan ? chan->device : NULL;
	struct dma_async_tx_descriptor *tx = NULL;

	if (device) {
		dma_addr_t dma_dest, dma_src;
		unsigned long dma_prep_flags = cb_fn ? DMA_PREP_INTERRUPT : 0;

		dma_dest = dma_map_page(device->dev, dest, dest_offset, len,
					DMA_FROM_DEVICE);

		dma_src = dma_map_page(device->dev, src, src_offset, len,
				       DMA_TO_DEVICE);

		tx = device->device_prep_dma_memcpy(chan, dma_dest, dma_src,
						    len, dma_prep_flags);
	}

	if (tx) {
		pr_debug("%s: (async) len: %zu\n", __func__, len);
		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else {
		void *dest_buf, *src_buf;
		pr_debug("%s: (sync) len: %zu\n", __func__, len);

		/* wait for any prerequisite operations */
		if (depend_tx) {
			/* if ack is already set then we cannot be sure
			 * we are referring to the correct operation
			 */
			BUG_ON(depend_tx->ack);
			if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
				panic("%s: DMA_ERROR waiting for depend_tx\n",
					__func__);
		}

		if (flags & ASYNC_TX_KMAP_DST)
			dest_buf = kmap_atomic(dest, KM_USER0) + dest_offset;
		else
			dest_buf = page_address(dest) + dest_offset;

		if (flags & ASYNC_TX_KMAP_SRC)
			src_buf = kmap_atomic(src, KM_USER0) + src_offset;
		else
			src_buf = page_address(src) + src_offset;

		memcpy(dest_buf, src_buf, len);

		if (flags & ASYNC_TX_KMAP_DST)
			kunmap_atomic(dest_buf, KM_USER0);

		if (flags & ASYNC_TX_KMAP_SRC)
			kunmap_atomic(src_buf, KM_USER0);

		async_tx_sync_epilog(flags, depend_tx, cb_fn, cb_param);
	}

	return tx;
}