/**
 * async_xor - attempt to xor a set of blocks with a dma engine.
 * @dest: destination page
 * @src_list: array of source pages
 * @offset: common src/dst offset to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @submit: submission / completion modifiers
 *
 * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST
 *
 * xor_blocks always uses the dest as a source so the
 * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
 * the calculation.  The assumption with dma engines is that they only
 * use the destination buffer as a source when it is explicitly specified
 * in the source list.
 *
 * src_list note: if the dest is also a source it must be at index zero.
 * The contents of this array will be overwritten if a scribble region
 * is not specified.
 */
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
          int src_cnt, size_t len, struct async_submit_ctl *submit)
{
        struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
                                                      &dest, 1, src_list,
                                                      src_cnt, len);
        dma_addr_t *dma_src = NULL;

        BUG_ON(src_cnt <= 1);

        if (submit->scribble)
                dma_src = submit->scribble;
        else if (sizeof(dma_addr_t) <= sizeof(struct page *))
                dma_src = (dma_addr_t *) src_list;

        if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
                /* run the xor asynchronously */
                pr_debug("%s (async): len: %zu\n", __func__, len);

                return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
                                    dma_src, submit);
        } else {
                /* run the xor synchronously */
                pr_debug("%s (sync): len: %zu\n", __func__, len);
                WARN_ONCE(chan, "%s: no space for dma address conversion\n",
                          __func__);

                /* in the sync case the dest is an implied source
                 * (assumes the dest is the first source)
                 */
                if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
                        src_cnt--;
                        src_list++;
                }

                /* wait for any prerequisite operations */
                async_tx_quiesce(&submit->depend_tx);

                do_sync_xor(dest, src_list, offset, src_cnt, len, submit);

                return NULL;
        }
}
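/*
 * Usage sketch, not taken from the function above: a hypothetical caller
 * that xors four source pages into dest through the async_submit_ctl
 * interface.  The helper name example_xor, the callback xor_done, and the
 * caller-supplied scribble array are illustrative assumptions; the
 * init_async_submit()/async_xor() calls and the flags are the real API.
 */
#include <linux/async_tx.h>

static void xor_done(void *ref)
{
        pr_debug("xor complete for %p\n", ref);
}

static struct dma_async_tx_descriptor *
example_xor(struct page *dest, struct page **srcs, addr_conv_t *scribble,
            void *ref)
{
        struct async_submit_ctl submit;

        /* ASYNC_TX_XOR_ZERO_DST: do not fold existing dest data into the sum;
         * scribble keeps async_xor() from overwriting srcs[] during the
         * page-to-dma-address conversion.
         */
        init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK,
                          NULL, xor_done, ref, scribble);
        return async_xor(dest, srcs, 0, 4, PAGE_SIZE, &submit);
}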
/**
 * async_xor - attempt to xor a set of blocks with a dma engine.
 * @dest: destination page
 * @src_list: array of source pages
 * @offset: common src/dst offset to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @submit: submission / completion modifiers
 *
 * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST
 *
 * xor_blocks always uses the dest as a source so the
 * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
 * the calculation.  The assumption with dma engines is that they only
 * use the destination buffer as a source when it is explicitly specified
 * in the source list.
 *
 * src_list note: if the dest is also a source it must be at index zero.
 * The contents of this array will be overwritten if a scribble region
 * is not specified.
 */
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
          int src_cnt, size_t len, struct async_submit_ctl *submit)
{
        struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
                                                      &dest, 1, src_list,
                                                      src_cnt, len);
        dma_addr_t *dma_src = NULL;

        BUG_ON(src_cnt <= 1);

        if (submit->scribble)
                dma_src = submit->scribble;
        else if (sizeof(dma_addr_t) <= sizeof(struct page *))
                dma_src = (dma_addr_t *) src_list;

        if (dma_src && chan && is_dma_xor_aligned(chan->device, offset, 0, len)) {
                pr_debug("%s (async): len: %zu\n", __func__, len);

                return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
                                    dma_src, submit);
        } else {
                pr_debug("%s (sync): len: %zu\n", __func__, len);
                WARN_ONCE(chan, "%s: no space for dma address conversion\n",
                          __func__);

                if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
                        src_cnt--;
                        src_list++;
                }

                async_tx_quiesce(&submit->depend_tx);

                do_sync_xor(dest, src_list, offset, src_cnt, len, submit);

                return NULL;
        }
}
/**
 * async_xor - attempt to xor a set of blocks with a dma engine.
 *	xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST
 *	flag must be set to not include dest data in the calculation.  The
 *	assumption with dma engines is that they only use the destination
 *	buffer as a source when it is explicitly specified in the source list.
 * @dest: destination page
 * @src_list: array of source pages (if the dest is also a source it must be
 *	at index zero).  The contents of this array may be overwritten.
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST,
 *	ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
          int src_cnt, size_t len, enum async_tx_flags flags,
          struct dma_async_tx_descriptor *depend_tx,
          dma_async_tx_callback cb_fn, void *cb_param)
{
        struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
                                                      &dest, 1, src_list,
                                                      src_cnt, len);

        BUG_ON(src_cnt <= 1);

        if (chan) {
                /* run the xor asynchronously */
                pr_debug("%s (async): len: %zu\n", __func__, len);

                return do_async_xor(chan, dest, src_list, offset, src_cnt, len,
                                    flags, depend_tx, cb_fn, cb_param);
        } else {
                /* run the xor synchronously */
                pr_debug("%s (sync): len: %zu\n", __func__, len);

                /* in the sync case the dest is an implied source
                 * (assumes the dest is the first source)
                 */
                if (flags & ASYNC_TX_XOR_DROP_DST) {
                        src_cnt--;
                        src_list++;
                }

                /* wait for any prerequisite operations */
                async_tx_quiesce(&depend_tx);

                do_sync_xor(dest, src_list, offset, src_cnt, len,
                            flags, cb_fn, cb_param);

                return NULL;
        }
}
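/*
 * Usage sketch for the flags-based variant above, an assumption rather than
 * code from the source: legacy_example_xor, legacy_xor_done, and prev_tx are
 * hypothetical names.  The xor is chained behind a previously returned
 * descriptor, and ASYNC_TX_DEP_ACK lets the core ack that dependency.
 */
#include <linux/async_tx.h>

static void legacy_xor_done(void *ref)
{
        pr_debug("xor complete for %p\n", ref);
}

static struct dma_async_tx_descriptor *
legacy_example_xor(struct page *dest, struct page **srcs,
                   struct dma_async_tx_descriptor *prev_tx, void *ref)
{
        /* zero dest before the sum, ack this descriptor and its dependency */
        return async_xor(dest, srcs, 0, 4, PAGE_SIZE,
                         ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK |
                         ASYNC_TX_DEP_ACK,
                         prev_tx, legacy_xor_done, ref);
}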
/**
 * async_xor - attempt to xor a set of blocks with a dma engine.
 *	xor_blocks always uses the dest as a source so the ASYNC_TX_XOR_ZERO_DST
 *	flag must be set to not include dest data in the calculation.  The
 *	assumption with dma engines is that they only use the destination
 *	buffer as a source when it is explicitly specified in the source list.
 * @dest: destination page
 * @src_list: array of source pages (if the dest is also a source it must be
 *	at index zero).  The contents of this array may be overwritten.
 * @offset: offset in pages to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @flags: ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST,
 *	ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: xor depends on the result of this transaction.
 * @cb_fn: function to call when the xor completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
          int src_cnt, size_t len, enum async_tx_flags flags,
          struct dma_async_tx_descriptor *depend_tx,
          dma_async_tx_callback cb_fn, void *cb_param)
{
        struct dma_chan *chan = async_tx_find_channel(depend_tx, DMA_XOR,
                                                      &dest, 1, src_list,
                                                      src_cnt, len);
        struct dma_device *device = chan ? chan->device : NULL;
        struct dma_async_tx_descriptor *tx = NULL;
        dma_async_tx_callback _cb_fn;
        void *_cb_param;
        unsigned long local_flags;
        int xor_src_cnt;
        int i = 0, src_off = 0;

        BUG_ON(src_cnt <= 1);

        while (src_cnt) {
                local_flags = flags;
                if (device) { /* run the xor asynchronously */
                        xor_src_cnt = min(src_cnt, device->max_xor);
                        /* if we are submitting additional xors
                         * only set the callback on the last transaction
                         */
                        if (src_cnt > xor_src_cnt) {
                                local_flags &= ~ASYNC_TX_ACK;
                                _cb_fn = NULL;
                                _cb_param = NULL;
                        } else {
                                _cb_fn = cb_fn;
                                _cb_param = cb_param;
                        }

                        tx = do_async_xor(device, chan, dest,
                                          &src_list[src_off], offset,
                                          xor_src_cnt, len, local_flags,
                                          depend_tx, _cb_fn, _cb_param);
                } else { /* run the xor synchronously */
                        /* in the sync case the dest is an implied source
                         * (assumes the dest is at the src_off index)
                         */
                        if (flags & ASYNC_TX_XOR_DROP_DST) {
                                src_cnt--;
                                src_off++;
                        }

                        /* process up to 'MAX_XOR_BLOCKS' sources */
                        xor_src_cnt = min(src_cnt, MAX_XOR_BLOCKS);

                        /* if we are submitting additional xors
                         * only set the callback on the last transaction
                         */
                        if (src_cnt > xor_src_cnt) {
                                local_flags &= ~ASYNC_TX_ACK;
                                _cb_fn = NULL;
                                _cb_param = NULL;
                        } else {
                                _cb_fn = cb_fn;
                                _cb_param = cb_param;
                        }

                        /* wait for any prerequisite operations */
                        if (depend_tx) {
                                /* if ack is already set then we cannot be sure
                                 * we are referring to the correct operation
                                 */
                                BUG_ON(async_tx_test_ack(depend_tx));
                                if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
                                        panic("%s: DMA_ERROR waiting for depend_tx\n",
                                              __func__);
                        }

                        do_sync_xor(dest, &src_list[src_off], offset,
                                    xor_src_cnt, len, local_flags, depend_tx,
                                    _cb_fn, _cb_param);
                }

                /* the previous tx is hidden from the client, so ack it */
                if (i && depend_tx)
                        async_tx_ack(depend_tx);

                depend_tx = tx;

                if (src_cnt > xor_src_cnt) {
                        /* drop completed sources */
                        src_cnt -= xor_src_cnt;
                        src_off += xor_src_cnt;

                        /* unconditionally preserve the destination */
                        flags &= ~ASYNC_TX_XOR_ZERO_DST;

                        /* use the intermediate result as a source, but remember
                         * it's dropped, because it's implied, in the sync case
                         */
                        src_list[--src_off] = dest;
                        src_cnt++;
                        flags |= ASYNC_TX_XOR_DROP_DST;
                } else
                        src_cnt = 0;
                i++;
        }

        return tx;
}
/**
 * async_xor - attempt to xor a set of blocks with a dma engine.
 * @dest: destination page
 * @src_list: array of source pages
 * @offset: common src/dst offset to start transaction
 * @src_cnt: number of source pages
 * @len: length in bytes
 * @submit: submission / completion modifiers
 *
 * honored flags: ASYNC_TX_ACK, ASYNC_TX_XOR_ZERO_DST, ASYNC_TX_XOR_DROP_DST
 *
 * xor_blocks always uses the dest as a source so the
 * ASYNC_TX_XOR_ZERO_DST flag must be set to not include dest data in
 * the calculation.  The assumption with dma engines is that they only
 * use the destination buffer as a source when it is explicitly specified
 * in the source list.
 *
 * src_list note: if the dest is also a source it must be at index zero.
 * The contents of this array will be overwritten if a scribble region
 * is not specified.
 */
struct dma_async_tx_descriptor *
async_xor(struct page *dest, struct page **src_list, unsigned int offset,
          int src_cnt, size_t len, struct async_submit_ctl *submit)
{
        struct dma_chan *chan = async_tx_find_channel(submit, DMA_XOR,
                                                      &dest, 1, src_list,
                                                      src_cnt, len);
        struct dma_device *device = chan ? chan->device : NULL;
        struct dmaengine_unmap_data *unmap = NULL;

        BUG_ON(src_cnt <= 1);

        if (device)
                unmap = dmaengine_get_unmap_data(device->dev, src_cnt+1, GFP_NOWAIT);

        if (unmap && is_dma_xor_aligned(device, offset, 0, len)) {
                struct dma_async_tx_descriptor *tx;
                int i, j;

                /* run the xor asynchronously */
                pr_debug("%s (async): len: %zu\n", __func__, len);

                unmap->len = len;
                for (i = 0, j = 0; i < src_cnt; i++) {
                        if (!src_list[i])
                                continue;
                        unmap->to_cnt++;
                        unmap->addr[j++] = dma_map_page(device->dev, src_list[i],
                                                        offset, len, DMA_TO_DEVICE);
                }

                /* map it bidirectional as it may be re-used as a source */
                unmap->addr[j] = dma_map_page(device->dev, dest, offset, len,
                                              DMA_BIDIRECTIONAL);
                unmap->bidi_cnt = 1;

                tx = do_async_xor(chan, unmap, submit);
                dmaengine_unmap_put(unmap);
                return tx;
        } else {
                dmaengine_unmap_put(unmap);
                /* run the xor synchronously */
                pr_debug("%s (sync): len: %zu\n", __func__, len);
                WARN_ONCE(chan, "%s: no space for dma address conversion\n",
                          __func__);

                /* in the sync case the dest is an implied source
                 * (assumes the dest is the first source)
                 */
                if (submit->flags & ASYNC_TX_XOR_DROP_DST) {
                        src_cnt--;
                        src_list++;
                }

                /* wait for any prerequisite operations */
                async_tx_quiesce(&submit->depend_tx);

                do_sync_xor(dest, src_list, offset, src_cnt, len, submit);

                return NULL;
        }
}
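/*
 * Chaining sketch (an assumption, not part of the function above): with the
 * async_submit_ctl interface a second xor is ordered behind the first by
 * passing the returned descriptor as the depend_tx argument of the next
 * init_async_submit().  chain_two_xors and its parameters are hypothetical;
 * descriptor ack/lifetime handling is left to the async_tx core's defaults.
 */
#include <linux/async_tx.h>

static void chain_two_xors(struct page *d1, struct page **s1,
                           struct page *d2, struct page **s2,
                           addr_conv_t *scribble)
{
        struct async_submit_ctl submit;
        struct dma_async_tx_descriptor *tx;

        init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST, NULL, NULL, NULL,
                          scribble);
        tx = async_xor(d1, s1, 0, 3, PAGE_SIZE, &submit);

        /* the second xor will not run until the first completes */
        init_async_submit(&submit, ASYNC_TX_XOR_ZERO_DST | ASYNC_TX_ACK, tx,
                          NULL, NULL, scribble);
        async_xor(d2, s2, 0, 3, PAGE_SIZE, &submit);

        /* kick the channels so queued descriptors actually execute */
        async_tx_issue_pending_all();
}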