/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex.
 */
static int dma_chan_get(struct dma_chan *chan)
{
	struct module *owner = dma_chan_to_owner(chan);
	int ret;

	/* The channel is already in use, update client count */
	if (chan->client_count) {
		__module_get(owner);
		goto out;
	}

	if (!try_module_get(owner))
		return -ENODEV;

	/* allocate upon first client reference */
	if (chan->device->device_alloc_chan_resources) {
		ret = chan->device->device_alloc_chan_resources(chan);
		if (ret < 0)
			goto err_out;
	}

	if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
		balance_ref_count(chan);

out:
	chan->client_count++;
	return 0;

err_out:
	module_put(owner);
	return ret;
}
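/*
 * A minimal caller sketch (not part of the code above): roughly how a
 * client-reference pass such as dmaengine_get() drives dma_chan_get(),
 * assuming the dma_device_list/dma_list_mutex globals from
 * drivers/dma/dmaengine.c. grab_all_channels() is a hypothetical name
 * used only for illustration.
 */
static void grab_all_channels(void)
{
	struct dma_device *device;
	struct dma_chan *chan;
	int err;

	mutex_lock(&dma_list_mutex);
	list_for_each_entry(device, &dma_device_list, global_node) {
		/* private channels are reserved via dma_request_channel() */
		if (dma_has_cap(DMA_PRIVATE, device->cap_mask))
			continue;
		list_for_each_entry(chan, &device->channels, device_node) {
			err = dma_chan_get(chan);
			if (err == -ENODEV)
				break;	/* module is being unloaded */
			else if (err)
				pr_debug("%s: failed to get %s: (%d)\n",
					 __func__, dma_chan_name(chan), err);
		}
	}
	mutex_unlock(&dma_list_mutex);
}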
/**
 * dma_chan_get - try to grab a dma channel's parent driver module
 * @chan: channel to grab
 *
 * Must be called under dma_list_mutex.
 */
static int dma_chan_get(struct dma_chan *chan)
{
	int err = -ENODEV;
	struct module *owner = dma_chan_to_owner(chan);

	if (chan->client_count) {
		__module_get(owner);
		err = 0;
	} else if (try_module_get(owner))
		err = 0;

	if (err == 0)
		chan->client_count++;

	/* allocate upon first client reference */
	if (chan->client_count == 1 && err == 0) {
		int desc_cnt = chan->device->device_alloc_chan_resources(chan);

		if (desc_cnt < 0) {
			err = desc_cnt;
			chan->client_count = 0;
			module_put(owner);
		} else if (!dma_has_cap(DMA_PRIVATE, chan->device->cap_mask))
			balance_ref_count(chan);
	}

	return err;
}
/**
 * async_trigger_callback - schedules the callback function to be run
 * @submit: submission and completion parameters
 *
 * The callback is run after any dependent operations have completed.
 */
struct dma_async_tx_descriptor *
async_trigger_callback(struct async_submit_ctl *submit)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *tx;
	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

	if (depend_tx) {
		chan = depend_tx->chan;
		device = chan->device;

		/* see if we can schedule an interrupt
		 * otherwise poll for completion
		 */
		if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
			device = NULL;

		tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
	} else
		tx = NULL;

	if (tx) {
		pr_debug("%s: (async)\n", __func__);

		async_tx_submit(chan, tx, submit);
	} else {
		pr_debug("%s: (sync)\n", __func__);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&submit->depend_tx);

		async_tx_sync_epilog(submit);
	}

	return tx;
}
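/*
 * Usage sketch, assuming the async_submit_ctl helpers from
 * include/linux/async_tx.h; complete_fn() and ctx are hypothetical.
 * The callback fires only once copy_tx (and anything it depends on)
 * has completed.
 */
static void schedule_completion(struct dma_async_tx_descriptor *copy_tx,
				dma_async_tx_callback complete_fn, void *ctx)
{
	struct async_submit_ctl submit;

	init_async_submit(&submit, ASYNC_TX_ACK, copy_tx,
			  complete_fn, ctx, NULL);
	async_trigger_callback(&submit);
}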
/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 *	pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = depend_tx->chan;
	struct dma_device *device = chan->device;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	/* first check to see if we can still append to depend_tx */
	txd_lock(depend_tx);
	if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
		txd_chain(depend_tx, tx);
		intr_tx = NULL;
	}
	txd_unlock(depend_tx);

	/* attached dependency, flush the parent channel */
	if (!intr_tx) {
		device->device_issue_pending(chan);
		return;
	}

	/* see if we can schedule an interrupt
	 * otherwise poll for completion
	 */
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		/* safe to chain outside the lock since we know we are
		 * not submitted yet
		 */
		txd_chain(intr_tx, tx);

		/* check if we need to append */
		txd_lock(depend_tx);
		if (txd_parent(depend_tx)) {
			txd_chain(depend_tx, intr_tx);
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		txd_unlock(depend_tx);

		if (intr_tx) {
			txd_clear_parent(intr_tx);
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
		device->device_issue_pending(chan);
	} else {
		if (dma_wait_for_async_tx(depend_tx) != DMA_COMPLETE)
			panic("%s: DMA error waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}
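/*
 * Condensed sketch of how async_tx_submit() decides to call the channel
 * switch above (simplified from crypto/async_tx/async_tx.c; flag handling
 * and sanity checks omitted, and submit_with_dependency() is a hypothetical
 * name for illustration).
 */
static void submit_with_dependency(struct dma_chan *chan,
				   struct dma_async_tx_descriptor *tx,
				   struct dma_async_tx_descriptor *depend_tx)
{
	enum { SUBMITTED, CHANNEL_SWITCH, DIRECT_SUBMIT } s;

	txd_lock(depend_tx);
	if (txd_parent(depend_tx))
		/* parent still pending: append, or cross channels */
		s = (depend_tx->chan == chan) ? SUBMITTED : CHANNEL_SWITCH;
	else
		/* no parent: submit directly if we stay on one channel */
		s = (depend_tx->chan == chan) ? DIRECT_SUBMIT : CHANNEL_SWITCH;
	if (s == SUBMITTED)
		txd_chain(depend_tx, tx);
	txd_unlock(depend_tx);

	switch (s) {
	case SUBMITTED:
		break;
	case CHANNEL_SWITCH:
		async_tx_channel_switch(depend_tx, tx);
		break;
	case DIRECT_SUBMIT:
		txd_clear_parent(tx);
		tx->tx_submit(tx);
		break;
	}
}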
/**
 * __async_tx_find_channel - find a channel to carry out the operation or let
 *	the transaction execute synchronously
 * @depend_tx: transaction dependency
 * @tx_type: transaction type
 */
struct dma_chan *
__async_tx_find_channel(struct dma_async_tx_descriptor *depend_tx,
			enum dma_transaction_type tx_type)
{
	/* see if we can keep the chain on one channel */
	if (depend_tx &&
	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
		return depend_tx->chan;
	return dma_find_channel(tx_type);
}
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = depend_tx->chan;
	struct dma_device *device = chan->device;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	txd_lock(depend_tx);
	if (txd_parent(depend_tx) && depend_tx->chan == tx->chan) {
		txd_chain(depend_tx, tx);
		intr_tx = NULL;
	}
	txd_unlock(depend_tx);

	if (!intr_tx) {
		device->device_issue_pending(chan);
		return;
	}

	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		txd_chain(intr_tx, tx);

		txd_lock(depend_tx);
		if (txd_parent(depend_tx)) {
			txd_chain(depend_tx, intr_tx);
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		txd_unlock(depend_tx);

		if (intr_tx) {
			txd_clear_parent(intr_tx);
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
		device->device_issue_pending(chan);
	} else {
		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
			panic("%s: DMA_ERROR waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}
struct dma_chan *
__async_tx_find_channel(struct async_submit_ctl *submit,
			enum dma_transaction_type tx_type)
{
	struct dma_async_tx_descriptor *depend_tx = submit->depend_tx;

	/* see if we can keep the chain on one channel */
	if (depend_tx &&
	    dma_has_cap(tx_type, depend_tx->chan->device->cap_mask))
		return depend_tx->chan;
	return async_dma_find_channel(tx_type);
}
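/*
 * Illustrative caller pattern (loosely modeled on async_memcpy()): look up
 * a channel for the operation type and fall back to a synchronous CPU copy
 * when none is available. copy_or_fallback() is a hypothetical helper; the
 * async branch is only outlined in a comment.
 */
static void copy_or_fallback(void *dest, void *src, size_t len,
			     struct async_submit_ctl *submit)
{
	struct dma_chan *chan = __async_tx_find_channel(submit, DMA_MEMCPY);

	if (!chan) {
		/* no capable channel: wait out dependencies, copy on the CPU */
		async_tx_quiesce(&submit->depend_tx);
		memcpy(dest, src, len);
		async_tx_sync_epilog(submit);
		return;
	}

	/* otherwise: dma_map the buffers, call device_prep_dma_memcpy(),
	 * and hand the descriptor to async_tx_submit()
	 */
}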
/**
 * async_trigger_callback - schedules the callback function to be run after
 *	any dependent operations have been completed.
 * @flags: ASYNC_TX_ACK, ASYNC_TX_DEP_ACK
 * @depend_tx: 'callback' requires the completion of this transaction
 * @cb_fn: function to call after depend_tx completes
 * @cb_param: parameter to pass to the callback routine
 */
struct dma_async_tx_descriptor *
async_trigger_callback(enum async_tx_flags flags,
		       struct dma_async_tx_descriptor *depend_tx,
		       dma_async_tx_callback cb_fn, void *cb_param)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *tx;

	if (depend_tx) {
		chan = depend_tx->chan;
		device = chan->device;

		/* see if we can schedule an interrupt
		 * otherwise poll for completion
		 */
		if (device && !dma_has_cap(DMA_INTERRUPT, device->cap_mask))
			device = NULL;

		tx = device ? device->device_prep_dma_interrupt(chan, 0) : NULL;
	} else
		tx = NULL;

	if (tx) {
		pr_debug("%s: (async)\n", __func__);

		async_tx_submit(chan, tx, flags, depend_tx, cb_fn, cb_param);
	} else {
		pr_debug("%s: (sync)\n", __func__);

		/* wait for any prerequisite operations */
		async_tx_quiesce(&depend_tx);

		async_tx_sync_epilog(cb_fn, cb_param);
	}

	return tx;
}
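/*
 * Usage sketch for the older flags-based variant above; complete_fn() and
 * ctx are hypothetical. ASYNC_TX_DEP_ACK asks the core to ack depend_tx
 * once the dependency has been recorded.
 */
static void schedule_completion_legacy(struct dma_async_tx_descriptor *depend_tx,
				       dma_async_tx_callback complete_fn,
				       void *ctx)
{
	async_trigger_callback(ASYNC_TX_ACK | ASYNC_TX_DEP_ACK, depend_tx,
			       complete_fn, ctx);
}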
/**
 * async_tx_channel_switch - queue an interrupt descriptor with a dependency
 *	pre-attached.
 * @depend_tx: the operation that must finish before the new operation runs
 * @tx: the new operation
 */
static void
async_tx_channel_switch(struct dma_async_tx_descriptor *depend_tx,
			struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan;
	struct dma_device *device;
	struct dma_async_tx_descriptor *intr_tx = (void *) ~0;

	/* first check to see if we can still append to depend_tx */
	spin_lock_bh(&depend_tx->lock);
	if (depend_tx->parent && depend_tx->chan == tx->chan) {
		tx->parent = depend_tx;
		depend_tx->next = tx;
		intr_tx = NULL;
	}
	spin_unlock_bh(&depend_tx->lock);

	if (!intr_tx)
		return;

	chan = depend_tx->chan;
	device = chan->device;

	/* see if we can schedule an interrupt
	 * otherwise poll for completion
	 */
	if (dma_has_cap(DMA_INTERRUPT, device->cap_mask))
		intr_tx = device->device_prep_dma_interrupt(chan, 0);
	else
		intr_tx = NULL;

	if (intr_tx) {
		intr_tx->callback = NULL;
		intr_tx->callback_param = NULL;
		tx->parent = intr_tx;
		/* safe to set ->next outside the lock since we know we are
		 * not submitted yet
		 */
		intr_tx->next = tx;

		/* check if we need to append */
		spin_lock_bh(&depend_tx->lock);
		if (depend_tx->parent) {
			intr_tx->parent = depend_tx;
			depend_tx->next = intr_tx;
			async_tx_ack(intr_tx);
			intr_tx = NULL;
		}
		spin_unlock_bh(&depend_tx->lock);

		if (intr_tx) {
			intr_tx->parent = NULL;
			intr_tx->tx_submit(intr_tx);
			async_tx_ack(intr_tx);
		}
	} else {
		if (dma_wait_for_async_tx(depend_tx) == DMA_ERROR)
			panic("%s: DMA_ERROR waiting for depend_tx\n",
			      __func__);
		tx->tx_submit(tx);
	}
}