Example 1
0
/*
 * DMA event callback for a unipro transfer.
 *
 * Recovers the owning transfer descriptor from the embedded dma_arg and
 * tells the DMA device how to proceed:
 *  - BLOCKED:   try to continue the transfer; pause it and stop the
 *               channel if that fails.
 *  - COMPLETED: report success to the user callback and dequeue.
 *  - anything else: report -EBADE to the user callback and dequeue.
 */
static enum device_dma_cmd unipro_dma_callback(unsigned int channel,
                                               enum device_dma_event event,
                                               device_dma_transfer_arg *arg)
{
    struct unipro_xfer_descriptor *desc =
        containerof(arg, struct unipro_xfer_descriptor, dma_arg);
    int status;

    if (event == DEVICE_DMA_EVENT_BLOCKED) {
        if (unipro_continue_xfer(desc)) {
            unipro_xfer_pause(desc);
            return DEVICE_DMA_CMD_STOP;
        }
        return DEVICE_DMA_CMD_CONTINUE;
    }

    /* COMPLETED finishes with success; any other event is an error. */
    status = (event == DEVICE_DMA_EVENT_COMPLETED) ? 0 : -EBADE;

    if (desc->callback)
        desc->callback(status, desc->data, desc->priv);

    unipro_xfer_dequeue_descriptor(desc);
    return DEVICE_DMA_CMD_STOP;
}
Example 2
0
/*
 * DMA engine callback for a unipro TX transfer.
 *
 * On a START event (non-ES2 silicon only) this (re)binds the channel's
 * ATABL REQn line to the descriptor's cport before data moves.  On a
 * COMPLETE event it either finishes the transfer (set EOM, free the op,
 * invoke the user callback, dequeue the descriptor) or, when the buffer
 * is not yet fully sent, releases the channel and wakes the TX worker.
 *
 * Returns OK on success, or a negative error if REQn activation fails.
 */
static int unipro_dma_tx_callback(struct device *dev, void *chan,
        struct device_dma_op *op, unsigned int event, void *arg)
{
    struct unipro_xfer_descriptor *desc = arg;
    int retval = OK;

    if ((event & DEVICE_DMA_CALLBACK_EVENT_START) &&
        (tsb_get_rev_id() != tsb_rev_es2)) {
        int req_activated = 0;
        struct dma_channel *desc_chan = desc->channel;

        /* cportid == 0xFFFF marks a channel not bound to any cport yet. */
        if (desc_chan->cportid != 0xFFFF) {
            req_activated = device_atabl_req_is_activated(unipro_dma.atabl_dev,
                                                          desc_chan->req);
        }
        /* A still-active REQn must be deactivated before it can be
         * rebound or reactivated for this transfer. */
        if (req_activated != 0) {
            device_atabl_deactivate_req(unipro_dma.atabl_dev,
                                        desc_chan->req);
        }

        /* Rebind REQn if the channel last served a different cport. */
        if (desc_chan->cportid != desc->cport->cportid) {
            if (desc_chan->cportid != 0xFFFF) {
                device_atabl_disconnect_cport_from_req(unipro_dma.atabl_dev,
                        desc_chan->req);
                desc_chan->cportid = 0xffff;
            }

            retval = device_atabl_connect_cport_to_req(unipro_dma.atabl_dev,
                             desc->cport->cportid, desc_chan->req);
            if (retval != OK) {
                /* NOTE(review): a connect failure is only logged, not
                 * returned; execution falls through to activate_req below,
                 * which presumably fails too — confirm this is intended. */
                lldbg("Error: Failed to connect cport to REQn\n");
            }
        }
        retval = device_atabl_activate_req(unipro_dma.atabl_dev,
                                           desc_chan->req);

        if (retval) {
            lldbg("Error: Failed to activate cport %d on REQn\n",
                  desc->cport->cportid);
            return retval;
        } else {
            /* Record the binding only once activation has succeeded. */
            desc_chan->cportid = desc->cport->cportid;
        }
    }

    if (event & DEVICE_DMA_CALLBACK_EVENT_COMPLETE) {
        if (desc->data_offset >= desc->len) {
            /* Entire buffer transferred: finalize the message. */
            struct dma_channel *desc_chan = desc->channel;

            unipro_dma_tx_set_eom_flag(desc->cport);

            list_del(&desc->list);
            device_dma_op_free(unipro_dma.dev, op);

            if (desc->callback != NULL) {
                desc->callback(0, desc->data, desc->priv);
            }

            if (tsb_get_rev_id() != tsb_rev_es2) {
                device_atabl_transfer_completed(unipro_dma.atabl_dev,
                                                desc_chan->req);
            }

            unipro_xfer_dequeue_descriptor(desc);
        } else {
            /* Partial transfer: release the channel and wake the TX
             * worker so it can schedule the next chunk. */
            desc->channel = NULL;

            sem_post(&worker.tx_fifo_lock);
        }
    }

    return retval;
}