Example #1
static void unipro_evt_handler(enum unipro_event evt)
{
    int retval;

    DBG_UNIPRO("UniPro: event %d.\n", evt);

    switch (evt) {
    case UNIPRO_EVT_MAILBOX:
        mailbox_evt();
        break;

    case UNIPRO_EVT_LUP_DONE:
        if (tsb_get_rev_id() == tsb_rev_es2)
            es2_apply_mphy_fixup();

        retval = unipro_enable_mailbox_irq();
        if (retval) {
            lowsyslog("unipro: failed to enable mailbox irq\n");
        }
        break;
    }

    if (evt_handler) {
        evt_handler(evt);
    }
}
Example #2
void up_idle(void)
{
#if defined(CONFIG_SUPPRESS_INTERRUPTS) || defined(CONFIG_SUPPRESS_TIMER_INTS)
  /* If the system is idle and there are no timer interrupts, then process
   * "fake" timer interrupts. Hopefully, something will wake up.
   */

  sched_process_timer();
#else

  up_idlepm();

  /* SW-425 */
  if (tsb_get_rev_id() > tsb_rev_es2) {
    asm("wfi");
  } else {
    /* We theorize that instruction fetch on the bridge silicon may stall an
     * in-progress USB DMA transfer.  The ideal solution is to halt the processor
     * during idle via WFI (wait for interrupt), but that degrades the JTAG
     * debugging experience (see discussion below).
     *
     * For es2 builds, we'll try a work-around suggested by Olin Siebert, namely
     * to execute a sequence of 16-bit nop instructions.  The theory is that the CM3
     * core will fetch two 16-bit instructions at a time, but execute them
     * sequentially, dropping instruction fetch bandwidth by 50% during idle
     * periods, and offering USB DMA transfers the opportunity to progress and
     * complete.
     */
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
    asm("nop");
  }

#endif
}
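The same straight-line nop sequence can be emitted more compactly with an assembler repeat directive; a minimal sketch, assuming GCC inline assembly with GAS directives (the helper name es2_idle_nop_slide is illustrative, not from the tree):

/* Emits 20 consecutive 16-bit nops, equivalent to the hand-written
 * sequence above. .rept/.endr are GAS directives; "volatile" keeps the
 * compiler from discarding the otherwise side-effect-free block. */
static inline void es2_idle_nop_slide(void)
{
    asm volatile(
        ".rept 20\n\t"
        "nop\n\t"
        ".endr"
    );
}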
Example #3
/**
 * Since the switch has no 32-bit MBOX_ACK_ATTR attribute, we need to repurpose
 * a 16-bit attribute, which means that received mbox values must fit inside a
 * uint16_t.
 */
static int tsb_unipro_mbox_ack(uint16_t val) {
    int rc;
    uint32_t mbox_ack_attr = tsb_get_rev_id() == tsb_rev_es2 ?
                             ES2_MBOX_ACK_ATTR : ES3_MBOX_ACK_ATTR;

    rc = unipro_attr_local_write(mbox_ack_attr, val, 0);
    if (rc) {
        lldbg("MBOX_ACK_ATTR complement write of 0x%x failed: %d\n", val, rc);
        return rc;
    }

    return 0;
}
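Because the repurposed attribute is only 16 bits wide, callers have to reject wider values before acking; a hedged usage sketch (mbox_receive_and_ack and the -ERANGE policy are assumptions, not the tree's code):

/* Hypothetical caller: the received mailbox value is 32 bits, but the
 * repurposed ack attribute can only echo 16, so anything wider is
 * refused up front instead of being silently truncated. */
static int mbox_receive_and_ack(uint32_t mbox_val)
{
    if (mbox_val > UINT16_MAX) {
        lldbg("mbox value 0x%x too wide for 16-bit ack attribute\n",
              mbox_val);
        return -ERANGE;
    }

    return tsb_unipro_mbox_ack((uint16_t)mbox_val);
}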
Example #4
/**
 * @brief Probe SPI device
 *
 * This function is called by the system to register the driver when the system
 * boots up. This function allocates memory for the private SPI device
 * information, and then sets up the hardware resource and interrupt handler.
 *
 * @param dev pointer to structure of device data
 * @return 0 on success, negative errno on error
 */
static int tsb_spi_dev_probe(struct device *dev)
{
    struct tsb_spi_dev_info *info;
    struct device_resource *r;
    int ret = 0;

    if (tsb_get_rev_id() == tsb_rev_es2) {
        return -ENODEV;
    }

    if (!dev) {
        return -EINVAL;
    }

    info = zalloc(sizeof(*info));
    if (!info) {
        return -ENOMEM;
    }

    /* get register data from resource block */
    r = device_resource_get_by_name(dev, DEVICE_RESOURCE_TYPE_REG, "reg_base");
    if (!r) {
        ret = -EINVAL;
        goto err_freemem;
    }

    info->reg_base = (uint32_t)r->start;
    info->state = TSB_SPI_STATE_CLOSED;
    info->spi_pmstate = PM_SLEEP;
    device_set_private(dev, info);
    spi_dev = dev;

    sem_init(&info->bus, 0, 1);
    sem_init(&info->lock, 0, 1);
    sem_init(&info->xfer_completed, 0, 0);

    ret = tsb_pm_register(tsb_spi_pm_prepare, tsb_spi_pm_notify, dev);
    if (ret) {
        goto err_freemem;
    }
    return 0;

err_freemem:
    /* Clear the private-data binding before freeing so a failed
     * tsb_pm_register() doesn't leave dev pointing at freed memory. */
    spi_dev = NULL;
    device_set_private(dev, NULL);
    free(info);
    return ret;
}
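Probe's allocations imply a matching teardown; a sketch of the counterpart, releasing what probe set up in reverse order (tsb_spi_dev_remove is an illustrative name, not the tree's actual remove hook, and whether the PM registration needs an explicit unregister is not shown in this excerpt):

/* Hypothetical counterpart to tsb_spi_dev_probe(). */
static void tsb_spi_dev_remove(struct device *dev)
{
    struct tsb_spi_dev_info *info = device_get_private(dev);

    if (!info) {
        return;
    }

    sem_destroy(&info->xfer_completed);
    sem_destroy(&info->lock);
    sem_destroy(&info->bus);

    spi_dev = NULL;
    device_set_private(dev, NULL);
    free(info);
}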
Example #5
unsigned int unipro_cport_count(void) {
    uint32_t num_cports;
    int retval;

    if (tsb_get_rev_id() == tsb_rev_es2) { /* T_NUMCPORTS is incorrect on es2 */
        /*
         * Reduce the run-time CPort count to what's available on the
         * GPBridges, unless we can determine that we're running on an
         * APBridge.
         */
        return ((tsb_get_product_id() == tsb_pid_apbridge) ?
                ES2_APBRIDGE_CPORT_MAX : ES2_GPBRIDGE_CPORT_MAX);
    }

    retval = unipro_attr_local_read(T_NUMCPORTS, &num_cports, 0);
    if (retval) {
        lowsyslog("unipro: cannot determine number of cports\n");
        return 0;
    }

    return num_cports;
}
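A zero return signals failure, so callers can size per-CPort state at runtime and bail out on zero; a minimal usage sketch (cport_state and cport_table_init are illustrative names, not from the tree):

/* Hypothetical per-CPort table sized from unipro_cport_count(); a zero
 * count means the attribute read failed (or no CPorts exist). */
struct cport_state {
    uint16_t cportid;
    bool     in_use;
};

static struct cport_state *cport_states;

static int cport_table_init(void)
{
    unsigned int count = unipro_cport_count();

    if (count == 0) {
        return -ENODEV;
    }

    cport_states = zalloc(count * sizeof(*cport_states));
    return cport_states ? 0 : -ENOMEM;
}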
Example #6
int unipro_tx_init(void)
{
    int i;
    int retval;
    int avail_chan = 0;
    enum device_dma_dev dst_device = DEVICE_DMA_DEV_MEM;

    sem_init(&worker.tx_fifo_lock, 0, 0);
    sem_init(&unipro_dma.dma_channel_lock, 0, 0);

    unipro_dma.dev = device_open(DEVICE_TYPE_DMA_HW, 0);
    if (!unipro_dma.dev) {
        lldbg("Failed to open DMA driver.\n");
        return -ENODEV;
    }

    if (tsb_get_rev_id() != tsb_rev_es2) {
        /*
         * Set up the HW handshake threshold.
         */
        for (i = 0; i < unipro_cport_count(); i++) {
            uint32_t offset_value =
                unipro_read(REG_TX_BUFFER_SPACE_OFFSET_REG(i));

#ifdef CONFIG_ARCH_UNIPROTX_DMA_WMB
            unipro_write(REG_TX_BUFFER_SPACE_OFFSET_REG(i),
                        offset_value | (0x10 << 8));
#else
            unipro_write(REG_TX_BUFFER_SPACE_OFFSET_REG(i),
                        offset_value | (0x20 << 8));
#endif
        }

        /*
         * Open the ATABL driver.
         */
        unipro_dma.atabl_dev = device_open(DEVICE_TYPE_ATABL_HW, 0);
        if (!unipro_dma.atabl_dev) {
            lldbg("Failed to open ATABL driver.\n");

            device_close(unipro_dma.dev);
            return -ENODEV;
        }
    }

    unipro_dma.max_channel = 0;
    list_init(&unipro_dma.free_channel_list);
    avail_chan = device_dma_chan_free_count(unipro_dma.dev);

    if (avail_chan > ARRAY_SIZE(unipro_dma.dma_channels)) {
        avail_chan = ARRAY_SIZE(unipro_dma.dma_channels);
    }

    if (tsb_get_rev_id() != tsb_rev_es2) {
        dst_device = DEVICE_DMA_DEV_UNIPRO;

        if (device_atabl_req_free_count(unipro_dma.atabl_dev) < avail_chan) {
            device_close(unipro_dma.dev);
            device_close(unipro_dma.atabl_dev);
            return -ENODEV;
        }
    }

    for (i = 0; i < avail_chan; i++) {
        struct device_dma_params chan_params = {
                .src_dev = DEVICE_DMA_DEV_MEM,
                .src_devid = 0,
                .src_inc_options = DEVICE_DMA_INC_AUTO,
                .dst_dev = dst_device,
                .dst_devid = 0,
                .dst_inc_options = DEVICE_DMA_INC_AUTO,
                .transfer_size = DEVICE_DMA_TRANSFER_SIZE_64,
                .burst_len = DEVICE_DMA_BURST_LEN_16,
                .swap = DEVICE_DMA_SWAP_SIZE_NONE,
        };

        if (tsb_get_rev_id() != tsb_rev_es2) {
            if (device_atabl_req_alloc(unipro_dma.atabl_dev,
                                       &unipro_dma.dma_channels[i].req)) {
                break;
            }

            chan_params.dst_devid = device_atabl_req_to_peripheral_id(
                                        unipro_dma.atabl_dev,
                                        unipro_dma.dma_channels[i].req);
        }

        device_dma_chan_alloc(unipro_dma.dev, &chan_params,
                              &unipro_dma.dma_channels[i].chan);

        if (unipro_dma.dma_channels[i].chan == NULL) {
            lowsyslog("unipro: couldn't allocate all %u requested channel(s)\n",
                    ARRAY_SIZE(unipro_dma.dma_channels));
            break;
        }

        unipro_dma.dma_channels[i].cportid = 0xFFFF;
        unipro_dma.max_channel++;
    }

    if (unipro_dma.max_channel <= 0) {
        lowsyslog("unipro: couldn't allocate a single DMA channel\n");
        retval = -ENODEV;
        goto error_no_channel;
    }

    lowsyslog("unipro: %d DMA channel(s) allocated\n", unipro_dma.max_channel);

    retval = pthread_create(&worker.thread, NULL, unipro_tx_worker, NULL);
    if (retval) {
        /* pthread_create() returns the error number directly; errno is
         * not set. Negate it to match the negative-errno convention used
         * by the rest of this function. */
        lldbg("Failed to create worker thread: %s.\n", strerror(retval));
        retval = -retval;
        goto error_worker_create;
    }

    return 0;

error_worker_create:

    for (i = 0; i < unipro_dma.max_channel; i++) {
        if (tsb_get_rev_id() != tsb_rev_es2) {
            device_atabl_req_free(unipro_dma.atabl_dev,
                                  &unipro_dma.dma_channels[i].req);
        }

        device_dma_chan_free(unipro_dma.dev, &unipro_dma.dma_channels[i]);
    }

    unipro_dma.max_channel = 0;

error_no_channel:
    if (tsb_get_rev_id() != tsb_rev_es2) {
        device_close(unipro_dma.atabl_dev);
        unipro_dma.atabl_dev = NULL;
    }

    device_close(unipro_dma.dev);
    unipro_dma.dev = NULL;

    return retval;
}
Example #7
static int unipro_dma_xfer(struct unipro_xfer_descriptor *desc,
                           struct dma_channel *channel)
{
    int retval;
    size_t xfer_len;
    void *cport_buf;
    void *xfer_buf;
    struct device_dma_op *dma_op = NULL;

    if (tsb_get_rev_id() == tsb_rev_es2) {
        xfer_len = unipro_get_tx_free_buffer_space(desc->cport);
        if (!xfer_len)
            return -ENOSPC;

        xfer_len = MIN(desc->len - desc->data_offset, xfer_len);
    } else {
        DEBUGASSERT(desc->data_offset == 0);

        xfer_len = desc->len;
    }

    desc->channel = channel;
    retval = device_dma_op_alloc(unipro_dma.dev, 1, 0, &dma_op);
    if (retval != OK) {
        lowsyslog("unipro: failed allocate a DMA op, retval = %d.\n", retval);
        return retval;
    }

    dma_op->callback = (void *) unipro_dma_tx_callback;
    dma_op->callback_arg = desc;
    dma_op->callback_events = DEVICE_DMA_CALLBACK_EVENT_COMPLETE;
    if (tsb_get_rev_id() != tsb_rev_es2) {
        dma_op->callback_events |= DEVICE_DMA_CALLBACK_EVENT_START;
    }
    dma_op->sg_count = 1;
    dma_op->sg[0].len = xfer_len;

    DBG_UNIPRO("xfer: chan=%u, len=%zu\n", channel->id, xfer_len);

    cport_buf = desc->cport->tx_buf;
    xfer_buf = (void*) desc->data;

    /* resuming a paused xfer */
    if (desc->data_offset != 0) {
        cport_buf = (char*) cport_buf + sizeof(uint64_t); /* skip the first DWORD */

        /* move buffer offset to the beginning of the remaining bytes to xfer */
        xfer_buf = (char*) xfer_buf + desc->data_offset;
    }

    dma_op->sg[0].src_addr = (off_t) xfer_buf;
    dma_op->sg[0].dst_addr = (off_t) cport_buf;

    desc->data_offset += xfer_len;

    retval = device_dma_enqueue(unipro_dma.dev, channel->chan, dma_op);
    if (retval) {
        lowsyslog("unipro: failed to start DMA transfer: %d\n", retval);
        /* roll back: nothing was queued for this chunk */
        desc->data_offset -= xfer_len;
        device_dma_op_free(unipro_dma.dev, dma_op);
        return retval;
    }

    return 0;
}
Example #8
static int unipro_dma_tx_callback(struct device *dev, void *chan,
        struct device_dma_op *op, unsigned int event, void *arg)
{
    struct unipro_xfer_descriptor *desc = arg;
    int retval = OK;

    if ((event & DEVICE_DMA_CALLBACK_EVENT_START) &&
        (tsb_get_rev_id() != tsb_rev_es2)) {
        int req_activated = 0;
        struct dma_channel *desc_chan = desc->channel;

        if (desc_chan->cportid != 0xFFFF) {
            req_activated = device_atabl_req_is_activated(unipro_dma.atabl_dev,
                                                          desc_chan->req);
        }
        if (req_activated != 0) {
            device_atabl_deactivate_req(unipro_dma.atabl_dev,
                                        desc_chan->req);
        }

        if (desc_chan->cportid != desc->cport->cportid) {
            if (desc_chan->cportid != 0xFFFF) {
                device_atabl_disconnect_cport_from_req(unipro_dma.atabl_dev,
                        desc_chan->req);
                desc_chan->cportid = 0xFFFF;
            }

            retval = device_atabl_connect_cport_to_req(unipro_dma.atabl_dev,
                             desc->cport->cportid, desc_chan->req);
            if (retval != OK) {
                lldbg("Error: Failed to connect cport to REQn\n");
            }
        }
        retval = device_atabl_activate_req(unipro_dma.atabl_dev,
                                           desc_chan->req);

        if (retval) {
            lldbg("Error: Failed to activate cport %d on REQn\n",
                  desc->cport->cportid);
            return retval;
        } else {
            desc_chan->cportid = desc->cport->cportid;
        }
    }

    if (event & DEVICE_DMA_CALLBACK_EVENT_COMPLETE) {
        if (desc->data_offset >= desc->len) {
            struct dma_channel *desc_chan = desc->channel;

            unipro_dma_tx_set_eom_flag(desc->cport);

            list_del(&desc->list);
            device_dma_op_free(unipro_dma.dev, op);

            if (desc->callback != NULL) {
                desc->callback(0, desc->data, desc->priv);
            }

            if (tsb_get_rev_id() != tsb_rev_es2) {
                device_atabl_transfer_completed(unipro_dma.atabl_dev,
                                                desc_chan->req);
            }

            unipro_xfer_dequeue_descriptor(desc);
        } else {
            desc->channel = NULL;

            sem_post(&worker.tx_fifo_lock);
        }
    }

    return retval;
}
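On non-es2 parts the START branch above re-binds a DMA channel's ATABL req whenever the channel is reused for a different CPort; the same logic can be factored into a helper for readability. A sketch using only the device_atabl_* calls that appear in the callback (the helper name is hypothetical, and activation is left to the caller):

/* Hypothetical helper: ensure chan->req is connected to cportid,
 * disconnecting a stale binding first. 0xFFFF marks "no CPort bound",
 * as in unipro_tx_init() above. */
static int unipro_dma_bind_req(struct dma_channel *chan, uint16_t cportid)
{
    int rc;

    if (chan->cportid == cportid) {
        return OK;
    }

    if (chan->cportid != 0xFFFF) {
        device_atabl_disconnect_cport_from_req(unipro_dma.atabl_dev,
                                               chan->req);
        chan->cportid = 0xFFFF;
    }

    rc = device_atabl_connect_cport_to_req(unipro_dma.atabl_dev, cportid,
                                           chan->req);
    if (rc != OK) {
        return rc;
    }

    chan->cportid = cportid;
    return OK;
}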
Example #9
int tsb_unipro_set_init_status(uint32_t val)
{
    if (tsb_get_rev_id() == tsb_rev_es2)
        return es2_tsb_unipro_set_init_status(val);
    return es3_tsb_unipro_set_init_status(val);
}
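Every example above gates on tsb_get_rev_id() at the call site. One way to concentrate those checks, sketched here rather than taken from the tree (the ops-table names are assumptions), is to resolve a per-revision ops table once at boot and call through it afterwards:

/* Hypothetical dispatch table: pick the es2 or es3 implementation once,
 * then call rev_ops->set_init_status() instead of re-checking the
 * revision on every call. */
struct tsb_unipro_rev_ops {
    int (*set_init_status)(uint32_t val);
};

static const struct tsb_unipro_rev_ops *rev_ops;

void tsb_unipro_rev_ops_init(void)
{
    static const struct tsb_unipro_rev_ops es2_ops = {
        .set_init_status = es2_tsb_unipro_set_init_status,
    };
    static const struct tsb_unipro_rev_ops es3_ops = {
        .set_init_status = es3_tsb_unipro_set_init_status,
    };

    rev_ops = (tsb_get_rev_id() == tsb_rev_es2) ? &es2_ops : &es3_ops;
}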