/**
 * @brief Allocate the per-CPort driver table.
 *
 * On allocation failure the table is simply left unset.
 */
void unipro_init(void)
{
    size_t table_size = sizeof(struct unipro_driver *) * unipro_cport_count();

    g_drvs = zalloc(table_size);
    if (g_drvs == NULL) {
        return;
    }
}
/**
 * @brief Worker thread that drains pending TX buffers on all CPorts.
 *
 * Sleeps on the TX fifo semaphore until work is posted, then repeatedly
 * sweeps every CPort until no partial sends remain, and goes back to
 * sleep until new data is available.
 *
 * @param data unused thread argument
 * @return NULL (never reached in normal operation; satisfies pthread API)
 */
static void *unipro_tx_worker(void *data)
{
    unsigned int cport_count = unipro_cport_count();
    int i;
    bool retry;

    while (1) {
        /* Block until a buffer is pending on any CPort. */
        sem_wait(&worker.tx_fifo_lock);

        do {
            retry = false;

            /* Sweep all CPorts, sending any pending buffers. */
            for (i = 0; i < cport_count; i++) {
                if (unipro_send_tx_buffer(cport_handle(i)) == -EBUSY) {
                    /*
                     * Buffer only partially sent; revisit this CPort on
                     * the next sweep for the remaining part.
                     */
                    retry = true;
                }
            }
            /* Exit once every CPort's current pending buffer is sent. */
        } while (retry);
    }

    return NULL;
}
/**
 * @brief USB vendor request: report this bridge's CPort count to the host.
 *
 * Writes the CPort count into the host-supplied buffer as a little-endian
 * 16-bit value.
 *
 * @param dev   USB device (unused)
 * @param req   vendor request id (unused)
 * @param index request wIndex (unused)
 * @param value request wValue (unused)
 * @param buf   response buffer supplied by the USB stack
 * @param len   capacity of @a buf in bytes (host controlled)
 * @return number of bytes written on success, -EINVAL on a short/NULL buffer
 */
static int cport_count_vendor_request_in(struct usbdev_s *dev, uint8_t req,
                                         uint16_t index, uint16_t value,
                                         void *buf, uint16_t len)
{
    uint16_t count = cpu_to_le16(unipro_cport_count());

    /* The transfer length comes from the (untrusted) host: reject short reads
     * instead of writing past the end of the buffer. */
    if (!buf || len < sizeof(count)) {
        return -EINVAL;
    }

    /* memcpy avoids an unaligned 16-bit store through the void pointer. */
    memcpy(buf, &count, sizeof(count));

    return sizeof(count);
}
/**
 * @brief Find the first TX descriptor ready to be sent.
 *
 * Scans the CPorts in ascending order and returns the head descriptor of
 * the first CPort that has queued work, no DMA channel already bound, and
 * free peer TX buffer space.
 *
 * @return a sendable descriptor, or NULL when nothing is ready
 */
static struct unipro_xfer_descriptor *pick_tx_descriptor(void)
{
    int i;

    for (i = 0; i < unipro_cport_count(); i++) {
        struct cport *cport = cport_handle(i);
        struct unipro_xfer_descriptor *desc;

        if (cport == NULL || list_is_empty(&cport->tx_fifo)) {
            continue;
        }

        desc = containerof(cport->tx_fifo.next,
                           struct unipro_xfer_descriptor, list);

        /* Skip descriptors already bound to a DMA channel. */
        if (desc->channel) {
            continue;
        }

        /* Skip CPorts whose peer has no TX buffer space available. */
        if (!unipro_get_tx_free_buffer_space(desc->cport)) {
            continue;
        }

        return desc;
    }

    return NULL;
}
/**
 * @brief Look up the cport table entry for a CPort id.
 *
 * @param cportid CPort to resolve
 * @return pointer into the cport table, or NULL when the id is out of
 *         range or is one of the reserved display/camera (CDSI) CPorts
 */
struct cport *cport_handle(unsigned int cportid)
{
    if (cportid >= unipro_cport_count() ||
        cportid == CPORTID_CDSI0 ||
        cportid == CPORTID_CDSI1) {
        return NULL;
    }

    return &cporttable[cportid];
}
/**
 * @brief Bring up the UniPro backend and register our driver on its CPorts.
 *
 * The display and camera CPorts (CDSI0/CDSI1) are already owned elsewhere
 * and are skipped.
 */
static void unipro_backend_init(void)
{
    /* The CPort count never changes; fetch it once instead of calling
     * unipro_cport_count() on every loop iteration (matches the other
     * unipro_backend_init variant). */
    unsigned int cport_count = unipro_cport_count();
    int i;

    /* unipro_init() will initialize any non-display, non-camera CPorts */
    unipro_init();

    /* Now register a driver for those CPorts */
    for (i = 0; i < cport_count; i++) {
        /* These cports are already allocated for display and camera */
        if (i == CPORTID_CDSI0 || i == CPORTID_CDSI1)
            continue;

        unipro_driver_register(&unipro_driver, i);
    }
}
/**
 * @brief Bring up the UniPro backend and register our driver on its CPorts.
 *
 * The display and camera CPorts (CDSI0/CDSI1) are already claimed and
 * are skipped.
 */
static void unipro_backend_init(void)
{
    unsigned int num_cports = unipro_cport_count();
    int cportid;

    /* unipro_init{_*}() will initialize any non-display, non-camera CPorts */
    unipro_init_with_event_handler(apbridge_unipro_evt_handler);

    /* Now register a driver for those CPorts */
    for (cportid = 0; cportid < num_cports; cportid++) {
        if (cportid == CPORTID_CDSI0 || cportid == CPORTID_CDSI1) {
            /* Already allocated for display and camera. */
            continue;
        }

        unipro_driver_register(&unipro_driver, cportid);
    }
}
/**
 * @brief Pick the next TX descriptor to service, round-robin from @a cportid.
 *
 * Visits every CPort exactly once, starting at @a cportid and wrapping
 * around, and returns the head descriptor of the first CPort that has
 * queued work, no DMA channel already attached, and free peer TX buffer
 * space.
 *
 * NOTE(review): unipro_flush_cport() is invoked for a pending_reset CPort
 * on both the empty and non-empty paths, and on the non-empty path the
 * head descriptor is read from tx_fifo *after* the flush — presumably the
 * flush leaves at least the list head consistent; confirm against
 * unipro_flush_cport() before reordering these checks.
 *
 * @param cportid CPort id to start the scan from (round-robin fairness)
 * @return a sendable descriptor, or NULL when no CPort is ready
 */
static struct unipro_xfer_descriptor *pick_tx_descriptor(unsigned int cportid)
{
    struct unipro_xfer_descriptor *desc;
    unsigned int cport_count = unipro_cport_count();
    int i;

    /* One full lap over all CPorts, wrapping cportid modulo the count. */
    for (i = 0; i < cport_count; i++, cportid++) {
        struct cport *cport;

        cportid = cportid % cport_count;
        cport = cport_handle(cportid);
        if (!cport)
            continue;

        if (list_is_empty(&cport->tx_fifo)) {
            /* Nothing queued; still honor a pending reset before moving on. */
            if (cport->pending_reset) {
                unipro_flush_cport(cport);
            }
            continue;
        }

        /* Work is queued; flush first if a reset is pending. */
        if (cport->pending_reset) {
            unipro_flush_cport(cport);
        }

        desc = containerof(cport->tx_fifo.next,
                           struct unipro_xfer_descriptor, list);

        /* Skip descriptors already bound to a DMA channel. */
        if (desc->channel)
            continue;

        /* Skip CPorts whose peer has no TX buffer space left. */
        if (!unipro_get_tx_free_buffer_space(desc->cport))
            continue;

        return desc;
    }

    return NULL;
}
/**
 * @brief Initialize the UniPro TX path: DMA channels and the worker thread.
 *
 * Opens the DMA device (and, on non-ES2 silicon, the ATABL handshake
 * device), allocates as many DMA channels as are available (capped by the
 * local channel table), and spawns the TX worker thread. On any failure
 * every resource acquired so far is released via the goto cleanup chain.
 *
 * NOTE(review): the error_worker_create loop passes
 * &unipro_dma.dma_channels[i] (the wrapper struct) to
 * device_dma_chan_free(), while allocation filled .chan — verify the
 * expected argument against the DMA driver's API.
 *
 * @return 0 on success, -ENODEV (or pthread_create's error code) on failure
 */
int unipro_tx_init(void)
{
    int i;
    int retval;
    int avail_chan = 0;
    enum device_dma_dev dst_device = DEVICE_DMA_DEV_MEM;

    /* Worker sleeps on tx_fifo_lock until TX work is posted. */
    sem_init(&worker.tx_fifo_lock, 0, 0);
    sem_init(&unipro_dma.dma_channel_lock, 0, 0);

    unipro_dma.dev = device_open(DEVICE_TYPE_DMA_HW, 0);
    if (!unipro_dma.dev) {
        lldbg("Failed to open DMA driver.\n");
        return -ENODEV;
    }

    /* Extra setup needed on every revision except ES2. */
    if (tsb_get_rev_id() != tsb_rev_es2) {
        /*
         * Setup HW hand shake threshold.
         */
        for (i = 0; i < unipro_cport_count(); i++) {
            uint32_t offset_value =
                unipro_read(REG_TX_BUFFER_SPACE_OFFSET_REG(i));

            /* Threshold magic differs with the DMA write-barrier config. */
#ifdef CONFIG_ARCH_UNIPROTX_DMA_WMB
            unipro_write(REG_TX_BUFFER_SPACE_OFFSET_REG(i),
                         offset_value | (0x10 << 8));
#else
            unipro_write(REG_TX_BUFFER_SPACE_OFFSET_REG(i),
                         offset_value | (0x20 << 8));
#endif
        }

        /*
         * Open Atabl driver.
         */
        unipro_dma.atabl_dev = device_open(DEVICE_TYPE_ATABL_HW, 0);
        if (!unipro_dma.atabl_dev) {
            lldbg("Failed to open ATABL driver.\n");
            device_close(unipro_dma.dev);
            return -ENODEV;
        }
    }

    unipro_dma.max_channel = 0;
    list_init(&unipro_dma.free_channel_list);

    /* Clamp the channel count to the size of our local table. */
    avail_chan = device_dma_chan_free_count(unipro_dma.dev);
    if (avail_chan > ARRAY_SIZE(unipro_dma.dma_channels)) {
        avail_chan = ARRAY_SIZE(unipro_dma.dma_channels);
    }

    if (tsb_get_rev_id() != tsb_rev_es2) {
        /* Non-ES2 DMAs straight into UniPro and needs one ATABL request
         * per channel; bail out if not enough are available. */
        dst_device = DEVICE_DMA_DEV_UNIPRO;
        if (device_atabl_req_free_count(unipro_dma.atabl_dev) < avail_chan) {
            device_close(unipro_dma.dev);
            device_close(unipro_dma.atabl_dev);
            return -ENODEV;
        }
    }

    /* Allocate the DMA channels; stop early on the first failure. */
    for (i = 0; i < avail_chan; i++) {
        struct device_dma_params chan_params = {
            .src_dev = DEVICE_DMA_DEV_MEM,
            .src_devid = 0,
            .src_inc_options = DEVICE_DMA_INC_AUTO,
            .dst_dev = dst_device,
            .dst_devid = 0,
            .dst_inc_options = DEVICE_DMA_INC_AUTO,
            .transfer_size = DEVICE_DMA_TRANSFER_SIZE_64,
            .burst_len = DEVICE_DMA_BURST_LEN_16,
            .swap = DEVICE_DMA_SWAP_SIZE_NONE,
        };

        if (tsb_get_rev_id() != tsb_rev_es2) {
            if (device_atabl_req_alloc(unipro_dma.atabl_dev,
                                       &unipro_dma.dma_channels[i].req)) {
                break;
            }

            chan_params.dst_devid = device_atabl_req_to_peripheral_id(
                    unipro_dma.atabl_dev,
                    unipro_dma.dma_channels[i].req);
        }

        device_dma_chan_alloc(unipro_dma.dev, &chan_params,
                              &unipro_dma.dma_channels[i].chan);
        if (unipro_dma.dma_channels[i].chan == NULL) {
            lowsyslog("unipro: couldn't allocate all %u requested channel(s)\n",
                      ARRAY_SIZE(unipro_dma.dma_channels));
            break;
        }

        /* 0xFFFF marks the channel as not bound to any CPort yet. */
        unipro_dma.dma_channels[i].cportid = 0xFFFF;
        unipro_dma.max_channel++;
    }

    if (unipro_dma.max_channel <= 0) {
        lowsyslog("unipro: couldn't allocate a single DMA channel\n");
        retval = -ENODEV;
        goto error_no_channel;
    }

    lowsyslog("unipro: %d DMA channel(s) allocated\n",
              unipro_dma.max_channel);

    retval = pthread_create(&worker.thread, NULL, unipro_tx_worker, NULL);
    if (retval) {
        lldbg("Failed to create worker thread: %s.\n", strerror(errno));
        goto error_worker_create;
    }

    return 0;

error_worker_create:
    /* Undo every successful channel (and ATABL request) allocation. */
    for (i = 0; i < unipro_dma.max_channel; i++) {
        if (tsb_get_rev_id() != tsb_rev_es2) {
            device_atabl_req_free(unipro_dma.atabl_dev,
                                  &unipro_dma.dma_channels[i].req);
        }

        device_dma_chan_free(unipro_dma.dev, &unipro_dma.dma_channels[i]);
    }
    unipro_dma.max_channel = 0;

error_no_channel:
    if (tsb_get_rev_id() != tsb_rev_es2) {
        device_close(unipro_dma.atabl_dev);
        unipro_dma.atabl_dev = NULL;
    }

    device_close(unipro_dma.dev);
    unipro_dma.dev = NULL;

    return retval;
}
/**
 * @brief Initialize the UniPro core
 *
 * Allocates the cport table, brings up the TX path, programs the transfer
 * mode and per-CPort registers, and finally enables the UniPro interrupt.
 * On TX init failure the cport table is freed and the function returns
 * silently (no error is reported to the caller).
 */
void unipro_init(void)
{
    unsigned int i;
    int retval;
    struct cport *cport;

    /*
     * Compute and cache the number of CPorts that this bridge has, for use
     * by the functions in this source file. The value does not change.
     */
    if (cport_count == 0)
        cport_count = unipro_cport_count();

    cporttable = zalloc(sizeof(struct cport) * cport_count);
    if (!cporttable) {
        return;
    }

    retval = unipro_tx_init();
    if (retval) {
        /* TX bring-up failed: release the table so nothing points at it. */
        free(cporttable);
        cporttable = NULL;
        return;
    }

    /* Set up the software state of every CPort before touching hardware. */
    for (i = 0; i < cport_count; i++) {
        cport = &cporttable[i];
        cport->tx_buf = CPORT_TX_BUF(i);
        cport->cportid = i;
        cport->connected = 0;
        list_init(&cport->tx_fifo);
    }

    unipro_write(LUP_INT_EN, 0x1);

    /*
     * Set transfer mode 2 on all cports
     * Receiver choses address for received message
     * Header is delivered transparently to receiver (and used to carry the first eight
     * L4 payload bytes)
     */
    DEBUGASSERT(TRANSFER_MODE == 2);
    configure_transfer_mode(TRANSFER_MODE);

    /*
     * Initialize cports.
     */
    /* Keep UniPro interrupts masked while per-CPort init runs. */
    unipro_write(UNIPRO_INT_EN, 0x0);
    for (i = 0; i < cport_count; i++) {
        unipro_init_cport(i);
    }
    unipro_write(UNIPRO_INT_EN, 0x1);

    /*
     * Disable FCT transmission. See ENG-376.
     */
    unipro_write(CPB_RX_E2EFC_EN_0, 0x0);
    if (tsb_get_product_id() == tsb_pid_apbridge) {
        unipro_write(CPB_RX_E2EFC_EN_1, 0x0);
    }

    irq_attach(TSB_IRQ_UNIPRO, irq_unipro);
    up_enable_irq(TSB_IRQ_UNIPRO);

#ifdef UNIPRO_DEBUG
    unipro_info();
#endif
    lldbg("UniPro enabled\n");
}