Example #1
int
dmac_init(verinet_t *v)
{
    if (v->dmacInited) {
	return 0;
    }

    v->dmacListening =
        sal_sem_create("dmac listener", sal_sem_BINARY, 0);
    if (v->dmacListening == NULL) {
        printk("ERROR: could not create DMAC listener semaphore\n");
        return -1;
    }

    printk("Starting DMA service...\n");

    v->dmacListener = sal_thread_create("DMA-listener",
					SAL_THREAD_STKSZ, 100,
					dmac_listener, v);
    if (SAL_THREAD_ERROR == v->dmacListener) {
        printk("ERROR: could not create DMAC task: %s!\n", strerror(errno));
        sal_sem_destroy(v->dmacListening);
        return -2;
    }

    sal_sem_take(v->dmacListening, sal_sem_FOREVER);  /* Wait for listen() */
    sal_sem_destroy(v->dmacListening);

    v->dmacInited = 1;

    return 0;
}
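
A minimal sketch of the other half of this handshake, assuming the dmac_listener thread (whose body is not shown here) gives the binary semaphore once its listen() call has completed:

/* Hypothetical listener body: the only assumption taken from dmac_init()
 * above is that dmac_listener gives v->dmacListening after listen(). */
static void
dmac_listener(void *arg)
{
    verinet_t *v = (verinet_t *)arg;

    /* ... create the socket, bind() and listen() (omitted) ... */

    sal_sem_give(v->dmacListening);    /* unblock dmac_init() */

    for (;;) {
        /* ... accept() and service DMA requests (omitted) ... */
    }
}
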
Example #2
File: netdrv.c Project: ariavie/bcm
void
rx_pkt_thread(void *param)
{
    int unit = PTR_TO_INT(param);
    struct _rxctrl *prx_control = bcm_get_rx_control_ptr(unit);
    bcm_pkt_t pPkt;

    PRINTF_DEBUG(("RX:  Packet thread starting\n"));

    /* Sleep on sem */
    while (1) {
        rx_thread_dv_check(unit);

        prx_control->pkt_notify_given = FALSE;

        /* Service as many packets as possible */
        while (!RXQ_EMPTY(&g_rxq_ctrl)) {
            RXQ_DEQUEUE(&g_rxq_ctrl, &pPkt);
            NetdrvSendToEnd(&pPkt);
        }

        sal_sem_take(prx_control->pkt_notify, sal_sem_FOREVER);
    }

    prx_control->thread_exit_complete = TRUE;
    PRINTF_DEBUG(("RX: Packet thread exiting\n"));
    sal_thread_exit(0);

    return;
}
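
A minimal sketch of the producer side, assuming an RX callback enqueues the packet and then signals pkt_notify; rx_pkt_enqueue and RXQ_ENQUEUE are illustrative names, and only the fields used by rx_pkt_thread() above are referenced:

/* Hypothetical producer: enqueue a packet and wake rx_pkt_thread().
 * pkt_notify_given keeps the binary semaphore from being given more than
 * once per wakeup. */
static void
rx_pkt_enqueue(int unit, bcm_pkt_t *pkt)
{
    struct _rxctrl *prx_control = bcm_get_rx_control_ptr(unit);

    RXQ_ENQUEUE(&g_rxq_ctrl, pkt);    /* illustrative enqueue macro */

    if (!prx_control->pkt_notify_given) {
        prx_control->pkt_notify_given = TRUE;
        sal_sem_give(prx_control->pkt_notify);
    }
}
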
Example #3
int
_bcm_ptp_sem_take(_bcm_ptp_sem_t b, int usec)
{
    int rv = -1;
    sal_usecs_t end_time = sal_time_usecs() + usec;
    int32 wait_time = usec;

    while ((rv != 0) && (wait_time > 0)) {
        rv = sal_sem_take(b, wait_time);
        wait_time = end_time - sal_time_usecs();
    }

    return rv;
}
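
A usage sketch, assuming a caller that waits up to one second for a response semaphore; because the helper re-takes with the remaining time, early returns from sal_sem_take() do not extend the overall deadline (example_wait_for_response is an illustrative name):

/* Hypothetical caller of _bcm_ptp_sem_take(). */
int
example_wait_for_response(_bcm_ptp_sem_t response_sem)
{
    if (_bcm_ptp_sem_take(response_sem, 1000000) != 0) {
        return -1;    /* timed out or failed */
    }
    return 0;
}
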
Example #4
File: mbox.c Project: ariavie/bcm
/*
 * Function:
 *      _bcm_mbox_rx_response_get
 * Purpose:
 *      Get Rx response data for a PTP clock.
 * Parameters:
 *      unit       - (IN)  Unit number.
 *      node_num   - (IN)  Node number.
 *      usec       - (IN)  Semaphore timeout (usec).
 *      data       - (OUT) Response data.
 *      data_len   - (OUT) Response data size (octets).
 * Returns:
 *      BCM_E_XXX
 * Notes:
 */
int
_bcm_mbox_rx_response_get(
    int unit,
    int node_num,
    int usec,
    uint8 **data,
    int *data_len)
{
#if defined(BCM_CMICM_SUPPORT)
    int rv = BCM_E_UNAVAIL;
    int spl;
    sal_usecs_t expiration_time = sal_time_usecs() + usec;

    /* soc_cm_print("cmic_rx_get\n"); */

    rv = BCM_E_FAIL;
    /* ptp_printf("Await resp @ %d\n", (int)sal_time_usecs()); */

    while (BCM_FAILURE(rv) && (int32) (sal_time_usecs() - expiration_time) < 0) {
        rv = sal_sem_take(mbox_info.unit_state[unit].response_ready, usec);
    }
    if (BCM_FAILURE(rv)) {
        SOC_DEBUG_PRINT((DK_ERR | DK_VERBOSE, "Failed management Tx to ToP\n"));
        _MBOX_ERROR_FUNC("_bcm_ptp_sem_take()");
        return rv;
    }

    /* Lock. */
    spl = sal_splhi();

    *data     = mbox_info.unit_state[unit].response_data;
    *data_len = mbox_info.unit_state[unit].response_len;

    mbox_info.unit_state[unit].response_data = 0;

    /* Unlock. */
    sal_spl(spl);

    return rv;

#else  /* BCM_CMICM_SUPPORT */
    return BCM_E_UNAVAIL;
#endif /* BCM_CMICM_SUPPORT */
}
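
A minimal sketch of the Rx side this function waits on, assuming the receive path stashes the response under the same interrupt lock and then gives response_ready (example_rx_response_post is an illustrative name):

#if defined(BCM_CMICM_SUPPORT)
/* Hypothetical Rx-side post: pairs with _bcm_mbox_rx_response_get() above. */
static void
example_rx_response_post(int unit, uint8 *data, int len)
{
    int spl = sal_splhi();    /* lock out interrupts, as in the getter */

    mbox_info.unit_state[unit].response_data = data;
    mbox_info.unit_state[unit].response_len  = len;

    sal_spl(spl);             /* unlock */

    sal_sem_give(mbox_info.unit_state[unit].response_ready);
}
#endif /* BCM_CMICM_SUPPORT */
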
Example #5
STATIC void
rx_free_pkts(void *cookie)
{
    uint32 *to_free;
    uint32 *next;
    int unit = PTR_TO_INT(cookie);

    while (TRUE) {
        sal_sem_take(pkts_are_ready[unit], sal_sem_FOREVER);
        sal_mutex_take(pkt_queue_lock[unit], sal_mutex_FOREVER);
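        /* Detach the whole free list while holding the lock; free it after releasing. */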
        to_free = (uint32 *)pkt_free_queue[unit];
        pkt_free_queue[unit] = NULL;
        rx_pkt_count[unit] = 0;
        sal_mutex_give(pkt_queue_lock[unit]);
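        /* Each packet's first word stores the pointer to the next packet in the list. */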
        while (to_free != NULL) {
            next = *(uint32 **)to_free;
            bcm_rx_free(unit, to_free);
            to_free = next;
        }
    }
}
Example #6
/*
 * Function: _ioctl
 *
 * Purpose:
 *    Handle IOCTL commands from user mode.
 * Parameters:
 *    cmd - IOCTL cmd
 *    arg - IOCTL parameters
 * Returns:
 *    0 on success, <0 on error
 */
static int 
_ioctl(unsigned int cmd, unsigned long arg)
{
    lubde_ioctl_t io;
    uint32 pbase, size;
    const ibde_dev_t *bde_dev;
    int inst_id;
    bde_inst_resource_t *res;

    if (copy_from_user(&io, (void *)arg, sizeof(io))) {
        return -EFAULT;
    }
  
    io.rc = LUBDE_SUCCESS;
  
    switch(cmd) {
    case LUBDE_VERSION:
        io.d0 = 0;
        break;
    case LUBDE_GET_NUM_DEVICES:

        io.d0 = user_bde->num_devices(io.dev);
        break;
    case LUBDE_GET_DEVICE:
        bde_dev = user_bde->get_dev(io.dev);
        if (bde_dev) {
            io.d0 = bde_dev->device;
            io.d1 = bde_dev->rev;
            if (BDE_DEV_MEM_MAPPED(_devices[io.dev].dev_type)) {
                /* Get physical address to map */
                io.d2 = lkbde_get_dev_phys(io.dev);
                io.d3 = lkbde_get_dev_phys_hi(io.dev);
            }
        } else {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_GET_DEVICE_TYPE:
        io.d0 = _devices[io.dev].dev_type;
        break;
    case LUBDE_GET_BUS_FEATURES:
        user_bde->pci_bus_features(io.dev, (int *) &io.d0, (int *) &io.d1,
                                   (int *) &io.d2);
        break;
    case LUBDE_PCI_CONFIG_PUT32:
        if (_devices[io.dev].dev_type & BDE_PCI_DEV_TYPE) {
            user_bde->pci_conf_write(io.dev, io.d0, io.d1);
        } else {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_PCI_CONFIG_GET32:
        if (_devices[io.dev].dev_type & BDE_PCI_DEV_TYPE) {
            io.d0 = user_bde->pci_conf_read(io.dev, io.d0);
        } else {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_GET_DMA_INFO:
        inst_id = io.dev;
        if (_bde_multi_inst){
            _dma_resource_get(inst_id, &pbase, &size);
        } else {
            lkbde_get_dma_info(&pbase, &size);
        }
        io.d0 = pbase;
        io.d1 = size; 
        /* Optionally enable DMA mmap via /dev/linux-kernel-bde */
        io.d2 = USE_LINUX_BDE_MMAP;
        break;
    case LUBDE_ENABLE_INTERRUPTS:
        if (_devices[io.dev].dev_type & BDE_SWITCH_DEV_TYPE) {
            if (_devices[io.dev].isr && !_devices[io.dev].enabled) {
                user_bde->interrupt_connect(io.dev,
                                            _devices[io.dev].isr,
                                            _devices+io.dev);
                _devices[io.dev].enabled = 1;
            }
        } else {
            /* Process ethernet device interrupt */
            /* FIXME: for multiple chips */
            if (!_devices[io.dev].enabled) {
                user_bde->interrupt_connect(io.dev,
                                            (void(*)(void *))_ether_interrupt, 
                                            _devices+io.dev);
                _devices[io.dev].enabled = 1;
            }
        }
        break;
    case LUBDE_DISABLE_INTERRUPTS:
        if (_devices[io.dev].enabled) {
            user_bde->interrupt_disconnect(io.dev);
            _devices[io.dev].enabled = 0;
        }
        break;
    case LUBDE_WAIT_FOR_INTERRUPT:
        if (_devices[io.dev].dev_type & BDE_SWITCH_DEV_TYPE) {
            res = &_bde_inst_resource[_devices[io.dev].inst];
#ifdef BDE_LINUX_NON_INTERRUPTIBLE
            wait_event_timeout(res->intr_wq, 
                               atomic_read(&res->intr) != 0, 100);

#else
            wait_event_interruptible(res->intr_wq,
                                     atomic_read(&res->intr) != 0);
#endif
            /* 
             * Even if we get multiple interrupts, we 
             * only run the interrupt handler once.
             */
            atomic_set(&res->intr, 0);
        } else {
#ifdef BDE_LINUX_NON_INTERRUPTIBLE
            wait_event_timeout(_ether_interrupt_wq,     
                               atomic_read(&_ether_interrupt_has_taken_place) != 0, 100);
#else
            wait_event_interruptible(_ether_interrupt_wq,     
                                     atomic_read(&_ether_interrupt_has_taken_place) != 0);
#endif
            /* 
             * Even if we get multiple interrupts, we 
             * only run the interrupt handler once.
             */
            atomic_set(&_ether_interrupt_has_taken_place, 0);
        }
        break;
    case LUBDE_USLEEP:
        sal_usleep(io.d0);
        break;
    case LUBDE_UDELAY:
        sal_udelay(io.d0);
        break;
    case LUBDE_SEM_OP:
        switch (io.d0) {
        case LUBDE_SEM_OP_CREATE:
            io.p0 = (bde_kernel_addr_t)sal_sem_create("", io.d1, io.d2);
            break;
        case LUBDE_SEM_OP_DESTROY:
            sal_sem_destroy((sal_sem_t)io.p0);
            break;
        case LUBDE_SEM_OP_TAKE:
            io.rc = sal_sem_take((sal_sem_t)io.p0, io.d2);
            break;
        case LUBDE_SEM_OP_GIVE:
            io.rc = sal_sem_give((sal_sem_t)io.p0);
            break;
        default:
            io.rc = LUBDE_FAIL;
            break;
        }
        break;
    case LUBDE_WRITE_IRQ_MASK:
        io.rc = lkbde_irq_mask_set(io.dev, io.d0, io.d1, 0);
        break;
    case LUBDE_SPI_READ_REG:
        if (user_bde->spi_read(io.dev, io.d0, io.dx.buf, io.d1) == -1) {
            io.rc = LUBDE_FAIL;
        } 
        break;
    case LUBDE_SPI_WRITE_REG:
        if (user_bde->spi_write(io.dev, io.d0, io.dx.buf, io.d1) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_READ_REG_16BIT_BUS:
        io.d1 = user_bde->read(io.dev, io.d0);
        break;
    case LUBDE_WRITE_REG_16BIT_BUS:
        io.rc = user_bde->write(io.dev, io.d0, io.d1);
        break;
#if (defined(BCM_PETRA_SUPPORT) || defined(BCM_DFE_SUPPORT))
    case LUBDE_CPU_WRITE_REG:
    {
        if (lkbde_cpu_write(io.dev, io.d0, (uint32*)io.dx.buf) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    }
    case LUBDE_CPU_READ_REG:
    {
        if (lkbde_cpu_read(io.dev, io.d0, (uint32*)io.dx.buf) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    }
    case LUBDE_CPU_PCI_REGISTER:
    {
        if (lkbde_cpu_pci_register(io.dev) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    }
#endif
    case LUBDE_DEV_RESOURCE:
        bde_dev = user_bde->get_dev(io.dev);
        if (bde_dev) {
            if (BDE_DEV_MEM_MAPPED(_devices[io.dev].dev_type)) {
                /* Get physical address to map */
                io.rc = lkbde_get_dev_resource(io.dev, io.d0,
                                               &io.d1, &io.d2, &io.d3);
            }
        } else {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_IPROC_READ_REG:
        io.d1 = user_bde->iproc_read(io.dev, io.d0);
        if (io.d1 == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_IPROC_WRITE_REG:
        if (user_bde->iproc_write(io.dev, io.d0, io.d1) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_ATTACH_INSTANCE:
        io.rc = _instance_attach(io.d0, io.d1);
        break;
    default:
        gprintk("Error: Invalid ioctl (%08x)\n", cmd);
        io.rc = LUBDE_FAIL;
        break;
    }
  
    if (copy_to_user((void *)arg, &io, sizeof(io))) {
        return -EFAULT;
    }

    return 0;
}
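
A hypothetical user-space sketch of the LUBDE_SEM_OP path above; the descriptor, header names, and the exact lubde_ioctl_t layout are assumptions, and only the fields visible in the handler (d0, d2, p0, rc) are touched:

#include <string.h>
#include <sys/ioctl.h>

/* fd is assumed to be an open descriptor on the user BDE device node. */
static int
example_kernel_sem_take(int fd, bde_kernel_addr_t sem, int usec)
{
    lubde_ioctl_t io;

    memset(&io, 0, sizeof(io));
    io.d0 = LUBDE_SEM_OP_TAKE;    /* sub-operation dispatched in _ioctl() */
    io.p0 = sem;                  /* handle returned by LUBDE_SEM_OP_CREATE */
    io.d2 = usec;                 /* timeout passed to sal_sem_take() */

    if (ioctl(fd, LUBDE_SEM_OP, &io) < 0) {
        return -1;                /* ioctl transport failure */
    }
    return io.rc;                 /* sal_sem_take() result */
}
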
Example #7
/*
 * Function:
 *  soc_robo_dos_monitor_thread
 * Purpose:
 *      DOS event monitor thread
 * Parameters:
 *  unit     - unit number.
 * Returns:
 *  
 */
STATIC void
soc_robo_dos_monitor_thread(int unit)
{
    drv_robo_dos_monitor_t  *dm = drv_dm_control[unit];
    int     rv = SOC_E_NONE;
    int     interval = 0;
    uint32  events_bmp = 0, hwdos_enable_bmp = 0;
    
    dm->dm_thread = sal_thread_self();
    
    while ((interval = dm->interval) != 0) {
        DM_LOCK(unit);
        
        if (dm->err_cnt < DM_MAX_ERR_COUNT){
            /* Check HW DOS enable status: if no HW DOS feature is enabled,
             * stop the thread.
             *  - The HW DOS configuration may be modified by the API or a
             *      user application; this check keeps an idle thread from
             *      running when no HW DOS feature is enabled.
             *  - The thread is started again if any HW DOS feature is
             *      enabled through the switch API.
             */
            rv = DRV_DOS_EVENT_BITMAP_GET(unit, 
                    DRV_DOS_EVT_OP_ENABLED, &hwdos_enable_bmp);
            if (hwdos_enable_bmp == 0){
                /* No HW DOS is enabled, so stop the thread.
                 *  - Still finish this pass in case one or more DOS events
                 *      have already occurred.
                 */
                interval = sal_sem_FOREVER;
            }
            
            /* read HW DOS event */
            rv = DRV_DOS_EVENT_BITMAP_GET(unit, 
                    DRV_DOS_EVT_OP_STATUS, &events_bmp);
            if (rv){
                dm->err_cnt ++;
                if (dm->err_cnt == DM_MAX_ERR_COUNT){
                    soc_cm_debug(DK_WARN, 
                            "%s thread stopping after %d consecutive failures.\n",
                            dm->task_name, DM_MAX_ERR_COUNT);
                    /* generate an event action with THREAD error type.
                     *  - arg1 : indicate the error item.
                     *  - arg2 : indicate the error line.
                     *  - arg3 : indicate the error code.
                     */
                    soc_event_generate(unit, 
                            SOC_SWITCH_EVENT_THREAD_ERROR, 
                            SOC_SWITCH_EVENT_THREAD_HWDOS_MONITOR, 
                            __LINE__, rv);
                    dm->err_cnt = 0;
                    dm->last_dos_events = 0;
                    interval = sal_sem_FOREVER;
                }
            } else {
                if (events_bmp){
                    /* generate an event action and carry the DOS events in 
                     *  arg1.(arg2 and arg3 assigned 0 for no reference here)
                     */
                    soc_event_generate(unit, 
                            SOC_SWITCH_EVENT_DOS_ATTACK, 
                            events_bmp, 0, 0);
                    dm->last_dos_events = events_bmp;
                    soc_cm_debug(DK_VERBOSE, "Check DOS event bmp=0x%x\n", 
                            events_bmp);
                }
                dm->err_cnt = 0;
            }
        }

        DM_UNLOCK(unit);
        (void)sal_sem_take(dm->dm_sema, interval);
    }
    
    dm->dm_thread = NULL;
    sal_thread_exit(0);
}
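
A minimal sketch of how this monitor is assumed to be stopped: clear dm->interval and give dm->dm_sema so the sal_sem_take() at the bottom of the loop returns and the while condition terminates (example_dos_monitor_stop is an illustrative name):

STATIC void
example_dos_monitor_stop(int unit)
{
    drv_robo_dos_monitor_t *dm = drv_dm_control[unit];

    dm->interval = 0;
    sal_sem_give(dm->dm_sema);

    /* Callers may then poll dm->dm_thread for NULL before tearing down. */
}
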
Example #8
STATIC void
_bcm_report_fifo_dma_thread(void *unit_vp)
{
    int unit = PTR_TO_INT(unit_vp);
    _bcm_ft_report_ctrl_t *rctrl = _bcm_ft_report_ctrl[unit];
    _bcm_ft_report_cb_entry_t *cb_entry;
    bcm_regex_report_t data;
    int rv, entries_per_buf, interval, count, i, j, non_empty;
    int chan, entry_words, pending=0;
    void *host_buf = NULL;
    void *host_entry;
    uint32 dir, *buff_max, rval;
    uint8  overflow, timeout;
    soc_mem_t   ftmem;
    soc_control_t *soc = SOC_CONTROL(unit);
    int cmc = SOC_PCI_CMC(unit);
    bcm_regex_config_t config;

    chan = SOC_MEM_FIFO_DMA_CHANNEL_0;
    rv = bcm_esw_regex_config_get(unit, &config);
    if (BCM_FAILURE(rv)) {
        LOG_VERBOSE(BSL_LS_BCM_INTR,
                    (BSL_META_U(unit,
                                " failed to retrieve configuration, rv = %d\n"), rv));
        goto cleanup_exit;
    }
    entries_per_buf = config.report_buffer_size;

    LOG_VERBOSE(BSL_LS_BCM_INTR,
                (BSL_META_U(unit,
                            " starting _bcm_report_fifo_dma_thread\n")));

    ftmem = FT_EXPORT_FIFOm;
    entry_words = soc_mem_entry_words(unit, ftmem);
    host_buf = soc_cm_salloc(unit, entries_per_buf * entry_words * WORDS2BYTES(1),
                             "FT export fifo DMA Buffer");
    if (host_buf == NULL) {
        soc_event_generate(unit, SOC_SWITCH_EVENT_THREAD_ERROR,
                           SOC_SWITCH_EVENT_THREAD_REGEX_REPORT, __LINE__,
                           BCM_E_MEMORY);
        goto cleanup_exit;
    }

    rv = soc_mem_fifo_dma_start(unit, chan,
                                ftmem, MEM_BLOCK_ANY,
                                entries_per_buf, host_buf);
    if (BCM_FAILURE(rv)) {
        soc_event_generate(unit, SOC_SWITCH_EVENT_THREAD_ERROR,
                           SOC_SWITCH_EVENT_THREAD_REGEX_REPORT,
                           __LINE__, rv);
        LOG_VERBOSE(BSL_LS_BCM_INTR,
                    (BSL_META_U(unit,
                                " soc_mem_fifo_dma_start failed, rv = %d\n"), rv));
        goto cleanup_exit;
    }

    host_entry = host_buf;
    buff_max = (uint32 *)host_entry + (entries_per_buf * entry_words);

    while ((interval = rctrl->interval) > 0) {
        overflow = 0; timeout = 0;
        if (soc->ftreportIntrEnb) {
            soc_cmicm_intr0_enable(unit, IRQ_CMCx_FIFO_CH_DMA(chan));
            if (sal_sem_take(SOC_CONTROL(unit)->ftreportIntr, interval) < 0) {
                LOG_VERBOSE(BSL_LS_BCM_INTR,
                            (BSL_META_U(unit,
                                        " polling timeout ft_export_fifo=%d\n"), interval));
            } else {
                /* Disabling the interrupt (CHAN0) as the read process is underway */
                soc_cmicm_intr0_disable(unit, IRQ_CMCx_FIFO_CH_DMA(chan));
                LOG_VERBOSE(BSL_LS_BCM_INTR,
                            (BSL_META_U(unit,
                                        "woken up interval=%d\n"), interval));
                /* check for timeout or overflow and either process or continue */
                rval = soc_pci_read(unit,
                                    CMIC_CMCx_FIFO_CHy_RD_DMA_STAT_OFFSET(cmc, chan));
                overflow = soc_reg_field_get(unit, CMIC_CMC0_FIFO_CH0_RD_DMA_STATr,
                                             rval, HOSTMEM_OVERFLOWf);
                timeout = soc_reg_field_get(unit,
                                            CMIC_CMC0_FIFO_CH0_RD_DMA_STATr, rval, HOSTMEM_TIMEOUTf);
                overflow |= timeout ? 1 : 0;
            }
        } else {
            sal_usleep(interval);
        }

        if (rctrl->interval <= 0) {
            break;
        }

        /* reconcile the user registered callbacks. */
        for (i = 0; i < _BCM_FT_REPORT_MAX_CB; i++) {
            cb_entry = &rctrl->callback_entry[i];
            switch (cb_entry->state) {
                case _BCM_FT_REPORT_CALLBACK_STATE_REGISTERED:
                    cb_entry->state = _BCM_FT_REPORT_CALLBACK_STATE_ACTIVE;
                    break;
                case _BCM_FT_REPORT_CALLBACK_STATE_UNREGISTERED:
                    cb_entry->state = _BCM_FT_REPORT_CALLBACK_STATE_INVALID;
                    break;
                default:
                    break;
            }
        }

        non_empty = FALSE;
        do {
            rv = soc_mem_fifo_dma_get_num_entries(unit, chan, &count);
            if (SOC_SUCCESS(rv)) {
                non_empty = TRUE;
                for (i = 0; i < count; i++) {
                    rv = _bcm_ft_report_process_export_entry(unit, host_entry,
                                                             &data, &pending, &dir);
                    host_entry = (uint32 *)host_entry + entry_words;
                    /* handle roll over */
                    if ((uint32 *)host_entry >= buff_max) {
                        host_entry = host_buf;
                    }
                    for (j = 0; (j < _BCM_FT_REPORT_MAX_CB) && !pending && !rv; j++) {
                        cb_entry = &rctrl->callback_entry[j];
                        if ((cb_entry->state == _BCM_FT_REPORT_CALLBACK_STATE_ACTIVE) &&
                            ((cb_entry->reports & BCM_REGEX_REPORT_ALL) |
                             (data.flags & cb_entry->reports))) {
                            cb_entry->callback(unit, &data, cb_entry->userdata);
                        }
                    }
                }
                if (overflow) {
                    rval = 0;
                    soc_reg_field_set(unit, CMIC_CMC0_FIFO_CH0_RD_DMA_STAT_CLRr,
                                      &rval, HOSTMEM_OVERFLOWf, 1);
                    soc_reg_field_set(unit, CMIC_CMC0_FIFO_CH0_RD_DMA_STAT_CLRr,
                                      &rval, HOSTMEM_TIMEOUTf, 1);
                    soc_pci_write(unit,
                                  CMIC_CMCx_FIFO_CHy_RD_DMA_STAT_CLR_OFFSET(cmc, chan), rval);
                }
                (void)_soc_mem_sbus_fifo_dma_set_entries_read(unit, chan, i);
            } else {
                if (overflow) {
                    rval = 0;
                    soc_reg_field_set(unit, CMIC_CMC0_FIFO_CH0_RD_DMA_STAT_CLRr,
                                      &rval, HOSTMEM_OVERFLOWf, 1);
                    soc_reg_field_set(unit, CMIC_CMC0_FIFO_CH0_RD_DMA_STAT_CLRr,
                                      &rval, HOSTMEM_TIMEOUTf, 1);
                    soc_pci_write(unit,
                                  CMIC_CMCx_FIFO_CHy_RD_DMA_STAT_CLR_OFFSET(cmc, chan), rval);
                }
                LOG_VERBOSE(BSL_LS_BCM_INTR,
                            (BSL_META_U(unit,
                                        " soc_mem_fifo_dma_get_num_entries failed, rv=%d\n"), rv));
                non_empty = FALSE;
            }
        } while (non_empty);
    }

cleanup_exit:
    LOG_VERBOSE(BSL_LS_BCM_INTR,
                (BSL_META_U(unit,
                            " stopping _bcm_report_fifo_dma_thread\n")));
    (void)soc_mem_fifo_dma_stop(unit, chan);

    if (host_buf != NULL) {
        soc_cm_sfree(unit, host_buf);
    }
    rctrl->pid = SAL_THREAD_ERROR;
    sal_thread_exit(0);
}
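
A minimal sketch of how the export thread above is assumed to be stopped: clear rctrl->interval and give the interrupt semaphore so a thread blocked in sal_sem_take() wakes and falls out of the while loop (example_report_thread_stop is an illustrative name):

STATIC void
example_report_thread_stop(int unit)
{
    _bcm_ft_report_ctrl_t *rctrl = _bcm_ft_report_ctrl[unit];

    rctrl->interval = 0;
    sal_sem_give(SOC_CONTROL(unit)->ftreportIntr);

    /* The thread sets rctrl->pid = SAL_THREAD_ERROR on its way out. */
}
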
Example #9
File: mbox.c Project: ariavie/bcm
int
_bcm_mbox_txrx(
    int unit,
    uint32 node_num,
    _bcm_mbox_transport_type_t transport,
    uint8 *out_data,
    int out_len,
    uint8 *in_data,
    int *in_len)
{
#if defined(BCM_CMICM_SUPPORT)
    int rv;
    uint8 *response_data;
    int response_len;

    /* soc_cm_print("cmic_txrx tx Len:%d\n", out_len); */
    /* _bcm_dump_hex(out_data, out_len, 4); */

    int max_response_len = (in_len) ? *in_len : 0;
    if (in_len) {
        *in_len = 0;
    }

    rv = sal_sem_take(mbox_info.comm_available, _BCM_MBOX_RESPONSE_TIMEOUT_US);
    if (BCM_FAILURE(rv)) {
        _MBOX_ERROR_FUNC("sal_sem_take()");
        return rv;
    }

    rv = _bcm_mbox_tx(unit, node_num, _BCM_MBOX_MESSAGE, out_data, out_len);

    if (rv != BCM_E_NONE) {
        SOC_DEBUG_PRINT((DK_ERR | DK_VERBOSE, "%s() failed %s\n", __func__, "Tx failed"));
        goto release_mgmt_lock;
    }

    /*
     * Get rx buffer, either from rx callback or from cmicm wait task
     * NOTICE: This call will return an rx response buffer that we will need to
     *         release by notifying the Rx section
     */
    rv = _bcm_mbox_rx_response_get(unit, node_num, _BCM_MBOX_RESPONSE_TIMEOUT_US,
                                   &response_data, &response_len);
    if (BCM_FAILURE(rv)) {
        SOC_DEBUG_PRINT((DK_ERR | DK_VERBOSE, "%s() failed %s\n", __func__, "No Response"));
        goto release_mgmt_lock;
    }

    
    if (in_data && in_len) {
        if (response_len > max_response_len) {
            response_len = max_response_len;
        }

        *in_len = response_len;
        sal_memcpy(in_data, response_data, response_len);
    }

    /* soc_cm_print("cmic_txrx rx Len:%d\n", *in_len); */
    /* _bcm_dump_hex(in_data, *in_len, 4); */

    rv = BCM_E_NONE;

/* dispose_of_resp: */
    _bcm_mbox_rx_response_free(unit, response_data);

release_mgmt_lock:
    /* Preserve any earlier error code; a sal_sem_give() failure is only logged. */
    if (BCM_FAILURE(sal_sem_give(mbox_info.comm_available))) {
        _MBOX_ERROR_FUNC("sal_sem_give()");
    }

    return rv;

#else  /* BCM_CMICM_SUPPORT */
    return BCM_E_UNAVAIL;
#endif /* BCM_CMICM_SUPPORT */
}
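
A hypothetical caller sketch: in_len is passed in as the response buffer size and comes back as the number of octets actually copied; the transport value, message contents, and lengths here are illustrative only (example_mbox_query is an illustrative name):

int
example_mbox_query(int unit, uint32 node_num)
{
    uint8 request[16] = { 0 };    /* opaque request payload (illustrative) */
    uint8 response[64];
    int   response_len = sizeof(response);
    int   rv;

    rv = _bcm_mbox_txrx(unit, node_num,
                        (_bcm_mbox_transport_type_t)0,    /* illustrative */
                        request, sizeof(request),
                        response, &response_len);
    if (BCM_FAILURE(rv)) {
        return rv;
    }

    /* response[0..response_len-1] now holds the reply. */
    return BCM_E_NONE;
}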