/*
 * Function:
 *     bcm_regex_report_control
 * Purpose:
 *     Start (or signal) the flowtracker export FIFO-DMA thread for a unit.
 *     A non-zero interval spawns the thread if it is not already running;
 *     a zero interval wakes the running thread so it observes the new
 *     (disabled) interval and can exit on its own.
 * Parameters:
 *     unit     - device unit number
 *     interval - polling interval in microseconds; 0 disables
 * Returns:
 *     BCM_E_NONE on success, BCM_E_INIT if the report control is not
 *     initialized, BCM_E_MEMORY if the thread could not be created.
 * Notes:
 *     Fixes in this revision: removed a redundant second assignment of
 *     rctrl (it was already initialized at declaration) and added a NULL
 *     guard before dereferencing it.
 */
static int
bcm_regex_report_control(int unit, sal_usecs_t interval)
{
    _bcm_ft_report_ctrl_t *rctrl = _bcm_ft_report_ctrl[unit];
    char name[32];

    /* Guard: the original dereferenced rctrl unconditionally. */
    if (rctrl == NULL) {
        return BCM_E_INIT;
    }

    sal_snprintf(name, sizeof(name), "bcmFtExportDma.%d", unit);

    rctrl->interval = interval;

    if (interval) {
        /* Only spawn if no thread is currently running. */
        if (rctrl->pid == SAL_THREAD_ERROR) {
            rctrl->pid = sal_thread_create(name, SAL_THREAD_STKSZ,
                             soc_property_get(unit,
                                 spn_BCM_FT_REPORT_THREAD_PRI, 50),
                             _bcm_report_fifo_dma_thread,
                             INT_TO_PTR(unit));
            if (rctrl->pid == SAL_THREAD_ERROR) {
                LOG_ERROR(BSL_LS_BCM_COMMON,
                          (BSL_META_U(unit,
                                      "%s: Could not start thread\n"),
                           FUNCTION_NAME()));
                return BCM_E_MEMORY;
            }
        }
    } else {
        /* Wake up thread so it will check the changed interval value */
        sal_sem_give(SOC_CONTROL(unit)->ftreportIntr);
    }

    return BCM_E_NONE;
}
/*
 * RX diagnostic callback.
 *
 * Logs parsed packet information when RX debugging is enabled, then either
 * hands the packet back to the RX subsystem (BCM_RX_HANDLED) or takes
 * ownership of the buffer and chains it onto the per-unit free queue
 * (BCM_RX_HANDLED_OWNED) when packet enqueueing is active.  Once enough
 * packets have been collected, the waiting consumer is signalled via
 * pkts_are_ready[unit].
 */
STATIC bcm_rx_t
rx_cb_handler(int unit, bcm_pkt_t *info, void *cookie)
{
    int pkt_num;

    COMPILER_REFERENCE(cookie);

    pkt_num = ++rx_cb_count;

    DIAG_DEBUG(DIAG_DBG_RX,
               ("RX packet %d: unit=%d len=%d rx_port=%d reason=%d cos=%d\n",
                pkt_num, unit, info->tot_len, info->rx_port,
                info->rx_reason, info->cos));

#ifdef BCM_XGS_SUPPORT
    if (SOC_IS_XGS12_FABRIC(unit)) {
        if (DIAG_DEBUG_CHECK(DIAG_DBG_RX)) {
            soc_higig_dump(unit, "HG HEADER: ",
                           (soc_higig_hdr_t *)BCM_PKT_HG_HDR(info));
        }
    }
#endif /* BCM_XGS_SUPPORT */

    DIAG_DEBUG(DIAG_DBG_RX, ("Parsed packet info:\n"));
    DIAG_DEBUG(DIAG_DBG_RX,
               ("  src mod=%d. src port=%d. op=%d.\n",
                info->src_mod, info->src_port, info->opcode));
    DIAG_DEBUG(DIAG_DBG_RX,
               ("  dest mod=%d. dest port=%d. chan=%d.\n",
                info->dest_mod, info->dest_port, info->dma_channel));

    if (DIAG_DEBUG_CHECK(DIAG_DBG_RX)) {
        soc_dma_dump_pkt(unit, "Data: ", BCM_PKT_DMAC(info),
                         info->tot_len, TRUE);
    }

    /* Fast path: enqueueing disabled, let RX reclaim the buffer. */
    if (enqueue_pkts[unit] <= 0) {
        return BCM_RX_HANDLED;
    }

    /*
     * Take ownership: push the allocation onto the per-unit singly-linked
     * free queue (the first word of the buffer is used as the link) under
     * the queue lock, and signal the consumer when the target count is hit.
     */
    sal_mutex_take(pkt_queue_lock[unit], sal_mutex_FOREVER);
    *(uint32 **)(info->alloc_ptr) = (uint32 *)pkt_free_queue[unit];
    pkt_free_queue[unit] = info->alloc_ptr;
    rx_pkt_count[unit]++;
    if (rx_pkt_count[unit] >= enqueue_pkts[unit]) {
        sal_sem_give(pkts_are_ready[unit]);
    }
    sal_mutex_give(pkt_queue_lock[unit]);

#if defined(BCM_RXP_DEBUG)
    bcm_rx_pool_own(info->alloc_ptr, "rxmon");
#endif

    return BCM_RX_HANDLED_OWNED;
}
/*
 * Mailbox RX polling thread.
 *
 * Polls all mailboxes every 10 ms looking for work.  For a mailbox in the
 * RESP state it publishes the response buffer pointer/length into the
 * per-unit state and signals the waiter via the response_ready semaphore,
 * then marks the mailbox EMPTY.  Runs forever; there is no exit condition
 * visible in this function.
 *
 * NOTE(review): the mailbox status is set to _BCM_MBOX_MS_EMPTY *after*
 * response_ready is given.  The waiter reads response_data, which points
 * into the mailbox itself, so clearing EMPTY here presumably allows the
 * producer to reuse the slot while the waiter may still be copying —
 * verify the protocol guarantees this cannot race.
 */
STATIC void
_bcm_mbox_rx_thread(void *arg)
{
    int rv = 0;
    int unit = PTR_TO_INT(arg);

    while (1) {
        int mbox;

        /* The uc_msg is just a signal that there is a message somewhere
           to get, so look through all mboxes */
        sal_usleep(10000);

        for (mbox = 0; mbox < _BCM_MBOX_MAX_BUFFERS; ++mbox) {
            switch (mbox_info.unit_state[unit].mboxes->status[mbox]) {
            case _BCM_MBOX_MS_TUNNEL_IN:
                /* Not handled by this thread. */
                break;
            case _BCM_MBOX_MS_EVENT:
                /* Not handled by this thread. */
                break;
            case _BCM_MBOX_MS_RESP:
                {
                    /* Publish response buffer and length for the waiter. */
                    mbox_info.unit_state[unit].response_data =
                        (uint8*)mbox_info.unit_state[unit].mboxes->mbox[mbox].data;
                    mbox_info.unit_state[unit].response_len =
                        mbox_info.unit_state[unit].mboxes->mbox[mbox].data_len;

                    /* Wake the thread blocked in the response-get path. */
                    rv = sal_sem_give(mbox_info.unit_state[unit].response_ready);
                    if (BCM_FAILURE(rv)) {
                        _MBOX_ERROR_FUNC("_bcm_ptp_sem_give()");
                    }

                    /* Release the mailbox slot for reuse. */
                    mbox_info.unit_state[unit].mboxes->status[mbox] =
                        _BCM_MBOX_MS_EMPTY;
                }
                break;
            }
        }
    }
}
/*
 * Function: _ioctl
 *
 * Purpose:
 *    Handle IOCTL commands from user mode.  Copies the lubde_ioctl_t
 *    request in from user space, dispatches on the command code, and
 *    copies the (possibly updated) structure back out.  Per-command
 *    failures are reported in io.rc (LUBDE_FAIL); only copy failures
 *    produce a negative kernel errno return.
 * Parameters:
 *    cmd - IOCTL cmd
 *    arg - IOCTL parameters (user-space pointer to lubde_ioctl_t)
 * Returns:
 *    0 on success, <0 on error (-EFAULT if the user copy fails)
 *
 * NOTE(review): io.dev comes from user space and is used to index
 * _devices[] in several cases without a visible bounds check here —
 * presumably validated elsewhere; verify before trusting.
 */
static int
_ioctl(unsigned int cmd, unsigned long arg)
{
    lubde_ioctl_t io;
    uint32 pbase, size;
    const ibde_dev_t *bde_dev;
    int inst_id;
    bde_inst_resource_t *res;

    if (copy_from_user(&io, (void *)arg, sizeof(io))) {
        return -EFAULT;
    }

    io.rc = LUBDE_SUCCESS;

    switch(cmd) {
    case LUBDE_VERSION:
        io.d0 = 0;
        break;
    case LUBDE_GET_NUM_DEVICES:
        io.d0 = user_bde->num_devices(io.dev);
        break;
    case LUBDE_GET_DEVICE:
        /* Report device ID/revision; memory-mapped devices also get the
           physical base address (lo/hi) for user-space mmap. */
        bde_dev = user_bde->get_dev(io.dev);
        if (bde_dev) {
            io.d0 = bde_dev->device;
            io.d1 = bde_dev->rev;
            if (BDE_DEV_MEM_MAPPED(_devices[io.dev].dev_type)) {
                /* Get physical address to map */
                io.d2 = lkbde_get_dev_phys(io.dev);
                io.d3 = lkbde_get_dev_phys_hi(io.dev);
            }
        } else {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_GET_DEVICE_TYPE:
        io.d0 = _devices[io.dev].dev_type;
        break;
    case LUBDE_GET_BUS_FEATURES:
        user_bde->pci_bus_features(io.dev, (int *) &io.d0,
                                   (int *) &io.d1, (int *) &io.d2);
        break;
    case LUBDE_PCI_CONFIG_PUT32:
        /* PCI config space access is only valid for PCI devices. */
        if (_devices[io.dev].dev_type & BDE_PCI_DEV_TYPE) {
            user_bde->pci_conf_write(io.dev, io.d0, io.d1);
        } else {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_PCI_CONFIG_GET32:
        if (_devices[io.dev].dev_type & BDE_PCI_DEV_TYPE) {
            io.d0 = user_bde->pci_conf_read(io.dev, io.d0);
        } else {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_GET_DMA_INFO:
        /* Return the DMA pool physical base and size; per-instance pool
           when multi-instance mode is enabled. */
        inst_id = io.dev;
        if (_bde_multi_inst){
            _dma_resource_get(inst_id, &pbase, &size);
        } else {
            lkbde_get_dma_info(&pbase, &size);
        }
        io.d0 = pbase;
        io.d1 = size;
        /* Optionally enable DMA mmap via /dev/linux-kernel-bde */
        io.d2 = USE_LINUX_BDE_MMAP;
        break;
    case LUBDE_ENABLE_INTERRUPTS:
        if (_devices[io.dev].dev_type & BDE_SWITCH_DEV_TYPE) {
            /* Connect the registered switch ISR once; 'enabled' makes
               this idempotent. */
            if (_devices[io.dev].isr && !_devices[io.dev].enabled) {
                user_bde->interrupt_connect(io.dev,
                                            _devices[io.dev].isr,
                                            _devices+io.dev);
                _devices[io.dev].enabled = 1;
            }
        } else {
            /* Process ethernet device interrupt */
            /* FIXME: for multiple chips */
            if (!_devices[io.dev].enabled) {
                user_bde->interrupt_connect(io.dev,
                                            (void(*)(void *))_ether_interrupt,
                                            _devices+io.dev);
                _devices[io.dev].enabled = 1;
            }
        }
        break;
    case LUBDE_DISABLE_INTERRUPTS:
        if (_devices[io.dev].enabled) {
            user_bde->interrupt_disconnect(io.dev);
            _devices[io.dev].enabled = 0;
        }
        break;
    case LUBDE_WAIT_FOR_INTERRUPT:
        /* Block the calling user thread until the ISR has signalled via
           the per-instance (or ethernet) wait queue. */
        if (_devices[io.dev].dev_type & BDE_SWITCH_DEV_TYPE) {
            res = &_bde_inst_resource[_devices[io.dev].inst];
#ifdef BDE_LINUX_NON_INTERRUPTIBLE
            wait_event_timeout(res->intr_wq,
                               atomic_read(&res->intr) != 0, 100);
#else
            wait_event_interruptible(res->intr_wq,
                                     atomic_read(&res->intr) != 0);
#endif
            /*
             * Even if we get multiple interrupts, we
             * only run the interrupt handler once.
             */
            atomic_set(&res->intr, 0);
        } else {
#ifdef BDE_LINUX_NON_INTERRUPTIBLE
            wait_event_timeout(_ether_interrupt_wq,
                               atomic_read(&_ether_interrupt_has_taken_place) != 0,
                               100);
#else
            wait_event_interruptible(_ether_interrupt_wq,
                                     atomic_read(&_ether_interrupt_has_taken_place) != 0);
#endif
            /*
             * Even if we get multiple interrupts, we
             * only run the interrupt handler once.
             */
            atomic_set(&_ether_interrupt_has_taken_place, 0);
        }
        break;
    case LUBDE_USLEEP:
        sal_usleep(io.d0);
        break;
    case LUBDE_UDELAY:
        sal_udelay(io.d0);
        break;
    case LUBDE_SEM_OP:
        /* Kernel-backed semaphore operations; the kernel address of the
           semaphore round-trips through io.p0. */
        switch (io.d0) {
        case LUBDE_SEM_OP_CREATE:
            io.p0 = (bde_kernel_addr_t)sal_sem_create("", io.d1, io.d2);
            break;
        case LUBDE_SEM_OP_DESTROY:
            sal_sem_destroy((sal_sem_t)io.p0);
            break;
        case LUBDE_SEM_OP_TAKE:
            io.rc = sal_sem_take((sal_sem_t)io.p0, io.d2);
            break;
        case LUBDE_SEM_OP_GIVE:
            io.rc = sal_sem_give((sal_sem_t)io.p0);
            break;
        default:
            io.rc = LUBDE_FAIL;
            break;
        }
        break;
    case LUBDE_WRITE_IRQ_MASK:
        io.rc = lkbde_irq_mask_set(io.dev, io.d0, io.d1, 0);
        break;
    case LUBDE_SPI_READ_REG:
        if (user_bde->spi_read(io.dev, io.d0, io.dx.buf, io.d1) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_SPI_WRITE_REG:
        if (user_bde->spi_write(io.dev, io.d0, io.dx.buf, io.d1) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_READ_REG_16BIT_BUS:
        io.d1 = user_bde->read(io.dev, io.d0);
        break;
    case LUBDE_WRITE_REG_16BIT_BUS:
        io.rc = user_bde->write(io.dev, io.d0, io.d1);
        break;
#if (defined(BCM_PETRA_SUPPORT) || defined(BCM_DFE_SUPPORT))
    case LUBDE_CPU_WRITE_REG:
    {
        if (lkbde_cpu_write(io.dev, io.d0, (uint32*)io.dx.buf) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    }
    case LUBDE_CPU_READ_REG:
    {
        if (lkbde_cpu_read(io.dev, io.d0, (uint32*)io.dx.buf) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    }
    case LUBDE_CPU_PCI_REGISTER:
    {
        if (lkbde_cpu_pci_register(io.dev) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    }
#endif
    case LUBDE_DEV_RESOURCE:
        bde_dev = user_bde->get_dev(io.dev);
        if (bde_dev) {
            if (BDE_DEV_MEM_MAPPED(_devices[io.dev].dev_type)) {
                /* Get physical address to map */
                io.rc = lkbde_get_dev_resource(io.dev, io.d0,
                                               &io.d1, &io.d2, &io.d3);
            }
        } else {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_IPROC_READ_REG:
        /* NOTE(review): a register legitimately reading 0xffffffff is
           indistinguishable from failure here. */
        io.d1 = user_bde->iproc_read(io.dev, io.d0);
        if (io.d1 == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_IPROC_WRITE_REG:
        if (user_bde->iproc_write(io.dev, io.d0, io.d1) == -1) {
            io.rc = LUBDE_FAIL;
        }
        break;
    case LUBDE_ATTACH_INSTANCE:
        io.rc = _instance_attach(io.d0, io.d1);
        break;
    default:
        gprintk("Error: Invalid ioctl (%08x)\n", cmd);
        io.rc = LUBDE_FAIL;
        break;
    }

    if (copy_to_user((void *)arg, &io, sizeof(io))) {
        return -EFAULT;
    }

    return 0;
}
/*
 * Function:
 *    soc_robo_dos_monitor_enable_set (internal)
 * Purpose:
 *    Enable/disable DOS event monitor threads
 * Parameters:
 *    unit - unit number.
 *    interval - time between resynchronization passes; 0 disables.
 * Returns:
 *    SOC_E_NONE on success, SOC_E_INIT if enabling before init,
 *    SOC_E_MEMORY if the thread can't be created,
 *    SOC_E_INTERNAL if the thread fails to start or exit in time.
 * Notes:
 *    Fixes in this revision: removed unreachable `break` statements that
 *    followed `return` in the timeout loops, and flattened the nesting.
 *    The start/stop waits are busy-poll loops bounded by a 3-second
 *    soc_timeout; behavior is unchanged.
 */
int
soc_robo_dos_monitor_enable_set(int unit, sal_usecs_t interval)
{
    drv_robo_dos_monitor_t *dm = drv_dm_control[unit];
    sal_usecs_t us = interval;
    soc_timeout_t to;

    /* return if not init yet */
    if (dm == NULL){
        if (interval == 0){
            /* no error return if set thread disable when dm not init */
            return SOC_E_NONE;
        }
        /* init problem when enabling dm thread */
        return SOC_E_INIT;
    }

    sal_snprintf(dm->task_name, sizeof(dm->task_name),
                 "robo_DOS_EVENT.%d", unit);

    if (us){
        /* --- enabling thread --- */
        us = (interval >= MIN_DRV_DOS_MONITOR_INTERVAL) ?
                us : MIN_DRV_DOS_MONITOR_INTERVAL;
        dm->interval = us;

        if (dm->dm_thread != NULL){
            /* if thread is running, update the period and return */
            sal_sem_give(dm->dm_sema);
            return SOC_E_NONE;
        }

        if (sal_thread_create(dm->task_name, SAL_THREAD_STKSZ,
                              ROBO_HWDOS_MONITOR_PRI,
                              (void (*)(void*))soc_robo_dos_monitor_thread,
                              INT_TO_PTR(unit)) == SAL_THREAD_ERROR){
            dm->interval = 0;
            dm->err_cnt = 0;
            soc_cm_debug(DK_ERR, "Thread is not created\n");
            soc_event_generate(unit, SOC_SWITCH_EVENT_THREAD_ERROR,
                               SOC_SWITCH_EVENT_THREAD_HWDOS_MONITOR,
                               __LINE__, SOC_E_MEMORY);
            return SOC_E_MEMORY;
        }

        /* Busy-poll (bounded at 3 s) until the thread records itself. */
        soc_timeout_init(&to, 3000000, 0);
        while (dm->dm_thread == NULL) {
            if (soc_timeout_check(&to)) {
                dm->interval = 0;
                dm->err_cnt = 0;
                soc_cm_debug(DK_ERR, "%s: Thread did not start\n",
                             dm->task_name);
                soc_event_generate(unit, SOC_SWITCH_EVENT_THREAD_ERROR,
                                   SOC_SWITCH_EVENT_THREAD_HWDOS_MONITOR,
                                   __LINE__, SOC_E_INTERNAL);
                return SOC_E_INTERNAL;
            }
        }
    } else {
        /* disabling thread: zero the interval and wake the thread so it
           notices and exits; wait (bounded at 3 s) for it to clear
           dm_thread. */
        dm->interval = 0;
        sal_sem_give(dm->dm_sema);
        soc_timeout_init(&to, 3000000, 0);
        while (dm->dm_thread != NULL) {
            if (soc_timeout_check(&to)) {
                dm->interval = 0;
                dm->err_cnt = 0;
                soc_cm_debug(DK_ERR, "%s: Thread did not exit\n",
                             dm->task_name);
                soc_event_generate(unit, SOC_SWITCH_EVENT_THREAD_ERROR,
                                   SOC_SWITCH_EVENT_THREAD_HWDOS_MONITOR,
                                   __LINE__, SOC_E_INTERNAL);
                return SOC_E_INTERNAL;
            }
        }
    }

    return SOC_E_NONE;
}
/* * main: Sets up the socket and handles client requests with a child * process per socket. */ void dmac_listener(void *v_void) { verinet_t *v = (verinet_t *)v_void; int sockfd, newsockfd, one = 1; socklen_t clilen; struct sockaddr_in cli_addr, serv_addr; /* Initialize socket ... */ if ((sockfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) { perror("server: can't open stream socket"); exit(1); } if (setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, (char *) &one, sizeof (one)) < 0) { perror("setsockopt"); } /* * Setup server address... */ memset((void *) &serv_addr,0x0, sizeof(serv_addr)); serv_addr.sin_family = AF_INET; serv_addr.sin_addr.s_addr = htonl(INADDR_ANY); serv_addr.sin_port = htons(0); /* Pick any port */ /* * Bind our local address */ if (bind(sockfd, (struct sockaddr *) &serv_addr, sizeof(serv_addr)) < 0) { perror("dmac: unable to bind address"); sal_thread_exit(0); } v->dmaPort = getsockport(sockfd); /* Get port that was picked */ /* listen for inbound connections ... */ listen(sockfd, 5); /* Notify dmac_init that socket is listening */ sal_sem_give(v->dmacListening); printk("DMA Controller listening on port[%d]\n", v->dmaPort); while (!v->dmacWorkerExit) { clilen = sizeof(cli_addr); newsockfd = accept(sockfd, (struct sockaddr *) &cli_addr, &clilen); if (newsockfd < 0 && errno == EINTR) { continue; } if (newsockfd < 0) { perror("server: accept error"); } else { v->dmacFd = newsockfd; v->dmacHandler = sal_thread_create("DMA-controller", SAL_THREAD_STKSZ, 100, dmac_handler, v); if (SAL_THREAD_ERROR == v->dmacHandler) { printk("Thread creation error!\n"); } else { debugk(DK_VERINET, "DMAC request thread dispatched.\n"); } } } debugk(DK_VERINET, "DMA listener shutdown.\n"); sal_thread_exit(0); }
/*
 * Function:
 *    _bcm_mbox_txrx
 * Purpose:
 *    Send a mailbox message to a node and (optionally) collect the
 *    response.  Serializes all transactions via the comm_available
 *    binary semaphore.
 * Parameters:
 *    unit      - device unit
 *    node_num  - target node
 *    transport - transport type (unused in the CMICM path)
 *    out_data/out_len - message to transmit
 *    in_data   - (OUT) response buffer, may be NULL
 *    in_len    - (IN/OUT) capacity of in_data on entry, bytes written on
 *                exit; may be NULL if no response is wanted
 * Returns:
 *    BCM_E_XXX.  BCM_E_UNAVAIL when built without BCM_CMICM_SUPPORT.
 * Notes:
 *    BUG FIX: the original did `rv = sal_sem_give(...)` at the cleanup
 *    label, which clobbered the error code from a failed Tx or missing
 *    response — failures were silently reported as success.  The
 *    operation's result is now preserved; a semaphore-give failure is
 *    only surfaced if the operation itself succeeded.
 */
int
_bcm_mbox_txrx(
    int unit,
    uint32 node_num,
    _bcm_mbox_transport_type_t transport,
    uint8 *out_data,
    int out_len,
    uint8 *in_data,
    int *in_len)
{
#if defined(BCM_CMICM_SUPPORT)
    int rv;
    uint8 *response_data;
    int response_len;
    int max_response_len = (in_len) ? *in_len : 0;

    if (in_len) {
        *in_len = 0;
    }

    /* Serialize: only one outstanding mailbox transaction at a time. */
    rv = sal_sem_take(mbox_info.comm_available, _BCM_MBOX_RESPONSE_TIMEOUT_US);
    if (BCM_FAILURE(rv)) {
        _MBOX_ERROR_FUNC("sal_sem_take()");
        return rv;
    }

    rv = _bcm_mbox_tx(unit, node_num, _BCM_MBOX_MESSAGE, out_data, out_len);
    if (rv != BCM_E_NONE) {
        SOC_DEBUG_PRINT((DK_ERR | DK_VERBOSE, "%s() failed %s\n",
                         __func__, "Tx failed"));
        goto release_mgmt_lock;
    }

    /*
     * Get rx buffer, either from rx callback or from cmicm wait task
     * NOTICE: This call will return an rx response buffer that we will need
     * to release by notifying the Rx section
     */
    rv = _bcm_mbox_rx_response_get(unit, node_num,
                                   _BCM_MBOX_RESPONSE_TIMEOUT_US,
                                   &response_data, &response_len);
    if (BCM_FAILURE(rv)) {
        SOC_DEBUG_PRINT((DK_ERR | DK_VERBOSE, "%s() failed %s\n",
                         __func__, "No Response"));
        goto release_mgmt_lock;
    }

    /* Copy out as much of the response as the caller's buffer allows. */
    if (in_data && in_len) {
        if (response_len > max_response_len) {
            response_len = max_response_len;
        }
        *in_len = response_len;
        sal_memcpy(in_data, response_data, response_len);
    }

    rv = BCM_E_NONE;

    /* Release the response buffer back to the Rx section. */
    _bcm_mbox_rx_response_free(unit, response_data);

release_mgmt_lock:
    {
        /* Preserve the operation's result; do not let the give() result
           mask an earlier failure. */
        int sem_rv = sal_sem_give(mbox_info.comm_available);
        if (BCM_FAILURE(sem_rv)) {
            _MBOX_ERROR_FUNC("sal_sem_give()");
            if (BCM_SUCCESS(rv)) {
                rv = sem_rv;
            }
        }
    }
    return rv;
#else /* BCM_CMICM_SUPPORT */
    return BCM_E_UNAVAIL;
#endif /* BCM_CMICM_SUPPORT */
}
int _bcm_mbox_comm_init(int unit, int appl_type) { #if defined(BCM_CMICM_SUPPORT) int rv = BCM_E_NONE; int timeout_usec = 1900000; int max_num_cores = 2; int result; int c; int i; /* Init the system if this is the first time in */ if (mbox_info.unit_state == NULL) { mbox_info.unit_state = soc_cm_salloc(unit, sizeof(_bcm_bs_internal_stack_state_t) * BCM_MAX_NUM_UNITS, "mbox_info_unit_state"); sal_memset(mbox_info.unit_state, 0, sizeof(_bcm_bs_internal_stack_state_t) * BCM_MAX_NUM_UNITS); } /* Init the unit if this is the first time for the unit */ if (mbox_info.unit_state[unit].mboxes == NULL) { /* allocate space for mboxes */ mbox_info.unit_state[unit].mboxes = soc_cm_salloc(unit, sizeof(_bcm_bs_internal_stack_mboxes_t), "bs msg"); if (!mbox_info.unit_state[unit].mboxes) { return BCM_E_MEMORY; } /* clear state of message mboxes */ mbox_info.unit_state[unit].mboxes->num_buffers = soc_ntohl(_BCM_MBOX_MAX_BUFFERS); for (i = 0; i < _BCM_MBOX_MAX_BUFFERS; ++i) { mbox_info.unit_state[unit].mboxes->status[i] = _BCM_MBOX_MS_EMPTY; } mbox_info.comm_available = sal_sem_create("BCM BS comms", sal_sem_BINARY, 0); rv = sal_sem_give(mbox_info.comm_available); mbox_info.unit_state[unit].response_ready = sal_sem_create("CMICM_resp", sal_sem_BINARY, 0); sal_thread_create("CMICM Rx", SAL_THREAD_STKSZ, soc_property_get(unit, spn_UC_MSG_THREAD_PRI, 50) + 1, _bcm_mbox_rx_thread, INT_TO_PTR(unit)); /* allocate space for debug log */ /* size is the size of the structure without the placeholder space for debug->buf, plus the real space for it */ mbox_info.unit_state[unit].log = soc_cm_salloc(unit, sizeof(_bcm_bs_internal_stack_log_t) - sizeof(mbox_info.unit_state[unit].log->buf) + _BCM_MBOX_MAX_LOG, "bs log"); if (!mbox_info.unit_state[unit].log) { soc_cm_sfree(unit, mbox_info.unit_state[unit].mboxes); return BCM_E_MEMORY; } /* initialize debug */ mbox_info.unit_state[unit].log->size = soc_htonl(_BCM_MBOX_MAX_LOG); mbox_info.unit_state[unit].log->head = 0; 
mbox_info.unit_state[unit].log->tail = 0; /* set up the network-byte-order pointers so that CMICm can access the shared memory */ mbox_info.unit_state[unit].mbox_ptr = soc_htonl(soc_cm_l2p(unit, (void*)mbox_info.unit_state[unit].mboxes)); mbox_info.unit_state[unit].log_ptr = soc_htonl(soc_cm_l2p(unit, (void*)mbox_info.unit_state[unit].log)); /* soc_cm_print("DEBUG SPACE: %p\n", (void *)mbox_info.unit_state[unit].log->buf); */ rv = BCM_E_UNAVAIL; for (c = max_num_cores - 1; c >= 0; c--) { /* soc_cm_print("Trying BS on core %d\n", c); */ result = soc_cmic_uc_appl_init(unit, c, MOS_MSG_CLASS_BS, timeout_usec, _BCM_MBOX_SDK_VERSION, _BCM_MBOX_UC_MIN_VERSION); if (SOC_E_NONE == result){ /* uKernel communcations started successfully, so run the init */ /* Note: the length of this message is unused, and can be overloaded */ mos_msg_data_t start_msg; start_msg.s.mclass = MOS_MSG_CLASS_BS; start_msg.s.subclass = MOS_MSG_SUBCLASS_MBOX_CONFIG; _shr_uint16_write((uint8*)(&(start_msg.s.len)), (uint16) appl_type); start_msg.s.data = bcm_htonl(soc_cm_l2p(unit, (void*)&mbox_info.unit_state[unit])); if (BCM_FAILURE(rv = soc_cmic_uc_msg_send(unit, c, &start_msg, timeout_usec))) { _MBOX_ERROR_FUNC("soc_cmic_uc_msg_send()"); } mbox_info.unit_state[unit].core_num = c; break; } /* soc_cm_print("No response on core %d\n", c); */ } if (BCM_FAILURE(rv)) { soc_cm_print("No response from CMICm core(s)\n"); return rv; } _bcm_mbox_debug_poll(INT_TO_PTR(&_bcm_mbox_debug_poll), INT_TO_PTR(1000), INT_TO_PTR(unit), 0, 0); } return rv; #else /* BCM_CMICM_SUPPORT */ return BCM_E_UNAVAIL; #endif /* BCM_CMICM_SUPPORT */ }
/*
 * Thin wrapper over sal_sem_give() for PTP semaphores.
 * Returns sal_sem_give()'s result unchanged (0 on success, negative on
 * error per the SAL convention — verify against the local SAL headers).
 */
int
_bcm_ptp_sem_give(_bcm_ptp_sem_t b)
{
    return sal_sem_give(b);
}