/*-----------------------------------------------------------------------------------*/
/**
 * Send data on a connected netconn, blocking until all of it has been
 * handed to the stack (or an error occurs).
 *
 * The data is sent in chunks: for TCP connections each chunk is limited to
 * the current tcp_sndbuf() space, and conn->sem is used to sleep until the
 * tcpip thread frees send-buffer space.  Each chunk is posted to the tcpip
 * thread as an API_MSG_WRITE message and acknowledged via conn->mbox.
 *
 * @param conn    connection to write to (NULL yields ERR_VAL)
 * @param dataptr data to send; 'copy' decides whether the stack copies it
 * @param size    number of bytes at dataptr
 * @param copy    nonzero to copy the data into the stack's buffers
 * @return ERR_OK on success, otherwise the connection's error code
 */
err_t
netconn_write(struct netconn *conn, void *dataptr, u32_t size, u8_t copy)
{
  struct api_msg *msg;
  u32_t len;

  if (conn == NULL) {
    return ERR_VAL;
  }
  if (conn->err != ERR_OK) {
    return conn->err;
  }
  /* Lazily create the semaphore used to wait for send-buffer space. */
  if (conn->sem == SYS_SEM_NULL) {
    conn->sem = sys_sem_new(0);
    if (conn->sem == SYS_SEM_NULL) {
      return ERR_MEM;
    }
  }
  if ((msg = memp_mallocp(MEMP_API_MSG)) == NULL) {
    return (conn->err = ERR_MEM);
  }
  msg->type = API_MSG_WRITE;
  msg->msg.conn = conn;
  conn->state = NETCONN_WRITE;
  while (conn->err == ERR_OK && size > 0) {
    msg->msg.msg.w.dataptr = dataptr;
    msg->msg.msg.w.copy = copy;
    if (conn->type == NETCONN_TCP) {
      /* No room at all: sleep until the sent/err callback posts conn->sem. */
      if (tcp_sndbuf(conn->pcb.tcp) == 0) {
        sys_sem_wait(conn->sem);
        if (conn->err != ERR_OK) {
          goto ret;
        }
      }
      if (size > tcp_sndbuf(conn->pcb.tcp)) {
        /* We cannot send more than one send buffer's worth of data at a
           time. */
        len = tcp_sndbuf(conn->pcb.tcp);
      } else {
        len = size;
      }
    } else {
      /* Non-TCP (UDP/RAW): send everything in one message. */
      len = size;
    }

    /* NOTE(review): %ld with a u32_t argument assumes long == 32 bit on
       this platform — confirm for the target toolchain. */
    DEBUGF(API_LIB_DEBUG, ("netconn_write: writing %ld bytes (%d)\n", len, copy));
    msg->msg.msg.w.len = len;
    /* Hand the chunk to the tcpip thread and wait for its reply. */
    api_msg_post(msg);
    sys_mbox_fetch(conn->mbox, NULL);
    if (conn->err == ERR_OK) {
      /* Chunk accepted: advance past it. */
      dataptr = (void *)((char *)dataptr + len);
      size -= len;
    } else if (conn->err == ERR_MEM) {
      /* Out of memory is transient: clear the error and retry after
         waiting for buffer space. */
      conn->err = ERR_OK;
      sys_sem_wait(conn->sem);
    } else {
      /* Fatal error: stop writing. */
      goto ret;
    }
  }
 ret:
  memp_freep(MEMP_API_MSG, msg);
  conn->state = NETCONN_NONE;
  /* The semaphore is only needed while a write is in progress. */
  if (conn->sem != SYS_SEM_NULL) {
    sys_sem_free(conn->sem);
    conn->sem = SYS_SEM_NULL;
  }
  return conn->err;
}
/*
 * Block on a semaphore with no time limit.
 *
 * Thin adapter over the underlying sys_sem_wait() primitive, where a
 * timeout argument of zero means "wait indefinitely".
 */
int
sem_wait(sem_t sem_id)
{
    const unsigned int wait_forever = 0;

    return sys_sem_wait(sem_id, wait_forever);
}
/*
 * Block on a semaphore, giving up after 'timeout' has elapsed.
 *
 * Simply forwards to sys_sem_wait() and returns its status unchanged.
 */
int
sem_wait_timeout(sem_t sem_id, unsigned int timeout)
{
    int status;

    status = sys_sem_wait(sem_id, timeout);
    return status;
}
/**
 * Allocate a block from the lwIP heap (the ram[] array managed as a linked
 * list of struct mem headers), protected by mem_sem.
 *
 * Fix for lwIP bug #17922: the previous code only used a free region when
 * it could hold the request PLUS a whole extra struct mem header
 * (">= size + SIZEOF_STRUCT_MEM"), so exact and near fits were skipped
 * forever, fragmenting the heap and failing allocations that should
 * succeed.  A region is now split only when the remainder can hold a new
 * header; otherwise it is handed out whole.
 *
 * @param size requested payload size in bytes (0 yields NULL); rounded up
 *             so the returned pointer is MEM_ALIGNMENT-aligned
 * @return pointer to the payload, or NULL if no fitting free block exists
 */
void *
mem_malloc(mem_size_t size)
{
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;

  if (size == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  if ((size % MEM_ALIGNMENT) != 0) {
    size += MEM_ALIGNMENT - ((size + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT);
  }

  if (size > MEM_SIZE) {
    return NULL;
  }

  /* The whole heap is protected by a single semaphore. */
  sys_sem_wait(mem_sem);

  /* Scan from the lowest known free block. */
  for (ptr = (u8_t *)lfree - ram; ptr < MEM_SIZE;
       ptr = ((struct mem *)&ram[ptr])->next) {
    mem = (struct mem *)&ram[ptr];

    if (!mem->used) {
      ptr2 = ptr + SIZEOF_STRUCT_MEM + size;

      if (mem->next - (ptr + (2 * SIZEOF_STRUCT_MEM)) >= size) {
        /* Big enough to split: carve the request off the front and build
           a new free block (mem2) from the remainder.  mem2 is fully
           initialized from the old links BEFORE mem->next is redirected. */
        mem2 = (struct mem *)&ram[ptr2];
        mem2->used = 0;
        mem2->next = mem->next;
        mem2->prev = ptr;
        mem->next = ptr2;
        mem->used = 1;
        if (mem2->next != MEM_SIZE) {
          ((struct mem *)&ram[mem2->next])->prev = ptr2;
        }
      } else if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= size) {
        /* Exact or near fit (leftover too small for a struct mem header):
           hand out the whole region instead of skipping it (bug #17922). */
        ptr2 = mem->next;
        mem->used = 1;
      }

      if (mem->used) {
#ifdef MEM_STATS
        lwip_stats.mem.used += (size + SIZEOF_STRUCT_MEM);
        /* "max" tracks the highest heap offset ever handed out. */
        if (lwip_stats.mem.max < ptr2) {
          lwip_stats.mem.max = ptr2;
        }
#endif /* MEM_STATS */
        if (mem == lfree) {
          /* Find next free block after mem */
          while (lfree->used && lfree != ram_end) {
            lfree = (struct mem *)&ram[lfree->next];
          }
          LWIP_ASSERT("mem_malloc: !lfree->used", !lfree->used);
        }
        sys_sem_signal(mem_sem);
        LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
                    (u32_t)mem + SIZEOF_STRUCT_MEM + size <= (u32_t)ram_end);
        LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
                    (unsigned long)((u8_t *)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
        return (u8_t *)mem + SIZEOF_STRUCT_MEM;
      }
    }
  }

  LWIP_DEBUGF(MEM_DEBUG | 2, ("mem_malloc: could not allocate %d bytes\n", (int)size));
#ifdef MEM_STATS
  ++lwip_stats.mem.err;
#endif /* MEM_STATS */
  sys_sem_signal(mem_sem);
  return NULL;
}
/**
 * MCF523x FEC receive task: blocks on rx_sem (posted by the RX interrupt)
 * and drains all completed receive buffers, copying good frames into pbufs
 * and handing them to eth_input().  Never returns.
 *
 * @param arg pointer to the mcf523xfec_if_t interface descriptor
 */
void
mcf523xfec_rx_task( void *arg )
{
    mcf523xfec_if_t *fecif = arg;
    struct pbuf *p, *q;
    nbuf_t *pNBuf;
    uint8 *pPayLoad;

    do {
        /* Sleep until the ISR signals that at least one frame arrived. */
        sys_sem_wait( fecif->rx_sem );

        while( nbuf_rx_next_ready( ) ) {
            pNBuf = nbuf_rx_allocate( );
            if( pNBuf != NULL ) {
                /* Buffers are sized so every frame fits in one descriptor;
                 * the Last-in-frame bit must therefore always be set. */
                LWIP_ASSERT( "mcf523xfec_rx_task: pNBuf->status & RX_BD_L ",
                             pNBuf->status & RX_BD_L );

                /* These flags indicate that the frame has been damaged. In
                 * this case we must update the link stats if enabled and
                 * remove the frame from the FEC. */
                if( pNBuf->status & ( RX_BD_LG | RX_BD_NO | RX_BD_CR | RX_BD_OV ) ) {
#ifdef LINK_STATS
                    lwip_stats.link.drop++;
                    if( pNBuf->status & RX_BD_LG ) {
                        lwip_stats.link.lenerr++;
                    }
                    else if( pNBuf->status & ( RX_BD_NO | RX_BD_OV ) ) {
                        lwip_stats.link.err++;
                    }
                    else {
                        lwip_stats.link.chkerr++;
                    }
#endif
                }
                else {
                    /* The frame should now be valid. Perform some checks to
                     * see if the FEC driver is working correctly. */
                    LWIP_ASSERT( "mcf523xfec_rx_task: pNBuf->length != 0",
                                 pNBuf->length != 0 );

                    p = pbuf_alloc( PBUF_RAW, pNBuf->length, PBUF_POOL );
                    if( p != NULL ) {
#if ETH_PAD_SIZE
                        pbuf_header( p, -ETH_PAD_SIZE );
#endif
                        /* Copy the frame out of the driver buffer into the
                         * (possibly chained) pbuf. */
                        pPayLoad = pNBuf->data;
                        for( q = p; q != NULL; q = q->next ) {
                            memcpy( q->payload, pPayLoad, q->len );
                            pPayLoad += q->len;
                        }
#if ETH_PAD_SIZE
                        pbuf_header( p, ETH_PAD_SIZE );
#endif
                        /* Ethernet frame received. Handling it is not device
                         * dependent and therefore done in another function. */
                        eth_input( fecif->netif, p );
                    }
                }
                /* Return the buffer to the ring in every case. */
                nbuf_rx_release( pNBuf );

                /* Tell the HW that there are new free RX buffers. */
                MCF_FEC_RDAR = 1;
            }
            else {
#if LINK_STATS
                lwip_stats.link.memerr++;
                lwip_stats.link.drop++;
#endif
            }
        }
        /* Set RX Debug PIN to low since handling of next frame is possible. */
        FEC_DEBUG_RX_TIMING( 0 );
    } while( 1 );
}
/**
 * Network bootstrap task (eCos): initializes the lwIP subsystems, starts
 * the tcpip thread, optionally spawns the DHCP task, and then blocks until
 * tcpip_init_done() posts the init semaphore.
 *
 * @param arg unused eCos thread argument
 */
void
zot_network_task(cyg_addrword_t arg)
{
    sys_sem_t sem;

    /* Bring up the lwIP core: OS layer, heap, pools, pbufs. */
    sys_init();
    mem_init();
    memp_init();
    pbuf_init();

    /* tcpip_init_done() will post this semaphore from the tcpip thread. */
    sem = sys_sem_new(0);
    tcpip_init(tcpip_init_done, &sem);

#ifdef PRINT_DIAGNOSTIC
    /* Diagnostic builds originally gated this on !diag_flag; currently
       forced on. */
    // if( !diag_flag )
    if( 1 )
#endif
    {
        if(EEPROM_Data.PrintServerMode & PS_DHCP_ON) {
#ifdef LINKLOCAL_IP
            if(EEPROM_Data.RENVEnable == 1)
                Link_local_ip_init();
#endif
            /* Clear the current addresses; DHCP will fill them in. */
            mib_DHCP_p->IPAddr = 0;
            mib_DHCP_p->SubnetMask = 0;
            mib_DHCP_p->GwyAddr = 0;
            /*
            memset( EEPROM_Data.BoxIPAddress, 0, 4);
            memset( EEPROM_Data.SubNetMask, 0, 4);
            memset( EEPROM_Data.GetwayAddress, 0, 4);
            */
            //Create DHCP Thread
            cyg_thread_create(DHCP_TASK_PRI, dhcp_init, 0, "dhcp_init",
                              (void *) (DHCP_Stack), DHCP_TASK_STACK_SIZE,
                              &DHCP_TaskHdl, &DHCP_Task);
            //Start DHCP Thread
            cyg_thread_resume(DHCP_TaskHdl);
        }
#ifdef RENDEZVOUS
        else {
            /* Static IP: let the Rendezvous task proceed after a delay. */
            ppause(3000);
            cyg_semaphore_post( &rendezvous_sem);
        }
#endif
    }
    /* Block until the tcpip thread has finished initializing. */
    sys_sem_wait(sem);
    sys_sem_free(sem);
}
/*-----------------------------------------------------------*/ static void ftMac100_rx_task ( void *arg ) { struct xFtmac100If *macIf = arg; struct eth_hdr *pxHeader; struct pbuf *p; struct ftmac100_rxdes *rxdes; struct netif *pxNetIf = macIf->netIf; int error = 0; do { sys_sem_wait( &macIf->rx_sem ); check_next: rxdes = &gRxDes[macIf->rx_pointer]; if( rxdes->rxdes0 & FTMAC100_RXDES0_RXDMA_OWN ) { continue; } if( !(rxdes->rxdes0 & FTMAC100_RXDES0_FRS) ) { error = 1; LWIP_DEBUGF( NETIF_DEBUG, ( "ftmac100 rx desc not first segment\n" ) ); } if( rxdes->rxdes0 & (FTMAC100_RXDES0_FTL | FTMAC100_RXDES0_RUNT | FTMAC100_RXDES0_RX_ODD_NB) ) { error = 1; LWIP_DEBUGF( NETIF_DEBUG, ( "ftmac100: rx length error\n" ) ); #if LINK_STATS LINK_STATS_INC( link.lenerr ); #endif } if( rxdes->rxdes0 & FTMAC100_RXDES0_CRC_ERR ) { error = 1; LWIP_DEBUGF( NETIF_DEBUG, ( "ftmac100: rx checksum error\n" ) ); #if LINK_STATS LINK_STATS_INC( link.lenerr ); #endif } if ( error ) { #if LINK_STATS LINK_STATS_INC( link.drop ); #endif rxdes->rxdes0 = FTMAC100_RXDES0_RXDMA_OWN; ftMac100_rx_pointer_incr( &macIf->rx_pointer ); goto check_next; } /* * It is impossible to get multi-segment packets * because we always provide big enough receive buffers. */ if( !(rxdes->rxdes0 & FTMAC100_RXDES0_LRS) ) LWIP_DEBUGF( NETIF_DEBUG, ( "ftmac100 rx multi-segment packets\n" ) ); /* move received packet into a new pbuf */ p = prvLowLevelInput( (const unsigned char * const) rxdes->rxdes2, (unsigned short)(rxdes->rxdes0 & FTMAC100_RXDES0_RFL) ); /* no packet could be read, silently ignore this */ if( p != NULL ) { #if LINK_STATS LINK_STATS_INC( link.recv ); #endif /* points to packet payload, which starts with an Ethernet header */ pxHeader = p->payload; switch( htons( pxHeader->type ) ) { /* IP or ARP packet? 
*/ case ETHTYPE_IP: case ETHTYPE_ARP: /* full packet send to tcpip_thread to process */ if( pxNetIf->input( p, pxNetIf ) != ERR_OK ) { LWIP_DEBUGF(NETIF_DEBUG, ( "ethernetif_input: IP input error\n" ) ); pbuf_free(p); p = NULL; } break; default: pbuf_free( p ); p = NULL; break; } } else { #if LINK_STATS LINK_STATS_INC( link.memerr ); #endif } /* Done. Give desc back to hw and increment index */ rxdes->rxdes0 = FTMAC100_RXDES0_RXDMA_OWN; ftMac100_rx_pointer_incr( &macIf->rx_pointer ); goto check_next; } while (1); }
/**
 * Ethernet rx task being called periodically by FreeRTOS.
 *
 * Blocks on the interface's rx_sem (posted by the FEC interrupt), then
 * drains all ready receive buffers: damaged frames only update the link
 * statistics, valid frames are copied into a pbuf chain and passed to
 * eth_input().  Never returns.
 *
 * @param arg MAC interface descriptor (mcf5xxxfec_if_t *)
 * @return none
 */
void MAC_Rx_Task(void *arg )
{
    mcf5xxxfec_if_t *fecif;
    struct pbuf *p, *q;
    nbuf_t *pNBuf;
    uint8 *pPayLoad;

    fecif = (mcf5xxxfec_if_t *)arg;

    do {
        /* Sleep until the ISR signals pending frames. */
        sys_sem_wait( fecif->rx_sem );

        while( NBUF_ReadyRX( ) ) {
            pNBuf = NBUF_AllocRX( );
            if( pNBuf != NULL ) {
                /*FSL: removed to avoid get stuck if a BABR happens*/
                //LWIP_ASSERT( "MAC_Rx_Task: pNBuf->status & RX_BD_L ",
                //             pNBuf->status & RX_BD_L );

                /* These flags indicate that the frame has been damaged. In
                 * this case we must update the link stats if enabled and
                 * remove the frame from the FEC. */
                //if ( pNBuf->status & RX_ERROR_ALL_FLAGS )
                // FIXME: turn off CRC checking for now... it is throwing error even when I manually check the received packet
                // as byte-for-byte correct
                if ( pNBuf->status & (RX_ERROR_ALL_FLAGS & ~RX_ERROR_CHKSM_FLAG) ) {
#if LINK_STATS
                    lwip_stats.link.drop++;
                    if ( pNBuf->status & RX_ERROR_LENGTH_FLAG ) {
                        lwip_stats.link.lenerr++;
                    }
                    else if ( pNBuf->status & RX_ERROR_CHKSM_FLAG ) {
                        lwip_stats.link.chkerr++;
                    }
                    else {
                        lwip_stats.link.err++;
                    }
#endif
                }
                else {
                    /* The frame must now be valid. Perform some checks to see
                     * if the FEC driver is working correctly. */
                    LWIP_ASSERT( "MAC_Rx_Task: pNBuf->length != 0",
                                 pNBuf->length != 0 );

                    p = pbuf_alloc( PBUF_RAW, pNBuf->length, PBUF_POOL );
                    if( p != NULL ) {
#if ETH_PAD_SIZE
                        pbuf_header( p, -ETH_PAD_SIZE );
#endif
                        /* Copy the frame out of the driver buffer into the
                         * (possibly chained) pbuf. */
                        pPayLoad = pNBuf->data;
                        for( q = p; q != NULL; q = q->next ) {
                            memcpy( q->payload, pPayLoad, q->len );
                            pPayLoad += q->len;
                        }
#if ETH_PAD_SIZE
                        pbuf_header( p, ETH_PAD_SIZE );
#endif
                        /* Ethernet frame received. Handling it is not device
                         * dependent and therefore done in another function. */
                        eth_input( fecif->netif, p );
                    }
                }
                /*release the buffer under any circumstance*/
                NBUF_ReleaseRX( pNBuf );

                /* Tell the HW that there are new free RX buffers. */
                FEC_ReadyRX();
            }
            else {
#if LINK_STATS
                lwip_stats.link.memerr++;
                lwip_stats.link.drop++;
#endif
            }
        }
        /* Set RX Debug PIN to low since handling of next frame is possible. */
        FEC_DEBUG_RX_TIMING( 0 );
    } while( 1 );
}
/*-----------------------------------------------------------------------------------*/
/**
 * Main lwIP startup thread (unix/tapif port): initializes the stack, waits
 * for the tcpip thread, optionally brings up PPP, adds the tap (and
 * loopback) network interfaces, starts the example applications and then
 * blocks forever on a fresh semaphore that is never signalled.
 *
 * @param arg unused thread argument
 */
static void
main_thread(void *arg)
{
  struct ip_addr ipaddr, netmask, gw;
  sys_sem_t sem;
#if PPP_SUPPORT
  sio_fd_t ppp_sio;
#endif

  netif_init();

  /* tcpip_init_done() posts 'sem' once the tcpip thread is running. */
  sem = sys_sem_new(0);
  tcpip_init(tcpip_init_done, &sem);
  sys_sem_wait(sem);
  sys_sem_free(sem);
  printf("TCP/IP initialized.\n");

#if PPP_SUPPORT
  pppInit();
  /* PPP_PTY_TEST selects which serial device to run PPPoS over. */
#if PPP_PTY_TEST
  ppp_sio = sio_open(2);
#else
  ppp_sio = sio_open(0);
#endif
  if(!ppp_sio) {
    perror("Error opening device: ");
    exit(1);
  }
#ifdef LWIP_PPP_CHAP_TEST
  pppSetAuth(PPPAUTHTYPE_CHAP, "lwip", "mysecret");
#endif
  pppOpen(ppp_sio, pppLinkStatusCallback, NULL);
#endif /* PPP_SUPPORT */

#if LWIP_DHCP
  {
    /* All-zero addresses: DHCP will configure the interface. */
    IP4_ADDR(&gw, 0,0,0,0);
    IP4_ADDR(&ipaddr, 0,0,0,0);
    IP4_ADDR(&netmask, 0,0,0,0);
    netif_add(&netif, &ipaddr, &netmask, &gw, NULL, tapif_init, tcpip_input);
    netif_set_default(&netif);
    dhcp_init();
    dhcp_start(&netif);
  }
#else
  /* Static configuration on the 192.168.0.0/24 test network. */
  IP4_ADDR(&gw, 192,168,0,1);
  IP4_ADDR(&ipaddr, 192,168,0,2);
  IP4_ADDR(&netmask, 255,255,255,0);
  netif_set_default(netif_add(&netif,&ipaddr, &netmask, &gw, NULL, tapif_init, tcpip_input));
  netif_set_up(&netif);
#endif
  /* Only used for testing purposes: */
  /* IP4_ADDR(&gw, 193,10,66,1);
     IP4_ADDR(&ipaddr, 193,10,66,107);
     IP4_ADDR(&netmask, 255,255,252,0);
     netif_add(&ipaddr, &netmask, &gw, NULL, pcapif_init, tcpip_input);*/

#if LWIP_HAVE_LOOPIF
  IP4_ADDR(&gw, 127,0,0,1);
  IP4_ADDR(&ipaddr, 127,0,0,1);
  IP4_ADDR(&netmask, 255,0,0,0);
  netif_set_default(netif_add(&loopif, &ipaddr, &netmask, &gw, NULL, loopif_init, tcpip_input));
#endif

  /* Start the example applications. */
#if LWIP_TCP
  tcpecho_init();
  shell_init();
  httpd_init();
#endif
#if LWIP_UDP
  udpecho_init();
#endif
#if LWIP_RAW
  sys_thread_new(ping_thread, NULL, DEFAULT_THREAD_PRIO);
#endif
  printf("Applications started.\n");

  /*  sys_timeout(5000, tcp_timeout, NULL);*/

#ifdef MEM_PERF
  mem_perf_init("/tmp/memstats.client");
#endif /* MEM_PERF */
#if 0
  stats_display();
#endif
  /* Block for ever on a semaphore nobody ever posts. */
  sem = sys_sem_new(0);
  sys_sem_wait(sem);
}
/**
 * Adam's mem_malloc() plus solution for bug #17922.
 *
 * Splits a free block when the remainder can hold a new struct mem header,
 * absorbs the slack on a near fit, and uses the block as-is on an exact
 * fit.  The heap is protected by mem_sem.
 *
 * BUGFIX in this revision: in the split branch the code previously wrote
 * mem->next = ptr2 BEFORE copying mem->next into mem2->next, so the newly
 * created free block pointed at itself and the rest of the heap list was
 * lost.  mem2 is now fully initialized from the old links before mem->next
 * is redirected.
 *
 * @param size requested payload size in bytes (0 yields NULL); rounded up
 *             so the returned pointer is MEM_ALIGNMENT-aligned
 * @return pointer to the payload, or NULL if no fitting free block exists
 */
void *
mem_malloc(mem_size_t size)
{
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;

  if (size == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  if ((size % MEM_ALIGNMENT) != 0) {
    size += MEM_ALIGNMENT - ((size + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT);
  }

  if (size > MEM_SIZE) {
    return NULL;
  }

  sys_sem_wait(mem_sem);

  /* Scan from the lowest known free block; blocks past MEM_SIZE - size
     cannot fit the request. */
  for (ptr = (u8_t *)lfree - ram; ptr < MEM_SIZE - size;
       ptr = ((struct mem *)&ram[ptr])->next) {
    mem = (struct mem *)&ram[ptr];

    if (!mem->used) {
      ptr2 = ptr + SIZEOF_STRUCT_MEM + size;

      if (mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) >= size) {
        /* split large block, create empty remainder:
           build mem2 while mem->next still holds the old successor
           (see BUGFIX note above), then splice it in after mem */
        mem2 = (struct mem *)&ram[ptr2];
        mem2->used = 0;
        mem2->next = mem->next;
        mem2->prev = ptr;
        mem->next = ptr2;
        mem->used = 1;
        if (mem2->next != MEM_SIZE) {
          ((struct mem *)&ram[mem2->next])->prev = ptr2;
        }
      } else if (mem->next - (ptr + SIZEOF_STRUCT_MEM) > size) {
        /* near fit, no split, no mem2 creation, round up to mem->next */
        ptr2 = mem->next;
        mem->used = 1;
      } else if (mem->next - (ptr + SIZEOF_STRUCT_MEM) == size) {
        /* exact fit, do not split, no mem2 creation */
        mem->next = ptr2;
        mem->used = 1;
      }

      if (mem->used) {
#if MEM_STATS
        lwip_stats.mem.used += (size + SIZEOF_STRUCT_MEM);
        /* "max" tracks the highest heap offset ever handed out. */
        if (lwip_stats.mem.max < ptr2) {
          lwip_stats.mem.max = ptr2;
        }
#endif /* MEM_STATS */
        if (mem == lfree) {
          /* Find next free block after mem */
          while (lfree->used && lfree != ram_end) {
            lfree = (struct mem *)&ram[lfree->next];
          }
          LWIP_ASSERT("mem_malloc: !lfree->used", !lfree->used);
        }
        sys_sem_signal(mem_sem);
        LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
                    (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
        LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
                    (unsigned long)((u8_t *)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
        return (u8_t *)mem + SIZEOF_STRUCT_MEM;
      }
    }
  }

  LWIP_DEBUGF(MEM_DEBUG | 2, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
#if MEM_STATS
  ++lwip_stats.mem.err;
#endif /* MEM_STATS */
  sys_sem_signal(mem_sem);
  return NULL;
}
int main() { sys_sem_t sem; sys_init(); if(sys_sem_new(&sem, 0) != ERR_OK) { LWIP_ASSERT("failed to create semaphore", 0); } tcpip_init(tcpip_init_done, &sem); sys_sem_wait(&sem); sys_sem_free(&sem); /////////////////////////////////////////////////////////////////////////////////////////////////// struct netconn *conn, *newconn; err_t err; /* Create a new connection identifier. */ conn = netconn_new(NETCONN_TCP); netconn_set_noautorecved(conn, 0); tcp_nagle_disable(conn->pcb.tcp); /* Bind connection to well known port number 7. */ netconn_bind(conn, NULL, 80); /* Tell connection to go into listening mode. */ netconn_listen(conn); while (1) { /* Grab new connection. */ err = netconn_accept(conn, &newconn); printf("accepted new connection %p\n", newconn); /* Process the new connection. */ if (err == ERR_OK) { struct netbuf *buf; void *data; u16_t len; u64_t total_rcvd = 0; u64_t eal_tsc_resolution_hz = rte_get_timer_hz(); u64_t end = rte_get_timer_cycles() + eal_tsc_resolution_hz; while ((err = netconn_recv(newconn, &buf)) == ERR_OK) { netbuf_data(buf, &data, &len); if (len > 0) { total_rcvd += len; } if (rte_get_timer_cycles() >= end) { printf("%llu \n", (unsigned long long)total_rcvd); total_rcvd = 0; end = rte_get_timer_cycles() + eal_tsc_resolution_hz; } #if 0 if (err != ERR_OK) { printf("tcpecho: netconn_write: error \"%s\"\n", lwip_strerr(err)); } #endif //} while (netbuf_next(buf) >= 0); netbuf_delete(buf); } /*printf("Got EOF, looping\n");*/ /* Close connection and discard connection identifier. */ netconn_close(newconn); netconn_delete(newconn); } } while (1); }
/**
 * Send data on a connected netconn (multi-stack variant), blocking until
 * all of it has been handed to the stack or an error occurs.
 *
 * For TCP the data is chunked to the current tcp_sndbuf() space; conn->sem
 * is used to sleep until the tcpip thread frees send-buffer space.  Each
 * chunk is posted to the owning stack as an API_MSG_WRITE message and
 * acknowledged via conn->mbox.
 *
 * BUGFIX: conn->stack was previously read in the declaration, BEFORE the
 * conn == NULL guard, dereferencing a NULL pointer when conn was NULL.
 *
 * @param conn    connection to write to (NULL yields ERR_VAL)
 * @param dataptr data to send; 'copy' decides whether the stack copies it
 * @param size    number of bytes at dataptr
 * @param copy    nonzero to copy the data into the stack's buffers
 * @return ERR_OK on success, otherwise the connection's error code
 */
err_t netconn_write(struct netconn *conn, void *dataptr, u16_t size, u8_t copy)
{
  struct stack *stack;
  struct api_msg msg;
  u16_t len;

  if (conn == NULL) {
    return ERR_VAL;
  }
  /* Safe to dereference only after the NULL check above. */
  stack = conn->stack;

  if (conn->err != ERR_OK) {
    return conn->err;
  }
  /* Lazily create the semaphore used to wait for send-buffer space. */
  if (conn->sem == SYS_SEM_NULL) {
    conn->sem = sys_sem_new(0);
    if (conn->sem == SYS_SEM_NULL) {
      return ERR_MEM;
    }
  }
  msg.type = API_MSG_WRITE;
  msg.msg.conn = conn;
  conn->state = NETCONN_WRITE;
  while (conn->err == ERR_OK && size > 0) {
    msg.msg.msg.w.dataptr = dataptr;
    msg.msg.msg.w.copy = copy;
    if (conn->type == NETCONN_TCP) {
      int avail;
      /* Sleep until the TCP send buffer has room. */
      while ((avail=tcp_sndbuf(conn->pcb.tcp)) == 0) {
        sys_sem_wait(conn->sem);
        if (conn->err != ERR_OK) {
          goto ret;
        }
      }
      if (size > avail) {
        /* We cannot send more than one send buffer's worth of data at a
           time. */
        len = avail;
      } else {
        len = size;
      }
    } else {
      /* Non-TCP (UDP/RAW): send everything in one message. */
      len = size;
    }

    LWIP_DEBUGF(API_LIB_DEBUG, ("netconn_write: writing %d bytes (%d)\n", len, copy));
    msg.msg.msg.w.len = len;
    /* Hand the chunk to the stack's tcpip thread and wait for its reply. */
    api_msg_post(stack, &msg);
    sys_mbox_fetch(conn->mbox, NULL);
    if (conn->err == ERR_OK) {
      /* Chunk accepted: advance past it. */
      dataptr = (void *)((char *)dataptr + len);
      size -= len;
    } else if (conn->err == ERR_MEM) {
      /* Out of memory is transient: clear the error and retry after
         waiting for buffer space. */
      conn->err = ERR_OK;
      sys_sem_wait(conn->sem);
    } else {
      goto ret;
    }
  }
 ret:
  conn->state = NETCONN_NONE;
  // X1004
  /*
  if (conn->sem != SYS_SEM_NULL) {
    sys_sem_free(conn->sem);
    conn->sem = SYS_SEM_NULL;
  }
  // */
  return conn->err;
}
/* This is somewhat different to other ports: we have a main loop here:
 * a dedicated task that waits for packets to arrive. This would normally be
 * done from interrupt context with embedded hardware, but we don't get an
 * interrupt in windows for that :-) */
/*
 * Initializes lwIP (directly when NO_SYS, via the tcpip thread otherwise),
 * then polls the configured interfaces (pcap Ethernet, PPPoS serial,
 * SLIP, loopback) until a key is pressed, and finally shuts PPP and the
 * pcap interface down.
 */
static void main_loop(void)
{
#if !NO_SYS
  err_t err;
  sys_sem_t init_sem;
#endif /* NO_SYS */
#if USE_PPP
#if !USE_ETHERNET
  int count;
  u8_t rxbuf[1024];
#endif
  volatile int callClosePpp = 0;
#endif /* USE_PPP */

  /* initialize lwIP stack, network interfaces and applications */
#if NO_SYS
  lwip_init();
  test_init(NULL);
#else /* NO_SYS */
  err = sys_sem_new(&init_sem, 0);
  LWIP_ASSERT("failed to create init_sem", err == ERR_OK);
  tcpip_init(test_init, &init_sem);
  /* we have to wait for initialization to finish before
   * calling update_adapter()! */
  sys_sem_wait(&init_sem);
  sys_sem_free(&init_sem);
#endif /* NO_SYS */
#if (LWIP_SOCKET || LWIP_NETCONN) && LWIP_NETCONN_SEM_PER_THREAD
  netconn_thread_init();
#endif

  /* MAIN LOOP for driver update (and timers if NO_SYS) */
  while (!_kbhit()) {
#if NO_SYS
    /* handle timers (already done in tcpip.c when NO_SYS=0) */
    sys_check_timeouts();
#endif /* NO_SYS */

#if USE_ETHERNET
#if !PCAPIF_RX_USE_THREAD
    /* check for packets and link status*/
    pcapif_poll(&netif);
    /* When pcapif_poll comes back, there are not packets, so sleep to
       prevent 100% CPU load. Don't do this in an embedded system since it
       increases latency! */
    sys_msleep(1);
#else /* !PCAPIF_RX_USE_THREAD */
    sys_msleep(50);
#endif /* !PCAPIF_RX_USE_THREAD */
#else /* USE_ETHERNET */
    /* try to read characters from serial line and pass them to PPPoS */
    count = sio_read(ppp_sio, (u8_t*)rxbuf, 1024);
    if(count > 0) {
      pppos_input(ppp, rxbuf, count);
    } else {
      /* nothing received, give other tasks a chance to run */
      sys_msleep(1);
    }
#endif /* USE_ETHERNET */
#if USE_SLIPIF
    slipif_poll(&slipif1);
#if USE_SLIPIF > 1
    slipif_poll(&slipif2);
#endif /* USE_SLIPIF > 1 */
#endif /* USE_SLIPIF */
#if ENABLE_LOOPBACK && !LWIP_NETIF_LOOPBACK_MULTITHREADING
    /* check for loopback packets on all netifs */
    netif_poll_all();
#endif /* ENABLE_LOOPBACK && !LWIP_NETIF_LOOPBACK_MULTITHREADING */
#if USE_PPP
    {
      /* debugging hook: set do_hup in a debugger to simulate a hangup */
      int do_hup = 0;
      if(do_hup) {
        ppp_close(ppp, 1);
        do_hup = 0;
      }
    }
    if(callClosePpp && ppp) {
      /* make sure to disconnect PPP before stopping the program... */
      callClosePpp = 0;
#if NO_SYS
      ppp_close(ppp, 0);
#else
      pppapi_close(ppp, 0);
#endif
      ppp = NULL;
    }
#endif /* USE_PPP */
  }

#if USE_PPP
  if(ppp) {
    u32_t started;
    printf("Closing PPP connection...\n");
    /* make sure to disconnect PPP before stopping the program... */
#if NO_SYS
    ppp_close(ppp, 0);
#else
    pppapi_close(ppp, 0);
#endif
    ppp = NULL;
    /* Wait for some time to let PPP finish... */
    started = sys_now();
    do {
#if USE_ETHERNET && !PCAPIF_RX_USE_THREAD
      pcapif_poll(&netif);
#else /* USE_ETHERNET && !PCAPIF_RX_USE_THREAD */
      sys_msleep(50);
#endif /* USE_ETHERNET && !PCAPIF_RX_USE_THREAD */
      /* @todo: need a better check here: only wait until PPP is down */
    } while(sys_now() - started < 5000);
  }
#endif /* USE_PPP */
#if (LWIP_SOCKET || LWIP_NETCONN) && LWIP_NETCONN_SEM_PER_THREAD
  netconn_thread_cleanup();
#endif
#if USE_ETHERNET
  /* release the pcap library... */
  pcapif_shutdown(&netif);
#endif /* USE_ETHERNET */
}
/** This is an example function that tests more than one thread being active in select. */ static void sockex_testtwoselects(void *arg) { int s1; int s2; int ret; struct sockaddr_in addr; size_t len; err_t lwiperr; struct sockex_select_helper h1, h2, h3, h4; LWIP_UNUSED_ARG(arg); /* set up address to connect to */ memset(&addr, 0, sizeof(addr)); addr.sin_len = sizeof(addr); addr.sin_family = AF_INET; addr.sin_port = PP_HTONS(SOCK_TARGET_PORT); addr.sin_addr.s_addr = inet_addr(SOCK_TARGET_HOST); /* create the sockets */ s1 = lwip_socket(AF_INET, SOCK_STREAM, 0); LWIP_ASSERT("s1 >= 0", s1 >= 0); s2 = lwip_socket(AF_INET, SOCK_STREAM, 0); LWIP_ASSERT("s2 >= 0", s2 >= 0); /* connect, should succeed */ ret = lwip_connect(s1, (struct sockaddr*)&addr, sizeof(addr)); LWIP_ASSERT("ret == 0", ret == 0); ret = lwip_connect(s2, (struct sockaddr*)&addr, sizeof(addr)); LWIP_ASSERT("ret == 0", ret == 0); /* write the start of a GET request */ #define SNDSTR1 "G" len = strlen(SNDSTR1); ret = lwip_write(s1, SNDSTR1, len); LWIP_ASSERT("ret == len", ret == (int)len); ret = lwip_write(s2, SNDSTR1, len); LWIP_ASSERT("ret == len", ret == (int)len); h1.wait_read = 1; h1.wait_write = 1; h1.wait_err = 1; h1.expect_read = 0; h1.expect_write = 0; h1.expect_err = 0; lwiperr = sys_sem_new(&h1.sem, 0); LWIP_ASSERT("lwiperr == ERR_OK", lwiperr == ERR_OK); h1.socket = s1; h1.wait_ms = 500; h2 = h1; lwiperr = sys_sem_new(&h2.sem, 0); LWIP_ASSERT("lwiperr == ERR_OK", lwiperr == ERR_OK); h2.socket = s2; h2.wait_ms = 1000; h3 = h1; lwiperr = sys_sem_new(&h3.sem, 0); LWIP_ASSERT("lwiperr == ERR_OK", lwiperr == ERR_OK); h3.socket = s2; h3.wait_ms = 1500; h4 = h1; lwiperr = sys_sem_new(&h4.sem, 0); LWIP_ASSERT("lwiperr == ERR_OK", lwiperr == ERR_OK); h4.socket = s2; h4.wait_ms = 2000; /* select: all sockets should time out if the other side is a good HTTP server */ sys_thread_new("sockex_select_waiter1", sockex_select_waiter, &h2, 0, 0); sys_msleep(100); sys_thread_new("sockex_select_waiter2", 
sockex_select_waiter, &h1, 0, 0); sys_msleep(100); sys_thread_new("sockex_select_waiter2", sockex_select_waiter, &h4, 0, 0); sys_msleep(100); sys_thread_new("sockex_select_waiter2", sockex_select_waiter, &h3, 0, 0); sys_sem_wait(&h1.sem); sys_sem_wait(&h2.sem); sys_sem_wait(&h3.sem); sys_sem_wait(&h4.sem); /* close */ ret = lwip_close(s1); LWIP_ASSERT("ret == 0", ret == 0); ret = lwip_close(s2); LWIP_ASSERT("ret == 0", ret == 0); printf("sockex_testtwoselects finished successfully\n"); }
/**
 * lwIP's select() implementation.
 *
 * First scans the given descriptor sets for sockets that are already
 * ready (lwip_selscan).  If none are and a non-zero (or absent) timeout
 * was given, a lwip_select_cb with a private semaphore is pushed onto the
 * global select_cb_list; event_callback() posts that semaphore when a
 * matching socket event arrives.  The list itself is protected by the
 * global 'selectsem'.
 *
 * @return number of ready descriptors (0 on timeout or pure poll with
 *         nothing ready); errno is set to 0 in all paths here.
 */
int
lwip_select(int maxfdp1, fd_set *readset, fd_set *writeset, fd_set *exceptset,
            struct timeval *timeout)
{
    int i;
    int nready;
    fd_set lreadset, lwriteset, lexceptset;
    u32_t msectimeout;
    struct lwip_select_cb select_cb;
    struct lwip_select_cb *p_selcb;

    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select(%d, %p, %p, %p, tvsec=%ld tvusec=%ld)\n", maxfdp1, (void *)readset, (void *) writeset, (void *) exceptset, timeout ? timeout->tv_sec : -1L, timeout ? timeout->tv_usec : -1L));

    select_cb.next = 0;
    select_cb.readset = readset;
    select_cb.writeset = writeset;
    select_cb.exceptset = exceptset;
    select_cb.sem_signalled = 0;

    /* Protect ourselves searching through the list */
    if (!selectsem)
        selectsem = sys_sem_new(1);
    sys_sem_wait(selectsem);

    /* Work on local copies so the caller's sets survive the first scan. */
    if (readset)
        lreadset = *readset;
    else
        FD_ZERO(&lreadset);
    if (writeset)
        lwriteset = *writeset;
    else
        FD_ZERO(&lwriteset);
    if (exceptset)
        lexceptset = *exceptset;
    else
        FD_ZERO(&lexceptset);

    /* Go through each socket in each list to count number of sockets which
       currently match */
    nready = lwip_selscan(maxfdp1, &lreadset, &lwriteset, &lexceptset);

    /* If we don't have any current events, then suspend if we are supposed to */
    if (!nready) {
        if (timeout && timeout->tv_sec == 0 && timeout->tv_usec == 0) {
            /* Zero timeout == pure poll: report nothing ready. */
            sys_sem_signal(selectsem);
            if (readset)
                FD_ZERO(readset);
            if (writeset)
                FD_ZERO(writeset);
            if (exceptset)
                FD_ZERO(exceptset);
            LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: no timeout, returning 0\n"));
            set_errno(0);
            return 0;
        }

        /* add our semaphore to list */
        /* We don't actually need any dynamic memory. Our entry on the
         * list is only valid while we are in this function, so it's ok
         * to use local variables */
        select_cb.sem = sys_sem_new(0);
        /* Note that we are still protected */
        /* Put this select_cb on top of list */
        select_cb.next = select_cb_list;
        select_cb_list = &select_cb;

        /* Now we can safely unprotect */
        sys_sem_signal(selectsem);

        /* Now just wait to be woken */
        if (timeout == 0)
            /* Wait forever */
            msectimeout = 0;
        else
            /* Convert to milliseconds, rounding the microseconds. */
            msectimeout = ((timeout->tv_sec * 1000) + ((timeout->tv_usec + 500)/1000));

        i = sys_sem_wait_timeout(select_cb.sem, msectimeout);

        /* Take us off the list */
        sys_sem_wait(selectsem);
        if (select_cb_list == &select_cb)
            select_cb_list = select_cb.next;
        else
            for (p_selcb = select_cb_list; p_selcb; p_selcb = p_selcb->next)
                if (p_selcb->next == &select_cb) {
                    p_selcb->next = select_cb.next;
                    break;
                }
        sys_sem_signal(selectsem);

        sys_sem_free(select_cb.sem);
        if (i == 0) /* Timeout */
        {
            if (readset)
                FD_ZERO(readset);
            if (writeset)
                FD_ZERO(writeset);
            if (exceptset)
                FD_ZERO(exceptset);
            LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: timeout expired\n"));
            set_errno(0);
            return 0;
        }

        /* Reload the local copies and rescan now that an event arrived. */
        if (readset)
            lreadset = *readset;
        else
            FD_ZERO(&lreadset);
        if (writeset)
            lwriteset = *writeset;
        else
            FD_ZERO(&lwriteset);
        if (exceptset)
            lexceptset = *exceptset;
        else
            FD_ZERO(&lexceptset);

        /* See what's set */
        nready = lwip_selscan(maxfdp1, &lreadset, &lwriteset, &lexceptset);
    } else
        sys_sem_signal(selectsem);

    /* Copy the result sets back to the caller. */
    if (readset)
        *readset = lreadset;
    if (writeset)
        *writeset = lwriteset;
    if (exceptset)
        *exceptset = lexceptset;

    LWIP_DEBUGF(SOCKETS_DEBUG, ("lwip_select: nready=%d\n", nready));
    set_errno(0);

    return nready;
}
/*
 * Low-level FTMAC100 transmit: copies the (possibly chained) pbuf into the
 * contiguous tx_buf, claims the next TX descriptor (serialized by tx_sem),
 * starts the DMA and polls for completion.
 *
 * @param pxNetIf unused netif pointer
 * @param p       pbuf (chain) to transmit; the first ETH_PAD_SIZE bytes of
 *                the first pbuf are padding and are skipped
 * @return ERR_OK on success, ERR_BUF if the frame is too large for tx_buf
 *         or the hardware did not complete within netifMAX_TX_ATTEMPTS
 */
static err_t prvLowLevelOutput( struct netif *pxNetIf, struct pbuf *p )
{
    /* This is taken from lwIP example code and therefore does not conform
    to the FreeRTOS coding standard. */
    struct pbuf *q;
    unsigned char *pucBuffer = tx_buf;
    unsigned char *pucChar;
    u16_t usTotalLength = p->tot_len - ETH_PAD_SIZE;
    err_t xReturn = ERR_OK;
    long x;
    struct ftmac100_txdes *txdes;

    ( void ) pxNetIf;

#if defined(LWIP_DEBUG) && LWIP_NETIF_TX_SINGLE_PBUF
    LWIP_ASSERT("p->next == NULL && p->len == p->tot_len", p->next == NULL && p->len == p->tot_len);
#endif

    /* Initiate transfer. */
    if( p->len == p->tot_len ) {
        /* Single pbuf: copy the payload (minus padding) straight in. */
        memcpy( pucBuffer, &( ( unsigned char * ) p->payload )[ ETH_PAD_SIZE ], usTotalLength );
    } else {
        /* pbuf chain, copy into contiguous tx_buf. */
        if( p->tot_len >= sizeof( tx_buf ) ) {
#if LINK_STATS
            LINK_STATS_INC( link.lenerr );
            LINK_STATS_INC( link.drop );
#endif
            xReturn = ERR_BUF;
        } else {
            pucChar = tx_buf;
            for( q = p; q != NULL; q = q->next ) {
                /* Send the data from the pbuf to the interface, one pbuf at a
                time. The size of the data in each pbuf is kept in the ->len
                variable. */
                /* send data from(q->payload, q->len); */
                LWIP_DEBUGF( NETIF_DEBUG, ( "NETIF: send pucChar %p q->payload %p q->len %i q->next %p\n", pucChar, q->payload, ( int ) q->len, ( void* ) q->next ) );
                if( q == p ) {
                    /* Only the first pbuf carries the ETH_PAD_SIZE padding. */
                    memcpy( pucChar, &( ( char * ) q->payload )[ ETH_PAD_SIZE ], q->len - ETH_PAD_SIZE );
                    pucChar += q->len - ETH_PAD_SIZE;
                } else {
                    memcpy( pucChar, q->payload, q->len );
                    pucChar += q->len;
                }
            }
        }
    }

    if( xReturn == ERR_OK ) {
        /* Serialize access to the TX descriptor ring. */
        sys_sem_wait( &pgMac100If->tx_sem );
        /* Assume failure until the hardware releases the descriptor. */
        xReturn = ERR_BUF;
#if 0
        /* Only has 1 tx descriptor */
        txdes = &gTxDes[0];
#else
        txdes = &gTxDes[pgMac100If->tx_pointer];
        pgMac100If->tx_pointer = (pgMac100If->tx_pointer + 1) & (TX_QUEUE_ENTRIES - 1);;
#endif
        /* setup TX descriptor */
        txdes->txdes2 = (int) pucBuffer;
        txdes->txdes1 &= FTMAC100_TXDES1_EDOTR;
        /* Pad to the minimum Ethernet frame size. */
        if (usTotalLength < 64)
            usTotalLength = 64;
        txdes->txdes1 |= ( FTMAC100_TXDES1_FTS | FTMAC100_TXDES1_LTS | FTMAC100_TXDES1_TXBUF_SIZE( usTotalLength ) );
        /* Descriptor owned by FTMAC */
        txdes->txdes0 = FTMAC100_TXDES0_TXDMA_OWN;
        /* start transmit */
        FTMAC100_OFFSET_TXPD = 1;

        /* Poll for completion, yielding to other tasks between attempts. */
        for( x = 0; x < netifMAX_TX_ATTEMPTS; x++ ) {
            if( !(txdes->txdes0 & FTMAC100_TXDES0_TXDMA_OWN) ) {
                xReturn = ERR_OK;
#if LINK_STATS
                LINK_STATS_INC( link.xmit );
#endif
                break;
            } else {
                vTaskDelay( netifTX_BUFFER_FREE_WAIT );
            }
        }
        if( xReturn != ERR_OK ) {
#if LINK_STATS
            LINK_STATS_INC( link.memerr );
            LINK_STATS_INC( link.drop );
#endif
        }
        /* reset the descriptor */
        txdes->txdes1 &= ~( FTMAC100_TXDES1_FTS | FTMAC100_TXDES1_LTS | FTMAC100_TXDES1_TXBUF_SIZE( 0x7ff ) );
        sys_sem_signal( &pgMac100If->tx_sem );
    }

    return xReturn;
}
/**
 * Netconn event callback: updates a socket's rcvevent/sendevent counters
 * and wakes any lwip_select() call that is waiting on that socket.
 *
 * Both the per-socket event counters and the select_cb_list walk are
 * protected by the global 'selectsem'.
 *
 * @param conn netconn the event occurred on (ignored if NULL)
 * @param evt  which event (RCVPLUS/RCVMINUS/SENDPLUS/SENDMINUS)
 * @param len  unused here
 */
static void
event_callback(struct netconn *conn, enum netconn_evt evt, u16_t len)
{
    int s;
    struct lwip_socket *sock;
    struct lwip_select_cb *scb;

    /* Get socket */
    if (conn) {
        s = conn->socket;
        if (s < 0) {
            /* Data comes in right away after an accept, even though
             * the server task might not have created a new socket yet.
             * Just count down (or up) if that's the case and we
             * will use the data later. Note that only receive events
             * can happen before the new socket is set up. */
            if (evt == NETCONN_EVT_RCVPLUS)
                conn->socket--;
            return;
        }

        sock = get_socket(s);
        if (!sock)
            return;
    } else
        return;

    if (!selectsem)
        selectsem = sys_sem_new(1);

    sys_sem_wait(selectsem);
    /* Set event as required */
    switch (evt) {
    case NETCONN_EVT_RCVPLUS:
        sock->rcvevent++;
        break;
    case NETCONN_EVT_RCVMINUS:
        sock->rcvevent--;
        break;
    case NETCONN_EVT_SENDPLUS:
        sock->sendevent = 1;
        break;
    case NETCONN_EVT_SENDMINUS:
        sock->sendevent = 0;
        break;
    }
    sys_sem_signal(selectsem);

    /* Now decide if anyone is waiting for this socket */
    /* NOTE: This code is written this way to protect the select link list
       but to avoid a deadlock situation by releasing socksem before
       signalling for the select. This means we need to go through the list
       multiple times ONLY IF a select was actually waiting. We go through
       the list the number of waiting select calls + 1. This list is
       expected to be small. */
    while (1) {
        sys_sem_wait(selectsem);
        for (scb = select_cb_list; scb; scb = scb->next) {
            if (scb->sem_signalled == 0) {
                /* Test this select call for our socket */
                if (scb->readset && FD_ISSET(s, scb->readset))
                    if (sock->rcvevent)
                        break;
                if (scb->writeset && FD_ISSET(s, scb->writeset))
                    if (sock->sendevent)
                        break;
            }
        }
        if (scb) {
            /* Mark it signalled while still holding selectsem, then wake
               the selecting thread after releasing the list lock. */
            scb->sem_signalled = 1;
            sys_sem_signal(selectsem);
            sys_sem_signal(scb->sem);
        } else {
            sys_sem_signal(selectsem);
            break;
        }
    }
}
/*-----------------------------------------------------------------------------------*/
/**
 * Main lwIP startup thread (old tapif API variant): initializes the stack,
 * waits for the tcpip thread, adds the tap and loopback interfaces (DHCP
 * or static addressing), starts the example applications and then blocks
 * forever on a fresh semaphore that is never signalled.
 *
 * @param arg unused thread argument
 */
static void
main_thread(void *arg)
{
  struct ip_addr ipaddr, netmask, gw;
  sys_sem_t sem;

  netif_init();

  /* tcpip_init_done() posts 'sem' once the tcpip thread is running. */
  sem = sys_sem_new(0);
  tcpip_init(tcpip_init_done, &sem);
  sys_sem_wait(sem);
  sys_sem_free(sem);
  printf("TCP/IP initialized.\n");

#if LWIP_DHCP
  {
    struct netif *netif;
    /* All-zero addresses: DHCP will configure the interface. */
    IP4_ADDR(&gw, 0,0,0,0);
    IP4_ADDR(&ipaddr, 0,0,0,0);
    IP4_ADDR(&netmask, 0,0,0,0);
    netif = netif_add(&ipaddr, &netmask, &gw, tapif_init, tcpip_input);
    netif_set_default(netif);
    dhcp_init();
    dhcp_start(netif);
  }
#else
  /* Static configuration on the 192.168.0.0/24 test network. */
  IP4_ADDR(&gw, 192,168,0,1);
  IP4_ADDR(&ipaddr, 192,168,0,2);
  IP4_ADDR(&netmask, 255,255,255,0);

  /*  netif_set_default(netif_add(&ipaddr, &netmask, &gw, tapif_init,
      tcpip_input));*/
  netif_set_default(netif_add(&ipaddr, &netmask, &gw, tapif_init,
                              tcpip_input));
#endif
  /* Only used for testing purposes: */
  /*  IP4_ADDR(&gw, 193,10,66,1);
  IP4_ADDR(&ipaddr, 193,10,66,107);
  IP4_ADDR(&netmask, 255,255,252,0);

  netif_add(&ipaddr, &netmask, &gw, pcapif_init, tcpip_input);*/

  /* Loopback interface. */
  IP4_ADDR(&gw, 127,0,0,1);
  IP4_ADDR(&ipaddr, 127,0,0,1);
  IP4_ADDR(&netmask, 255,0,0,0);

  netif_add(&ipaddr, &netmask, &gw, loopif_init, tcpip_input);

  /* Start the example applications. */
  tcpecho_init();
  shell_init();
  httpd_init();
  udpecho_init();

  printf("Applications started.\n");

  /*  sys_timeout(5000, tcp_timeout, NULL);*/

#ifdef MEM_PERF
  mem_perf_init("/tmp/memstats.client");
#endif /* MEM_PERF */

  /* Block for ever on a semaphore nobody ever posts. */
  sem = sys_sem_new(0);
  sys_sem_wait(sem);
}