/**
 * Call netif_poll() in the main loop of your application. This is to prevent
 * reentering non-reentrant functions like tcp_input(). Packets passed to
 * netif_loop_output() are put on a list that is passed to netif->input() by
 * netif_poll().
 *
 * @param netif the netif whose loopback queue is drained; each dequeued
 *              packet is fed to ip_input() outside the critical section
 */
void netif_poll(struct netif *netif)
{
  struct pbuf *in;
  /* If we have a loopif, SNMP counters are adjusted for it,
   * if not they are adjusted for 'netif'. */
#if LWIP_SNMP
#if LWIP_HAVE_LOOPIF
  struct netif *stats_if = &loop_netif;
#else /* LWIP_HAVE_LOOPIF */
  struct netif *stats_if = netif;
#endif /* LWIP_HAVE_LOOPIF */
#endif /* LWIP_SNMP */
  SYS_ARCH_DECL_PROTECT(lev);

  do {
    /* Get a packet from the list. With SYS_LIGHTWEIGHT_PROT=1, this is protected */
    SYS_ARCH_PROTECT(lev);
    in = netif->loop_first;
    if (in != NULL) {
      struct pbuf *in_end = in;
#if LWIP_LOOPBACK_MAX_PBUFS
      u8_t clen = pbuf_clen(in);
      /* adjust the number of pbufs on queue */
      netif->loop_cnt_current -= clen;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */
      /* a queued packet may itself be a chain of pbufs; walk to its last
       * segment, which is the only one whose len equals its tot_len */
      while (in_end->len != in_end->tot_len) {
        in_end = in_end->next;
      }
      /* 'in_end' now points to the last pbuf from 'in' */
      if (in_end == netif->loop_last) {
        /* this was the last pbuf in the list */
        netif->loop_first = netif->loop_last = NULL;
      } else {
        /* pop the pbuf off the list */
        netif->loop_first = in_end->next;
      }
      /* De-queue the pbuf from its successors on the 'loop_' list. */
      in_end->next = NULL;
    }
    SYS_ARCH_UNPROTECT(lev);

    if (in != NULL) {
      LINK_STATS_INC(link.recv);
      snmp_add_ifinoctets(stats_if, in->tot_len);
      snmp_inc_ifinucastpkts(stats_if);
      /* loopback packets are always IP packets! */
      if (ip_input(in, netif) != ERR_OK) {
        /* ip_input() refused the packet, so ownership stays with us: free it */
        pbuf_free(in);
      }
      /* Don't reference the packet any more! */
      in = NULL;
    }
    /* go on while there is a packet on the list */
  } while (netif->loop_first != NULL);
}
/** Queue a call to pbuf_free_ooseq if not already queued. */ static void pbuf_pool_is_empty(void) { u8_t queued; SYS_ARCH_DECL_PROTECT(old_level); SYS_ARCH_PROTECT(old_level); queued = pbuf_free_ooseq_queued; pbuf_free_ooseq_queued = 1; SYS_ARCH_UNPROTECT(old_level); if(!queued) { /* queue a call to pbuf_free_ooseq if not already queued */ if(tcpip_callback_with_block(pbuf_free_ooseq, NULL, 0) != ERR_OK) { SYS_ARCH_PROTECT(old_level); pbuf_free_ooseq_queued = 0; SYS_ARCH_UNPROTECT(old_level); } } }
/**
 * Increment the reference count of the pbuf.
 *
 * @param p pbuf to increase reference counter of (NULL is ignored)
 */
void
pbuf_ref(struct pbuf *p)
{
  SYS_ARCH_DECL_PROTECT(old_level);

  /* nothing to do for a missing pbuf */
  if (p == NULL) {
    return;
  }
  /* the counter update must be atomic w.r.t. concurrent pbuf_free() */
  SYS_ARCH_PROTECT(old_level);
  p->ref++;
  SYS_ARCH_UNPROTECT(old_level);
}
/**
 * Increment the reference count of all pbufs in a chain.
 *
 * @param p first pbuf of chain (NULL yields an empty walk)
 */
void
pbuf_ref_chain(struct pbuf *p)
{
  struct pbuf *q;
  SYS_ARCH_DECL_PROTECT(old_level);

  /* bump every segment's refcount inside one critical section */
  SYS_ARCH_PROTECT(old_level);
  for (q = p; q != NULL; q = q->next) {
    q->ref++;
  }
  SYS_ARCH_UNPROTECT(old_level);
}
/** Queue a call to pbuf_free_ooseq if not already queued. */
static void
pbuf_pool_is_empty(void)
{
#ifndef PBUF_POOL_FREE_OOSEQ_QUEUE_CALL
  /* No queueing mechanism configured: just raise the pending flag;
   * pbuf_free_ooseq() is expected to be invoked (polled) elsewhere. */
  SYS_ARCH_DECL_PROTECT(old_level);
  SYS_ARCH_PROTECT(old_level);
  pbuf_free_ooseq_pending = 1;
  SYS_ARCH_UNPROTECT(old_level);
#else /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
  u8_t queued;
  SYS_ARCH_DECL_PROTECT(old_level);

  /* atomic test-and-set: only the caller that flips the flag queues the call */
  SYS_ARCH_PROTECT(old_level);
  queued = pbuf_free_ooseq_pending;
  pbuf_free_ooseq_pending = 1;
  SYS_ARCH_UNPROTECT(old_level);

  if (!queued) {
    /* queue a call to pbuf_free_ooseq if not already queued */
    PBUF_POOL_FREE_OOSEQ_QUEUE_CALL();
  }
#endif /* PBUF_POOL_FREE_OOSEQ_QUEUE_CALL */
}
/**
 * Allocate one element from the pool of the given type.
 *
 * @param type the pool to allocate from (must be < MEMP_MAX)
 * @return aligned pointer to the element payload (just past the struct memp
 *         header), or NULL if the pool is exhausted
 */
void *
memp_malloc(memp_t type)
{
  struct memp *memp;
  void *mem;
#if SYS_LIGHTWEIGHT_PROT
  SYS_ARCH_DECL_PROTECT(old_level);
#endif

  LWIP_ASSERT("memp_malloc: type < MEMP_MAX", type < MEMP_MAX);

  /* guard the free list: lightweight protection or a global semaphore */
#if SYS_LIGHTWEIGHT_PROT
  SYS_ARCH_PROTECT(old_level);
#else /* SYS_LIGHTWEIGHT_PROT */
  sys_sem_wait(mutex);
#endif /* SYS_LIGHTWEIGHT_PROT */

  memp = memp_tab[type];

  if (memp != NULL) {
    /* pop the first free element off this pool's singly-linked free list */
    memp_tab[type] = memp->next;
    memp->next = NULL;
#if MEMP_STATS
    ++lwip_stats.memp[type].used;
    if (lwip_stats.memp[type].used > lwip_stats.memp[type].max) {
      lwip_stats.memp[type].max = lwip_stats.memp[type].used;
    }
#endif /* MEMP_STATS */
#if SYS_LIGHTWEIGHT_PROT
    SYS_ARCH_UNPROTECT(old_level);
#else /* SYS_LIGHTWEIGHT_PROT */
    sys_sem_signal(mutex);
#endif /* SYS_LIGHTWEIGHT_PROT */
    /* the caller-visible pointer starts after the header, suitably aligned */
    LWIP_ASSERT("memp_malloc: memp properly aligned",
                ((mem_ptr_t)MEM_ALIGN((u8_t *)memp + sizeof(struct memp)) % MEM_ALIGNMENT) == 0);

    mem = MEM_ALIGN((u8_t *)memp + sizeof(struct memp));
    return mem;
  } else {
    LWIP_DEBUGF(MEMP_DEBUG | 2, ("memp_malloc: out of memory in pool %d\n", type));
#if MEMP_STATS
    ++lwip_stats.memp[type].err;
#endif /* MEMP_STATS */
#if SYS_LIGHTWEIGHT_PROT
    SYS_ARCH_UNPROTECT(old_level);
#else /* SYS_LIGHTWEIGHT_PROT */
    sys_sem_signal(mutex);
#endif /* SYS_LIGHTWEIGHT_PROT */
    return NULL;
  }
}
/*
 * low_level_output():
 *
 * Should do the actual transmission of the packet. The packet is
 * contained in the pbuf that is passed to the function. This pbuf
 * might be chained.
 *
 * Returns ERR_OK if the packet was sent or queued for later,
 * ERR_MEM if neither was possible.
 */
static err_t low_level_output(struct netif *netif, struct pbuf *p)
{
  SYS_ARCH_DECL_PROTECT(lev);
  struct xemac_s *xemac = (struct xemac_s *)(netif->state);
  xemacliteif_s *xemacliteif = (xemacliteif_s *)(xemac->state);
  XEmacLite *instance = xemacliteif->instance;
  struct pbuf *q;

  SYS_ARCH_PROTECT(lev);

  /* check if space is available to send */
  if (XEmacLite_TxBufferAvailable(instance) == TRUE) {
    if (pq_qlength(xemacliteif->send_q)) { /* send backlog */
      /* queued packets take priority; 'p' will be copied and queued below */
      _unbuffered_low_level_output(instance,
                                   (struct pbuf *)pq_dequeue(xemacliteif->send_q));
    } else { /* send current */
      _unbuffered_low_level_output(instance, p);
      SYS_ARCH_UNPROTECT(lev);
      return ERR_OK;
    }
  }

  /* if we cannot send the packet immediately, then make a copy of the whole packet
   * into a separate pbuf and store it in send_q. We cannot enqueue the pbuf as is
   * since parts of the pbuf may be modified inside lwIP.
   */
  q = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_POOL);
  if (!q) {
#if LINK_STATS
    lwip_stats.link.drop++;
#endif
    SYS_ARCH_UNPROTECT(lev);
    return ERR_MEM;
  }

  /* NOTE(review): this flattening copy assumes 'q' is a single contiguous
   * pbuf; pbuf_alloc(..., PBUF_POOL) may return a chain when tot_len exceeds
   * one pool element, in which case writing p->tot_len bytes at q->payload
   * would overrun the first segment — confirm PBUF_POOL_BUFSIZE covers the
   * MTU for this driver. */
  for (q->len = 0; p; p = p->next) {
    memcpy(q->payload + q->len, p->payload, p->len);
    q->len += p->len;
  }
  if (pq_enqueue(xemacliteif->send_q, (void *)q) < 0) {
#if LINK_STATS
    lwip_stats.link.drop++;
#endif
    SYS_ARCH_UNPROTECT(lev);
    return ERR_MEM;
  }

  SYS_ARCH_UNPROTECT(lev);
  return ERR_OK;
}
/**
 * Hand a (possibly chained) pbuf to the ENC28J60 driver for transmission.
 *
 * The whole send runs inside a SYS_ARCH critical section so it can be
 * entered concurrently from several threads or from an ISR.
 *
 * @param netif the network interface (unused by the driver call)
 * @param p the packet to transmit
 * @return always ERR_OK
 */
static err_t
low_level_2_output(struct netif *netif, struct pbuf *p)
{
  SYS_ARCH_DECL_PROTECT(sr);

  SYS_ARCH_PROTECT(sr);
  enc28j60PacketSend(p);
  SYS_ARCH_UNPROTECT(sr);

  return ERR_OK;
}
/**
 * Fetch one received frame from the ENC28J60 driver.
 *
 * The driver call is wrapped in a SYS_ARCH critical section so reception
 * is safe against concurrent callers.
 *
 * @param netif the network interface (unused by the driver call)
 * @return the received packet as a pbuf, or NULL if none is pending
 */
static struct pbuf *
low_level_2_input(struct netif *netif)
{
  struct pbuf *frame;
  SYS_ARCH_DECL_PROTECT(sr);

  SYS_ARCH_PROTECT(sr);
  frame = enc28j60PacketReceive();
  SYS_ARCH_UNPROTECT(sr);

  return frame;
}
/**
 * Return a heap chunk to the lwIP heap.
 *
 * @param rmem payload pointer previously returned by mem_malloc()
 *             (i.e. just past the struct mem header); NULL is tolerated
 *             and ignored, pointers outside [ram, ram_end) are rejected
 */
void mem_free(void *rmem)
{
  struct mem *mem;
  LWIP_MEM_FREE_DECL_PROTECT();

  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);

  LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    /* out-of-heap pointer: count the illegal free and bail out */
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_free: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return;
  }
  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... which has to be in a used state ... */
  LWIP_ASSERT("mem_free: mem->used", mem->used);
  /* ... and is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

  MEM_STATS_DEC_USED(used, mem->next - (mem_size_t)(((u8_t *)mem - ram)));

  /* finally, see if prev or next are free also */
  plug_holes(mem);
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  /* tell an interrupted mem_malloc() that the free list changed under it */
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
}
/**
 * Call netif_poll() in the main loop of your application. This is to prevent
 * reentering non-reentrant functions like tcp_input(). Packets passed to
 * netif_loop_output() are put on a list that is passed to netif->input() by
 * netif_poll().
 *
 * @param netif the netif whose loopback queue is drained; each dequeued
 *              packet is handed to ip_input() outside the critical section
 */
void
netif_poll(struct netif *netif)
{
  _printf("<%s>", __func__);
  struct pbuf *in;
  SYS_ARCH_DECL_PROTECT(lev);

  do {
    /* Get a packet from the list. With SYS_LIGHTWEIGHT_PROT=1, this is protected */
    SYS_ARCH_PROTECT(lev);
    in = netif->loop_first;
    if(in != NULL) {
      struct pbuf *in_end = in;
#if LWIP_LOOPBACK_MAX_PBUFS
      u8_t clen = pbuf_clen(in);
      /* adjust the number of pbufs on queue */
      LWIP_ASSERT("netif->loop_cnt_current underflow",
        ((netif->loop_cnt_current - clen) < netif->loop_cnt_current));
      netif->loop_cnt_current -= clen;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */
      /* a queued packet may be a chain of pbufs; walk to its last segment
       * (the only one whose len equals its tot_len) */
      while(in_end->len != in_end->tot_len) {
        LWIP_ASSERT("bogus pbuf: len != tot_len but next == NULL!", in_end->next != NULL);
        in_end = in_end->next;
      }
      /* 'in_end' now points to the last pbuf from 'in' */
      if(in_end == netif->loop_last) {
        /* this was the last pbuf in the list */
        netif->loop_first = netif->loop_last = NULL;
      } else {
        /* pop the pbuf off the list */
        netif->loop_first = in_end->next;
        LWIP_ASSERT("should not be null since first != last!", netif->loop_first != NULL);
      }
      /* De-queue the pbuf from its successors on the 'loop_' list. */
      in_end->next = NULL;
    }
    SYS_ARCH_UNPROTECT(lev);

    if(in != NULL) {
      /* loopback packets are always IP packets! */
      if(ip_input(in, netif) != ERR_OK) {
        /* ip_input() refused the packet, so ownership stays with us: free it */
        pbuf_free(in);
      }
      /* Don't reference the packet any more! */
      in = NULL;
    }
    /* go on while there is a packet on the list */
  } while(netif->loop_first != NULL);
}
/**
 * @internal only called from pbuf_alloc()
 *
 * Pop one pbuf off the global pool free list.
 *
 * @return the pbuf, or NULL if the pool is empty (or, with
 *         SYS_LIGHTWEIGHT_PROT disabled, if the pool is currently locked
 *         by a concurrent free)
 */
static struct pbuf *
pbuf_pool_alloc(void)
{
  struct pbuf *p = NULL;

#ifdef PBUF_DEBUG
  pbufstats_print_pbuf_counter("pbuf_pool_alloc");
#endif

  SYS_ARCH_DECL_PROTECT(old_level);
  SYS_ARCH_PROTECT(old_level);

#if !SYS_LIGHTWEIGHT_PROT
  /* Next, check the actual pbuf pool, but if the pool is locked, we
     pretend to be out of buffers and return NULL. */
  if (pbuf_pool_free_lock) {
#ifdef PBUF_STATS
    ++lwip_stats.pbuf.alloc_locked;
#endif /* PBUF_STATS */
    /* NOTE(review): this early return skips SYS_ARCH_UNPROTECT, which is
       harmless only because the SYS_ARCH_* macros are no-ops when
       SYS_LIGHTWEIGHT_PROT is disabled — confirm for this port. */
    return NULL;
  }
  pbuf_pool_alloc_lock = 1;
  if (!pbuf_pool_free_lock) {
#endif /* SYS_LIGHTWEIGHT_PROT */
    /* take the head of the free list */
    p = pbuf_pool;
    if (p) {
      pbuf_pool = p->next;
    }
#if !SYS_LIGHTWEIGHT_PROT
#ifdef PBUF_STATS
  } else {
    ++lwip_stats.pbuf.alloc_locked;
#endif /* PBUF_STATS */
  }
  pbuf_pool_alloc_lock = 0;
#endif /* SYS_LIGHTWEIGHT_PROT */

#ifdef PBUF_STATS
  if (p != NULL) {
    ++lwip_stats.pbuf.used;
    if (lwip_stats.pbuf.used > lwip_stats.pbuf.max) {
      lwip_stats.pbuf.max = lwip_stats.pbuf.used;
    }
  }
#endif /* PBUF_STATS */

  SYS_ARCH_UNPROTECT(old_level);
  return p;
}
void my_pbuf_free_custom(void* p) { SYS_ARCH_DECL_PROTECT(old_level); my_custom_pbuf_t* my_puf = (my_custom_pbuf_t*)p; // invalidate data cache here - lwIP and/or application may have written into buffer! // (invalidate is faster than flushing, and noone needs the correct data in the buffer) invalidate_cpu_cache(p->payload, p->tot_len); SYS_ARCH_PROTECT(old_level); free_rx_dma_descriptor(my_pbuf->dma_descriptor); LWIP_MEMPOOL_FREE(RX_POOL, my_pbuf); SYS_ARCH_UNPROTECT(old_level); }
int xlltemacif_input(struct netif *netif) { struct eth_hdr *ethhdr; struct pbuf *p; SYS_ARCH_DECL_PROTECT(lev); /* move received packet into a new pbuf */ SYS_ARCH_PROTECT(lev); p = low_level_input(netif); SYS_ARCH_UNPROTECT(lev); /* no packet could be read, silently ignore this */ if (p == NULL) return 0; /* points to packet payload, which starts with an Ethernet header */ ethhdr = p->payload; #if LINK_STATS lwip_stats.link.recv++; #endif /* LINK_STATS */ switch (htons(ethhdr->type)) { /* IP or ARP packet? */ case ETHTYPE_IP: case ETHTYPE_ARP: #if PPPOE_SUPPORT /* PPPoE packet? */ case ETHTYPE_PPPOEDISC: case ETHTYPE_PPPOE: #endif /* PPPOE_SUPPORT */ /* full packet send to tcpip_thread to process */ if (netif->input(p, netif) != ERR_OK) { LWIP_DEBUGF(NETIF_DEBUG, ("xlltemacif_input: IP input error\r\n")); pbuf_free(p); p = NULL; } break; default: pbuf_free(p); p = NULL; break; } return 1; }
/**
 * This function with either place the packet into the Stellaris transmit fifo,
 * or will place the packet in the interface PBUF Queue for subsequent
 * transmission when the transmitter becomes idle.
 *
 * @param netif the lwip network interface structure for this ethernetif
 * @param p the MAC packet to send (e.g. IP packet including MAC addresses and type)
 * @return ERR_OK if the packet could be sent
 *         an err_t value if the packet couldn't be sent
 *
 */
static err_t
low_level_output(struct netif *netif, struct pbuf *p)
{
  LWIP_DRIVER_DATA* drv_data = (LWIP_DRIVER_DATA*)netif;
  MAC_Type* mac = (MAC_Type*)netif->state;
  SYS_ARCH_DECL_PROTECT(lev);

  /**
   * This entire function must run within a "critical section" to preserve
   * the integrity of the transmit pbuf queue.
   *
   */
  SYS_ARCH_PROTECT(lev);

  /**
   * Bump the reference count on the pbuf to prevent it from being
   * freed till we are done with it.
   *
   */
  pbuf_ref(p);

  /**
   * If the transmitter is idle, and there is nothing on the queue,
   * send the pbuf now.
   *
   */
  if (PBUF_QUEUE_EMPTY(&drv_data->txq) &&
      ((mac->MACTR & MAC_TR_NEWTX) == 0)) {
    /* the extra reference taken above is presumably released by
     * low_level_transmit() once the hardware owns the data — TODO confirm */
    low_level_transmit(netif, p);
  }

  /* Otherwise place the pbuf on the transmit queue. */
  else {
    /* Add to transmit packet queue */
    if (!enqueue_packet(p, &drv_data->txq)) {
      /* if no room on the queue, free the pbuf reference and return error. */
      pbuf_free(p);
      SYS_ARCH_UNPROTECT(lev);
      return (ERR_MEM);
    }
  }

  /* Return to prior interrupt state and return. */
  SYS_ARCH_UNPROTECT(lev);
  return (ERR_OK);
}
/** * This function with either place the packet into the Stellaris transmit fifo, * or will place the packet in the interface PBUF Queue for subsequent * transmission when the transmitter becomes idle. * * @param netif the lwip network interface structure for this ethernetif * @param p the MAC packet to send (e.g. IP packet including MAC addresses and type) * @return ERR_OK if the packet could be sent * an err_t value if the packet couldn't be sent * */ static err_t stellarisif_output(struct netif *netif, struct pbuf *p) { struct stellarisif *stellarisif = netif->state; SYS_ARCH_DECL_PROTECT(lev); /** * This entire function must run within a "critical section" to preserve * the integrity of the transmit pbuf queue. * */ SYS_ARCH_PROTECT(lev); /** * Bump the reference count on the pbuf to prevent it from being * freed till we are done with it. * */ pbuf_ref(p); /** * If the transmitter is idle, and there is nothing on the queue, * send the pbuf now. * */ if(PBUF_QUEUE_EMPTY(&stellarisif->txq) && ((HWREG(ETH_BASE + MAC_O_TR) & MAC_TR_NEWTX) == 0)) { stellarisif_transmit(netif, p); } /* Otherwise place the pbuf on the transmit queue. */ else { /* Add to transmit packet queue */ if(!enqueue_packet(p, &(stellarisif->txq))) { /* if no room on the queue, free the pbuf reference and return error. */ pbuf_free(p); SYS_ARCH_UNPROTECT(lev); return (ERR_MEM); } } /* Return to prior interrupt state and return. */ SYS_ARCH_UNPROTECT(lev); return ERR_OK; }
/** * Do an overflow check for all elements in every pool. * * @see memp_overflow_check_element for a description of the check */ static void memp_overflow_check_all(void) { u16_t i, j; struct memp *p; SYS_ARCH_DECL_PROTECT(old_level); SYS_ARCH_PROTECT(old_level); for (i = 0; i < MEMP_MAX; ++i) { p = (struct memp*)LWIP_MEM_ALIGN(memp_pools[i]->base); for (j = 0; j < memp_pools[i]->num; ++j) { memp_overflow_check_element_overflow(p, memp_pools[i]); memp_overflow_check_element_underflow(p, memp_pools[i]); p = LWIP_ALIGNMENT_CAST(struct memp*, ((u8_t*)p + MEMP_SIZE + memp_pools[i]->size + MEMP_SANITY_REGION_AFTER_ALIGNED)); } } SYS_ARCH_UNPROTECT(old_level); }
static err_t low_level_output(struct netif *netif, struct pbuf *p) { SYS_ARCH_DECL_PROTECT(lev); err_t err; struct xemac_s *xemac = (struct xemac_s *)(netif->state); xaxiemacif_s *xaxiemacif = (xaxiemacif_s *)(xemac->state); /* * With AXI Ethernet on Zynq, we observed unexplained delays for * BD Status update. As a result, we are hitting a condition where * there are no BDs free to transmit packets. So, we have added * this logic where we look for the status update in a definite * loop. */ XAxiDma_BdRing *txring = XAxiDma_GetTxRing(&xaxiemacif->axidma); int count = 100; SYS_ARCH_PROTECT(lev); while (count) { /* check if space is available to send */ if (is_tx_space_available(xaxiemacif)) { _unbuffered_low_level_output(xaxiemacif, p); err = ERR_OK; break; } else { #if LINK_STATS lwip_stats.link.drop++; #endif process_sent_bds(txring); count--; } } if (count == 0) { print("pack dropped, no space\r\n"); err = ERR_MEM; } SYS_ARCH_UNPROTECT(lev); return err; }
/**
 * TCP callback function if a connection (opened by tcp_connect/do_connect) has
 * been established (or reset by the remote host).
 *
 * @see tcp.h (struct tcp_pcb.connected) for parameters and return values
 */
static err_t
do_connected(void *arg, struct tcp_pcb *pcb, err_t err)
{
  struct netconn *conn;
  int was_blocking;

  LWIP_UNUSED_ARG(pcb);

  conn = (struct netconn *)arg;

  if (conn == NULL) {
    return ERR_VAL;
  }

  LWIP_ASSERT("conn->state == NETCONN_CONNECT", conn->state == NETCONN_CONNECT);
  LWIP_ASSERT("(conn->current_msg != NULL) || conn->in_non_blocking_connect",
    (conn->current_msg != NULL) || IN_NONBLOCKING_CONNECT(conn));

  if (conn->current_msg != NULL) {
    /* report the connect result to the blocked api_msg caller */
    conn->current_msg->err = err;
  }
  if ((conn->type == NETCONN_TCP) && (err == ERR_OK)) {
    /* install the netconn's TCP callbacks on the now-connected pcb */
    setup_tcp(conn);
  }
  /* capture the blocking mode BEFORE clearing the non-blocking flag below */
  was_blocking = !IN_NONBLOCKING_CONNECT(conn);
  SET_NONBLOCKING_CONNECT(conn, 0);
  conn->current_msg = NULL;
  conn->state = NETCONN_NONE;
  if (!was_blocking) {
    /* non-blocking connect: replace the transient ERR_INPROGRESS marker */
    SYS_ARCH_DECL_PROTECT(lev);
    SYS_ARCH_PROTECT(lev);
    if (conn->last_err == ERR_INPROGRESS) {
      conn->last_err = ERR_OK;
    }
    SYS_ARCH_UNPROTECT(lev);
  }
  API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0);

  if (was_blocking) {
    /* wake the thread blocked in netconn_connect() */
    sys_sem_signal(&conn->op_completed);
  }
  return ERR_OK;
}
/**
 * This function should be called when a packet is ready to be read
 * from the interface. It uses the function low_level_input() that
 * should handle the actual reception of bytes from the network
 * interface. Then the type of the received packet is determined and
 * the appropriate input function is called.
 *
 * NOTE(review): despite the err_t return type, this function never returns:
 * it overrides its 'netif' argument with the global netif_1 and polls the
 * MAC in an endless loop, sleeping 5 ms when no packet is pending. It is
 * presumably used as an OS task body — confirm against the task creation
 * site.
 *
 * @param netif ignored; the global netif_1 is always used instead
 */
err_t ethernetif_1_input (struct netif *netif)
{
  //(void)p_arg;
  //INT8U err;
  struct pbuf *p;
  extern struct netif netif_1;

  /* always operate on interface 1, regardless of the caller's argument */
  netif = &netif_1;

  while(1)
  {
    // OSSemPend(Eth1_pkt_Sem,0,&err);
    // printf("\n\n\r NIC 1 packet receive semaphore");
    //ethernetif_1_input(netif);
    if(ETH_GetRxPktSize() != 0)
    {
      //ethernetif_1_input(netif);
      SYS_ARCH_DECL_PROTECT(sr);
      SYS_ARCH_PROTECT(sr);
      /* move received packet into a new pbuf */
      p = low_level_1_input(netif);
      SYS_ARCH_UNPROTECT(sr);

      if (p != NULL)
      {
        err_t err;
        err = netif->input(p, netif);  /* hand the pbuf to the upper protocol stack */
        if (err != ERR_OK)
        {
          LWIP_DEBUGF(NETIF_DEBUG, ("ethernetif_input: IP input error\n"));
          pbuf_free(p);
          p = NULL;
        }
      }
    }
    else
    {
      /* no packet pending: yield the CPU for 5 ms */
      OSTimeDlyHMSM(0, 0, 0, 5);
    }
  }
}
/**
 * Attempt to reclaim some memory from queued out-of-sequence TCP segments
 * if we run out of pool pbufs. It's better to give priority to new packets
 * if we're running out.
 *
 * This must be done in the correct thread context therefore this function
 * can only be used with NO_SYS=0 and through tcpip_callback.
 */
#if !NO_SYS
//static
#endif /* !NO_SYS */
void
pbuf_free_ooseq(void)
{
  struct tcp_pcb* pcb;
  SYS_ARCH_DECL_PROTECT(old_level);

  /* clear the pending flag first so a new request can be queued while
   * we are still walking the pcb list */
  SYS_ARCH_PROTECT(old_level);
  pbuf_free_ooseq_pending = 0;
  SYS_ARCH_UNPROTECT(old_level);

  for (pcb = tcp_active_pcbs; NULL != pcb; pcb = pcb->next) {
    if (NULL != pcb->ooseq) {
      /** Free the ooseq pbufs of one PCB only */
      LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n"));
      tcp_segs_free(pcb->ooseq);
      pcb->ooseq = NULL;
      return;
    }
  }
}
/**
 * pcapif_init(): initialization function, pass to netif_add().
 *
 * Assigns a unique per-process interface letter (IFNAME1 + index), sets the
 * output/linkoutput callbacks, MTU, flags and hardware address length, then
 * performs the low-level pcap initialization.
 *
 * @param netif the netif being initialized
 * @return ERR_OK always
 */
err_t
pcapif_init(struct netif *netif)
{
  static int ethernetif_index;

  int local_index;
  SYS_ARCH_DECL_PROTECT(lev);
  /* atomically claim the next interface index */
  SYS_ARCH_PROTECT(lev);
  local_index = ethernetif_index++;
  SYS_ARCH_UNPROTECT(lev);

  netif->name[0] = IFNAME0;
  netif->name[1] = (char)(IFNAME1 + local_index);
  netif->linkoutput = pcapif_low_level_output;
#if LWIP_ARP
  netif->output = etharp_output;
#if LWIP_IPV6
  netif->output_ip6 = ethip6_output;
#endif /* LWIP_IPV6 */
#else /* LWIP_ARP */
  netif->output = NULL; /* not used for PPPoE */
#if LWIP_IPV6
  netif->output_ip6 = NULL; /* not used for PPPoE */
#endif /* LWIP_IPV6 */
#endif /* LWIP_ARP */
#if LWIP_NETIF_HOSTNAME
  /* Initialize interface hostname */
  netif_set_hostname(netif, "lwip");
#endif /* LWIP_NETIF_HOSTNAME */

  netif->mtu = 1500;
  netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_IGMP;
  netif->hwaddr_len = ETHARP_HWADDR_LEN;

  NETIF_INIT_SNMP(netif, snmp_ifType_ethernet_csmacd, 100000000);

  /* sets link up or down based on current status */
  pcapif_low_level_init(netif);

  return ERR_OK;
}
/**
 * Attempt to reclaim some memory from queued out-of-sequence TCP segments
 * if we run out of pool pbufs. It's better to give priority to new packets
 * if we're running out.
 *
 * This must be done in the correct thread context therefore this function
 * can only be used with NO_SYS=0 and through tcpip_callback.
 */
static void ICACHE_FLASH_ATTR
pbuf_free_ooseq(void* arg)
{
  struct tcp_pcb* pcb = tcp_active_pcbs;
  SYS_ARCH_DECL_PROTECT(old_level);
  LWIP_UNUSED_ARG(arg);

  /* drop the queued flag first so a new reclaim request can be posted */
  SYS_ARCH_PROTECT(old_level);
  pbuf_free_ooseq_queued = 0;
  SYS_ARCH_UNPROTECT(old_level);

  /* free the ooseq segments of the first pcb that has any, then stop */
  while (pcb != NULL) {
    if (pcb->ooseq != NULL) {
      /** Free the ooseq pbufs of one PCB only */
      LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n"));
      tcp_segs_free(pcb->ooseq);
      pcb->ooseq = NULL;
      return;
    }
    pcb = pcb->next;
  }
}
/**
 * Grab the pointer to this thread's timeouts from TLS.
 *
 * Threads that are not yet registered are auto-adopted into the g_aTLS
 * table on first call.
 *
 * @return the calling thread's sys_timeouts slot (never NULL)
 */
struct sys_timeouts *sys_arch_timeouts(void)
{
  unsigned i;
#if SYS_LIGHTWEIGHT_PROT
  SYS_ARCH_DECL_PROTECT(old_level);
#endif
  RTTHREAD myself;
  struct sys_timeouts *to = NULL;

  myself = RTThreadSelf();
#if SYS_LIGHTWEIGHT_PROT
  SYS_ARCH_PROTECT(old_level);
#else
  RTSemEventWait(g_ThreadSem, RT_INDEFINITE_WAIT);
#endif
  /* linear scan for this thread's existing TLS slot */
  for (i = 0; i < g_cThreads; i++)
  {
    if (g_aTLS[i].tid == myself)
    {
      to = &g_aTLS[i].timeouts;
      break;
    }
  }

  /* Auto-adopt new threads which use lwIP as they pop up. */
  if (!to)
  {
    unsigned id;

    id = g_cThreads;
    g_cThreads++;
    /* NOTE(review): if assertions are compiled out, exceeding THREADS_MAX
     * here writes past the end of g_aTLS — confirm the bound is enforced
     * in release builds. */
    Assert(g_cThreads <= THREADS_MAX);
    g_aTLS[id].tid = myself;
    to = &g_aTLS[id].timeouts;
  }
#if SYS_LIGHTWEIGHT_PROT
  SYS_ARCH_UNPROTECT(old_level);
#else
  RTSemEventSignal(g_ThreadSem);
#endif
  return to;
}
/**
 * In this function, the hardware should be initialized.
 * Called from ethernetif_init().
 *
 * Sets up the MAC address, MTU and flags, initializes the Ethernet
 * controller inside a critical section, and spawns the receive task.
 *
 * @param netif the already initialized lwip network interface structure
 *        for this ethernetif
 */
static void
low_level_init(struct netif *netif)
{
  CPU_INT08U os_err;
  SYS_ARCH_DECL_PROTECT(sr);

  /* set MAC hardware address length */
  netif->hwaddr_len = ETHARP_HWADDR_LEN;

  /* set MAC hardware address */
  netif->hwaddr[0] = emacETHADDR0;
  netif->hwaddr[1] = emacETHADDR1;
  netif->hwaddr[2] = emacETHADDR2;
  netif->hwaddr[3] = emacETHADDR3;
  netif->hwaddr[4] = emacETHADDR4;
  netif->hwaddr[5] = emacETHADDR5;

  /* maximum transfer unit */
  netif->mtu = 1500;

  /* device capabilities */
  /* don't set NETIF_FLAG_ETHARP if this device is not an ethernet one */
  netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_LINK_UP;

  /* initialize the controller with interrupts masked */
  SYS_ARCH_PROTECT(sr);
  Ethernet_Initialize();
  SYS_ARCH_UNPROTECT(sr);

  /* spawn the receive task.
   * NOTE(review): os_err is assigned but never checked — a failed
   * OSTaskCreate() would go unnoticed; consider asserting on it. */
  os_err = OSTaskCreate( (void (*)(void *)) ethernetif_input,
                         (void          * ) 0,
                         (OS_STK        * )&App_Task_Ethernetif_Input_Stk[APP_TASK_ETHERNETIF_INPUT_STK_SIZE - 1],
                         (INT8U           ) APP_TASK_ETHERNETIF_INPUT_PRIO );
  //  #if OS_TASK_NAME_EN > 0
  //    OSTaskNameSet(APP_TASK_BLINK_PRIO, "Task ethernetif_input", &os_err);
  //  #endif
}
/**
 * Blocking fetch from a mailbox (win32 port).
 *
 * Waits on the mailbox semaphore up to 'timeout' ms (0 means wait forever),
 * then pops the message at the tail index under SYS_ARCH protection.
 *
 * @param q the mailbox to fetch from
 * @param msg where to store the fetched message (may be NULL to discard)
 * @param timeout wait limit in ms; 0 waits indefinitely
 * @return elapsed wait time in ms, or SYS_ARCH_TIMEOUT on timeout
 */
u32_t sys_arch_mbox_fetch(sys_mbox_t q, void **msg, u32_t timeout)
{
  DWORD ret;
  LONGLONG starttime, endtime;
  SYS_ARCH_DECL_PROTECT(lev);

  /* parameter check */
  LWIP_ASSERT("sys_mbox_free ", q != SYS_MBOX_NULL );
  LWIP_ASSERT("q->sem != NULL", q->sem != NULL);
  LWIP_ASSERT("q->sem != INVALID_HANDLE_VALUE", q->sem != INVALID_HANDLE_VALUE);

  if (timeout == 0) {
    /* lwIP's "0" means infinite; map it to the win32 constant */
    timeout = INFINITE;
  }
  starttime = sys_get_ms_longlong();
  if ((ret = WaitForSingleObject(q->sem, timeout)) == WAIT_OBJECT_0) {
    /* a message is guaranteed present: pop it at the tail */
    SYS_ARCH_PROTECT(lev);
    if(msg != NULL) {
      *msg  = q->q_mem[q->tail];
    }

    (q->tail)++;
    if (q->tail >= MAX_QUEUE_ENTRIES) {
      q->tail = 0;
    }
    SYS_ARCH_UNPROTECT(lev);
    endtime = sys_get_ms_longlong();
    return (u32_t)(endtime - starttime);
  }
  else
  {
    LWIP_ASSERT("Error waiting for sem", ret == WAIT_TIMEOUT);
    if(msg != NULL) {
      *msg  = NULL;
    }

    return SYS_ARCH_TIMEOUT;
  }
}
/**
 * Post a message into a mailbox (win32 port).
 *
 * Stores the message at the head index of the ring buffer, advances the
 * head with wrap-around, and releases the counting semaphore so a waiting
 * fetcher wakes up. The ring update runs under SYS_ARCH protection.
 *
 * @param q the mailbox to post into (must be valid and non-full)
 * @param msg the message pointer to store
 */
void sys_mbox_post(sys_mbox_t q, void *msg)
{
  DWORD ret;
  SYS_ARCH_DECL_PROTECT(lev);

  /* parameter check */
  LWIP_ASSERT("sys_mbox_free ", q != SYS_MBOX_NULL );
  LWIP_ASSERT("q->sem != NULL", q->sem != NULL);
  LWIP_ASSERT("q->sem != INVALID_HANDLE_VALUE", q->sem != INVALID_HANDLE_VALUE);

  SYS_ARCH_PROTECT(lev);
  /* store at the head slot, then advance with wrap-around */
  q->q_mem[q->head] = msg;
  q->head = (q->head + 1) % MAX_QUEUE_ENTRIES;
  LWIP_ASSERT("mbox is full!", q->head != q->tail);
  /* wake one waiter */
  ret = ReleaseSemaphore(q->sem, 1, 0);
  LWIP_ASSERT("Error releasing sem", ret != 0);
  SYS_ARCH_UNPROTECT(lev);
}
/** * Attempt to reclaim some memory from queued out-of-sequence TCP segments * if we run out of pool pbufs. It's better to give priority to new packets * if we're running out. * * This must be done in the correct thread context therefore this function * can only be used with NO_SYS=0 and through tcpip_callback. */ static void pbuf_free_ooseq(void* arg) { struct tcp_pcb* pcb; char cpu = sched_getcpu(); SYS_ARCH_DECL_PROTECT(old_level); LWIP_UNUSED_ARG(arg); SYS_ARCH_PROTECT(old_level); pbuf_free_ooseq_queued = 0; SYS_ARCH_UNPROTECT(old_level); for (pcb = lwip_tcpip_thread[cpu]->tcpip_data.tcp_active_pcbs; NULL != pcb; pcb = pcb->next) { if (NULL != pcb->ooseq) { /** Free the ooseq pbufs of one PCB only */ LWIP_DEBUGF(PBUF_DEBUG | LWIP_DBG_TRACE, ("pbuf_free_ooseq: freeing out-of-sequence pbufs\n")); tcp_segs_free(pcb->ooseq); pcb->ooseq = NULL; return; } } }
/** * Push a pbuf packet onto a pbuf packet queue * * @param p is the pbuf to push onto the packet queue. * @param q is the packet queue. * * @return 1 if successful, 0 if q is full. */ static int enqueue_packet(struct pbuf *p, struct pbufq *q) { SYS_ARCH_DECL_PROTECT(lev); int ret; /** * This entire function must run within a "critical section" to preserve * the integrity of the transmit pbuf queue. * */ SYS_ARCH_PROTECT(lev); if (!PBUF_QUEUE_FULL(q)) { /** * The queue isn't full so we add the new frame at the current * write position and move the write pointer. * */ q->pbuf[q->qwrite] = p; q->qwrite = ((q->qwrite + 1) % STELLARIS_NUM_PBUF_QUEUE); ret = 1; } else { /** * The stack is full so we are throwing away this value. Keep track * of the number of times this happens. * */ q->overflow++; ret = 0; } /* Return to prior interrupt state and return the pbuf pointer. */ SYS_ARCH_UNPROTECT(lev); return (ret); }
static err_t low_level_output(struct netif *netif, struct pbuf *p) { struct pbuf *q; int len = 0; SYS_ARCH_DECL_PROTECT(sr); /* Interrupts are disabled through this whole thing to support multi-threading transmit calls. Also this function might be called from an ISR. */ SYS_ARCH_PROTECT(sr); for(q = p; q != NULL; q = q->next) { memcpy((u8_t*)&gTxBuf[len], q->payload, q->len); len = len + q->len; } SendFrame(gTxBuf, len); SYS_ARCH_UNPROTECT(sr); return ERR_OK; }