/*-----------------------------------------------------------------------------------*
 * Reclaim completed transmit buffers from the FEC descriptor ring.
 *
 * Walks the tx descriptor ring from mcf5272->tx_remove, collecting descriptors
 * whose READY bit has been cleared by the hardware.  When the LAST descriptor
 * of a frame is found, the frame's pbufs are detached from the ring (backwards,
 * eof -> sof) and the head pbuf of the chain is freed.
 *
 * This function must be run as a task, since it ends up calling free()
 * through pbuf_free()
 *-----------------------------------------------------------------------------------*/
static void mcf5272fec_tx_cleanup(void)
{
    struct pbuf *p;
    mcf5272if_t *mcf5272 = mcf5272if;
    MCF5272_IMM *imm = mcf5272->imm;
    u32_t value;
    u32_t old_level;
    unsigned int tx_remove_sof;   /* first descriptor of the frame being removed */
    unsigned int tx_remove_eof;   /* descriptor currently being examined */
    unsigned int i;
    u16_t flags;

    tx_remove_sof = tx_remove_eof = mcf5272->tx_remove;

    /* We must protect reading the flags and then reading the buffer
       pointer. They must both be read together. */
    old_level = sys_arch_protect();

    /* Loop, looking for completed buffers at eof: descriptor no longer owned
       by hardware (R bit clear) AND still has a pbuf attached. */
    while ((((flags = mcf5272->txbd_a[tx_remove_eof].flags) & MCF5272_FEC_TX_BD_R) == 0) &&
           (mcf5272->tx_pbuf_a[tx_remove_eof] != 0))
    {
        /* See if this is last buffer in frame */
        if ((flags & MCF5272_FEC_TX_BD_L) != 0)
        {
            i = tx_remove_eof;
            /* This frame is complete. Take the frame off backwards */
            do
            {
                p = mcf5272->tx_pbuf_a[i];
                mcf5272->tx_pbuf_a[i] = 0;
                mcf5272->txbd_a[i].p_buf = 0;
                mcf5272->tx_free++;
                if (i != tx_remove_sof)
                    DEC_TX_BD_INDEX(i);
                else
                    break;
            } while (1);
            /* Drop protection while freeing: pbuf_free() may call free(),
               which must not run with interrupts masked. */
            sys_arch_unprotect(old_level);
            pbuf_free(p);               // Will be head of chain
            old_level = sys_arch_protect();
            /* Look at next descriptor */
            INC_TX_BD_INDEX(tx_remove_eof);
            tx_remove_sof = tx_remove_eof;
        }
        else
            INC_TX_BD_INDEX(tx_remove_eof);
    }
    mcf5272->tx_remove = tx_remove_sof;

    /* clear interrupt status for tx interrupt */
    MCF5272_WR_FEC_EIR(imm, MCF5272_FEC_EIR_TXF);
    value = MCF5272_RD_FEC_IMR(imm);
    /* Set tx interrupt bit again */
    MCF5272_WR_FEC_IMR(imm, (value | MCF5272_FEC_IMR_TXFEN));
    /* Now we can re-enable higher priority interrupts again */
    sys_arch_unprotect(old_level);
}
PACK_STRUCT_END
/*##########################################################################
 *
 * processes the transmitted packets by releasing the memory
 *
 * Moves the list of already-transmitted ADI_ETHER_BUFFERs off nip->xmt_list
 * (with interrupts disabled), resets each buffer's bookkeeping fields, and
 * appends the whole list back onto the free-buffer list nip->x.
 *
 *#########################################################################*/
static void process_xmtd_packets(void *arg_nif)
{
    struct netif* netif = (struct netif*)arg_nif;
    struct nifce_info* nip = (struct nifce_info*)netif->state;
    unsigned int *mangle_ptr;
    void *handle = nip->handle;
    int int_sts;
    ADI_ETHER_BUFFER* buffers_to_reuse;
    ADI_ETHER_BUFFER* buffers_to_process;
    ADI_ETHER_BUFFER* recycle_list = 0;
    ADI_ETHER_BUFFER* bp;
    ADI_ETHER_BUFFER* nbp;
    ADI_ETHER_BUFFER* lbp;   /* trails bp: last buffer of the list on loop exit */

    /* Atomically detach the transmitted-buffer list from the driver state. */
    int_sts = ker_disable_interrupts(ker_kPriorityLevelAll);
    buffers_to_reuse = nip->xmt_list;
    nip->xmt_list = NULL;
    nip->txmsg_processed =0;
    ker_enable_interrupts(int_sts);

    // add transmitted buffers to the available list after resetting length
    lbp = bp = buffers_to_reuse;
    while (bp != 0) {
        // this is a hack to use the num_rows and num_rows for the
        // purpose of storing the buffer_info address.
        // TODO: getrid of using reserved area
        //
        // NOTE(review): the Reserved area is assumed to hold a pointer to the
        // buffer's struct buffer_info at offset +4 words -- confirm against
        // the code that stores it there.
        mangle_ptr = ((unsigned int*)&bp->Reserved)+4;
        bp->ElementCount = ((struct buffer_info*)((*(unsigned int*)mangle_ptr)))->max_buf_len;
        bp->CallbackParameter =bp;
        bp->StatusWord = 0;
        bp->PayLoad =0;
        bp->ProcessedElementCount=0;
        bp->ProcessedFlag =0;
        lbp = bp;
        bp = bp->pNext;
    } //while

    /* Splice the whole reset list onto the head of the free list. */
    bp = buffers_to_reuse;
    if (bp) {
        u32_t old_level;
        old_level = sys_arch_protect();
        lbp->pNext = nip->x;
        nip->x = bp;
        sys_arch_unprotect(old_level);
    }
}
/*-----------------------------------------------------------------------------------*
 * Disable the FEC: mask its interrupts, release all buffers still attached to
 * the rx/tx descriptor rings, then hard-reset the controller.
 *
 * BUG FIX: the original code cleared mcf5272->rxbd_a->p_buf and
 * mcf5272->txbd_a->p_buf inside the loops, i.e. it zeroed descriptor 0's
 * buffer pointer NUM_RXBDS/NUM_TXBDS times and left every other descriptor's
 * p_buf dangling.  The intent (per the comment below: the driver "ALWAYS
 * zeros the pbuf array locations and descriptors when buffers are removed")
 * is clearly to clear descriptor i.
 *-----------------------------------------------------------------------------------*/
static void disable_fec(mcf5272if_t *mcf5272)
{
    MCF5272_IMM *imm = mcf5272->imm;
    int i;
    u32_t value;
    u32_t old_level;

    /* We need to disable interrupts here, It is important when dealing
       with shared registers. */
    old_level = sys_arch_protect();

    /* First disable the FEC interrupts. Do it in the appropriate ICR register. */
    value = MCF5272_RD_SIM_ICR3(imm);
    MCF5272_WR_SIM_ICR3(imm, (value & ~(MCF5272_SIM_ICR_ERX_IL(7) |
                                        MCF5272_SIM_ICR_ETX_IL(7) |
                                        MCF5272_SIM_ICR_ENTC_IL(7))));

    /* Now we can restore interrupts. This is because we can assume that
     * we are single threaded here (only 1 thread will be calling disable_fec
     * for THIS interface). */
    sys_arch_unprotect(old_level);

    /* Release all buffers attached to the descriptors. Since the driver
     * ALWAYS zeros the pbuf array locations and descriptors when buffers are
     * removed, we know we just have to free any non-zero descriptors */
    for (i = 0; i < NUM_RXBDS; i++)
        if (mcf5272->rx_pbuf_a[i])
        {
            pbuf_free(mcf5272->rx_pbuf_a[i]);
            mcf5272->rx_pbuf_a[i] = 0;
            mcf5272->rxbd_a[i].p_buf = 0;   /* was rxbd_a->p_buf: only cleared descriptor 0 */
        }
    for (i = 0; i < NUM_TXBDS; i++)
        if (mcf5272->tx_pbuf_a[i])
        {
            pbuf_free(mcf5272->tx_pbuf_a[i]);
            mcf5272->tx_pbuf_a[i] = 0;
            mcf5272->txbd_a[i].p_buf = 0;   /* was txbd_a->p_buf: only cleared descriptor 0 */
        }

    /* Reset the FEC - equivalent to a hard reset */
    MCF5272_WR_FEC_ECR(imm,MCF5272_FEC_ECR_RESET);
    /* Wait for the reset sequence to complete, it should take about 16 clock cycles */
    i = 0;
    while (MCF5272_RD_FEC_ECR(imm) & MCF5272_FEC_ECR_RESET)
    {
        if (++i > 100)
            abort();   /* reset never completed: unrecoverable hardware state */
    }

    /* Disable all FEC interrupts by clearing the IMR register */
    MCF5272_WR_FEC_IMR(imm,0);
    /* Clear any interrupts by setting all bits in the EIR register */
    MCF5272_WR_FEC_EIR(imm,0xFFFFFFFF);
}
/* netconn event hook: under lwIP protection, invoke the registered callback
 * of every in-use arena slot bound to the signalling netconn. */
static void lwip_socket_callback(struct netconn *nc, enum netconn_evt eh, u16_t len)
{
    sys_prot_t guard = sys_arch_protect();

    for (int idx = 0; idx < MEMP_NUM_NETCONN; ++idx) {
        if (!lwip_arena[idx].in_use) {
            continue;
        }
        if (lwip_arena[idx].conn != nc) {
            continue;
        }
        if (lwip_arena[idx].cb) {
            lwip_arena[idx].cb(lwip_arena[idx].data);
        }
    }

    sys_arch_unprotect(guard);
}
/*
 * Prints an assertion message and halts execution.
 *
 * FIX: the original called printf(msg) with a non-literal format string.
 * If msg ever contains a '%' conversion this is undefined behavior (and a
 * classic format-string vulnerability, CERT FIO30-C).  fputs() writes the
 * message verbatim.
 *
 * Never returns: interrupts are left masked and we spin forever.
 */
void sys_assert( const char *msg )
{
    //FSL:only needed for debugging
#ifdef LWIP_DEBUG
    fputs(msg, stdout);
    fputs("\n\r", stdout);
#endif
    (void)sys_arch_protect();   /* mask interrupts; deliberately never restored */
    for(;;)
        ;
}
/* Claim the first free slot in the socket arena.
 * The whole scan runs under lwIP protection so two callers cannot grab the
 * same slot.  The claimed slot is zeroed and marked in_use.
 * Returns NULL when every slot is taken. */
static struct lwip_socket *lwip_arena_alloc(void)
{
    sys_prot_t guard = sys_arch_protect();
    struct lwip_socket *slot = 0;

    for (int idx = 0; idx < MEMP_NUM_NETCONN; ++idx) {
        if (lwip_arena[idx].in_use) {
            continue;
        }
        slot = &lwip_arena[idx];
        memset(slot, 0, sizeof *slot);
        slot->in_use = true;
        break;
    }

    sys_arch_unprotect(guard);
    return slot;
}
/* netconn event hook: fan the event out to every arena slot bound to nc.
 * SENDMINUS events arriving while the connection is mid-write are dropped,
 * exactly as in the original ("filter send minus events"). */
static void mbed_lwip_socket_callback(struct netconn *nc, enum netconn_evt eh, u16_t len)
{
    // Filter send minus events
    if (eh == NETCONN_EVT_SENDMINUS && nc->state == NETCONN_WRITE) {
        return;
    }

    sys_prot_t guard = sys_arch_protect();

    for (int idx = 0; idx < MEMP_NUM_NETCONN; ++idx) {
        if (!lwip_arena[idx].in_use) {
            continue;
        }
        if (lwip_arena[idx].conn != nc) {
            continue;
        }
        if (lwip_arena[idx].cb) {
            lwip_arena[idx].cb(lwip_arena[idx].data);
        }
    }

    sys_arch_unprotect(guard);
}
/* Return the per-thread timeout list, creating an entry on first use.
 *
 * The active-timeout list is kept in MRU order; this function unlinks the
 * calling thread's entry from wherever it is and re-links it at the head.
 * The unlink relies on FindTimeout()/AllocTimeout() returning the address of
 * the pNext field (or list head) that POINTS TO the entry -- presumably, that
 * is their contract; confirm against their definitions. */
struct sys_timeouts* sys_arch_timeouts(void)
{
    //Return the timeout-list for this thread.
    int iThreadID=GetThreadId();
    Timeout** ppTimeout;   /* address of the link pointing at this thread's entry */
    Timeout* pTimeout;

    sys_prot_t Flags=sys_arch_protect();

    //Does it exist an entry for this thread?
    ppTimeout=FindTimeout(iThreadID);
    if (*ppTimeout==NULL)
    {
        //No, allocate an entry for this thread.
        ppTimeout=AllocTimeout();
        (*ppTimeout)->iTID=iThreadID;
    }

    //The active entries are listed in MRU order. The entry for this thread is the MRU and therefore should be first in the
    //active-list.
    pTimeout=*ppTimeout;
    *ppTimeout=pTimeout->pNext;       /* unlink entry from its current position */
    pTimeout->pNext=pActiveTimeouts;  /* push it onto the front */
    pActiveTimeouts=pTimeout;

    sys_arch_unprotect(Flags);

    //Return the timeout-list.
    return &pTimeout->Timeouts;
}
/* Set a socket option on an lwIP-backed nsapi socket.
 *
 * Supported: TCP keepalive options, SO_REUSEADDR, and IGMP/MLD multicast
 * group membership.  Returns 0 on success or a negative NSAPI error code.
 *
 * BUG FIX: in the membership path, when the caller passed an unspecified
 * interface address the original did
 *     ip_addr_set_any(IP_IS_V6(&if_addr), &if_addr);
 * which READS the still-uninitialized if_addr to decide its own type --
 * undefined behavior.  The "any" address must take its family from the
 * already-converted group address: IP_IS_V6(&multi_addr).
 */
static nsapi_error_t mbed_lwip_setsockopt(nsapi_stack_t *stack, nsapi_socket_t handle, int level,
        int optname, const void *optval, unsigned optlen)
{
    struct lwip_socket *s = (struct lwip_socket *)handle;

    switch (optname) {
#if LWIP_TCP
        case NSAPI_KEEPALIVE:
            if (optlen != sizeof(int) || s->conn->type != NETCONN_TCP) {
                return NSAPI_ERROR_UNSUPPORTED;
            }
            /* NOTE(review): optval is not consulted; keepalive is always
               enabled here. Matches original behavior -- do not "fix" without
               checking callers. */
            s->conn->pcb.tcp->so_options |= SOF_KEEPALIVE;
            return 0;

        case NSAPI_KEEPIDLE:
            if (optlen != sizeof(int) || s->conn->type != NETCONN_TCP) {
                return NSAPI_ERROR_UNSUPPORTED;
            }
            s->conn->pcb.tcp->keep_idle = *(int*)optval;
            return 0;

        case NSAPI_KEEPINTVL:
            if (optlen != sizeof(int) || s->conn->type != NETCONN_TCP) {
                return NSAPI_ERROR_UNSUPPORTED;
            }
            s->conn->pcb.tcp->keep_intvl = *(int*)optval;
            return 0;
#endif

        case NSAPI_REUSEADDR:
            if (optlen != sizeof(int)) {
                return NSAPI_ERROR_UNSUPPORTED;
            }
            if (*(int *)optval) {
                ip_set_option(s->conn->pcb.ip, SOF_REUSEADDR);
            } else {
                ip_reset_option(s->conn->pcb.ip, SOF_REUSEADDR);
            }
            return 0;

        case NSAPI_ADD_MEMBERSHIP:
        case NSAPI_DROP_MEMBERSHIP: {
            if (optlen != sizeof(nsapi_ip_mreq_t)) {
                return NSAPI_ERROR_PARAMETER;
            }
            err_t igmp_err;
            const nsapi_ip_mreq_t *imr = optval;

            /* Check interface address type matches group, or is unspecified */
            if (imr->imr_interface.version != NSAPI_UNSPEC &&
                imr->imr_interface.version != imr->imr_multiaddr.version) {
                return NSAPI_ERROR_PARAMETER;
            }
            ip_addr_t if_addr;
            ip_addr_t multi_addr;

            /* Convert the group address */
            if (!convert_mbed_addr_to_lwip(&multi_addr, &imr->imr_multiaddr)) {
                return NSAPI_ERROR_PARAMETER;
            }

            /* Convert the interface address, or make sure it's the correct sort of "any" */
            if (imr->imr_interface.version != NSAPI_UNSPEC) {
                if (!convert_mbed_addr_to_lwip(&if_addr, &imr->imr_interface)) {
                    return NSAPI_ERROR_PARAMETER;
                }
            } else {
                /* FIX: derive the family from the group address; if_addr was
                   uninitialized here in the original code. */
                ip_addr_set_any(IP_IS_V6(&multi_addr), &if_addr);
            }

            igmp_err = ERR_USE; // Maps to NSAPI_ERROR_UNSUPPORTED
            int32_t member_pair_index = find_multicast_member(s, imr);

            if (optname == NSAPI_ADD_MEMBERSHIP) {
                if (!s->multicast_memberships) {
                    // First multicast join on this socket, allocate space for membership tracking
                    s->multicast_memberships = malloc(sizeof(nsapi_ip_mreq_t) * LWIP_SOCKET_MAX_MEMBERSHIPS);
                    if (!s->multicast_memberships) {
                        return NSAPI_ERROR_NO_MEMORY;
                    }
                } else if (s->multicast_memberships_count == LWIP_SOCKET_MAX_MEMBERSHIPS) {
                    return NSAPI_ERROR_NO_MEMORY;
                }

                if (member_pair_index != -1) {
                    /* Already joined this group on this socket. */
                    return NSAPI_ERROR_ADDRESS_IN_USE;
                }

                member_pair_index = next_free_multicast_member(s, 0);

                sys_prot_t prot = sys_arch_protect();

#if LWIP_IPV4
                if (IP_IS_V4(&if_addr)) {
                    igmp_err = igmp_joingroup(ip_2_ip4(&if_addr), ip_2_ip4(&multi_addr));
                }
#endif
#if LWIP_IPV6
                if (IP_IS_V6(&if_addr)) {
                    igmp_err = mld6_joingroup(ip_2_ip6(&if_addr), ip_2_ip6(&multi_addr));
                }
#endif
                sys_arch_unprotect(prot);

                if (igmp_err == ERR_OK) {
                    set_multicast_member_registry_bit(s, member_pair_index);
                    s->multicast_memberships[member_pair_index] = *imr;
                    s->multicast_memberships_count++;
                }
            } else {
                if (member_pair_index == -1) {
                    return NSAPI_ERROR_NO_ADDRESS;
                }

                clear_multicast_member_registry_bit(s, member_pair_index);
                s->multicast_memberships_count--;

                sys_prot_t prot = sys_arch_protect();

#if LWIP_IPV4
                if (IP_IS_V4(&if_addr)) {
                    igmp_err = igmp_leavegroup(ip_2_ip4(&if_addr), ip_2_ip4(&multi_addr));
                }
#endif
#if LWIP_IPV6
                if (IP_IS_V6(&if_addr)) {
                    igmp_err = mld6_leavegroup(ip_2_ip6(&if_addr), ip_2_ip6(&multi_addr));
                }
#endif
                sys_arch_unprotect(prot);
            }

            return mbed_lwip_err_remap(igmp_err);
        }

        default:
            return NSAPI_ERROR_UNSUPPORTED;
    }
}
/*-----------------------------------------------------------------------------------*
 * One-time hardware bring-up of the MCF5272 FEC for this netif.
 *
 * Sequence: set MAC address, reset/disable the FEC, hook ISR vectors, unmask
 * rx/tx frame interrupts, program the MAC/hash/descriptor-base registers,
 * probe pbuf pool layout to size the receive buffers, configure MII
 * half-duplex operation, then enable the FEC.
 *-----------------------------------------------------------------------------------*/
static void low_level_init(struct netif *netif)
{
    mcf5272if_t *mcf5272;
    MCF5272_IMM *imm;
    VOID (*old_lisr)(INT);    /* old LISR */
    u32_t value;
    u32_t old_level;
    struct pbuf *p;
    int i;

    mcf5272 = netif->state;
    imm = mcf5272->imm;

    /* Initialize our ethernet address */
    sys_get_eth_addr(mcf5272->ethaddr);

    /* First disable fec */
    disable_fec(mcf5272);

    /* Plug appropriate low level interrupt vectors */
    sys_setvect(MCF5272_VECTOR_ERx, mcf5272fec_rx, mcf5272_dis_rx_int);
    sys_setvect(MCF5272_VECTOR_ETx, mcf5272fec_tx_hisr, mcf5272_dis_tx_int);
    //sys_setvect(MCF5272_VECTOR_ENTC, mcf5272fec_ntc);

    /* Set the I_MASK register to enable only rx & tx frame interrupts */
    MCF5272_WR_FEC_IMR(imm, MCF5272_FEC_IMR_TXFEN | MCF5272_FEC_IMR_RXFEN);

    /* Clear I_EVENT register */
    MCF5272_WR_FEC_EIR(imm,0xFFFFFFFF);

    /* Set up the appropriate interrupt levels */
    /* Disable interrupts, since this is a read/modify/write operation */
    old_level = sys_arch_protect();
    value = MCF5272_RD_SIM_ICR3(imm);
    MCF5272_WR_SIM_ICR3(imm, value | MCF5272_SIM_ICR_ERX_IL(FEC_LEVEL) |
                        MCF5272_SIM_ICR_ETX_IL(FEC_LEVEL));
    sys_arch_unprotect(old_level);

    /* Set the source address for the controller (MALR = low 4 bytes,
       MAUR = high 2 bytes of the MAC address) */
    MCF5272_WR_FEC_MALR(imm,0
                        | (mcf5272->ethaddr->addr[0] <<24)
                        | (mcf5272->ethaddr->addr[1] <<16)
                        | (mcf5272->ethaddr->addr[2] <<8)
                        | (mcf5272->ethaddr->addr[3] <<0));
    MCF5272_WR_FEC_MAUR(imm,0
                        | (mcf5272->ethaddr->addr[4] <<24)
                        | (mcf5272->ethaddr->addr[5] <<16));

    /* Initialize the hash table registers */
    /* We are not supporting multicast addresses */
    MCF5272_WR_FEC_HTUR(imm,0);
    MCF5272_WR_FEC_HTLR(imm,0);

    /* Set Receive Buffer Size. We subtract 16 because the start of the receive
     * buffer MUST be divisible by 16, so depending on where the payload really
     * starts in the pbuf, we might be increasing the start point by up to 15 bytes.
     * See the alignment code in fill_rx_ring() */
    /* There might be an offset to the payload address and we should subtract
     * that offset */
    p = pbuf_alloc(PBUF_RAW, PBUF_POOL_BUFSIZE, PBUF_POOL);
    i = 0;
    if (p)
    {
        struct pbuf *q = p;
        /* Sum the lengths of all pbufs after the first: this is the per-pbuf
           overhead we must subtract from the pool buffer size. */
        while ((q = q->next) != 0)
            i += q->len;
        mcf5272->rx_buf_len = PBUF_POOL_BUFSIZE-16-i;
        pbuf_free(p);
    }
    MCF5272_WR_FEC_EMRBR(imm, (u16_t) mcf5272->rx_buf_len);

    /* Point to the start of the circular Rx buffer descriptor queue */
    MCF5272_WR_FEC_ERDSR(imm, ((u32_t) &mcf5272->rxbd_a[0]));

    /* Point to the start of the circular Tx buffer descriptor queue */
    MCF5272_WR_FEC_ETDSR(imm, ((u32_t) &mcf5272->txbd_a[0]));

    /* Set the tranceiver interface to MII mode */
    MCF5272_WR_FEC_RCR(imm, 0
                       | MCF5272_FEC_RCR_MII_MODE
                       | MCF5272_FEC_RCR_DRT);    /* half duplex */

    /* Only operate in half-duplex, no heart beat control */
    MCF5272_WR_FEC_TCR(imm, 0);

    /* Set the maximum frame length (MTU) */
    MCF5272_WR_FEC_MFLR(imm, MTU_FEC);

    /* Set MII bus speed */
    MCF5272_WR_FEC_MSCR(imm, 0x0a);

    /* Enable fec i/o pins */
    value = MCF5272_RD_GPIO_PBCNT(imm);
    MCF5272_WR_GPIO_PBCNT(imm, ((value & 0x0000ffff) | 0x55550000));

    /* Clear MII interrupt status */
    MCF5272_WR_FEC_EIR(imm, MCF5272_FEC_IMR_MIIEN);

    /* /\* Read phy ID *\/ */
    /* MCF5272_WR_FEC_MMFR(imm, 0x600a0000); */
    /* while (1) */
    /* { */
    /*     value = MCF5272_RD_FEC_EIR(imm); */
    /*     if ((value & MCF5272_FEC_IMR_MIIEN) != 0) */
    /*     { */
    /*         MCF5272_WR_FEC_EIR(imm, MCF5272_FEC_IMR_MIIEN); */
    /*         break; */
    /*     } */
    /* } */
    /* phy = MCF5272_RD_FEC_MMFR(imm); */

    /* Enable FEC */
    enable_fec(mcf5272);

    /* THIS IS FOR LEVEL ONE/INTEL PHY ONLY!!! */
    /* Program Phy LED 3 to tell us transmit status */
    MCF5272_WR_FEC_MMFR(imm, 0x50520412);
}
/*-----------------------------------------------------------------------------------*
 * Receive ISR (written as a high-level ISR).
 *
 * Scans the rx descriptor ring from mcf5272->rx_remove for descriptors the
 * hardware has filled (EMPTY bit clear).  When the LAST descriptor of a frame
 * is seen, the frame's pbufs are chained together (sof -> eof), error-checked,
 * and either dropped or handed to eth_input().  Finally the rx interrupt is
 * re-armed and the ring is refilled.
 *-----------------------------------------------------------------------------------*/
static void mcf5272fec_rx(void)
{
    /* This is the receive ISR. It is written to be a high-level ISR. */
    u32_t old_level;
    mcf5272if_t *mcf5272 = mcf5272if;
    MCF5272_IMM *imm = mcf5272->imm;
    u32_t value;
    u16_t flags;
    unsigned int rx_remove_sof;   /* first descriptor of the frame being collected */
    unsigned int rx_remove_eof;   /* descriptor currently being examined */
    struct pbuf *p;

    rx_remove_sof = rx_remove_eof = mcf5272->rx_remove;

    /* Loop, looking for filled buffers at eof */
    while ((((flags = mcf5272->rxbd_a[rx_remove_eof].flags) & MCF5272_FEC_RX_BD_E) == 0) &&
           (mcf5272->rx_pbuf_a[rx_remove_eof] != 0))
    {
        /* See if this is last buffer in frame */
        if ((flags & MCF5272_FEC_RX_BD_L) != 0)
        {
            /* This frame is ready to go. Start at first descriptor in frame. */
            p = 0;
            do
            {
                /* Adjust pbuf length if this is last buffer in frame.
                   NOTE(review): data_len of the LAST descriptor appears to be
                   the cumulative frame length, hence the subtraction of the
                   bytes already chained in p -- confirm against the FEC manual. */
                if (rx_remove_sof == rx_remove_eof)
                {
                    mcf5272->rx_pbuf_a[rx_remove_sof]->tot_len =
                        mcf5272->rx_pbuf_a[rx_remove_sof]->len = (u16_t)
                        (mcf5272->rxbd_a[rx_remove_sof].data_len - (p ? p->tot_len : 0));
                }
                else
                    mcf5272->rx_pbuf_a[rx_remove_sof]->len =
                        mcf5272->rx_pbuf_a[rx_remove_sof]->tot_len =
                        mcf5272->rxbd_a[rx_remove_sof].data_len;

                /* Chain pbuf */
                if (p == 0)
                {
                    p = mcf5272->rx_pbuf_a[rx_remove_sof];        // First in chain
                    p->tot_len = p->len;                          // Important since len might have changed
                }
                else
                {
                    pbuf_chain(p, mcf5272->rx_pbuf_a[rx_remove_sof]);
                    /* pbuf_chain took a reference; drop ours */
                    pbuf_free(mcf5272->rx_pbuf_a[rx_remove_sof]);
                }

                /* Clear pointer to mark descriptor as free */
                mcf5272->rx_pbuf_a[rx_remove_sof] = 0;
                mcf5272->rxbd_a[rx_remove_sof].p_buf = 0;

                if (rx_remove_sof != rx_remove_eof)
                    INC_RX_BD_INDEX(rx_remove_sof);
                else
                    break;
            } while (1);
            INC_RX_BD_INDEX(rx_remove_sof);

            /* Check error status of frame */
            if (flags & (MCF5272_FEC_RX_BD_LG |
                         MCF5272_FEC_RX_BD_NO |
                         MCF5272_FEC_RX_BD_CR |
                         MCF5272_FEC_RX_BD_OV))
            {
#ifdef LINK_STATS
                lwip_stats.link.drop++;
                if (flags & MCF5272_FEC_RX_BD_LG)
                    lwip_stats.link.lenerr++;                    //Jumbo gram
                else if (flags & (MCF5272_FEC_RX_BD_NO | MCF5272_FEC_RX_BD_OV))
                    lwip_stats.link.err++;
                else if (flags & MCF5272_FEC_RX_BD_CR)
                    lwip_stats.link.chkerr++;                    // CRC errors
#endif
                /* Drop errored frame */
                pbuf_free(p);
            }
            else
            {
                /* Good frame. increment stat */
#ifdef LINK_STATS
                lwip_stats.link.recv++;
#endif
                eth_input(p, mcf5272->netif);
            }
        }
        INC_RX_BD_INDEX(rx_remove_eof);
    }
    mcf5272->rx_remove = rx_remove_sof;

    /* clear interrupt status for rx interrupt */
    old_level = sys_arch_protect();
    MCF5272_WR_FEC_EIR(imm, MCF5272_FEC_EIR_RXF);
    value = MCF5272_RD_FEC_IMR(imm);
    /* Set rx interrupt bit again */
    MCF5272_WR_FEC_IMR(imm, (value | MCF5272_FEC_IMR_RXFEN));
    /* Now we can re-enable higher priority interrupts again */
    sys_arch_unprotect(old_level);

    /* Fill up empty descriptor rings */
    fill_rx_ring(mcf5272);
    /* Tell fec that we have filled up her ring */
    MCF5272_WR_FEC_RDAR(imm, 1);
    return;
}
/*-----------------------------------------------------------------------------------*
   void low_level_output(mcf5272if_t *mcf5272, struct pbuf *p)

   Output pbuf chain to hardware. It is assumed that there is a complete and
   correct ethernet frame in p. The only buffering we have in this system is
   in the hardware descriptor ring. If there is no room on the ring, then
   drop the frame (returning ERR_MEM).
 *-----------------------------------------------------------------------------------*/
static err_t low_level_output(struct netif *netif, struct pbuf *p)
{
    struct pbuf *q;
    mcf5272if_t *mcf5272 = netif->state;
    MCF5272_IMM *imm = mcf5272->imm;
    int num_desc;                 /* descriptors needed = pbufs in chain */
    int num_free;
    unsigned int tx_insert_sof, tx_insert_eof;   /* first/last ring slot for this frame */
    unsigned int i;
    u32_t old_level;

    /* Make sure that there are no PBUF_REF buffers in the chain. These buffers
       have to be freed immediately and this ethernet driver puts the buffers
       on the dma chain, so they get freed later */
    p = pbuf_take(p);

    /* Interrupts are disabled through this whole thing to support multi-threading
     * transmit calls. Also this function might be called from an ISR. */
    old_level = sys_arch_protect();

    /* Determine number of descriptors needed */
    num_desc = pbuf_clen(p);
    if (num_desc > mcf5272->tx_free)
    {
        /* Drop the frame, we have no place to put it */
#ifdef LINK_STATS
        lwip_stats.link.memerr++;
#endif
        sys_arch_unprotect(old_level);
        return ERR_MEM;
    }
    else
    {
        /* Increment use count on pbuf: the DMA ring now holds a reference
           that mcf5272fec_tx_cleanup() releases after transmission */
        pbuf_ref(p);

        /* Put buffers on descriptor ring, but don't mark them as ready yet */
        tx_insert_eof = tx_insert_sof = mcf5272->tx_insert;
        q = p;
        do
        {
            mcf5272->tx_free--;
            mcf5272->tx_pbuf_a[tx_insert_eof] = q;
            mcf5272->txbd_a[tx_insert_eof].p_buf = q->payload;
            mcf5272->txbd_a[tx_insert_eof].data_len = q->len;
            q = q->next;
            if (q)
                INC_TX_BD_INDEX(tx_insert_eof);
        } while (q);

        /* Go backwards through descriptor ring setting flags.
           Setting READY last-to-first guarantees the hardware never sees a
           partially described frame. */
        i = tx_insert_eof;
        do
        {
            mcf5272->txbd_a[i].flags = (u16_t) (MCF5272_FEC_TX_BD_R |
                                                (mcf5272->txbd_a[i].flags &
                                                 MCF5272_FEC_TX_BD_W) |
                                                ((i == tx_insert_eof) ?
                                                 (MCF5272_FEC_TX_BD_L |
                                                  MCF5272_FEC_TX_BD_TC) : 0));
            if (i != tx_insert_sof)
                DEC_TX_BD_INDEX(i);
            else
                break;
        } while (1);
        INC_TX_BD_INDEX(tx_insert_eof);
        mcf5272->tx_insert = tx_insert_eof;
#ifdef LINK_STATS
        lwip_stats.link.xmit++;
#endif
        /* Indicate that there has been a transmit buffer produced */
        MCF5272_WR_FEC_TDAR(imm,1);
        sys_arch_unprotect(old_level);
    }
    return ERR_OK;
}
/*##########################################################################
 *
 * Output pbuf chain to hardware. It is assumed that there is a complete and
 * correct ethernet frame in p. The only buffering in this system is in the
 * list of tx ADI_ETHER_BUFFER's. If there is no room in it, then drop the frame.
 *
 * Copies the pbuf chain into one free ADI_ETHER_BUFFER (skipping a 2-byte
 * field at the front of both the pbuf payload and the buffer), compacts ARP
 * frames to remove alignment padding, writes the frame length into the
 * buffer's 2-byte header, and hands the buffer to the physical driver.
 *
 *#########################################################################*/
static err_t low_level_output(struct netif* netif, struct pbuf* p)
{
    struct nifce_info* nip = (struct nifce_info*)netif->state;
    ADI_ETHER_BUFFER* tx;
    struct pbuf *q;
    char* data;
    unsigned short *ps;
    struct hw_eth_hdr* ethhdr;
    u32_t old_level;
    int len;

    if (p->tot_len > nip->tx_buff_datalen) {
        // frame too big for our buffers
#ifdef LINK_STATS
        lwip_stats.link.memerr++;
#endif
        return ERR_MEM;
    }

    // No need of data maipulation as we are directly getting from pbufs.
    if (Trace_Function)
        Trace_Function('T',p->tot_len,((unsigned char *)p->payload)+2);

    // see whether we've got a free transmit buffer
    old_level = sys_arch_protect();
    tx = (ADI_ETHER_BUFFER*)nip->x;
    if (tx == NULL) {
#ifdef LINK_STATS
        lwip_stats.link.memerr++;
#endif
        sys_arch_unprotect(old_level);
        return ERR_MEM;
    }
    // remove first free one from the list
    nip->x = tx->pNext;
    tx->pNext = NULL;
    sys_arch_unprotect(old_level);

    // copy data from pbuf(s) into our buffer
    q = p;
    //data = (char*)tx->Data;
    //
    // first two bytes reserved for length
    data = (char*)tx->Data+2;
    // ... first pbuf: skip field unused1 in struct eth_hdr
    memcpy(data, ((u8_t*)q->payload) + 2, q->len - 2);
    data += (q->len - 2);
    len = q->len - 2;
    q = q->next;
    // copy any subsequent pbufs
    while (q) {
        memcpy(data, q->payload, q->len);
        data += q->len;
        len += q->len;
        q = q->next;
    }
    LWIP_ASSERT("low_level_output: data length correct", len == (p->tot_len - 2));
    //tx->ElementCount = p->tot_len - 2;
    tx->ElementCount = p->tot_len;   // total element count including 2 byte header

    // see whether we need to shuffle etharp frame up to account for
    // the alignment fields unused2 and unused3
    //ethhdr = (struct hw_eth_hdr*)tx->Data;
    //
    ethhdr = (struct hw_eth_hdr*)((char*)tx->Data+2);//skip the header
    if (htons(ethhdr->type) == ETHTYPE_ARP) {
        /* Compact the ARP payload in place: drop the 2-byte unused2 and
           2-byte unused3 alignment fields from the wire image. */
        u8_t* pdst = (u8_t*)tx->Data + 28 +2;
        u8_t* psrc = pdst + 2;
        // skip unused2 field
        memmove(pdst, psrc, 10);
        pdst += 10;
        psrc += 12;
        // skip unused3 field
        memmove(pdst, psrc, 4);
        //tx->num_elements -= 4;
        tx->ElementCount -= 4;
    }

    ps = (unsigned short*)tx->Data;
    *ps = tx->ElementCount-2;   // only the frame size excluding 2 byte header
    tx->PayLoad = 0;            // payload is part of the packet
    tx->StatusWord = 0;         // changes from 0 to the status info

    // give it to the physical driver
    adi_dev_Write(nip->handle,ADI_DEV_1D,(ADI_DEV_BUFFER*)tx);

#ifdef LINK_STATS
    lwip_stats.link.xmit++;
#endif
    //sys_arch_unprotect(old_level);
    return ERR_OK;
}