/**
 * Send some data on a TCP pcb contained in a netconn
 * Called from netconn_write
 *
 * @param msg the api_msg_msg pointing to the connection
 */
void
lwip_netconn_do_write(struct api_msg_msg *msg)
{
  if (ERR_IS_FATAL(msg->conn->last_err)) {
    msg->err = msg->conn->last_err;
  } else {
    if (NETCONNTYPE_GROUP(msg->conn->type) == NETCONN_TCP) {
#if LWIP_TCP
      if (msg->conn->state != NETCONN_NONE) {
        /* netconn is connecting, closing or in blocking write */
        msg->err = ERR_INPROGRESS;
      } else if (msg->conn->pcb.tcp != NULL) {
        msg->conn->state = NETCONN_WRITE;
        /* set all the variables used by lwip_netconn_do_writemore */
        LWIP_ASSERT("already writing or closing", msg->conn->current_msg == NULL &&
          msg->conn->write_offset == 0);
        LWIP_ASSERT("msg->msg.w.len != 0", msg->msg.w.len != 0);
        msg->conn->current_msg = msg;
        msg->conn->write_offset = 0;
#if LWIP_TCPIP_CORE_LOCKING
        msg->conn->flags &= ~NETCONN_FLAG_WRITE_DELAYED;
        if (lwip_netconn_do_writemore(msg->conn) != ERR_OK) {
          LWIP_ASSERT("state!", msg->conn->state == NETCONN_WRITE);
          UNLOCK_TCPIP_CORE();
          sys_arch_sem_wait(&msg->conn->op_completed, 0);
          LOCK_TCPIP_CORE();
          LWIP_ASSERT("state!", msg->conn->state == NETCONN_NONE);
        }
#else /* LWIP_TCPIP_CORE_LOCKING */
        lwip_netconn_do_writemore(msg->conn);
#endif /* LWIP_TCPIP_CORE_LOCKING */
        /* for both cases: if lwip_netconn_do_writemore was called, don't ACK the APIMSG
           since lwip_netconn_do_writemore ACKs it! */
        return;
      } else {
        msg->err = ERR_CONN;
      }
#else /* LWIP_TCP */
      msg->err = ERR_VAL;
#endif /* LWIP_TCP */
#if (LWIP_UDP || LWIP_RAW)
    } else {
      msg->err = ERR_VAL;
#endif /* (LWIP_UDP || LWIP_RAW) */
    }
  }
  TCPIP_APIMSG_ACK(msg);
}
/*-----------------------------------------------------------------------------------*/
void
sys_mbox_free(struct sys_mbox **mb)
{
  if ((mb != NULL) && (*mb != SYS_MBOX_NULL)) {
    struct sys_mbox *mbox = *mb;
    SYS_STATS_DEC(mbox.used);
    sys_arch_sem_wait(&mbox->mutex, 0);

    sys_sem_free_internal(mbox->not_empty);
    sys_sem_free_internal(mbox->not_full);
    sys_sem_free_internal(mbox->mutex);
    mbox->not_empty = mbox->not_full = mbox->mutex = NULL;
    /* LWIP_DEBUGF("sys_mbox_free: mbox 0x%lx\n", mbox); */
    free(mbox);
  }
}
/**
 * Call the lower part of a netconn_* function
 * This function is then running in the thread context
 * of tcpip_thread and has exclusive access to lwIP core code.
 *
 * @param apimsg a struct containing the function to call and its parameters
 * @return ERR_OK if the function was called, another err_t if not
 */
err_t
tcpip_apimsg(struct api_msg *apimsg)
{
  struct tcpip_msg msg;

  if (mbox != SYS_MBOX_NULL) {
    msg.type = TCPIP_MSG_API;
    msg.msg.apimsg = apimsg;
    sys_mbox_post(mbox, &msg);
    /* block until the operation has completed on tcpip_thread */
    sys_arch_sem_wait(apimsg->msg.conn->op_completed, 0);
    return ERR_OK;
  }
  return ERR_VAL;
}
/**
 * Put a struct mem back on the heap
 *
 * @param rmem is the data portion of a struct mem as returned by a previous
 *             call to mem_malloc()
 */
void
mem_free(void *rmem)
{
  struct mem *mem;

  if (rmem == NULL) {
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_TRACE | 2, ("mem_free(p == NULL) was called.\n"));
    return;
  }
  LWIP_ASSERT("mem_free: sanity check alignment", (((mem_ptr_t)rmem) & (MEM_ALIGNMENT-1)) == 0);

  /* protect the heap from concurrent access */
  sys_arch_sem_wait(mem_sem, 0);

  LWIP_ASSERT("mem_free: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_free: illegal memory\n"));
#if MEM_STATS
    ++lwip_stats.mem.err;
#endif /* MEM_STATS */
    sys_sem_signal(mem_sem);
    return;
  }
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... which has to be in a used state ... */
  LWIP_ASSERT("mem_free: mem->used", mem->used);
  /* ... and is now unused. */
  mem->used = 0;

  if (mem < lfree) {
    /* the newly freed struct is now the lowest */
    lfree = mem;
  }

#if MEM_STATS
  lwip_stats.mem.used -= mem->next - ((u8_t *)mem - ram);
#endif /* MEM_STATS */

  /* finally, see if prev or next are free also */
  plug_holes(mem);
  sys_sem_signal(mem_sem);
}
/* Transmit cleanup task for FreeRTOS */
STATIC portTASK_FUNCTION(vTransmitCleanupTask, pvParameters)
{
	lpc_enetdata_t *lpc_enetif = pvParameters;
	s32_t idx;

	while (1) {
		/* Wait for transmit cleanup task to wakeup */
		sys_arch_sem_wait(&lpc_enetif->tx_clean_sem, 0);

		/* Error handling for TX underruns. This should never happen unless
		   something is holding the bus or the clocks are going too slow. It
		   can probably be safely removed. */
		if (Chip_ENET_GetIntStatus(LPC_ETHERNET) & ENET_INT_TXUNDERRUN) {
			LINK_STATS_INC(link.err);
			LINK_STATS_INC(link.drop);

#if NO_SYS == 0
			/* Get exclusive access */
			sys_mutex_lock(&lpc_enetif->tx_lock_mutex);
#endif
			/* Reset the TX side */
			Chip_ENET_ResetTXLogic(LPC_ETHERNET);
			Chip_ENET_ClearIntStatus(LPC_ETHERNET, ENET_INT_TXUNDERRUN);

			/* De-allocate all queued TX pbufs */
			for (idx = 0; idx < LPC_NUM_BUFF_TXDESCS; idx++) {
				if (lpc_enetif->txb[idx] != NULL) {
					pbuf_free(lpc_enetif->txb[idx]);
					lpc_enetif->txb[idx] = NULL;
				}
			}

#if NO_SYS == 0
			/* Restore access */
			sys_mutex_unlock(&lpc_enetif->tx_lock_mutex);
#endif
			/* Start TX side again */
			lpc_tx_setup(lpc_enetif);
		}
		else {
			/* Free TX buffers that are done sending */
			lpc_tx_reclaim(lpc_enetdata.pnetif);
		}
	}
}
/**
 * Call the lower part of a netconn_* function
 * This function is then running in the thread context
 * of tcpip_thread and has exclusive access to lwIP core code.
 *
 * @param apimsg a struct containing the function to call and its parameters
 * @return ERR_OK if the function was called, another err_t if not
 */
err_t
tcpip_apimsg(struct api_msg *apimsg)
{
  struct tcpip_msg msg;

#ifdef LWIP_DEBUG
  /* catch functions that don't set err */
  apimsg->msg.err = ERR_VAL;
#endif

  if (sys_mbox_valid(&mbox)) {
    msg.type = TCPIP_MSG_API;
    msg.msg.apimsg = apimsg;
    sys_mbox_post(&mbox, &msg);
    sys_arch_sem_wait(&apimsg->msg.conn->op_completed, 0);
    return apimsg->msg.err;
  }
  return ERR_VAL;
}
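For context, this is only the posting half of the handshake: the message lands in tcpip_thread, which dispatches it and relies on the netconn function to signal op_completed via TCPIP_APIMSG_ACK(). A minimal sketch of that dispatch, simplified from lwIP's tcpip.c (the wrapper function name here is hypothetical; the real code is a case inside tcpip_thread's message loop):

static void
tcpip_thread_dispatch(struct tcpip_msg *msg)
{
  switch (msg->type) {
  case TCPIP_MSG_API:
    /* run the requested netconn operation in tcpip_thread context */
    msg->msg.apimsg->function(&(msg->msg.apimsg->msg));
    /* the called function ends with TCPIP_APIMSG_ACK(), which does
       sys_sem_signal(&conn->op_completed) and unblocks tcpip_apimsg() */
    break;
  default:
    break;
  }
}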
/** \brief  Transmit cleanup task
 *
 * This task is called when a transmit interrupt occurs and
 * reclaims the pbuf and descriptor used for the packet once
 * the packet has been transferred.
 *
 * \param[in] pvParameters Not used yet
 */
static void packet_tx(void* pvParameters)
{
	struct lpc_enetdata *lpc_enetif = pvParameters;
	s32_t idx;

	while (1) {
		/* Wait for transmit cleanup task to wakeup */
		sys_arch_sem_wait(&lpc_enetif->TxCleanSem, 0);

		/* Error handling for TX underruns. This should never happen unless
		   something is holding the bus or the clocks are going too slow. It
		   can probably be safely removed. */
		if (LPC_EMAC->IntStatus & EMAC_INT_TX_UNDERRUN) {
			LINK_STATS_INC(link.err);
			LINK_STATS_INC(link.drop);

#if NO_SYS == 0
			/* Get exclusive access */
			sys_mutex_lock(&lpc_enetif->TXLockMutex);
#endif
			/* Reset the TX side */
			LPC_EMAC->MAC1 |= EMAC_MAC1_RES_TX;
			LPC_EMAC->IntClear = EMAC_INT_TX_UNDERRUN;

			/* De-allocate all queued TX pbufs */
			for (idx = 0; idx < LPC_NUM_BUFF_TXDESCS; idx++) {
				if (lpc_enetif->txb[idx] != NULL) {
					pbuf_free(lpc_enetif->txb[idx]);
					lpc_enetif->txb[idx] = NULL;
				}
			}

#if NO_SYS == 0
			/* Restore access */
			sys_mutex_unlock(&lpc_enetif->TXLockMutex);
#endif
			/* Start TX side again */
			lpc_tx_setup(lpc_enetif);
		}
		else {
			/* Free TX buffers that are done sending */
			lpc_tx_reclaim(lpc_enetdata.netif);
		}
	}
}
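The task above blocks on TxCleanSem; it is the EMAC interrupt handler that wakes it. A hedged sketch of that signalling path, assuming a FreeRTOS sys_arch port in which sys_sem_t is the raw FreeRTOS semaphore handle (the interrupt-status handling shown is illustrative, not the full driver ISR):

void ENET_IRQHandler(void)
{
	portBASE_TYPE woken = pdFALSE;
	uint32_t ints = LPC_EMAC->IntStatus;

	if (ints & (EMAC_INT_TX_DONE | EMAC_INT_TX_UNDERRUN)) {
		/* Wake packet_tx(), which is blocked in sys_arch_sem_wait();
		   the FromISR variant is mandatory in interrupt context */
		xSemaphoreGiveFromISR(lpc_enetdata.TxCleanSem, &woken);
	}

	/* Acknowledge the handled interrupts */
	LPC_EMAC->IntClear = ints;
	portEND_SWITCHING_ISR(woken);
}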
int TftpClient::get(char *rem_file, char *loc_file)
{
    struct udp_pcb *pcb = udp_new();
    err = udp_bind(pcb, IP_ADDR_ANY, loc_port);
    if (err != ERR_OK) {
        error(lwip_strerr(err), false);
        return -1;
    }

    fd = open(loc_file, O_WRONLY);
    blkno = 1;

    // Craft the initial get request with appropriate mode
    int bufsize = strlen(rem_file) + 4 +
                  (mode == MODE_NETASCII ? strlen("netascii") : strlen("octet"));
    char *pkt = (char *)safe_malloc(bufsize);
    memset(pkt, 0, bufsize);
    u16_t *opcode = (u16_t *)pkt;
    *opcode = htons(1);  // opcode 1 == RRQ
    memcpy(pkt + 2, rem_file, strlen(rem_file) + 1);
    if (mode == MODE_NETASCII)
        memcpy(pkt + 3 + strlen(rem_file), "netascii", strlen("netascii"));
    else
        memcpy(pkt + 3 + strlen(rem_file), "octet", strlen("octet"));

    // Set the packet recv handler
    udp_recv(pcb, hndl_pkt, this);

    // Send the packet
    struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, bufsize, PBUF_ROM);
    p->payload = pkt;
    udp_sendto(pcb, p, &rem_host, rem_port);

    // Block the thread until the request is satisfied
    sys_sem_new(&get_wait, 0);
    sys_arch_sem_wait(&get_wait, 0);
    sys_sem_free(&get_wait);

    udp_remove(pcb);
    // The request pbuf and its PBUF_ROM payload are no longer referenced
    pbuf_free(p);
    free(pkt);
    return 0;
}
static void rza1_recv_task(void *arg)
{
    struct netif   *netif = (struct netif *)arg;
    struct eth_hdr *ethhdr;
    u16_t           recv_size;
    struct pbuf    *p;
    int             cnt;

    while (1) {
        sys_arch_sem_wait(&recv_ready_sem, 0);
        for (cnt = 0; cnt < 16; cnt++) {
            recv_size = ethernet_receive();
            if (recv_size != 0) {
                p = pbuf_alloc(PBUF_RAW, recv_size, PBUF_RAM);
                if (p != NULL) {
                    (void)ethernet_read((char *)p->payload, p->len);
                    ethhdr = p->payload;
                    switch (htons(ethhdr->type)) {
                        case ETHTYPE_IP:
                        case ETHTYPE_ARP:
#if PPPOE_SUPPORT
                        case ETHTYPE_PPPOEDISC:
                        case ETHTYPE_PPPOE:
#endif /* PPPOE_SUPPORT */
                            /* send the full packet to tcpip_thread to process */
                            if (netif->input(p, netif) != ERR_OK) {
                                /* Free buffer */
                                pbuf_free(p);
                            }
                            break;
                        default:
                            /* Return buffer */
                            pbuf_free(p);
                            break;
                    }
                }
            } else {
                break;
            }
        }
    }
}
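The loop sleeps on recv_ready_sem until the driver announces pending frames. A minimal sketch of the wake-up side, assuming the Renesas driver exposes a receive callback hook (the callback name is hypothetical, and signalling a sys_sem from interrupt context must be supported by the port in use):

/* Hypothetical RX callback: runs when frames arrive; it only signals the
 * semaphore, leaving all pbuf allocation and parsing to rza1_recv_task(). */
static void rza1_recv_callback(void)
{
    sys_sem_signal(&recv_ready_sem);
}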
/**
 * Synchronously calls function in TCPIP thread and waits for its completion.
 * It is recommended to use LWIP_TCPIP_CORE_LOCKING (preferred) or
 * LWIP_NETCONN_SEM_PER_THREAD.
 * If not, a semaphore is created and destroyed on every call which is usually
 * an expensive/slow operation.
 * @param fn Function to call
 * @param call Call parameters
 * @return Return value from tcpip_api_call_fn
 */
err_t
tcpip_api_call(tcpip_api_call_fn fn, struct tcpip_api_call_data *call)
{
#if LWIP_TCPIP_CORE_LOCKING
  err_t err;
  LOCK_TCPIP_CORE();
  err = fn(call);
  UNLOCK_TCPIP_CORE();
  return err;
#else /* LWIP_TCPIP_CORE_LOCKING */
  TCPIP_MSG_VAR_DECLARE(msg);

#if !LWIP_NETCONN_SEM_PER_THREAD
  err_t err = sys_sem_new(&call->sem, 0);
  if (err != ERR_OK) {
    return err;
  }
#endif /* LWIP_NETCONN_SEM_PER_THREAD */

  LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox));

  TCPIP_MSG_VAR_ALLOC(msg);
  TCPIP_MSG_VAR_REF(msg).type = TCPIP_MSG_API_CALL;
  TCPIP_MSG_VAR_REF(msg).msg.api_call.arg = call;
  TCPIP_MSG_VAR_REF(msg).msg.api_call.function = fn;
#if LWIP_NETCONN_SEM_PER_THREAD
  TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = LWIP_NETCONN_THREAD_SEM_GET();
#else /* LWIP_NETCONN_SEM_PER_THREAD */
  TCPIP_MSG_VAR_REF(msg).msg.api_call.sem = &call->sem;
#endif /* LWIP_NETCONN_SEM_PER_THREAD */
  sys_mbox_post(&mbox, &TCPIP_MSG_VAR_REF(msg));
  sys_arch_sem_wait(TCPIP_MSG_VAR_REF(msg).msg.api_call.sem, 0);
  TCPIP_MSG_VAR_FREE(msg);

#if !LWIP_NETCONN_SEM_PER_THREAD
  sys_sem_free(&call->sem);
#endif /* LWIP_NETCONN_SEM_PER_THREAD */

  return call->err;
#endif /* LWIP_TCPIP_CORE_LOCKING */
}
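A sketch of the calling convention: the caller embeds struct tcpip_api_call_data as the first member of its own argument struct, so the function running in tcpip_thread can cast back to recover its parameters (struct and function names below are illustrative):

struct example_call {
  struct tcpip_api_call_data call; /* must be the first member */
  int value;                       /* arbitrary in/out parameter */
};

static err_t
example_in_tcpip_thread(struct tcpip_api_call_data *call)
{
  struct example_call *ec = (struct example_call *)call;
  /* Safe to touch lwIP core state here (pcbs, timers, ...) */
  ec->value *= 2;
  return ERR_OK; /* stored in call->err and returned by tcpip_api_call() */
}

void example(void)
{
  struct example_call ec;
  ec.value = 21;
  /* Blocks until example_in_tcpip_thread has run in tcpip_thread */
  err_t err = tcpip_api_call(example_in_tcpip_thread, &ec.call);
  (void)err;
}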
int _start(int argc, char **argv)
{
	sys_sem_t Sema;
	int iRet;

	dbgprintf("PS2IP: Module Loaded.\n");

	if ((iRet = RegisterLibraryEntries(&_exp_ps2ip)) != 0) {
		printf("PS2IP: RegisterLibraryEntries returned: %d\n", iRet);
	}

	sys_init();
	mem_init();
	memp_init();
	pbuf_init();

	dbgprintf("PS2IP: sys_init, mem_init, memp_init, pbuf_init called\n");

	netif_init();

	dbgprintf("PS2IP: netif_init called\n");

	Sema = sys_sem_new(0);
	dbgprintf("PS2IP: Calling tcpip_init\n");

	tcpip_init(InitDone, &Sema);
	sys_arch_sem_wait(Sema, 0);
	sys_sem_free(Sema);
	dbgprintf("PS2IP: tcpip_init called\n");

	AddLoopIF();
	InitTimer();

	dbgprintf("PS2IP: System Initialised\n");

	return iRet;
}
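The tcpip_init(InitDone, &Sema) / sys_arch_sem_wait(Sema, 0) pair is the classic startup handshake: the init callback runs in tcpip_thread and releases the semaphore once the stack is up. A minimal sketch of that callback under this old by-value sys_sem_t API (the body is an assumption consistent with how it is used above):

/* Runs in tcpip_thread once initialisation is complete; wakes _start(). */
static void InitDone(void *arg)
{
	sys_sem_signal(*(sys_sem_t *)arg);
}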
/*-----------------------------------------------------------------------------------*/
err_t
sys_mbox_trypost(struct sys_mbox **mb, void *msg)
{
  u8_t first;
  struct sys_mbox *mbox;
  LWIP_ASSERT("invalid mbox", (mb != NULL) && (*mb != NULL));
  mbox = *mb;

  sys_arch_sem_wait(&mbox->mutex, 0);

  LWIP_DEBUGF(SYS_DEBUG, ("sys_mbox_trypost: mbox %p msg %p\n",
                          (void *)mbox, (void *)msg));

  if ((mbox->last + 1) >= (mbox->first + SYS_MBOX_SIZE)) {
    sys_sem_signal(&mbox->mutex);
    return ERR_MEM;
  }

  mbox->msgs[mbox->last % SYS_MBOX_SIZE] = msg;

  if (mbox->last == mbox->first) {
    first = 1;
  } else {
    first = 0;
  }

  mbox->last++;

  if (first) {
    sys_sem_signal(&mbox->not_empty);
  }

  sys_sem_signal(&mbox->mutex);

  return ERR_OK;
}
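For the other direction, a hedged sketch of a blocking fetch built on the same mutex/not_empty/not_full semaphores, simplified from the classic unix-port pattern (the function name is hypothetical; the real port implements sys_arch_mbox_fetch with timeout handling, which is omitted here):

void
sys_mbox_fetch_blocking(struct sys_mbox **mb, void **msg)
{
  struct sys_mbox *mbox = *mb;
  u8_t was_full;

  sys_arch_sem_wait(&mbox->mutex, 0);
  while (mbox->first == mbox->last) {
    /* mbox empty: release the mutex while waiting for a post */
    sys_sem_signal(&mbox->mutex);
    sys_arch_sem_wait(&mbox->not_empty, 0);
    sys_arch_sem_wait(&mbox->mutex, 0);
  }

  /* same fullness test as sys_mbox_trypost() above */
  was_full = ((mbox->last + 1) >= (mbox->first + SYS_MBOX_SIZE));

  if (msg != NULL) {
    *msg = mbox->msgs[mbox->first % SYS_MBOX_SIZE];
  }
  mbox->first++;

  if (was_full) {
    /* the mbox just transitioned from full: wake a blocked poster */
    sys_sem_signal(&mbox->not_full);
  }
  sys_sem_signal(&mbox->mutex);
}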
int mutexLock(void *p)
{
    if (p == 0)
        return 0;
    /* sys_arch_sem_wait() returns SYS_ARCH_TIMEOUT when the 100 ms wait
       expires (on success it returns the time spent waiting); keep
       retrying until the semaphore is actually acquired. */
    while (sys_arch_sem_wait(p, 100) == SYS_ARCH_TIMEOUT)
        ;
    return 1;
}
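A matching unlock, under the same assumption that the mutex is a binary semaphore created with one token (the counterpart name and body are hypothetical, mirroring the lock above):

/* Hypothetical counterpart: releases the semaphore taken by mutexLock(). */
int mutexUnlock(void *p)
{
    if (p == 0)
        return 0;
    sys_sem_signal(p);
    return 1;
}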
/**
 * \brief Send a JSON string representing the board status.
 *
 * \param name Not used.
 * \param recv_buf Receive buffer.
 * \param recv_len Receive buffer length.
 *
 * \return 0.
 */
static int cgi_status(struct netconn *client, const char *name, char *recv_buf, size_t recv_len)
{
	(void)recv_buf;
	(void)recv_len;
	(void)name;

	uint32_t length = 0;
	uint32_t nb = 11;
	uint32_t i, count, new_entry;

#if LWIP_STATS
	extern uint32_t lwip_tx_rate;
	extern uint32_t lwip_rx_rate;
#else
	volatile uint32_t lwip_tx_rate = 0;
	volatile uint32_t lwip_rx_rate = 0;
#endif

	/* Protect tx_buf buffer from concurrent access. */
	sys_arch_sem_wait(&cgi_sem, 0);

	status.tot_req++;
	status.up_time = xTaskGetTickCount() / 1000;

	/* Update board status. */
	sprintf(status.last_connected_ip, "%d.%d.%d.%d",
			IP_ADDR_TO_INT_TUPLE(client->pcb.ip->remote_ip.addr));
	sprintf(status.local_ip, "%d.%d.%d.%d",
			IP_ADDR_TO_INT_TUPLE(client->pcb.ip->local_ip.addr));
	length += sprintf((char *)tx_buf,
			"{\"board_ip\":\"%s\",\"remote_ip\":\"%s\",\"download\":%u,\"upload\":%u",
			status.local_ip, status.last_connected_ip,
			lwip_rx_rate, lwip_tx_rate);

	/* Turn FreeRTOS stats into JSON. */
	vTaskGetRunTimeStats(freertos_stats);
	length += sprintf((char *)tx_buf + length, ",\"rtos\":{\"10");
	/* Start at i = 2 to skip the leading CR/LF (13, 10) pair. */
	for (i = 2, count = 0, new_entry = 0;
			i < FREERTOS_STATS_BUFLEN && freertos_stats[i]; ++i) {
		if (freertos_stats[i] == 13) {   /* CR ends a task entry */
			tx_buf[length++] = '\"';
			new_entry = 1;
			continue;
		}
		if (freertos_stats[i] == 10)     /* skip LF */
			continue;
		if (freertos_stats[i] == 9) {    /* tab separates the columns */
			count += 1;
			if (count == 4) {
				tx_buf[length++] = '\"';
				tx_buf[length++] = ':';
				tx_buf[length++] = '\"';
				count = 0;
				continue;
			}
		}
		if (count != 0)
			continue;
		if (new_entry == 1) {
			new_entry = 0;
			tx_buf[length++] = ',';
			tx_buf[length++] = '\"';
			/* Append ID to task name since JSON id must be unique. */
			tx_buf[length++] = '0' + nb / 10;
			tx_buf[length++] = '0' + nb % 10;
			nb++;
		}
		tx_buf[length++] = freertos_stats[i];
	}
	tx_buf[length++] = '}';

	char *memp_names[] = {
#define LWIP_MEMPOOL(name,num,size,desc) desc,
#include "lwip/memp_std.h"
	};

	length += sprintf((char *)tx_buf + length, ",\"lwip\":{");
#if MEM_STATS || MEMP_STATS
	length += sprintf((char *)tx_buf + length,
			"\"HEAP\":{\"Cur\":%d,\"Size\":%d,\"Max\":%d,\"Err\":%u}",
			lwip_stats.mem.used, lwip_stats.mem.avail,
			lwip_stats.mem.max, lwip_stats.mem.err);
	if (MEMP_MAX > 0)
		tx_buf[length++] = ',';
#endif
	for (uint32_t z = 0; z < MEMP_MAX; ++z) {
		length += sprintf((char *)tx_buf + length,
				"\"%s\":{\"Cur\":%d,\"Size\":%d,\"Max\":%d,\"Err\":%u}",
				memp_names[z], lwip_stats.memp[z].used, lwip_stats.memp[z].avail,
				lwip_stats.memp[z].max, lwip_stats.memp[z].err);
		if (z + 1 < MEMP_MAX)
			tx_buf[length++] = ',';
	}
	tx_buf[length++] = '}';

	/* Remaining board status. */
	length += sprintf((char *)tx_buf + length, ",\"up_time\":%u,\"tot_req\":%u}",
			status.up_time, status.tot_req);

	/* Send answer. */
	http_sendOk(client, HTTP_CONTENT_JSON);
	/* Use NETCONN_COPY to avoid corrupting the buffer after releasing the semaphore. */
	netconn_write(client, tx_buf, strlen((char *)tx_buf), NETCONN_COPY);

	/* Release semaphore to allow further use of tx_buf. */
	sys_sem_signal(&cgi_sem);
	return 0;
}
/**
 * Adam's mem_malloc() plus solution for bug #17922
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size)
{
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;

  if (size == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  size = LWIP_MEM_ALIGN_SIZE(size);

  if(size < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    size = MIN_SIZE_ALIGNED;
  }

  if (size > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  /* protect the heap from concurrent access */
  sys_arch_sem_wait(mem_sem, 0);

  /* Scan through the heap searching for a free block that is big enough,
   * beginning with the lowest free block.
   */
  for (ptr = (u8_t *)lfree - ram; ptr < MEM_SIZE_ALIGNED - size;
       ptr = ((struct mem *)&ram[ptr])->next) {
    mem = (struct mem *)&ram[ptr];

    if ((!mem->used) &&
        (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
      /* mem is not used and at least perfect fit is possible:
       * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

      if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
        /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
         * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
         * -> split large block, create empty remainder,
         * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
         * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
         * struct mem would fit in but no data between mem2 and mem2->next
         * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
         *       region that couldn't hold data, but when mem->next gets freed,
         *       the 2 regions would be combined, resulting in more free memory
         */
        ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
        /* create mem2 struct */
        mem2 = (struct mem *)&ram[ptr2];
        mem2->used = 0;
        mem2->next = mem->next;
        mem2->prev = ptr;
        /* and insert it between mem and mem->next */
        mem->next = ptr2;
        mem->used = 1;

        if (mem2->next != MEM_SIZE_ALIGNED) {
          ((struct mem *)&ram[mem2->next])->prev = ptr2;
        }
#if MEM_STATS
        lwip_stats.mem.used += (size + SIZEOF_STRUCT_MEM);
        if (lwip_stats.mem.max < lwip_stats.mem.used) {
          lwip_stats.mem.max = lwip_stats.mem.used;
        }
#endif /* MEM_STATS */
      } else {
        /* (a mem2 struct does not fit into the user data space of mem, and mem->next will
         * always be used at this point: if not, we would have 2 unused structs in a row and
         * plug_holes should have taken care of that).
         * -> near fit or exact fit: do not split, no mem2 creation;
         * also can't move mem->next directly behind mem, since mem->next
         * will always be used at this point!
         */
        mem->used = 1;
#if MEM_STATS
        lwip_stats.mem.used += mem->next - ((u8_t *)mem - ram);
        if (lwip_stats.mem.max < lwip_stats.mem.used) {
          lwip_stats.mem.max = lwip_stats.mem.used;
        }
#endif /* MEM_STATS */
      }

      if (mem == lfree) {
        /* Find next free block after mem and update lowest free pointer */
        while (lfree->used && lfree != ram_end) {
          lfree = (struct mem *)&ram[lfree->next];
        }
        LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
      }
      sys_sem_signal(mem_sem);
      LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
        (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
      LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
        (unsigned long)((u8_t *)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
      LWIP_ASSERT("mem_malloc: sanity check alignment",
        (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);

      return (u8_t *)mem + SIZEOF_STRUCT_MEM;
    }
  }
  LWIP_DEBUGF(MEM_DEBUG | 2, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
#if MEM_STATS
  ++lwip_stats.mem.err;
#endif /* MEM_STATS */
  sys_sem_signal(mem_sem);
  return NULL;
}
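A hypothetical call site combining this mem_malloc() with the mem_free() shown earlier in this section (the function name is illustrative only):

void example_heap_use(void)
{
  void *buf = mem_malloc(100);   /* rounded up to an aligned size internally */
  if (buf == NULL) {
    return;                      /* heap exhausted; lwip_stats.mem.err was bumped */
  }
  ((u8_t *)buf)[0] = 0;          /* ... use the buffer ... */
  mem_free(buf);                 /* return it exactly once; adjacent free
                                    regions are merged by plug_holes() */
}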
int TftpClient::put(char *filename)
{
    struct udp_pcb *pcb = udp_new();
    err = udp_bind(pcb, IP_ADDR_ANY, loc_port);
    if (err != ERR_OK) {
        error(lwip_strerr(err), false);
        return -1;
    }

    if (mode == MODE_OCTET) {
        fd = open(filename, O_RDONLY);
    } else {
        fd = netascii_open(filename, O_RDONLY);
    }

    sys_sem_new(&snd_nxt, 0);

    // Craft the initial put request with appropriate mode
    int bufsize = strlen(filename) + 4 +
                  (mode == MODE_NETASCII ? strlen("netascii") : strlen("octet"));
    char *pkt = (char *)safe_malloc(bufsize);
    memset(pkt, 0, bufsize);
    u16_t *opcode = (u16_t *)pkt;
    *opcode = htons(2);  // opcode 2 == WRQ
    memcpy(pkt + 2, filename, strlen(filename) + 1);
    if (mode == MODE_NETASCII)
        memcpy(pkt + 3 + strlen(filename), "netascii", strlen("netascii"));
    else
        memcpy(pkt + 3 + strlen(filename), "octet", strlen("octet"));
    blkno = 0;

    // Set the packet recv handler
    udp_recv(pcb, ack_recvr, this);

    // Send the request, retransmitting until the ACK arrives
    u32_t w_ack;
    struct pbuf *p = pbuf_alloc(PBUF_TRANSPORT, bufsize, PBUF_ROM);
    p->payload = pkt;
    do {
        err_t e = udp_sendto(pcb, p, &rem_host, rem_port);
        if (e != ERR_OK)
            error(lwip_strerr(e), false);
        w_ack = sys_arch_sem_wait(&snd_nxt, rtt);
    } while (w_ack == SYS_ARCH_TIMEOUT);
    pbuf_free(p);

    // Craft the next data packet
    char data[600];
    u16_t *tmp;
    int kbytes;
    while (1) {
        tmp = (u16_t *)data;
        *tmp = htons(3);  // opcode 3 == DATA
        tmp = (u16_t *)(data + sizeof(u16_t));
        *tmp = htons(blkno);
        kbytes = read(fd, data + 4, 512);

        p = pbuf_alloc(PBUF_TRANSPORT, 4 + kbytes, PBUF_ROM);
        p->payload = data;
        do {
            err_t e = udp_sendto(pcb, p, &rem_host, sec_port);
            w_ack = sys_arch_sem_wait(&snd_nxt, rtt);
        } while (w_ack == SYS_ARCH_TIMEOUT);
        pbuf_free(p);

        if (kbytes < 512) {
            // A short (or empty) block terminates the transfer
            if (mode == MODE_OCTET) {
                close(fd);
            } else {
                netascii_close(fd);
            }
            sys_sem_free(&snd_nxt);
            udp_remove(pcb);
            sec_port = 0;
            free(pkt);
            return 0;
        }
    }
}
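The stop-and-wait loop above relies on ack_recvr to wake it via snd_nxt. A hypothetical sketch of that handler, assuming ack_recvr is a static member registered with udp_recv() and that the member fields shown are accessible through the arg pointer:

/* Hypothetical ACK handler: validates opcode/block number, records the
 * server's transfer (TID) port, advances blkno and wakes the send loop. */
static void ack_recvr(void *arg, struct udp_pcb *pcb, struct pbuf *p,
                      ip_addr_t *addr, u16_t port)
{
    TftpClient *self = (TftpClient *)arg;
    u16_t *hdr = (u16_t *)p->payload;

    /* Opcode 4 == ACK; only accept the ACK for the block just sent */
    if (ntohs(hdr[0]) == 4 && ntohs(hdr[1]) == self->blkno) {
        self->sec_port = port;           /* server answers from its TID port */
        self->blkno++;                   /* next DATA block to send */
        sys_sem_signal(&self->snd_nxt);  /* unblock sys_arch_sem_wait(&snd_nxt, rtt) */
    }
    pbuf_free(p);
}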
/**
 * In contrast to its name, mem_realloc can only shrink memory, not expand it.
 * Since the only use (for now) is in pbuf_realloc (which also can only shrink),
 * this shouldn't be a problem!
 *
 * @param rmem pointer to memory allocated by mem_malloc that is to be shrunk
 * @param newsize required size after shrinking (needs to be smaller than or
 *                equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment
 */
void *
mem_realloc(void *rmem, mem_size_t newsize)
{
  mem_size_t size;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = LWIP_MEM_ALIGN_SIZE(newsize);

  if(newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }

  if (newsize > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  LWIP_ASSERT("mem_realloc: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
    (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_realloc: illegal memory\n"));
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... and its offset pointer */
  ptr = (u8_t *)mem - ram;

  size = mem->next - ptr - SIZEOF_STRUCT_MEM;
  LWIP_ASSERT("mem_realloc can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  sys_arch_sem_wait(mem_sem, 0);

#if MEM_STATS
  lwip_stats.mem.used -= (size - newsize);
#endif /* MEM_STATS */

  mem2 = (struct mem *)&ram[mem->next];
  if(mem2->used == 0) {
    /* The next struct is unused, we can simply move it a little */
    mem_size_t next;
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrunk mem */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    if (lfree == mem2) {
      lfree = (struct mem *)&ram[ptr2];
    }
    mem2 = (struct mem *)&ram[ptr2];
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)&ram[mem2->next])->prev = ptr2;
    }
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     *       region that couldn't hold data, but when mem->next gets freed,
     *       the 2 regions would be combined, resulting in more free memory */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    mem2 = (struct mem *)&ram[ptr2];
    if (mem2 < lfree) {
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)&ram[mem2->next])->prev = ptr2;
    }
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but the space between mem and mem2 is not big
    enough to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
  sys_sem_signal(mem_sem);
  return rmem;
}
/* Lock a mutex */
void sys_mutex_lock(sys_mutex_t *mutex)
{
    sys_arch_sem_wait(*mutex, 0);
}
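The matching unlock for this semaphore-backed mutex port would simply signal the same semaphore; a sketch, assuming the same by-value sys_sem_t layout as the lock above:

/* Unlock a mutex */
void sys_mutex_unlock(sys_mutex_t *mutex)
{
    sys_sem_signal(*mutex);
}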
void
sys_mbox_post(sys_mbox_t mbox, void *msg)
{
    /* use u32_t to compare cleanly against SYS_ARCH_TIMEOUT (0xFFFFFFFF) */
    u32_t ret = sys_arch_sem_wait(mbox->free, 0);
    assert(ret != SYS_ARCH_TIMEOUT);
    sys_mbox_dopost(mbox, msg);
}
nsapi_error_t mbed_lwip_bringup(bool dhcp, const char *ip, const char *netmask, const char *gw)
{
    // Check if we've already connected
    if (lwip_connected) {
        return NSAPI_ERROR_PARAMETER;
    }

    if (mbed_lwip_init(NULL) != NSAPI_ERROR_OK) {
        return NSAPI_ERROR_DEVICE_ERROR;
    }

    // Zero out socket set
    mbed_lwip_arena_init();

#if LWIP_IPV6
    netif_create_ip6_linklocal_address(&lwip_netif, 1/*from MAC*/);
#if LWIP_IPV6_MLD
    /*
     * For hardware/netifs that implement MAC filtering.
     * All-nodes link-local is handled by default, so we must let the hardware know
     * to allow multicast packets in.
     * Should set mld_mac_filter previously. */
    if (lwip_netif.mld_mac_filter != NULL) {
        ip6_addr_t ip6_allnodes_ll;
        ip6_addr_set_allnodes_linklocal(&ip6_allnodes_ll);
        lwip_netif.mld_mac_filter(&lwip_netif, &ip6_allnodes_ll, MLD6_ADD_MAC_FILTER);
    }
#endif /* LWIP_IPV6_MLD */

#if LWIP_IPV6_AUTOCONFIG
    /* IPv6 address autoconfiguration not enabled by default */
    lwip_netif.ip6_autoconfig_enabled = 1;
#endif /* LWIP_IPV6_AUTOCONFIG */
#endif

    u32_t ret;

    if (!netif_is_link_up(&lwip_netif)) {
        ret = sys_arch_sem_wait(&lwip_netif_linked, 15000);

        if (ret == SYS_ARCH_TIMEOUT) {
            return NSAPI_ERROR_NO_CONNECTION;
        }
    }

#if LWIP_IPV4
    if (!dhcp) {
        ip4_addr_t ip_addr;
        ip4_addr_t netmask_addr;
        ip4_addr_t gw_addr;

        if (!inet_aton(ip, &ip_addr) ||
            !inet_aton(netmask, &netmask_addr) ||
            !inet_aton(gw, &gw_addr)) {
            return NSAPI_ERROR_PARAMETER;
        }

        netif_set_addr(&lwip_netif, &ip_addr, &netmask_addr, &gw_addr);
    }
#endif

    netif_set_up(&lwip_netif);

#if LWIP_IPV4
    // Connect to the network
    lwip_dhcp = dhcp;

    if (lwip_dhcp) {
        err_t err = dhcp_start(&lwip_netif);
        if (err) {
            return NSAPI_ERROR_DHCP_FAILURE;
        }
    }
#endif

    // If the netif doesn't have an address yet, wait for one
    if (!mbed_lwip_get_ip_addr(true, &lwip_netif)) {
        ret = sys_arch_sem_wait(&lwip_netif_has_addr, 15000);
        if (ret == SYS_ARCH_TIMEOUT) {
            return NSAPI_ERROR_DHCP_FAILURE;
        }
        lwip_connected = true;
    }

#if ADDR_TIMEOUT
    // If the address is not for the preferred stack, wait a while to see
    // if the preferred stack address is acquired
    if (!mbed_lwip_get_ip_addr(false, &lwip_netif)) {
        ret = sys_arch_sem_wait(&lwip_netif_has_addr, ADDR_TIMEOUT * 1000);
    }
#endif

#if LWIP_IPV6
    add_dns_addr(&lwip_netif);
#endif

    return NSAPI_ERROR_OK;
}
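A hypothetical call site for the bring-up above, using DHCP and distinguishing the failure codes it can return (the wrapper name is illustrative):

void network_start(void)
{
    nsapi_error_t err = mbed_lwip_bringup(true /* dhcp */, NULL, NULL, NULL);
    if (err == NSAPI_ERROR_NO_CONNECTION) {
        /* link never came up within the 15 s wait */
    } else if (err == NSAPI_ERROR_DHCP_FAILURE) {
        /* link up, but no address was acquired in time */
    } else if (err != NSAPI_ERROR_OK) {
        /* parameter or device error */
    }
}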
nsapi_error_t mbed_lwip_bringup_2(bool dhcp, bool ppp, const char *ip, const char *netmask, const char *gw, const nsapi_ip_stack_t stack)
{
    // Check if we've already connected
    if (lwip_connected == NSAPI_STATUS_GLOBAL_UP) {
        return NSAPI_ERROR_IS_CONNECTED;
    } else if (lwip_connected == NSAPI_STATUS_CONNECTING) {
        return NSAPI_ERROR_ALREADY;
    }

    lwip_connected = NSAPI_STATUS_CONNECTING;
    lwip_ppp = ppp;

#if LWIP_DHCP
    lwip_dhcp_has_to_be_set = true;
    if (stack != IPV6_STACK) {
        lwip_dhcp = dhcp;
    } else {
        lwip_dhcp = false;
    }
#endif

    mbed_lwip_core_init();

    nsapi_error_t ret;
    if (netif_inited) {
        /* Can't cope with changing mode */
        if (netif_is_ppp == ppp) {
            ret = NSAPI_ERROR_OK;
        } else {
            ret = NSAPI_ERROR_PARAMETER;
        }
    } else {
        if (ppp) {
            ret = ppp_lwip_if_init(&lwip_netif, stack);
        } else {
            ret = mbed_lwip_emac_init(NULL);
        }
    }

    if (ret != NSAPI_ERROR_OK) {
        lwip_connected = NSAPI_STATUS_DISCONNECTED;
        return ret;
    }

    if (lwip_client_callback) {
        lwip_client_callback(lwip_status_cb_handle, NSAPI_EVENT_CONNECTION_STATUS_CHANGE, NSAPI_STATUS_CONNECTING);
    }

    netif_inited = true;
    if (ppp) {
        netif_is_ppp = ppp;
    }

    netif_set_default(&lwip_netif);
    netif_set_link_callback(&lwip_netif, mbed_lwip_netif_link_irq);
    netif_set_status_callback(&lwip_netif, mbed_lwip_netif_status_irq);

#if LWIP_IPV6
    if (stack != IPV4_STACK) {
        if (lwip_netif.hwaddr_len == ETH_HWADDR_LEN) {
            netif_create_ip6_linklocal_address(&lwip_netif, 1/*from MAC*/);
        }

#if LWIP_IPV6_MLD
        /*
         * For hardware/netifs that implement MAC filtering.
         * All-nodes link-local is handled by default, so we must let the hardware know
         * to allow multicast packets in.
         * Should set mld_mac_filter previously. */
        if (lwip_netif.mld_mac_filter != NULL) {
            ip6_addr_t ip6_allnodes_ll;
            ip6_addr_set_allnodes_linklocal(&ip6_allnodes_ll);
            lwip_netif.mld_mac_filter(&lwip_netif, &ip6_allnodes_ll, NETIF_ADD_MAC_FILTER);
        }
#endif /* LWIP_IPV6_MLD */

#if LWIP_IPV6_AUTOCONFIG
        /* IPv6 address autoconfiguration not enabled by default */
        lwip_netif.ip6_autoconfig_enabled = 1;
#endif /* LWIP_IPV6_AUTOCONFIG */
    } else {
        // Disable router solicitations
        lwip_netif.rs_count = 0;
    }
#endif // LWIP_IPV6

#if LWIP_IPV4
    if (stack != IPV6_STACK) {
        if (!dhcp && !ppp) {
            ip4_addr_t ip_addr;
            ip4_addr_t netmask_addr;
            ip4_addr_t gw_addr;

            if (!inet_aton(ip, &ip_addr) ||
                !inet_aton(netmask, &netmask_addr) ||
                !inet_aton(gw, &gw_addr)) {
                lwip_connected = NSAPI_STATUS_DISCONNECTED;
                if (lwip_client_callback) {
                    lwip_client_callback(lwip_status_cb_handle, NSAPI_EVENT_CONNECTION_STATUS_CHANGE, NSAPI_STATUS_DISCONNECTED);
                }
                return NSAPI_ERROR_PARAMETER;
            }

            netif_set_addr(&lwip_netif, &ip_addr, &netmask_addr, &gw_addr);
        }
    }
#endif

    if (ppp) {
        err_t err = ppp_lwip_connect();
        if (err) {
            lwip_connected = NSAPI_STATUS_DISCONNECTED;
            if (lwip_client_callback) {
                lwip_client_callback(lwip_status_cb_handle, NSAPI_EVENT_CONNECTION_STATUS_CHANGE, NSAPI_STATUS_DISCONNECTED);
            }
            return mbed_lwip_err_remap(err);
        }
    }

    if (!netif_is_link_up(&lwip_netif)) {
        if (lwip_blocking) {
            if (sys_arch_sem_wait(&lwip_netif_linked, 15000) == SYS_ARCH_TIMEOUT) {
                if (ppp) {
                    ppp_lwip_disconnect();
                }
                return NSAPI_ERROR_NO_CONNECTION;
            }
        }
    } else {
        ret = mbed_set_dhcp(&lwip_netif);
        if (ret != NSAPI_ERROR_OK) {
            return ret;
        }
    }

    if (lwip_blocking) {
        // If the netif doesn't have an address yet, wait for one
        if (!mbed_lwip_get_ip_addr(true, &lwip_netif)) {
            if (sys_arch_sem_wait(&lwip_netif_has_any_addr, DHCP_TIMEOUT * 1000) == SYS_ARCH_TIMEOUT) {
                if (ppp) {
                    ppp_lwip_disconnect();
                }
                return NSAPI_ERROR_DHCP_FAILURE;
            }
        }
    } else {
        return NSAPI_ERROR_OK;
    }

#if PREF_ADDR_TIMEOUT
    if (stack != IPV4_STACK && stack != IPV6_STACK) {
        // If the address is not for the preferred stack, wait a while to see
        // if the preferred stack address is acquired
        if (!mbed_lwip_get_ip_addr(false, &lwip_netif)) {
            sys_arch_sem_wait(&lwip_netif_has_pref_addr, PREF_ADDR_TIMEOUT * 1000);
        }
    }
#endif
#if BOTH_ADDR_TIMEOUT
    if (stack != IPV4_STACK && stack != IPV6_STACK) {
        // If addresses for both stacks are not available, wait a while to
        // see if addresses for both stacks are acquired
        if (!(mbed_lwip_get_ipv4_addr(&lwip_netif) && mbed_lwip_get_ipv6_addr(&lwip_netif))) {
            sys_arch_sem_wait(&lwip_netif_has_both_addr, BOTH_ADDR_TIMEOUT * 1000);
        }
    }
#endif

    add_dns_addr(&lwip_netif);

    return NSAPI_ERROR_OK;
}