Example #1
sys_thread_t sys_thread_new(const char *name, lwip_thread_fn thread, void *arg, int stacksize, int prio)
{
  sys_thread_t    newthread;
  portBASE_TYPE   result;
  SYS_ARCH_DECL_PROTECT(protectionLevel);

  result = xTaskCreate( thread, (signed portCHAR *)name, stacksize, arg, prio, &newthread );

  // Need to protect this -- preemption here could be a problem!
  SYS_ARCH_PROTECT(protectionLevel);
  if( pdPASS == result )
  {
    // For each task created, store the task handle (pid) in the timers array.
    // This scheme doesn't allow for threads to be deleted
    Threads_TimeoutsList[NbActiveThreads++].pid = newthread;
  }
  else
  {
    newthread = NULL;
  }
  SYS_ARCH_UNPROTECT(protectionLevel);

  return( newthread );
}
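
For reference, every snippet in this collection leans on the port-supplied lightweight protection macros (SYS_ARCH_DECL_PROTECT / SYS_ARCH_PROTECT / SYS_ARCH_UNPROTECT). A minimal sketch of how a FreeRTOS-based sys_arch layer might define them is shown below; taskENTER_CRITICAL()/taskEXIT_CRITICAL() are standard FreeRTOS calls, but the exact wiring is an assumption, not the port used above.

typedef u32_t sys_prot_t;                   /* lwIP's opaque protection token */

#define SYS_ARCH_DECL_PROTECT(lev)  sys_prot_t lev
#define SYS_ARCH_PROTECT(lev)       lev = sys_arch_protect()
#define SYS_ARCH_UNPROTECT(lev)     sys_arch_unprotect(lev)

sys_prot_t sys_arch_protect(void)
{
  taskENTER_CRITICAL();   /* FreeRTOS critical section; nesting is tracked by the kernel */
  return 1;               /* the returned token is opaque to lwIP */
}

void sys_arch_unprotect(sys_prot_t lev)
{
  (void)lev;
  taskEXIT_CRITICAL();
}
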
void
memp_free(memp_t type, void *mem)
{
  struct memp *memp;
#if SYS_LIGHTWEIGHT_PROT
  SYS_ARCH_DECL_PROTECT(old_level);
#endif /* SYS_LIGHTWEIGHT_PROT */  

  if (mem == NULL) {
    return;
  }
  memp = (struct memp *)((u8_t *)mem - sizeof(struct memp));

#if SYS_LIGHTWEIGHT_PROT
    SYS_ARCH_PROTECT(old_level);
#else /* SYS_LIGHTWEIGHT_PROT */  
  sys_sem_wait(mutex);
#endif /* SYS_LIGHTWEIGHT_PROT */  

#if MEMP_STATS
  lwip_stats.memp[type].used--; 
#endif /* MEMP_STATS */
  
  memp->next = memp_tab[type]; 
  memp_tab[type] = memp;

#if MEMP_SANITY_CHECK
  LWIP_ASSERT("memp sanity", memp_sanity());
#endif  

#if SYS_LIGHTWEIGHT_PROT
  SYS_ARCH_UNPROTECT(old_level);
#else /* SYS_LIGHTWEIGHT_PROT */
  sys_sem_signal(mutex);
#endif /* SYS_LIGHTWEIGHT_PROT */  
}
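
memp_free() above is the counterpart of memp_malloc(); for context, a minimal usage sketch (function name illustrative, error handling trimmed):

#include "lwip/memp.h"
#include "lwip/netbuf.h"

/* Sketch: take one element from the MEMP_NETBUF pool and hand it back. */
static void memp_roundtrip_example(void)
{
  struct netbuf *buf = (struct netbuf *)memp_malloc(MEMP_NETBUF);
  if (buf == NULL) {
    return;                      /* pool exhausted */
  }
  /* ... fill in buf->p, buf->addr, buf->port ... */
  memp_free(MEMP_NETBUF, buf);   /* element goes back onto memp_tab[MEMP_NETBUF] */
}
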
Example #3
/********************************************
* Function name : sys_arch_timeouts
* Description   : get the calling thread's timeouts structure
* Input         : none
*
* Output        : struct sys_timeouts *: the thread's timeouts structure
********************************************/
struct sys_timeouts * sys_arch_timeouts(void)
{
    struct timeoutnode *pto = timeoutslist;
    u8_t curprio;
    SYS_ARCH_DECL_PROTECT(cpusr);

    SYS_ARCH_PROTECT(cpusr);
    curprio = acoral_cur_thread->prio;
    SYS_ARCH_UNPROTECT(cpusr);

    while (pto != &nulltimeouts)
    {
        if (pto->prio == curprio)
        {
            return &(pto->timeouts);
        }
        else
        {
            pto = pto->next;
        }
    }

    return &(pto->timeouts);
}
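
The struct timeoutnode used above is not part of the snippet; judging from how it is accessed, the per-thread node on this aCoral port presumably looks something like the following (assumed, lwIP 1.3.x-era per-thread timeout bookkeeping):

struct timeoutnode {
    u8_t                 prio;      /* priority of the owning lwIP thread */
    struct sys_timeouts  timeouts;  /* that thread's lwIP timeout list */
    struct timeoutnode  *next;      /* next node; the list is terminated by the
                                       sentinel 'nulltimeouts' */
};
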
Example #4
/**
 * Create new thread.
 */
sys_thread_t sys_thread_new(const char*name, lwip_thread_fn thread, void *arg, int stacksize, int prio)
{
    int rc;
#if SYS_LIGHTWEIGHT_PROT
    SYS_ARCH_DECL_PROTECT(old_level);
#endif
    unsigned id;
    RTTHREAD tid;

#if SYS_LIGHTWEIGHT_PROT
    SYS_ARCH_PROTECT(old_level);
#else
    RTSemEventWait(g_ThreadSem, RT_INDEFINITE_WAIT);
#endif
    id = g_cThreads;
    g_cThreads++;
    Assert(g_cThreads <= THREADS_MAX);
    g_aTLS[id].thread = thread;
    g_aTLS[id].arg = arg;
    rc = RTThreadCreateF(&tid, sys_thread_adapter, &g_aTLS[id], 0,
                         RTTHREADTYPE_IO, 0, "lwIP%u", id);
    if (RT_FAILURE(rc))
    {
        g_cThreads--;
        tid = NIL_RTTHREAD;
    }
    else
        g_aTLS[id].tid = tid;
#if SYS_LIGHTWEIGHT_PROT
    SYS_ARCH_UNPROTECT(old_level);
#else
    RTSemEventSignal(g_ThreadSem);
#endif
    AssertRC(rc);
    return tid;
}
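
Both sys_thread_new() implementations above are called the same way from application code. A hedged usage sketch (stack size and priority are placeholders):

#include "lwip/sys.h"

static void app_main_thread(void *arg)   /* matches lwip_thread_fn */
{
  (void)arg;
  /* ... socket/netconn work happens here ... */
}

void start_app_thread(void)
{
  sys_thread_new("app_main", app_main_thread, NULL,
                 1024 /* stacksize */, 3 /* prio */);
}
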
Example #5
int
xemacif_input(struct netif *netif)
{
	struct xemac_s *emac = (struct xemac_s *)netif->state;
	SYS_ARCH_DECL_PROTECT(lev);

	int n_packets = 0;

	switch (emac->type) {
		case xemac_type_xps_emaclite:
#ifdef XLWIP_CONFIG_INCLUDE_EMACLITE
			SYS_ARCH_PROTECT(lev);
			n_packets = xemacliteif_input(netif);
			SYS_ARCH_UNPROTECT(lev);
			break;
#else
			print("incorrect configuration: xps_ethernetlite drivers not present?");
			while(1);
			return 0;
#endif
		case xemac_type_xps_ll_temac:
#ifdef XLWIP_CONFIG_INCLUDE_TEMAC
			SYS_ARCH_PROTECT(lev);
			n_packets = xlltemacif_input(netif);
			SYS_ARCH_UNPROTECT(lev);
			break;
#else
			print("incorrect configuration: xps_ll_temac drivers not present?");
			while(1);
			return 0;
#endif
		case xemac_type_axi_ethernet:
#ifdef XLWIP_CONFIG_INCLUDE_AXI_ETHERNET
			SYS_ARCH_PROTECT(lev);
			n_packets = xaxiemacif_input(netif);
			SYS_ARCH_UNPROTECT(lev);
			break;
#else
			print("incorrect configuration: axi_ethernet drivers not present?");
			while(1);
			return 0;
#endif
#ifdef __arm__
		case xemac_type_emacps:
#ifdef XLWIP_CONFIG_INCLUDE_GEM
			SYS_ARCH_PROTECT(lev);
			n_packets = xemacpsif_input(netif);
			SYS_ARCH_UNPROTECT(lev);
			break;
#else
			xil_printf("incorrect configuration: ps7_ethernet drivers not present?\r\n");
			while(1);
			return 0;
#endif
#endif
		default:
			print("incorrect configuration: unknown temac type");
			while(1);
			return 0;
	}

	return n_packets;
}
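
On a Xilinx standalone (no-OS) design, xemacif_input() is normally polled from the application's main loop, roughly as sketched below; the header path follows the Xilinx lwIP adapter sources and the surrounding loop is illustrative.

#include "netif/xadapter.h"

void app_poll_loop(struct netif *netif)
{
  for (;;) {
    /* hand any frames the MAC has received to lwIP; returns packets processed */
    (void)xemacif_input(netif);
    /* ... run application logic, lwIP timers, etc. ... */
  }
}
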
Example #6
/**
 * Receive callback function for UDP netconns.
 * Posts the packet to conn->recvmbox or deletes it on memory error.
 *
 * @see udp.h (struct udp_pcb.recv) for parameters
 */
static void
recv_udp(void *arg, struct udp_pcb *pcb, struct pbuf *p,
         ip_addr_t *addr, u16_t port)
{
    struct netbuf *buf;
    struct netconn *conn;
    u16_t len;
#if LWIP_SO_RCVBUF
    int recv_avail;
#endif /* LWIP_SO_RCVBUF */

    LWIP_UNUSED_ARG(pcb); /* only used for asserts... */
    LWIP_ASSERT("recv_udp must have a pcb argument", pcb != NULL);
    LWIP_ASSERT("recv_udp must have an argument", arg != NULL);
    conn = (struct netconn *)arg;
    LWIP_ASSERT("recv_udp: recv for wrong pcb!", conn->pcb.udp == pcb);

#if LWIP_SO_RCVBUF
    SYS_ARCH_GET(conn->recv_avail, recv_avail);
    if ((conn == NULL) || !sys_mbox_valid(&conn->recvmbox) ||
            ((recv_avail + (int)(p->tot_len)) > conn->recv_bufsize)) {
#else  /* LWIP_SO_RCVBUF */
    if ((conn == NULL) || !sys_mbox_valid(&conn->recvmbox)) {
#endif /* LWIP_SO_RCVBUF */
        pbuf_free(p);
        return;
    }

    buf = (struct netbuf *)memp_malloc(MEMP_NETBUF);
    if (buf == NULL) {
        pbuf_free(p);
        return;
    } else {
        buf->p = p;
        buf->ptr = p;
        ip_addr_set(&buf->addr, addr);
        buf->port = port;
#if LWIP_NETBUF_RECVINFO
        {
            const struct ip_hdr* iphdr = ip_current_header();
            /* get the UDP header - always in the first pbuf, ensured by udp_input */
            const struct udp_hdr* udphdr = (const void*)(((const char*)iphdr) + (IPH_HL(iphdr) * 4));
#if LWIP_CHECKSUM_ON_COPY
            buf->flags = NETBUF_FLAG_DESTADDR;
#endif /* LWIP_CHECKSUM_ON_COPY */
            ip_addr_set(&buf->toaddr, ip_current_dest_addr());
            buf->toport_chksum = udphdr->dest;
        }
#endif /* LWIP_NETBUF_RECVINFO */
    }

    len = p->tot_len;
    if (sys_mbox_trypost(&conn->recvmbox, buf) != ERR_OK) {
        netbuf_delete(buf);
        return;
    } else {
#if LWIP_SO_RCVBUF
        SYS_ARCH_INC(conn->recv_avail, len);
#endif /* LWIP_SO_RCVBUF */
        /* Register event with callback */
        API_EVENT(conn, NETCONN_EVT_RCVPLUS, len);
    }
}
#endif /* LWIP_UDP */

#if LWIP_TCP
/**
 * Receive callback function for TCP netconns.
 * Posts the packet to conn->recvmbox, but doesn't delete it on errors.
 *
 * @see tcp.h (struct tcp_pcb.recv) for parameters and return value
 */
static err_t
recv_tcp(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err)
{
    struct netconn *conn;
    u16_t len;

    LWIP_UNUSED_ARG(pcb);
    LWIP_ASSERT("recv_tcp must have a pcb argument", pcb != NULL);
    LWIP_ASSERT("recv_tcp must have an argument", arg != NULL);
    conn = (struct netconn *)arg;
    LWIP_ASSERT("recv_tcp: recv for wrong pcb!", conn->pcb.tcp == pcb);

    if (conn == NULL) {
        return ERR_VAL;
    }
    if (!sys_mbox_valid(&conn->recvmbox)) {
        /* recvmbox already deleted */
        if (p != NULL) {
            tcp_recved(pcb, p->tot_len);
            pbuf_free(p);
        }
        return ERR_OK;
    }
    /* Unlike for UDP or RAW pcbs, don't check for available space
       using recv_avail since that could break the connection
       (data is already ACKed) */

    /* don't overwrite fatal errors! */
    NETCONN_SET_SAFE_ERR(conn, err);

    if (p != NULL) {
        len = p->tot_len;
    } else {
        len = 0;
    }

    if (sys_mbox_trypost(&conn->recvmbox, p) != ERR_OK) {
        /* don't deallocate p: it is presented to us later again from tcp_fasttmr! */
        return ERR_MEM;
    } else {
#if LWIP_SO_RCVBUF
        SYS_ARCH_INC(conn->recv_avail, len);
#endif /* LWIP_SO_RCVBUF */
        /* Register event with callback */
        API_EVENT(conn, NETCONN_EVT_RCVPLUS, len);
    }

    return ERR_OK;
}

/**
 * Poll callback function for TCP netconns.
 * Wakes up an application thread that waits for a connection to close
 * or data to be sent. The application thread then takes the
 * appropriate action to go on.
 *
 * Signals the conn->sem.
 * netconn_close waits for conn->sem if closing failed.
 *
 * @see tcp.h (struct tcp_pcb.poll) for parameters and return value
 */
static err_t
poll_tcp(void *arg, struct tcp_pcb *pcb)
{
    struct netconn *conn = (struct netconn *)arg;

    LWIP_UNUSED_ARG(pcb);
    LWIP_ASSERT("conn != NULL", (conn != NULL));

    if (conn->state == NETCONN_WRITE) {
        do_writemore(conn);
    } else if (conn->state == NETCONN_CLOSE) {
        do_close_internal(conn);
    }
    /* @todo: implement connect timeout here? */

    /* Did a nonblocking write fail before? Then check available write-space. */
    if (conn->flags & NETCONN_FLAG_CHECK_WRITESPACE) {
        /* If the queued byte- or pbuf-count drops below the configured low-water limit,
           let select mark this pcb as writable again. */
        if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) &&
                (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) {
            conn->flags &= ~NETCONN_FLAG_CHECK_WRITESPACE;
            API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0);
        }
    }

    return ERR_OK;
}

/**
 * Sent callback function for TCP netconns.
 * Signals the conn->sem and calls API_EVENT.
 * netconn_write waits for conn->sem if send buffer is low.
 *
 * @see tcp.h (struct tcp_pcb.sent) for parameters and return value
 */
static err_t
sent_tcp(void *arg, struct tcp_pcb *pcb, u16_t len)
{
    struct netconn *conn = (struct netconn *)arg;

    LWIP_UNUSED_ARG(pcb);
    LWIP_ASSERT("conn != NULL", (conn != NULL));

    if (conn->state == NETCONN_WRITE) {
        do_writemore(conn);
    } else if (conn->state == NETCONN_CLOSE) {
        do_close_internal(conn);
    }

    if (conn) {
        /* If the queued byte- or pbuf-count drops below the configured low-water limit,
           let select mark this pcb as writable again. */
        if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT) &&
                (tcp_sndqueuelen(conn->pcb.tcp) < TCP_SNDQUEUELOWAT)) {
            conn->flags &= ~NETCONN_FLAG_CHECK_WRITESPACE;
            API_EVENT(conn, NETCONN_EVT_SENDPLUS, len);
        }
    }

    return ERR_OK;
}

/**
 * Error callback function for TCP netconns.
 * Signals conn->sem, posts to all conn mboxes and calls API_EVENT.
 * The application thread has then to decide what to do.
 *
 * @see tcp.h (struct tcp_pcb.err) for parameters
 */
static void
err_tcp(void *arg, err_t err)
{
    struct netconn *conn;
    enum netconn_state old_state;
    SYS_ARCH_DECL_PROTECT(lev);

    conn = (struct netconn *)arg;
    LWIP_ASSERT("conn != NULL", (conn != NULL));

    conn->pcb.tcp = NULL;

    /* no check since this is always fatal! */
    SYS_ARCH_PROTECT(lev);
    conn->last_err = err;
    SYS_ARCH_UNPROTECT(lev);

    /* reset conn->state now before waking up other threads */
    old_state = conn->state;
    conn->state = NETCONN_NONE;

    /* Notify the user layer about a connection error. Used to signal
       select. */
    API_EVENT(conn, NETCONN_EVT_ERROR, 0);
    /* Try to release selects pending on 'read' or 'write', too.
       They will get an error if they actually try to read or write. */
    API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0);
    API_EVENT(conn, NETCONN_EVT_SENDPLUS, 0);

    /* pass NULL-message to recvmbox to wake up pending recv */
    if (sys_mbox_valid(&conn->recvmbox)) {
        /* use trypost to prevent deadlock */
        sys_mbox_trypost(&conn->recvmbox, NULL);
    }
    /* pass NULL-message to acceptmbox to wake up pending accept */
    if (sys_mbox_valid(&conn->acceptmbox)) {
        /* use trypost to prevent deadlock */
        sys_mbox_trypost(&conn->acceptmbox, NULL);
    }

    if ((old_state == NETCONN_WRITE) || (old_state == NETCONN_CLOSE) ||
            (old_state == NETCONN_CONNECT)) {
        /* calling do_writemore/do_close_internal is not necessary
           since the pcb has already been deleted! */
        int was_nonblocking_connect = IN_NONBLOCKING_CONNECT(conn);
        SET_NONBLOCKING_CONNECT(conn, 0);

        if (!was_nonblocking_connect) {
            /* set error return code */
            LWIP_ASSERT("conn->current_msg != NULL", conn->current_msg != NULL);
            conn->current_msg->err = err;
            conn->current_msg = NULL;
            /* wake up the waiting task */
            sys_sem_signal(&conn->op_completed);
        }
    } else {
        LWIP_ASSERT("conn->current_msg == NULL", conn->current_msg == NULL);
    }
}

/**
 * Setup a tcp_pcb with the correct callback function pointers
 * and their arguments.
 *
 * @param conn the TCP netconn to setup
 */
static void
setup_tcp(struct netconn *conn)
{
    struct tcp_pcb *pcb;

    pcb = conn->pcb.tcp;
    tcp_arg(pcb, conn);
    tcp_recv(pcb, recv_tcp);
    tcp_sent(pcb, sent_tcp);
    tcp_poll(pcb, poll_tcp, 4);
    tcp_err(pcb, err_tcp);
}

/**
 * Accept callback function for TCP netconns.
 * Allocates a new netconn and posts that to conn->acceptmbox.
 *
 * @see tcp.h (struct tcp_pcb_listen.accept) for parameters and return value
 */
static err_t
accept_function(void *arg, struct tcp_pcb *newpcb, err_t err)
{
    struct netconn *newconn;
    struct netconn *conn = (struct netconn *)arg;

    LWIP_DEBUGF(API_MSG_DEBUG, ("accept_function: newpcb->state: %s\n", tcp_debug_state_str(newpcb->state)));

    if (!sys_mbox_valid(&conn->acceptmbox)) {
        LWIP_DEBUGF(API_MSG_DEBUG, ("accept_function: acceptmbox already deleted\n"));
        return ERR_VAL;
    }

    /* We have to set the callback here even though
     * the new socket is unknown. conn->socket is marked as -1. */
    newconn = netconn_alloc(conn->type, conn->callback);
    if (newconn == NULL) {
        return ERR_MEM;
    }
    newconn->pcb.tcp = newpcb;
    setup_tcp(newconn);
    /* no protection: when creating the pcb, the netconn is not yet known
       to the application thread */
    newconn->last_err = err;

    if (sys_mbox_trypost(&conn->acceptmbox, newconn) != ERR_OK) {
        /* When returning != ERR_OK, the pcb is aborted in tcp_process(),
           so do nothing here! */
        /* remove all references to this netconn from the pcb */
        struct tcp_pcb* pcb = newconn->pcb.tcp;
        tcp_arg(pcb, NULL);
        tcp_recv(pcb, NULL);
        tcp_sent(pcb, NULL);
        tcp_poll(pcb, NULL, 4);
        tcp_err(pcb, NULL);
        /* remove the reference to the pcb from this netconn */
        newconn->pcb.tcp = NULL;
        /* no need to drain since we know the recvmbox is empty. */
        sys_mbox_free(&newconn->recvmbox);
        sys_mbox_set_invalid(&newconn->recvmbox);
        netconn_free(newconn);
        return ERR_MEM;
    } else {
        /* Register event with callback */
        API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0);
    }

    return ERR_OK;
}
#endif /* LWIP_TCP */

/**
 * Create a new pcb of a specific type.
 * Called from do_newconn().
 *
 * @param msg the api_msg_msg describing the connection type
 * @return msg->conn->err, but the return value is currently ignored
 */
static void
pcb_new(struct api_msg_msg *msg)
{
    LWIP_ASSERT("pcb_new: pcb already allocated", msg->conn->pcb.tcp == NULL);

    /* Allocate a PCB for this connection */
    switch(NETCONNTYPE_GROUP(msg->conn->type)) {
#if LWIP_RAW
    case NETCONN_RAW:
        msg->conn->pcb.raw = raw_new(msg->msg.n.proto);
        if(msg->conn->pcb.raw == NULL) {
            msg->err = ERR_MEM;
            break;
        }
        raw_recv(msg->conn->pcb.raw, recv_raw, msg->conn);
        break;
#endif /* LWIP_RAW */
#if LWIP_UDP
    case NETCONN_UDP:
        msg->conn->pcb.udp = udp_new();
        if(msg->conn->pcb.udp == NULL) {
            msg->err = ERR_MEM;
            break;
        }
#if LWIP_UDPLITE
        if (msg->conn->type==NETCONN_UDPLITE) {
            udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_UDPLITE);
        }
#endif /* LWIP_UDPLITE */
        if (msg->conn->type==NETCONN_UDPNOCHKSUM) {
            udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
        }
        udp_recv(msg->conn->pcb.udp, recv_udp, msg->conn);
        break;
#endif /* LWIP_UDP */
#if LWIP_TCP
    case NETCONN_TCP:
        msg->conn->pcb.tcp = tcp_new();
        if(msg->conn->pcb.tcp == NULL) {
            msg->err = ERR_MEM;
            break;
        }
        setup_tcp(msg->conn);
        break;
#endif /* LWIP_TCP */
    default:
        /* Unsupported netconn type, e.g. protocol disabled */
        msg->err = ERR_VAL;
        break;
    }
}
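
The callbacks above are installed by pcb_new()/setup_tcp() and feed the sequential netconn API. A minimal UDP sketch that exercises recv_udp() (error handling trimmed, lwIP 1.4-style netconn_recv() signature assumed):

#include "lwip/api.h"

static void udp_echo_once(void)
{
  struct netconn *conn = netconn_new(NETCONN_UDP);  /* pcb_new() installs recv_udp */
  struct netbuf  *buf;

  if (conn == NULL) {
    return;
  }
  netconn_bind(conn, IP_ADDR_ANY, 7);               /* UDP echo port */
  if (netconn_recv(conn, &buf) == ERR_OK) {         /* blocks on the recvmbox that recv_udp() posts to */
    netconn_send(conn, buf);                        /* echo back to the sender */
    netbuf_delete(buf);
  }
  netconn_delete(conn);
}
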
Example #7
/**
 * Shrink memory returned by mem_malloc().
 *
 * @param rmem pointer to memory allocated by mem_malloc() that is to be shrunk
 * @param newsize required size after shrinking (needs to be smaller than or
 *                equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment
 *         or NULL if newsize is > old size, in which case rmem is NOT touched
 *         or freed!
 */
void *
mem_trim(void *rmem, mem_size_t newsize)
{
  mem_size_t size;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;
  /* use the FREE_PROTECT here: it protects with sem OR SYS_ARCH_PROTECT */
  LWIP_MEM_FREE_DECL_PROTECT();

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = LWIP_MEM_ALIGN_SIZE(newsize);

  if(newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }

  if (newsize > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  LWIP_ASSERT("mem_trim: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
   (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    SYS_ARCH_DECL_PROTECT(lev);
    LWIP_DEBUGF(MEM_DEBUG | LWIP_DBG_LEVEL_SEVERE, ("mem_trim: illegal memory\n"));
    /* protect mem stats from concurrent access */
    SYS_ARCH_PROTECT(lev);
    MEM_STATS_INC(illegal);
    SYS_ARCH_UNPROTECT(lev);
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)(void *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... and its offset pointer */
  ptr = (mem_size_t)((u8_t *)mem - ram);

  size = mem->next - ptr - SIZEOF_STRUCT_MEM;
  LWIP_ASSERT("mem_trim can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  LWIP_MEM_FREE_PROTECT();

  mem2 = (struct mem *)(void *)&ram[mem->next];
  if(mem2->used == 0) {
    /* The next struct is unused, so we can simply move it a little */
    mem_size_t next;
    /* remember the old next pointer */
    next = mem2->next;
    /* create a new struct mem which is moved directly after the shrunken mem */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    if (lfree == mem2) {
      lfree = (struct mem *)(void *)&ram[ptr2];
    }
    mem2 = (struct mem *)(void *)&ram[ptr2];
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     *       region that couldn't hold data, but when mem->next gets freed,
     *       the 2 regions would be combined, resulting in more free memory */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    mem2 = (struct mem *)(void *)&ram[ptr2];
    if (mem2 < lfree) {
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)(void *)&ram[mem2->next])->prev = ptr2;
    }
    MEM_STATS_DEC_USED(used, (size - newsize));
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but size between mem and mem2 is not big enough
    to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
#if LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT
  mem_free_count = 1;
#endif /* LWIP_ALLOW_MEM_FREE_FROM_OTHER_CONTEXT */
  LWIP_MEM_FREE_UNPROTECT();
  return rmem;
}
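
A brief sketch of how mem_trim() is meant to be used: shrink a worst-case mem_malloc() allocation in place once the real size is known (function name illustrative):

#include "lwip/mem.h"

static void *alloc_then_trim(mem_size_t worst_case, mem_size_t actual)
{
  void *p = mem_malloc(worst_case);
  if ((p != NULL) && (actual <= worst_case)) {
    p = mem_trim(p, actual);   /* returns p; the freed tail goes back to the heap */
  }
  return p;
}
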
Example #8
/**
 * Send an IP packet to be received on the same netif (loopif-like).
 * The pbuf is simply copied and handed back to netif->input.
 * In multithreaded mode, this is done directly since netif->input must put
 * the packet on a queue.
 * In callback mode, the packet is put on an internal queue and is fed to
 * netif->input by netif_poll().
 *
 * @param netif the lwip network interface structure
 * @param p the (IP) packet to 'send'
 * @param ipaddr the ip address to send the packet to (not used)
 * @return ERR_OK if the packet has been sent
 *         ERR_MEM if the pbuf used to copy the packet couldn't be allocated
 */
err_t
netif_loop_output(struct netif *netif, struct pbuf *p,
       ip_addr_t *ipaddr)
{
  struct pbuf *r;
  err_t err;
  struct pbuf *last;
#if LWIP_LOOPBACK_MAX_PBUFS
  u8_t clen = 0;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */
  /* If we have a loopif, SNMP counters are adjusted for it,
   * if not they are adjusted for 'netif'. */
#if LWIP_SNMP
#if LWIP_HAVE_LOOPIF
  struct netif *stats_if = &loop_netif;
#else /* LWIP_HAVE_LOOPIF */
  struct netif *stats_if = netif;
#endif /* LWIP_HAVE_LOOPIF */
#endif /* LWIP_SNMP */
  SYS_ARCH_DECL_PROTECT(lev);
  LWIP_UNUSED_ARG(ipaddr);

  /* Allocate a new pbuf */
  r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM);
  if (r == NULL) {
    LINK_STATS_INC(link.memerr);
    LINK_STATS_INC(link.drop);
    snmp_inc_ifoutdiscards(stats_if);
    return ERR_MEM;
  }
#if LWIP_LOOPBACK_MAX_PBUFS
  clen = pbuf_clen(r);
  /* check for overflow or too many pbuf on queue */
  if(((netif->loop_cnt_current + clen) < netif->loop_cnt_current) ||
     ((netif->loop_cnt_current + clen) > LWIP_LOOPBACK_MAX_PBUFS)) {
    pbuf_free(r);
    LINK_STATS_INC(link.memerr);
    LINK_STATS_INC(link.drop);
    snmp_inc_ifoutdiscards(stats_if);
    return ERR_MEM;
  }
  netif->loop_cnt_current += clen;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */

  /* Copy the whole pbuf queue p into the single pbuf r */
  if ((err = pbuf_copy(r, p)) != ERR_OK) {
    pbuf_free(r);
    LINK_STATS_INC(link.memerr);
    LINK_STATS_INC(link.drop);
    snmp_inc_ifoutdiscards(stats_if);
    return err;
  }

  /* Put the packet on a linked list which gets emptied through calling
     netif_poll(). */

  /* let last point to the last pbuf in chain r */
  for (last = r; last->next != NULL; last = last->next);

  SYS_ARCH_PROTECT(lev);
  if(netif->loop_first != NULL) {
    LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL);
    netif->loop_last->next = r;
    netif->loop_last = last;
  } else {
    netif->loop_first = r;
    netif->loop_last = last;
  }
  SYS_ARCH_UNPROTECT(lev);

  LINK_STATS_INC(link.xmit);
  snmp_add_ifoutoctets(stats_if, p->tot_len);
  snmp_inc_ifoutucastpkts(stats_if);

#if LWIP_NETIF_LOOPBACK_MULTITHREADING
  /* For multithreading environment, schedule a call to netif_poll */
  tcpip_callback((tcpip_callback_fn)netif_poll, netif);
#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */

  return ERR_OK;
}
Example #9
/**
 * Dereference a pbuf chain or queue and deallocate any no-longer-used
 * pbufs at the head of this chain or queue.
 *
 * Decrements the pbuf reference count. If it reaches zero, the pbuf is
 * deallocated.
 *
 * For a pbuf chain, this is repeated for each pbuf in the chain,
 * up to the first pbuf which has a non-zero reference count after
 * decrementing. So, when all reference counts are one, the whole
 * chain is freed.
 *
 * @param pbuf The pbuf (chain) to be dereferenced.
 *
 * @return the number of pbufs that were de-allocated
 * from the head of the chain.
 *
 * @note MUST NOT be called on a packet queue (Not verified to work yet).
 * @note the reference counter of a pbuf equals the number of pointers
 * that refer to the pbuf (or into the pbuf).
 *
 * @internal examples:
 *
 * Assuming existing chains a->b->c with the following reference
 * counts, calling pbuf_free(a) results in:
 * 
 * 1->2->3 becomes ...1->3
 * 3->3->3 becomes 2->3->3
 * 1->1->2 becomes ......1
 * 2->1->1 becomes 1->1->1
 * 1->1->1 becomes .......
 *
 */
u8_t
pbuf_free(struct pbuf *p)
{
  struct pbuf *q;
  u8_t count;
  SYS_ARCH_DECL_PROTECT(old_level);

  LWIP_ASSERT("p != NULL", p != NULL);
  /* if assertions are disabled, proceed with debug output */
  if (p == NULL) {
    LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 2, ("pbuf_free(p == NULL) was called.\n"));
    return 0;
  }
  LWIP_DEBUGF(PBUF_DEBUG | DBG_TRACE | 3, ("pbuf_free(%p)\n", (void *)p));

  PERF_START;

  LWIP_ASSERT("pbuf_free: sane flags",
    p->flags == PBUF_FLAG_RAM || p->flags == PBUF_FLAG_ROM ||
    p->flags == PBUF_FLAG_REF || p->flags == PBUF_FLAG_POOL);

  count = 0;
  /* Since decrementing ref cannot be guaranteed to be a single machine operation
   * we must protect it. Also, the later test of ref must be protected.
   */
  SYS_ARCH_PROTECT(old_level);
  /* de-allocate all consecutive pbufs from the head of the chain that
   * obtain a zero reference count after decrementing*/
  while (p != NULL) {
    /* all pbufs in a chain are referenced at least once */
    LWIP_ASSERT("pbuf_free: p->ref > 0", p->ref > 0);
    /* decrease reference count (number of pointers to pbuf) */
    p->ref--;
    /* is this pbuf no longer referenced? */
    if (p->ref == 0) {
      /* remember next pbuf in chain for next iteration */
      q = p->next;
      LWIP_DEBUGF( PBUF_DEBUG | 2, ("pbuf_free: deallocating %p\n", (void *)p));
      /* is this a pbuf from the pool? */
      if (p->flags == PBUF_FLAG_POOL) {
        p->len = p->tot_len = PBUF_POOL_BUFSIZE;
        p->payload = (void *)((u8_t *)p + sizeof(struct pbuf));
        PBUF_POOL_FREE(p);
      /* is this a ROM or RAM referencing pbuf? */
      } else if (p->flags == PBUF_FLAG_ROM || p->flags == PBUF_FLAG_REF) {
        memp_free(MEMP_PBUF, p);
      /* p->flags == PBUF_FLAG_RAM */
      } else {
        mem_free(p);
      }
      count++;
      /* proceed to next pbuf */
      p = q;
    /* p->ref > 0, this pbuf is still referenced */
    /* (and so are the remaining pbufs in the chain) */
    } else {
      LWIP_DEBUGF( PBUF_DEBUG | 2, ("pbuf_free: %p has ref %u, ending here.\n", (void *)p, (unsigned int)p->ref));
      /* stop walking through the chain */
      p = NULL;
    }
  }
  SYS_ARCH_UNPROTECT(old_level);
  PERF_STOP("pbuf_free");
  /* return number of de-allocated pbufs */
  return count;
}
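
A small sketch of the reference-count behaviour described in the header comment: with two references, the first pbuf_free() only decrements and the second one actually deallocates.

#include "lwip/pbuf.h"

static void pbuf_refcount_example(void)
{
  u8_t freed;
  struct pbuf *p = pbuf_alloc(PBUF_RAW, 64, PBUF_RAM);

  if (p == NULL) {
    return;
  }
  pbuf_ref(p);            /* ref: 1 -> 2 (register a second owner) */
  freed = pbuf_free(p);   /* ref: 2 -> 1, returns 0: nothing deallocated yet */
  freed = pbuf_free(p);   /* ref: 1 -> 0, returns 1: the pbuf is deallocated */
  (void)freed;
}
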
Example #10
/**
 * Send an IP packet to be received on the same netif (loopif-like).
 * The pbuf is simply copied and handed back to netif->input.
 * In multithreaded mode, this is done directly since netif->input must put
 * the packet on a queue.
 * In callback mode, the packet is put on an internal queue and is fed to
 * netif->input by netif_poll().
 *
 * @param netif the lwip network interface structure
 * @param p the (IP) packet to 'send'
 * @param ipaddr the ip address to send the packet to (not used)
 * @return ERR_OK if the packet has been sent
 *         ERR_MEM if the pbuf used to copy the packet couldn't be allocated
 */
err_t
netif_loop_output(struct netif *netif, struct pbuf *p,
       struct ip_addr *ipaddr)
{
  struct pbuf *r;
  err_t err;
  struct pbuf *last;
#if LWIP_LOOPBACK_MAX_PBUFS
  u8_t clen = 0;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */
  SYS_ARCH_DECL_PROTECT(lev);
  LWIP_UNUSED_ARG(ipaddr);

  /* Allocate a new pbuf */
  r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM);
  if (r == NULL) {
    return ERR_MEM;
  }
#if LWIP_LOOPBACK_MAX_PBUFS
  clen = pbuf_clen(r);
  /* check for overflow or too many pbuf on queue */
  if(((netif->loop_cnt_current + clen) < netif->loop_cnt_current) ||
    ((netif->loop_cnt_current + clen) > LWIP_LOOPBACK_MAX_PBUFS)) {
      pbuf_free(r);
      r = NULL;
      return ERR_MEM;
  }
  netif->loop_cnt_current += clen;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */

  /* Copy the whole pbuf queue p into the single pbuf r */
  if ((err = pbuf_copy(r, p)) != ERR_OK) {
    pbuf_free(r);
    r = NULL;
    return err;
  }

  /* Put the packet on a linked list which gets emptied through calling
     netif_poll(). */

  /* let last point to the last pbuf in chain r */
  for (last = r; last->next != NULL; last = last->next);

  SYS_ARCH_PROTECT(lev);
  if(netif->loop_first != NULL) {
    LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL);
    netif->loop_last->next = r;
    netif->loop_last = last;
  } else {
    netif->loop_first = r;
    netif->loop_last = last;
  }
  SYS_ARCH_UNPROTECT(lev);

#if LWIP_NETIF_LOOPBACK_MULTITHREADING
  /* For multithreading environment, schedule a call to netif_poll */
  tcpip_callback((void (*)(void *))(netif_poll), netif);
#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */

  return ERR_OK;
}
Example #11
/**
 * Call netif_poll() in the main loop of your application. This is to prevent
 * reentering non-reentrant functions like tcp_input(). Packets passed to
 * netif_loop_output() are put on a list that is passed to netif->input() by
 * netif_poll().
 */
void
netif_poll(struct netif *netif)
{
  struct pbuf *in;
  /* If we have a loopif, SNMP counters are adjusted for it,
   * if not they are adjusted for 'netif'. */
#if LWIP_SNMP
#if LWIP_HAVE_LOOPIF
  struct netif *stats_if = &loop_netif;
#else /* LWIP_HAVE_LOOPIF */
  struct netif *stats_if = netif;
#endif /* LWIP_HAVE_LOOPIF */
#endif /* LWIP_SNMP */
  SYS_ARCH_DECL_PROTECT(lev);

  do {
    /* Get a packet from the list. With SYS_LIGHTWEIGHT_PROT=1, this is protected */
    SYS_ARCH_PROTECT(lev);
    in = netif->loop_first;
    if (in != NULL) {
      struct pbuf *in_end = in;
#if LWIP_LOOPBACK_MAX_PBUFS
      u8_t clen = 1;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */
      while (in_end->len != in_end->tot_len) {
        LWIP_ASSERT("bogus pbuf: len != tot_len but next == NULL!", in_end->next != NULL);
        in_end = in_end->next;
#if LWIP_LOOPBACK_MAX_PBUFS
        clen++;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */
      }
#if LWIP_LOOPBACK_MAX_PBUFS
      /* adjust the number of pbufs on queue */
      LWIP_ASSERT("netif->loop_cnt_current underflow",
        ((netif->loop_cnt_current - clen) < netif->loop_cnt_current));
      netif->loop_cnt_current -= clen;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */

      /* 'in_end' now points to the last pbuf from 'in' */
      if (in_end == netif->loop_last) {
        /* this was the last pbuf in the list */
        netif->loop_first = netif->loop_last = NULL;
      } else {
        /* pop the pbuf off the list */
        netif->loop_first = in_end->next;
        LWIP_ASSERT("should not be null since first != last!", netif->loop_first != NULL);
      }
      /* De-queue the pbuf from its successors on the 'loop_' list. */
      in_end->next = NULL;
    }
    SYS_ARCH_UNPROTECT(lev);

    if (in != NULL) {
      LINK_STATS_INC(link.recv);
      snmp_add_ifinoctets(stats_if, in->tot_len);
      snmp_inc_ifinucastpkts(stats_if);
      /* loopback packets are always IP packets! */
      if (ipX_input(in, netif) != ERR_OK) {
        pbuf_free(in);
      }
      /* Don't reference the packet any more! */
      in = NULL;
    }
  /* go on while there is a packet on the list */
  } while (netif->loop_first != NULL);
}
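
As the header comment above says, netif_poll() belongs in the application's main loop when loopback packets are handled in callback mode. A hedged sketch (the driver call and the netif variable are placeholders):

#include "lwip/netif.h"
#include "lwip/timers.h"

extern struct netif app_netif;      /* assumed: the application's only netif */

void app_main_loop(void)
{
  for (;;) {
    /* poll the MAC driver for received frames here (placeholder) */
    sys_check_timeouts();           /* drive lwIP's timers (NO_SYS-style main loop) */
    netif_poll(&app_netif);         /* deliver packets queued by netif_loop_output() */
  }
}
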
//*****************************************************************************
//! Receives a TCP packet from lwIP for the telnet server.
//!
//! \param arg is the telnet state data for this connection.
//! \param pcb is the pointer to the TCP control structure.
//! \param p is the pointer to the pbuf structure containing the packet data.
//! \param err is used to indicate if any errors are associated with the
//! incoming packet.
//!
//! This function is called when the lwIP TCP/IP stack has an incoming packet
//! to be processed.
//!
//! \return This function will return an lwIP defined error code.
//*****************************************************************************
err_t TelnetReceive(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err)
{
    tState *pState = arg;
    SYS_ARCH_DECL_PROTECT(lev);

#if 0
    CONSOLE("%u: receive 0x%08x, 0x%08x, 0x%08x, %d\n", pState->ucSerialPort, arg, pcb, p, err);
#else
    CONSOLE("%u: receive error=%d\n", pState->ucSerialPort, err);
#endif

    // Place the incoming packet onto the queue if there is space.
    if((err == ERR_OK) && (p != NULL))
    {
        // This should be done in a protected/critical section.
        SYS_ARCH_PROTECT(lev);

        // Do we have space in the queue?
        int iNextWrite = ((pState->iBufQWrite + 1) % PBUF_POOL_SIZE);
        if(iNextWrite == pState->iBufQRead)
        {
            // The queue is full - discard the pbuf and return since we can't handle it just now.
            CONSOLE("%u: WARNING queue is full - discard data\n", pState->ucSerialPort);

            // Restore previous level of protection.
            SYS_ARCH_UNPROTECT(lev);

            // Free up the pbuf.  Note that we don't acknowledge receipt of
            // the data since we want it to be retransmitted later.
            pbuf_free(p);
        }
        else
        {
            // Place the pbuf in the circular queue.
            pState->pBufQ[pState->iBufQWrite] = p;

            // Increment the queue write index.
            pState->iBufQWrite = iNextWrite;

            // Restore previous level of protection.
            SYS_ARCH_UNPROTECT(lev);
        }
    }

    // If a null packet is passed in, close the connection.
    else if((err == ERR_OK) && (p == NULL))
    {
        CONSOLE("%u: received NULL packet - close connection\n", pState->ucSerialPort);

        // Clear out all of the TCP callbacks.
        tcp_arg(pcb, NULL);
        tcp_sent(pcb, NULL);
        tcp_recv(pcb, NULL);
        tcp_err(pcb, NULL);
        tcp_poll(pcb, NULL, 1);

        // Close the TCP connection.
        err = tcp_close(pcb);
        if (err != ERR_OK)
        {
           WARNING("%u: TelnetReceive.tcp_close failed, error=%d\n", pState->ucSerialPort, err);
           ErrorTCPOperation(pState->ucSerialPort, err, TCP_CLOSE_RECEIVE);
        }

        // Clear out any pbufs associated with this session.
        TelnetFreePbufs(pState);

        // Clear out the telnet session PCB.
        pState->pConnectPCB = NULL;

        StartConnection(pState->ucSerialPort);
    }

    CustomerSettings1_TelnetReceive(pState->ucSerialPort);

    return(ERR_OK);
}
Example #13
/*********************************************************************************************************
** Function name: __inetPing6Prepare
** Description  : build the ping packet
** Input        : icmp6hdrEcho  packet buffer
**                pip6addrDest  destination IP
**                iDataSize     payload size
**                pcNetif       network interface name (NULL means pick the interface automatically)
**                pusSeqRecv    sequence number the caller must match on receive
** Output       : ERROR
** Globals      :
** Callers      :
*********************************************************************************************************/
static INT  __inetPing6Prepare (struct icmp6_echo_hdr   *icmp6hdrEcho, 
                                struct ip6_addr         *pip6addrDest,
                                INT                      iDataSize, 
                                CPCHAR                   pcNetif,
                                UINT16                  *pusSeqRecv)
{
    static u16_t    usSeqNum = 1;
    REGISTER INT    i;
    
#if CHECKSUM_GEN_ICMP6
           INT      iError;
    struct ip6_addr ip6addrSrc;
    struct pbuf     pbuf;
    struct netif   *netif;
#endif                                                                  /*  CHECKSUM_GEN_ICMP6          */
    
    SYS_ARCH_DECL_PROTECT(x);

    icmp6hdrEcho->type = ICMP6_TYPE_EREQ;
    icmp6hdrEcho->code = 0;
    
    *pusSeqRecv = usSeqNum;
    
    icmp6hdrEcho->chksum = 0;
    icmp6hdrEcho->id     = 0xAFAF;                                      /*  ID                          */
    icmp6hdrEcho->seqno  = htons(usSeqNum);
    
    /*
     *  fill in the payload data
     */
    for(i = 0; i < iDataSize; i++) {
        ((PCHAR)icmp6hdrEcho)[sizeof(struct icmp6_echo_hdr) + i] = (CHAR)(i % 256);
    }
    
#if CHECKSUM_GEN_ICMP6
    LOCK_TCPIP_CORE();
    if (pcNetif) {
        netif = netif_find((PCHAR)pcNetif);
        if (netif == LW_NULL) {
            UNLOCK_TCPIP_CORE();
            fprintf(stderr, "Invalid interface.\n");
            return  (PX_ERROR);
        }
    } else {
        netif = LW_NULL;
    }
    iError = __inetPing6FindSrc(netif, pip6addrDest, &ip6addrSrc);
    UNLOCK_TCPIP_CORE();
    
    if (iError == -1) {
        fprintf(stderr, "You must determine net interface.\n");
        return  (PX_ERROR);
    
    } else if (iError == -2) {
        fprintf(stderr, "Unreachable destination.\n");
        return  (PX_ERROR);
    }
    
    pbuf.next    = LW_NULL;
    pbuf.payload = (void *)icmp6hdrEcho;
    pbuf.tot_len = (u16_t)(iDataSize + sizeof(struct icmp6_echo_hdr));
    pbuf.len     = pbuf.tot_len;
    pbuf.type    = PBUF_ROM;
    pbuf.flags   = 0;
    pbuf.ref     = 1;
    
    icmp6hdrEcho->chksum = ip6_chksum_pseudo(&pbuf, IP6_NEXTH_ICMP6, pbuf.tot_len,
                                             &ip6addrSrc, pip6addrDest);
#endif                                                                  /*  CHECKSUM_GEN_ICMP6          */
    
    SYS_ARCH_PROTECT(x);
    usSeqNum++;
    SYS_ARCH_UNPROTECT(x);
    
    return  (ERROR_NONE);
}