Example #1
/**
 * Send an IP packet to be received on the same netif (loopif-like).
 * The pbuf is simply copied and put on an internal queue which is drained
 * by netif_poll(), which hands each packet to netif->input.
 * In multithreaded mode, a call to netif_poll() is scheduled through
 * tcpip_callback(); in callback mode, the application must call
 * netif_poll() itself.
 *
 * @param netif the lwip network interface structure
 * @param p the (IP) packet to 'send'
 * @param ipaddr the ip address to send the packet to (not used)
 * @return ERR_OK if the packet has been sent
 *         ERR_MEM if the pbuf used to copy the packet couldn't be allocated
 */
err_t
netif_loop_output(struct netif *netif, struct pbuf *p,
       ip_addr_t *ipaddr)
{
  struct pbuf *r;
  err_t err;
  struct pbuf *last;
#if LWIP_LOOPBACK_MAX_PBUFS
  u8_t clen = 0;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */
  /* If we have a loopif, SNMP counters are adjusted for it,
   * if not they are adjusted for 'netif'. */
#if LWIP_SNMP
#if LWIP_HAVE_LOOPIF
  struct netif *stats_if = &loop_netif;
#else /* LWIP_HAVE_LOOPIF */
  struct netif *stats_if = netif;
#endif /* LWIP_HAVE_LOOPIF */
#endif /* LWIP_SNMP */
  SYS_ARCH_DECL_PROTECT(lev);
  LWIP_UNUSED_ARG(ipaddr);

  /* Allocate a new pbuf */
  r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM);
  if (r == NULL) {
    LINK_STATS_INC(link.memerr);
    LINK_STATS_INC(link.drop);
    snmp_inc_ifoutdiscards(stats_if);
    return ERR_MEM;
  }
#if LWIP_LOOPBACK_MAX_PBUFS
  clen = pbuf_clen(r);
  /* check for overflow or too many pbuf on queue */
  if(((netif->loop_cnt_current + clen) < netif->loop_cnt_current) ||
     ((netif->loop_cnt_current + clen) > LWIP_LOOPBACK_MAX_PBUFS)) {
    pbuf_free(r);
    LINK_STATS_INC(link.memerr);
    LINK_STATS_INC(link.drop);
    snmp_inc_ifoutdiscards(stats_if);
    return ERR_MEM;
  }
  netif->loop_cnt_current += clen;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */

  /* Copy the whole pbuf queue p into the single pbuf r */
  if ((err = pbuf_copy(r, p)) != ERR_OK) {
    pbuf_free(r);
    LINK_STATS_INC(link.memerr);
    LINK_STATS_INC(link.drop);
    snmp_inc_ifoutdiscards(stats_if);
    return err;
  }

  /* Put the packet on a linked list which gets emptied through calling
     netif_poll(). */

  /* let last point to the last pbuf in chain r */
  for (last = r; last->next != NULL; last = last->next);

  SYS_ARCH_PROTECT(lev);
  if(netif->loop_first != NULL) {
    LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL);
    netif->loop_last->next = r;
    netif->loop_last = last;
  } else {
    netif->loop_first = r;
    netif->loop_last = last;
  }
  SYS_ARCH_UNPROTECT(lev);

  LINK_STATS_INC(link.xmit);
  snmp_add_ifoutoctets(stats_if, p->tot_len);
  snmp_inc_ifoutucastpkts(stats_if);

#if LWIP_NETIF_LOOPBACK_MULTITHREADING
  /* For multithreading environment, schedule a call to netif_poll */
  tcpip_callback((tcpip_callback_fn)netif_poll, netif);
#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */

  return ERR_OK;
}
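
/* Companion sketch (illustration, not part of the code above): a minimal
 * netif_poll() as referenced by the comment on netif_loop_output(). It
 * drains netif->loop_first and feeds each queued packet to netif->input.
 * Assumption: a packet ends at the pbuf where tot_len == len, which holds
 * for the chains queued above; SNMP counters and asserts are omitted. */
void
netif_poll_sketch(struct netif *netif)
{
  struct pbuf *in;
  SYS_ARCH_DECL_PROTECT(lev);

  do {
    /* pop one whole packet from the queue under protection */
    SYS_ARCH_PROTECT(lev);
    in = netif->loop_first;
    if (in != NULL) {
      struct pbuf *in_end = in;
      /* advance to the last pbuf of this packet */
      while (in_end->tot_len != in_end->len) {
        in_end = in_end->next;
      }
      netif->loop_first = in_end->next;
      in_end->next = NULL;
      if (netif->loop_first == NULL) {
        netif->loop_last = NULL;
      }
#if LWIP_LOOPBACK_MAX_PBUFS
      netif->loop_cnt_current -= pbuf_clen(in);
#endif /* LWIP_LOOPBACK_MAX_PBUFS */
    }
    SYS_ARCH_UNPROTECT(lev);

    if (in != NULL) {
      /* hand the packet to the stack; free it ourselves on error */
      if (netif->input(in, netif) != ERR_OK) {
        pbuf_free(in);
      }
    }
  } while (in != NULL);
}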
Example #2
/**
 * Enqueue SYN or FIN for transmission.
 *
 * Called by tcp_connect(), tcp_listen_input(), and tcp_send_ctrl().
 *
 * @param pcb Protocol control block for the TCP connection.
 * @param flags TCP header flags to set in the outgoing segment (TCP_SYN or TCP_FIN).
 * @return ERR_OK if the flags were enqueued, another err_t on error.
 */
err_t
tcp_enqueue_flags(struct tcp_pcb *pcb, u8_t flags)
{
  struct pbuf *p;
  struct tcp_seg *seg;
  u8_t optflags = 0;
  u8_t optlen = 0;

  LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: queuelen: %"U16_F"\n", (u16_t)pcb->snd_queuelen));

  LWIP_ASSERT("tcp_enqueue_flags: need either TCP_SYN or TCP_FIN in flags (programmer violates API)",
              (flags & (TCP_SYN | TCP_FIN)) != 0);

  /* check for configured max queuelen and possible overflow (FIN flag should always come through!)*/
  if (((pcb->snd_queuelen >= pcb->max_unsent_len) || (pcb->snd_queuelen > TCP_SNDQUEUELEN_OVERFLOW)) &&
      ((flags & TCP_FIN) == 0)) {
    LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 3, ("tcp_enqueue_flags: too long queue %"U16_F" (max %"U16_F")\n",
                                       pcb->snd_queuelen, pcb->max_unsent_len));
    TCP_STATS_INC(tcp.memerr);
    pcb->flags |= TF_NAGLEMEMERR;
    return ERR_MEM;
  }

  if (flags & TCP_SYN) {
    optflags = TF_SEG_OPTS_MSS;
    if (enable_wnd_scale) {
      optflags |= TF_SEG_OPTS_WNDSCALE;
    }
#if LWIP_TCP_TIMESTAMPS
    if (pcb->enable_ts_opt && !(flags & TCP_ACK)) {
      /* Announce the initial timestamp only on the connecting side;
         the accepting side replies accordingly. */
      optflags |= TF_SEG_OPTS_TS;
    }
#endif /* LWIP_TCP_TIMESTAMPS */
  }
#if LWIP_TCP_TIMESTAMPS
  if ((pcb->flags & TF_TIMESTAMP)) {
    optflags |= TF_SEG_OPTS_TS;
  }
#endif /* LWIP_TCP_TIMESTAMPS */
  optlen = LWIP_TCP_OPT_LENGTH(optflags);

  /* tcp_enqueue_flags is always called with either SYN or FIN in flags.
   * We need one available snd_buf byte to do that.
   * This means we can't send FIN while snd_buf==0. A better fix would be to
   * not include SYN and FIN sequence numbers in the snd_buf count. */

  /*if (pcb->snd_buf == 0) {
    LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 3, ("tcp_enqueue_flags: no send buffer available\n"));
    TCP_STATS_INC(tcp.memerr);
    return ERR_MEM;
  }*/ /* to make snd_buf count SYN/FIN again, re-enable the sections marked SND_BUF_FOR_SYN_FIN */

  /* Allocate pbuf with room for TCP header + options */
  if ((p = tcp_tx_pbuf_alloc(pcb, optlen, PBUF_RAM)) == NULL) {
    pcb->flags |= TF_NAGLEMEMERR;
    TCP_STATS_INC(tcp.memerr);
    return ERR_MEM;
  }
  LWIP_ASSERT("tcp_enqueue_flags: check that first pbuf can hold optlen",
              (p->len >= optlen));

  /* Allocate memory for tcp_seg, and fill in fields. */
  if ((seg = tcp_create_segment(pcb, p, flags, pcb->snd_lbb, optflags)) == NULL) {
    pcb->flags |= TF_NAGLEMEMERR;
    TCP_STATS_INC(tcp.memerr);
    return ERR_MEM;
  }
  LWIP_ASSERT("tcp_enqueue_flags: invalid segment length", seg->len == 0);

  LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE,
              ("tcp_enqueue_flags: queueing %"U32_F":%"U32_F" (0x%"X16_F")\n",
               ntohl(seg->tcphdr->seqno),
               ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg),
               (u16_t)flags));

  /* Now append seg to pcb->unsent queue */
  if (pcb->unsent == NULL) {
    pcb->unsent = seg;
  } else {
    struct tcp_seg *useg;
    for (useg = pcb->unsent; useg->next != NULL; useg = useg->next);
    useg->next = seg;
  }
#if TCP_OVERSIZE
  /* The new unsent tail has no space */
  pcb->unsent_oversize = 0;
#endif /* TCP_OVERSIZE */

  /* SYN and FIN bump the sequence number */
  if ((flags & TCP_SYN) || (flags & TCP_FIN)) {
    pcb->snd_lbb++;
    /* optlen does not influence snd_buf */
    // pcb->snd_buf--; SND_BUF_FOR_SYN_FIN
  }
  if (flags & TCP_FIN) {
    pcb->flags |= TF_FIN;
  }

  /* update number of segments on the queues */
  pcb->snd_queuelen += pbuf_clen(seg->p);
  LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_enqueue_flags: %"U16_F" (after enqueued)\n", (u16_t)pcb->snd_queuelen));
  if (pcb->snd_queuelen != 0) {
    LWIP_ASSERT("tcp_enqueue_flags: invalid queue length",
      pcb->unacked != NULL || pcb->unsent != NULL);
  }

  return ERR_OK;
}
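
/* Hedged usage sketch (illustration, not part of the stack): how a close
 * path would use tcp_enqueue_flags() above. tcp_output() is the standard
 * lwIP call that transmits whatever has been queued; error handling is
 * reduced to the minimum. */
static err_t
send_fin_sketch(struct tcp_pcb *pcb)
{
  err_t err = tcp_enqueue_flags(pcb, TCP_FIN);
  if (err == ERR_OK) {
    err = tcp_output(pcb);
  }
  return err;
}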
Example #3
/**
 * Send an IP packet to be received on the same netif (loopif-like).
 * The pbuf is simply copied and put on an internal queue which is drained
 * by netif_poll(), which hands each packet to netif->input.
 * In multithreaded mode, a call to netif_poll() is scheduled through
 * tcpip_callback(); in callback mode, the application must call
 * netif_poll() itself.
 *
 * @param netif the lwip network interface structure
 * @param p the (IP) packet to 'send'
 * @param ipaddr the ip address to send the packet to (not used)
 * @return ERR_OK if the packet has been sent
 *         ERR_MEM if the pbuf used to copy the packet couldn't be allocated
 */
err_t
netif_loop_output(struct netif *netif, struct pbuf *p,
       struct ip_addr *ipaddr)
{
  struct pbuf *r;
  err_t err;
  struct pbuf *last;
#if LWIP_LOOPBACK_MAX_PBUFS
  u8_t clen = 0;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */
  SYS_ARCH_DECL_PROTECT(lev);
  LWIP_UNUSED_ARG(ipaddr);

  /* Allocate a new pbuf */
  r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM);
  if (r == NULL) {
    return ERR_MEM;
  }
#if LWIP_LOOPBACK_MAX_PBUFS
  clen = pbuf_clen(r);
  /* check for overflow or too many pbuf on queue */
  if(((netif->loop_cnt_current + clen) < netif->loop_cnt_current) ||
    ((netif->loop_cnt_current + clen) > LWIP_LOOPBACK_MAX_PBUFS)) {
      pbuf_free(r);
      r = NULL;
      return ERR_MEM;
  }
  netif->loop_cnt_current += clen;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */

  /* Copy the whole pbuf queue p into the single pbuf r */
  if ((err = pbuf_copy(r, p)) != ERR_OK) {
    pbuf_free(r);
    r = NULL;
    return err;
  }

  /* Put the packet on a linked list which gets emptied through calling
     netif_poll(). */

  /* let last point to the last pbuf in chain r */
  for (last = r; last->next != NULL; last = last->next);

  SYS_ARCH_PROTECT(lev);
  if(netif->loop_first != NULL) {
    LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL);
    netif->loop_last->next = r;
    netif->loop_last = last;
  } else {
    netif->loop_first = r;
    netif->loop_last = last;
  }
  SYS_ARCH_UNPROTECT(lev);

#if LWIP_NETIF_LOOPBACK_MULTITHREADING
  /* For multithreading environment, schedule a call to netif_poll */
  tcpip_callback((void (*)(void *))(netif_poll), netif);
#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */

  return ERR_OK;
}
Example #4
/**
 * Reassembles incoming IP fragments into an IP datagram.
 *
 * @param p points to a pbuf chain of the fragment
 * @return NULL if reassembly is incomplete, the reassembled datagram (pbuf chain) otherwise
 */
struct pbuf *
ip4_reass(struct pbuf *p)
{
  struct pbuf *r;
  struct ip_hdr *fraghdr;
  struct ip_reassdata *ipr;
  struct ip_reass_helper *iprh;
  u16_t offset, len;
  u8_t clen;

  IPFRAG_STATS_INC(ip_frag.recv);
  snmp_inc_ipreasmreqds();

  fraghdr = (struct ip_hdr*)p->payload;

  if ((IPH_HL(fraghdr) * 4) != IP_HLEN) {
    LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: IP options currently not supported!\n"));
    IPFRAG_STATS_INC(ip_frag.err);
    goto nullreturn;
  }

  offset = (ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) * 8;
  len = ntohs(IPH_LEN(fraghdr)) - IPH_HL(fraghdr) * 4;

  /* Check if we are allowed to enqueue more datagrams. */
  clen = pbuf_clen(p);
  if ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) {
#if IP_REASS_FREE_OLDEST
    if (!ip_reass_remove_oldest_datagram(fraghdr, clen) ||
        ((ip_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS))
#endif /* IP_REASS_FREE_OLDEST */
    {
      /* No datagram could be freed and still too many pbufs enqueued */
      LWIP_DEBUGF(IP_REASS_DEBUG,("ip4_reass: Overflow condition: pbufct=%d, clen=%d, MAX=%d\n",
        ip_reass_pbufcount, clen, IP_REASS_MAX_PBUFS));
      IPFRAG_STATS_INC(ip_frag.memerr);
      /* @todo: send ICMP time exceeded here? */
      /* drop this pbuf */
      goto nullreturn;
    }
  }

  /* Look for the datagram the fragment belongs to in the current datagram queue,
   * remembering the previous in the queue for later dequeueing. */
  for (ipr = reassdatagrams; ipr != NULL; ipr = ipr->next) {
    /* Check if the incoming fragment matches the one currently present
       in the reassembly buffer. If so, we proceed with copying the
       fragment into the buffer. */
    if (IP_ADDRESSES_AND_ID_MATCH(&ipr->iphdr, fraghdr)) {
      LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: matching previous fragment ID=%"X16_F"\n",
        ntohs(IPH_ID(fraghdr))));
      IPFRAG_STATS_INC(ip_frag.cachehit);
      break;
    }
  }

  if (ipr == NULL) {
  /* Enqueue a new datagram into the datagram queue */
    ipr = ip_reass_enqueue_new_datagram(fraghdr, clen);
    /* Bail if unable to enqueue */
    if(ipr == NULL) {
      goto nullreturn;
    }
  } else {
    if (((ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) == 0) && 
      ((ntohs(IPH_OFFSET(&ipr->iphdr)) & IP_OFFMASK) != 0)) {
      /* ipr->iphdr is not the header from the first fragment, but fraghdr is
       * -> copy fraghdr into ipr->iphdr since we want to have the header
       * of the first fragment (for ICMP time exceeded and later, for copying
       * all options, if supported)*/
      SMEMCPY(&ipr->iphdr, fraghdr, IP_HLEN);
    }
  }
  /* Track the number of pbufs currently 'in-flight', in order to limit
     the number of fragments that may be enqueued at any one time */
  ip_reass_pbufcount += clen;

  /* At this point, we have either created a new entry or are pointing
   * to an existing one */

  /* check for 'no more fragments', and update queue entry*/
  if ((IPH_OFFSET(fraghdr) & PP_NTOHS(IP_MF)) == 0) {
    ipr->flags |= IP_REASS_FLAG_LASTFRAG;
    ipr->datagram_len = offset + len;
    LWIP_DEBUGF(IP_REASS_DEBUG,
     ("ip4_reass: last fragment seen, total len %"S16_F"\n",
      ipr->datagram_len));
  }
  /* find the right place to insert this pbuf */
  /* @todo: trim pbufs if fragments are overlapping */
  if (ip_reass_chain_frag_into_datagram_and_validate(ipr, p)) {
    struct ip_reassdata *ipr_prev;
    /* the totally last fragment (flag more fragments = 0) was received at least
     * once AND all fragments are received */
    ipr->datagram_len += IP_HLEN;

    /* save the second pbuf before copying the header over the pointer */
    r = ((struct ip_reass_helper*)ipr->p->payload)->next_pbuf;

    /* copy the original ip header back to the first pbuf */
    fraghdr = (struct ip_hdr*)(ipr->p->payload);
    SMEMCPY(fraghdr, &ipr->iphdr, IP_HLEN);
    IPH_LEN_SET(fraghdr, htons(ipr->datagram_len));
    IPH_OFFSET_SET(fraghdr, 0);
    IPH_CHKSUM_SET(fraghdr, 0);
    /* @todo: do we need to set/calculate the correct checksum? */
#if CHECKSUM_GEN_IP
    IF__NETIF_CHECKSUM_ENABLED(ip_current_input_netif(), NETIF_CHECKSUM_GEN_IP) {
      IPH_CHKSUM_SET(fraghdr, inet_chksum(fraghdr, IP_HLEN));
    }
#endif /* CHECKSUM_GEN_IP */

    p = ipr->p;

    /* chain together the pbufs contained within the reass_data list. */
    while(r != NULL) {
      iprh = (struct ip_reass_helper*)r->payload;

      /* hide the ip header for every succeeding fragment */
      pbuf_header(r, -IP_HLEN);
      pbuf_cat(p, r);
      r = iprh->next_pbuf;
    }

    /* find the previous entry in the linked list */
    if (ipr == reassdatagrams) {
      ipr_prev = NULL;
    } else {
      for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) {
        if (ipr_prev->next == ipr) {
          break;
        }
      }
    }

    /* release the resources allocated for the fragment queue entry */
    ip_reass_dequeue_datagram(ipr, ipr_prev);

    /* and adjust the number of pbufs currently queued for reassembly. */
    ip_reass_pbufcount -= pbuf_clen(p);

    /* Return the pbuf chain */
    return p;
  }
  /* the datagram is not (yet) fully reassembled */
  return NULL;

nullreturn:
  LWIP_DEBUGF(IP_REASS_DEBUG, ("ip4_reass: nullreturn\n"));
  IPFRAG_STATS_INC(ip_frag.drop);
  pbuf_free(p);
  return NULL;
}
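
/* For reference, a sketch of the helper struct that ip4_reass() overlays
 * onto each fragment's (no longer needed) IP header; the layout follows
 * lwIP's ip4_frag.h with packing macros omitted. The assert
 * "sizeof(struct ip_reass_helper) <= IP_HLEN" in the chaining function
 * (Example #7) documents the size contract. */
struct ip_reass_helper {
  struct pbuf *next_pbuf; /* next fragment's pbuf, in offset order */
  u16_t start;            /* offset of this fragment's first payload byte */
  u16_t end;              /* offset one past this fragment's last byte */
};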
Example #5
/**
 * Create a TCP segment with prefilled header.
 *
 * Called by tcp_write and tcp_enqueue_flags.
 *
 * @param pcb Protocol control block for the TCP connection.
 * @param p pbuf that is used to hold the TCP header.
 * @param flags TCP flags for header.
 * @param seqno TCP sequence number of this packet
 * @param optflags options to include in TCP header
 * @return a new tcp_seg pointing to p, or NULL.
 * The TCP header is filled in except ackno and wnd.
 * p is freed on failure.
 */
static struct tcp_seg *
tcp_create_segment(struct tcp_pcb *pcb, struct pbuf *p, u8_t flags, u32_t seqno, u8_t optflags)
{
  struct tcp_seg *seg;
  u8_t optlen = LWIP_TCP_OPT_LENGTH(optflags);

#if LWIP_3RD_PARTY_BUFS
  if ((seg = external_tcp_seg_alloc(pcb)) == NULL) {
#else
  if ((seg = (struct tcp_seg *)memp_malloc(MEMP_TCP_SEG)) == NULL) {
#endif
    LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2, ("tcp_create_segment: no memory.\n"));
    tcp_tx_pbuf_free(pcb, p);
    return NULL;
  }
  seg->flags = optflags;
  seg->next = NULL;
  seg->p = p;
  seg->dataptr = p->payload;
  seg->len = p->tot_len - optlen;
#if TCP_OVERSIZE_DBGCHECK
  seg->oversize_left = 0;
#endif /* TCP_OVERSIZE_DBGCHECK */
#if TCP_CHECKSUM_ON_COPY
  seg->chksum = 0;
  seg->chksum_swapped = 0;
  /* check optflags */
  LWIP_ASSERT("invalid optflags passed: TF_SEG_DATA_CHECKSUMMED",
              (optflags & TF_SEG_DATA_CHECKSUMMED) == 0);
#endif /* TCP_CHECKSUM_ON_COPY */
  seg->seqno = seqno;

  /* build TCP header */
  if (pbuf_header(p, TCP_HLEN)) {
    LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2, ("tcp_create_segment: no room for TCP header in pbuf.\n"));
    TCP_STATS_INC(tcp.err);
    tcp_tx_seg_free(pcb, seg);
    return NULL;
  }
  seg->tcphdr = (struct tcp_hdr *)seg->p->payload;
  seg->tcphdr->src = htons(pcb->local_port);
  seg->tcphdr->dest = htons(pcb->remote_port);
  seg->tcphdr->seqno = htonl(seqno);
  /* ackno is set in tcp_output */
  TCPH_HDRLEN_FLAGS_SET(seg->tcphdr, (5 + optlen / 4), flags);
  /* wnd and chksum are set in tcp_output */
  seg->tcphdr->urgp = 0;
  return seg;
} 
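
/* Note on the header-length nibble set by TCPH_HDRLEN_FLAGS_SET above: it
 * counts 32-bit words, i.e. 5 words (20 bytes) of bare header plus
 * optlen / 4 option words. For example, a SYN carrying only a 4-byte MSS
 * option gives 5 + 1 = 6 words, a 24-byte TCP header. */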

/**
 * Allocate a PBUF_RAM pbuf, perhaps with extra space at the end.
 *
 * This function is like pbuf_alloc(layer, length, PBUF_RAM) except
 * there may be extra bytes available at the end.
 *
 * @param length size of the pbuf's payload.
 * @param max_length maximum usable size of payload+oversize.
 * @param oversize pointer to a u16_t that will receive the number of usable tail bytes.
 * @param pcb The TCP connection that will enqueue the pbuf.
 * @param apiflags API flags given to tcp_write.
 * @param first_seg true when this pbuf will be used in the first enqueued segment.
 * @return the allocated pbuf, or NULL on allocation failure.
 */
static struct pbuf *
tcp_pbuf_prealloc(u16_t length, u16_t max_length,
                  u16_t *oversize, struct tcp_pcb *pcb, u8_t apiflags,
                  u8_t first_seg)
{
  struct pbuf *p;
  u16_t alloc = length;

  if (length < max_length) {
    /* Should we allocate an oversized pbuf, or just the minimum
     * length required? If tcp_write is going to be called again
     * before this segment is transmitted, we want the oversized
     * buffer. If the segment will be transmitted immediately, we can
     * save memory by allocating only length. We use a simple
     * heuristic based on the following information:
     *
     * Did the user set TCP_WRITE_FLAG_MORE?
     *
     * Will the Nagle algorithm defer transmission of this segment?
     */
    if ((apiflags & TCP_WRITE_FLAG_MORE) ||
        (!(pcb->flags & TF_NODELAY) &&
         (!first_seg ||
          pcb->unsent != NULL ||
          pcb->unacked != NULL))) {
          alloc = LWIP_MIN(max_length, LWIP_MEM_ALIGN_SIZE(length + pcb->tcp_oversize_val));
    }
  }
  p = tcp_tx_pbuf_alloc(pcb, alloc, PBUF_RAM);
  if (p == NULL) {
    return NULL;
  }
  LWIP_ASSERT("need unchained pbuf", p->next == NULL);
  *oversize = p->len - length;
  /* trim p->len to the currently used size */
  p->len = p->tot_len = length;
  return p;
}
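
/* Worked example for the heuristic above (values assumed for illustration):
 * with length = 536, max_length = 1460 and pcb->tcp_oversize_val = 1460, a
 * segment expected to be extended by a later tcp_write() is allocated as
 *   alloc = LWIP_MIN(1460, LWIP_MEM_ALIGN_SIZE(536 + 1460)) = 1460
 * so the pbuf is trimmed to len = 536 and *oversize = 924 tail bytes remain
 * for the next call to fill in place. */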

/** Checks if tcp_write is allowed or not (checks state, snd_buf and snd_queuelen).
 *
 * @param pcb the tcp pcb to check for
 * @param len length of data to send (checked against snd_buf)
 * @return ERR_OK if tcp_write is allowed to proceed, another err_t otherwise
 */
static err_t
tcp_write_checks(struct tcp_pcb *pcb, u32_t len)
{
  /* connection is in invalid state for data transmission? */
  if ((get_tcp_state(pcb) != ESTABLISHED) &&
      (get_tcp_state(pcb) != CLOSE_WAIT) &&
      (get_tcp_state(pcb) != SYN_SENT) &&
      (get_tcp_state(pcb) != SYN_RCVD)) {
    LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_STATE | LWIP_DBG_LEVEL_SEVERE, ("tcp_write() called in invalid state\n"));
    return ERR_CONN;
  } else if (len == 0) {
    return ERR_OK;
  }

  /* fail on too much data */
  if (len > pcb->snd_buf) {
    LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 3, ("tcp_write: too much data (len=%"U32_F" > snd_buf=%"U32_F")\n",
      len, pcb->snd_buf));
    pcb->flags |= TF_NAGLEMEMERR;
    return ERR_MEM;
  }
  LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: queuelen: %"U32_F"\n", (u32_t)pcb->snd_queuelen));

  /* If total number of pbufs on the unsent/unacked queues exceeds the
   * configured maximum, return an error */
  /* check for configured max queuelen and possible overflow */
  if ((pcb->snd_queuelen >= pcb->max_unsent_len) || (pcb->snd_queuelen > TCP_SNDQUEUELEN_OVERFLOW)) {
    LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 3, ("tcp_write: too long queue %"U32_F" (max %"U32_F")\n",
      pcb->snd_queuelen, pcb->max_unsent_len));
    TCP_STATS_INC(tcp.memerr);
    pcb->flags |= TF_NAGLEMEMERR;
    return ERR_MEM;
  }
  if (pcb->snd_queuelen == 0) {
    LWIP_ASSERT("tcp_write: no pbufs on queue => both queues empty",
      pcb->unacked == NULL && pcb->unsent == NULL);
  }
  return ERR_OK;
}

/**
 * Write data for sending (but does not send it immediately).
 *
 * It waits in the expectation of more data being sent soon (as
 * it can send them more efficiently by combining them together).
 * To prompt the system to send data now, call tcp_output() after
 * calling tcp_write().
 *
 * @param pcb Protocol control block for the TCP connection to enqueue data for.
 * @param arg Pointer to the data to be enqueued for sending.
 * @param len Data length in bytes
 * @param apiflags combination of following flags :
 * - TCP_WRITE_FLAG_COPY (0x01) data will be copied into memory belonging to the stack
 * - TCP_WRITE_FLAG_MORE (0x02) for TCP connection, PSH flag will be set on last segment sent,
 * @return ERR_OK if enqueued, another err_t on error
 */
err_t
tcp_write(struct tcp_pcb *pcb, const void *arg, u32_t len, u8_t apiflags)
{
  struct pbuf *concat_p = NULL;
  struct tcp_seg *seg = NULL, *prev_seg = NULL, *queue = NULL;
  u32_t pos = 0; /* position in 'arg' data */
  u32_t queuelen;
  u8_t optlen = 0;
  u8_t optflags = 0;
#if TCP_OVERSIZE
  u16_t oversize = 0;
  u16_t oversize_used = 0;
#endif /* TCP_OVERSIZE */
#if TCP_CHECKSUM_ON_COPY
  u16_t concat_chksum = 0;
  u8_t concat_chksum_swapped = 0;
  u16_t concat_chksummed = 0;
#endif /* TCP_CHECKSUM_ON_COPY */
  err_t err;
  /* don't allocate segments bigger than half the maximum window we ever received */
  u16_t mss_local = LWIP_MIN(pcb->mss, pcb->snd_wnd_max/2);
  mss_local = mss_local ? mss_local : pcb->mss;

  int byte_queued = pcb->snd_nxt - pcb->lastack;
  if (len < pcb->mss) {
    pcb->snd_sml_add = (pcb->unacked ? pcb->unacked->len : 0) + byte_queued;
  }

#if LWIP_NETIF_TX_SINGLE_PBUF
  /* Always copy to try to create single pbufs for TX */
  apiflags |= TCP_WRITE_FLAG_COPY;
#endif /* LWIP_NETIF_TX_SINGLE_PBUF */

  LWIP_DEBUGF(TCP_OUTPUT_DEBUG, ("tcp_write(pcb=%p, data=%p, len=%"U16_F", apiflags=%"U16_F")\n",
    (void *)pcb, arg, len, (u16_t)apiflags));
  LWIP_ERROR("tcp_write: arg == NULL (programmer violates API)", 
             arg != NULL, return ERR_ARG;);

  err = tcp_write_checks(pcb, len);
  if (err != ERR_OK) {
    return err;
  }
  queuelen = pcb->snd_queuelen;

#if LWIP_TCP_TIMESTAMPS
  if ((pcb->flags & TF_TIMESTAMP)) {
    optflags = TF_SEG_OPTS_TS;
    /* ensure that segments can hold at least one data byte... */
    mss_local = LWIP_MAX(mss_local, LWIP_TCP_OPT_LEN_TS + 1);
  }
#endif /* LWIP_TCP_TIMESTAMPS */
  optlen = LWIP_TCP_OPT_LENGTH( optflags );

  /*
   * TCP segmentation is done in three phases with increasing complexity:
   *
   * 1. Copy data directly into an oversized pbuf.
   * 2. Chain a new pbuf to the end of pcb->unsent.
   * 3. Create new segments.
   *
   * We may run out of memory at any point. In that case we must
   * return ERR_MEM and not change anything in pcb. Therefore, all
   * changes are recorded in local variables and committed at the end
   * of the function. Some pcb fields are maintained in local copies:
   *
   * queuelen = pcb->snd_queuelen
   * oversize = pcb->unsent_oversize
   *
   * These variables are set consistently by the phases:
   *
   * seg points to the last segment tampered with.
   *
   * pos records progress as data is segmented.
   */

  /* Find the tail of the unsent queue. */
  if (pcb->unsent != NULL) {
    u16_t space;
    u16_t unsent_optlen;

    if (!pcb->last_unsent || pcb->last_unsent->next) {
      /* pcb->last_unsent may be stale; walk to the tail of the unsent queue */
      for (pcb->last_unsent = pcb->unsent; pcb->last_unsent->next != NULL;
           pcb->last_unsent = pcb->last_unsent->next);
    }
    }
    /* Usable space at the end of the last unsent segment */
    unsent_optlen = LWIP_TCP_OPT_LENGTH(pcb->last_unsent->flags);
    LWIP_ASSERT("mss_local is too small", mss_local >= pcb->last_unsent->len + unsent_optlen);
    space = mss_local - (pcb->last_unsent->len + unsent_optlen);

    /*
     * Phase 1: Copy data directly into an oversized pbuf.
     *
     * The number of bytes copied is recorded in the oversize_used
     * variable. The actual copying is done at the bottom of the
     * function.
     */
#if TCP_OVERSIZE
#if TCP_OVERSIZE_DBGCHECK
    /* check that pcb->unsent_oversize matches last_unsent->unsent_oversize */
    LWIP_ASSERT("unsent_oversize mismatch (pcb vs. last_unsent)",
                pcb->unsent_oversize == pcb->last_unsent->oversize_left);
#endif /* TCP_OVERSIZE_DBGCHECK */
    oversize = pcb->unsent_oversize;
    if (oversize > 0) {
      LWIP_ASSERT("inconsistent oversize vs. space", oversize_used <= space);
      seg = pcb->last_unsent;
      oversize_used = oversize < len ? oversize : len;
      pos += oversize_used;
      oversize -= oversize_used;
      space -= oversize_used;
    }
    /* now we are either finished or oversize is zero */
    LWIP_ASSERT("inconsistend oversize vs. len", (oversize == 0) || (pos == len));
#endif /* TCP_OVERSIZE */

    /*
     * Phase 2: Chain a new pbuf to the end of pcb->unsent.
     *
     * We don't extend segments containing SYN/FIN flags or options
     * (len==0). The new pbuf is kept in concat_p and pbuf_cat'ed at
     * the end.
     */
    if ((pos < len) && (space > 0) && (pcb->last_unsent->len > 0)) {
      u16_t seglen = space < len - pos ? space : len - pos;
      seg = pcb->last_unsent;

      /* Create a pbuf with a copy or reference to seglen bytes. We
       * can use PBUF_RAW here since the data appears in the middle of
       * a segment. A header will never be prepended. */
      if (apiflags & TCP_WRITE_FLAG_COPY) {
        /* Data is copied */
        if ((concat_p = tcp_pbuf_prealloc(seglen, space, &oversize, pcb, apiflags, 1)) == NULL) {
          LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2,
                      ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n",
                       seglen));
          goto memerr;
        }
#if TCP_OVERSIZE_DBGCHECK
        pcb->last_unsent->oversize_left += oversize;
#endif /* TCP_OVERSIZE_DBGCHECK */
        TCP_DATA_COPY2(concat_p->payload, (u8_t*)arg + pos, seglen, &concat_chksum, &concat_chksum_swapped);
#if TCP_CHECKSUM_ON_COPY
        concat_chksummed += seglen;
#endif /* TCP_CHECKSUM_ON_COPY */
      } else {
        LWIP_ASSERT("tcp_write: non-copy path is not supported", 0);
        goto memerr;
      }

      pos += seglen;
      queuelen += pbuf_clen(concat_p);
    }
  } else {
#if TCP_OVERSIZE
    pcb->last_unsent = NULL;
    LWIP_ASSERT("unsent_oversize mismatch (pcb->unsent is NULL)",
                pcb->unsent_oversize == 0);
#endif /* TCP_OVERSIZE */
  }

  /*
   * Phase 3: Create new segments.
   *
   * The new segments are chained together in the local 'queue'
   * variable, ready to be appended to pcb->unsent.
   */
  while (pos < len) {
    struct pbuf *p;
    u32_t left = len - pos;
    u16_t max_len = mss_local - optlen;
    u16_t seglen = left > max_len ? max_len : left;
#if TCP_CHECKSUM_ON_COPY
    /* per-segment checksum accumulators used by TCP_DATA_COPY2 below;
       these were referenced but never declared */
    u16_t chksum = 0;
    u8_t chksum_swapped = 0;
#endif /* TCP_CHECKSUM_ON_COPY */

    if (apiflags & TCP_WRITE_FLAG_COPY) {
      /* If copy is set, memory should be allocated and data copied
       * into pbuf */
      if ((p = tcp_pbuf_prealloc(seglen + optlen, mss_local, &oversize, pcb, apiflags, queue == NULL)) == NULL) {
        LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2, ("tcp_write : could not allocate memory for pbuf copy size %"U16_F"\n", seglen));
        goto memerr;
      }
      LWIP_ASSERT("tcp_write: check that first pbuf can hold the complete seglen",
                  (p->len >= seglen));
      TCP_DATA_COPY2((char *)p->payload + optlen, (u8_t*)arg + pos, seglen, &chksum, &chksum_swapped);
    } else {
      LWIP_ASSERT("tcp_write: non-copy path is not supported", 0);
      goto memerr;
    }

    queuelen += pbuf_clen(p);

    /* Now that there are more segments queued, we check again if the
     * length of the queue exceeds the configured maximum or
     * overflows. */
    if ((queuelen > pcb->max_unsent_len) || (queuelen > TCP_SNDQUEUELEN_OVERFLOW)) {
      LWIP_DEBUGF(TCP_OUTPUT_DEBUG | 2, ("tcp_write: queue too long %"U32_F" (%"U32_F")\n", queuelen, pcb->max_unsent_len));
      tcp_tx_pbuf_free(pcb, p);
      goto memerr;
    }

    if ((seg = tcp_create_segment(pcb, p, 0, pcb->snd_lbb + pos, optflags)) == NULL) {
      goto memerr;
    }
#if TCP_OVERSIZE_DBGCHECK
    seg->oversize_left = oversize;
#endif /* TCP_OVERSIZE_DBGCHECK */
#if TCP_CHECKSUM_ON_COPY
    seg->chksum = chksum;
    seg->chksum_swapped = chksum_swapped;
    seg->flags |= TF_SEG_DATA_CHECKSUMMED;
#endif /* TCP_CHECKSUM_ON_COPY */
    /* Fix dataptr for the nocopy case */
    if ((apiflags & TCP_WRITE_FLAG_COPY) == 0) {
      seg->dataptr = (u8_t*)arg + pos;
    }

    /* first segment of to-be-queued data? */
    if (queue == NULL) {
      queue = seg;
    } else {
      /* Attach the segment to the end of the queued segments */
      LWIP_ASSERT("prev_seg != NULL", prev_seg != NULL);
      prev_seg->next = seg;
    }
    /* remember last segment of to-be-queued data for next iteration */
    prev_seg = seg;

    LWIP_DEBUGF(TCP_OUTPUT_DEBUG | LWIP_DBG_TRACE, ("tcp_write: queueing %"U32_F":%"U32_F"\n",
      ntohl(seg->tcphdr->seqno),
      ntohl(seg->tcphdr->seqno) + TCP_TCPLEN(seg)));

    pos += seglen;
  }

  /*
   * All three segmentation phases were successful. We can commit the
   * transaction.
   */

  /*
   * Phase 1: If data has been added to the preallocated tail of
   * last_unsent, we update the length fields of the pbuf chain.
   */
#if TCP_OVERSIZE
  if (oversize_used > 0) {
    struct pbuf *p;
    /* Bump tot_len of whole chain, len of tail */
    for (p = pcb->last_unsent->p; p; p = p->next) {
      p->tot_len += oversize_used;
      if (p->next == NULL) {
        TCP_DATA_COPY((char *)p->payload + p->len, arg, oversize_used, pcb->last_unsent);
        p->len += oversize_used;
      }
    }
    pcb->last_unsent->len += oversize_used;
#if TCP_OVERSIZE_DBGCHECK
    pcb->last_unsent->oversize_left -= oversize_used;
#endif /* TCP_OVERSIZE_DBGCHECK */
  }
  pcb->unsent_oversize = oversize;
#endif /* TCP_OVERSIZE */

  /*
   * Phase 2: concat_p can be concatenated onto pcb->last_unsent->p
   */
  if (concat_p != NULL) {
    LWIP_ASSERT("tcp_write: cannot concatenate when pcb->unsent is empty",
      (pcb->last_unsent != NULL));
    pbuf_cat(pcb->last_unsent->p, concat_p);
    pcb->last_unsent->len += concat_p->tot_len;
#if TCP_CHECKSUM_ON_COPY
    if (concat_chksummed) {
      tcp_seg_add_chksum(concat_chksum, concat_chksummed, &pcb->last_unsent->chksum,
        &pcb->last_unsent->chksum_swapped);
      pcb->last_unsent->flags |= TF_SEG_DATA_CHECKSUMMED;
    }
#endif /* TCP_CHECKSUM_ON_COPY */
  }

  /*
   * Phase 3: Append queue to pcb->unsent. Queue may be NULL, but that
   * is harmless
   */
  if (pcb->last_unsent == NULL) {
    pcb->unsent = queue;
  } else {
    pcb->last_unsent->next = queue;
  }
  pcb->last_unsent = seg;

  /*
   * Finally update the pcb state.
   */
  pcb->snd_lbb += len;
  pcb->snd_buf -= len;
  pcb->snd_queuelen = queuelen;

  LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_write: %"U32_F" (after enqueued)\n",
    (u32_t)pcb->snd_queuelen));
  if (pcb->snd_queuelen != 0) {
    LWIP_ASSERT("tcp_write: valid queue length",
                pcb->unacked != NULL || pcb->unsent != NULL);
  }

  /* Set the PSH flag in the last segment that we enqueued. */
  if (seg != NULL && seg->tcphdr != NULL) {
    TCPH_SET_FLAG(seg->tcphdr, TCP_PSH);
  }

  return ERR_OK;
memerr:
  pcb->flags |= TF_NAGLEMEMERR;
  TCP_STATS_INC(tcp.memerr);

  if (concat_p != NULL) {
    tcp_tx_pbuf_free(pcb, concat_p);
  }
  if (queue != NULL) {
    tcp_tx_segs_free(pcb, queue);
  }
  if (pcb->snd_queuelen != 0) {
    LWIP_ASSERT("tcp_write: valid queue length", pcb->unacked != NULL ||
      pcb->unsent != NULL);
  }
  LWIP_DEBUGF(TCP_QLEN_DEBUG | LWIP_DBG_STATE, ("tcp_write: %"U32_F" (with mem err)\n", (u32_t)pcb->snd_queuelen));
  return ERR_MEM;
}
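
/* Hedged usage sketch (illustration, not part of the stack): the typical
 * call pattern for tcp_write() above. This build only supports the copying
 * data path (see the asserts in phases 2 and 3), so TCP_WRITE_FLAG_COPY is
 * always passed; tcp_write() only queues data, tcp_output() transmits it. */
static err_t
send_data_sketch(struct tcp_pcb *pcb, const void *data, u32_t len)
{
  err_t err = tcp_write(pcb, data, len, TCP_WRITE_FLAG_COPY);
  if (err == ERR_OK) {
    err = tcp_output(pcb);
  }
  return err;
}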
Example #6
/* Low level output of a packet. Never call this from an interrupt context,
 * as it may block until TX descriptors become available. */
STATIC err_t lpc_low_level_output(struct netif *netif, struct pbuf *p)
{
    lpc_enetdata_t *lpc_enetif = netif->state;
    struct pbuf *q;

#if LPC_TX_PBUF_BOUNCE_EN == 1
    u8_t *dst;
    struct pbuf *np;
#endif
    u32_t idx;
    u32_t dn, notdmasafe = 0;

    /* Zero-copy TX buffers may be fragmented across multiple payload
       chains. Determine the number of descriptors needed for the
       transfer. The pbuf chaining can be a mess! */
    dn = (u32_t) pbuf_clen(p);

    /* Test to make sure packet addresses are DMA safe. A DMA safe
       address is one that uses external memory or peripheral RAM.
       IRAM and FLASH are not safe! */
    for (q = p; q != NULL; q = q->next) {
        notdmasafe += lpc_packet_addr_notsafe(q->payload);
    }

#if LPC_TX_PBUF_BOUNCE_EN == 1
    /* If the pbuf is not DMA safe, a new bounce buffer (pbuf) will be
       created that will be used instead. This requires a copy from the
       non-safe DMA region to the new pbuf */
    if (notdmasafe) {
        /* Allocate a pbuf in DMA memory */
        np = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
        if (np == NULL) {
            LWIP_DEBUGF(EMAC_DEBUG | LWIP_DBG_TRACE,
                        ("lpc_low_level_output: could not allocate TX pbuf\n"));
            return ERR_MEM;
        }

        /* This buffer better be contiguous! */
        LWIP_ASSERT("lpc_low_level_output: New transmit pbuf is chained",
                    (pbuf_clen(np) == 1));

        /* Copy to DMA safe pbuf */
        dst = (u8_t *) np->payload;
        for (q = p; q != NULL; q = q->next) {
            /* Copy the buffer to the descriptor's buffer */
            MEMCPY(dst, (u8_t *) q->payload, q->len);
            dst += q->len;
        }
        np->len = p->tot_len;

        LWIP_DEBUGF(EMAC_DEBUG | LWIP_DBG_TRACE,
                    ("lpc_low_level_output: Switched to DMA safe buffer, old=%p, new=%p\n",
                     p, np));

        /* use the new buffer for descriptor queueing. The original pbuf will
           be de-allocated outside this driver. */
        p = np;
        dn = 1;
    }
#else
    if (notdmasafe) {
        LWIP_ASSERT("lpc_low_level_output: Not a DMA safe pbuf",
                    (notdmasafe == 0));
    }
#endif

    /* Wait until enough descriptors are available for the transfer. */
    /* THIS WILL BLOCK UNTIL THERE ARE ENOUGH DESCRIPTORS AVAILABLE */
    while (dn > lpc_tx_ready(netif)) {
#if NO_SYS == 0
        xSemaphoreTake(lpc_enetif->xtx_count_sem, 0);
#else
        msDelay(1);
#endif
    }

    /* Get free TX buffer index */
    idx = Chip_ENET_GetTXProduceIndex(LPC_ETHERNET);

#if NO_SYS == 0
    /* Get exclusive access */
    sys_mutex_lock(&lpc_enetif->tx_lock_mutex);
#endif

    /* Prevent LWIP from de-allocating this pbuf. The driver will
       free it once it's been transmitted. */
    if (!notdmasafe) {
        pbuf_ref(p);
    }

    /* Setup transfers */
    q = p;
    while (dn > 0) {
        dn--;

        /* Only save pointer to free on last descriptor */
        if (dn == 0) {
            /* Save size of packet and signal it's ready */
            lpc_enetif->ptxd[idx].Control = ENET_TCTRL_SIZE(q->len) | ENET_TCTRL_INT |
                                            ENET_TCTRL_LAST;
            lpc_enetif->txb[idx] = p;
        }
        else {
            /* Save size of packet, descriptor is not last */
            lpc_enetif->ptxd[idx].Control = ENET_TCTRL_SIZE(q->len) | ENET_TCTRL_INT;
            lpc_enetif->txb[idx] = NULL;
        }

        LWIP_DEBUGF(EMAC_DEBUG | LWIP_DBG_TRACE,
                    ("lpc_low_level_output: pbuf packet(%p) sent, chain#=%d,"
                     " size = %d (index=%d)\n", q->payload, dn, q->len, idx));

        lpc_enetif->ptxd[idx].Packet = (u32_t) q->payload;

        q = q->next;

        idx = Chip_ENET_IncTXProduceIndex(LPC_ETHERNET);
    }

    LINK_STATS_INC(link.xmit);

#if NO_SYS == 0
    /* Restore access */
    sys_mutex_unlock(&lpc_enetif->tx_lock_mutex);
#endif

    return ERR_OK;
}
Example #7
/**
 * Chain a new pbuf into the pbuf list that composes the datagram. The pbuf list
 * will grow over time as new pbufs are received.
 * Also checks that the datagram passes basic continuity checks (if the last
 * fragment was received at least once).
 * @param ipr points to the reassembly data of the datagram being assembled
 * @param new_p points to the pbuf for the current fragment
 * @return 0 if invalid, >0 otherwise
 */
static int
ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata *ipr, struct pbuf *new_p)
{
  struct ip_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL;
  struct pbuf *q;
  u16_t offset,len;
  struct ip_hdr *fraghdr;
  int valid = 1;

  /* Extract length and fragment offset from current fragment */
  fraghdr = (struct ip_hdr*)new_p->payload; 
  len = ntohs(IPH_LEN(fraghdr)) - IPH_HL(fraghdr) * 4;
  offset = (ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) * 8;

  /* overwrite the fragment's ip header from the pbuf with our helper struct,
   * and setup the embedded helper structure. */
  /* make sure the struct ip_reass_helper fits into the IP header */
  LWIP_ASSERT("sizeof(struct ip_reass_helper) <= IP_HLEN",
              sizeof(struct ip_reass_helper) <= IP_HLEN);
  iprh = (struct ip_reass_helper*)new_p->payload;
  iprh->next_pbuf = NULL;
  iprh->start = offset;
  iprh->end = offset + len;

  /* Iterate through until we either get to the end of the list (append),
   * or we find one with a larger offset (insert). */
  for (q = ipr->p; q != NULL;) {
    iprh_tmp = (struct ip_reass_helper*)q->payload;
    if (iprh->start < iprh_tmp->start) {
      /* the new pbuf should be inserted before this */
      iprh->next_pbuf = q;
      if (iprh_prev != NULL) {
        /* not the fragment with the lowest offset */
#if IP_REASS_CHECK_OVERLAP
        if ((iprh->start < iprh_prev->end) || (iprh->end > iprh_tmp->start)) {
          /* fragment overlaps with previous or following, throw away */
          goto freepbuf;
        }
#endif /* IP_REASS_CHECK_OVERLAP */
        iprh_prev->next_pbuf = new_p;
      } else {
        /* fragment with the lowest offset */
        ipr->p = new_p;
      }
      break;
    } else if(iprh->start == iprh_tmp->start) {
      /* received the same fragment twice: no need to keep it */
      goto freepbuf;
#if IP_REASS_CHECK_OVERLAP
    } else if(iprh->start < iprh_tmp->end) {
      /* overlap: no need to keep the new fragment */
      goto freepbuf;
#endif /* IP_REASS_CHECK_OVERLAP */
    } else {
      /* Check if the fragments received so far have no holes. */
      if (iprh_prev != NULL) {
        if (iprh_prev->end != iprh_tmp->start) {
          /* There is a fragment missing between the current
           * and the previous fragment */
          valid = 0;
        }
      }
    }
    q = iprh_tmp->next_pbuf;
    iprh_prev = iprh_tmp;
  }

  /* If q is NULL, then we made it to the end of the list. Determine what to do now */
  if (q == NULL) {
    if (iprh_prev != NULL) {
      /* this is (for now), the fragment with the highest offset:
       * chain it to the last fragment */
#if IP_REASS_CHECK_OVERLAP
      LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start);
#endif /* IP_REASS_CHECK_OVERLAP */
      iprh_prev->next_pbuf = new_p;
      if (iprh_prev->end != iprh->start) {
        valid = 0;
      }
    } else {
#if IP_REASS_CHECK_OVERLAP
      LWIP_ASSERT("no previous fragment, this must be the first fragment!",
        ipr->p == NULL);
#endif /* IP_REASS_CHECK_OVERLAP */
      /* this is the first fragment we ever received for this ip datagram */
      ipr->p = new_p;
    }
  }

  /* At this point, the validation part begins: */
  /* If we already received the last fragment */
  if ((ipr->flags & IP_REASS_FLAG_LASTFRAG) != 0) {
    /* and had no holes so far */
    if (valid) {
      /* then check if the rest of the fragments is here */
      /* Check if the queue starts with the first datagram */
      if ((ipr->p == NULL) || (((struct ip_reass_helper*)ipr->p->payload)->start != 0)) {
        valid = 0;
      } else {
        /* and check that there are no holes after this fragment */
        iprh_prev = iprh;
        q = iprh->next_pbuf;
        while (q != NULL) {
          iprh = (struct ip_reass_helper*)q->payload;
          if (iprh_prev->end != iprh->start) {
            valid = 0;
            break;
          }
          iprh_prev = iprh;
          q = iprh->next_pbuf;
        }
        /* if still valid, all fragments have been received
         * (because the fragment with MF==0 has already arrived) */
        if (valid) {
          LWIP_ASSERT("sanity check", ipr->p != NULL);
          LWIP_ASSERT("sanity check",
            ((struct ip_reass_helper*)ipr->p->payload) != iprh);
          LWIP_ASSERT("validate_datagram:next_pbuf!=NULL",
            iprh->next_pbuf == NULL);
          LWIP_ASSERT("validate_datagram:datagram end!=datagram len",
            iprh->end == ipr->datagram_len);
        }
      }
    }
    /* If valid is 0 here, there are some fragments missing in the middle
     * (since MF == 0 has already arrived). Such datagrams simply time out if
     * no more fragments are received... */
    return valid;
  }
  /* If we come here, not all fragments were received, yet! */
  return 0; /* not yet valid! */
freepbuf:
  /* this label must stay unguarded: the duplicate-fragment goto above is
     taken even when IP_REASS_CHECK_OVERLAP is 0 */
  ip_reass_pbufcount -= pbuf_clen(new_p);
  pbuf_free(new_p);
  return 0;
}
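
/* Illustration of the ordering invariant maintained above (sizes assumed):
 * three fragments of an 88-byte datagram kept in offset order on ipr->p via
 * the next_pbuf links:
 *   [start=0, end=32) -> [start=32, end=64) -> [start=64, end=88, MF=0]
 * The chain validates once it starts at offset 0 and every iprh_prev->end
 * equals the following iprh->start, i.e. there are no holes. */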
Example #8
/** \brief  Low level output of a packet. Never call this from an
 *          interrupt context, as it may block until TX descriptors
 *          become available.
 *
 *  \param[in] netif the lwip network interface structure for this netif
 *  \param[in] p the MAC packet to send (e.g. IP packet including MAC addresses and type)
 *  \return ERR_OK if the packet could be sent or an err_t value if the packet couldn't be sent
 */
static err_t k64f_low_level_output(struct netif *netif, struct pbuf *p)
{
  struct k64f_enetdata *k64f_enet = netif->state;
  struct pbuf *q;
  u32_t idx;
  s32_t dn;
  uint8_t *psend = NULL, *dst;

  /* Get free TX buffer index */
  idx = k64f_enet->tx_produce_index;
  
  /* Check the pbuf chain for payloads that are not 8-byte aligned.
     If found, a new properly aligned buffer needs to be allocated
     and the data copied there */
  for (q = p; q != NULL; q = q->next)
    if (((u32_t)q->payload & (TX_BUF_ALIGNMENT - 1)) != 0)
      break;
  if (q != NULL) {
    /* Allocate properly aligned buffer */
    psend = (uint8_t*)malloc(p->tot_len);
    if (NULL == psend) {
      return ERR_MEM;
    }
    LWIP_ASSERT("k64f_low_level_output: buffer not properly aligned", ((u32_t)psend & (TX_BUF_ALIGNMENT - 1)) == 0);
    for (q = p, dst = psend; q != NULL; q = q->next) {
      MEMCPY(dst, q->payload, q->len);
      dst += q->len;
    }
    k64f_enet->txb_aligned[idx] = psend;
    dn = 1;
  } else {
    k64f_enet->txb_aligned[idx] = NULL;
    dn = (s32_t) pbuf_clen(p);
    pbuf_ref(p);
  }

  /* Wait until enough descriptors are available for the transfer. */
  /* THIS WILL BLOCK UNTIL THERE ARE ENOUGH DESCRIPTORS AVAILABLE */
  while (dn > k64f_tx_ready(netif))
    osSemaphoreWait(k64f_enet->xTXDCountSem.id, osWaitForever);

  /* Get exclusive access */
  sys_mutex_lock(&k64f_enet->TXLockMutex);

  /* Setup transfers */
  q = p;
  while (dn > 0) {
    dn--;
    if (psend != NULL) {
      k64f_update_txbds(k64f_enet, idx, psend, p->tot_len, 1);
      k64f_enet->txb[idx] = NULL;
      
      LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
      ("k64f_low_level_output: aligned packet(%p) sent"
      " size = %d (index=%d)\n", psend, p->tot_len, idx));      
    } else {
      LWIP_ASSERT("k64f_low_level_output: buffer not properly aligned", ((u32_t)q->payload & 0x07) == 0);

      /* Only save pointer to free on last descriptor */
      if (dn == 0) {
        /* Save size of packet and signal it's ready */
        k64f_update_txbds(k64f_enet, idx, q->payload, q->len, 1);
        k64f_enet->txb[idx] = p;
      }
      else {
        /* Save size of packet, descriptor is not last */
        k64f_update_txbds(k64f_enet, idx, q->payload, q->len, 0);
        k64f_enet->txb[idx] = NULL;
      }
      
      LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
      ("k64f_low_level_output: pbuf packet(%p) sent, chain#=%d,"
      " size = %d (index=%d)\n", q->payload, dn, q->len, idx));
    }

    q = q->next;

    idx = (idx + 1) % ENET_TX_RING_LEN;
  }

  k64f_enet->tx_produce_index = idx;
  enet_hal_active_txbd(BOARD_DEBUG_ENET_INSTANCE);
  LINK_STATS_INC(link.xmit);

  /* Restore access */
  sys_mutex_unlock(&k64f_enet->TXLockMutex);

  return ERR_OK;
}
Example #9
/**
 * Called by tcp_process. Checks if the given segment is an ACK for outstanding
 * data, and if so frees the memory of the buffered data. Next, it places the
 * segment on any of the receive queues (pcb->recved or pcb->ooseq). If the segment
 * is buffered, the pbuf is referenced by pbuf_ref so that it will not be freed until
 * it has been removed from the buffer.
 *
 * If the incoming segment constitutes an ACK for a segment that was used for RTT
 * estimation, the RTT is estimated here as well.
 *
 * Called from tcp_process().
 */
static void
tcp_receive(struct tcp_pcb *pcb)
{
	struct tcp_seg *next;
#if TCP_QUEUE_OOSEQ
	struct tcp_seg *prev, *cseg;
#endif
	struct pbuf *p;
	s32_t off;
	s16_t m;
	u32_t right_wnd_edge;
	u16_t new_tot_len;
	int found_dupack = 0;

	if (flags & TCP_ACK) {
		right_wnd_edge = pcb->snd_wnd + pcb->snd_wl2;

		/* Update window. */
		if (TCP_SEQ_LT(pcb->snd_wl1, seqno) ||
			(pcb->snd_wl1 == seqno && TCP_SEQ_LT(pcb->snd_wl2, ackno)) ||
			(pcb->snd_wl2 == ackno && tcphdr->wnd > pcb->snd_wnd)) {
			pcb->snd_wnd = tcphdr->wnd;
			pcb->snd_wl1 = seqno;
			pcb->snd_wl2 = ackno;
			if (pcb->snd_wnd > 0 && pcb->persist_backoff > 0) {
				pcb->persist_backoff = 0;
			}
			LWIP_DEBUGF(TCP_WND_DEBUG, ("tcp_receive: window update %"U16_F"\n", pcb->snd_wnd));
#if TCP_WND_DEBUG
		} else {
			if (pcb->snd_wnd != tcphdr->wnd) {
				LWIP_DEBUGF(TCP_WND_DEBUG,
							("tcp_receive: no window update lastack %"U32_F" ackno %"
							 U32_F" wl1 %"U32_F" seqno %"U32_F" wl2 %"U32_F"\n",
							 pcb->lastack, ackno, pcb->snd_wl1, seqno, pcb->snd_wl2));
			}
#endif /* TCP_WND_DEBUG */
		}

		/* (From Stevens TCP/IP Illustrated Vol II, p970.) It's only a
		 * duplicate ack if:
		 * 1) It doesn't ACK new data
		 * 2) length of received packet is zero (i.e. no payload)
		 * 3) the advertised window hasn't changed
		 * 4) There is outstanding unacknowledged data (retransmission timer running)
		 * 5) The ACK is == biggest ACK sequence number so far seen (snd_una)
		 *
		 * If it passes all five, should process as a dupack:
		 * a) dupacks < 3: do nothing
		 * b) dupacks == 3: fast retransmit
		 * c) dupacks > 3: increase cwnd
		 *
		 * If it only passes 1-3, should reset dupack counter (and add to
		 * stats, which we don't do in lwIP)
		 *
		 * If it only passes 1, should reset dupack counter
		 *
		 */

		/* Clause 1 */
		if (TCP_SEQ_LEQ(ackno, pcb->lastack)) {
			pcb->acked = 0;
			/* Clause 2 */
			if (tcplen == 0) {
				/* Clause 3 */
				if (pcb->snd_wl2 + pcb->snd_wnd == right_wnd_edge) {
					/* Clause 4 */
					if (pcb->rtime >= 0) {
						/* Clause 5 */
						if (pcb->lastack == ackno) {
							found_dupack = 1;
							if (pcb->dupacks + 1 > pcb->dupacks)
								++pcb->dupacks;
							if (pcb->dupacks > 3) {
								/* Inflate the congestion window, but not if it means that
								   the value overflows. */
								if ((u16_t)(pcb->cwnd + pcb->mss) > pcb->cwnd) {
									pcb->cwnd += pcb->mss;
								}
							} else if (pcb->dupacks == 3) {
								/* Do fast retransmit */
								tcp_rexmit_fast(pcb);
							}
						}
					}
				}
			}
			/* If Clause (1) or more is true, but not a duplicate ack, reset
			 * count of consecutive duplicate acks */
			if (!found_dupack) {
				pcb->dupacks = 0;
			}
		} else if (TCP_SEQ_BETWEEN(ackno, pcb->lastack+1, pcb->snd_nxt)) {
			/* We come here when the ACK acknowledges new data. */

			/* Reset the "IN Fast Retransmit" flag, since we are no longer
			   in fast retransmit. Also reset the congestion window to the
			   slow start threshold. */
			if (pcb->flags & TF_INFR) {
				pcb->flags &= ~TF_INFR;
				pcb->cwnd = pcb->ssthresh;
			}

			/* Reset the number of retransmissions. */
			pcb->nrtx = 0;

			/* Reset the retransmission time-out. */
			pcb->rto = (pcb->sa >> 3) + pcb->sv;

			/* Update the send buffer space. The difference cannot exceed 64K,
			   since the advertised window is a 16-bit value. */
			pcb->acked = (u16_t)(ackno - pcb->lastack);

			pcb->snd_buf += pcb->acked;

			/* Reset the fast retransmit variables. */
			pcb->dupacks = 0;
			pcb->lastack = ackno;

			/* Update the congestion control variables (cwnd and
			   ssthresh). */
			if (pcb->state >= ESTABLISHED) {
				if (pcb->cwnd < pcb->ssthresh) {
					if ((u16_t)(pcb->cwnd + pcb->mss) > pcb->cwnd) {
						pcb->cwnd += pcb->mss;
					}
					LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: slow start cwnd %"U16_F"\n", pcb->cwnd));
				} else {
					u16_t new_cwnd = (pcb->cwnd + pcb->mss * pcb->mss / pcb->cwnd);
					if (new_cwnd > pcb->cwnd) {
						pcb->cwnd = new_cwnd;
					}
					LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: congestion avoidance cwnd %"U16_F"\n", pcb->cwnd));
				}
			}
			LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: ACK for %"U32_F", unacked->seqno %"U32_F":%"U32_F"\n",
										  ackno,
										  pcb->unacked != NULL?
										  ntohl(pcb->unacked->tcphdr->seqno): 0,
										  pcb->unacked != NULL?
										  ntohl(pcb->unacked->tcphdr->seqno) + TCP_TCPLEN(pcb->unacked): 0));

			/* Remove segment from the unacknowledged list if the incoming
			   ACK acknowlegdes them. */
			while (pcb->unacked != NULL &&
				   TCP_SEQ_LEQ(ntohl(pcb->unacked->tcphdr->seqno) +
							   TCP_TCPLEN(pcb->unacked), ackno)) {
				LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: removing %"U32_F":%"U32_F" from pcb->unacked\n",
											  ntohl(pcb->unacked->tcphdr->seqno),
											  ntohl(pcb->unacked->tcphdr->seqno) +
											  TCP_TCPLEN(pcb->unacked)));

				next = pcb->unacked;
				pcb->unacked = pcb->unacked->next;

				LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_receive: queuelen %"U16_F" ... ", (u16_t)pcb->snd_queuelen));
				LWIP_ASSERT("pcb->snd_queuelen >= pbuf_clen(next->p)", (pcb->snd_queuelen >= pbuf_clen(next->p)));
				/* Prevent an ACK for a FIN from generating a sent event */
				if ((pcb->acked != 0) && ((TCPH_FLAGS(next->tcphdr) & TCP_FIN) != 0)) {
					pcb->acked--;
				}

				pcb->snd_queuelen -= pbuf_clen(next->p);
				tcp_seg_free(next);

				LWIP_DEBUGF(TCP_QLEN_DEBUG, ("%"U16_F" (after freeing unacked)\n", (u16_t)pcb->snd_queuelen));
				if (pcb->snd_queuelen != 0) {
					LWIP_ASSERT("tcp_receive: valid queue length", pcb->unacked != NULL ||
								pcb->unsent != NULL);
				}
			}

			/* If there's nothing left to acknowledge, stop the retransmit
			   timer, otherwise reset it to start again */
			if(pcb->unacked == NULL)
				pcb->rtime = -1;
			else
				pcb->rtime = 0;

			pcb->polltmr = 0;
		} else {
Example #10
/* Low level output of a packet. Never call this from an interrupt context,
   as it may block until TX descriptors become available */
static err_t lpc_low_level_output(struct netif *netif, struct pbuf *sendp)
{
	struct lpc_enetdata *lpc_netifdata = netif->state;
	u32_t idx, fidx, dn;
	struct pbuf *p = sendp;

#if LPC_CHECK_SLOWMEM == 1
	struct pbuf *q, *wp;

	u8_t *dst;
	int pcopy = 0;

	/* Check packet address to determine if it's in slow memory and
	   relocate if necessary */
	for (q = p; ((q != NULL) && (pcopy == 0)); q = q->next) {
		for (fidx = 0;
			 fidx < (sizeof(slmem) / sizeof(struct lpc_slowmem_array_t));
			 fidx++) {
			if ((q->payload >= (void *) slmem[fidx].start) &&
				(q->payload <= (void *) slmem[fidx].end)) {
				/* Needs copy */
				pcopy = 1;
			}
		}
	}

	if (pcopy) {
		/* Create a new pbuf with the total pbuf size */
		wp = pbuf_alloc(PBUF_RAW, (u16_t) EMAC_ETH_MAX_FLEN, PBUF_RAM);
		if (!wp) {
			/* Exit with error */
			return ERR_MEM;
		}

		/* Copy pbuf */
		dst = (u8_t *) wp->payload;
		wp->tot_len = 0;
		for (q = p; q != NULL; q = q->next) {
			MEMCPY(dst, (u8_t *) q->payload, q->len);
			dst += q->len;
			wp->tot_len += q->len;
		}
		wp->len = wp->tot_len;

		/* LWIP will free original pbuf on exit of function */

		p = sendp = wp;
	}
#endif

	/* Zero-copy TX buffers may be fragmented across multiple payload
	   chains. Determine the number of descriptors needed for the
	   transfer. The pbuf chaining can be a mess! */
	dn = (u32_t) pbuf_clen(p);

	/* Wait until enough descriptors are available for the transfer. */
	/* THIS WILL BLOCK UNTIL THERE ARE ENOUGH DESCRIPTORS AVAILABLE */
	while (dn > lpc_tx_ready(netif)) {
#if NO_SYS == 0
		xSemaphoreTake(lpc_netifdata->xTXDCountSem, 0);
#else
		msDelay(1);
#endif
	}

	/* Get the next free descriptor index */
	fidx = idx = lpc_netifdata->tx_fill_idx;

#if NO_SYS == 0
	/* Get exclusive access */
	sys_mutex_lock(&lpc_netifdata->TXLockMutex);
#endif

	/* Fill in the next free descriptor(s) */
	while (dn > 0) {
		dn--;

		/* Setup packet address and length */
		lpc_netifdata->ptdesc[idx].B1ADD = (u32_t) p->payload;
		lpc_netifdata->ptdesc[idx].BSIZE = (u32_t) TDES_ENH_BS1(p->len);

		/* Save pointer to pbuf so we can reclaim the memory for
		   the pbuf after the buffer has been sent. Only the first
		   pbuf in a chain is saved since the full chain doesn't
		   need to be freed. */
		/* For first packet only, first flag */
		lpc_netifdata->tx_free_descs--;
		if (idx == fidx) {
			lpc_netifdata->ptdesc[idx].CTRLSTAT |= TDES_ENH_FS;
#if LPC_CHECK_SLOWMEM == 1
			/* If this is a copied pbuf, then avoid getting the extra reference
			   or the TX reclaim will be off by 1 */
			if (!pcopy) {
				pbuf_ref(p);
			}
#else
			/* Increment reference count on this packet so LWIP doesn't
			   attempt to free it on return from this call */
			pbuf_ref(p);
#endif
		}
		else {
			lpc_netifdata->ptdesc[idx].CTRLSTAT |= TDES_OWN;
		}

		/* Save address of pbuf, but make sure it's associated with the
		   first chained pbuf so it gets freed once all pbuf chains are
		   transferred. */
		if (!dn) {
			lpc_netifdata->txpbufs[idx] = sendp;
		}
		else {
			lpc_netifdata->txpbufs[idx] = NULL;
		}

		/* For last packet only, interrupt and last flag */
		if (dn == 0) {
			lpc_netifdata->ptdesc[idx].CTRLSTAT |= TDES_ENH_LS |
												   TDES_ENH_IC;
		}

		/* IP checksumming requires full buffering in IP */
		lpc_netifdata->ptdesc[idx].CTRLSTAT |= TDES_ENH_CIC(3);

		LWIP_DEBUGF(EMAC_DEBUG | LWIP_DBG_TRACE,
					("lpc_low_level_output: pbuf packet %p sent, chain %d,"
					 " size %d, index %d, free %d\n", p, dn, p->len, idx,
					 lpc_netifdata->tx_free_descs));

		/* Update next available descriptor */
		idx++;
		if (idx >= LPC_NUM_BUFF_TXDESCS) {
			idx = 0;
		}

		/* Next packet fragment */
		p = p->next;
	}

	lpc_netifdata->tx_fill_idx = idx;

	LINK_STATS_INC(link.xmit);

	/* Give first descriptor to DMA to start transfer */
	lpc_netifdata->ptdesc[fidx].CTRLSTAT |= TDES_OWN;

	/* Tell DMA to poll descriptors to start transfer */
	LPC_ETHERNET->DMA_TRANS_POLL_DEMAND = 1;

#if NO_SYS == 0
	/* Restore access */
	sys_mutex_unlock(&lpc_netifdata->TXLockMutex);
#endif

	return ERR_OK;
}
Example #11
0
/*-----------------------------------------------------------------------------------*
  err_t low_level_output(struct netif *netif, struct pbuf *p)

  Output pbuf chain to hardware. It is assumed that there is a complete and correct
  ethernet frame in p. The only buffering we have in this system is in the
  hardware descriptor ring. If there is no room on the ring, then drop the frame.
 *-----------------------------------------------------------------------------------*/
static err_t
low_level_output(struct netif *netif, struct pbuf *p)
{
    struct pbuf *q;
    mcf5272if_t *mcf5272 = netif->state;
    MCF5272_IMM *imm = mcf5272->imm;
    int num_desc;
    int num_free;
    unsigned int tx_insert_sof, tx_insert_eof;
    unsigned int i;
    u32_t old_level;

    /* Make sure that there are no PBUF_REF buffers in the chain. Such buffers
       would have to be freed immediately on return, but this driver leaves
       buffers on the DMA ring, so they are freed later. */
    p = pbuf_take(p);
    /* Interrupts are disabled through this whole thing to support multi-threading
     * transmit calls. Also this function might be called from an ISR. */
    old_level = sys_arch_protect();
    
    /* Determine number of descriptors needed */
    num_desc = pbuf_clen(p);
    if (num_desc > mcf5272->tx_free)
    {
        /* Drop the frame, we have no place to put it */
#ifdef LINK_STATS
        lwip_stats.link.memerr++;
#endif
        sys_arch_unprotect(old_level);
        return ERR_MEM;
        
    } else {
        /* Increment use count on pbuf */
        pbuf_ref(p);
        
        /* Put buffers on descriptor ring, but don't mark them as ready yet */
        tx_insert_eof = tx_insert_sof = mcf5272->tx_insert;
        q = p;
        do
        {
            mcf5272->tx_free--;
            mcf5272->tx_pbuf_a[tx_insert_eof] = q;
            mcf5272->txbd_a[tx_insert_eof].p_buf = q->payload;
            mcf5272->txbd_a[tx_insert_eof].data_len = q->len;
            q = q->next;
            if (q)
                INC_TX_BD_INDEX(tx_insert_eof);
        } while (q);
        
        /* Go backwards through descriptor ring setting flags */
        i = tx_insert_eof;
        do
        {
            mcf5272->txbd_a[i].flags = (u16_t) (MCF5272_FEC_TX_BD_R |
                                                (mcf5272->txbd_a[i].flags & MCF5272_FEC_TX_BD_W) |
                               ((i == tx_insert_eof) ? (MCF5272_FEC_TX_BD_L | MCF5272_FEC_TX_BD_TC) : 0));
            if (i != tx_insert_sof)
                DEC_TX_BD_INDEX(i);
            else
                break;
        } while (1);
        INC_TX_BD_INDEX(tx_insert_eof);
        mcf5272->tx_insert = tx_insert_eof;
#ifdef LINK_STATS
        lwip_stats.link.xmit++;
#endif        
        /* Indicate that there has been a transmit buffer produced */
        MCF5272_WR_FEC_TDAR(imm, 1);
        sys_arch_unprotect(old_level);
    }
    return ERR_OK;
}
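INC_TX_BD_INDEX() and DEC_TX_BD_INDEX() wrap an index around the transmit
descriptor ring; their definitions are not part of this excerpt. A plausible
sketch, assuming a power-of-two ring size (the name MCF5272_TX_BD_NUM and its
value are assumptions):

/* Hypothetical ring-index helpers. With a power-of-two ring size the
   wrap-around reduces to a bit mask; otherwise a modulo would be needed. */
#define MCF5272_TX_BD_NUM   16  /* assumed ring size, board-specific */
#define INC_TX_BD_INDEX(i)  ((i) = ((i) + 1) & (MCF5272_TX_BD_NUM - 1))
#define DEC_TX_BD_INDEX(i)  ((i) = ((i) - 1) & (MCF5272_TX_BD_NUM - 1))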
Example #12
0
/**
 * Called by tcp_process. Checks if the given segment is an ACK for outstanding
 * data, and if so frees the memory of the buffered data. Next, it places the
 * segment on any of the receive queues (pcb->recved or pcb->ooseq). If the segment
 * is buffered, the pbuf is referenced by pbuf_ref so that it will not be freed
 * until it has been removed from the buffer.
 *
 * If the incoming segment constitutes an ACK for a segment that was used for RTT
 * estimation, the RTT is estimated here as well.
 *
 * Called from tcp_process().
 *
 * @return 1 if the incoming segment is the next in sequence, 0 if not
 */
static u8_t
tcp_receive(struct tcp_pcb *pcb)
{
  struct tcp_seg *next;
#if TCP_QUEUE_OOSEQ
  struct tcp_seg *prev, *cseg;
#endif
  struct pbuf *p;
  s32_t off;
  s16_t m;
  u32_t right_wnd_edge;
  u16_t new_tot_len;
  u8_t accepted_inseq = 0;

  if (flags & TCP_ACK) {
    right_wnd_edge = pcb->snd_wnd + pcb->snd_wl1;

    /* Update window. */
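    /* RFC 793: accept the update if this segment is newer (seqno advanced),
       has the same seqno with a newer ACK, or the same ACK with a larger
       advertised window. */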
    if (TCP_SEQ_LT(pcb->snd_wl1, seqno) ||
       (pcb->snd_wl1 == seqno && TCP_SEQ_LT(pcb->snd_wl2, ackno)) ||
       (pcb->snd_wl2 == ackno && tcphdr->wnd > pcb->snd_wnd)) {
      pcb->snd_wnd = tcphdr->wnd;
      pcb->snd_wl1 = seqno;
      pcb->snd_wl2 = ackno;
      if (pcb->snd_wnd > 0 && pcb->persist_backoff > 0) {
          pcb->persist_backoff = 0;
      }
      LWIP_DEBUGF(TCP_WND_DEBUG, ("tcp_receive: window update %"U16_F"\n", pcb->snd_wnd));
#if TCP_WND_DEBUG
    } else {
      if (pcb->snd_wnd != tcphdr->wnd) {
        LWIP_DEBUGF(TCP_WND_DEBUG, ("tcp_receive: no window update lastack %"U32_F" snd_max %"U32_F" ackno %"U32_F" wl1 %"U32_F" seqno %"U32_F" wl2 %"U32_F"\n",
                               pcb->lastack, pcb->snd_max, ackno, pcb->snd_wl1, seqno, pcb->snd_wl2));
      }
#endif /* TCP_WND_DEBUG */
    }

    if (pcb->lastack == ackno) {
      pcb->acked = 0;

      if (pcb->snd_wl1 + pcb->snd_wnd == right_wnd_edge){
        ++pcb->dupacks;
        if (pcb->dupacks >= 3 && pcb->unacked != NULL) {
          if (!(pcb->flags & TF_INFR)) {
            /* This is fast retransmit. Retransmit the first unacked segment. */
            LWIP_DEBUGF(TCP_FR_DEBUG, ("tcp_receive: dupacks %"U16_F" (%"U32_F"), fast retransmit %"U32_F"\n",
                                       (u16_t)pcb->dupacks, pcb->lastack,
                                       ntohl(pcb->unacked->tcphdr->seqno)));
            tcp_rexmit(pcb);
            /* Set ssthresh to max (FlightSize / 2, 2*SMSS) */
            /*pcb->ssthresh = LWIP_MAX((pcb->snd_max -
                                      pcb->lastack) / 2,
                                      2 * pcb->mss);*/
            /* Set ssthresh to half of the minimum of the current cwnd and the advertised window */
            if (pcb->cwnd > pcb->snd_wnd)
              pcb->ssthresh = pcb->snd_wnd / 2;
            else
              pcb->ssthresh = pcb->cwnd / 2;

            /* The minimum value for ssthresh should be 2 MSS */
            if (pcb->ssthresh < 2*pcb->mss) {
              LWIP_DEBUGF(TCP_FR_DEBUG, ("tcp_receive: The minimum value for ssthresh %"U16_F" should be min 2 mss %"U16_F"...\n", pcb->ssthresh, 2*pcb->mss));
              pcb->ssthresh = 2*pcb->mss;
            }

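            /* RFC 5681: after the retransmit, inflate cwnd to ssthresh plus
               3*SMSS to account for the three segments that triggered the
               duplicate ACKs and have already left the network. */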
            pcb->cwnd = pcb->ssthresh + 3 * pcb->mss;
            pcb->flags |= TF_INFR;
          } else {
            /* Inflate the congestion window, but not if it means that
               the value overflows. */
            if ((u16_t)(pcb->cwnd + pcb->mss) > pcb->cwnd) {
              pcb->cwnd += pcb->mss;
            }
          }
        }
      } else {
        LWIP_DEBUGF(TCP_FR_DEBUG, ("tcp_receive: dupack averted %"U32_F" %"U32_F"\n",
                                   pcb->snd_wl1 + pcb->snd_wnd, right_wnd_edge));
      }
    } else if (TCP_SEQ_BETWEEN(ackno, pcb->lastack+1, pcb->snd_max)){
      /* We come here when the ACK acknowledges new data. */
      
      /* Reset the "IN Fast Retransmit" flag, since we are no longer
         in fast retransmit. Also reset the congestion window to the
         slow start threshold. */
      if (pcb->flags & TF_INFR) {
        pcb->flags &= ~TF_INFR;
        pcb->cwnd = pcb->ssthresh;
      }

      /* Reset the number of retransmissions. */
      pcb->nrtx = 0;

      /* Reset the retransmission time-out. */
      pcb->rto = (pcb->sa >> 3) + pcb->sv;

      /* Update the send buffer space. The difference can never exceed 64K,
         since the window is at most 64K, so the u16_t cast below is safe. */
      pcb->acked = (u16_t)(ackno - pcb->lastack);

      pcb->snd_buf += pcb->acked;

      /* Reset the fast retransmit variables. */
      pcb->dupacks = 0;
      pcb->lastack = ackno;

      /* Update the congestion control variables (cwnd and
         ssthresh). */
      if (pcb->state >= ESTABLISHED) {
        if (pcb->cwnd < pcb->ssthresh) {
          if ((u16_t)(pcb->cwnd + pcb->mss) > pcb->cwnd) {
            pcb->cwnd += pcb->mss;
          }
          LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: slow start cwnd %"U16_F"\n", pcb->cwnd));
        } else {
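          /* Congestion avoidance: grow cwnd by roughly mss*mss/cwnd per ACK,
             which approximates an increase of one MSS per round-trip time
             (RFC 5681). */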
          u16_t new_cwnd = (pcb->cwnd + pcb->mss * pcb->mss / pcb->cwnd);
          if (new_cwnd > pcb->cwnd) {
            pcb->cwnd = new_cwnd;
          }
          LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_receive: congestion avoidance cwnd %"U16_F"\n", pcb->cwnd));
        }
      }
      LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: ACK for %"U32_F", unacked->seqno %"U32_F":%"U32_F"\n",
                                    ackno,
                                    pcb->unacked != NULL?
                                    ntohl(pcb->unacked->tcphdr->seqno): 0,
                                    pcb->unacked != NULL?
                                    ntohl(pcb->unacked->tcphdr->seqno) + TCP_TCPLEN(pcb->unacked): 0));

      /* Remove segments from the unacknowledged list if the incoming
         ACK acknowledges them. */
      while (pcb->unacked != NULL &&
             TCP_SEQ_LEQ(ntohl(pcb->unacked->tcphdr->seqno) +
                         TCP_TCPLEN(pcb->unacked), ackno)) {
        LWIP_DEBUGF(TCP_INPUT_DEBUG, ("tcp_receive: removing %"U32_F":%"U32_F" from pcb->unacked\n",
                                      ntohl(pcb->unacked->tcphdr->seqno),
                                      ntohl(pcb->unacked->tcphdr->seqno) +
                                      TCP_TCPLEN(pcb->unacked)));

        next = pcb->unacked;
        pcb->unacked = pcb->unacked->next;

        LWIP_DEBUGF(TCP_QLEN_DEBUG, ("tcp_receive: queuelen %"U16_F" ... ", (u16_t)pcb->snd_queuelen));
        LWIP_ASSERT("pcb->snd_queuelen >= pbuf_clen(next->p)", (pcb->snd_queuelen >= pbuf_clen(next->p)));
        pcb->snd_queuelen -= pbuf_clen(next->p);
        tcp_seg_free(next);

        LWIP_DEBUGF(TCP_QLEN_DEBUG, ("%"U16_F" (after freeing unacked)\n", (u16_t)pcb->snd_queuelen));
        if (pcb->snd_queuelen != 0) {
          LWIP_ASSERT("tcp_receive: valid queue length", pcb->unacked != NULL ||
                      pcb->unsent != NULL);
        }
      }

      /* If there's nothing left to acknowledge, stop the retransmit
         timer, otherwise reset it to start again */
      if(pcb->unacked == NULL)
        pcb->rtime = -1;
      else
        pcb->rtime = 0;

      pcb->polltmr = 0;
    } else {
Example #13
0
static err_t mg_lwip_tcp_recv_cb(void *arg, struct tcp_pcb *tpcb,
                                 struct pbuf *p, err_t err) {
  struct mg_connection *nc = (struct mg_connection *) arg;
  DBG(("%p %p %u %d", nc, tpcb, (p != NULL ? p->tot_len : 0), err));
  if (p == NULL) {
    if (nc != NULL) {
      mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc);
    } else {
      /* Tombstoned connection, do nothing. */
    }
    return ERR_OK;
  } else if (nc == NULL) {
    tcp_abort(tpcb);
    return ERR_ARG;
  }
  struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
  /*
   * If we get a chain of more than one segment at once, we need to bump
   * refcount on the subsequent bufs to make them independent.
   */
  if (p->next != NULL) {
    struct pbuf *q = p->next;
    for (; q != NULL; q = q->next) pbuf_ref(q);
  }
  if (cs->rx_chain == NULL) {
    cs->rx_chain = p;
    cs->rx_offset = 0;
  } else {
    if (pbuf_clen(cs->rx_chain) >= 4) {
      /* The ESP SDK has a limited pool of 5 pbufs. We must not hog them all,
       * or RX will be completely blocked. We already have at least 4 in the
       * chain and this one would make 5, so make a copy and release the
       * original. */
      struct pbuf *np = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
      if (np != NULL) {
        pbuf_copy(np, p);
        pbuf_free(p);
        p = np;
      }
    }
    pbuf_chain(cs->rx_chain, p);
  }

#ifdef SSL_KRYPTON
  if (nc->ssl != NULL) {
    if (nc->flags & MG_F_SSL_HANDSHAKE_DONE) {
      mg_lwip_ssl_recv(nc);
    } else {
      mg_lwip_ssl_do_hs(nc);
    }
    return ERR_OK;
  }
#endif

  while (cs->rx_chain != NULL) {
    struct pbuf *seg = cs->rx_chain;
    size_t len = (seg->len - cs->rx_offset);
    char *data = (char *) malloc(len);
    if (data == NULL) {
      DBG(("OOM"));
      return ERR_MEM;
    }
    pbuf_copy_partial(seg, data, len, cs->rx_offset);
    mg_if_recv_tcp_cb(nc, data, len); /* callee takes over data */
    cs->rx_offset += len;
    if (cs->rx_offset == cs->rx_chain->len) {
      cs->rx_chain = pbuf_dechain(cs->rx_chain);
      pbuf_free(seg);
      cs->rx_offset = 0;
    }
  }

  if (nc->send_mbuf.len > 0) {
    mg_lwip_mgr_schedule_poll(nc->mgr);
  }
  return ERR_OK;
}
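The callback above keeps its receive state in a mg_lwip_conn_state hung off
nc->sock. A reduced sketch of just the fields used here (the real mongoose
struct has more members; this layout is an assumption for illustration):

/* Reduced, hypothetical view of the per-connection state used by the
   receive callback above. */
struct mg_lwip_conn_state {
  struct tcp_pcb *pcb;   /* assumed back-pointer to the lwIP pcb */
  struct pbuf *rx_chain; /* head of the not-yet-consumed RX pbuf chain */
  size_t rx_offset;      /* bytes already consumed from the head pbuf */
};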
Example #14
0
/**
 * Reassembles incoming IPv6 fragments into an IPv6 datagram.
 *
 * @param p points to the IPv6 Fragment Header
 * @return NULL if reassembly is incomplete, pbuf pointing to
 *         IPv6 Header if reassembly is complete
 */
struct pbuf *
ip6_reass(struct pbuf *p)
{
  struct ip6_reassdata *ipr, *ipr_prev;
  struct ip6_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL;
  struct ip6_frag_hdr *frag_hdr;
  u16_t offset, len;
  u16_t clen;
  u8_t valid = 1;
  struct pbuf *q;

  IP6_FRAG_STATS_INC(ip6_frag.recv);

  if ((const void*)ip6_current_header() != ((u8_t*)p->payload) - IP6_HLEN) {
    /* ip6_frag_hdr must be in the first pbuf, not chained */
    IP6_FRAG_STATS_INC(ip6_frag.proterr);
    IP6_FRAG_STATS_INC(ip6_frag.drop);
    goto nullreturn;
  }

  frag_hdr = (struct ip6_frag_hdr *) p->payload;

  clen = pbuf_clen(p);

  offset = lwip_ntohs(frag_hdr->_fragment_offset);
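  /* The Fragment Header stores the offset in 8-octet units in the upper 13
     bits of this field, so masking off the flag bits later yields the offset
     in bytes directly: (offset / 8) << 3 == offset. */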

  /* Calculate fragment length from IPv6 payload length.
   * Adjust for headers before Fragment Header.
   * And finally adjust by Fragment Header length. */
  len = lwip_ntohs(ip6_current_header()->_plen);
  len -= (u16_t)(((u8_t*)p->payload - (const u8_t*)ip6_current_header()) - IP6_HLEN);
  len -= IP6_FRAG_HLEN;

  /* Look for the datagram the fragment belongs to in the current datagram queue,
   * remembering the previous in the queue for later dequeueing. */
  for (ipr = reassdatagrams, ipr_prev = NULL; ipr != NULL; ipr = ipr->next) {
    /* Check if the incoming fragment matches the one currently present
       in the reassembly buffer. If so, we proceed with copying the
       fragment into the buffer. */
    if ((frag_hdr->_identification == ipr->identification) &&
        ip6_addr_cmp(ip6_current_src_addr(), &(IPV6_FRAG_HDRREF(ipr->iphdr)->src)) &&
        ip6_addr_cmp(ip6_current_dest_addr(), &(IPV6_FRAG_HDRREF(ipr->iphdr)->dest))) {
      IP6_FRAG_STATS_INC(ip6_frag.cachehit);
      break;
    }
    ipr_prev = ipr;
  }

  if (ipr == NULL) {
    /* Enqueue a new datagram into the datagram queue */
    ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA);
    if (ipr == NULL) {
#if IP_REASS_FREE_OLDEST
      /* Make room and try again. */
      ip6_reass_remove_oldest_datagram(ipr, clen);
      ipr = (struct ip6_reassdata *)memp_malloc(MEMP_IP6_REASSDATA);
      if (ipr != NULL) {
        /* re-search ipr_prev since it might have been removed */
        for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) {
          if (ipr_prev->next == ipr) {
            break;
          }
        }
      } else
#endif /* IP_REASS_FREE_OLDEST */
      {
        IP6_FRAG_STATS_INC(ip6_frag.memerr);
        IP6_FRAG_STATS_INC(ip6_frag.drop);
        goto nullreturn;
      }
    }

    memset(ipr, 0, sizeof(struct ip6_reassdata));
    ipr->timer = IP_REASS_MAXAGE;

    /* enqueue the new structure to the front of the list */
    ipr->next = reassdatagrams;
    reassdatagrams = ipr;

    /* Use the current IPv6 header for src/dest address reference.
     * Eventually, we will replace it when we get the first fragment
     * (it might be this one, in any case, it is done later). */
#if IPV6_FRAG_COPYHEADER
    MEMCPY(&ipr->iphdr, ip6_current_header(), IP6_HLEN);
#else /* IPV6_FRAG_COPYHEADER */
    /* need to use the non-const pointer here: */
    ipr->iphdr = ip_data.current_ip6_header;
#endif /* IPV6_FRAG_COPYHEADER */

    /* copy the fragmented packet id. */
    ipr->identification = frag_hdr->_identification;

    /* copy the nexth field */
    ipr->nexth = frag_hdr->_nexth;
  }

  /* Check if we are allowed to enqueue more datagrams. */
  if ((ip6_reass_pbufcount + clen) > IP_REASS_MAX_PBUFS) {
#if IP_REASS_FREE_OLDEST
    ip6_reass_remove_oldest_datagram(ipr, clen);
    if ((ip6_reass_pbufcount + clen) <= IP_REASS_MAX_PBUFS) {
      /* re-search ipr_prev since it might have been removed */
      for (ipr_prev = reassdatagrams; ipr_prev != NULL; ipr_prev = ipr_prev->next) {
        if (ipr_prev->next == ipr) {
          break;
        }
      }
    } else
#endif /* IP_REASS_FREE_OLDEST */
    {
      /* @todo: send ICMPv6 time exceeded here? */
      /* drop this pbuf */
      IP6_FRAG_STATS_INC(ip6_frag.memerr);
      IP6_FRAG_STATS_INC(ip6_frag.drop);
      goto nullreturn;
    }
  }

  /* Overwrite Fragment Header with our own helper struct. */
#if IPV6_FRAG_COPYHEADER
  if (IPV6_FRAG_REQROOM > 0) {
    /* Make room for struct ip6_reass_helper (only required if sizeof(void*) > 4).
       This cannot fail since we already checked when receiving this fragment. */
    u8_t hdrerr = pbuf_header_force(p, IPV6_FRAG_REQROOM);
    LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0);
  }
#else /* IPV6_FRAG_COPYHEADER */
  LWIP_ASSERT("sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN, set IPV6_FRAG_COPYHEADER to 1",
    sizeof(struct ip6_reass_helper) <= IP6_FRAG_HLEN);
#endif /* IPV6_FRAG_COPYHEADER */
  iprh = (struct ip6_reass_helper *)p->payload;
  iprh->next_pbuf = NULL;
  iprh->start = (offset & IP6_FRAG_OFFSET_MASK);
  iprh->end = (offset & IP6_FRAG_OFFSET_MASK) + len;

  /* find the right place to insert this pbuf */
  /* Iterate through until we either get to the end of the list (append),
   * or we find one with a larger offset (insert). */
  for (q = ipr->p; q != NULL;) {
    iprh_tmp = (struct ip6_reass_helper*)q->payload;
    if (iprh->start < iprh_tmp->start) {
#if IP_REASS_CHECK_OVERLAP
      if (iprh->end > iprh_tmp->start) {
        /* fragment overlaps with following, throw away */
        IP6_FRAG_STATS_INC(ip6_frag.proterr);
        IP6_FRAG_STATS_INC(ip6_frag.drop);
        goto nullreturn;
      }
      if (iprh_prev != NULL) {
        if (iprh->start < iprh_prev->end) {
          /* fragment overlaps with previous, throw away */
          IP6_FRAG_STATS_INC(ip6_frag.proterr);
          IP6_FRAG_STATS_INC(ip6_frag.drop);
          goto nullreturn;
        }
      }
#endif /* IP_REASS_CHECK_OVERLAP */
      /* the new pbuf should be inserted before this */
      iprh->next_pbuf = q;
      if (iprh_prev != NULL) {
        /* not the fragment with the lowest offset */
        iprh_prev->next_pbuf = p;
      } else {
        /* fragment with the lowest offset */
        ipr->p = p;
      }
      break;
    } else if (iprh->start == iprh_tmp->start) {
      /* received the same fragment twice: no need to keep it */
      IP6_FRAG_STATS_INC(ip6_frag.drop);
      goto nullreturn;
#if IP_REASS_CHECK_OVERLAP
    } else if (iprh->start < iprh_tmp->end) {
      /* overlap: no need to keep the new fragment */
      IP6_FRAG_STATS_INC(ip6_frag.proterr);
      IP6_FRAG_STATS_INC(ip6_frag.drop);
      goto nullreturn;
#endif /* IP_REASS_CHECK_OVERLAP */
    } else {
      /* Check if the fragments received so far have no gaps. */
      if (iprh_prev != NULL) {
        if (iprh_prev->end != iprh_tmp->start) {
          /* There is a fragment missing between the current
           * and the previous fragment */
          valid = 0;
        }
      }
    }
    q = iprh_tmp->next_pbuf;
    iprh_prev = iprh_tmp;
  }

  /* If q is NULL, then we made it to the end of the list. Determine what to do now */
  if (q == NULL) {
    if (iprh_prev != NULL) {
      /* this is (for now), the fragment with the highest offset:
       * chain it to the last fragment */
#if IP_REASS_CHECK_OVERLAP
      LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start);
#endif /* IP_REASS_CHECK_OVERLAP */
      iprh_prev->next_pbuf = p;
      if (iprh_prev->end != iprh->start) {
        valid = 0;
      }
    } else {
#if IP_REASS_CHECK_OVERLAP
      LWIP_ASSERT("no previous fragment, this must be the first fragment!",
        ipr->p == NULL);
#endif /* IP_REASS_CHECK_OVERLAP */
      /* this is the first fragment we ever received for this ip datagram */
      ipr->p = p;
    }
  }

  /* Track the number of pbufs currently 'in-flight', in order to limit
     the number of fragments that may be enqueued at any one time */
  ip6_reass_pbufcount += clen;

  /* Remember IPv6 header if this is the first fragment. */
  if (iprh->start == 0) {
#if IPV6_FRAG_COPYHEADER
    if (iprh->next_pbuf != NULL) {
      MEMCPY(&ipr->iphdr, ip6_current_header(), IP6_HLEN);
    }
#else /* IPV6_FRAG_COPYHEADER */
    /* need to use the non-const pointer here: */
    ipr->iphdr = ip_data.current_ip6_header;
#endif /* IPV6_FRAG_COPYHEADER */
  }

  /* If this is the last fragment, calculate total packet length. */
  if ((offset & IP6_FRAG_MORE_FLAG) == 0) {
    ipr->datagram_len = iprh->end;
  }

  /* Additional validity tests: we have received first and last fragment. */
  iprh_tmp = (struct ip6_reass_helper*)ipr->p->payload;
  if (iprh_tmp->start != 0) {
    valid = 0;
  }
  if (ipr->datagram_len == 0) {
    valid = 0;
  }

  /* Final validity test: no gaps between current and last fragment. */
  iprh_prev = iprh;
  q = iprh->next_pbuf;
  while ((q != NULL) && valid) {
    iprh = (struct ip6_reass_helper*)q->payload;
    if (iprh_prev->end != iprh->start) {
      valid = 0;
      break;
    }
    iprh_prev = iprh;
    q = iprh->next_pbuf;
  }

  if (valid) {
    /* All fragments have been received */
    struct ip6_hdr* iphdr_ptr;

    /* chain together the pbufs contained within the ip6_reassdata list. */
    iprh = (struct ip6_reass_helper*) ipr->p->payload;
    while (iprh != NULL) {
      struct pbuf* next_pbuf = iprh->next_pbuf;
      if (next_pbuf != NULL) {
        /* Save next helper struct (will be hidden in next step). */
        iprh_tmp = (struct ip6_reass_helper*)next_pbuf->payload;

        /* hide the fragment header for every succeeding fragment */
        pbuf_header(next_pbuf, -IP6_FRAG_HLEN);
#if IPV6_FRAG_COPYHEADER
        if (IPV6_FRAG_REQROOM > 0) {
          /* hide the extra bytes borrowed from ip6_hdr for struct ip6_reass_helper */
          u8_t hdrerr = pbuf_header(next_pbuf, -(s16_t)(IPV6_FRAG_REQROOM));
          LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0);
        }
#endif
        pbuf_cat(ipr->p, next_pbuf);
      }
      else {
        iprh_tmp = NULL;
      }

      iprh = iprh_tmp;
    }

#if IPV6_FRAG_COPYHEADER
    if (IPV6_FRAG_REQROOM > 0) {
      /* get back room for struct ip6_reass_helper (only required if sizeof(void*) > 4) */
      u8_t hdrerr = pbuf_header(ipr->p, -(s16_t)(IPV6_FRAG_REQROOM));
      LWIP_ASSERT("no room for struct ip6_reass_helper", hdrerr == 0);
    }
    iphdr_ptr = (struct ip6_hdr*)((u8_t*)ipr->p->payload - IP6_HLEN);
    MEMCPY(iphdr_ptr, &ipr->iphdr, IP6_HLEN);
#else
    iphdr_ptr = ipr->iphdr;
#endif

    /* Adjust datagram length by adding header lengths. */
    ipr->datagram_len += (u16_t)(((u8_t*)ipr->p->payload - (u8_t*)iphdr_ptr)
                         + IP6_FRAG_HLEN
                         - IP6_HLEN);

    /* Set payload length in ip header. */
    iphdr_ptr->_plen = lwip_htons(ipr->datagram_len);

    /* Get the first pbuf. */
    p = ipr->p;

    /* Restore Fragment Header in first pbuf. Mark as "single fragment"
     * packet. Restore nexth. */
    frag_hdr = (struct ip6_frag_hdr *) p->payload;
    frag_hdr->_nexth = ipr->nexth;
    frag_hdr->reserved = 0;
    frag_hdr->_fragment_offset = 0;
    frag_hdr->_identification = 0;

    /* release the resources allocated for the fragment queue entry */
    if (reassdatagrams == ipr) {
      /* it was the first in the list */
      reassdatagrams = ipr->next;
    } else {
      /* it wasn't the first, so it must have a valid 'prev' */
      LWIP_ASSERT("sanity check linked list", ipr_prev != NULL);
      ipr_prev->next = ipr->next;
    }
    memp_free(MEMP_IP6_REASSDATA, ipr);

    /* adjust the number of pbufs currently queued for reassembly. */
    ip6_reass_pbufcount -= pbuf_clen(p);

    /* Move pbuf back to IPv6 header.
       This cannot fail since we already checked when receiving this fragment. */
    if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)iphdr_ptr))) {
      LWIP_ASSERT("ip6_reass: moving p->payload to ip6 header failed\n", 0);
      pbuf_free(p);
      return NULL;
    }

    /* Return the pbuf chain */
    return p;
  }
  /* the datagram is not (yet?) reassembled completely */
  return NULL;

nullreturn:
  pbuf_free(p);
  return NULL;
}
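ip6_reass_remove_oldest_datagram(), called twice above to make room under
IP_REASS_MAX_PBUFS, is not shown. A hedged sketch of its contract, assuming the
list tail is the oldest entry (new datagrams are prepended to reassdatagrams
above); the real lwIP helper selects victims by their timer value:

/* Sketch only: free the oldest enqueued datagram(s), never 'ipr' itself,
   until 'pbufs_needed' more pbufs would fit under IP_REASS_MAX_PBUFS. */
static void
ip6_reass_remove_oldest_datagram(struct ip6_reassdata *ipr, int pbufs_needed)
{
  struct ip6_reassdata *r, *oldest;

  do {
    oldest = NULL;
    for (r = reassdatagrams; r != NULL; r = r->next) {
      if (r != ipr) {
        oldest = r; /* last match is the list tail, i.e. the oldest entry */
      }
    }
    if (oldest != NULL) {
      ip6_reass_free_complete_datagram(oldest);
    }
  } while ((oldest != NULL) &&
           ((u16_t)(ip6_reass_pbufcount + pbufs_needed) > IP_REASS_MAX_PBUFS));
}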
Example #15
0
/**
 * Free a datagram (struct ip6_reassdata) and all its pbufs.
 * Updates the total count of enqueued pbufs (ip6_reass_pbufcount),
 * sends an ICMP time exceeded packet.
 *
 * @param ipr datagram to free
 */
static void
ip6_reass_free_complete_datagram(struct ip6_reassdata *ipr)
{
  struct ip6_reassdata *prev;
  u16_t pbufs_freed = 0;
  u16_t clen;
  struct pbuf *p;
  struct ip6_reass_helper *iprh;

#if LWIP_ICMP6
  iprh = (struct ip6_reass_helper *)ipr->p->payload;
  if (iprh->start == 0) {
    /* The first fragment was received, send ICMP time exceeded. */
    /* First, de-queue the first pbuf from r->p. */
    p = ipr->p;
    ipr->p = iprh->next_pbuf;
    /* Then, move back to the original ipv6 header (we are now pointing to Fragment header).
       This cannot fail since we already checked when receiving this fragment. */
    if (pbuf_header_force(p, (s16_t)((u8_t*)p->payload - (u8_t*)IPV6_FRAG_HDRREF(ipr->iphdr)))) {
      LWIP_ASSERT("ip6_reass_free: moving p->payload to ip6 header failed\n", 0);
    }
    else {
      icmp6_time_exceeded(p, ICMP6_TE_FRAG);
    }
    clen = pbuf_clen(p);
    LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
    pbufs_freed += clen;
    pbuf_free(p);
  }
#endif /* LWIP_ICMP6 */

  /* First, free all received pbufs.  The individual pbufs need to be released
     separately as they have not yet been chained */
  p = ipr->p;
  while (p != NULL) {
    struct pbuf *pcur;
    iprh = (struct ip6_reass_helper *)p->payload;
    pcur = p;
    /* get the next pointer before freeing */
    p = iprh->next_pbuf;
    clen = pbuf_clen(pcur);
    LWIP_ASSERT("pbufs_freed + clen <= 0xffff", pbufs_freed + clen <= 0xffff);
    pbufs_freed += clen;
    pbuf_free(pcur);
  }

  /* Then, unchain the struct ip6_reassdata from the list and free it. */
  if (ipr == reassdatagrams) {
    reassdatagrams = ipr->next;
  } else {
    prev = reassdatagrams;
    while (prev != NULL) {
      if (prev->next == ipr) {
        break;
      }
      prev = prev->next;
    }
    if (prev != NULL) {
      prev->next = ipr->next;
    }
  }
  memp_free(MEMP_IP6_REASSDATA, ipr);

  /* Finally, update number of pbufs in reassembly queue */
  LWIP_ASSERT("ip_reass_pbufcount >= clen", ip6_reass_pbufcount >= pbufs_freed);
  ip6_reass_pbufcount -= pbufs_freed;
}
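The timer field initialized to IP_REASS_MAXAGE in ip6_reass() is driven by a
periodic reassembly timer; when it expires, the incomplete datagram is discarded
through ip6_reass_free_complete_datagram(). A sketch consistent with that usage:

/* Sketch: called once per reassembly-timer interval. Ages every queued
   datagram and frees those that have timed out. */
void
ip6_reass_tmr(void)
{
  struct ip6_reassdata *r, *tmp;

  r = reassdatagrams;
  while (r != NULL) {
    if (r->timer > 0) {
      r->timer--;
      r = r->next;
    } else {
      /* reassembly timed out: grab the next pointer before freeing */
      tmp = r;
      r = r->next;
      ip6_reass_free_complete_datagram(tmp);
    }
  }
}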
Example #16
0
void
proxy_sendto(SOCKET sock, struct pbuf *p, void *name, size_t namelen)
{
    struct pbuf *q;
    size_t i, clen;
#ifndef RT_OS_WINDOWS
    struct msghdr mh;
#else
    int rc;
#endif
    IOVEC fixiov[8];     /* fixed size (typical case) */
    const size_t fixiovsize = sizeof(fixiov)/sizeof(fixiov[0]);
    IOVEC *dyniov;       /* dynamically sized */
    IOVEC *iov;
    ssize_t nsent;

    /*
     * Static iov[] is usually enough since UDP protocols use small
     * datagrams to avoid fragmentation, but be prepared.
     */
    clen = pbuf_clen(p);
    if (clen > fixiovsize) {
        /*
         * XXX: TODO: check that clen is shorter than IOV_MAX
         */
        dyniov = (IOVEC *)malloc(clen * sizeof(*dyniov));
        if (dyniov == NULL) {
            goto out;
        }
        iov = dyniov;
    }
    else {
        dyniov = NULL;
        iov = fixiov;
    }


    for (q = p, i = 0; i < clen; q = q->next, ++i) {
        LWIP_ASSERT1(q != NULL);

        IOVEC_SET_BASE(iov[i], q->payload);
        IOVEC_SET_LEN(iov[i], q->len);
    }

#ifndef RT_OS_WINDOWS
    memset(&mh, 0, sizeof(mh));
    mh.msg_name = name;
    mh.msg_namelen = namelen;
    mh.msg_iov = iov;
    mh.msg_iovlen = clen;

    nsent = sendmsg(sock, &mh, 0);
    if (nsent < 0) {
        DPRINTF(("%s: fd %d: sendmsg errno %d\n",
                 __func__, sock, errno));
    }
#else
    rc = WSASendTo(sock, iov, (DWORD)clen, (DWORD *)&nsent, 0, name, (int)namelen, NULL, NULL);
    if (rc == SOCKET_ERROR) {
         DPRINTF(("%s: fd %d: WSASendTo error %d\n",
                  __func__, sock, WSAGetLastError()));
    }
#endif

  out:
    if (dyniov != NULL) {
        free(dyniov);
    }
    pbuf_free(p);
}
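IOVEC, IOVEC_SET_BASE() and IOVEC_SET_LEN() abstract over struct iovec (POSIX)
and WSABUF (Windows). A plausible set of shims matching how they are used above
(these exact definitions are an assumption):

/* Hypothetical portability shims for the scatter/gather buffers used by
   proxy_sendto(): POSIX sendmsg() takes struct iovec, while WSASendTo()
   takes WSABUF, whose length field is a ULONG. */
#ifndef RT_OS_WINDOWS
typedef struct iovec IOVEC;
# define IOVEC_SET_BASE(iov, b) ((iov).iov_base = (b))
# define IOVEC_SET_LEN(iov, l)  ((iov).iov_len = (l))
#else
typedef WSABUF IOVEC;
# define IOVEC_SET_BASE(iov, b) ((iov).buf = (char *)(b))
# define IOVEC_SET_LEN(iov, l)  ((iov).len = (ULONG)(l))
#endif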