/**
 * Fragment an IPv6 datagram if too large for the netif or path MTU.
 *
 * Chop the datagram into MTU-sized chunks and send them in order
 * by pointing PBUF_REFs into p.
 *
 * @param p ipv6 packet to send
 * @param netif the netif on which to send
 * @param dest destination ipv6 address to which to send
 *
 * @return ERR_OK if sent successfully, err_t otherwise
 */
err_t
ip6_frag(struct pbuf *p, struct netif *netif, const ip6_addr_t *dest)
{
  struct ip6_hdr *original_ip6hdr;
  struct ip6_hdr *ip6hdr;
  struct ip6_frag_hdr * frag_hdr;
  struct pbuf *rambuf;
  struct pbuf *newpbuf;
  static u32_t identification;
  u16_t nfb;
  u16_t left, cop;
  u16_t mtu;
  u16_t fragment_offset = 0;
  u16_t last;
  u16_t poff = IP6_HLEN;
  u16_t newpbuflen = 0;
  u16_t left_to_copy;

  identification++;

  original_ip6hdr = (struct ip6_hdr *)p->payload;

  mtu = nd6_get_destination_mtu(dest, netif);

  /* TODO we assume there are no options in the unfragmentable part (IPv6 header). */
  left = p->tot_len - IP6_HLEN;

  nfb = (mtu - (IP6_HLEN + IP6_FRAG_HLEN)) & IP6_FRAG_OFFSET_MASK;

  while (left) {
    last = (left <= nfb);

    /* Fill this fragment */
    cop = last ? left : nfb;

    /* When not using a static buffer, create a chain of pbufs.
     * The first will be a PBUF_RAM holding the link, IPv6, and Fragment header.
     * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged,
     * but limited to the size of an mtu.
     */
    rambuf = pbuf_alloc(PBUF_LINK, IP6_HLEN + IP6_FRAG_HLEN, PBUF_RAM);
    if (rambuf == NULL) {
      IP6_FRAG_STATS_INC(ip6_frag.memerr);
      return ERR_MEM;
    }
    LWIP_ASSERT("this needs a pbuf in one piece!",
                (p->len >= (IP6_HLEN + IP6_FRAG_HLEN)));
    SMEMCPY(rambuf->payload, original_ip6hdr, IP6_HLEN);
    ip6hdr = (struct ip6_hdr *)rambuf->payload;
    frag_hdr = (struct ip6_frag_hdr *)((u8_t*)rambuf->payload + IP6_HLEN);

    /* Can just adjust p directly for needed offset. */
    p->payload = (u8_t *)p->payload + poff;
    p->len -= poff;
    p->tot_len -= poff;

    left_to_copy = cop;
    while (left_to_copy) {
      struct pbuf_custom_ref *pcr;
      newpbuflen = (left_to_copy < p->len) ? left_to_copy : p->len;
      /* Is this pbuf already empty? */
      if (!newpbuflen) {
        p = p->next;
        continue;
      }
      pcr = ip6_frag_alloc_pbuf_custom_ref();
      if (pcr == NULL) {
        pbuf_free(rambuf);
        IP6_FRAG_STATS_INC(ip6_frag.memerr);
        return ERR_MEM;
      }
      /* Mirror this pbuf, although we might not need all of it. */
      newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, p->payload, newpbuflen);
      if (newpbuf == NULL) {
        ip6_frag_free_pbuf_custom_ref(pcr);
        pbuf_free(rambuf);
        IP6_FRAG_STATS_INC(ip6_frag.memerr);
        return ERR_MEM;
      }
      pbuf_ref(p);
      pcr->original = p;
      pcr->pc.custom_free_function = ip6_frag_free_pbuf_custom;

      /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain
       * so that it is removed when pbuf_dechain is later called on rambuf.
       */
      pbuf_cat(rambuf, newpbuf);
      left_to_copy -= newpbuflen;
      if (left_to_copy) {
        p = p->next;
      }
    }
    poff = newpbuflen;

    /* Set headers */
    frag_hdr->_nexth = original_ip6hdr->_nexth;
    frag_hdr->reserved = 0;
    frag_hdr->_fragment_offset = htons((fragment_offset & IP6_FRAG_OFFSET_MASK) | (last ? 0 : IP6_FRAG_MORE_FLAG));
    frag_hdr->_identification = htonl(identification);

    IP6H_NEXTH_SET(ip6hdr, IP6_NEXTH_FRAGMENT);
    IP6H_PLEN_SET(ip6hdr, cop + IP6_FRAG_HLEN);

    /* No need for separate header pbuf - we allowed room for it in rambuf
     * when allocated.
     */
    IP6_FRAG_STATS_INC(ip6_frag.xmit);
    netif->output_ip6(netif, rambuf, dest);

    /* Unfortunately we can't reuse rambuf - the hardware may still be
     * using the buffer. Instead we free it (and the ensuing chain) and
     * recreate it next time round the loop. If we're lucky the hardware
     * will have already sent the packet, the free will really free, and
     * there will be zero memory penalty.
     */

    pbuf_free(rambuf);
    left -= cop;
    fragment_offset += cop;
  }
  return ERR_OK;
}
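
The per-fragment payload nfb above is rounded down to a multiple of 8 because the IPv6 Fragment header's offset field counts 8-octet units. Below is a minimal standalone sketch (not lwIP code) of that sizing math, assuming the RFC 8200 sizes IP6_HLEN = 40 and IP6_FRAG_HLEN = 8, with the offset mask taken as 0xFFF8:

#include <stdint.h>
#include <stdio.h>

#define IP6_HLEN              40u      /* IPv6 base header (RFC 8200) */
#define IP6_FRAG_HLEN          8u      /* Fragment extension header */
#define IP6_FRAG_OFFSET_MASK  0xFFF8u  /* offset is in 8-octet units */

int main(void)
{
  uint16_t mtu = 1500;
  uint16_t payload = 4000;  /* bytes following the IPv6 header */
  /* same computation as in ip6_frag(): round down to a multiple of 8 */
  uint16_t nfb = (uint16_t)((mtu - (IP6_HLEN + IP6_FRAG_HLEN)) & IP6_FRAG_OFFSET_MASK);
  uint16_t frags = (uint16_t)((payload + nfb - 1) / nfb);

  printf("per-fragment payload: %u bytes, fragments: %u\n", nfb, frags);
  return 0;
}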
Example #2
/*-----------------------------------------------------------------------------------*/
struct pbuf *
etharp_arp_input(struct netif *netif, struct eth_addr *ethaddr, struct pbuf *p, struct pbuf **queued)
{
    struct etharp_hdr *hdr;
    u8_t i;

    if(p->tot_len < sizeof(struct etharp_hdr)) {
        DEBUGF(ETHARP_DEBUG, ("etharp_etharp_input: packet too short (%d/%d)\n", (int) p->tot_len, (int) sizeof(struct etharp_hdr)));
        return NULL;
    }

    hdr = p->payload;

    switch(ntohs(hdr->opcode)) {
    case ARP_REQUEST:
        *queued = update_arp_entry(&(hdr->sipaddr), &(hdr->shwaddr), 0);
        /* ARP request. If it asked for our address, we send out a
           reply. */
        DEBUGF(ETHARP_DEBUG, ("etharp_arp_input: ARP request\n"));
        if(!memcmp(&(hdr->dipaddr), &(netif->ip_addr), sizeof (hdr->dipaddr))) {
            pbuf_ref(p);
            hdr->opcode = htons(ARP_REPLY);

            memcpy (&(hdr->dipaddr), &(hdr->sipaddr), sizeof (hdr->dipaddr));
            memcpy (&(hdr->sipaddr), &(netif->ip_addr), sizeof (hdr->sipaddr));

            for(i = 0; i < 6; ++i) {
                hdr->dhwaddr.addr[i] = hdr->shwaddr.addr[i];
                hdr->shwaddr.addr[i] = ethaddr->addr[i];
                hdr->ethhdr.dest.addr[i] = hdr->dhwaddr.addr[i];
                hdr->ethhdr.src.addr[i] = ethaddr->addr[i];
            }

            hdr->hwtype = htons(HWTYPE_ETHERNET);
            ARPH_HWLEN_SET(hdr, 6);

            hdr->proto = htons(ETHTYPE_IP);
            ARPH_PROTOLEN_SET(hdr, sizeof(struct ip_addr));

            hdr->ethhdr.type = htons(ETHTYPE_ARP);
            return p;
        }
        break;
    case ARP_REPLY:
        /* ARP reply. We insert or update the ARP table. */
        DEBUGF(ETHARP_DEBUG, ("etharp_arp_input: ARP reply\n"));
        if(!memcmp (&(hdr->dipaddr), &(netif->ip_addr), sizeof (hdr->dipaddr))) {
#if (LWIP_DHCP && DHCP_DOES_ARP_CHECK)
            dhcp_arp_reply(&hdr->sipaddr);
#endif
            /* add_arp_entry() will return a pbuf that has previously been
            queued waiting for an ARP reply. */
        }
        /* whether it's destined for us or not, we update the ARP table */
        return add_arp_entry(&(hdr->sipaddr), &(hdr->shwaddr));
    default:
        DEBUGF(ETHARP_DEBUG, ("etharp_arp_input: unknown type %d\n", htons(hdr->opcode)));
        break;
    }

    return NULL;
}
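
A detail worth noting in the ARP_REQUEST branch above: the reply is built by rewriting the request packet in place (swap sender/target fields, flip the opcode), so no second pbuf is allocated. A standalone sketch of the same pattern with a toy header (field names are illustrative, not lwIP's):

#include <stdio.h>
#include <string.h>

struct toy_arp {
  unsigned short opcode;           /* 1 = request, 2 = reply */
  unsigned char  shw[6], dhw[6];   /* sender / target hardware address */
  unsigned long  sip, dip;         /* sender / target protocol address */
};

static void make_reply_in_place(struct toy_arp *h,
                                const unsigned char my_hw[6],
                                unsigned long my_ip)
{
  h->opcode = 2;
  memcpy(h->dhw, h->shw, 6);       /* old sender becomes the target */
  memcpy(h->shw, my_hw, 6);        /* we become the sender */
  h->dip = h->sip;
  h->sip = my_ip;
}

int main(void)
{
  static const unsigned char mac[6] = { 2, 0, 0, 0, 0, 1 };
  struct toy_arp h = { 1, {2,0,0,0,0,9}, {0}, 0x0a000002ul, 0x0a000001ul };
  make_reply_in_place(&h, mac, 0x0a000001ul);
  printf("opcode=%hu sip=%lx dip=%lx\n", h.opcode, h.sip, h.dip);
  return 0;
}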
Example #3
/**
 * Fragment an IP datagram if too large for the netif.
 *
 * Chop the datagram into MTU-sized chunks and send them in order
 * by using a fixed size static memory buffer (PBUF_REF) or
 * pointing PBUF_REFs into p (depending on IP_FRAG_USES_STATIC_BUF).
 *
 * @param p ip packet to send
 * @param netif the netif on which to send
 * @param dest destination ip address to which to send
 *
 * @return ERR_OK if sent successfully, err_t otherwise
 */
err_t 
ip_frag(struct pbuf *p, struct netif *netif, ip_addr_t *dest)
{
  struct pbuf *rambuf;
#if IP_FRAG_USES_STATIC_BUF
  struct pbuf *header;
#else
#if !LWIP_NETIF_TX_SINGLE_PBUF
  struct pbuf *newpbuf;
#endif
  struct ip_hdr *original_iphdr;
#endif
  struct ip_hdr *iphdr;
  u16_t nfb;
  u16_t left, cop;
  u16_t mtu = netif->mtu;
  u16_t ofo, omf;
  u16_t last;
  u16_t poff = IP_HLEN;
  u16_t tmp;
#if !IP_FRAG_USES_STATIC_BUF && !LWIP_NETIF_TX_SINGLE_PBUF
  u16_t newpbuflen = 0;
  u16_t left_to_copy;
#endif

  /* Get a RAM based MTU sized pbuf */
#if IP_FRAG_USES_STATIC_BUF
  /* When using a static buffer, we use a PBUF_REF, which we will
   * use to reference the packet (without link header).
   * Layer and length is irrelevant.
   */
  rambuf = pbuf_alloc(PBUF_LINK, 0, PBUF_REF);
  if (rambuf == NULL) {
    LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_frag: pbuf_alloc(PBUF_LINK, 0, PBUF_REF) failed\n"));
    return ERR_MEM;
  }
  rambuf->tot_len = rambuf->len = mtu;
  rambuf->payload = LWIP_MEM_ALIGN((void *)buf);

  /* Copy the IP header in it */
  iphdr = (struct ip_hdr *)rambuf->payload;
  SMEMCPY(iphdr, p->payload, IP_HLEN);
#else /* IP_FRAG_USES_STATIC_BUF */
  original_iphdr = (struct ip_hdr *)p->payload;
  iphdr = original_iphdr;
#endif /* IP_FRAG_USES_STATIC_BUF */

  /* Save original offset */
  tmp = ntohs(IPH_OFFSET(iphdr));
  ofo = tmp & IP_OFFMASK;
  omf = tmp & IP_MF;

  left = p->tot_len - IP_HLEN;

  nfb = (mtu - IP_HLEN) / 8;

  while (left) {
    last = (left <= mtu - IP_HLEN);

    /* Set new offset and MF flag */
    tmp = omf | (IP_OFFMASK & (ofo));
    if (!last) {
      tmp = tmp | IP_MF;
    }

    /* Fill this fragment */
    cop = last ? left : nfb * 8;

#if IP_FRAG_USES_STATIC_BUF
    poff += pbuf_copy_partial(p, (u8_t*)iphdr + IP_HLEN, cop, poff);
#else /* IP_FRAG_USES_STATIC_BUF */
#if LWIP_NETIF_TX_SINGLE_PBUF
    rambuf = pbuf_alloc(PBUF_IP, cop, PBUF_RAM);
    if (rambuf == NULL) {
      return ERR_MEM;
    }
    LWIP_ASSERT("this needs a pbuf in one piece!",
      (rambuf->len == rambuf->tot_len) && (rambuf->next == NULL));
    poff += pbuf_copy_partial(p, rambuf->payload, cop, poff);
    /* make room for the IP header */
    if(pbuf_header(rambuf, IP_HLEN)) {
      pbuf_free(rambuf);
      return ERR_MEM;
    }
    /* fill in the IP header */
    SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
    iphdr = rambuf->payload;
#else /* LWIP_NETIF_TX_SINGLE_PBUF */
    /* When not using a static buffer, create a chain of pbufs.
     * The first will be a PBUF_RAM holding the link and IP header.
     * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged,
     * but limited to the size of an mtu.
     */
    rambuf = pbuf_alloc(PBUF_LINK, IP_HLEN, PBUF_RAM);
    if (rambuf == NULL) {
      return ERR_MEM;
    }
    LWIP_ASSERT("this needs a pbuf in one piece!",
                (p->len >= (IP_HLEN)));
    SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
    iphdr = (struct ip_hdr *)rambuf->payload;

    /* Can just adjust p directly for needed offset. */
    p->payload = (u8_t *)p->payload + poff;
    p->len -= poff;

    left_to_copy = cop;
    while (left_to_copy) {
      struct pbuf_custom_ref *pcr;
      newpbuflen = (left_to_copy < p->len) ? left_to_copy : p->len;
      /* Is this pbuf already empty? */
      if (!newpbuflen) {
        p = p->next;
        continue;
      }
      pcr = ip_frag_alloc_pbuf_custom_ref();
      if (pcr == NULL) {
        pbuf_free(rambuf);
        return ERR_MEM;
      }
      /* Mirror this pbuf, although we might not need all of it. */
      newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, p->payload, newpbuflen);
      if (newpbuf == NULL) {
        ip_frag_free_pbuf_custom_ref(pcr);
        pbuf_free(rambuf);
        return ERR_MEM;
      }
      pbuf_ref(p);
      pcr->original = p;
      pcr->pc.custom_free_function = ipfrag_free_pbuf_custom;

      /* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain
       * so that it is removed when pbuf_dechain is later called on rambuf.
       */
      pbuf_cat(rambuf, newpbuf);
      left_to_copy -= newpbuflen;
      if (left_to_copy) {
        p = p->next;
      }
    }
    poff = newpbuflen;
#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
#endif /* IP_FRAG_USES_STATIC_BUF */

    /* Correct header */
    IPH_OFFSET_SET(iphdr, htons(tmp));
    IPH_LEN_SET(iphdr, htons(cop + IP_HLEN));
    IPH_CHKSUM_SET(iphdr, 0);
    IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN));

#if IP_FRAG_USES_STATIC_BUF
    if (last) {
      pbuf_realloc(rambuf, left + IP_HLEN);
    }

    /* This part is ugly: we alloc a RAM based pbuf for
     * the link level header for each chunk and then
     * free it. A PBUF_ROM style pbuf for which pbuf_header
     * worked would make things simpler.
     */
    header = pbuf_alloc(PBUF_LINK, 0, PBUF_RAM);
    if (header != NULL) {
      pbuf_chain(header, rambuf);
      netif->output(netif, header, dest);
      IPFRAG_STATS_INC(ip_frag.xmit);
      snmp_inc_ipfragcreates();
      pbuf_free(header);
    } else {
      LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_frag: pbuf_alloc() for header failed\n"));
      pbuf_free(rambuf);
      return ERR_MEM;
    }
#else /* IP_FRAG_USES_STATIC_BUF */
    /* No need for separate header pbuf - we allowed room for it in rambuf
     * when allocated.
     */
    netif->output(netif, rambuf, dest);
    IPFRAG_STATS_INC(ip_frag.xmit);

    /* Unfortunately we can't reuse rambuf - the hardware may still be
     * using the buffer. Instead we free it (and the ensuing chain) and
     * recreate it next time round the loop. If we're lucky the hardware
     * will have already sent the packet, the free will really free, and
     * there will be zero memory penalty.
     */
    
    pbuf_free(rambuf);
#endif /* IP_FRAG_USES_STATIC_BUF */
    left -= cop;
    ofo += nfb;
  }
#if IP_FRAG_USES_STATIC_BUF
  pbuf_free(rambuf);
#endif /* IP_FRAG_USES_STATIC_BUF */
  snmp_inc_ipfragoks();
  return ERR_OK;
}
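
For reference, IPv4 carries the fragment offset in the low 13 bits of the offset word, in 8-octet blocks, with IP_MF (0x2000) set on every fragment except the last; that is what the tmp/ofo bookkeeping above computes. A standalone sketch of the same arithmetic, assuming an unfragmented original datagram (so the initial offset and MF flag are zero):

#include <stdio.h>

#define IP_HLEN    20u
#define IP_MF      0x2000u   /* more-fragments flag */
#define IP_OFFMASK 0x1FFFu   /* offset field, in 8-byte blocks */

int main(void)
{
  unsigned mtu = 576, left = 2000, ofo = 0;
  unsigned nfb = (mtu - IP_HLEN) / 8;   /* whole 8-byte blocks per fragment */

  while (left) {
    unsigned last = (left <= mtu - IP_HLEN);
    unsigned cop = last ? left : nfb * 8;
    unsigned word = (IP_OFFMASK & ofo) | (last ? 0 : IP_MF);
    printf("offset %4u bytes, payload %3u, offset word 0x%04x\n",
           ofo * 8, cop, word);
    left -= cop;
    ofo += nfb;
  }
  return 0;
}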
Example #4
/** \brief  Low level output of a packet. Never call this from an
 *          interrupt context, as it may block until TX descriptors
 *          become available.
 *
 *  \param[in] netif the lwip network interface structure for this lpc_enetif
 *  \param[in] sendp the MAC packet to send (e.g. IP packet including MAC addresses and type)
 *  \return ERR_OK if the packet could be sent or
 *         an err_t value if the packet couldn't be sent
 */
static err_t lpc_low_level_output(struct netif *netif, struct pbuf *sendp)
{
	struct lpc_enetdata *lpc_netifdata = netif->state;
	u32_t idx, fidx, dn, fdn;
	struct pbuf *p = sendp;

#if LPC_CHECK_SLOWMEM == 1
	struct pbuf *q, *wp;
	u8_t *dst;
	int pcopy = 0;

	/* Check packet address to determine if it's in slow memory and
	   relocate if necessary */
 	for(q = p; ((q != NULL) && (pcopy == 0)); q = q->next) {
		fidx = 0;
		for (idx = 0; idx < sizeof(slmem);
			idx += sizeof(struct lpc_slowmem_array_t)) {
			if ((q->payload >= (void *) slmem[fidx].start) &&
				(q->payload <= (void *) slmem[fidx].end)) {
				/* Needs copy */
				pcopy = 1;
			}
			fidx++; /* advance to the next slow-memory region */
		}
	}

	if (pcopy) {
		/* Create a new pbuf with the total pbuf size */
		wp = pbuf_alloc(PBUF_RAW, (u16_t) EMAC_ETH_MAX_FLEN, PBUF_RAM);
		if (!wp) {
			/* Exit with error */
			return ERR_MEM;
		}

		/* Copy pbuf */
		dst = (u8_t *) wp->payload;
		wp->tot_len = 0;
 		for(q = p; q != NULL; q = q->next) {
			MEMCPY(dst, (u8_t *) q->payload, q->len);
			dst += q->len;
			wp->tot_len += q->len;
		}
		wp->len = wp->tot_len;

		/* LWIP will free original pbuf on exit of function */

		p = sendp = wp;
	}
#endif

	/* Zero-copy TX buffers may be fragmented across multiple payload
	   chains. Determine the number of descriptors needed for the
	   transfer. The pbuf chaining can be a mess! */
	dn = (u32_t) pbuf_clen(p);

	/* Wait until enough descriptors are available for the transfer. */
	/* THIS WILL BLOCK UNTIL THERE ARE ENOUGH DESCRIPTORS AVAILABLE */
	while (dn > lpc_tx_ready(netif))
#if NO_SYS == 0
		xSemaphoreTake(lpc_netifdata->xTXDCountSem, 0);
#else
		msDelay(1);
#endif

	/* Get the next free descriptor index */
	fidx = idx = lpc_netifdata->tx_fill_idx;

#if NO_SYS == 0
	/* Get exclusive access */
	sys_mutex_lock(&lpc_netifdata->TXLockMutex);
#endif

	/* Fill in the next free descriptor(s) */
	while (dn > 0) {
		dn--;

		/* Setup packet address and length */
		lpc_netifdata->ptdesc[idx].B1ADD = (u32_t) p->payload;
		lpc_netifdata->ptdesc[idx].BSIZE = (u32_t) TDES_ENH_BS1(p->len);

		/* Save pointer to pbuf so we can reclaim the memory for
		   the pbuf after the buffer has been sent. Only the first
		   pbuf in a chain is saved since the full chain doesn't
		   need to be freed. */
		/* For first packet only, first flag */
		lpc_netifdata->tx_free_descs--;
		if (idx == fidx) {
			lpc_netifdata->ptdesc[idx].CTRLSTAT |= TDES_ENH_FS;

#if LPC_CHECK_SLOWMEM == 1
			/* If this is a copied pbuf, then avoid getting the extra reference
			   or the TX reclaim will be off by 1 */
			if (!pcopy)
				pbuf_ref(p);
#else
			/* Increment reference count on this packet so LWIP doesn't
			   attempt to free it on return from this call */
				pbuf_ref(p);
#endif
		} else
			lpc_netifdata->ptdesc[idx].CTRLSTAT |= TDES_OWN;

		/* Save address of pbuf, but make sure it's associated with the
		   first chained pbuf so it gets freed once all pbuf chains are
		   transferred. */
		if (!dn)
			lpc_netifdata->txpbufs[idx] = sendp;
		else
			lpc_netifdata->txpbufs[idx] = NULL;

		/* For last packet only, interrupt and last flag */
		if (dn == 0)
			lpc_netifdata->ptdesc[idx].CTRLSTAT |= TDES_ENH_LS |
				TDES_ENH_IC;

		/* FIXME: For now, only IP header checksumming */
		lpc_netifdata->ptdesc[idx].CTRLSTAT |= TDES_ENH_CIC(3);

		LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
			("lpc_low_level_output: pbuf packet %p sent, chain %d,"
			" size %d, index %d, free %d\n", p, dn, p->len, idx,
			lpc_netifdata->tx_free_descs));

		/* Update next available descriptor */
		idx++;
		if (idx >= LPC_NUM_BUFF_TXDESCS)
			idx = 0;

		/* Next packet fragment */
		p = p->next;
	}

	lpc_netifdata->tx_fill_idx = idx;

	LINK_STATS_INC(link.xmit);

	/* Give first descriptor to DMA to start transfer */
	lpc_netifdata->ptdesc[fidx].CTRLSTAT |= TDES_OWN;

	/* Tell DMA to poll descriptors to start transfer */
	LPC_ETHERNET->DMA_TRANS_POLL_DEMAND = 1;

#if NO_SYS == 0
	/* Restore access */
	sys_mutex_unlock(&lpc_netifdata->TXLockMutex);
#endif

	return ERR_OK;
}
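
Two patterns in this driver are worth calling out: pbuf_ref() keeps the chain alive until the TX-reclaim path frees it after DMA completes, and the first descriptor's OWN bit is set only after the whole chain is filled, so the DMA engine never sees a half-built frame. A standalone sketch of the ring-index walk (ring size and chain length are illustrative):

#include <stdio.h>

#define NUM_TXDESC 8u

int main(void)
{
  unsigned idx = 6, first = idx;

  for (unsigned n = 4; n > 0; n--) {   /* one descriptor per pbuf in chain */
    printf("fill descriptor %u%s\n", idx,
           idx == first ? " (first: OWN bit set last)" : "");
    idx = (idx + 1) % NUM_TXDESC;      /* wrap at the end of the ring */
  }
  printf("next free index: %u\n", idx);
  return 0;
}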
Example #5
static err_t mg_lwip_tcp_recv_cb(void *arg, struct tcp_pcb *tpcb,
                                 struct pbuf *p, err_t err) {
  struct mg_connection *nc = (struct mg_connection *) arg;
  DBG(("%p %p %u %d", nc, tpcb, (p != NULL ? p->tot_len : 0), err));
  if (p == NULL) {
    if (nc != NULL) {
      mg_lwip_post_signal(MG_SIG_CLOSE_CONN, nc);
    } else {
      /* Tombstoned connection, do nothing. */
    }
    return ERR_OK;
  } else if (nc == NULL) {
    tcp_abort(tpcb);
    return ERR_ARG;
  }
  struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
  /*
   * If we get a chain of more than one segment at once, we need to bump
   * refcount on the subsequent bufs to make them independent.
   */
  if (p->next != NULL) {
    struct pbuf *q = p->next;
    for (; q != NULL; q = q->next) pbuf_ref(q);
  }
  if (cs->rx_chain == NULL) {
    cs->rx_chain = p;
    cs->rx_offset = 0;
  } else {
    if (pbuf_clen(cs->rx_chain) >= 4) {
      /* ESP SDK has a limited pool of 5 pbufs. We must not hog them all or RX
       * will be completely blocked. We already have at least 4 in the chain,
       * this one is, so we have to make a copy and release this one. */
      struct pbuf *np = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
      if (np != NULL) {
        pbuf_copy(np, p);
        pbuf_free(p);
        p = np;
      }
    }
    pbuf_chain(cs->rx_chain, p);
  }

#ifdef SSL_KRYPTON
  if (nc->ssl != NULL) {
    if (nc->flags & MG_F_SSL_HANDSHAKE_DONE) {
      mg_lwip_ssl_recv(nc);
    } else {
      mg_lwip_ssl_do_hs(nc);
    }
    return ERR_OK;
  }
#endif

  while (cs->rx_chain != NULL) {
    struct pbuf *seg = cs->rx_chain;
    size_t len = (seg->len - cs->rx_offset);
    char *data = (char *) malloc(len);
    if (data == NULL) {
      DBG(("OOM"));
      return ERR_MEM;
    }
    pbuf_copy_partial(seg, data, len, cs->rx_offset);
    mg_if_recv_tcp_cb(nc, data, len); /* callee takes over data */
    cs->rx_offset += len;
    if (cs->rx_offset == cs->rx_chain->len) {
      cs->rx_chain = pbuf_dechain(cs->rx_chain);
      pbuf_free(seg);
      cs->rx_offset = 0;
    }
  }

  if (nc->send_mbuf.len > 0) {
    mg_lwip_mgr_schedule_poll(nc->mgr);
  }
  return ERR_OK;
}
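
The drain loop above is the generic lwIP receive pattern: copy out of the head pbuf, and once it is fully consumed, dechain it so the rest of the chain (whose refcounts were bumped earlier) survives the free. A minimal sketch of that loop, assuming an lwIP build (pbuf_copy_partial/pbuf_dechain/pbuf_free as in the code above); error handling and the partial-read offset are simplified:

#include "lwip/pbuf.h"

static void drain_chain(struct pbuf **chain,
                        void (*deliver)(const char *data, u16_t len))
{
  char buf[512];

  while (*chain != NULL) {
    struct pbuf *seg = *chain;
    u16_t len = seg->len < sizeof(buf) ? seg->len : (u16_t)sizeof(buf);
    pbuf_copy_partial(seg, buf, len, 0);
    deliver(buf, len);
    if (len == seg->len) {
      *chain = pbuf_dechain(seg);   /* detach head; rest keeps its refs */
      pbuf_free(seg);
    } else {
      break;  /* a real implementation tracks rx_offset, as above */
    }
  }
}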
Example #6
/**
 * Send an ARP request for the given IP address.
 *
 * Sends an ARP request for the given IP address, unless
 * a request for this address is already pending. Optionally
 * queues an outgoing packet on the resulting ARP entry.
 *
 * @param netif The lwIP network interface where ipaddr
 * must be queried for.
 * @param ipaddr The IP address to be resolved.
 * @param q If non-NULL, a pbuf that must be queued on the
 * ARP entry for the ipaddr IP address.
 *
 * @return NULL.
 *
 * @note Might be used in the future by manual IP configuration
 * as well.
 *
 * TODO: use the ctime field to see how long ago an ARP request was sent,
 * possibly retry.
 */
err_t etharp_query(struct netif *netif, struct ip_addr *ipaddr, struct pbuf *q)
{
  struct eth_addr *srcaddr;
  struct etharp_hdr *hdr;
  struct pbuf *p;
  err_t result = ERR_OK;
  u8_t i;
  u8_t perform_arp_request = 1;
  /* prevent 'unused argument' warning if ARP_QUEUEING == 0 */
  (void)q;
  srcaddr = (struct eth_addr *)netif->hwaddr;
  /* bail out if this IP address is pending */
  for (i = 0; i < ARP_TABLE_SIZE; ++i) {
    if (ip_addr_cmp(ipaddr, &arp_table[i].ipaddr)) {
      if (arp_table[i].state == ETHARP_STATE_PENDING) {
        DEBUGF(ETHARP_DEBUG | DBG_TRACE | DBG_STATE, ("etharp_query: requested IP already pending as entry %u\n", i));
        /* break out of for-loop, user may wish to queue a packet on a stable entry */
        /* TODO: we will issue a new ARP request, which should not occur too often */
        /* we might want to run a faster timer on ARP to limit this */
        break;
      }
      else if (arp_table[i].state == ETHARP_STATE_STABLE) {
        DEBUGF(ETHARP_DEBUG | DBG_TRACE | DBG_STATE, ("etharp_query: requested IP already stable as entry %u\n", i));
        /* user may wish to queue a packet on a stable entry, so we proceed without ARP requesting */
        /* TODO: even if the ARP entry is stable, we might do an ARP request anyway */
        perform_arp_request = 0;
        break;
      }
    }
  }
  /* queried address not yet in ARP table? */
  if (i == ARP_TABLE_SIZE) {
    DEBUGF(ETHARP_DEBUG | DBG_TRACE, ("etharp_query: IP address not found in ARP table\n"));
    /* find an available entry */
    i = find_arp_entry();
    /* bail out if no ARP entries are available */
    if (i == ARP_TABLE_SIZE) {
      DEBUGF(ETHARP_DEBUG | 2, ("etharp_query: no more ARP entries available.\n"));
      return ERR_MEM;
    }
    /* we will now recycle entry i */
    DEBUGF(ETHARP_DEBUG | DBG_TRACE, ("etharp_query: created ARP table entry %u.\n", i));
    /* i is available, create ARP entry */
    ip_addr_set(&arp_table[i].ipaddr, ipaddr);
    arp_table[i].ctime = 0;
    arp_table[i].state = ETHARP_STATE_PENDING;
#if ARP_QUEUEING
    /* free queued packet, as entry is now invalidated */
    if (arp_table[i].p != NULL) {
      pbuf_free(arp_table[i].p);
      arp_table[i].p = NULL;
      DEBUGF(ETHARP_DEBUG | DBG_TRACE | 3, ("etharp_query: dropped packet on ARP queue. Should not occur.\n"));
    }
#endif
  }
#if ARP_QUEUEING
  /* any pbuf to queue and queue is empty? */
  if (q != NULL) {
/* yield later packets over older packets? */
#if ARP_QUEUE_FIRST == 0
    /* earlier queued packet on this entry? */
    if (arp_table[i].p != NULL) {
      pbuf_free(arp_table[i].p);
      arp_table[i].p = NULL;
      DEBUGF(ETHARP_DEBUG | DBG_TRACE | 3, ("etharp_query: dropped packet on ARP queue. Should not occur.\n"));
      /* fall-through into next if */
    }
#endif
    /* packet can be queued? */
    if (arp_table[i].p == NULL) {
      /* copy PBUF_REF referenced payloads into PBUF_RAM */
      q = pbuf_take(q);
      /* remember pbuf to queue, if any */
      arp_table[i].p = q;
      /* pbufs are queued, increase the reference count */
      pbuf_ref(q);
      DEBUGF(ETHARP_DEBUG | DBG_TRACE | DBG_STATE, ("etharp_query: queued packet %p on ARP entry %u.\n", (void *)q, i));
    }
  }
#endif
  /* ARP request? */
  if (perform_arp_request)
  {
    /* allocate a pbuf for the outgoing ARP request packet */
    p = pbuf_alloc(PBUF_LINK, sizeof(struct etharp_hdr), PBUF_RAM);
    /* could allocate pbuf? */
    if (p != NULL) {
      u8_t j;
      DEBUGF(ETHARP_DEBUG | DBG_TRACE, ("etharp_query: sending ARP request.\n"));
      hdr = p->payload;
      hdr->opcode = htons(ARP_REQUEST);
      for(j = 0; j < netif->hwaddr_len; ++j)
      {
        hdr->dhwaddr.addr[j] = 0x00;
        hdr->shwaddr.addr[j] = srcaddr->addr[j];
      }
      ip_addr_set(&(hdr->dipaddr), ipaddr);
      ip_addr_set(&(hdr->sipaddr), &(netif->ip_addr));

      hdr->hwtype = htons(HWTYPE_ETHERNET);
      ARPH_HWLEN_SET(hdr, netif->hwaddr_len);

      hdr->proto = htons(ETHTYPE_IP);
      ARPH_PROTOLEN_SET(hdr, sizeof(struct ip_addr));
      for(j = 0; j < netif->hwaddr_len; ++j)
      {
        hdr->ethhdr.dest.addr[j] = 0xff;
        hdr->ethhdr.src.addr[j] = srcaddr->addr[j];
      }
      hdr->ethhdr.type = htons(ETHTYPE_ARP);      
      /* send ARP query */
      result = netif->linkoutput(netif, p);
      /* free ARP query packet */
      pbuf_free(p);
      p = NULL;
    } else {
      result = ERR_MEM;
      DEBUGF(ETHARP_DEBUG | DBG_TRACE | 2, ("etharp_query: could not allocate pbuf for ARP request.\n"));
    }
  }
  return result;
}
Example #7
XStatus emacps_sgsend(xemacpsif_s *xemacpsif, struct pbuf *p)
{
	struct pbuf *q;
	int n_pbufs;
	XEmacPs_Bd *txbdset, *txbd, *last_txbd = NULL;
	XStatus Status;
	XEmacPs_BdRing *txring;
	unsigned int BdIndex;
	unsigned int lev;

	lev = mfcpsr();
	mtcpsr(lev | 0x000000C0);

#ifdef PEEP
    while((XEmacPs_ReadReg((xemacpsif->emacps).Config.BaseAddress,
    									XEMACPS_TXSR_OFFSET)) & 0x08);
#endif
	txring = &(XEmacPs_GetTxRing(&xemacpsif->emacps));

	/* first count the number of pbufs */
	for (q = p, n_pbufs = 0; q != NULL; q = q->next)
		n_pbufs++;

	/* obtain that many BDs */
	Status = XEmacPs_BdRingAlloc(txring, n_pbufs, &txbdset);
	if (Status != XST_SUCCESS) {
		mtcpsr(lev);
		LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error allocating TxBD\r\n"));
		return ERR_IF;
	}

	for(q = p, txbd = txbdset; q != NULL; q = q->next) {
		BdIndex = XEMACPS_BD_TO_INDEX(txring, txbd);
		if (tx_pbufs_storage[BdIndex] != 0) {
			mtcpsr(lev);
			LWIP_DEBUGF(NETIF_DEBUG, ("PBUFS not available\r\n"));
			return ERR_IF;
		}

		/* Send the data from the pbuf to the interface, one pbuf at a
		   time. The size of the data in each pbuf is kept in the ->len
		   variable. */
		Xil_DCacheFlushRange((unsigned int)q->payload, (unsigned)q->len);

		XEmacPs_BdSetAddressTx(txbd, (u32)q->payload);
		if (q->len > (XEMACPS_MAX_FRAME_SIZE - 18))
			XEmacPs_BdSetLength(txbd, (XEMACPS_MAX_FRAME_SIZE - 18) & 0x3FFF);
		else
			XEmacPs_BdSetLength(txbd, q->len & 0x3FFF);

		tx_pbufs_storage[BdIndex] = (int)q;

		pbuf_ref(q);
		last_txbd = txbd;
		XEmacPs_BdClearLast(txbd);
		dsb();
 		txbd = XEmacPs_BdRingNext(txring, txbd);
	}
	XEmacPs_BdSetLast(last_txbd);
	dsb();
	for(q = p, txbd = txbdset; q != NULL; q = q->next) {
		XEmacPs_BdClearTxUsed(txbd);
		txbd = XEmacPs_BdRingNext(txring, txbd);
	}
	dsb();

	Status = XEmacPs_BdRingToHw(txring, n_pbufs, txbdset);
	if (Status != XST_SUCCESS) {
		mtcpsr(lev);
		LWIP_DEBUGF(NETIF_DEBUG, ("sgsend: Error submitting TxBD\r\n"));
		return ERR_IF;
	}
	dsb();
	/* Start transmit */
	XEmacPs_WriteReg((xemacpsif->emacps).Config.BaseAddress,
	XEMACPS_NWCTRL_OFFSET,
	(XEmacPs_ReadReg((xemacpsif->emacps).Config.BaseAddress,
	XEMACPS_NWCTRL_OFFSET) | XEMACPS_NWCTRL_STARTTX_MASK));
	dsb();
	mtcpsr(lev);
	return Status;
}
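
Note the length clamp above: the GEM buffer-descriptor length field is 14 bits wide, so each length is masked with 0x3FFF and capped at the maximum frame payload. A standalone sketch of that clamp (the cap of XEMACPS_MAX_FRAME_SIZE - 18 is stood in for by an illustrative 1500 here):

#include <stdio.h>

#define MAX_DATA    1500u    /* illustrative stand-in for XEMACPS_MAX_FRAME_SIZE - 18 */
#define BD_LEN_MASK 0x3FFFu  /* 14-bit length field in the TX buffer descriptor */

static unsigned bd_len(unsigned len)
{
  if (len > MAX_DATA)
    len = MAX_DATA;          /* oversized buffers are truncated to the cap */
  return len & BD_LEN_MASK;
}

int main(void)
{
  printf("%u %u\n", bd_len(600), bd_len(4000));
  return 0;
}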
Example #8
static inline struct netif_tx_request *netfront_make_txreqs(struct netfront_dev *dev,
							    struct netif_tx_request *tx,
							    struct pbuf *p, int *slots)
{
	struct netif_tx_request *first_tx = tx;
	struct net_txbuffer *buf;
	struct pbuf *first_p = p;
	struct pbuf *q;
	unsigned long tot_len;
	unsigned long s;
	void *page;
	int q_slots;
	size_t plen, left;

	tot_len = 0;
	buf = &dev->tx_buffers[tx->id];

	/* map pages of pbuf */
	for (q = p; q != NULL; q = q->next) {
		left = q->len;
		q_slots = (int) _count_pages(q->payload, q->len);

		/* grant pages of pbuf */
		for (s = 0; s < q_slots; ++s) {
			/* read only mapping */
			page = (void *)((((unsigned long) q->payload) & PAGE_MASK) + (s * PAGE_SIZE));
			tx->gref = buf->gref = gnttab_grant_access(dev->dom, virtual_to_mfn(page), 0);
			BUG_ON(tx->gref == GRANT_INVALID_REF);

			if (s == 0) /* first slot */
				tx->offset = ((unsigned long) q->payload) & ~PAGE_MASK;
			else
				tx->offset = 0;

			if ((s + 1) == q_slots) /* last slot */
				tx->size   = ((((unsigned long) q->payload) + q->len) & ~PAGE_MASK) - tx->offset;
			else
				tx->size   = PAGE_SIZE - tx->offset;

			tot_len += tx->size;

			if ((s + 1) < q_slots || q->next != NULL) {
				/* there will be a follow-up slot */
				tx->flags |= NETTXF_more_data;
				tx = netfront_get_page(dev); /* next slot */
				BUG_ON(tx == NULL); /* out of memory -> this should have been caught
						       before calling this function */
				(*slots)++;
				buf = &dev->tx_buffers[tx->id];
			}
		}
	}

	/*
	 * The first fragment has the entire packet
	 * size, subsequent fragments have just the
	 * fragment size. The backend works out the
	 * true size of the first fragment by
	 * subtracting the sizes of the other
	 * fragments.
	 */
	BUG_ON(first_p->tot_len != tot_len); /* broken pbuf?! */
	first_tx->size = tot_len;
	pbuf_ref(first_p); /* increase ref count */
	buf->pbuf = first_p; /* remember chain for later release on last buf */
	return tx;
}
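
Each grant covers one machine page, so the loop needs the number of pages spanned by [payload, payload + len). A standalone sketch of a page count equivalent to what _count_pages above presumably computes (4 KiB pages and the kernel-style PAGE_MASK convention assumed; len must be nonzero):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096ul
#define PAGE_MASK (~(PAGE_SIZE - 1))

static unsigned long count_pages(uintptr_t addr, unsigned long len)
{
  uintptr_t first = addr & PAGE_MASK;              /* page of first byte */
  uintptr_t last  = (addr + len - 1) & PAGE_MASK;  /* page of last byte */
  return (last - first) / PAGE_SIZE + 1;
}

int main(void)
{
  /* 0x40 bytes starting 0x10 short of a page boundary -> 2 pages */
  printf("%lu\n", count_pages((uintptr_t)0x1FF0, 0x40));
  return 0;
}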
Example #9
static err_t mg_lwip_tcp_recv_cb(void *arg, struct tcp_pcb *tpcb,
                                 struct pbuf *p, err_t err) {
  struct mg_connection *nc = (struct mg_connection *) arg;
  DBG(("%p %p %u %d", nc, tpcb, (p != NULL ? p->tot_len : 0), err));
  if (p == NULL) {
    if (nc != NULL) {
      system_os_post(MG_TASK_PRIORITY, MG_SIG_CLOSE_CONN, (uint32_t) nc);
    } else {
      /* Tombstoned connection, do nothing. */
    }
    return ERR_OK;
  } else if (nc == NULL) {
    tcp_abort(tpcb);
    return ERR_ARG;
  }
  struct mg_lwip_conn_state *cs = (struct mg_lwip_conn_state *) nc->sock;
  /*
   * If we get a chain of more than one segment at once, we need to bump
   * refcount on the subsequent bufs to make them independent.
   */
  if (p->next != NULL) {
    struct pbuf *q = p->next;
    for (; q != NULL; q = q->next) pbuf_ref(q);
  }
  if (cs->rx_chain == NULL) {
    cs->rx_chain = p;
    cs->rx_offset = 0;
  } else {
    pbuf_chain(cs->rx_chain, p);
  }

#ifdef ESP_SSL_KRYPTON
  if (nc->ssl != NULL) {
    if (nc->flags & MG_F_SSL_HANDSHAKE_DONE) {
      mg_lwip_ssl_recv(nc);
    } else {
      mg_lwip_ssl_do_hs(nc);
    }
    return ERR_OK;
  }
#endif

  while (cs->rx_chain != NULL) {
    struct pbuf *seg = cs->rx_chain;
    size_t len = (seg->len - cs->rx_offset);
    char *data = (char *) malloc(len);
    if (data == NULL) {
      DBG(("OOM"));
      return ERR_MEM;
    }
    pbuf_copy_partial(seg, data, len, cs->rx_offset);
    mg_if_recv_tcp_cb(nc, data, len); /* callee takes over data */
    cs->rx_offset += len;
    if (cs->rx_offset == cs->rx_chain->len) {
      cs->rx_chain = pbuf_dechain(cs->rx_chain);
      pbuf_free(seg);
      cs->rx_offset = 0;
    }
  }

  if (nc->send_mbuf.len > 0) {
    mg_lwip_mgr_schedule_poll(nc->mgr);
  }
  return ERR_OK;
}
Example #10
/** \brief  Low level output of a packet. Never call this from an
 *          interrupt context, as it may block until TX descriptors
 *          become available.
 *
 *  \param[in] netif the lwip network interface structure for this netif
 *  \param[in] p the MAC packet to send (e.g. IP packet including MAC addresses and type)
 *  \return ERR_OK if the packet could be sent or an err_t value if the packet couldn't be sent
 */
static err_t k64f_low_level_output(struct netif *netif, struct pbuf *p)
{
  struct k64f_enetdata *k64f_enet = netif->state;
  struct pbuf *q;
  u32_t idx;
  s32_t dn;
  uint8_t *psend = NULL, *dst;

  /* Get free TX buffer index */
  idx = k64f_enet->tx_produce_index;
  
  /* Check the pbuf chain for payloads that are not 8-byte aligned.
     If found, a new properly aligned buffer needs to be allocated
     and the data copied there */
  for (q = p; q != NULL; q = q->next)
    if (((u32_t)q->payload & (TX_BUF_ALIGNMENT - 1)) != 0)
      break;
  if (q != NULL) {
    // Allocate properly aligned buffer
    psend = (uint8_t*)malloc(p->tot_len);
    if (NULL == psend)
      return ERR_MEM;   
    LWIP_ASSERT("k64f_low_level_output: buffer not properly aligned", ((u32_t)psend & (TX_BUF_ALIGNMENT - 1)) == 0);
    for (q = p, dst = psend; q != NULL; q = q->next) {
      MEMCPY(dst, q->payload, q->len);
      dst += q->len;
    }
    k64f_enet->txb_aligned[idx] = psend;
    dn = 1;
  } else {
    k64f_enet->txb_aligned[idx] = NULL;
    dn = (s32_t) pbuf_clen(p);
    pbuf_ref(p);
  }

  /* Wait until enough descriptors are available for the transfer. */
  /* THIS WILL BLOCK UNTIL THERE ARE ENOUGH DESCRIPTORS AVAILABLE */
  while (dn > k64f_tx_ready(netif))
    osSemaphoreWait(k64f_enet->xTXDCountSem.id, osWaitForever);

  /* Get exclusive access */
  sys_mutex_lock(&k64f_enet->TXLockMutex);

  /* Setup transfers */
  q = p;
  while (dn > 0) {
    dn--;
    if (psend != NULL) {
      k64f_update_txbds(k64f_enet, idx, psend, p->tot_len, 1);
      k64f_enet->txb[idx] = NULL;
      
      LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
      ("k64f_low_level_output: aligned packet(%p) sent"
      " size = %d (index=%d)\n", psend, p->tot_len, idx));      
    } else {
      LWIP_ASSERT("k64f_low_level_output: buffer not properly aligned", ((u32_t)q->payload & 0x07) == 0);

      /* Only save pointer to free on last descriptor */
      if (dn == 0) {
        /* Save size of packet and signal it's ready */
        k64f_update_txbds(k64f_enet, idx, q->payload, q->len, 1);
        k64f_enet->txb[idx] = p;
      }
      else {
        /* Save size of packet, descriptor is not last */
        k64f_update_txbds(k64f_enet, idx, q->payload, q->len, 0);
        k64f_enet->txb[idx] = NULL;
      }
      
      LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
      ("k64f_low_level_output: pbuf packet(%p) sent, chain#=%d,"
      " size = %d (index=%d)\n", q->payload, dn, q->len, idx));
    }

    q = q->next;

    idx = (idx + 1) % ENET_TX_RING_LEN;
  }

  k64f_enet->tx_produce_index = idx;
  enet_hal_active_txbd(BOARD_DEBUG_ENET_INSTANCE);
  LINK_STATS_INC(link.xmit);

  /* Restore access */
  sys_mutex_unlock(&k64f_enet->TXLockMutex);

  return ERR_OK;
}
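
The bounce decision above hinges on a power-of-two alignment test: a payload is usable in place only if the low bits of its address are clear. A standalone sketch of the test (TX_BUF_ALIGNMENT assumed to be a power of two, 8 as in this driver):

#include <stdint.h>
#include <stdio.h>

#define TX_BUF_ALIGNMENT 8u

static int is_aligned(const void *p)
{
  /* works for any power-of-two alignment */
  return ((uintptr_t)p & (TX_BUF_ALIGNMENT - 1)) == 0;
}

int main(void)
{
  char buf[16];
  printf("buf[0]: %d, buf[1]: %d\n", is_aligned(&buf[0]), is_aligned(&buf[1]));
  return 0;
}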
Example #11
/*-----------------------------------------------------------------------------------*
  err_t low_level_output(struct netif *netif, struct pbuf *p)

  Output pbuf chain to hardware. It is assumed that there is a complete and correct
  ethernet frame in p. The only buffering we have in this system is in the
  hardware descriptor ring. If there is no room on the ring, then drop the frame.
 *-----------------------------------------------------------------------------------*/
static err_t
low_level_output(struct netif *netif, struct pbuf *p)
{
    struct pbuf *q;
    mcf5272if_t *mcf5272 = netif->state;
    MCF5272_IMM *imm = mcf5272->imm;
    int num_desc;
    int num_free;
    unsigned int tx_insert_sof, tx_insert_eof;
    unsigned int i;
    u32_t old_level;

    /* Make sure that there are no PBUF_REF buffers in the chain. Their
       payloads are only valid until this call returns, but this ethernet
       driver leaves buffers on the dma ring until sent, so referenced
       payloads are copied first (pbuf_take) */
    p = pbuf_take(p);
    /* Interrupts are disabled through this whole thing to support multi-threading
     * transmit calls. Also this function might be called from an ISR. */
    old_level = sys_arch_protect();
    
    /* Determine number of descriptors needed */
    num_desc = pbuf_clen(p);
    if (num_desc > mcf5272->tx_free)
    {
        /* Drop the frame, we have no place to put it */
#ifdef LINK_STATS
        lwip_stats.link.memerr++;
#endif
        sys_arch_unprotect(old_level);
        return ERR_MEM;
        
    } else {
        /* Increment use count on pbuf */
        pbuf_ref(p);
        
        /* Put buffers on descriptor ring, but don't mark them as ready yet */
        tx_insert_eof = tx_insert_sof = mcf5272->tx_insert;
        q = p;
        do
        {
            mcf5272->tx_free--;
            mcf5272->tx_pbuf_a[tx_insert_eof] = q;
            mcf5272->txbd_a[tx_insert_eof].p_buf = q->payload;
            mcf5272->txbd_a[tx_insert_eof].data_len = q->len;
            q = q->next;
            if (q)
                INC_TX_BD_INDEX(tx_insert_eof);
        } while (q);
        
        /* Go backwards through descriptor ring setting flags */
        i = tx_insert_eof;
        do
        {
            mcf5272->txbd_a[i].flags = (u16_t) (MCF5272_FEC_TX_BD_R |
                                                (mcf5272->txbd_a[i].flags & MCF5272_FEC_TX_BD_W) |
                               ((i == tx_insert_eof) ? (MCF5272_FEC_TX_BD_L | MCF5272_FEC_TX_BD_TC) : 0));
            if (i != tx_insert_sof)
                DEC_TX_BD_INDEX(i);
            else
                break;
        } while (1);
        INC_TX_BD_INDEX(tx_insert_eof);
        mcf5272->tx_insert = tx_insert_eof;
#ifdef LINK_STATS
        lwip_stats.link.xmit++;
#endif        
	/* Indicate that there has been a transmit buffer produced */
	MCF5272_WR_FEC_TDAR(imm,1);
        sys_arch_unprotect(old_level);
    }
    return ERR_OK;
}
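
Setting the ready flags backwards (EOF first, SOF last) guarantees the FEC never sees a ready start descriptor followed by not-yet-ready continuation descriptors. A standalone sketch of that backwards walk with ring wrap-around (ring size illustrative):

#include <stdio.h>

#define RING 8u

int main(void)
{
  unsigned sof = 6, eof = 1;    /* frame occupies descriptors 6, 7, 0, 1 */
  unsigned i = eof;

  for (;;) {
    printf("set ready bit on descriptor %u%s\n", i,
           i == eof ? " (EOF: last-in-frame flags too)" : "");
    if (i == sof)
      break;                    /* SOF's ready bit was set last */
    i = (i + RING - 1) % RING;  /* DEC_TX_BD_INDEX with wrap */
  }
  return 0;
}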
Example #12
static socket_error_t recv_copy_free(struct socket *socket, void * buf,
        size_t *len) {
    struct pbuf *p;
    size_t copied;

    p = (struct pbuf *) socket->rxBufChain;
    if (p == NULL) {
        return SOCKET_ERROR_WOULD_BLOCK;
    }

    switch (socket->family) {
        case SOCKET_STREAM: {
            /* Copy out of the pbuf chain */
            copied = pbuf_copy_partial(p, buf, *len, 0);
            /* Set the external length to the number of bytes copied */
            *len = copied;
            while (copied) {
                if (copied < p->len) {
                    /* advance the payload pointer by the number of bytes copied */
                    p->payload = (char *)p->payload + copied;
                    /* reduce the length by the number of bytes copied */
                    p->len -= copied;
                    /* break out of the loop */
                    copied = 0;
                } else {
                    struct pbuf *q;
                    uint16_t freelen = p->tot_len;
                    q = p->next;
                    /* decrement the number of bytes copied by the length of the buffer */
                    copied -= p->len;
                    /* Free the current pbuf */
                    /* NOTE: This operation is interrupt safe, but not thread safe. */
                    if (q != NULL) {
                        pbuf_ref(q);
                    }
                    socket->rxBufChain = q;
                    pbuf_free(p);
                    /* Update the TCP window */
                    tcp_recved(socket->impl, freelen);
                    p = q;
                }
            }
            break;
        }
        case SOCKET_DGRAM: {
            struct pbuf *q;
            size_t cplen = ((*len) < (p->len) ? (*len) : (p->len));
            copied = pbuf_copy_partial(p, buf, cplen, 0);
            *len = copied;
            q = p->next;
            /* NOTE: This operation is interrupt safe, but not thread safe. */
            if (q != NULL) {
                pbuf_ref(q);
            }
            socket->rxBufChain = q;
            pbuf_free(p);
            break;
        }
        default:
            break;
    }

    return SOCKET_ERROR_NONE;
}
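
In the SOCKET_STREAM branch above, a partially consumed head pbuf is not freed; its payload pointer is advanced and its len shrunk so the next call resumes mid-buffer. A standalone sketch of that bookkeeping on a toy buffer:

#include <stdio.h>

int main(void)
{
  char storage[] = "abcdefgh";
  char *payload = storage;
  unsigned len = 8, copied = 3;

  payload += copied;   /* advance past the consumed bytes */
  len -= copied;       /* shrink the remaining length */
  printf("%u bytes left, next byte '%c'\n", len, payload[0]);
  return 0;
}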
Example #13
/* 
 * Send out a packet through the PPP/PPPoE main thread.
 * This routine is mainly used as the sending routine of the PPP
 * interface, and is called by the IP layer.
 */
static BOOL pppSendPacket(struct netif* out_if, struct pbuf* pb, ip_addr_t* ipaddr)
{
	__PPP_SENDPACKET_BLOCK* pBlock = NULL;
	__KERNEL_THREAD_MESSAGE msg;
	DWORD dwFlags;
	BOOL bResult = FALSE;

	BUG_ON(NULL == out_if);
	BUG_ON(NULL == pb);

	/* Make sure the PPPoE main thread is in place. */
	if (NULL == pppoeManager.hMainThread)
	{
		goto __TERMINAL;
	}

	/* Allocate a parameter block object to hold all parameters. */
	pBlock = (__PPP_SENDPACKET_BLOCK*)_hx_malloc(sizeof(__PPP_SENDPACKET_BLOCK));
	if (NULL == pBlock)
	{
		goto __TERMINAL;
	}
	/* 
	 * Increase the pbuf's reference count so it cannot be freed by
	 * this routine's caller.
	 * The pbuf object will be released in the handler of the SENDPACKET
	 * message.
	 */
	pbuf_ref(pb);

	pBlock->pNext = NULL;
	pBlock->out_if = out_if;
	pBlock->pkt_buff = pb; /* Should be confirmed if a new pbuf object should be allocated here. */
	pBlock->addr = *ipaddr;

	/*
	 * Hook the send packet block object into pppoeManager's outgoing list,
	 * and send a message to the PPPoE main thread if the list is empty, to
	 * trigger the sending process.
	 * Use a critical section to protect the list operation since it may be
	 * accessed in interrupt context.
	 */
	__ENTER_CRITICAL_SECTION_SMP(pppoeManager.spin_lock, dwFlags);
	if (0 == pppoeManager.nOutgSize) /* List is empty. */
	{
		BUG_ON(pppoeManager.pOutgFirst != NULL);
		BUG_ON(pppoeManager.pOutgLast != NULL);
		pppoeManager.pOutgFirst = pppoeManager.pOutgLast = pBlock;
		pBlock->pNext = NULL; /* Very important. */
		pppoeManager.nOutgSize++;
		/* 
		 * Send sending message to PPPoE main thread.
		 * Disable kernel thread's scheduling since in critical
		 * section.
		 */
		__LEAVE_CRITICAL_SECTION_SMP(pppoeManager.spin_lock, dwFlags);
		msg.wCommand = PPPOE_MSG_SENDPACKET;
		msg.wParam = 0;
		msg.dwParam = 0;
		bResult = SendMessage(pppoeManager.hMainThread, &msg);

		if (!bResult) /* Msg queue is full? */
		{
			__ENTER_CRITICAL_SECTION_SMP(pppoeManager.spin_lock, dwFlags);
			/* Unlink from list. */
			pppoeManager.pOutgFirst = pppoeManager.pOutgLast = NULL;
			pppoeManager.nOutgSize = 0;
			__LEAVE_CRITICAL_SECTION_SMP(pppoeManager.spin_lock, dwFlags);
			goto __TERMINAL;
		}
	}
	else /* The outgoing list is not empty. */
	{
		BUG_ON(NULL == pppoeManager.pOutgFirst);
		BUG_ON(NULL == pppoeManager.pOutgLast);
		if (pppoeManager.nOutgSize > PPPOE_MAX_PENDINGLIST_SIZE)
		{
			__LEAVE_CRITICAL_SECTION_SMP(pppoeManager.spin_lock, dwFlags);
			bResult = FALSE;
			goto __TERMINAL;
		}
		/* Link the block into the list. */
		pBlock->pNext = NULL;
		pppoeManager.pOutgLast->pNext = pBlock;
		pppoeManager.pOutgLast = pBlock;
		pppoeManager.nOutgSize++;
		__LEAVE_CRITICAL_SECTION_SMP(pppoeManager.spin_lock, dwFlags);
		bResult = TRUE;
	}

#if 0 /* Obsoleted. */
	/* Send message to PPP/PPPoE main thread to trigger sending. */
	msg.wCommand = PPPOE_MSG_SENDPACKET;
	msg.wParam = 0;
	msg.dwParam = (DWORD)pBlock;
	bResult = SendMessage(pppoeManager.hMainThread, &msg);
#endif

__TERMINAL:
	if (!bResult)
	{
		/* Should release the parameter block object. */
		if (pBlock)
		{
			_hx_free(pBlock);
		}
		/* Free the pbuf since we referenced it. */
		pbuf_free(pb);
		LINK_STATS_INC(link.drop);
	}
	return bResult;
}
Example #14
/* returns ARP_TABLE_SIZE or the index of the new entry */
static u8_t
etharp_new_entry(struct pbuf *q, struct ip_addr *ipaddr, struct eth_addr *hwaddr, enum etharp_state s)
{
    u8_t i;

    /* We now try to find an unused entry in the ARP table that we
       will set up, and on which we will queue the outgoing packet. */
    for(i = 0; i < arp_table_last; ++i) {
        if(arp_table[i].state == ETHARP_STATE_EMPTY) {
            break;
        }
    }

    if(i == arp_table_last && arp_table_last < ARP_TABLE_SIZE)
        arp_table_last++;

    /* If no unused entry is found, we try to find the oldest entry and
       throw it away. */
    if(i == arp_table_last) {
        u8_t j, maxtime;
        maxtime = 0;
        j = arp_table_last;
        for(i = 0; i < arp_table_last; ++i) {
            if((arp_table[i].state == ETHARP_STATE_STABLE || arp_table[i].state == ETHARP_STATE_STATIC) &&
                    ctime - arp_table[i].ctime > maxtime) {
                maxtime = ctime - arp_table[i].ctime;
                j = i;
            }
        }
        i = j;
    }

    /* If all table entries were in pending state, we won't send out any
       more ARP requests. We'll just give up. */
    if(i == arp_table_last) {
        DEBUGF(ETHARP_DEBUG, ("etharp_output: no more ARP table entries avaliable.\n"));
        return ARP_TABLE_SIZE;
    }

    /* Now, i is the ARP table entry which we will fill with the new
       information. */
    memcpy (&arp_table[i].ipaddr, ipaddr, sizeof (arp_table[i].ipaddr));
    arp_table[i].ctime = ctime;
    arp_table[i].state = s;
    if (q) {
        /* Because the pbuf will be queued, we'll increase the reference
           count. */
        arp_table[i].p = q;
        pbuf_ref (q);
        arp_table[i].payload = q->payload;
        arp_table[i].len = q->len;
        arp_table[i].tot_len = q->tot_len;
    } else {
        arp_table[i].p = NULL;
        arp_table[i].payload = NULL;
        arp_table[i].len = 0;
        arp_table[i].tot_len = 0;
    }
    if (hwaddr) {
        u8_t k;
        for(k = 0; k < 6; ++k) {
            arp_table[i].ethaddr.addr[k] = hwaddr->addr[k];
        }
        if (s == ETHARP_STATE_STABLE)
            if (arp_callback)
                (*arp_callback)(arp_callback_arg, arp_table[i].ethaddr.addr, arp_table[i].ipaddr.addr);
    }

    DEBUGF(ETHARP_DEBUG, ("etharp_output: queueing %p\n", q));
    return i;
}
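Example #15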
/* Low level output of a packet. Never call this from an interrupt context,
 * as it may block until TX descriptors become available. */
STATIC err_t lpc_low_level_output(struct netif *netif, struct pbuf *p)
{
    lpc_enetdata_t *lpc_enetif = netif->state;
    struct pbuf *q;

#if LPC_TX_PBUF_BOUNCE_EN == 1
    u8_t *dst;
    struct pbuf *np;
#endif
    u32_t idx;
    u32_t dn, notdmasafe = 0;

    /* Zero-copy TX buffers may be fragmented across multiple payload
       chains. Determine the number of descriptors needed for the
       transfer. The pbuf chaining can be a mess! */
    dn = (u32_t) pbuf_clen(p);

    /* Test to make sure packet addresses are DMA safe. A DMA safe
       address is one that uses external memory or peripheral RAM.
       IRAM and FLASH are not safe! */
    for (q = p; q != NULL; q = q->next) {
        notdmasafe += lpc_packet_addr_notsafe(q->payload);
    }

#if LPC_TX_PBUF_BOUNCE_EN == 1
    /* If the pbuf is not DMA safe, a new bounce buffer (pbuf) will be
       created that will be used instead. This requires a copy from the
       non-safe DMA region to the new pbuf */
    if (notdmasafe) {
        /* Allocate a pbuf in DMA memory */
        np = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
        if (np == NULL) {
            LWIP_DEBUGF(EMAC_DEBUG | LWIP_DBG_TRACE,
                        ("lpc_low_level_output: could not allocate TX pbuf\n"));
            return ERR_MEM;
        }

        /* This buffer better be contiguous! */
        LWIP_ASSERT("lpc_low_level_output: New transmit pbuf is chained",
                    (pbuf_clen(np) == 1));

        /* Copy to DMA safe pbuf */
        dst = (u8_t *) np->payload;
        for (q = p; q != NULL; q = q->next) {
            /* Copy the buffer to the descriptor's buffer */
            MEMCPY(dst, (u8_t *) q->payload, q->len);
            dst += q->len;
        }
        np->len = p->tot_len;

        LWIP_DEBUGF(EMAC_DEBUG | LWIP_DBG_TRACE,
                    ("lpc_low_level_output: Switched to DMA safe buffer, old=%p, new=%p\n",
                     q, np));

        /* use the new buffer for descriptor queueing. The original pbuf will
           be de-allocated outside this driver. */
        p = np;
        dn = 1;
    }
#else
    if (notdmasafe) {
        LWIP_ASSERT("lpc_low_level_output: Not a DMA safe pbuf",
                    (notdmasafe == 0));
    }
#endif

    /* Wait until enough descriptors are available for the transfer. */
    /* THIS WILL BLOCK UNTIL THERE ARE ENOUGH DESCRIPTORS AVAILABLE */
    while (dn > lpc_tx_ready(netif)) {
#if NO_SYS == 0
        xSemaphoreTake(lpc_enetif->xtx_count_sem, 0);
#else
        msDelay(1);
#endif
    }

    /* Get free TX buffer index */
    idx = Chip_ENET_GetTXProduceIndex(LPC_ETHERNET);

#if NO_SYS == 0
    /* Get exclusive access */
    sys_mutex_lock(&lpc_enetif->tx_lock_mutex);
#endif

    /* Prevent LWIP from de-allocating this pbuf. The driver will
       free it once it's been transmitted. */
    if (!notdmasafe) {
        pbuf_ref(p);
    }

    /* Setup transfers */
    q = p;
    while (dn > 0) {
        dn--;

        /* Only save pointer to free on last descriptor */
        if (dn == 0) {
            /* Save size of packet and signal it's ready */
            lpc_enetif->ptxd[idx].Control = ENET_TCTRL_SIZE(q->len) | ENET_TCTRL_INT |
                                            ENET_TCTRL_LAST;
            lpc_enetif->txb[idx] = p;
        }
        else {
            /* Save size of packet, descriptor is not last */
            lpc_enetif->ptxd[idx].Control = ENET_TCTRL_SIZE(q->len) | ENET_TCTRL_INT;
            lpc_enetif->txb[idx] = NULL;
        }

        LWIP_DEBUGF(EMAC_DEBUG | LWIP_DBG_TRACE,
                    ("lpc_low_level_output: pbuf packet(%p) sent, chain#=%d,"
                     " size = %d (index=%d)\n", q->payload, dn, q->len, idx));

        lpc_enetif->ptxd[idx].Packet = (u32_t) q->payload;

        q = q->next;

        idx = Chip_ENET_IncTXProduceIndex(LPC_ETHERNET);
    }

    LINK_STATS_INC(link.xmit);

#if NO_SYS == 0
    /* Restore access */
    sys_mutex_unlock(&lpc_enetif->tx_lock_mutex);
#endif

    return ERR_OK;
}
Example #16
/*-----------------------------------------------------------------------------------*/
struct pbuf *
etharp_output(struct netif *netif, struct ip_addr *ipaddr, struct pbuf *q)
{
    struct eth_addr *dest, *srcaddr, mcastaddr;
    struct eth_hdr *ethhdr;
    struct etharp_hdr *hdr;
    struct pbuf *p;
    u8_t i;

    srcaddr = (struct eth_addr *)netif->hwaddr;

    /* Make room for Ethernet header. */
    if(pbuf_header(q, sizeof(struct eth_hdr)) != 0) {
        /* The pbuf_header() call shouldn't fail, and we'll just bail
           out if it does. */
        DEBUGF(ETHARP_DEBUG, ("etharp_output: could not allocate room for header.\n"));
#ifdef LINK_STATS
        ++stats.link.lenerr;
#endif /* LINK_STATS */
        return NULL;
    }


    dest = NULL;
    /* Construct Ethernet header. Start by deciding which MAC address
       to use as the destination address. Broadcasts and multicasts
       are special; all other addresses are looked up in the ARP
       table. */
    if(ip_addr_isany(ipaddr) ||
            ip_addr_isbroadcast(ipaddr, &(netif->netmask))) {
        dest = (struct eth_addr *)&ethbroadcast;
    } else if(ip_addr_ismulticast(ipaddr)) {
        /* Hash IP multicast address to MAC address. */
        mcastaddr.addr[0] = 0x01;
        mcastaddr.addr[1] = 0x0;
        mcastaddr.addr[2] = 0x5e;
        mcastaddr.addr[3] = ip4_addr2(ipaddr) & 0x7f;
        mcastaddr.addr[4] = ip4_addr3(ipaddr);
        mcastaddr.addr[5] = ip4_addr4(ipaddr);
        dest = &mcastaddr;
    } else {
#ifdef __PAULOS__
        /* abort on insane conditions */
        if (!ip_addr_maskcmp(ipaddr, &(netif->ip_addr), &(netif->netmask)))
            return NULL;
        if (ipaddr->addr == netif->ip_addr.addr)
            return NULL;
#else
        if(!ip_addr_maskcmp(ipaddr, &(netif->ip_addr), &(netif->netmask))) {
            /* Use the IP address of the default gateway if the destination
               is NOT on the same subnet as we are. ("NOT" added 20021113 psheer@) */
            ipaddr = &(netif->gw);
        }
#endif

        /* We try to find a stable mapping. */
        for(i = 0; i < arp_table_last; ++i) {
            if((arp_table[i].state == ETHARP_STATE_STABLE || arp_table[i].state == ETHARP_STATE_STATIC) &&
                    ip_addr_cmp(ipaddr, &arp_table[i].ipaddr)) {
                dest = &arp_table[i].ethaddr;

#if 0
// FIXME: remove this test code
                if (!((int) rand() % 2)) {
                    dest = NULL;
                    arp_table[i].state = ETHARP_STATE_EMPTY;
                    if (arp_table[i].p)
                        pbuf_free (arp_table[i].p);
                    arp_table[i].p = NULL;
                    arp_table[i].payload = NULL;
                    arp_table[i].len = arp_table[i].tot_len = 0;
                }
#endif

                break;
            }
        }
    }

    if(dest == NULL) {
        /* No destination address has been found, so we'll have to send
           out an ARP request for the IP address. The outgoing packet is
           queued unless the queue is full. */

        /* We check if we are already querying for this address. If so,
           we'll bail out. */
        for(i = 0; i < arp_table_last; ++i) {
            if(arp_table[i].state == ETHARP_STATE_PENDING &&
                    ip_addr_cmp(ipaddr, &arp_table[i].ipaddr)) {
                DEBUGF(ETHARP_DEBUG, ("etharp_output: already queued\n"));
                return NULL;
            }
        }

        hdr = q->payload;
        for(i = 0; i < 6; ++i)
            hdr->ethhdr.src.addr[i] = srcaddr->addr[i];
        hdr->ethhdr.type = htons(ETHTYPE_IP);

        i = etharp_new_entry(q, ipaddr, NULL, ETHARP_STATE_PENDING);

        /* We allocate a pbuf for the outgoing ARP request packet. */
        p = pbuf_alloc(PBUF_RAW, sizeof(struct etharp_hdr) + 2, PBUF_RAM);
        if(p == NULL) {
            /* No ARP request packet could be allocated, so we forget about
            the ARP table entry. */
            if(i != ARP_TABLE_SIZE) {
                arp_table[i].state = ETHARP_STATE_EMPTY;
                /* We decrease the reference count of the queued pbuf (which now
                   is dequeued). */
                DEBUGF(ETHARP_DEBUG, ("etharp_output: couldn't alloc pbuf for query, dequeueing %p\n", q));
            }
            return NULL;
        }
        pbuf_header (p, (s16_t) -2);

        hdr = p->payload;

        hdr->opcode = htons(ARP_REQUEST);

        for(i = 0; i < 6; ++i) {
            hdr->dhwaddr.addr[i] = 0x00;
            hdr->shwaddr.addr[i] = srcaddr->addr[i];
        }

        memcpy (&(hdr->dipaddr), ipaddr, sizeof (hdr->dipaddr));
        memcpy (&(hdr->sipaddr), &(netif->ip_addr), sizeof (hdr->sipaddr));

        hdr->hwtype = htons(HWTYPE_ETHERNET);
        ARPH_HWLEN_SET(hdr, 6);

        hdr->proto = htons(ETHTYPE_IP);
        ARPH_PROTOLEN_SET(hdr, sizeof(struct ip_addr));

        for(i = 0; i < 6; ++i) {
            hdr->ethhdr.dest.addr[i] = 0xff;
            hdr->ethhdr.src.addr[i] = srcaddr->addr[i];
        }

        hdr->ethhdr.type = htons(ETHTYPE_ARP);
        return p;	/* (1) */
    } else {
        /* A valid IP->MAC address mapping was found, so we construct the
           Ethernet header for the outgoing packet. */

        ethhdr = q->payload;

        for(i = 0; i < 6; i++) {
            ethhdr->dest.addr[i] = dest->addr[i];
            ethhdr->src.addr[i] = srcaddr->addr[i];
        }

        ethhdr->type = htons(ETHTYPE_IP);

        pbuf_ref (q);  /* <--- this is important, because the reference
must parallel the one taken when returning over here at (1). Callers must
ALWAYS do a pbuf_free on the return value of etharp_output(). */
        return q;
    }


}
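
The comment at (1) encodes etharp_output's ownership contract: whatever the function returns, the caller does exactly one pbuf_free() on it, so the stable-mapping path must take an extra reference on q before handing it back. A standalone sketch of that balanced-refcount pattern (toy counter instead of a pbuf):

#include <stdio.h>

struct toy_pbuf { int ref; };

static struct toy_pbuf *output(struct toy_pbuf *q)
{
  q->ref++;      /* mirrors pbuf_ref(q): the caller will free once */
  return q;
}

int main(void)
{
  struct toy_pbuf q = { 1 };          /* the caller's own reference */
  struct toy_pbuf *r = output(&q);
  r->ref--;                           /* the caller's pbuf_free() of the result */
  printf("refs left: %d\n", q.ref);   /* 1: the caller still owns q */
  return 0;
}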
Example #17
/**
 * Send an ARP request for the given IP address and/or queue a packet.
 *
 * If the IP address was not yet in the cache, a pending ARP cache entry
 * is added and an ARP request is sent for the given address. The packet
 * is queued on this entry.
 *
 * If the IP address was already pending in the cache, a new ARP request
 * is sent for the given address. The packet is queued on this entry.
 *
 * If the IP address was already stable in the cache, and a packet is
 * given, it is directly sent and no ARP request is sent out. 
 * 
 * If the IP address was already stable in the cache, and no packet is
 * given, an ARP request is sent out.
 * 
 * @param netif The lwIP network interface on which ipaddr
 * must be queried for.
 * @param ipaddr The IP address to be resolved.
 * @param q If non-NULL, a pbuf that must be delivered to the IP address.
 * q is not freed by this function.
 *
 * @return
 * - ERR_BUF Could not make room for Ethernet header.
 * - ERR_MEM Hardware address unknown, and no more ARP entries available
 *   to query for address or queue the packet.
 * - ERR_MEM Could not queue packet due to memory shortage.
 * - ERR_RTE No route to destination (no gateway to external networks).
 * - ERR_ARG Non-unicast address given, those will not appear in ARP cache.
 *
 */
err_t etharp_query(struct netif *netif, struct ip_addr *ipaddr, struct pbuf *q)
{
  struct eth_addr * srcaddr = (struct eth_addr *)netif->hwaddr;
  err_t result = ERR_MEM;
  s8_t i; /* ARP entry index */
  u8_t k; /* Ethernet address octet index */

  /* non-unicast address? */
  if (ip_addr_isbroadcast(ipaddr, netif) ||
      ip_addr_ismulticast(ipaddr) ||
      ip_addr_isany(ipaddr)) {
    LWIP_DEBUGF(ETHARP_DEBUG | DBG_TRACE, ("etharp_query: will not add non-unicast IP address to ARP cache\n"));
    return ERR_ARG;
  }

  /* find entry in ARP cache, ask to create entry if queueing packet */
  i = find_entry(ipaddr, ETHARP_TRY_HARD);

  /* could not find or create entry? */
  if (i < 0)
  {
    LWIP_DEBUGF(ETHARP_DEBUG | DBG_TRACE, ("etharp_query: could not create ARP entry\n"));
    if (q) LWIP_DEBUGF(ETHARP_DEBUG | DBG_TRACE, ("etharp_query: packet dropped\n"));
    return (err_t)i;
  }

  /* mark a fresh entry as pending (we just sent a request) */
  if (arp_table[i].state == ETHARP_STATE_EMPTY) {
    arp_table[i].state = ETHARP_STATE_PENDING;
  }

  /* { i is either a STABLE or (new or existing) PENDING entry } */
  LWIP_ASSERT("arp_table[i].state == PENDING or STABLE",
              ((arp_table[i].state == ETHARP_STATE_PENDING) ||
               (arp_table[i].state == ETHARP_STATE_STABLE)));

  /* do we have a pending entry? or an implicit query request? */
  if ((arp_table[i].state == ETHARP_STATE_PENDING) || (q == NULL)) {
    /* try to resolve it; send out ARP request */
    result = etharp_request(netif, ipaddr);
  }
  
  /* packet given? */
  if (q != NULL) {
    /* stable entry? */
    if (arp_table[i].state == ETHARP_STATE_STABLE) {
      /* we have a valid IP->Ethernet address mapping,
       * fill in the Ethernet header for the outgoing packet */
      struct eth_hdr *ethhdr = q->payload;
      for(k = 0; k < netif->hwaddr_len; k++) {
        ethhdr->dest.addr[k] = arp_table[i].ethaddr.addr[k];
        ethhdr->src.addr[k]  = srcaddr->addr[k];
      }
      ethhdr->type = htons(ETHTYPE_IP);
      LWIP_DEBUGF(ETHARP_DEBUG | DBG_TRACE, ("etharp_query: sending packet %p\n", (void *)q));
      /* send the packet */
      result = netif->linkoutput(netif, q);
    /* pending entry? (either just created or already pending) */
    } else if (arp_table[i].state == ETHARP_STATE_PENDING) {
#if ARP_QUEUEING /* queue the given q packet */
      struct pbuf *p;
      /* copy any PBUF_REF referenced payloads into PBUF_RAM */
      /* (the caller of lwIP assumes the referenced payload can be
       * freed after it returns from the lwIP call that brought us here) */
      p = pbuf_take(q);
      /* packet could be taken over? */
      if (p != NULL) {
        /* queue packet ... */
        if (arp_table[i].p == NULL) {
          /* ... in the empty queue */
          pbuf_ref(p);
          arp_table[i].p = p;
#if 0 /* multi-packet-queueing disabled, see bug #11400 */
        } else {
          /* ... at tail of non-empty queue */
          pbuf_queue(arp_table[i].p, p);
#endif
        }
        LWIP_DEBUGF(ETHARP_DEBUG | DBG_TRACE, ("etharp_query: queued packet %p on ARP entry %"S16_F"\n", (void *)q, (s16_t)i));
        result = ERR_OK;
      } else {
        LWIP_DEBUGF(ETHARP_DEBUG | DBG_TRACE, ("etharp_query: could not queue a copy of PBUF_REF packet %p (out of memory)\n", (void *)q));
        /* { result == ERR_MEM } through initialization */
      }
#else /* ARP_QUEUEING == 0 */
      /* q && state == PENDING && ARP_QUEUEING == 0 => result = ERR_MEM */
      /* { result == ERR_MEM } through initialization */
      LWIP_DEBUGF(ETHARP_DEBUG | DBG_TRACE, ("etharp_query: Ethernet destination address unknown, queueing disabled, packet %p dropped\n", (void *)q));
#endif
    }
  }
  return result;
}
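Because q is never freed by etharp_query(), the caller keeps ownership of it in every case. A minimal sketch of the two call patterns documented above (the wrapper names are invented for illustration):

/* Resolve-and-send: on a PENDING entry etharp_query() queues a copy of
 * q; on a STABLE entry it transmits q directly. Either way the caller
 * still owns q afterwards. */
static err_t
example_arp_send(struct netif *netif, struct ip_addr *nexthop, struct pbuf *q)
{
  err_t err = etharp_query(netif, nexthop, q);
  if (err != ERR_OK) {
    LWIP_DEBUGF(ETHARP_DEBUG, ("example_arp_send: etharp_query failed: %d\n", (int)err));
  }
  return err;
}

/* Query-only: passing NULL just (re)sends an ARP request, e.g. to
 * refresh an entry that is already STABLE. */
static void
example_arp_refresh(struct netif *netif, struct ip_addr *nexthop)
{
  (void)etharp_query(netif, nexthop, NULL);
}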
Example #18
static int recv_common(int s, void * mem, int len, unsigned int flags, int istcp) {
	sockfd_t	* fd;
	int		rv = 0, amt, totalrcv;
	uint8		* buf;
	struct pbuf	* p;

	if (flags != 0) {
		errno = EINVAL;
		return -1;
	}

	if (len <= 0) {
		errno = EINVAL;
		return -1;
	}

	fd = fds + s;

	// printf("lwip_recv(%d): want %d bytes\n", s, len);

	// Get access
	mutex_lock(fd->mutex);

	// Is there data available now?
	while (fd->recv <= 0) {
		// A negative count flags a closed/shut-down socket: report EOF
		if (fd->recv < 0) {
			rv = 0; goto out;
		}

		// Wait for more data to be available
		//printf("cond_wait for more data\n");
		cond_wait(fd->recv_avail, fd->mutex);
		//printf("woken\n");
	}

	// Ok, some data should be available now
	assert( fd->recv_buf );
	if (!fd->recv_buf) {
			// Defensive: the counter said data was ready but no pbuf is queued
		rv = -1; goto out;
	}

	// Pull data out into the client buffer
	buf = (uint8 *)mem;
	totalrcv = 0;
	for (p = fd->recv_buf; p && len > 0; ) {
		// How much to pull from this pbuf?
		if (p->len > len)
			amt = len;
		else
			amt = p->len;

		//printf("  pbuf: %d avail, want %d\n", p->len, amt);

		// Do it.
		memcpy(buf, p->payload, amt);
		buf += amt;
		totalrcv += amt;
		len -= amt;

		// Did we take the whole thing?
		if (amt == p->len) {
			// Yes. Dechain it and move forwards.
			fd->recv_buf = p->next;
			if (fd->recv_buf)
				pbuf_ref(fd->recv_buf);
			pbuf_free(p);
			p = fd->recv_buf;
		} else {
			// Nope. Just adjust the "header".
			pbuf_header(p, -amt);
			p = NULL;
		}
	}

	// ACK the amount we actually read out
	//printf("lwip_read(%d): ACK'ing %d bytes\n", s, totalrcv);
	rv = totalrcv;
	if (istcp)
		tcp_recved(fd->tcppcb, rv);
	fd->recv -= rv;
	//printf("returning\n");

out:
	mutex_unlock(fd->mutex);	
	return rv;
}
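The pbuf_ref()-before-pbuf_free() step above is the standard lwIP idiom for peeling one pbuf off the head of a chain: the head holds the only reference to its successor, so referencing the tail first keeps it alive across the free. Condensed into a sketch (not part of the original code):

/* Dechain the head of a pbuf chain without freeing the rest. */
static struct pbuf *
dechain_head(struct pbuf *p) {
	struct pbuf *tail = p->next;

	if (tail)
		pbuf_ref(tail);	/* keep the remainder alive across the free */
	pbuf_free(p);		/* head refcount drops to 0, tail stays at 1 */
	return tail;
}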
Example #19
static int read_from_tcp(struct socket * sock, message * m)
{
	unsigned rem_buf, written = 0;
	struct pbuf * p;

	assert(!(sock->flags & SOCK_FLG_OP_LISTENING) && sock->recv_head);

	rem_buf = m->COUNT;

	debug_tcp_print("socket num %ld recv buff sz %d", get_sock_num(sock), rem_buf);

	p = (struct pbuf *)sock->recv_head->data;
	while (rem_buf) {
		int err;

		if (rem_buf >= p->len) {
			struct pbuf * np;

			/*
			 * FIXME: perhaps copy into a local buffer first and
			 * then do a single copy to user space
			 */
#if 0
			print_tcp_payload(p->payload, p->len);
#endif
			err = copy_to_user(m->m_source, p->payload, p->len,
					(cp_grant_id_t) m->IO_GRANT, written);
			if (err != OK)
				goto cp_error;
			sock->recv_data_size -= p->len;

			debug_tcp_print("whole pbuf copied (%d bytes)", p->len);
			rem_buf -= p->len;
			written += p->len;

			if ((np = p->next)) {
				pbuf_ref(np);
				if (pbuf_free(p) != 1)
					panic("LWIP : pbuf_free != 1");
				/*
				 * Mark where we are going to continue if an
				 * error occurs
				 */
				sock->recv_head->data = np;
				p = np;
			} else {
				sock_dequeue_data(sock);
				pbuf_free(p);
				if (sock->recv_head)
					p = (struct pbuf *)sock->recv_head->data;
				else
					break;
			}

			if (rem_buf == 0)
				break;
		} else {
			/*
			 * It must be PBUF_RAM for us to be able to shift the
			 * payload pointer
			 */
			assert(p->type == PBUF_RAM);
			
#if 0
			print_tcp_payload(p->payload, rem_buf);
#endif
			err = copy_to_user(m->m_source, p->payload, rem_buf,
					(cp_grant_id_t) m->IO_GRANT, written);
			if (err != OK)
				goto cp_error;
			sock->recv_data_size -= rem_buf;

			debug_tcp_print("partial pbuf copied (%d bytes)", rem_buf);
			/*
			 * The whole pbuf hasn't been copied out, we only shift
			 * the payload pointer to remember where to continue
			 * next time
			 */
			pbuf_header(p, -rem_buf);
			written += rem_buf;
			break;
		}
	}

	debug_tcp_print("%d bytes written to userspace", written);
	//printf("%d wr, queue %d\n", written, sock->recv_data_size);
	tcp_recved((struct tcp_pcb *) sock->pcb, written);
	return written;

cp_error:
	if (written) {
		debug_tcp_print("%d bytes written to userspace", written);
		return written;
	} else
		return EFAULT;
}
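Both recv_common() and read_from_tcp() finish a partial read the same way: a negative pbuf_header() adjustment hides the bytes already consumed, so the next call resumes at the right offset. A minimal sketch of that idiom (assumes <string.h> and the lwIP pbuf API; read_from_tcp() additionally asserts PBUF_RAM before shifting):

/* Copy up to 'want' bytes from the front of p and hide what was taken.
 * On a partial copy, p->payload advances and p->len shrinks, so the
 * uncopied remainder stays queued for the next call. */
static u16_t
copy_some(struct pbuf *p, u8_t *dst, u16_t want)
{
	u16_t n = (want < p->len) ? want : p->len;

	memcpy(dst, p->payload, n);
	if (n < p->len)
		pbuf_header(p, -(s16_t)n);	/* consume only what was copied */
	return n;
}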