/**
 * Service an internal or external event for SNMP GET.
 *
 * @param request_id identifies requests from 0 to (SNMP_CONCURRENT_REQUESTS-1)
 * @param msg_ps points to the associated message process state
 */
static void
snmp_msg_get_event(u8_t request_id, struct snmp_msg_pstat *msg_ps)
{
  LWIP_DEBUGF(SNMP_MSG_DEBUG, ("snmp_msg_get_event: msg_ps->state==%"U16_F"\n",(u16_t)msg_ps->state));

  if (msg_ps->state == SNMP_MSG_EXTERNAL_GET_OBJDEF)
  {
    struct mib_external_node *en;
    struct snmp_name_ptr np;

    /* get_object_def() answer */
    en = msg_ps->ext_mib_node;
    np = msg_ps->ext_name_ptr;

    /* translate answer into a known lifeform */
    en->get_object_def_a(request_id, np.ident_len, np.ident, &msg_ps->ext_object_def);
    if (msg_ps->ext_object_def.instance != MIB_OBJECT_NONE)
    {
      msg_ps->state = SNMP_MSG_EXTERNAL_GET_VALUE;
      en->get_value_q(request_id, &msg_ps->ext_object_def);
    }
    else
    {
      en->get_object_def_pc(request_id, np.ident_len, np.ident);
      /* search failed, object id points to unknown object (nosuchname) */
      snmp_error_response(msg_ps,SNMP_ES_NOSUCHNAME);
    }
  }
  else if (msg_ps->state == SNMP_MSG_EXTERNAL_GET_VALUE)
  {
    struct mib_external_node *en;
    struct snmp_varbind *vb;

    /* get_value() answer */
    en = msg_ps->ext_mib_node;

    /* allocate output varbind */
    vb = (struct snmp_varbind *)mem_malloc(sizeof(struct snmp_varbind));
    LWIP_ASSERT("vb != NULL",vb != NULL);
    if (vb != NULL)
    {
      vb->next = NULL;
      vb->prev = NULL;

      /* move name from invb to outvb */
      vb->ident = msg_ps->vb_ptr->ident;
      vb->ident_len = msg_ps->vb_ptr->ident_len;
      /* ensure this memory is referenced once only */
      msg_ps->vb_ptr->ident = NULL;
      msg_ps->vb_ptr->ident_len = 0;

      vb->value_type = msg_ps->ext_object_def.asn_type;
      vb->value_len =  msg_ps->ext_object_def.v_len;
      if (vb->value_len > 0)
      {
        vb->value = mem_malloc(vb->value_len);
        LWIP_ASSERT("vb->value != NULL",vb->value != NULL);
        if (vb->value != NULL)
        {
          en->get_value_a(request_id, &msg_ps->ext_object_def, vb->value_len, vb->value);
          snmp_varbind_tail_add(&msg_ps->outvb, vb);
          /* search again (if vb_idx < msg_ps->invb.count) */
          msg_ps->state = SNMP_MSG_SEARCH_OBJ;
          msg_ps->vb_idx += 1;
        }
        else
        {
          en->get_value_pc(request_id, &msg_ps->ext_object_def);
          LWIP_DEBUGF(SNMP_MSG_DEBUG, ("snmp_msg_event: no variable space\n"));
          msg_ps->vb_ptr->ident = vb->ident;
          msg_ps->vb_ptr->ident_len = vb->ident_len;
          mem_free(vb);
          snmp_error_response(msg_ps,SNMP_ES_TOOBIG);
        }
      }
      else
      {
        /* vb->value_len == 0, empty value (e.g. empty string) */
        en->get_value_a(request_id, &msg_ps->ext_object_def, 0, NULL);
        vb->value = NULL;
        snmp_varbind_tail_add(&msg_ps->outvb, vb);
        /* search again (if vb_idx < msg_ps->invb.count) */
        msg_ps->state = SNMP_MSG_SEARCH_OBJ;
        msg_ps->vb_idx += 1;
      }
    }
    else
    {
      en->get_value_pc(request_id, &msg_ps->ext_object_def);
      LWIP_DEBUGF(SNMP_MSG_DEBUG, ("snmp_msg_event: no outvb space\n"));
      snmp_error_response(msg_ps,SNMP_ES_TOOBIG);
    }
  }

  while ((msg_ps->state == SNMP_MSG_SEARCH_OBJ) &&
         (msg_ps->vb_idx < msg_ps->invb.count))
  {
    struct mib_node *mn;
    struct snmp_name_ptr np;

    if (msg_ps->vb_idx == 0)
    {
      msg_ps->vb_ptr = msg_ps->invb.head;
    }
    else
    {
      msg_ps->vb_ptr = msg_ps->vb_ptr->next;
    }
    /** test object identifier for .iso.org.dod.internet prefix */
    if (snmp_iso_prefix_tst(msg_ps->vb_ptr->ident_len,  msg_ps->vb_ptr->ident))
    {
      mn = snmp_search_tree((struct mib_node*)&internet, msg_ps->vb_ptr->ident_len - 4,
                             msg_ps->vb_ptr->ident + 4, &np);
      if (mn != NULL)
      {
        if (mn->node_type == MIB_NODE_EX)
        {
          /* external object */
          struct mib_external_node *en = (struct mib_external_node*)mn;

          msg_ps->state = SNMP_MSG_EXTERNAL_GET_OBJDEF;
          /* save en && args in msg_ps!! */
          msg_ps->ext_mib_node = en;
          msg_ps->ext_name_ptr = np;

          en->get_object_def_q(en->addr_inf, request_id, np.ident_len, np.ident);
        }
        else
        {
          /* internal object */
          struct obj_def object_def;

          msg_ps->state = SNMP_MSG_INTERNAL_GET_OBJDEF;
          mn->get_object_def(np.ident_len, np.ident, &object_def);
          if (object_def.instance == MIB_OBJECT_NONE)
          {
            /* search failed, object id points to unknown object (nosuchname) */
            mn = NULL;
          }
          if (mn != NULL)
          {
            struct snmp_varbind *vb;

            msg_ps->state = SNMP_MSG_INTERNAL_GET_VALUE;
            /* allocate output varbind */
            vb = (struct snmp_varbind *)mem_malloc(sizeof(struct snmp_varbind));
            LWIP_ASSERT("vb != NULL",vb != NULL);
            if (vb != NULL)
            {
              vb->next = NULL;
              vb->prev = NULL;

              /* move name from invb to outvb */
              vb->ident = msg_ps->vb_ptr->ident;
              vb->ident_len = msg_ps->vb_ptr->ident_len;
              /* ensure this memory is referenced once only */
              msg_ps->vb_ptr->ident = NULL;
              msg_ps->vb_ptr->ident_len = 0;

              vb->value_type = object_def.asn_type;
              vb->value_len = object_def.v_len;
              if (vb->value_len > 0)
              {
                vb->value = mem_malloc(vb->value_len);
                LWIP_ASSERT("vb->value != NULL",vb->value != NULL);
                if (vb->value != NULL)
                {
                  mn->get_value(&object_def, vb->value_len, vb->value);
                  snmp_varbind_tail_add(&msg_ps->outvb, vb);
                  msg_ps->state = SNMP_MSG_SEARCH_OBJ;
                  msg_ps->vb_idx += 1;
                }
                else
                {
                  LWIP_DEBUGF(SNMP_MSG_DEBUG, ("snmp_msg_event: couldn't allocate variable space\n"));
                  msg_ps->vb_ptr->ident = vb->ident;
                  msg_ps->vb_ptr->ident_len = vb->ident_len;
                  mem_free(vb);
                  snmp_error_response(msg_ps,SNMP_ES_TOOBIG);
                }
              }
              else
              {
                /* vb->value_len == 0, empty value (e.g. empty string) */
                vb->value = NULL;
                snmp_varbind_tail_add(&msg_ps->outvb, vb);
                msg_ps->state = SNMP_MSG_SEARCH_OBJ;
                msg_ps->vb_idx += 1;
              }
            }
            else
            {
              LWIP_DEBUGF(SNMP_MSG_DEBUG, ("snmp_msg_event: couldn't allocate outvb space\n"));
              snmp_error_response(msg_ps,SNMP_ES_TOOBIG);
            }
          }
        }
      }
    }
    else
    {
      mn = NULL;
    }
    if (mn == NULL)
    {
      /* mn == NULL, noSuchName */
      snmp_error_response(msg_ps,SNMP_ES_NOSUCHNAME);
    }
  }
  if ((msg_ps->state == SNMP_MSG_SEARCH_OBJ) &&
      (msg_ps->vb_idx == msg_ps->invb.count))
  {
    snmp_ok_response(msg_ps);
  }
}
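/* Context sketch (assumption, not part of the file above): the agent's event
 * dispatcher selects this handler from the PDU type stored in the message
 * state. A minimal version along the lines of lwIP's snmp_msg_event();
 * msg_input_list and the getnext/set handlers are the usual lwIP names,
 * assumed here. */
void
snmp_msg_event(u8_t request_id)
{
  struct snmp_msg_pstat *msg_ps;

  if (request_id < SNMP_CONCURRENT_REQUESTS)
  {
    msg_ps = &msg_input_list[request_id];
    if (msg_ps->rt == SNMP_ASN1_PDU_GET_NEXT_REQ)
    {
      snmp_msg_getnext_event(request_id, msg_ps);
    }
    else if (msg_ps->rt == SNMP_ASN1_PDU_GET_REQ)
    {
      snmp_msg_get_event(request_id, msg_ps);
    }
    else if (msg_ps->rt == SNMP_ASN1_PDU_SET_REQ)
    {
      snmp_msg_set_event(request_id, msg_ps);
    }
  }
}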
/**
 * Fragment an IP datagram if too large for the netif.
 *
 * Chop the datagram into MTU-sized chunks and send them in order,
 * either by using a fixed-size static memory buffer (PBUF_REF) or by
 * pointing PBUF_REFs into p (depending on IP_FRAG_USES_STATIC_BUF).
 *
 * @param p ip packet to send
 * @param netif the netif on which to send
 * @param dest destination ip address to which to send
 *
 * @return ERR_OK if sent successfully, err_t otherwise
 */
err_t
ip_frag(struct pbuf *p, struct netif *netif, ip_addr_t *dest)
{
	struct pbuf *rambuf;
#if IP_FRAG_USES_STATIC_BUF
	struct pbuf *header;
#else
#if !LWIP_NETIF_TX_SINGLE_PBUF
	struct pbuf *newpbuf;
#endif
	struct ip_hdr *original_iphdr;
#endif
	struct ip_hdr *iphdr;
	u16_t nfb;
	u16_t left, cop;
	u16_t mtu = netif->mtu;
	u16_t ofo, omf;
	u16_t last;
	u16_t poff = IP_HLEN;
	u16_t tmp;
#if !IP_FRAG_USES_STATIC_BUF && !LWIP_NETIF_TX_SINGLE_PBUF
	u16_t newpbuflen = 0;
	u16_t left_to_copy;
#endif

	/* Get a RAM based MTU sized pbuf */
#if IP_FRAG_USES_STATIC_BUF
	/* When using a static buffer, we use a PBUF_REF, which we will
	 * use to reference the packet (without link header).
	 * Layer and length are irrelevant.
	 */
	rambuf = pbuf_alloc(PBUF_LINK, 0, PBUF_REF);
	if (rambuf == NULL) {
		LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_frag: pbuf_alloc(PBUF_LINK, 0, PBUF_REF) failed\n"));
		return ERR_MEM;
	}
	rambuf->tot_len = rambuf->len = mtu;
	rambuf->payload = LWIP_MEM_ALIGN((void *)buf);

	/* Copy the IP header in it */
	iphdr = (struct ip_hdr *)rambuf->payload;
	SMEMCPY(iphdr, p->payload, IP_HLEN);
#else /* IP_FRAG_USES_STATIC_BUF */
	original_iphdr = (struct ip_hdr *)p->payload;
	iphdr = original_iphdr;
#endif /* IP_FRAG_USES_STATIC_BUF */

	/* Save original offset */
	tmp = ntohs(IPH_OFFSET(iphdr));
	ofo = tmp & IP_OFFMASK;
	omf = tmp & IP_MF;

	left = p->tot_len - IP_HLEN;

	nfb = (mtu - IP_HLEN) / 8;

	while (left) {
		last = (left <= mtu - IP_HLEN);

		/* Set new offset and MF flag */
		tmp = omf | (IP_OFFMASK & (ofo));
		if (!last) {
			tmp = tmp | IP_MF;
		}

		/* Fill this fragment */
		cop = last ? left : nfb * 8;

#if IP_FRAG_USES_STATIC_BUF
		poff += pbuf_copy_partial(p, (u8_t*)iphdr + IP_HLEN, cop, poff);
#else /* IP_FRAG_USES_STATIC_BUF */
#if LWIP_NETIF_TX_SINGLE_PBUF
		rambuf = pbuf_alloc(PBUF_IP, cop, PBUF_RAM);
		if (rambuf == NULL) {
			return ERR_MEM;
		}
		LWIP_ASSERT("this needs a pbuf in one piece!",
					(rambuf->len == rambuf->tot_len) && (rambuf->next == NULL));
		poff += pbuf_copy_partial(p, rambuf->payload, cop, poff);
		/* make room for the IP header */
		if(pbuf_header(rambuf, IP_HLEN)) {
			pbuf_free(rambuf);
			return ERR_MEM;
		}
		/* fill in the IP header */
		SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
		iphdr = rambuf->payload;
#else /* LWIP_NETIF_TX_SINGLE_PBUF */
		/* When not using a static buffer, create a chain of pbufs.
		 * The first will be a PBUF_RAM holding the link and IP header.
		 * The rest will be PBUF_REFs mirroring the pbuf chain to be fragged,
		 * but limited to the size of an mtu.
		 */
		rambuf = pbuf_alloc(PBUF_LINK, IP_HLEN, PBUF_RAM);
		if (rambuf == NULL) {
			return ERR_MEM;
		}
		LWIP_ASSERT("this needs a pbuf in one piece!",
					(p->len >= (IP_HLEN)));
		SMEMCPY(rambuf->payload, original_iphdr, IP_HLEN);
		iphdr = (struct ip_hdr *)rambuf->payload;

		/* Can just adjust p directly for needed offset. */
		p->payload = (u8_t *)p->payload + poff;
		p->len -= poff;

		left_to_copy = cop;
		while (left_to_copy) {
			struct pbuf_custom_ref *pcr;
			newpbuflen = (left_to_copy < p->len) ? left_to_copy : p->len;
			/* Is this pbuf already empty? */
			if (!newpbuflen) {
				p = p->next;
				continue;
			}
			pcr = ip_frag_alloc_pbuf_custom_ref();
			if (pcr == NULL) {
				pbuf_free(rambuf);
				return ERR_MEM;
			}
			/* Mirror this pbuf, although we might not need all of it. */
			newpbuf = pbuf_alloced_custom(PBUF_RAW, newpbuflen, PBUF_REF, &pcr->pc, p->payload, newpbuflen);
			if (newpbuf == NULL) {
				ip_frag_free_pbuf_custom_ref(pcr);
				pbuf_free(rambuf);
				return ERR_MEM;
			}
			pbuf_ref(p);
			pcr->original = p;
			pcr->pc.custom_free_function = ipfrag_free_pbuf_custom;

			/* Add it to end of rambuf's chain, but using pbuf_cat, not pbuf_chain
			 * so that it is removed when pbuf_dechain is later called on rambuf.
			 */
			pbuf_cat(rambuf, newpbuf);
			left_to_copy -= newpbuflen;
			if (left_to_copy) {
				p = p->next;
			}
		}
		poff = newpbuflen;
#endif /* LWIP_NETIF_TX_SINGLE_PBUF */
#endif /* IP_FRAG_USES_STATIC_BUF */

		/* Correct header */
		IPH_OFFSET_SET(iphdr, htons(tmp));
		IPH_LEN_SET(iphdr, htons(cop + IP_HLEN));
		IPH_CHKSUM_SET(iphdr, 0);
		IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN));

#if IP_FRAG_USES_STATIC_BUF
		if (last) {
			pbuf_realloc(rambuf, left + IP_HLEN);
		}

		/* This part is ugly: we alloc a RAM based pbuf for
		 * the link level header for each chunk and then
	 * free it. A PBUF_ROM style pbuf for which pbuf_header
		 * worked would make things simpler.
		 */
		header = pbuf_alloc(PBUF_LINK, 0, PBUF_RAM);
		if (header != NULL) {
			pbuf_chain(header, rambuf);
			netif->output(netif, header, dest);
			IPFRAG_STATS_INC(ip_frag.xmit);
			snmp_inc_ipfragcreates();
			pbuf_free(header);
		} else {
			LWIP_DEBUGF(IP_REASS_DEBUG, ("ip_frag: pbuf_alloc() for header failed\n"));
			pbuf_free(rambuf);
			return ERR_MEM;
		}
#else /* IP_FRAG_USES_STATIC_BUF */
		/* No need for separate header pbuf - we allowed room for it in rambuf
		 * when allocated.
		 */
		netif->output(netif, rambuf, dest);
		IPFRAG_STATS_INC(ip_frag.xmit);

		/* Unfortunately we can't reuse rambuf - the hardware may still be
		 * using the buffer. Instead we free it (and the ensuing chain) and
		 * recreate it next time round the loop. If we're lucky the hardware
		 * will have already sent the packet, the free will really free, and
		 * there will be zero memory penalty.
		 */

		pbuf_free(rambuf);
#endif /* IP_FRAG_USES_STATIC_BUF */
		left -= cop;
		ofo += nfb;
	}
#if IP_FRAG_USES_STATIC_BUF
	pbuf_free(rambuf);
#endif /* IP_FRAG_USES_STATIC_BUF */
	snmp_inc_ipfragoks();
	return ERR_OK;
}
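/* Usage sketch (assumption, not part of the file above): in lwIP the IP output
 * path only hands a datagram to ip_frag() when IP_FRAG is enabled and the
 * packet exceeds the interface MTU; a hypothetical wrapper showing the check: */
static err_t
send_or_fragment(struct pbuf *p, struct netif *netif, ip_addr_t *dest)
{
#if IP_FRAG
	/* don't fragment if the interface has its MTU set to 0 (e.g. loopif) */
	if (netif->mtu && (p->tot_len > netif->mtu)) {
		return ip_frag(p, netif, dest);
	}
#endif /* IP_FRAG */
	return netif->output(netif, p, dest);
}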
/**
 * Closes the TX side of a connection held by the PCB.
 * For tcp_close(), a RST is sent if the application didn't receive all data
 * (tcp_recved() not called for all data passed to recv callback).
 *
 * Listening pcbs are freed and may not be referenced any more.
 * Connection pcbs are freed if not yet connected and may not be referenced
 * any more. If a connection is established (at least SYN received or in
 * a closing state), the connection is closed, and put in a closing state.
 * The pcb is then automatically freed in tcp_slowtmr(). It is therefore
 * unsafe to reference it.
 *
 * @param pcb the tcp_pcb to close
 * @return ERR_OK if connection has been closed
 *         another err_t if closing failed and pcb is not freed
 */
static err_t
tcp_close_shutdown(struct tcp_pcb *pcb, u8_t rst_on_unacked_data)
{
  err_t err;

  if (rst_on_unacked_data && (pcb->state != LISTEN)) {
    if ((pcb->refused_data != NULL) || (pcb->rcv_wnd != TCP_WND)) {
      /* Not all data received by application, send RST to tell the remote
         side about this. */
      LWIP_ASSERT("pcb->flags & TF_RXCLOSED", pcb->flags & TF_RXCLOSED);

      /* don't call tcp_abort here: we must not deallocate the pcb since
         that might not be expected when calling tcp_close */
      tcp_rst(pcb->snd_nxt, pcb->rcv_nxt, &pcb->local_ip, &pcb->remote_ip,
        pcb->local_port, pcb->remote_port);

      tcp_pcb_purge(pcb);

      /* TODO: to which state do we move now? */

      /* move to TIME_WAIT since we close actively */
      TCP_RMV(&tcp_active_pcbs, pcb);
      pcb->state = TIME_WAIT;
      TCP_REG(&tcp_tw_pcbs, pcb);

      return ERR_OK;
    }
  }

  switch (pcb->state) {
  case CLOSED:
    /* Closing a pcb in the CLOSED state might seem erroneous,
     * however, it is in this state once allocated and as yet unused
     * and the user needs some way to free it should the need arise.
     * Calling tcp_close() with a pcb that has already been closed (i.e. twice)
     * or for a pcb that has been used and then entered the CLOSED state
     * is erroneous, but this should never happen as the pcb has in those cases
     * been freed, and so any remaining handles are bogus. */
    err = ERR_OK;
    TCP_RMV(&tcp_bound_pcbs, pcb);
    memp_free(MEMP_TCP_PCB, pcb);
    pcb = NULL;
    break;
  case LISTEN:
    err = ERR_OK;
    tcp_pcb_remove(&tcp_listen_pcbs.pcbs, pcb);
    memp_free(MEMP_TCP_PCB_LISTEN, pcb);
    pcb = NULL;
    break;
  case SYN_SENT:
    err = ERR_OK;
    tcp_pcb_remove(&tcp_active_pcbs, pcb);
    memp_free(MEMP_TCP_PCB, pcb);
    pcb = NULL;
    snmp_inc_tcpattemptfails();
    break;
  case SYN_RCVD:
    err = tcp_send_fin(pcb);
    if (err == ERR_OK) {
      snmp_inc_tcpattemptfails();
      pcb->state = FIN_WAIT_1;
    }
    break;
  case ESTABLISHED:
    err = tcp_send_fin(pcb);
    if (err == ERR_OK) {
      snmp_inc_tcpestabresets();
      pcb->state = FIN_WAIT_1;
    }
    break;
  case CLOSE_WAIT:
    err = tcp_send_fin(pcb);
    if (err == ERR_OK) {
      snmp_inc_tcpestabresets();
      pcb->state = LAST_ACK;
    }
    break;
  default:
    /* Has already been closed, do nothing. */
    err = ERR_OK;
    pcb = NULL;
    break;
  }

  if (pcb != NULL && err == ERR_OK) {
    /* To ensure all data has been sent when tcp_close returns, we have
       to make sure tcp_output doesn't fail.
       Since we don't really have to ensure all data has been sent when tcp_close
       returns (unsent data is sent from tcp timer functions, also), we don't care
       for the return value of tcp_output for now. */
    /* @todo: When implementing SO_LINGER, this must be changed somehow:
       If SOF_LINGER is set, the data should be sent and acked before close returns.
       This can only be valid for sequential APIs, not for the raw API. */
    tcp_output(pcb);
  }
  return err;
}
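/* Usage sketch (assumption): the public tcp_close() is essentially a thin
 * wrapper that marks the RX side closed (the TF_RXCLOSED flag asserted above)
 * and then delegates to the helper, roughly: */
err_t
tcp_close(struct tcp_pcb *pcb)
{
  if (pcb->state != LISTEN) {
    /* Set a flag not to receive any more data from the remote side */
    pcb->flags |= TF_RXCLOSED;
  }
  /* ... and close the TX side */
  return tcp_close_shutdown(pcb, 1);
}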
/**
 * Tree expansion.
 */
struct mib_node *
snmp_expand_tree(struct mib_node *node, u8_t ident_len, s32_t *ident, struct snmp_obj_id *oidret)
{
  u8_t node_type, ext_level, climb_tree;

  ext_level = 0;
  /* reset node stack */
  node_stack_cnt = 0;
  while (node != NULL)
  {
    climb_tree = 0;
    node_type = node->node_type;
    if ((node_type == MIB_NODE_AR) || (node_type == MIB_NODE_RA))
    {
      struct mib_array_node *an;
      u16_t i;

      /* array node (internal ROM or RAM, fixed length) */
      an = (struct mib_array_node *)node;
      if (ident_len > 0)
      {
        i = 0;
        while ((i < an->maxlength) && (an->objid[i] < *ident))
        {
          i++;
        }
        if (i < an->maxlength)
        {
          LWIP_DEBUGF(SNMP_MIB_DEBUG,("an->objid[%"U16_F"]==%"S32_F" *ident==%"S32_F"\n",i,an->objid[i],*ident));
          /* add identifier to oidret */
          oidret->id[oidret->len] = an->objid[i];
          (oidret->len)++;

          if (an->nptr[i] == NULL)
          {
            LWIP_DEBUGF(SNMP_MIB_DEBUG,("leaf node\n"));
            /* leaf node (e.g. in a fixed size table) */
            if (an->objid[i] > *ident)
            {
              return (struct mib_node*)an;
            }
            else if ((i + 1) < an->maxlength)
            {
              /* an->objid[i] == *ident */
              (oidret->len)--;
              oidret->id[oidret->len] = an->objid[i + 1];
              (oidret->len)++;
              return (struct mib_node*)an;
            }
            else
            {
              /* (i + 1) == an->maxlength */
              (oidret->len)--;
              climb_tree = 1;
            }
          }
          else
          {
            u8_t j;
            struct nse cur_node;

            LWIP_DEBUGF(SNMP_MIB_DEBUG,("non-leaf node\n"));
            /* non-leaf, store right child ptr and id */
            LWIP_ASSERT("i < 0xff", i < 0xff);
            j = (u8_t)i + 1;
            while ((j < an->maxlength) && (empty_table(an->nptr[j])))
            {
              j++;
            }
            if (j < an->maxlength)
            {
              cur_node.r_ptr = an->nptr[j];
              cur_node.r_id = an->objid[j];
              cur_node.r_nl = 0;
            }
            else
            {
              cur_node.r_ptr = NULL;
            }
            push_node(&cur_node);
            if (an->objid[i] == *ident)
            {
              ident_len--;
              ident++;
            }
            else
            {
              /* an->objid[i] < *ident */
              ident_len = 0;
            }
            /* follow next child pointer */
            node = an->nptr[i];
          }
        }
        else
        {
          /* i == an->maxlength */
          climb_tree = 1;
        }
      }
      else
      {
        u8_t j;
        /* ident_len == 0, complete with leftmost '.thing' */
        j = 0;
        while ((j < an->maxlength) && empty_table(an->nptr[j]))
        {
          j++;
        }
        if (j < an->maxlength)
        {
          LWIP_DEBUGF(SNMP_MIB_DEBUG,("left an->objid[j]==%"S32_F"\n",an->objid[j]));
          oidret->id[oidret->len] = an->objid[j];
          (oidret->len)++;
          if (an->nptr[j] == NULL)
          {
            /* leaf node */
            return (struct mib_node*)an;
          }
          else
          {
            /* no leaf, continue */
            node = an->nptr[j];
          }
        }
        else
        {
          /* j == an->maxlength */
          climb_tree = 1;
        }
      }
    }
    else if(node_type == MIB_NODE_LR)
    {
      struct mib_list_rootnode *lrn;
      struct mib_list_node *ln;

      /* list root node (internal 'RAM', variable length) */
      lrn = (struct mib_list_rootnode *)node;
      if (ident_len > 0)
      {
        ln = lrn->head;
        /* iterate over list, head to tail */
        while ((ln != NULL) && (ln->objid < *ident))
        {
          ln = ln->next;
        }
        if (ln != NULL)
        {
          LWIP_DEBUGF(SNMP_MIB_DEBUG,("ln->objid==%"S32_F" *ident==%"S32_F"\n",ln->objid,*ident));
          oidret->id[oidret->len] = ln->objid;
          (oidret->len)++;
          if (ln->nptr == NULL)
          {
            /* leaf node */
            if (ln->objid > *ident)
            {
              return (struct mib_node*)lrn;
            }
            else if (ln->next != NULL)
            {
              /* ln->objid == *ident */
              (oidret->len)--;
              oidret->id[oidret->len] = ln->next->objid;
              (oidret->len)++;
              return (struct mib_node*)lrn;
            }
            else
            {
              /* ln->next == NULL */
              (oidret->len)--;
              climb_tree = 1;
            }
          }
          else
          {
            struct mib_list_node *jn;
            struct nse cur_node;

            /* non-leaf, store right child ptr and id */
            jn = ln->next;
            while ((jn != NULL) && empty_table(jn->nptr))
            {
              jn = jn->next;
            }
            if (jn != NULL)
            {
              cur_node.r_ptr = jn->nptr;
              cur_node.r_id = jn->objid;
              cur_node.r_nl = 0;
            }
            else
            {
              cur_node.r_ptr = NULL;
            }
            push_node(&cur_node);
            if (ln->objid == *ident)
            {
              ident_len--;
              ident++;
            }
            else
            {
              /* ln->objid < *ident */
              ident_len = 0;
            }
            /* follow next child pointer */
            node = ln->nptr;
          }

        }
        else
        {
          /* ln == NULL */
          climb_tree = 1;
        }
      }
      else
      {
        struct mib_list_node *jn;
        /* ident_len == 0, complete with leftmost '.thing' */
        jn = lrn->head;
        while ((jn != NULL) && empty_table(jn->nptr))
        {
          jn = jn->next;
        }
        if (jn != NULL)
        {
          LWIP_DEBUGF(SNMP_MIB_DEBUG,("left jn->objid==%"S32_F"\n",jn->objid));
          oidret->id[oidret->len] = jn->objid;
          (oidret->len)++;
          if (jn->nptr == NULL)
          {
            /* leaf node */
            LWIP_DEBUGF(SNMP_MIB_DEBUG,("jn->nptr == NULL\n"));
            return (struct mib_node*)lrn;
          }
          else
          {
            /* no leaf, continue */
            node = jn->nptr;
          }
        }
        else
        {
          /* jn == NULL */
          climb_tree = 1;
        }
      }
    }
    else if(node_type == MIB_NODE_EX)
    {
      struct mib_external_node *en;
      s32_t ex_id;

      /* external node (addressing and access via functions) */
      en = (struct mib_external_node *)node;
      if (ident_len > 0)
      {
        u16_t i, len;

        i = 0;
        len = en->level_length(en->addr_inf,ext_level);
        while ((i < len) && (en->ident_cmp(en->addr_inf,ext_level,i,*ident) < 0))
        {
          i++;
        }
        if (i < len)
        {
          /* add identifier to oidret */
          en->get_objid(en->addr_inf,ext_level,i,&ex_id);
          LWIP_DEBUGF(SNMP_MIB_DEBUG,("en->objid[%"U16_F"]==%"S32_F" *ident==%"S32_F"\n",i,ex_id,*ident));
          oidret->id[oidret->len] = ex_id;
          (oidret->len)++;

          if ((ext_level + 1) == en->tree_levels)
          {
            LWIP_DEBUGF(SNMP_MIB_DEBUG,("leaf node\n"));
            /* leaf node */
            if (ex_id > *ident)
            {
              return (struct mib_node*)en;
            }
            else if ((i + 1) < len)
            {
              /* ex_id == *ident */
              en->get_objid(en->addr_inf,ext_level,i + 1,&ex_id);
              (oidret->len)--;
              oidret->id[oidret->len] = ex_id;
              (oidret->len)++;
              return (struct mib_node*)en;
            }
            else
            {
              /* (i + 1) == len */
              (oidret->len)--;
              climb_tree = 1;
            }
          }
          else
          {
            u8_t j;
            struct nse cur_node;

            LWIP_DEBUGF(SNMP_MIB_DEBUG,("non-leaf node\n"));
            /* non-leaf, store right child ptr and id */
            LWIP_ASSERT("i < 0xff", i < 0xff);
            j = (u8_t)i + 1;
            if (j < len)
            {
              /* right node is the current external node */
              cur_node.r_ptr = node;
              en->get_objid(en->addr_inf,ext_level,j,&cur_node.r_id);
              cur_node.r_nl = ext_level + 1;
            }
            else
            {
              cur_node.r_ptr = NULL;
            }
            push_node(&cur_node);
            if (en->ident_cmp(en->addr_inf,ext_level,i,*ident) == 0)
            {
              ident_len--;
              ident++;
            }
            else
            {
              /* external id < *ident */
              ident_len = 0;
            }
            /* proceed to child */
            ext_level++;
          }
        }
        else
        {
          /* i == len (en->level_len()) */
          climb_tree = 1;
        }
      }
      else
      {
        /* ident_len == 0, complete with leftmost '.thing' */
        en->get_objid(en->addr_inf,ext_level,0,&ex_id);
        LWIP_DEBUGF(SNMP_MIB_DEBUG,("left en->objid==%"S32_F"\n",ex_id));
        oidret->id[oidret->len] = ex_id;
        (oidret->len)++;
        if ((ext_level + 1) == en->tree_levels)
        {
          /* leaf node */
          LWIP_DEBUGF(SNMP_MIB_DEBUG,("(ext_level + 1) == en->tree_levels\n"));
          return (struct mib_node*)en;
        }
        else
        {
          /* no leaf, proceed to child */
          ext_level++;
        }
      }
    }
    else if(node_type == MIB_NODE_SC)
    {
      mib_scalar_node *sn;

      /* scalar node  */
      sn = (mib_scalar_node *)node;
      if (ident_len > 0)
      {
        /* at .0 */
        climb_tree = 1;
      }
      else
      {
        /* ident_len == 0, complete object identifier */
        oidret->id[oidret->len] = 0;
        (oidret->len)++;
        /* leaf node */
        LWIP_DEBUGF(SNMP_MIB_DEBUG,("completed scalar leaf\n"));
        return (struct mib_node*)sn;
      }
    }
    else
    {
      /* unknown/unhandled node_type */
      LWIP_DEBUGF(SNMP_MIB_DEBUG,("expand failed node_type %"U16_F" unkown\n",(u16_t)node_type));
      return NULL;
    }

    if (climb_tree)
    {
      struct nse child;

      /* find right child ptr */
      child.r_ptr = NULL;
      child.r_id = 0;
      child.r_nl = 0;
      while ((node_stack_cnt > 0) && (child.r_ptr == NULL))
      {
        pop_node(&child);
        /* trim returned oid */
        (oidret->len)--;
      }
      if (child.r_ptr != NULL)
      {
        /* incoming ident is useless beyond this point */
        ident_len = 0;
        oidret->id[oidret->len] = child.r_id;
        oidret->len++;
        node = child.r_ptr;
        ext_level = child.r_nl;
      }
      else
      {
        /* tree ends here ... */
        LWIP_DEBUGF(SNMP_MIB_DEBUG,("expand failed, tree ends here\n"));
        return NULL;
      }
    }
  }
  /* done, found nothing */
  LWIP_DEBUGF(SNMP_MIB_DEBUG,("expand failed node==%p\n",(void*)node));
  return NULL;
}
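/* Usage sketch (assumption): a GETNEXT handler expands the tree from the
 * .iso.org.dod.internet prefix to find the lexicographic successor of the
 * requested OID; snmp_iso_prefix_expand() and 'internet' are the lwIP names
 * assumed here. */
static struct mib_node *
find_next_object(struct snmp_varbind *vb, struct snmp_obj_id *oidret)
{
  struct mib_node *next = NULL;

  /* write the .iso.org.dod.internet prefix into oidret first */
  if (snmp_iso_prefix_expand(vb->ident_len, vb->ident, oidret))
  {
    next = snmp_expand_tree((struct mib_node *)&internet,
                            vb->ident_len - 4, vb->ident + 4, oidret);
    /* on success, oidret holds the complete OID of the successor instance */
  }
  return next;
}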
/**
 * Chain a new pbuf into the pbuf list that composes the datagram. The pbuf
 * list will grow over time as new pbufs are received.
 * Also checks that the datagram passes basic continuity checks (if the last
 * fragment was received at least once).
 * @param ipr points to the reassembly data (holding the 'root' pbuf) of the datagram being assembled
 * @param new_p points to the pbuf for the current fragment
 * @return 0 if invalid, >0 otherwise
 */
static int
ip_reass_chain_frag_into_datagram_and_validate(struct ip_reassdata *ipr, struct pbuf *new_p)
{
	struct ip_reass_helper *iprh, *iprh_tmp, *iprh_prev=NULL;
	struct pbuf *q;
	u16_t offset,len;
	struct ip_hdr *fraghdr;
	int valid = 1;

	/* Extract length and fragment offset from current fragment */
	fraghdr = (struct ip_hdr*)new_p->payload;
	len = ntohs(IPH_LEN(fraghdr)) - IPH_HL(fraghdr) * 4;
	offset = (ntohs(IPH_OFFSET(fraghdr)) & IP_OFFMASK) * 8;

	/* overwrite the fragment's ip header from the pbuf with our helper struct,
	 * and setup the embedded helper structure. */
	/* make sure the struct ip_reass_helper fits into the IP header */
	LWIP_ASSERT("sizeof(struct ip_reass_helper) <= IP_HLEN",
				sizeof(struct ip_reass_helper) <= IP_HLEN);
	iprh = (struct ip_reass_helper*)new_p->payload;
	iprh->next_pbuf = NULL;
	iprh->start = offset;
	iprh->end = offset + len;

	/* Iterate through until we either get to the end of the list (append),
	 * or we find one with a larger offset (insert). */
	for (q = ipr->p; q != NULL;) {
		iprh_tmp = (struct ip_reass_helper*)q->payload;
		if (iprh->start < iprh_tmp->start) {
			/* the new pbuf should be inserted before this */
			iprh->next_pbuf = q;
			if (iprh_prev != NULL) {
				/* not the fragment with the lowest offset */
#if IP_REASS_CHECK_OVERLAP
				if ((iprh->start < iprh_prev->end) || (iprh->end > iprh_tmp->start)) {
					/* fragment overlaps with previous or following, throw away */
					goto freepbuf;
				}
#endif /* IP_REASS_CHECK_OVERLAP */
				iprh_prev->next_pbuf = new_p;
			} else {
				/* fragment with the lowest offset */
				ipr->p = new_p;
			}
			break;
		} else if(iprh->start == iprh_tmp->start) {
			/* received the same datagram twice: no need to keep the datagram */
			goto freepbuf;
#if IP_REASS_CHECK_OVERLAP
		} else if(iprh->start < iprh_tmp->end) {
			/* overlap: no need to keep the new datagram */
			goto freepbuf;
#endif /* IP_REASS_CHECK_OVERLAP */
		} else {
			/* Check if the fragments received so far have no holes. */
			if (iprh_prev != NULL) {
				if (iprh_prev->end != iprh_tmp->start) {
					/* There is a fragment missing between the current
					 * and the previous fragment */
					valid = 0;
				}
			}
		}
		q = iprh_tmp->next_pbuf;
		iprh_prev = iprh_tmp;
	}

	/* If q is NULL, then we made it to the end of the list. Determine what to do now */
	if (q == NULL) {
		if (iprh_prev != NULL) {
			/* this is (for now), the fragment with the highest offset:
			 * chain it to the last fragment */
#if IP_REASS_CHECK_OVERLAP
			LWIP_ASSERT("check fragments don't overlap", iprh_prev->end <= iprh->start);
#endif /* IP_REASS_CHECK_OVERLAP */
			iprh_prev->next_pbuf = new_p;
			if (iprh_prev->end != iprh->start) {
				valid = 0;
			}
		} else {
#if IP_REASS_CHECK_OVERLAP
			LWIP_ASSERT("no previous fragment, this must be the first fragment!",
						ipr->p == NULL);
#endif /* IP_REASS_CHECK_OVERLAP */
			/* this is the first fragment we ever received for this ip datagram */
			ipr->p = new_p;
		}
	}

	/* At this point, the validation part begins: */
	/* If we already received the last fragment */
	if ((ipr->flags & IP_REASS_FLAG_LASTFRAG) != 0) {
		/* and had no holes so far */
		if (valid) {
			/* then check if the rest of the fragments are here */
			/* Check if the queue starts with the first fragment (offset 0) */
			if (((struct ip_reass_helper*)ipr->p->payload)->start != 0) {
				valid = 0;
			} else {
				/* and check that there are no holes after this fragment */
				iprh_prev = iprh;
				q = iprh->next_pbuf;
				while (q != NULL) {
					iprh = (struct ip_reass_helper*)q->payload;
					if (iprh_prev->end != iprh->start) {
						valid = 0;
						break;
					}
					iprh_prev = iprh;
					q = iprh->next_pbuf;
				}
				/* if still valid, all fragments have been received
				 * (because the fragment with MF==0 has already arrived) */
				if (valid) {
					LWIP_ASSERT("sanity check", ipr->p != NULL);
					LWIP_ASSERT("sanity check",
								((struct ip_reass_helper*)ipr->p->payload) != iprh);
					LWIP_ASSERT("validate_datagram:next_pbuf!=NULL",
								iprh->next_pbuf == NULL);
					LWIP_ASSERT("validate_datagram:datagram end!=datagram len",
								iprh->end == ipr->datagram_len);
				}
			}
		}
		/* If valid is 0 here, there are some fragments missing in the middle
		 * (since MF == 0 has already arrived). Such datagrams simply time out if
		 * no more fragments are received... */
		return valid;
	}
	/* If we come here, not all fragments were received, yet! */
	return 0; /* not yet valid! */
#if IP_REASS_CHECK_OVERLAP
freepbuf:
	ip_reass_pbufcount -= pbuf_clen(new_p);
	pbuf_free(new_p);
	return 0;
#endif /* IP_REASS_CHECK_OVERLAP */
}
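/* For reference (paraphrased, assumption): the helper structure that this
 * function overlays onto each fragment's IP header looks roughly like this in
 * lwIP's ip_frag.c; it must fit inside IP_HLEN bytes, which the LWIP_ASSERT
 * above checks (packing macros omitted here). */
struct ip_reass_helper {
	struct pbuf *next_pbuf; /* next fragment, ordered by offset */
	u16_t start;            /* offset of the first byte of this fragment */
	u16_t end;              /* offset one past the last byte of this fragment */
};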
/**
 * Should be called at the beginning of the program to set up the
 * network interface.
 *
 * This function should be passed as a parameter to netif_add().
 *
 * \param[in] netif the lwip network interface structure for this lpc_enetif
 * \return ERR_OK if the loopif is initialized
 *         ERR_MEM if private data couldn't be allocated
 *         any other err_t on error
 */
err_t lpc_enetif_init(struct netif *netif)
{
	err_t err;

	LWIP_ASSERT("netif != NULL", (netif != NULL));
    
	lpc_enetdata.netif = netif;

	/* set MAC hardware address */
	board_get_macaddr(netif->hwaddr);
	netif->hwaddr_len = ETHARP_HWADDR_LEN;

	/* maximum transmission unit (MTU) */
	netif->mtu = 1500;

	/* device capabilities */
	netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_UP |
		NETIF_FLAG_ETHERNET;

	/* Initialize the hardware */
	netif->state = &lpc_enetdata;
	err = low_level_init(netif);
	if (err != ERR_OK)
		return err;

#if LWIP_NETIF_HOSTNAME
	/* Initialize interface hostname */
	netif->hostname = "lwiplpc";
#endif /* LWIP_NETIF_HOSTNAME */

	netif->name[0] = 'e';
	netif->name[1] = 'n';

	netif->output = lpc_etharp_output;
	netif->linkoutput = lpc_low_level_output;

	/* For FreeRTOS, start tasks */
#if NO_SYS == 0
	lpc_enetdata.xTXDCountSem = xSemaphoreCreateCounting(LPC_NUM_BUFF_TXDESCS,
		LPC_NUM_BUFF_TXDESCS);
	LWIP_ASSERT("xTXDCountSem creation error",
		(lpc_enetdata.xTXDCountSem != NULL));

	err = sys_mutex_new(&lpc_enetdata.TXLockMutex);
	LWIP_ASSERT("TXLockMutex creation error", (err == ERR_OK));

	/* Packet receive task */
	err = sys_sem_new(&lpc_enetdata.RxSem, 0);
	LWIP_ASSERT("RxSem creation error", (err == ERR_OK));
	sys_thread_new("receive_thread", vPacketReceiveTask, netif->state,
		DEFAULT_THREAD_STACKSIZE, tskRECPKT_PRIORITY);

	/* Transmit cleanup task */
	err = sys_sem_new(&lpc_enetdata.TxCleanSem, 0);
	LWIP_ASSERT("TxCleanSem creation error", (err == ERR_OK));
	sys_thread_new("txclean_thread", vTransmitCleanupTask, netif->state,
		DEFAULT_THREAD_STACKSIZE, tskTXCLEAN_PRIORITY);
#endif

	return ERR_OK;
}
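/* Usage sketch (assumption; addresses are placeholders): as noted above, this
 * init function is passed to netif_add(), e.g. after tcpip_init() has run: */
static struct netif lpc_netif;

static void netif_setup(void)
{
	ip_addr_t ipaddr, netmask, gw;

	IP4_ADDR(&ipaddr, 192, 168, 1, 10);
	IP4_ADDR(&netmask, 255, 255, 255, 0);
	IP4_ADDR(&gw, 192, 168, 1, 1);

	netif_add(&lpc_netif, &ipaddr, &netmask, &gw, NULL, lpc_enetif_init, tcpip_input);
	netif_set_default(&lpc_netif);
	netif_set_up(&lpc_netif);
}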
/**
 * Send the raw IP packet to the given address. Note that you cannot actually
 * modify the IP header here (this is inconsistent with the receive callback,
 * where you do get the IP header); you can only specify the IP payload.
 * Changing that requires some more work in lwIP (a raw_send() function would
 * then be added).
 *
 * @param pcb the raw pcb which to send
 * @param p the IP payload to send
 * @param ipaddr the destination address of the IP packet
 *
 */
err_t ICACHE_FLASH_ATTR
raw_sendto(struct raw_pcb *pcb, struct pbuf *p, ip_addr_t *ipaddr)
{
  err_t err;
  struct netif *netif;
  ip_addr_t *src_ip;
  struct pbuf *q; /* q will be sent down the stack */
  
  LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE, ("raw_sendto\n"));
  
  /* not enough space to add an IP header to first pbuf in given p chain? */
  if (pbuf_header(p, IP_HLEN)) {
    /* allocate header in new pbuf */
    q = pbuf_alloc(PBUF_IP, 0, PBUF_RAM);
    /* new header pbuf could not be allocated? */
    if (q == NULL) {
      LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_TRACE | LWIP_DBG_LEVEL_SERIOUS, ("raw_sendto: could not allocate header\n"));
      return ERR_MEM;
    }
    if (p->tot_len != 0) {
      /* chain header q in front of given pbuf p */
      pbuf_chain(q, p);
    }
    /* { first pbuf q points to header pbuf } */
    LWIP_DEBUGF(RAW_DEBUG, ("raw_sendto: added header pbuf %p before given pbuf %p\n", (void *)q, (void *)p));
  }  else {
    /* first pbuf q equals given pbuf */
    q = p;
    if(pbuf_header(q, -IP_HLEN)) {
      LWIP_ASSERT("Can't restore header we just removed!", 0);
      return ERR_MEM;
    }
  }

  if ((netif = ip_route(ipaddr)) == NULL) {
    LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: No route to %"U16_F".%"U16_F".%"U16_F".%"U16_F"\n",
      ip4_addr1_16(ipaddr), ip4_addr2_16(ipaddr), ip4_addr3_16(ipaddr), ip4_addr4_16(ipaddr)));
    /* free any temporary header pbuf allocated above */
    if (q != p) {
      pbuf_free(q);
    }
    return ERR_RTE;
  }

#if IP_SOF_BROADCAST
  /* broadcast filter? */
  if (((pcb->so_options & SOF_BROADCAST) == 0) && ip_addr_isbroadcast(ipaddr, netif)) {
    LWIP_DEBUGF(RAW_DEBUG | LWIP_DBG_LEVEL_WARNING, ("raw_sendto: SOF_BROADCAST not enabled on pcb %p\n", (void *)pcb));
    /* free any temporary header pbuf allocated above */
    if (q != p) {
      pbuf_free(q);
    }
    return ERR_VAL;
  }
#endif /* IP_SOF_BROADCAST */

  if (ip_addr_isany(&pcb->local_ip)) {
    /* use outgoing network interface IP address as source address */
    src_ip = &(netif->ip_addr);
  } else {
    /* use RAW PCB local IP address as source address */
    src_ip = &(pcb->local_ip);
  }

#if LWIP_NETIF_HWADDRHINT
  netif->addr_hint = &(pcb->addr_hint);
#endif /* LWIP_NETIF_HWADDRHINT*/
  err = ip_output_if (q, src_ip, ipaddr, pcb->ttl, pcb->tos, pcb->protocol, netif);
#if LWIP_NETIF_HWADDRHINT
  netif->addr_hint = NULL;
#endif /* LWIP_NETIF_HWADDRHINT*/

  /* did we chain a header earlier? */
  if (q != p) {
    /* free the header */
    pbuf_free(q);
  }
  return err;
}
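/* Usage sketch (assumption): sending an ICMP echo request over a raw pcb
 * created with raw_new(IP_PROTO_ICMP). The pbuf is allocated at the PBUF_IP
 * layer so raw_sendto() can prepend the IP header in place; filling in the
 * ICMP header and checksum is elided. */
static void ping_send(struct raw_pcb *ping_pcb, ip_addr_t *dest)
{
  struct pbuf *p;

  p = pbuf_alloc(PBUF_IP, sizeof(struct icmp_echo_hdr), PBUF_RAM);
  if (p != NULL) {
    /* ... fill in the struct icmp_echo_hdr at p->payload here ... */
    raw_sendto(ping_pcb, p, dest);
    pbuf_free(p);
  }
}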
/**
 * Try to post a callback-message to the tcpip_thread mbox
 * This is intended to be used to send "static" messages from interrupt context.
 *
 * @param msg pointer to the message to post
 * @return sys_mbox_trypost() return code
 */
err_t
tcpip_trycallback(struct tcpip_callback_msg* msg)
{
  LWIP_ASSERT("Invalid mbox", sys_mbox_valid_val(mbox));
  return sys_mbox_trypost(&mbox, msg);
}
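/* Usage sketch (assumption; my_* names are hypothetical): the "static" message
 * is created once with tcpip_callbackmsg_new() outside interrupt context and
 * then posted from the ISR with tcpip_trycallback(). */
static void my_deferred_work(void *ctx); /* runs later in tcpip_thread */
static int my_ctx;
static struct tcpip_callback_msg *rx_msg;

void driver_init_deferred_work(void)
{
  rx_msg = tcpip_callbackmsg_new(my_deferred_work, &my_ctx);
}

void driver_rx_isr(void)
{
  if (tcpip_trycallback(rx_msg) != ERR_OK) {
    /* mbox full: the work stays pending and can be retried later */
  }
}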
/**
 * The main lwIP thread. This thread has exclusive access to lwIP core functions
 * (unless access to them is not locked). Other threads communicate with this
 * thread using message boxes.
 *
 * It also starts all the timers to make sure they are running in the right
 * thread context.
 *
 * @param arg unused argument
 */
static void
tcpip_thread(void *arg)
{
  struct tcpip_msg *msg;
  LWIP_UNUSED_ARG(arg);

  if (tcpip_init_done != NULL) {
    tcpip_init_done(tcpip_init_done_arg);
  }

  LOCK_TCPIP_CORE();
  while (1) {                          /* MAIN Loop */
    UNLOCK_TCPIP_CORE();
    LWIP_TCPIP_THREAD_ALIVE();
    /* wait for a message, timeouts are processed while waiting */
    TCPIP_MBOX_FETCH(&mbox, (void **)&msg);
    LOCK_TCPIP_CORE();
    if (msg == NULL) {
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: NULL\n"));
      LWIP_ASSERT("tcpip_thread: invalid message", 0);
      continue;
    }
    switch (msg->type) {
#if !LWIP_TCPIP_CORE_LOCKING
    case TCPIP_MSG_API:
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API message %p\n", (void *)msg));
      msg->msg.api_msg.function(msg->msg.api_msg.msg);
      break;
    case TCPIP_MSG_API_CALL:
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API CALL message %p\n", (void *)msg));
      msg->msg.api_call.arg->err = msg->msg.api_call.function(msg->msg.api_call.arg);
      sys_sem_signal(msg->msg.api_call.sem);
      break;
#endif /* !LWIP_TCPIP_CORE_LOCKING */

#if !LWIP_TCPIP_CORE_LOCKING_INPUT
    case TCPIP_MSG_INPKT:
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: PACKET %p\n", (void *)msg));
      msg->msg.inp.input_fn(msg->msg.inp.p, msg->msg.inp.netif);
      memp_free(MEMP_TCPIP_MSG_INPKT, msg);
      break;
#endif /* !LWIP_TCPIP_CORE_LOCKING_INPUT */

#if LWIP_TCPIP_TIMEOUT && LWIP_TIMERS
    case TCPIP_MSG_TIMEOUT:
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: TIMEOUT %p\n", (void *)msg));
      sys_timeout(msg->msg.tmo.msecs, msg->msg.tmo.h, msg->msg.tmo.arg);
      memp_free(MEMP_TCPIP_MSG_API, msg);
      break;
    case TCPIP_MSG_UNTIMEOUT:
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: UNTIMEOUT %p\n", (void *)msg));
      sys_untimeout(msg->msg.tmo.h, msg->msg.tmo.arg);
      memp_free(MEMP_TCPIP_MSG_API, msg);
      break;
#endif /* LWIP_TCPIP_TIMEOUT && LWIP_TIMERS */

    case TCPIP_MSG_CALLBACK:
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK %p\n", (void *)msg));
      msg->msg.cb.function(msg->msg.cb.ctx);
      memp_free(MEMP_TCPIP_MSG_API, msg);
      break;

    case TCPIP_MSG_CALLBACK_STATIC:
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK_STATIC %p\n", (void *)msg));
      msg->msg.cb.function(msg->msg.cb.ctx);
      break;
#ifdef LWIP_NETIF_DRV
    case TCPIP_MSG_DRV:
      LWIP_DEBUGF(NETIF_DEBUG, ("tcpip_thread: DRV %p\n", (void *)msg));
      msg->msg.drv.drv_fn(msg->msg.drv.netif, msg->msg.drv.event);
      memp_free(MEMP_NETIF_DRV_MSG, msg);
      break;
#endif
    default:
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: %d\n", msg->type));
      LWIP_ASSERT("tcpip_thread: invalid message", 0);
      break;
    }
  }
}
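/* Startup sketch (assumption): tcpip_init() creates the mbox and spawns this
 * thread, roughly as follows (core-locking setup omitted): */
void
tcpip_init(tcpip_init_done_fn initfunc, void *arg)
{
  lwip_init();

  tcpip_init_done = initfunc;
  tcpip_init_done_arg = arg;
  if (sys_mbox_new(&mbox, TCPIP_MBOX_SIZE) != ERR_OK) {
    LWIP_ASSERT("failed to create tcpip_thread mbox", 0);
  }
  sys_thread_new(TCPIP_THREAD_NAME, tcpip_thread, NULL,
                 TCPIP_THREAD_STACKSIZE, TCPIP_THREAD_PRIO);
}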
/**
 * Add a network interface to the list of lwIP netifs.
 *
 * @param netif a pre-allocated netif structure
 * @param ipaddr IP address for the new netif
 * @param netmask network mask for the new netif
 * @param gw default gateway IP address for the new netif
 * @param state opaque data passed to the new netif
 * @param init callback function that initializes the interface
 * @param input callback function that is called to pass
 * ingress packets up in the protocol layer stack.
 *
 * @return netif, or NULL if failed.
 */
struct netif *
netif_add(struct netif *netif, ip_addr_t *ipaddr, ip_addr_t *netmask,
  ip_addr_t *gw, void *state, netif_init_fn init, netif_input_fn input)
{

  LWIP_ASSERT("No init function given", init != NULL);

  /* reset new interface configuration state */
  ip_addr_set_zero(&netif->ip_addr);
  ip_addr_set_zero(&netif->netmask);
  ip_addr_set_zero(&netif->gw);
  netif->flags = 0;
#if LWIP_DHCP
  /* netif not under DHCP control by default */
  netif->dhcp = NULL;
#endif /* LWIP_DHCP */
#if LWIP_AUTOIP
  /* netif not under AutoIP control by default */
  netif->autoip = NULL;
#endif /* LWIP_AUTOIP */
#if LWIP_NETIF_STATUS_CALLBACK
  netif->status_callback = NULL;
#endif /* LWIP_NETIF_STATUS_CALLBACK */
#if LWIP_NETIF_LINK_CALLBACK
  netif->link_callback = NULL;
#endif /* LWIP_NETIF_LINK_CALLBACK */
#if LWIP_IGMP
  netif->igmp_mac_filter = NULL;
#endif /* LWIP_IGMP */
#if ENABLE_LOOPBACK
  netif->loop_first = NULL;
  netif->loop_last = NULL;
#endif /* ENABLE_LOOPBACK */

  /* remember netif specific state information data */
  netif->state = state;
  netif->num = netif_num++;
  netif->input = input;
  NETIF_SET_HWADDRHINT(netif, NULL);
#if ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS
  netif->loop_cnt_current = 0;
#endif /* ENABLE_LOOPBACK && LWIP_LOOPBACK_MAX_PBUFS */

  netif_set_addr(netif, ipaddr, netmask, gw);

  /* call user specified initialization function for netif */
  if (init(netif) != ERR_OK) {
    return NULL;
  }

  /* add this netif to the list */
  netif->next = netif_list;
  netif_list = netif;
  snmp_inc_iflist();

#if LWIP_IGMP
  /* start IGMP processing */
  if (netif->flags & NETIF_FLAG_IGMP) {
    igmp_start(netif);
  }
#endif /* LWIP_IGMP */

  LWIP_DEBUGF(NETIF_DEBUG, ("netif: added interface %c%c IP addr ",
    netif->name[0], netif->name[1]));
  ip_addr_debug_print(NETIF_DEBUG, ipaddr);
  LWIP_DEBUGF(NETIF_DEBUG, (" netmask "));
  ip_addr_debug_print(NETIF_DEBUG, netmask);
  LWIP_DEBUGF(NETIF_DEBUG, (" gw "));
  ip_addr_debug_print(NETIF_DEBUG, gw);
  LWIP_DEBUGF(NETIF_DEBUG, ("\n"));
  return netif;
}
/**
 * Send an IP packet to be received on the same netif (loopif-like).
 * The pbuf is simply copied and handed back to netif->input.
 * In multithreaded mode, this is done directly since netif->input must put
 * the packet on a queue.
 * In callback mode, the packet is put on an internal queue and is fed to
 * netif->input by netif_poll().
 *
 * @param netif the lwip network interface structure
 * @param p the (IP) packet to 'send'
 * @param ipaddr the ip address to send the packet to (not used)
 * @return ERR_OK if the packet has been sent
 *         ERR_MEM if the pbuf used to copy the packet couldn't be allocated
 */
err_t
netif_loop_output(struct netif *netif, struct pbuf *p,
       ip_addr_t *ipaddr)
{
  struct pbuf *r;
  err_t err;
  struct pbuf *last;
#if LWIP_LOOPBACK_MAX_PBUFS
  u8_t clen = 0;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */
  /* If we have a loopif, SNMP counters are adjusted for it,
   * if not they are adjusted for 'netif'. */
#if LWIP_SNMP
#if LWIP_HAVE_LOOPIF
  struct netif *stats_if = &loop_netif;
#else /* LWIP_HAVE_LOOPIF */
  struct netif *stats_if = netif;
#endif /* LWIP_HAVE_LOOPIF */
#endif /* LWIP_SNMP */
  SYS_ARCH_DECL_PROTECT(lev);
  LWIP_UNUSED_ARG(ipaddr);

  /* Allocate a new pbuf */
  r = pbuf_alloc(PBUF_LINK, p->tot_len, PBUF_RAM);
  if (r == NULL) {
    LINK_STATS_INC(link.memerr);
    LINK_STATS_INC(link.drop);
    snmp_inc_ifoutdiscards(stats_if);
    return ERR_MEM;
  }
#if LWIP_LOOPBACK_MAX_PBUFS
  clen = pbuf_clen(r);
  /* check for overflow or too many pbufs on queue */
  if(((netif->loop_cnt_current + clen) < netif->loop_cnt_current) ||
     ((netif->loop_cnt_current + clen) > LWIP_LOOPBACK_MAX_PBUFS)) {
    pbuf_free(r);
    LINK_STATS_INC(link.memerr);
    LINK_STATS_INC(link.drop);
    snmp_inc_ifoutdiscards(stats_if);
    return ERR_MEM;
  }
  netif->loop_cnt_current += clen;
#endif /* LWIP_LOOPBACK_MAX_PBUFS */

  /* Copy the whole pbuf queue p into the single pbuf r */
  if ((err = pbuf_copy(r, p)) != ERR_OK) {
    pbuf_free(r);
    LINK_STATS_INC(link.memerr);
    LINK_STATS_INC(link.drop);
    snmp_inc_ifoutdiscards(stats_if);
    return err;
  }

  /* Put the packet on a linked list which gets emptied through calling
     netif_poll(). */

  /* let last point to the last pbuf in chain r */
  for (last = r; last->next != NULL; last = last->next);

  SYS_ARCH_PROTECT(lev);
  if(netif->loop_first != NULL) {
    LWIP_ASSERT("if first != NULL, last must also be != NULL", netif->loop_last != NULL);
    netif->loop_last->next = r;
    netif->loop_last = last;
  } else {
    netif->loop_first = r;
    netif->loop_last = last;
  }
  SYS_ARCH_UNPROTECT(lev);

  LINK_STATS_INC(link.xmit);
  snmp_add_ifoutoctets(stats_if, p->tot_len);
  snmp_inc_ifoutucastpkts(stats_if);

#if LWIP_NETIF_LOOPBACK_MULTITHREADING
  /* For multithreading environment, schedule a call to netif_poll */
  tcpip_callback((tcpip_callback_fn)netif_poll, netif);
#endif /* LWIP_NETIF_LOOPBACK_MULTITHREADING */

  return ERR_OK;
}
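/* Usage sketch (assumption): in callback (NO_SYS) mode the packets queued
 * above are only delivered once the application drains them, e.g. from its
 * main polling loop: */
#if ENABLE_LOOPBACK && !LWIP_NETIF_LOOPBACK_MULTITHREADING
void poll_loopback(struct netif *netif)
{
  /* hands the packets queued by netif_loop_output() to netif->input */
  netif_poll(netif);
}
#endif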
/* ------------------------ Start implementation -------------------------- */
void
sys_init( void )
{
    LWIP_ASSERT( "sys_init: not called first", tasks == NULL );
    tasks = NULL;
}
/* Signals a semaphore */
void
sys_sem_signal( sys_sem_t sem )
{
    LWIP_ASSERT( "sys_sem_signal: sem != SYS_SEM_NULL", sem != SYS_SEM_NULL );
    xSemaphoreGive( sem );
}
/* lwIP UDP receive callback function */
static void
snmp_recv(void *arg, struct udp_pcb *pcb, struct pbuf *p, struct ip_addr *addr, u16_t port)
{
  struct udp_hdr *udphdr;

  /* suppress unused argument warning */
  LWIP_UNUSED_ARG(arg);
  /* peek in the UDP header (goto IP payload) */
  if(pbuf_header(p, UDP_HLEN)){
    LWIP_ASSERT("Can't move to UDP header", 0);
    pbuf_free(p);
    return;
  }
  udphdr = p->payload;

  /* check if datagram is really directed at us (including broadcast requests) */
  if ((pcb == snmp1_pcb) && (ntohs(udphdr->dest) == SNMP_IN_PORT))
  {
    struct snmp_msg_pstat *msg_ps;
    u8_t req_idx;

    /* traverse input message process list, look for SNMP_MSG_EMPTY */
    msg_ps = &msg_input_list[0];
    req_idx = 0;
    while ((req_idx<SNMP_CONCURRENT_REQUESTS) && (msg_ps->state != SNMP_MSG_EMPTY))
    {
      req_idx++;
      msg_ps++;
    }
    if (req_idx != SNMP_CONCURRENT_REQUESTS)
    {
      err_t err_ret;
      u16_t payload_len;
      u16_t payload_ofs;
      u16_t varbind_ofs = 0;

      /* accepting request */
      snmp_inc_snmpinpkts();
      /* record used 'protocol control block' */
      msg_ps->pcb = pcb;
      /* source address (network order) */
      msg_ps->sip = *addr;
      /* source port (host order (lwIP oddity)) */
      msg_ps->sp = port;
      /* read UDP payload length from UDP header */
      payload_len = ntohs(udphdr->len) - UDP_HLEN;

      /* adjust to UDP payload */
      payload_ofs = UDP_HLEN;

      /* check total length, version, community, pdu type */
      err_ret = snmp_pdu_header_check(p, payload_ofs, payload_len, &varbind_ofs, msg_ps);
      if (((msg_ps->rt == SNMP_ASN1_PDU_GET_REQ) ||
           (msg_ps->rt == SNMP_ASN1_PDU_GET_NEXT_REQ) ||
           (msg_ps->rt == SNMP_ASN1_PDU_SET_REQ)) &&
          ((msg_ps->error_status == SNMP_ES_NOERROR) &&
           (msg_ps->error_index == 0)) )
      {
        /* Only accept requests, and only requests without error (be robust);
           keep err_ret from the header check */
      }
      else
      {
        /* Reject response and trap headers or error requests as input! */
        err_ret = ERR_ARG;
      }
      if (err_ret == ERR_OK)
      {
        LWIP_DEBUGF(SNMP_MSG_DEBUG, ("snmp_recv ok, community %s\n", msg_ps->community));

        /* Build a list of variable bindings. Copy the varbinds from the pbuf
           chain so they can be glued together when they are split across two
           or more pbufs. */
        err_ret = snmp_pdu_dec_varbindlist(p, varbind_ofs, &varbind_ofs, msg_ps);
        if ((err_ret == ERR_OK) && (msg_ps->invb.count > 0))
        {
          /* we've decoded the incoming message, release input msg now */
          pbuf_free(p);

          msg_ps->error_status = SNMP_ES_NOERROR;
          msg_ps->error_index = 0;
          /* find object for each variable binding */
          msg_ps->state = SNMP_MSG_SEARCH_OBJ;
          /* first variable binding from list to inspect */
          msg_ps->vb_idx = 0;

          LWIP_DEBUGF(SNMP_MSG_DEBUG, ("snmp_recv varbind cnt=%"U16_F"\n",(u16_t)msg_ps->invb.count));

          /* handle input event and as much objects as possible in one go */
          snmp_msg_event(req_idx);
        }
        else
        {
          /* varbind-list decode failed, or varbind list empty.
             drop request silently, do not return error!
             (errors are only returned for a specific varbind failure) */
          pbuf_free(p);
          LWIP_DEBUGF(SNMP_MSG_DEBUG, ("snmp_pdu_dec_varbindlist() failed\n"));
        }
      }
      else
      {
        /* header check failed
           drop request silently, do not return error! */
        pbuf_free(p);
        LWIP_DEBUGF(SNMP_MSG_DEBUG, ("snmp_pdu_header_check() failed\n"));
      }
    }
    else
    {
      /* exceeding number of concurrent requests */
      pbuf_free(p);
    }
  }
  else
  {
    /* datagram not for us */
    pbuf_free(p);
  }
}
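/* Startup sketch (assumption): the agent registers this callback on a UDP pcb
 * bound to the SNMP port, roughly as lwIP's snmp_init() does; snmp1_pcb is the
 * module-level pcb referenced above. */
void
snmp_agent_start(void)
{
  snmp1_pcb = udp_new();
  if (snmp1_pcb != NULL)
  {
    udp_recv(snmp1_pcb, snmp_recv, NULL);
    udp_bind(snmp1_pcb, IP_ADDR_ANY, SNMP_IN_PORT);
  }
}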
/** Create a TCP segment usable for passing to tcp_input */
static struct pbuf*
tcp_create_segment_wnd(ip_addr_t* src_ip, ip_addr_t* dst_ip,
                   u16_t src_port, u16_t dst_port, void* data, size_t data_len,
                   u32_t seqno, u32_t ackno, u8_t headerflags, u16_t wnd)
{
  struct pbuf *p, *q;
  struct ip_hdr* iphdr;
  struct tcp_hdr* tcphdr;
  u16_t pbuf_len = (u16_t)(sizeof(struct ip_hdr) + sizeof(struct tcp_hdr) + data_len);
  LWIP_ASSERT("data_len too big", data_len <= 0xFFFF);

  p = pbuf_alloc(PBUF_RAW, pbuf_len, PBUF_POOL);
  EXPECT_RETNULL(p != NULL);
  /* first pbuf must be big enough to hold the headers */
  EXPECT_RETNULL(p->len >= (sizeof(struct ip_hdr) + sizeof(struct tcp_hdr)));
  if (data_len > 0) {
    /* first pbuf must be big enough to hold at least 1 data byte, too */
    EXPECT_RETNULL(p->len > (sizeof(struct ip_hdr) + sizeof(struct tcp_hdr)));
  }

  for(q = p; q != NULL; q = q->next) {
    memset(q->payload, 0, q->len);
  }

  iphdr = p->payload;
  /* fill IP header */
  iphdr->dest.addr = ip_2_ip4(dst_ip)->addr;
  iphdr->src.addr = ip_2_ip4(src_ip)->addr;
  IPH_VHL_SET(iphdr, 4, IP_HLEN / 4);
  IPH_TOS_SET(iphdr, 0);
  IPH_LEN_SET(iphdr, htons(p->tot_len));
  IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, IP_HLEN));

  /* let p point to TCP header */
  pbuf_header(p, -(s16_t)sizeof(struct ip_hdr));

  tcphdr = p->payload;
  tcphdr->src   = htons(src_port);
  tcphdr->dest  = htons(dst_port);
  tcphdr->seqno = htonl(seqno);
  tcphdr->ackno = htonl(ackno);
  TCPH_HDRLEN_SET(tcphdr, sizeof(struct tcp_hdr)/4);
  TCPH_FLAGS_SET(tcphdr, headerflags);
  tcphdr->wnd   = htons(wnd);

  if (data_len > 0) {
    /* let p point to TCP data */
    pbuf_header(p, -(s16_t)sizeof(struct tcp_hdr));
    /* copy data */
    pbuf_take(p, data, (u16_t)data_len);
    /* let p point to TCP header again */
    pbuf_header(p, sizeof(struct tcp_hdr));
  }

  /* calculate checksum */

  tcphdr->chksum = ip_chksum_pseudo(p,
          IP_PROTO_TCP, p->tot_len, src_ip, dst_ip);

  pbuf_header(p, sizeof(struct ip_hdr));

  return p;
}
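/* Convenience sketch (assumption): tests that want the default receive window
 * typically layer a thin wrapper on top of the function above: */
static struct pbuf*
tcp_create_segment(ip_addr_t* src_ip, ip_addr_t* dst_ip,
                   u16_t src_port, u16_t dst_port, void* data, size_t data_len,
                   u32_t seqno, u32_t ackno, u8_t headerflags)
{
  return tcp_create_segment_wnd(src_ip, dst_ip, src_port, dst_port,
                                data, data_len, seqno, ackno, headerflags,
                                TCP_WND);
}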
/**
 * Process received ethernet frames. Using this function instead of directly
 * calling ip_input and passing ARP frames through etharp in ethernetif_input,
 * the ARP cache is protected from concurrent access.
 *
 * @param p the received packet, p->payload pointing to the ethernet header
 * @param netif the network interface on which the packet was received
 */
err_t ethernet_input(struct pbuf *p, struct netif *netif) {
    struct eth_hdr* ethhdr;
    u16_t type;

    /* points to packet payload, which starts with an Ethernet header */
    ethhdr = p->payload;
    LWIP_DEBUGF(ETHARP_DEBUG | LWIP_DBG_TRACE, ("ethernet_input: dest:%02x:%02x:%02x:%02x:%02x:%02x, src:%02x:%02x:%02x:%02x:%02x:%02x, type:%2x"NEWLINE, (unsigned )ethhdr->dest.addr[0], (unsigned )ethhdr->dest.addr[1], (unsigned )ethhdr->dest.addr[2], (unsigned )ethhdr->dest.addr[3], (unsigned )ethhdr->dest.addr[4], (unsigned )ethhdr->dest.addr[5], (unsigned )ethhdr->src.addr[0], (unsigned )ethhdr->src.addr[1], (unsigned )ethhdr->src.addr[2], (unsigned )ethhdr->src.addr[3], (unsigned )ethhdr->src.addr[4], (unsigned )ethhdr->src.addr[5], (unsigned )htons(ethhdr->type)));


    netif->rxpackets++;
    netif->rxbytes += p->tot_len;

    type = htons(ethhdr->type);
#if ETHARP_SUPPORT_VLAN
    if (type == ETHTYPE_VLAN) {
        struct eth_vlan_hdr *vlan = (struct eth_vlan_hdr*)(((char*)ethhdr) + SIZEOF_ETH_HDR);
#ifdef ETHARP_VLAN_CHECK /* if not, allow all VLANs */
        if (VLAN_ID(vlan) != ETHARP_VLAN_CHECK) {
            /* silently ignore this packet: not for our VLAN */
            pbuf_free(p);
            return ERR_OK;
        }
#endif /* ETHARP_VLAN_CHECK */
        type = htons(vlan->tpid);
    }
#endif /* ETHARP_SUPPORT_VLAN */

    //acquireMutex(comStackMutex);

    switch (type) {
    /* IP packet? */
    case ETHTYPE_IPV4:
#if ETHARP_TRUST_IP_MAC
        /* update ARP table */
        ethar_ip_input(netif, p);
#endif /* ETHARP_TRUST_IP_MAC */
        /* skip Ethernet header */
        if (pbuf_header(p, -(s16_t) SIZEOF_ETH_HDR)) {
            LWIP_ASSERT("Can't move over header in packet", 0);
            pbuf_free(p);
            p = NULL;
        } else {
            /* pass to IP layer */
            ip4_input(p, netif);
        }
        break;

    case ETHTYPE_IPV6:

#if ETHARP_TRUST_IP_MAC
        /* update AR table */
        ethar_ip_input(netif, p);
#endif /* ETHARP_TRUST_IP_MAC */

        if (pbuf_header(p, -(s16_t) SIZEOF_ETH_HDR)) {
            LWIP_ASSERT("Can't move over header in packet", 0);
            pbuf_free(p);
            p = NULL;
        } else {
            ip6_input(p, netif);
        }
        break;

#if LWIP_ARP
    case ETHTYPE_ARP:
        /* pass p to ARP module */
        etharp_arp_input(netif, (struct eth_addr*) (netif->hwaddr), p);
        break;
#endif
#if PPPOE_SUPPORT
        case ETHTYPE_PPPOEDISC: /* PPP Over Ethernet Discovery Stage */
        pppoe_disc_input(netif, p);
        break;

        case ETHTYPE_PPPOE: /* PPP Over Ethernet Session Stage */
        pppoe_data_input(netif, p);
        break;
#endif /* PPPOE_SUPPORT */

    default:
        ETHARP_STATS_INC(etharp.proterr);
        ETHARP_STATS_INC(etharp.drop);
        pbuf_free(p);
        break;
    }

    //releaseMutex(comStackMutex);

    /* This means the pbuf is freed or consumed,
     so the caller doesn't have to free it again */
    return (ERR_OK);
}
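
For context, here is a minimal bring-up sketch of how a port might register this ethernet_input() as the netif input function so received frames are demultiplexed here instead of going straight to ip_input(). This is not part of the example above: my_ethernetif_init(), my_netif and the addresses are placeholders, and a lwIP 2.x-style netif_add() signature with a raw/NO_SYS build is assumed.

#include "lwip/netif.h"
#include "lwip/ip4_addr.h"

extern err_t my_ethernetif_init(struct netif *netif);  /* hypothetical driver init callback */

static struct netif my_netif;

void my_netif_setup(void)
{
  ip4_addr_t ipaddr, netmask, gw;

  IP4_ADDR(&ipaddr, 192, 168, 1, 10);   /* placeholder addresses */
  IP4_ADDR(&netmask, 255, 255, 255, 0);
  IP4_ADDR(&gw, 192, 168, 1, 1);

  /* ethernet_input() becomes netif->input and is called from the driver RX path */
  netif_add(&my_netif, &ipaddr, &netmask, &gw, NULL, my_ethernetif_init, ethernet_input);
  netif_set_default(&my_netif);
  netif_set_up(&my_netif);
}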
Example #17
/**
  * @brief tcp_recv callback
  * @param arg: argument to be passed to receive callback
  * @param tpcb: tcp connection control block
  * @param p: pbuf with received data (NULL when the remote host closed the connection)
  * @param err: receive error code
  * @retval err_t: returned error
  */
static err_t tcp_echoclient_recv(void *arg, struct tcp_pcb *tpcb, struct pbuf *p, err_t err)
{
    struct echoclient *es;
    err_t ret_err;


    LWIP_ASSERT("arg != NULL",arg != NULL);

    es = (struct echoclient *)arg;

    /* if we receive an empty tcp frame from server => close connection */
    if (p == NULL)
    {
        /* remote host closed connection */
        es->state = ES_CLOSING;
        if(es->p_tx == NULL)
        {
            /* we're done sending, close connection */
            tcp_echoclient_connection_close(tpcb, es);
        }
        else
        {
            /* send remaining data*/
            tcp_echoclient_send(tpcb, es);
        }
        ret_err = ERR_OK;
    }
    /* else : a non empty frame was received from echo server but for some reason err != ERR_OK */
    else if(err != ERR_OK)
    {
        /* free received pbuf*/
        if (p != NULL)
        {
            pbuf_free(p);
        }
        ret_err = err;
    }
    else if(es->state == ES_CONNECTED)
    {
        /* increment message count */
        message_count++;

        /* Acknowledge data reception */
        tcp_recved(tpcb, p->tot_len);

        pbuf_free(p);
        tcp_echoclient_connection_close(tpcb, es);
        ret_err = ERR_OK;
    }

    /* data received when connection already closed */
    else
    {
        /* Acknowledge data reception */
        tcp_recved(tpcb, p->tot_len);

        /* free pbuf and do nothing */
        pbuf_free(p);
        ret_err = ERR_OK;
    }
    return ret_err;
}
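
As a usage sketch, the callback above would typically be registered on the client PCB right after it is created. tcp_echoclient_connected() and struct echoclient come from the surrounding ST echo-client example and are assumed here, not defined by this snippet.

#include "lwip/tcp.h"

extern err_t tcp_echoclient_connected(void *arg, struct tcp_pcb *tpcb, err_t err); /* assumed */

static err_t tcp_echoclient_start(struct echoclient *es, const ip_addr_t *server_ip)
{
  struct tcp_pcb *pcb = tcp_new();

  if (pcb == NULL) {
    return ERR_MEM;
  }
  tcp_arg(pcb, es);                     /* passed back as 'arg' in the callbacks */
  tcp_recv(pcb, tcp_echoclient_recv);   /* register the receive callback above */
  return tcp_connect(pcb, server_ip, 7, tcp_echoclient_connected);
}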
/**
 * Should be called at the beginning of the program to set up the
 * network interface.
 *
 * This function should be passed as a parameter to netif_add().
 *
 * @param[in] netif the lwip network interface structure for this netif
 * @return ERR_OK if the loopif is initialized
 *         ERR_MEM if private data couldn't be allocated
 *         any other err_t on error
 */
err_t eth_arch_enetif_init(struct netif *netif)
{
  err_t err;

  LWIP_ASSERT("netif != NULL", (netif != NULL));

  k64f_enetdata.netif = netif;

  /* set MAC hardware address */
#if (MBED_MAC_ADDRESS_SUM != MBED_MAC_ADDR_INTERFACE)
  netif->hwaddr[0] = MBED_MAC_ADDR_0;
  netif->hwaddr[1] = MBED_MAC_ADDR_1;
  netif->hwaddr[2] = MBED_MAC_ADDR_2;
  netif->hwaddr[3] = MBED_MAC_ADDR_3;
  netif->hwaddr[4] = MBED_MAC_ADDR_4;
  netif->hwaddr[5] = MBED_MAC_ADDR_5;
#else
  mbed_mac_address((char *)netif->hwaddr);
#endif
  netif->hwaddr_len = ETHARP_HWADDR_LEN;

  /* maximum transfer unit */
  netif->mtu = 1500;

  /* device capabilities */
  // TODOETH: check if the flags are correct below
  netif->flags = NETIF_FLAG_BROADCAST | NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET | NETIF_FLAG_IGMP;

  /* Initialize the hardware */
  netif->state = &k64f_enetdata;
  err = low_level_init(netif);
  if (err != ERR_OK)
    return err;

#if LWIP_NETIF_HOSTNAME
  /* Initialize interface hostname */
  netif->hostname = "lwipk64f";
#endif /* LWIP_NETIF_HOSTNAME */

  netif->name[0] = 'e';
  netif->name[1] = 'n';

  netif->output = k64f_etharp_output;
  netif->linkoutput = k64f_low_level_output;

  /* CMSIS-RTOS, start tasks */
#ifdef CMSIS_OS_RTX
  memset(k64f_enetdata.xTXDCountSem.data, 0, sizeof(k64f_enetdata.xTXDCountSem.data));
  k64f_enetdata.xTXDCountSem.def.semaphore = k64f_enetdata.xTXDCountSem.data;
#endif
  k64f_enetdata.xTXDCountSem.id = osSemaphoreCreate(&k64f_enetdata.xTXDCountSem.def, ENET_TX_RING_LEN);

  LWIP_ASSERT("xTXDCountSem creation error", (k64f_enetdata.xTXDCountSem.id != NULL));

  err = sys_mutex_new(&k64f_enetdata.TXLockMutex);
  LWIP_ASSERT("TXLockMutex creation error", (err == ERR_OK));

  /* Packet receive task */
  err = sys_sem_new(&k64f_enetdata.RxReadySem, 0);
  LWIP_ASSERT("RxReadySem creation error", (err == ERR_OK));
  sys_thread_new("receive_thread", packet_rx, netif->state, DEFAULT_THREAD_STACKSIZE, RX_PRIORITY);

  /* Transmit cleanup task */
  err = sys_sem_new(&k64f_enetdata.TxCleanSem, 0);
  LWIP_ASSERT("TxCleanSem creation error", (err == ERR_OK));
  sys_thread_new("txclean_thread", packet_tx, netif->state, DEFAULT_THREAD_STACKSIZE, TX_PRIORITY);

  /* PHY monitoring task */
  sys_thread_new("phy_thread", k64f_phy_task, netif, DEFAULT_THREAD_STACKSIZE, PHY_PRIORITY);

  /* Allow the PHY task to detect the initial link state and set up the proper flags */
  osDelay(10);

  return ERR_OK;
}
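
As the header comment says, this init function is meant to be handed to netif_add(). A minimal bring-up sketch for an RTOS build is given below; the addresses are placeholders, tcpip_init() is assumed to have been called already, and a lwIP 2.x-style netif_add() signature is assumed.

#include "lwip/tcpip.h"
#include "lwip/netif.h"

static struct netif k64f_netif;   /* hypothetical netif instance */

void network_bringup(void)
{
  ip4_addr_t ipaddr, netmask, gw;

  IP4_ADDR(&ipaddr, 0, 0, 0, 0);   /* 0.0.0.0 until DHCP assigns an address */
  IP4_ADDR(&netmask, 0, 0, 0, 0);
  IP4_ADDR(&gw, 0, 0, 0, 0);

  /* tcpip_input() hands received packets over to the tcpip thread */
  netif_add(&k64f_netif, &ipaddr, &netmask, &gw, NULL,
            eth_arch_enetif_init, tcpip_input);
  netif_set_default(&k64f_netif);
  netif_set_up(&k64f_netif);
}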
Example #19
static void
tcpip_thread(void *arg)
{
  struct tcpip_msg *msg;
  LWIP_UNUSED_ARG(arg);

  if (tcpip_init_done != NULL) {
    tcpip_init_done(tcpip_init_done_arg);
  }

  LOCK_TCPIP_CORE();
  while (1) {                          /* MAIN Loop */
    UNLOCK_TCPIP_CORE();
    LWIP_TCPIP_THREAD_ALIVE();
    /* wait for a message, timeouts are processed while waiting */
    sys_timeouts_mbox_fetch(&mbox, (void **)&msg);
    PROFILER_SUB = 10;
    LOCK_TCPIP_CORE();
    switch (msg->type) {
#if LWIP_NETCONN
    case TCPIP_MSG_API:
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: API message %p\n", (void *)msg));
      msg->msg.apimsg->function(&(msg->msg.apimsg->msg));
      break;
#endif /* LWIP_NETCONN */

#if !LWIP_TCPIP_CORE_LOCKING_INPUT
    case TCPIP_MSG_INPKT:
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: PACKET %p\n", (void *)msg));
#if LWIP_ETHERNET
      if (msg->msg.inp.netif->flags & (NETIF_FLAG_ETHARP | NETIF_FLAG_ETHERNET)) {
        ethernet_input(msg->msg.inp.p, msg->msg.inp.netif);
      } else
#endif /* LWIP_ETHERNET */
      {
        ip_input(msg->msg.inp.p, msg->msg.inp.netif);
      }
      memp_free(MEMP_TCPIP_MSG_INPKT, msg);
      break;
#endif /* LWIP_TCPIP_CORE_LOCKING_INPUT */

#if LWIP_NETIF_API
    case TCPIP_MSG_NETIFAPI:
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: Netif API message %p\n", (void *)msg));
      msg->msg.netifapimsg->function(&(msg->msg.netifapimsg->msg));
      break;
#endif /* LWIP_NETIF_API */

#if LWIP_TCPIP_TIMEOUT
    case TCPIP_MSG_TIMEOUT:
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: TIMEOUT %p\n", (void *)msg));
      sys_timeout(msg->msg.tmo.msecs, msg->msg.tmo.h, msg->msg.tmo.arg);
      memp_free(MEMP_TCPIP_MSG_API, msg);
      break;
    case TCPIP_MSG_UNTIMEOUT:
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: UNTIMEOUT %p\n", (void *)msg));
      sys_untimeout(msg->msg.tmo.h, msg->msg.tmo.arg);
      memp_free(MEMP_TCPIP_MSG_API, msg);
      break;
#endif /* LWIP_TCPIP_TIMEOUT */

    case TCPIP_MSG_CALLBACK:
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK %p\n", (void *)msg));
      msg->msg.cb.function(msg->msg.cb.ctx);
      memp_free(MEMP_TCPIP_MSG_API, msg);
      break;

    case TCPIP_MSG_CALLBACK_STATIC:
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: CALLBACK_STATIC %p\n", (void *)msg));
      msg->msg.cb.function(msg->msg.cb.ctx);
      break;

    default:
      LWIP_DEBUGF(TCPIP_DEBUG, ("tcpip_thread: invalid message: %d\n", msg->type));
      LWIP_ASSERT("tcpip_thread: invalid message", 0);
      break;
    }
    PROFILER_SUB = 11;
  }
}
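
The tcpip_init_done hook at the top of the thread is the callback passed to tcpip_init(). A common startup pattern, sketched below, is to block the application until the stack thread is running; the pointer-based sys_sem_t API is assumed and the function names are placeholders.

#include "lwip/tcpip.h"
#include "lwip/sys.h"

static sys_sem_t init_done_sem;

static void on_tcpip_init_done(void *arg)
{
  /* runs inside tcpip_thread, right before the main loop starts */
  sys_sem_signal((sys_sem_t *)arg);
}

void start_network_stack(void)
{
  sys_sem_new(&init_done_sem, 0);
  tcpip_init(on_tcpip_init_done, &init_done_sem);  /* creates tcpip_thread and its mbox */
  sys_sem_wait(&init_done_sem);                    /* wait until the stack is up */
  sys_sem_free(&init_done_sem);
}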
Example #20
/**
 * In contrast to its name, mem_realloc can only shrink memory, not expand it.
 * Since the only use (for now) is in pbuf_realloc (which also can only shrink),
 * this shouldn't be a problem!
 *
 * @param rmem pointer to memory allocated by mem_malloc that is to be shrunk
 * @param newsize required size after shrinking (needs to be smaller than or
 *                equal to the previous size)
 * @return for compatibility reasons: is always == rmem, at the moment
 */
void *
mem_realloc(void *rmem, mem_size_t newsize)
{
  mem_size_t size;
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  newsize = LWIP_MEM_ALIGN_SIZE(newsize);

  if(newsize < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    newsize = MIN_SIZE_ALIGNED;
  }

  if (newsize > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  LWIP_ASSERT("mem_realloc: legal memory", (u8_t *)rmem >= (u8_t *)ram &&
   (u8_t *)rmem < (u8_t *)ram_end);

  if ((u8_t *)rmem < (u8_t *)ram || (u8_t *)rmem >= (u8_t *)ram_end) {
    LWIP_DEBUGF(MEM_DEBUG | 3, ("mem_realloc: illegal memory\n"));
    return rmem;
  }
  /* Get the corresponding struct mem ... */
  mem = (struct mem *)((u8_t *)rmem - SIZEOF_STRUCT_MEM);
  /* ... and its offset pointer */
  ptr = (u8_t *)mem - ram;

  size = mem->next - ptr - SIZEOF_STRUCT_MEM;
  LWIP_ASSERT("mem_realloc can only shrink memory", newsize <= size);
  if (newsize > size) {
    /* not supported */
    return NULL;
  }
  if (newsize == size) {
    /* No change in size, simply return */
    return rmem;
  }

  /* protect the heap from concurrent access */
  sys_arch_sem_wait(mem_sem, 0);

#if MEM_STATS
  lwip_stats.mem.used -= (size - newsize);
#endif /* MEM_STATS */

  mem2 = (struct mem *)&ram[mem->next];
  if(mem2->used == 0) {
    /* The next struct is unused, we can simply move it a little */
    mem_size_t next;
    /* remember the old next pointer */
    next = mem2->next;
    /* create new struct mem which is moved directly after the shrinked mem */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    if (lfree == mem2) {
      lfree = (struct mem *)&ram[ptr2];
    }
    mem2 = (struct mem *)&ram[ptr2];
    mem2->used = 0;
    /* restore the next pointer */
    mem2->next = next;
    /* link it back to mem */
    mem2->prev = ptr;
    /* link mem to it */
    mem->next = ptr2;
    /* last thing to restore linked list: as we have moved mem2,
     * let 'mem2->next->prev' point to mem2 again. but only if mem2->next is not
     * the end of the heap */
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)&ram[mem2->next])->prev = ptr2;
    }
    /* no need to plug holes, we've already done that */
  } else if (newsize + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED <= size) {
    /* Next struct is used but there's room for another struct mem with
     * at least MIN_SIZE_ALIGNED of data.
     * Old size ('size') must be big enough to contain at least 'newsize' plus a struct mem
     * ('SIZEOF_STRUCT_MEM') with some data ('MIN_SIZE_ALIGNED').
     * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
     *       region that couldn't hold data, but when mem->next gets freed,
     *       the 2 regions would be combined, resulting in more free memory */
    ptr2 = ptr + SIZEOF_STRUCT_MEM + newsize;
    mem2 = (struct mem *)&ram[ptr2];
    if (mem2 < lfree) {
      lfree = mem2;
    }
    mem2->used = 0;
    mem2->next = mem->next;
    mem2->prev = ptr;
    mem->next = ptr2;
    if (mem2->next != MEM_SIZE_ALIGNED) {
      ((struct mem *)&ram[mem2->next])->prev = ptr2;
    }
    /* the original mem->next is used, so no need to plug holes! */
  }
  /* else {
    next struct mem is used but size between mem and mem2 is not big enough
    to create another struct mem
    -> don't do anything.
    -> the remaining space stays unused since it is too small
  } */
  sys_sem_signal(mem_sem);
  return rmem;
}
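
A small usage sketch of the shrink-only behaviour described above: the block is trimmed in place and the original pointer is returned. The function name is a placeholder.

#include "lwip/mem.h"
#include "lwip/debug.h"

void mem_shrink_example(void)
{
  void *buf = mem_malloc(128);

  if (buf != NULL) {
    void *same = mem_realloc(buf, 64);   /* 64 <= 128, so shrinking is allowed */
    LWIP_ASSERT("mem_realloc keeps the pointer", same == buf);
    mem_free(same);
  }
}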
/**
 * Inserts node in idx list in a sorted
 * (ascending order) fashion and
 * allocates the node if needed.
 *
 * @param rn points to the root node
 * @param objid is the object sub identifier
 * @param insn points to a pointer to the inserted node
 *   used for constructing the tree.
 * @return -1 if failed, 1 if inserted, 2 if present.
 */
s8_t
snmp_mib_node_insert(struct mib_list_rootnode *rn, s32_t objid, struct mib_list_node **insn)
{
  struct mib_list_node *nn;
  s8_t insert;

  LWIP_ASSERT("rn != NULL",rn != NULL);

  /* -1 = malloc failure, 0 = not inserted, 1 = inserted, 2 = was present */
  insert = 0;
  if (rn->head == NULL)
  {
    /* empty list, add first node */
    LWIP_DEBUGF(SNMP_MIB_DEBUG,("alloc empty list objid==%"S32_F"\n",objid));
    nn = snmp_mib_ln_alloc(objid);
    if (nn != NULL)
    {
      rn->head = nn;
      rn->tail = nn;
      *insn = nn;
      insert = 1;
    }
    else
    {
      insert = -1;
    }
  }
  else
  {
    struct mib_list_node *n;
    /* at least one node is present */
    n = rn->head;
    while ((n != NULL) && (insert == 0))
    {
      if (n->objid == objid)
      {
        /* node is already there */
        LWIP_DEBUGF(SNMP_MIB_DEBUG,("node already there objid==%"S32_F"\n",objid));
        *insn = n;
        insert = 2;
      }
      else if (n->objid < objid)
      {
        if (n->next == NULL)
        {
          /* alloc and insert at the tail */
          LWIP_DEBUGF(SNMP_MIB_DEBUG,("alloc ins tail objid==%"S32_F"\n",objid));
          nn = snmp_mib_ln_alloc(objid);
          if (nn != NULL)
          {
            nn->next = NULL;
            nn->prev = n;
            n->next = nn;
            rn->tail = nn;
            *insn = nn;
            insert = 1;
          }
          else
          {
            /* insertion failure */
            insert = -1;
          }
        }
        else
        {
          /* there's more to explore: traverse list */
          LWIP_DEBUGF(SNMP_MIB_DEBUG,("traverse list\n"));
          n = n->next;
        }
      }
      else
      {
        /* n->objid > objid */
        /* alloc and insert between n->prev and n */
        LWIP_DEBUGF(SNMP_MIB_DEBUG,("alloc ins n->prev, objid==%"S32_F", n\n",objid));
        nn = snmp_mib_ln_alloc(objid);
        if (nn != NULL)
        {
          if (n->prev == NULL)
          {
            /* insert at the head */
            nn->next = n;
            nn->prev = NULL;
            rn->head = nn;
            n->prev = nn;
          }
          else
          {
            /* insert in the middle */
            nn->next = n;
            nn->prev = n->prev;
            n->prev->next = nn;
            n->prev = nn;
          }
          *insn = nn;
          insert = 1;
        }
        else
        {
          /* insertion failure */
          insert = -1;
        }
      }
    }
  }
  if (insert == 1)
  {
    rn->count += 1;
  }
  LWIP_ASSERT("insert != 0",insert != 0);
  return insert;
}
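
A short sketch of how the insert routine behaves, following the return codes documented above. The root node is assumed to have been allocated elsewhere, and the older private "lwip/snmp_structs.h" header is assumed for the type declarations.

#include "lwip/snmp_structs.h"   /* assumed: older lwIP private SNMP header */

void mib_insert_example(struct mib_list_rootnode *rn)
{
  struct mib_list_node *node;

  (void)snmp_mib_node_insert(rn, 6, &node);   /* empty list: inserted, returns 1 */
  (void)snmp_mib_node_insert(rn, 2, &node);   /* smaller objid: new head, returns 1 */
  (void)snmp_mib_node_insert(rn, 9, &node);   /* larger objid: new tail, returns 1 */
  (void)snmp_mib_node_insert(rn, 6, &node);   /* already present: returns 2 */
}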
Example #22
/**
 * Adam's mem_malloc() plus solution for bug #17922
 * Allocate a block of memory with a minimum of 'size' bytes.
 *
 * @param size is the minimum size of the requested block in bytes.
 * @return pointer to allocated memory or NULL if no free memory was found.
 *
 * Note that the returned value will always be aligned (as defined by MEM_ALIGNMENT).
 */
void *
mem_malloc(mem_size_t size)
{
  mem_size_t ptr, ptr2;
  struct mem *mem, *mem2;

  if (size == 0) {
    return NULL;
  }

  /* Expand the size of the allocated memory region so that we can
     adjust for alignment. */
  size = LWIP_MEM_ALIGN_SIZE(size);

  if(size < MIN_SIZE_ALIGNED) {
    /* every data block must be at least MIN_SIZE_ALIGNED long */
    size = MIN_SIZE_ALIGNED;
  }

  if (size > MEM_SIZE_ALIGNED) {
    return NULL;
  }

  /* protect the heap from concurrent access */
  sys_arch_sem_wait(mem_sem, 0);

  /* Scan through the heap searching for a free block that is big enough,
   * beginning with the lowest free block.
   */
  for (ptr = (u8_t *)lfree - ram; ptr < MEM_SIZE_ALIGNED - size;
       ptr = ((struct mem *)&ram[ptr])->next) {
    mem = (struct mem *)&ram[ptr];

    if ((!mem->used) &&
        (mem->next - (ptr + SIZEOF_STRUCT_MEM)) >= size) {
      /* mem is not used and at least perfect fit is possible:
       * mem->next - (ptr + SIZEOF_STRUCT_MEM) gives us the 'user data size' of mem */

      if (mem->next - (ptr + SIZEOF_STRUCT_MEM) >= (size + SIZEOF_STRUCT_MEM + MIN_SIZE_ALIGNED)) {
        /* (in addition to the above, we test if another struct mem (SIZEOF_STRUCT_MEM) containing
         * at least MIN_SIZE_ALIGNED of data also fits in the 'user data space' of 'mem')
         * -> split large block, create empty remainder,
         * remainder must be large enough to contain MIN_SIZE_ALIGNED data: if
         * mem->next - (ptr + (2*SIZEOF_STRUCT_MEM)) == size,
         * struct mem would fit in but no data between mem2 and mem2->next
         * @todo we could leave out MIN_SIZE_ALIGNED. We would create an empty
         *       region that couldn't hold data, but when mem->next gets freed,
         *       the 2 regions would be combined, resulting in more free memory
         */
        ptr2 = ptr + SIZEOF_STRUCT_MEM + size;
        /* create mem2 struct */
        mem2 = (struct mem *)&ram[ptr2];
        mem2->used = 0;
        mem2->next = mem->next;
        mem2->prev = ptr;
        /* and insert it between mem and mem->next */
        mem->next = ptr2;
        mem->used = 1;

        if (mem2->next != MEM_SIZE_ALIGNED) {
          ((struct mem *)&ram[mem2->next])->prev = ptr2;
        }
#if MEM_STATS
        lwip_stats.mem.used += (size + SIZEOF_STRUCT_MEM);
        if (lwip_stats.mem.max < lwip_stats.mem.used) {
          lwip_stats.mem.max = lwip_stats.mem.used;
        }
#endif /* MEM_STATS */
      } else {
        /* (a mem2 struct does not fit into the user data space of mem and mem->next will always
         * be used at this point: if not, we would have 2 unused structs in a row and plug_holes
         * should have taken care of this).
         * -> near fit or exact fit: do not split, no mem2 creation
         * also can't move mem->next directly behind mem, since mem->next
         * will always be used at this point!
         */
        mem->used = 1;
#if MEM_STATS
        lwip_stats.mem.used += mem->next - ((u8_t *)mem - ram);
        if (lwip_stats.mem.max < lwip_stats.mem.used) {
          lwip_stats.mem.max = lwip_stats.mem.used;
        }
#endif /* MEM_STATS */
      }

      if (mem == lfree) {
        /* Find next free block after mem and update lowest free pointer */
        while (lfree->used && lfree != ram_end) {
          lfree = (struct mem *)&ram[lfree->next];
        }
        LWIP_ASSERT("mem_malloc: !lfree->used", ((lfree == ram_end) || (!lfree->used)));
      }
      sys_sem_signal(mem_sem);
      LWIP_ASSERT("mem_malloc: allocated memory not above ram_end.",
       (mem_ptr_t)mem + SIZEOF_STRUCT_MEM + size <= (mem_ptr_t)ram_end);
      LWIP_ASSERT("mem_malloc: allocated memory properly aligned.",
       (unsigned long)((u8_t *)mem + SIZEOF_STRUCT_MEM) % MEM_ALIGNMENT == 0);
      LWIP_ASSERT("mem_malloc: sanity check alignment",
        (((mem_ptr_t)mem) & (MEM_ALIGNMENT-1)) == 0);

      return (u8_t *)mem + SIZEOF_STRUCT_MEM;
    }
  }
  LWIP_DEBUGF(MEM_DEBUG | 2, ("mem_malloc: could not allocate %"S16_F" bytes\n", (s16_t)size));
#if MEM_STATS
  ++lwip_stats.mem.err;
#endif /* MEM_STATS */
  sys_sem_signal(mem_sem);
  return NULL;
}
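
A quick sketch of the alignment guarantee mentioned in the header comment: every successful allocation is MEM_ALIGNMENT-aligned. The function name is a placeholder.

#include "lwip/mem.h"
#include "lwip/debug.h"

void mem_alloc_example(void)
{
  void *p = mem_malloc(100);

  if (p != NULL) {
    LWIP_ASSERT("block is MEM_ALIGNMENT-aligned",
                ((mem_ptr_t)p % MEM_ALIGNMENT) == 0);
    mem_free(p);
  }
}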
Example #23
/**
 * Processes ICMP input packets, called from ip_input().
 *
 * Currently only processes icmp echo requests and sends
 * out the echo response.
 *
 * @param p the icmp echo request packet, p->payload pointing to the icmp header
 * @param inp the netif on which this packet was received
 */
void
icmp_input(struct pbuf *p, struct netif *inp)
{
  u8_t type;
#ifdef LWIP_DEBUG
  u8_t code;
#endif /* LWIP_DEBUG */
  struct icmp_echo_hdr *iecho;
  const struct ip_hdr *iphdr_in;
  s16_t hlen;
  const ip4_addr_t* src;

  ICMP_STATS_INC(icmp.recv);
  MIB2_STATS_INC(mib2.icmpinmsgs);

  iphdr_in = ip4_current_header();
  hlen = IPH_HL(iphdr_in) * 4;
  if (hlen < IP_HLEN) {
    LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short IP header (%"S16_F" bytes) received\n", hlen));
    goto lenerr;
  }
  if (p->len < sizeof(u16_t)*2) {
    LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: short ICMP (%"U16_F" bytes) received\n", p->tot_len));
    goto lenerr;
  }

  type = *((u8_t *)p->payload);
#ifdef LWIP_DEBUG
  code = *(((u8_t *)p->payload)+1);
#endif /* LWIP_DEBUG */
  switch (type) {
  case ICMP_ER:
    /* This is OK, echo reply might have been parsed by a raw PCB
       (as obviously, an echo request has been sent, too). */
    MIB2_STATS_INC(mib2.icmpinechoreps);
    break;
  case ICMP_ECHO:
    MIB2_STATS_INC(mib2.icmpinechos);
    src = ip4_current_dest_addr();
    /* multicast destination address? */
    if (ip4_addr_ismulticast(ip4_current_dest_addr())) {
#if LWIP_MULTICAST_PING
      /* For multicast, use address of receiving interface as source address */
      src = netif_ip4_addr(inp);
#else /* LWIP_MULTICAST_PING */
      LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to multicast pings\n"));
      goto icmperr;
#endif /* LWIP_MULTICAST_PING */
    }
    /* broadcast destination address? */
    if (ip4_addr_isbroadcast(ip4_current_dest_addr(), ip_current_netif())) {
#if LWIP_BROADCAST_PING
      /* For broadcast, use address of receiving interface as source address */
      src = netif_ip4_addr(inp);
#else /* LWIP_BROADCAST_PING */
      LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: Not echoing to broadcast pings\n"));
      goto icmperr;
#endif /* LWIP_BROADCAST_PING */
    }
    LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ping\n"));
    if (p->tot_len < sizeof(struct icmp_echo_hdr)) {
      LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: bad ICMP echo received\n"));
      goto lenerr;
    }
#if CHECKSUM_CHECK_ICMP
    IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_CHECK_ICMP) {
      if (inet_chksum_pbuf(p) != 0) {
        LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: checksum failed for received ICMP echo\n"));
        pbuf_free(p);
        ICMP_STATS_INC(icmp.chkerr);
        MIB2_STATS_INC(mib2.icmpinerrors);
        return;
      }
    }
#endif
#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN
    if (pbuf_header(p, (hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN))) {
      /* p is not big enough to contain link headers
       * allocate a new one and copy p into it
       */
      struct pbuf *r;
      /* allocate new packet buffer with space for link headers */
      r = pbuf_alloc(PBUF_LINK, p->tot_len + hlen, PBUF_RAM);
      if (r == NULL) {
        LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: allocating new pbuf failed\n"));
        goto icmperr;
      }
      if (r->len < hlen + sizeof(struct icmp_echo_hdr)) {
        LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("first pbuf cannot hold the ICMP header"));
        pbuf_free(r);
        goto icmperr;
      }
      /* copy the ip header */
      MEMCPY(r->payload, iphdr_in, hlen);
      /* switch r->payload back to icmp header (cannot fail) */
      if (pbuf_header(r, -hlen)) {
        LWIP_ASSERT("icmp_input: moving r->payload to icmp header failed\n", 0);
        pbuf_free(r);
        goto icmperr;
      }
      /* copy the rest of the packet without ip header */
      if (pbuf_copy(r, p) != ERR_OK) {
        LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("icmp_input: copying to new pbuf failed"));
        pbuf_free(r);
        goto icmperr;
      }
      /* free the original p */
      pbuf_free(p);
      /* we now have an identical copy of p that has room for link headers */
      p = r;
    } else {
      /* restore p->payload to point to icmp header (cannot fail) */
      if (pbuf_header(p, -(s16_t)(hlen + PBUF_LINK_HLEN + PBUF_LINK_ENCAPSULATION_HLEN))) {
        LWIP_ASSERT("icmp_input: restoring original p->payload failed\n", 0);
        goto icmperr;
      }
    }
#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN */
    /* At this point, all checks are OK. */
    /* We generate an answer by switching the dest and src ip addresses,
     * setting the icmp type to ECHO_RESPONSE and updating the checksum. */
    iecho = (struct icmp_echo_hdr *)p->payload;
    if (pbuf_header(p, hlen)) {
      LWIP_DEBUGF(ICMP_DEBUG | LWIP_DBG_LEVEL_SERIOUS, ("Can't move over header in packet"));
    } else {
      err_t ret;
      struct ip_hdr *iphdr = (struct ip_hdr*)p->payload;
      ip4_addr_copy(iphdr->src, *src);
      ip4_addr_copy(iphdr->dest, *ip4_current_src_addr());
      ICMPH_TYPE_SET(iecho, ICMP_ER);
#if CHECKSUM_GEN_ICMP
      IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_ICMP) {
        /* adjust the checksum */
        if (iecho->chksum > PP_HTONS(0xffffU - (ICMP_ECHO << 8))) {
          iecho->chksum += PP_HTONS(ICMP_ECHO << 8) + 1;
        } else {
          iecho->chksum += PP_HTONS(ICMP_ECHO << 8);
        }
      }
#if LWIP_CHECKSUM_CTRL_PER_NETIF
      else {
        iecho->chksum = 0;
      }
#endif /* LWIP_CHECKSUM_CTRL_PER_NETIF */
#else /* CHECKSUM_GEN_ICMP */
      iecho->chksum = 0;
#endif /* CHECKSUM_GEN_ICMP */

      /* Set the correct TTL and recalculate the header checksum. */
      IPH_TTL_SET(iphdr, ICMP_TTL);
      IPH_CHKSUM_SET(iphdr, 0);
#if CHECKSUM_GEN_IP
      IF__NETIF_CHECKSUM_ENABLED(inp, NETIF_CHECKSUM_GEN_IP) {
        IPH_CHKSUM_SET(iphdr, inet_chksum(iphdr, hlen));
      }
#endif /* CHECKSUM_GEN_IP */

      ICMP_STATS_INC(icmp.xmit);
      /* increase number of messages attempted to send */
      MIB2_STATS_INC(mib2.icmpoutmsgs);
      /* increase number of echo replies attempted to send */
      MIB2_STATS_INC(mib2.icmpoutechoreps);

      /* send an ICMP packet */
      ret = ip4_output_if(p, src, IP_HDRINCL,
                   ICMP_TTL, 0, IP_PROTO_ICMP, inp);
      if (ret != ERR_OK) {
        LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ip_output_if returned an error: %s\n", lwip_strerr(ret)));
      }
    }
    break;
  default:
    /* unsupported ICMP type: count the error; the pbuf is freed below */
    LWIP_DEBUGF(ICMP_DEBUG, ("icmp_input: ICMP type %"S16_F" code %"S16_F" not supported.\n",
                (s16_t)type, (s16_t)code));
    ICMP_STATS_INC(icmp.proterr);
    ICMP_STATS_INC(icmp.drop);
    break;
  }
  pbuf_free(p);
  return;
lenerr:
  pbuf_free(p);
  ICMP_STATS_INC(icmp.lenerr);
  MIB2_STATS_INC(mib2.icmpinerrors);
  return;
#if LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING
icmperr:
  pbuf_free(p);
  ICMP_STATS_INC(icmp.err);
  MIB2_STATS_INC(mib2.icmpinerrors);
  return;
#endif /* LWIP_ICMP_ECHO_CHECK_INPUT_PBUF_LEN || !LWIP_MULTICAST_PING || !LWIP_BROADCAST_PING */
}
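
The echo-to-reply path above avoids recomputing the ICMP checksum: only the type byte changes from ICMP_ECHO (8) to ICMP_ER (0), so the one's-complement sum is patched incrementally by adding back the 0x0800 the type change removed and folding the carry in on overflow. A standalone sketch of that arithmetic (all values taken in the same byte order, ignoring the PP_HTONS details; names are placeholders):

#include <stdint.h>
#include <stdio.h>

/* incremental one's-complement checksum update for type 8 -> type 0 */
static uint16_t echo_to_reply_chksum(uint16_t chksum)
{
  uint32_t sum = (uint32_t)chksum + 0x0800u;   /* add back what the type change removed */

  if (sum > 0xffffu) {
    sum += 1;                                  /* fold the one's-complement carry back in */
  }
  return (uint16_t)sum;
}

int main(void)
{
  printf("0x%04x\n", echo_to_reply_chksum(0x1234u));   /* 0x1a34, no carry */
  printf("0x%04x\n", echo_to_reply_chksum(0xf800u));   /* 0x0001, carry folded */
  return 0;
}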
Example #24
/**
 * Create a new netconn (of a specific type) that has a callback function.
 * The corresponding pcb is NOT created!
 *
 * @param t the type of 'connection' to create (@see enum netconn_type)
 * @param proto the IP protocol for RAW IP pcbs
 * @param callback a function to call on status changes (RX available, TX'ed)
 * @return a newly allocated struct netconn or
 *         NULL on memory error
 */
struct netconn*
netconn_alloc(enum netconn_type t, netconn_callback callback)
{
  struct netconn *conn;
  int size;

  conn = memp_malloc(MEMP_NETCONN);
  if (conn == NULL) {
    return NULL;
  }

  conn->err = ERR_OK;
  conn->type = t;
  conn->pcb.tcp = NULL;

#if (DEFAULT_RAW_RECVMBOX_SIZE == DEFAULT_UDP_RECVMBOX_SIZE) && \
    (DEFAULT_RAW_RECVMBOX_SIZE == DEFAULT_TCP_RECVMBOX_SIZE)
  size = DEFAULT_RAW_RECVMBOX_SIZE;
#else
  switch(NETCONNTYPE_GROUP(t)) {
#if LWIP_RAW
  case NETCONN_RAW:
    size = DEFAULT_RAW_RECVMBOX_SIZE;
    break;
#endif /* LWIP_RAW */
#if LWIP_UDP
  case NETCONN_UDP:
    size = DEFAULT_UDP_RECVMBOX_SIZE;
    break;
#endif /* LWIP_UDP */
#if LWIP_TCP
  case NETCONN_TCP:
    size = DEFAULT_TCP_RECVMBOX_SIZE;
    break;
#endif /* LWIP_TCP */
  default:
    LWIP_ASSERT("netconn_alloc: undefined netconn_type", 0);
    break;
  }
#endif

  if ((conn->op_completed = sys_sem_new(0)) == SYS_SEM_NULL) {
    memp_free(MEMP_NETCONN, conn);
    return NULL;
  }
  if ((conn->recvmbox = sys_mbox_new(size)) == SYS_MBOX_NULL) {
    sys_sem_free(conn->op_completed);
    memp_free(MEMP_NETCONN, conn);
    return NULL;
  }

  conn->acceptmbox   = SYS_MBOX_NULL;
  conn->state        = NETCONN_NONE;
  /* initialize socket to -1 since 0 is a valid socket */
  conn->socket       = -1;
  conn->callback     = callback;
  conn->recv_avail   = 0;
#if LWIP_TCP
  conn->write_msg    = NULL;
  conn->write_offset = 0;
#if LWIP_TCPIP_CORE_LOCKING
  conn->write_delayed = 0;
#endif /* LWIP_TCPIP_CORE_LOCKING */
#endif /* LWIP_TCP */
#if LWIP_SO_RCVTIMEO
  conn->recv_timeout = 0;
#endif /* LWIP_SO_RCVTIMEO */
#if LWIP_SO_RCVBUF
  conn->recv_bufsize = RECV_BUFSIZE_DEFAULT;
#endif /* LWIP_SO_RCVBUF */
  return conn;
}
Example #25
/** Free a struct pbuf_custom_ref */
static void
ip_frag_free_pbuf_custom_ref(struct pbuf_custom_ref* p)
{
  LWIP_ASSERT("p != NULL", p != NULL);
  memp_free(MEMP_FRAG_PBUF, p);
}
Example #26
/**
 * Receive callback function for RAW netconns.
 * Doesn't 'eat' the packet: it copies the data into a new netbuf and posts
 * that to conn->recvmbox; the original pbuf stays with the stack.
 *
 * @see raw.h (struct raw_pcb.recv) for parameters and return value
 */
static u8_t
recv_raw(void *arg, struct raw_pcb *pcb, struct pbuf *p,
    struct ip_addr *addr)
{
  struct pbuf *q;
  struct netbuf *buf;
  struct netconn *conn;
#if LWIP_SO_RCVBUF
  int recv_avail;
#endif /* LWIP_SO_RCVBUF */

  LWIP_UNUSED_ARG(addr);
  conn = arg;

#if LWIP_SO_RCVBUF
  SYS_ARCH_GET(conn->recv_avail, recv_avail);
  if ((conn != NULL) && (conn->recvmbox != SYS_MBOX_NULL) &&
      ((recv_avail + (int)(p->tot_len)) <= conn->recv_bufsize)) {
#else  /* LWIP_SO_RCVBUF */
  if ((conn != NULL) && (conn->recvmbox != SYS_MBOX_NULL)) {
#endif /* LWIP_SO_RCVBUF */
    /* copy the whole packet into new pbufs */
    q = pbuf_alloc(PBUF_RAW, p->tot_len, PBUF_RAM);
    if(q != NULL) {
      if (pbuf_copy(q, p) != ERR_OK) {
        pbuf_free(q);
        q = NULL;
      }
    }

    if(q != NULL) {
      buf = memp_malloc(MEMP_NETBUF);
      if (buf == NULL) {
        pbuf_free(q);
        return 0;
      }

      buf->p = q;
      buf->ptr = q;
      buf->addr = &(((struct ip_hdr*)(q->payload))->src);
      buf->port = pcb->protocol;

      if (sys_mbox_trypost(conn->recvmbox, buf) != ERR_OK) {
        netbuf_delete(buf);
        return 0;
      } else {
        SYS_ARCH_INC(conn->recv_avail, q->tot_len);
        /* Register event with callback */
        API_EVENT(conn, NETCONN_EVT_RCVPLUS, q->tot_len);
      }
    }
  }

  return 0; /* do not eat the packet */
}
#endif /* LWIP_RAW*/

#if LWIP_UDP
/**
 * Receive callback function for UDP netconns.
 * Posts the packet to conn->recvmbox or deletes it on memory error.
 *
 * @see udp.h (struct udp_pcb.recv) for parameters
 */
static void
recv_udp(void *arg, struct udp_pcb *pcb, struct pbuf *p,
   struct ip_addr *addr, u16_t port)
{
  struct netbuf *buf;
  struct netconn *conn;
#if LWIP_SO_RCVBUF
  int recv_avail;
#endif /* LWIP_SO_RCVBUF */

  LWIP_UNUSED_ARG(pcb); /* only used for asserts... */
  LWIP_ASSERT("recv_udp must have a pcb argument", pcb != NULL);
  LWIP_ASSERT("recv_udp must have an argument", arg != NULL);
  conn = arg;
  LWIP_ASSERT("recv_udp: recv for wrong pcb!", conn->pcb.udp == pcb);

#if LWIP_SO_RCVBUF
  SYS_ARCH_GET(conn->recv_avail, recv_avail);
  if ((conn == NULL) || (conn->recvmbox == SYS_MBOX_NULL) ||
      ((recv_avail + (int)(p->tot_len)) > conn->recv_bufsize)) {
#else  /* LWIP_SO_RCVBUF */
  if ((conn == NULL) || (conn->recvmbox == SYS_MBOX_NULL)) {
#endif /* LWIP_SO_RCVBUF */
    pbuf_free(p);
    return;
  }

  buf = memp_malloc(MEMP_NETBUF);
  if (buf == NULL) {
    pbuf_free(p);
    return;
  } else {
    buf->p = p;
    buf->ptr = p;
    buf->addr = addr;
    buf->port = port;
#if LWIP_NETBUF_RECVINFO
    {
      const struct ip_hdr* iphdr = ip_current_header();
      /* get the UDP header - always in the first pbuf, ensured by udp_input */
      const struct udp_hdr* udphdr = (void*)(((char*)iphdr) + IPH_LEN(iphdr));
      buf->toaddr = (struct ip_addr*)&iphdr->dest;
      buf->toport = udphdr->dest;
    }
#endif /* LWIP_NETBUF_RECVINFO */
  }

  if (sys_mbox_trypost(conn->recvmbox, buf) != ERR_OK) {
    netbuf_delete(buf);
    return;
  } else {
    SYS_ARCH_INC(conn->recv_avail, p->tot_len);
    /* Register event with callback */
    API_EVENT(conn, NETCONN_EVT_RCVPLUS, p->tot_len);
  }
}
#endif /* LWIP_UDP */

#if LWIP_TCP
/**
 * Receive callback function for TCP netconns.
 * Posts the packet to conn->recvmbox, but doesn't delete it on errors.
 *
 * @see tcp.h (struct tcp_pcb.recv) for parameters and return value
 */
static err_t
recv_tcp(void *arg, struct tcp_pcb *pcb, struct pbuf *p, err_t err)
{
  struct netconn *conn;
  u16_t len;

  LWIP_UNUSED_ARG(pcb);
  LWIP_ASSERT("recv_tcp must have a pcb argument", pcb != NULL);
  LWIP_ASSERT("recv_tcp must have an argument", arg != NULL);
  conn = arg;
  LWIP_ASSERT("recv_tcp: recv for wrong pcb!", conn->pcb.tcp == pcb);

  if ((conn == NULL) || (conn->recvmbox == SYS_MBOX_NULL)) {
    return ERR_VAL;
  }

  conn->err = err;
  if (p != NULL) {
    len = p->tot_len;
    SYS_ARCH_INC(conn->recv_avail, len);
  } else {
    len = 0;
  }

  if (sys_mbox_trypost(conn->recvmbox, p) != ERR_OK) {
    return ERR_MEM;
  } else {
    /* Register event with callback */
    API_EVENT(conn, NETCONN_EVT_RCVPLUS, len);
  }

  return ERR_OK;
}

/**
 * Poll callback function for TCP netconns.
 * Wakes up an application thread that waits for a connection to close
 * or data to be sent. The application thread then takes the
 * appropriate action to go on.
 *
 * Signals the conn->sem.
 * netconn_close waits for conn->sem if closing failed.
 *
 * @see tcp.h (struct tcp_pcb.poll) for parameters and return value
 */
static err_t
poll_tcp(void *arg, struct tcp_pcb *pcb)
{
  struct netconn *conn = arg;

  LWIP_UNUSED_ARG(pcb);
  LWIP_ASSERT("conn != NULL", (conn != NULL));

  if (conn->state == NETCONN_WRITE) {
    do_writemore(conn);
  } else if (conn->state == NETCONN_CLOSE) {
    do_close_internal(conn);
  }

  return ERR_OK;
}

/**
 * Sent callback function for TCP netconns.
 * Signals the conn->sem and calls API_EVENT.
 * netconn_write waits for conn->sem if send buffer is low.
 *
 * @see tcp.h (struct tcp_pcb.sent) for parameters and return value
 */
static err_t
sent_tcp(void *arg, struct tcp_pcb *pcb, u16_t len)
{
  struct netconn *conn = arg;

  LWIP_UNUSED_ARG(pcb);
  LWIP_ASSERT("conn != NULL", (conn != NULL));

  if (conn->state == NETCONN_WRITE) {
    LWIP_ASSERT("conn->pcb.tcp != NULL", conn->pcb.tcp != NULL);
    do_writemore(conn);
  } else if (conn->state == NETCONN_CLOSE) {
    do_close_internal(conn);
  }

  if (conn) {
    if ((conn->pcb.tcp != NULL) && (tcp_sndbuf(conn->pcb.tcp) > TCP_SNDLOWAT)) {
      API_EVENT(conn, NETCONN_EVT_SENDPLUS, len);
    }
  }
  
  return ERR_OK;
}

/**
 * Error callback function for TCP netconns.
 * Signals conn->sem, posts to all conn mboxes and calls API_EVENT.
 * The application thread has then to decide what to do.
 *
 * @see tcp.h (struct tcp_pcb.err) for parameters
 */
static void
err_tcp(void *arg, err_t err)
{
  struct netconn *conn;

  conn = arg;
  LWIP_ASSERT("conn != NULL", (conn != NULL));

  conn->pcb.tcp = NULL;

  conn->err = err;
  if (conn->recvmbox != SYS_MBOX_NULL) {
    /* Register event with callback */
    API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0);
    sys_mbox_post(conn->recvmbox, NULL);
  }
  if (conn->op_completed != SYS_SEM_NULL && conn->state == NETCONN_CONNECT) {
    conn->state = NETCONN_NONE;
    sys_sem_signal(conn->op_completed);
  }
  if (conn->acceptmbox != SYS_MBOX_NULL) {
    /* Register event with callback */
    API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0);
    sys_mbox_post(conn->acceptmbox, NULL);
  }
  if ((conn->state == NETCONN_WRITE) || (conn->state == NETCONN_CLOSE)) {
    /* calling do_writemore/do_close_internal is not necessary
       since the pcb has already been deleted! */
    conn->state = NETCONN_NONE;
    /* wake up the waiting task */
    sys_sem_signal(conn->op_completed);
  }
}

/**
 * Setup a tcp_pcb with the correct callback function pointers
 * and their arguments.
 *
 * @param conn the TCP netconn to setup
 */
static void
setup_tcp(struct netconn *conn)
{
  struct tcp_pcb *pcb;

  pcb = conn->pcb.tcp;
  tcp_arg(pcb, conn);
  tcp_recv(pcb, recv_tcp);
  tcp_sent(pcb, sent_tcp);
  tcp_poll(pcb, poll_tcp, 4);
  tcp_err(pcb, err_tcp);
}

/**
 * Accept callback function for TCP netconns.
 * Allocates a new netconn and posts that to conn->acceptmbox.
 *
 * @see tcp.h (struct tcp_pcb_listen.accept) for parameters and return value
 */
static err_t
accept_function(void *arg, struct tcp_pcb *newpcb, err_t err)
{
  struct netconn *newconn;
  struct netconn *conn;

#if API_MSG_DEBUG
#if TCP_DEBUG
  tcp_debug_print_state(newpcb->state);
#endif /* TCP_DEBUG */
#endif /* API_MSG_DEBUG */
  conn = (struct netconn *)arg;

  LWIP_ERROR("accept_function: invalid conn->acceptmbox",
             conn->acceptmbox != SYS_MBOX_NULL, return ERR_VAL;);

  /* We have to set the callback here even though
   * the new socket is unknown. conn->socket is marked as -1. */
  newconn = netconn_alloc(conn->type, conn->callback);
  if (newconn == NULL) {
    return ERR_MEM;
  }
  newconn->pcb.tcp = newpcb;
  setup_tcp(newconn);
  newconn->err = err;

  if (sys_mbox_trypost(conn->acceptmbox, newconn) != ERR_OK) {
    /* When returning != ERR_OK, the connection is aborted in tcp_process(),
       so do nothing here! */
    newconn->pcb.tcp = NULL;
    netconn_free(newconn);
    return ERR_MEM;
  } else {
    /* Register event with callback */
    API_EVENT(conn, NETCONN_EVT_RCVPLUS, 0);
  }

  return ERR_OK;
}
#endif /* LWIP_TCP */

/**
 * Create a new pcb of a specific type.
 * Called from do_newconn().
 *
 * @param msg the api_msg_msg describing the connection type
 * @return msg->conn->err, but the return value is currently ignored
 */
static err_t
pcb_new(struct api_msg_msg *msg)
{
   msg->conn->err = ERR_OK;

   LWIP_ASSERT("pcb_new: pcb already allocated", msg->conn->pcb.tcp == NULL);

   /* Allocate a PCB for this connection */
   switch(NETCONNTYPE_GROUP(msg->conn->type)) {
#if LWIP_RAW
   case NETCONN_RAW:
     msg->conn->pcb.raw = raw_new(msg->msg.n.proto);
     if(msg->conn->pcb.raw == NULL) {
       msg->conn->err = ERR_MEM;
       break;
     }
     raw_recv(msg->conn->pcb.raw, recv_raw, msg->conn);
     break;
#endif /* LWIP_RAW */
#if LWIP_UDP
   case NETCONN_UDP:
     msg->conn->pcb.udp = udp_new();
     if(msg->conn->pcb.udp == NULL) {
       msg->conn->err = ERR_MEM;
       break;
     }
#if LWIP_UDPLITE
     if (msg->conn->type==NETCONN_UDPLITE) {
       udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_UDPLITE);
     }
#endif /* LWIP_UDPLITE */
     if (msg->conn->type==NETCONN_UDPNOCHKSUM) {
       udp_setflags(msg->conn->pcb.udp, UDP_FLAGS_NOCHKSUM);
     }
     udp_recv(msg->conn->pcb.udp, recv_udp, msg->conn);
     break;
#endif /* LWIP_UDP */
#if LWIP_TCP
   case NETCONN_TCP:
     msg->conn->pcb.tcp = tcp_new();
     if(msg->conn->pcb.tcp == NULL) {
       msg->conn->err = ERR_MEM;
       break;
     }
     setup_tcp(msg->conn);
     break;
#endif /* LWIP_TCP */
   default:
     /* Unsupported netconn type, e.g. protocol disabled */
     msg->conn->err = ERR_VAL;
     break;
   }

  return msg->conn->err;
}
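
From the application side, all of this plumbing (netconn_alloc(), pcb_new(), setup_tcp()) is hidden behind the sequential netconn API. A minimal listener sketch is given below; the older pointer-returning netconn_accept() form matching this lwIP version is assumed (newer releases return the connection through an out-parameter), and the function name is a placeholder.

#include "lwip/api.h"

void tcp_listener_example(void)
{
  struct netconn *conn, *newconn;

  conn = netconn_new(NETCONN_TCP);      /* ends up in netconn_alloc()/pcb_new() */
  if (conn == NULL) {
    return;
  }
  netconn_bind(conn, NULL, 80);         /* NULL = any local address */
  netconn_listen(conn);

  newconn = netconn_accept(conn);       /* blocks until accept_function() posts a conn */
  if (newconn != NULL) {
    /* ... use netconn_recv()/netconn_write() on newconn ... */
    netconn_delete(newconn);
  }
  netconn_delete(conn);
}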
Example #27
/** \brief  Low level output of a packet. Never call this from an
 *          interrupt context, as it may block until TX descriptors
 *          become available.
 *
 *  \param[in] netif the lwip network interface structure for this netif
 *  \param[in] p the MAC packet to send (e.g. IP packet including MAC addresses and type)
 *  \return ERR_OK if the packet could be sent or an err_t value if the packet couldn't be sent
 */
static err_t k64f_low_level_output(struct netif *netif, struct pbuf *p)
{
  struct k64f_enetdata *k64f_enet = netif->state;
  struct pbuf *q;
  u32_t idx;
  s32_t dn;
  uint8_t *psend = NULL, *dst;

  /* Get free TX buffer index */
  idx = k64f_enet->tx_produce_index;

  /* Check the pbuf chain for payloads that are not 8-byte aligned.
     If found, a new properly aligned buffer needs to be allocated
     and the data copied there */
  for (q = p; q != NULL; q = q->next)
    if (((u32_t)q->payload & (TX_BUF_ALIGNMENT - 1)) != 0)
      break;
  if (q != NULL) {
    // Allocate properly aligned buffer
    psend = (uint8_t*)malloc(p->tot_len);
    if (NULL == psend)
      return ERR_MEM;
    LWIP_ASSERT("k64f_low_level_output: buffer not properly aligned", ((u32_t)psend & (TX_BUF_ALIGNMENT - 1)) == 0);
    for (q = p, dst = psend; q != NULL; q = q->next) {
      MEMCPY(dst, q->payload, q->len);
      dst += q->len;
    }
    k64f_enet->txb_aligned[idx] = psend;
    dn = 1;
  } else {
    k64f_enet->txb_aligned[idx] = NULL;
    dn = (s32_t) pbuf_clen(p);
    pbuf_ref(p);
  }

  /* Wait until enough descriptors are available for the transfer. */
  /* THIS WILL BLOCK UNTIL THERE ARE ENOUGH DESCRIPTORS AVAILABLE */
  // TODO: exit with "out of memory" error ?
  // while (dn > k64f_tx_ready(netif))
  //   osSemaphoreWait(k64f_enet->xTXDCountSem.id, osWaitForever);
  if (dn > k64f_tx_ready(netif))
     printf("/////////////////////////////////////////////////////////////////////\n");
  /* Setup transfers */
  q = p;
  while (dn > 0) {
    dn--;
    if (psend != NULL) {
      k64f_update_txbds(k64f_enet, idx, psend, p->tot_len, 1);
      k64f_enet->txb[idx] = NULL;

      LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
      ("k64f_low_level_output: aligned packet(%p) sent"
      " size = %d (index=%d)\n", psend, p->tot_len, idx));
    } else {
      LWIP_ASSERT("k64f_low_level_output: buffer not properly aligned", ((u32_t)q->payload & 0x07) == 0);

      /* Only save pointer to free on last descriptor */
      if (dn == 0) {
        /* Save size of packet and signal it's ready */
        k64f_update_txbds(k64f_enet, idx, q->payload, q->len, 1);
        k64f_enet->txb[idx] = p;
      }
      else {
        /* Save size of packet, descriptor is not last */
        k64f_update_txbds(k64f_enet, idx, q->payload, q->len, 0);
        k64f_enet->txb[idx] = NULL;
      }

      LWIP_DEBUGF(UDP_LPC_EMAC | LWIP_DBG_TRACE,
      ("k64f_low_level_output: pbuf packet(%p) sent, chain#=%d,"
      " size = %d (index=%d)\n", q->payload, dn, q->len, idx));
    }

    q = q->next;

    idx = (idx + 1) % ENET_TX_RING_LEN;
  }

  k64f_enet->tx_produce_index = idx;
  enet_hal_active_txbd(BOARD_DEBUG_ENET_INSTANCE_ADDR);
  LINK_STATS_INC(link.xmit);

  return ERR_OK;
}
Example #28
/**
 * See if more data needs to be written from a previous call to netconn_write.
 * Called initially from do_write. If the first call can't send all data
 * (because of low memory or empty send-buffer), this function is called again
 * from sent_tcp() or poll_tcp() to send more data. If all data is sent, the
 * blocking application thread (waiting in netconn_write) is released.
 *
 * @param conn netconn (that is currently in state NETCONN_WRITE) to process
 * @return ERR_OK
 *         ERR_MEM if LWIP_TCPIP_CORE_LOCKING=1 and sending hasn't yet finished
 */
static err_t
do_writemore(struct netconn *conn)
{
  err_t err;
  void *dataptr;
  u16_t len, available;
  u8_t write_finished = 0;
  size_t diff;

  LWIP_ASSERT("conn->state == NETCONN_WRITE", (conn->state == NETCONN_WRITE));

  dataptr = (u8_t*)conn->write_msg->msg.w.dataptr + conn->write_offset;
  diff = conn->write_msg->msg.w.len - conn->write_offset;
  if (diff > 0xffffUL) { /* max_u16_t */
    len = 0xffff;
#if LWIP_TCPIP_CORE_LOCKING
    conn->write_delayed = 1;
#endif
  } else {
    len = (u16_t)diff;
  }
  available = tcp_sndbuf(conn->pcb.tcp);
  if (available < len) {
    /* don't try to write more than sendbuf */
    len = available;
#if LWIP_TCPIP_CORE_LOCKING
    conn->write_delayed = 1;
#endif
  }

  err = tcp_write(conn->pcb.tcp, dataptr, len, conn->write_msg->msg.w.apiflags);
  LWIP_ASSERT("do_writemore: invalid length!", ((conn->write_offset + len) <= conn->write_msg->msg.w.len));
  if (err == ERR_OK) {
    conn->write_offset += len;
    if (conn->write_offset == conn->write_msg->msg.w.len) {
      /* everything was written */
      write_finished = 1;
      conn->write_msg = NULL;
      conn->write_offset = 0;
      /* API_EVENT might call tcp_tmr, so reset conn->state now */
      conn->state = NETCONN_NONE;
    }
    err = tcp_output_nagle(conn->pcb.tcp);
    conn->err = err;
    if ((err == ERR_OK) && (tcp_sndbuf(conn->pcb.tcp) <= TCP_SNDLOWAT)) {
      API_EVENT(conn, NETCONN_EVT_SENDMINUS, len);
    }
  } else if (err == ERR_MEM) {
    /* If ERR_MEM, we wait for sent_tcp or poll_tcp to be called
       we do NOT return to the application thread, since ERR_MEM is
       only a temporary error! */

    /* tcp_enqueue returned ERR_MEM, try tcp_output anyway */
    err = tcp_output(conn->pcb.tcp);

#if LWIP_TCPIP_CORE_LOCKING
    conn->write_delayed = 1;
#endif
  } else {
    /* On errors != ERR_MEM, we don't try writing any more but return
       the error to the application thread. */
    conn->err = err;
    write_finished = 1;
  }

  if (write_finished) {
    /* everything was written: set back connection state
       and back to application task */
    conn->state = NETCONN_NONE;
#if LWIP_TCPIP_CORE_LOCKING
    if (conn->write_delayed != 0)
#endif
    {
      sys_sem_signal(conn->op_completed);
    }
  }
#if LWIP_TCPIP_CORE_LOCKING
  else
    return ERR_MEM;
#endif
  return ERR_OK;
}
Example #29
/**
 * Called every 500 ms and implements the retransmission timer and the timer that
 * removes PCBs that have been in TIME-WAIT for enough time. It also increments
 * various timers such as the inactivity timer in each PCB.
 *
 * Automatically called from tcp_tmr().
 */
void
tcp_slowtmr(void)
{
  struct tcp_pcb *pcb, *pcb2, *prev;
  u16_t eff_wnd;
  u8_t pcb_remove;      /* flag if a PCB should be removed */
  u8_t pcb_reset;       /* flag if a RST should be sent when removing */

  ++tcp_ticks;

  /* Steps through all of the active PCBs. */
  prev = NULL;
  pcb = tcp_active_pcbs;
  if (pcb == NULL) {
    LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: no active pcbs\n"));
  }
  while (pcb != NULL) {
    LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: processing active pcb\n"));
    LWIP_ASSERT("tcp_slowtmr: active pcb->state != CLOSED\n", pcb->state != CLOSED);
    LWIP_ASSERT("tcp_slowtmr: active pcb->state != LISTEN\n", pcb->state != LISTEN);
    LWIP_ASSERT("tcp_slowtmr: active pcb->state != TIME-WAIT\n", pcb->state != TIME_WAIT);

    pcb_remove = 0;
    pcb_reset = 0;

    if (pcb->state == SYN_SENT && pcb->nrtx == TCP_SYNMAXRTX) {
      ++pcb_remove;
      LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max SYN retries reached\n"));
    }
    else if (pcb->nrtx == TCP_MAXRTX) {
      ++pcb_remove;
      LWIP_DEBUGF(TCP_DEBUG, ("tcp_slowtmr: max DATA retries reached\n"));
    } else {
      if (pcb->persist_backoff > 0) {
        /* If snd_wnd is zero, use persist timer to send 1 byte probes
         * instead of using the standard retransmission mechanism. */
        pcb->persist_cnt++;
        if (pcb->persist_cnt >= tcp_persist_backoff[pcb->persist_backoff-1]) {
          pcb->persist_cnt = 0;
          if (pcb->persist_backoff < sizeof(tcp_persist_backoff)) {
            pcb->persist_backoff++;
          }
          tcp_zero_window_probe(pcb);
        }
      } else {
        /* Increase the retransmission timer if it is running */
        if(pcb->rtime >= 0)
          ++pcb->rtime;

        if (pcb->unacked != NULL && pcb->rtime >= pcb->rto) {
          /* Time for a retransmission. */
          LWIP_DEBUGF(TCP_RTO_DEBUG, ("tcp_slowtmr: rtime %"S16_F
                                      " pcb->rto %"S16_F"\n",
                                      pcb->rtime, pcb->rto));

          /* Double retransmission time-out unless we are trying to
           * connect to somebody (i.e., we are in SYN_SENT). */
          if (pcb->state != SYN_SENT) {
            pcb->rto = ((pcb->sa >> 3) + pcb->sv) << tcp_backoff[pcb->nrtx];
          }

          /* Reset the retransmission timer. */
          pcb->rtime = 0;

          /* Reduce congestion window and ssthresh. */
          eff_wnd = LWIP_MIN(pcb->cwnd, pcb->snd_wnd);
          pcb->ssthresh = eff_wnd >> 1;
          if (pcb->ssthresh < (pcb->mss << 1)) {
            pcb->ssthresh = (pcb->mss << 1);
          }
          pcb->cwnd = pcb->mss;
          LWIP_DEBUGF(TCP_CWND_DEBUG, ("tcp_slowtmr: cwnd %"U16_F
                                       " ssthresh %"U16_F"\n",
                                       pcb->cwnd, pcb->ssthresh));
 
          /* The following needs to be called AFTER cwnd is set to one
             mss - STJ */
          tcp_rexmit_rto(pcb);
        }
      }
    }
struct snmp_varbind*
snmp_varbind_alloc(struct snmp_obj_id *oid, u8_t type, u8_t len)
{
  struct snmp_varbind *vb;

  vb = (struct snmp_varbind *)mem_malloc(sizeof(struct snmp_varbind));
  LWIP_ASSERT("vb != NULL",vb != NULL);
  if (vb != NULL)
  {
    u8_t i;

    vb->next = NULL;
    vb->prev = NULL;
    i = oid->len;
    vb->ident_len = i;
    if (i > 0)
    {
      /* allocate array of s32_t for our object identifier */
      vb->ident = (s32_t*)mem_malloc(sizeof(s32_t) * i);
      LWIP_ASSERT("vb->ident != NULL",vb->ident != NULL);
      if (vb->ident == NULL)
      {
        mem_free(vb);
        return NULL;
      }
      while(i > 0)
      {
        i--;
        vb->ident[i] = oid->id[i];
      }
    }
    else
    {
      /* i == 0, pass zero length object identifier */
      vb->ident = NULL;
    }
    vb->value_type = type;
    vb->value_len = len;
    if (len > 0)
    {
      /* allocate raw bytes for our object value */
      vb->value = mem_malloc(len);
      LWIP_ASSERT("vb->value != NULL",vb->value != NULL);
      if (vb->value == NULL)
      {
        if (vb->ident != NULL)
        {
          mem_free(vb->ident);
        }
        mem_free(vb);
        return NULL;
      }
    }
    else
    {
      /* ASN1_NUL type, or zero length ASN1_OC_STR */
      vb->value = NULL;
    }
  }
  return vb;
}
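
A short sketch of allocating and releasing a varbind with this helper. snmp_varbind_free(), the SNMP_ASN1_COUNTER constant and the header names are assumed from the same older lwIP SNMP agent; the OID and value are placeholders.

#include "lwip/snmp.h"        /* assumed: older lwIP SNMP agent headers */
#include "lwip/snmp_asn1.h"

void varbind_example(void)
{
  struct snmp_obj_id oid = { 4, { 1, 3, 6, 1 } };
  struct snmp_varbind *vb;

  /* allocate identifier storage plus 4 raw value bytes */
  vb = snmp_varbind_alloc(&oid, SNMP_ASN1_COUNTER, sizeof(u32_t));
  if (vb != NULL) {
    *(u32_t *)vb->value = 1234;   /* value buffer was mem_malloc'ed above */
    snmp_varbind_free(vb);        /* releases ident, value and the varbind itself */
  }
}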