Example #1
bool uip_igmpcmptimer(FAR struct igmp_group_s *group, int maxticks)
{
  uip_lock_t flags;
  int remaining;

  /* Disable interrupts so that there is no race condition with the actual
   * timer expiration.
   */

  flags = uip_lock();

  /* Get the timer remaining on the watchdog.  A time of <= zero means that
   * the watchdog was never started.
   */

  remaining = wd_gettime(group->wdog);

  /* A remaining time of zero means that the watchdog was never started
   * or has already expired.  That case should be covered in the following
   * test as well.
   */

  gtmrdbg("maxticks: %d remaining: %d\n", maxticks, remaining);
  if (maxticks > remaining)
    {
      /* Cancel the watchdog timer and return true */

      wd_cancel(group->wdog);
      uip_unlock(flags);
      return true;
    }

  uip_unlock(flags);
  return false;
}
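All of the examples in this collection follow the same critical-section idiom: uip_lock() saves the current interrupt/lock state while it takes the network lock, and uip_unlock() restores that state on every exit path. A minimal sketch of the idiom follows; the shared counter and the wrapper function are hypothetical, and only the locking calls themselves come from the examples.

/* Minimal sketch of the uip_lock()/uip_unlock() idiom.  The shared
 * counter is a hypothetical variable that interrupt-level code also
 * modifies; only the locking calls come from the examples here.
 */

static volatile int g_sample_count;

static int sample_increment(void)
{
  uip_lock_t flags;
  int value;

  /* Take the network lock (disable interrupts) before touching state
   * that interrupt handlers may also modify.
   */

  flags = uip_lock();
  value = ++g_sample_count;

  /* Restore the previous lock state on the way out */

  uip_unlock(flags);
  return value;
}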
Example #2
int net_addroute(uip_ipaddr_t target, uip_ipaddr_t netmask,
                 uip_ipaddr_t router)
{
  FAR struct net_route_s *route;
  uip_lock_t save;

  /* Allocate a route entry */

  route = net_allocroute();
  if (!route)
    {
      ndbg("ERROR:  Failed to allocate a route\n");
      return -ENOMEM;
    }

  /* Format the new route table entry */

  uip_ipaddr_copy(route->target, target);
  uip_ipaddr_copy(route->netmask, netmask);
  uip_ipaddr_copy(route->router, router);

  /* Get exclusive access to the networking data structures */

  save = uip_lock();

  /* Then add the new entry to the table */

  sq_addlast((FAR sq_entry_t *)route, (FAR sq_queue_t *)&g_routes);
  uip_unlock(save);
  return OK;
}
Example #3
void uip_igmpwaitmsg(FAR struct igmp_group_s *group, uint8_t msgid)
{
  uip_lock_t flags;

  /* Schedule to send the message */

  flags = uip_lock();
  DEBUGASSERT(!IS_WAITMSG(group->flags));
  SET_WAITMSG(group->flags);
  uip_igmpschedmsg(group, msgid);

  /* Then wait for the message to be sent */

  while (IS_SCHEDMSG(group->flags))
    {
      /* Wait for the semaphore to be posted */

      while (uip_lockedwait(&group->sem) != 0)
        {
          /* The only error that should occur from uip_lockedwait() is if
           * the wait is awakened by a signal.
           */
 
          ASSERT(errno == EINTR);
        }
    }

  /* The message has been sent and we are no longer waiting */

  CLR_WAITMSG(group->flags);
  uip_unlock(flags);
}
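uip_igmpwaitmsg() above also shows the blocking form of the idiom: uip_lockedwait() is used instead of a plain semaphore wait so that the network lock is released while the task sleeps and re-acquired before the call returns, and the wait is retried when a signal wakes it early (errno == EINTR). A condensed sketch of that wait-under-lock pattern follows; the semaphore and the completion flag are hypothetical.

/* Sketch of the "wait for a condition while holding the network lock"
 * pattern.  The semaphore and the done flag are hypothetical; the
 * EINTR handling mirrors uip_igmpwaitmsg() above.
 */

static void sample_wait(FAR sem_t *sem, FAR volatile bool *done)
{
  uip_lock_t flags = uip_lock();

  while (!*done)
    {
      /* uip_lockedwait() releases the network lock while the task
       * sleeps and re-acquires it before returning.  Retry if the wait
       * was interrupted by a signal.
       */

      while (uip_lockedwait(sem) != 0)
        {
          ASSERT(errno == EINTR);
        }
    }

  uip_unlock(flags);
}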
Example #4
FAR struct igmp_group_s *uip_grpfind(FAR struct uip_driver_s *dev,
                                     FAR const uip_ipaddr_t *addr)
{
  FAR struct igmp_group_s *group;
  uip_lock_t flags;

  grplldbg("Searching for addr %08x\n", (int)*addr);

  /* We must disable interrupts because we don't know which context we
   * were called from.
   */

  flags = uip_lock();
  for (group = (FAR struct igmp_group_s *)dev->grplist.head;
       group;
       group = group->next)
    {
      grplldbg("Compare: %08x vs. %08x\n", group->grpaddr, *addr);
      if (uip_ipaddr_cmp(group->grpaddr, *addr))
        {
          grplldbg("Match!\n");
          break;
        }
    }
  uip_unlock(flags);
  return group;
}
Example #5
int uip_tcpbind(struct uip_conn *conn, const struct sockaddr_in *addr)
{
  uip_lock_t flags;
  int port;

  /* Verify or select a local port */

  flags = uip_lock();
  port = uip_selectport(ntohs(addr->sin_port));
  uip_unlock(flags);

  if (port < 0)
    {
      return port;
    }

  /* Save the local address in the connection structure.  Note that the requested
   * local IP address is saved but not used.  At present, only a single network
   * interface is supported, so the IP address is of no importance.
   */

  conn->lport = addr->sin_port;

#if 0 /* Not used */
#ifdef CONFIG_NET_IPv6
  uip_ipaddr_copy(conn->lipaddr, addr->sin6_addr.in6_u.u6_addr16);
#else
  uip_ipaddr_copy(conn->lipaddr, addr->sin_addr.s_addr);
#endif
#endif

  return OK;
}
Example #6
static inline int net_pollteardown(FAR struct socket *psock, struct pollfd *fds)
{
  FAR struct uip_conn *conn = psock->s_conn;
  FAR struct uip_callback_s *cb;
  uip_lock_t flags;

  /* Sanity check */

#ifdef CONFIG_DEBUG
  if (!conn || !fds->priv)
    {
      return -EINVAL;
    }
#endif

  /* Recover the socket descriptor poll state info from the poll structure */

  cb = (FAR struct uip_callback_s *)fds->priv;
  if (cb)
    {
      /* Release the callback */

      flags = uip_lock();
      uip_tcpcallbackfree(conn, cb);
      uip_unlock(flags);

      /* Release the poll/select data slot */

      fds->priv = NULL;
    }

  return OK;
}
Example #7
int uip_udpbind(struct uip_udp_conn *conn, const struct sockaddr_in *addr)
{
  int ret = -EADDRINUSE;
  uip_lock_t flags;

  /* Is the user requesting to bind to any port? */

  if (!addr->sin_port)
    {
      /* Yes.. Find an unused local port number */

      conn->lport = htons(uip_selectport());
      ret         = OK;
    }
  else
    {
      /* Interrupts must be disabled while accessing the UDP connection list */

      flags = uip_lock();

      /* Is any other UDP connection bound to this port? */

      if (!uip_find_conn(addr->sin_port))
        {
          /* No.. then bind the socket to the port */

          conn->lport = addr->sin_port;
          ret         = OK;
        }

      uip_unlock(flags);
    }
  return ret;
}
Example #8
static uint16_t uip_selectport(void)
{
  uint16_t portno;

  /* Find an unused local port number.  Loop until we find a valid
   * listen port number that is not being used by any other connection.
   */

  uip_lock_t flags = uip_lock();
  do
    {
      /* Guess that the next available port number will be the one after
       * the last port number assigned.
       */

      ++g_last_udp_port;

      /* Make sure that the port number is within range */

      if (g_last_udp_port >= 32000)
        {
          g_last_udp_port = 4096;
        }
    }
  while (uip_find_conn(htons(g_last_udp_port)));

  /* Return the newly selected port number.  The caller is responsible
   * for binding it to the connection.
   */

  portno = g_last_udp_port;
  uip_unlock(flags);

  return portno;
}
Example #9
void uip_tcpfree(struct uip_conn *conn)
{
#if CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0
  struct uip_readahead_s *readahead;
#endif
  uip_lock_t flags;

  /* Because g_free_tcp_connections is accessed from both user level and
   * interrupt level code, it is necessary to keep interrupts disabled during
   * this operation.
   */

  DEBUGASSERT(conn->crefs == 0);
  flags = uip_lock();

  /* UIP_ALLOCATED means that the connection is not in the active list
   * yet.
   */

  if (conn->tcpstateflags != UIP_ALLOCATED)
    {
      /* Remove the connection from the active list */

      dq_rem(&conn->node, &g_active_tcp_connections);
    }

  /* Release any read-ahead buffers attached to the connection */

#if CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0
  while ((readahead = (struct uip_readahead_s *)sq_remfirst(&conn->readahead)) != NULL)
    {
      uip_tcpreadaheadrelease(readahead);
    }
#endif

  /* Remove any backlog attached to this connection */

#ifdef CONFIG_NET_TCPBACKLOG
  if (conn->backlog)
    {
      uip_backlogdestroy(conn);
    }

  /* If this connection is, itself, backlogged, then remove it from the
   * parent connection's backlog list.
   */

  if (conn->blparent)
    {
      uip_backlogdelete(conn->blparent, conn);
    }
#endif

  /* Mark the connection available and put it into the free list */

  conn->tcpstateflags = UIP_CLOSED;
  dq_addlast(&conn->node, &g_free_tcp_connections);
  uip_unlock(flags);
}
Example #10
void uip_grpfree(FAR struct uip_driver_s *dev, FAR struct igmp_group_s *group)
{
  uip_lock_t flags;

  grplldbg("Free: %p flags: %02x\n", group, group->flags);

  /* Cancel the wdog */

  flags = uip_lock();
  wd_cancel(group->wdog);
  
  /* Remove the group structure from the group list in the device structure */

  sq_rem((FAR sq_entry_t*)group, &dev->grplist);
  
  /* Destroy the wait semaphore */

  (void)sem_destroy(&group->sem);

  /* Destroy the wdog */

  wd_delete(group->wdog);
  
  /* Then release the group structure resources.  Check first if this is one
   * of the pre-allocated group structures that we will retain in a free list.
   */

#if CONFIG_PREALLOC_IGMPGROUPS > 0
  if (IS_PREALLOCATED(group->flags))
    {
      grplldbg("Put back on free list\n");
      sq_addlast((FAR sq_entry_t*)group, &g_freelist);
      uip_unlock(flags);
    }
  else
#endif
    {
      /* No.. deallocate the group structure.  Use sched_free() just in case
       * this function is executing within an interrupt handler.
       */

      uip_unlock(flags);
      grplldbg("Call sched_free()\n");
      sched_free(group);
    }
}
Example #11
void uip_igmpschedmsg(FAR struct igmp_group_s *group, uint8_t msgid)
{
  uip_lock_t flags;

  /* The following should be atomic */

  flags = uip_lock();
  DEBUGASSERT(!IS_SCHEDMSG(group->flags));
  group->msgid = msgid;
  SET_SCHEDMSG(group->flags);
  uip_unlock(flags);
}
Example #12
int net_clone(FAR struct socket *psock1, FAR struct socket *psock2)
{
  uip_lock_t flags;
  int ret = OK;

  /* Parts of this operation need to be atomic */

  flags = uip_lock();

  /* Duplicate the socket state */

  psock2->s_type     = psock1->s_type;      /* Protocol type: Only SOCK_STREAM or SOCK_DGRAM */
  psock2->s_flags    = psock1->s_flags;     /* See _SF_* definitions */
#ifdef CONFIG_NET_SOCKOPTS
  psock2->s_options  = psock1->s_options;   /* Selected socket options */
#endif
#ifndef CONFIG_DISABLE_CLOCK
  psock2->s_rcvtimeo = psock1->s_rcvtimeo;  /* Receive timeout value (in deciseconds) */
  psock2->s_sndtimeo = psock1->s_sndtimeo;  /* Send timeout value (in deciseconds) */
#endif
  psock2->s_conn     = psock1->s_conn;      /* UDP or TCP connection structure */

  /* Increment the reference count on the connection */

  DEBUGASSERT(psock2->s_conn);
  psock2->s_crefs    = 1;                   /* One reference on the new socket itself */

#ifdef CONFIG_NET_TCP
  if (psock2->s_type == SOCK_STREAM)
    {
      struct uip_conn *conn = psock2->s_conn;
      DEBUGASSERT(conn->crefs > 0 && conn->crefs < 255);
      conn->crefs++;
    }
  else
#endif
#ifdef CONFIG_NET_UDP
  if (psock2->s_type == SOCK_DGRAM)
    {
      struct uip_udp_conn *conn = psock2->s_conn;
      DEBUGASSERT(conn->crefs > 0 && conn->crefs < 255);
      conn->crefs++;
    }
  else
#endif
    {
      ndbg("Unsupported type: %d\n", psock2->s_type);
      ret = -EBADF;
    }

  uip_unlock(flags);
  return ret;
}
Example #13
static inline void dhcpd_arpupdate(uint16_t *pipaddr, uint8_t *phwaddr)
{
  uip_lock_t flags;

  /* Disable interrupts and update the ARP table -- very non-portable hack.
   * REVISIT -- switch to the SIOCSARP ioctl call if/when it is implemented.
   */

  flags = uip_lock();
  arp_update(pipaddr, phwaddr);
  uip_unlock(flags);
}
Example #14
FAR struct igmp_group_s *uip_grpalloc(FAR struct uip_driver_s *dev,
                                      FAR const uip_ipaddr_t *addr)
{
    FAR struct igmp_group_s *group;
    uip_lock_t flags;

    nllvdbg("addr: %08x dev: %p\n", *addr, dev);
    if (up_interrupt_context())
    {
#if CONFIG_PREALLOC_IGMPGROUPS > 0
        grplldbg("Use a pre-allocated group entry\n");
        group = uip_grpprealloc();
#else
        grplldbg("Cannot allocate from interrupt handler\n");
        group = NULL;
#endif
    }
    else
    {
        grplldbg("Allocate from the heap\n");
        group = uip_grpheapalloc();
    }

    grplldbg("group: %p\n", group);

    /* Check if we successfully allocated a group structure */

    if (group)
    {
        /* Initialize the non-zero elements of the group structure */

        uip_ipaddr_copy(group->grpaddr, *addr);
        sem_init(&group->sem, 0, 0);

        /* Initialize the group timer (but don't start it yet) */

        group->wdog = wd_create();
        DEBUGASSERT(group->wdog);

        /* Interrupts must be disabled in order to modify the group list */

        flags = uip_lock();

        /* Add the group structure to the list in the device structure */

        sq_addfirst((FAR sq_entry_t*)group, &dev->grplist);
        uip_unlock(flags);
    }

    return group;
}
Example #15
static inline void netclose_disconnect(FAR struct socket *psock)
{
  struct tcp_close_s state;
  uip_lock_t flags;

  /* Interrupts are disabled here to avoid race conditions */

  flags = uip_lock();

  /* Is the TCP socket in a connected state? */

  if (_SS_ISCONNECTED(psock->s_flags))
    {
       struct uip_conn *conn = (struct uip_conn*)psock->s_conn;

       /* Check for the case where the host beat us and disconnected first */

       if (conn->tcpstateflags == UIP_ESTABLISHED)
         {
           /* Set up to receive TCP data event callbacks */

           state.cl_cb = uip_tcpcallbackalloc(conn);
           if (state.cl_cb)
             {
               state.cl_psock       = psock;
               sem_init(&state.cl_sem, 0, 0);

               state.cl_cb->flags   = UIP_NEWDATA|UIP_POLL|UIP_CLOSE|UIP_ABORT;
               state.cl_cb->priv    = (void*)&state;
               state.cl_cb->event   = netclose_interrupt;

               /* Notify the device driver of the availability of TX data */

               netdev_txnotify(&conn->ripaddr);

               /* Wait for the disconnect event */

               (void)uip_lockedwait(&state.cl_sem);

               /* We are now disconnected */

               sem_destroy(&state.cl_sem);
               uip_tcpcallbackfree(conn, state.cl_cb);
            }
        }
    }

  uip_unlock(flags);
}
Example #16
void net_freeroute(FAR struct net_route_s *route)
{
  uip_lock_t save;

  DEBUGASSERT(route);

  /* Get exclusive access to the networking data structures */

  save = uip_lock();

  /* Then add the route back to the free list */

  sq_addlast((FAR sq_entry_t *)route, (FAR sq_queue_t *)&g_freeroutes);
  uip_unlock(save);
}
Example #17
static void slip_txtask(int argc, char *argv[])
{
  FAR struct slip_driver_s *priv;
  unsigned int index = *(argv[1]) - '0';
  uip_lock_t flags;

  ndbg("index: %d\n", index);
  DEBUGASSERT(index < CONFIG_SLIP_NINTERFACES);

  /* Get our private data structure instance and wake up the waiting
   * initialization logic.
   */

  priv = &g_slip[index];
  slip_semgive(priv);

  /* Loop forever */

  for (;;)
    {
      /* Wait for the timeout to expire (or until we are signaled by slip_txavail) */

      usleep(SLIP_WDDELAY);

      /* Is the interface up? */

      if (priv->bifup)
        {
          /* Get exclusive access to uIP (if it is already being used by
           * slip_rxtask, then we have to wait).
           */

          slip_semtake(priv);

          /* Poll uIP for new XMIT data. BUG:  We really need to calculate
           * the number of hsecs!  When we are awakened by slip_txavail, the
           * number will be smaller; when we have to wait for the semaphore
           * (above), it may be larger.
           */

          flags = uip_lock();
          priv->dev.d_buf = priv->txbuf;
          (void)uip_timer(&priv->dev, slip_uiptxpoll, SLIP_POLLHSEC);
          uip_unlock(flags);
          slip_semgive(priv);
        }
    }
}
Example #18
FAR struct net_route_s *net_allocroute(void)
{
  FAR struct net_route_s *route;
  uip_lock_t save;

  /* Get exclusive access to the networking data structures */

  save = uip_lock();

  /* Then remove the first route entry from the free list */

  route = (FAR struct net_route_s *)
    sq_remfirst((FAR sq_queue_t *)&g_freeroutes);

  uip_unlock(save);
  return route;
}
Example #19
int net_foreachroute(route_handler_t handler, FAR void *arg)
{
  uip_lock_t save;
  int ret = 0;
  int i;

  /* Prevent concurrent access to the routing table */

  save = uip_lock();

  for (i = 0; i < CONFIG_NET_MAXROUTES && ret == 0; i++)
    {
      ret = handler(&g_routes[i], arg);
    }

  /* Unlock uIP */

  uip_unlock(save);
  return ret;
}
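Because the loop above continues only while the handler returns zero, a route_handler_t callback can either visit every entry or stop the traversal early by returning non-zero. The sketch below is a hypothetical handler that simply dumps each entry; the target, netmask, and router field names come from the net_addroute() example earlier, and an IPv4 build (where uip_ipaddr_t is a 32-bit value) is assumed.

/* Hypothetical route_handler_t callback for net_foreachroute().  A zero
 * return continues the traversal; a non-zero return stops it.
 */

static int route_print(FAR struct net_route_s *route, FAR void *arg)
{
  nvdbg("target: %08x netmask: %08x router: %08x\n",
        route->target, route->netmask, route->router);
  return 0;
}

/* Usage:  (void)net_foreachroute(route_print, NULL); */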
Example #20
uint16_t uip_callbackexecute(FAR struct uip_driver_s *dev, void *pvconn,
                             uint16_t flags, FAR struct uip_callback_s *list)
{
  FAR struct uip_callback_s *next;
  uip_lock_t save;

  /* Loop for each callback in the list and while there are still events
   * set in the flags set.
   */

  save = uip_lock();
  while (list && flags)
    {
      /* Save the pointer to the next callback in the list.  This is done
       * because the callback action might delete the entry pointed to by
       * list.
       */

      next = list->flink;

      /* Check if this callback handles any of the events in the flag set */

      if (list->event && (flags & list->flags) != 0)
        {
          /* Yes.. perform the callback.  Actions performed by the callback
           * may delete the current list entry or add a new list entry to
           * the beginning of the list (which will be ignored on this pass)
           */

          nllvdbg("Call event=%p with flags=%04x\n", list->event, flags);
          flags = list->event(dev, pvconn, list->priv, flags);
        }

      /* Set up for the next time through the loop */

      list = next;
    }

  uip_unlock(save);
  return flags;
}
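uip_callbackexecute() is the dispatch side of the callback lists that uip_callbackfree() (Example #22) and uip_callbackalloc() (Example #23) maintain: the caller passes in the pending event flags, each registered callback may consume some of them, and whatever remains is returned. The sketch below shows a hypothetical dispatch of a UIP_NEWDATA event; the conn->list member appears in the uip_tcpfree() example later in this collection, and the wrapper function itself is illustrative only.

/* Hypothetical dispatch of a UIP_NEWDATA event through a TCP
 * connection's callback list (conn->list).  The wrapper itself is
 * illustrative only.
 */

static uint16_t sample_dispatch(FAR struct uip_driver_s *dev,
                                FAR struct uip_conn *conn)
{
  /* Each registered callback may consume events by clearing bits in the
   * returned flag set; whatever is left over is reported back.
   */

  return uip_callbackexecute(dev, conn, UIP_NEWDATA, conn->list);
}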
Example #21
static inline int net_pollteardown(FAR struct socket *psock,
                                   FAR struct pollfd *fds)
{
  FAR struct uip_conn *conn = psock->s_conn;
  FAR struct net_poll_s *info;
  uip_lock_t flags;

  /* Sanity check */

#ifdef CONFIG_DEBUG
  if (!conn || !fds->priv)
    {
      return -EINVAL;
    }
#endif

  /* Recover the socket descriptor poll state info from the poll structure */

  info = (FAR struct net_poll_s *)fds->priv;
  DEBUGASSERT(info && info->fds && info->cb);
  if (info)
    {
      /* Release the callback */

      flags = uip_lock();
      uip_tcpcallbackfree(conn, info->cb);
      uip_unlock(flags);

      /* Release the poll/select data slot */

      info->fds->priv = NULL;

      /* Then free the poll info container */

      kfree(info);
    }

  return OK;
}
Example #22
void uip_callbackfree(FAR struct uip_callback_s *cb, FAR struct uip_callback_s **list)
{
  FAR struct uip_callback_s *prev;
  FAR struct uip_callback_s *curr;
  uip_lock_t save;

  if (cb)
    {
      /* Find the callback structure in the connection's list */

      save = uip_lock();
      if (list)
        {
          for (prev = NULL, curr = *list;
               curr && curr != cb;
               prev = curr, curr = curr->flink);

          /* Remove the structure from the connection's list */

          if (curr)
            {
              if (prev)
                {
                  prev->flink = cb->flink;
                }
              else
                {
                  *list = cb->flink;
                }
            }
        }

      /* Put the structure into the free list */

      cb->flink    = g_cbfreelist;
      g_cbfreelist = cb;
      uip_unlock(save);
    }
}
Example #23
FAR struct uip_callback_s *uip_callbackalloc(FAR struct uip_callback_s **list)
{
  struct uip_callback_s *ret;
  uip_lock_t save;

  /* Check  the head of the free list */

  save = uip_lock();
  ret  = g_cbfreelist;
  if (ret)
    {
      /* Remove the next instance from the head of the free list */

      g_cbfreelist = ret->flink;
      memset(ret, 0, sizeof(struct uip_callback_s));

      /* Add the newly allocated instance to the head of the specified list */

      if (list)
        {
           ret->flink = *list;
           *list      = ret;
        }
      else
        {
           ret->flink   = NULL;
        }
    }
#ifdef CONFIG_DEBUG
  else
    {
      nlldbg("Failed to allocate callback\n");
    }
#endif

  uip_unlock(save);
  return ret;
}
Example #24
int uip_tcpconnect(struct uip_conn *conn, const struct sockaddr_in *addr)
{
  uip_lock_t flags;
  int port;

  /* The connection is expected to be in the UIP_ALLOCATED state, i.e.,
   * allocated via uip_tcpalloc(), but not yet put into the active connections
   * list.
   */

  if (!conn || conn->tcpstateflags != UIP_ALLOCATED)
    {
      return -EISCONN;
    }

  /* If the TCP port has not already been bound to a local port, then select
   * one now.
   */

  flags = uip_lock();
  port = uip_selectport(ntohs(conn->lport));
  uip_unlock(flags);

  if (port < 0)
    {
      return port;
    }

  /* Initialize and return the connection structure, bind it to the port number */

  conn->tcpstateflags = UIP_SYN_SENT;
  uip_tcpinitsequence(conn->sndseq);

  conn->initialmss = conn->mss = UIP_TCP_MSS;
  conn->unacked    = 1;    /* TCP length of the SYN is one. */
  conn->nrtx       = 0;
  conn->timer      = 1;    /* Send the SYN next time around. */
  conn->rto        = UIP_RTO;
  conn->sa         = 0;
  conn->sv         = 16;   /* Initial value of the RTT variance. */
  conn->lport      = htons((uint16_t)port);

  /* The sockaddr port is 16 bits and already in network order */

  conn->rport = addr->sin_port;

  /* The sockaddr address is 32-bits in network order. */

  uip_ipaddr_copy(conn->ripaddr, addr->sin_addr.s_addr);

  /* Initialize the list of TCP read-ahead buffers */

#if CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0
  sq_init(&conn->readahead);
#endif

  /* And, finally, put the connection structure into the active
   * list.  Because g_active_tcp_connections is accessed from both user level
   * and interrupt level code, it is necessary to keep interrupts disabled
   * during this operation.
   */

  flags = uip_lock();
  dq_addlast(&conn->node, &g_active_tcp_connections);
  uip_unlock(flags);

  return OK;
}
Example #25
struct uip_conn *uip_tcpalloc(void)
{
  struct uip_conn *conn;
  uip_lock_t flags;

  /* Because this routine is called from both interrupt level and
   * user level, we have no option but to disable interrupts
   * while accessing g_free_tcp_connections[].
   */

  flags = uip_lock();

  /* Return the entry from the head of the free list */

  conn = (struct uip_conn *)dq_remfirst(&g_free_tcp_connections);

  /* Is the free list empty? */

  if (!conn)
    {
      /* As a fallback, check for connection structures which are not
       * established yet.
       *
       * Search the active connection list for the oldest connection
       * that is not in the UIP_ESTABLISHED state.
       */

      struct uip_conn *tmp =
        (struct uip_conn *)g_active_tcp_connections.head;
      while (tmp)
        {
          nllvdbg("conn: %p state: %02x\n", tmp, tmp->tcpstateflags);

          /* Is this connection in some state other than UIP_ESTABLISHED
           * state?
           */

          if (tmp->tcpstateflags != UIP_ESTABLISHED)
            {
              /* Yes.. Is it the oldest one we have seen so far? */

              if (!conn || tmp->timer > conn->timer)
                {
                  /* Yes.. remember it */

                  conn = tmp;
                }
            }

          /* Look at the next active connection */

          tmp = (struct uip_conn *)tmp->node.flink;
        }

      /* Did we find a connection that we can re-use? */

      if (conn != NULL)
        {
          nlldbg("Closing unestablished connection: %p\n", conn);

          /* Yes... free it.  This will remove the connection from the list
           * of active connections and release all resources held by the
           * connection.
           *
           * REVISIT:  Could there be any higher level, socket interface
           * that needs to be informed that we did this to them?
           */

          uip_tcpfree(conn);

          /* Now there is guaranteed to be one free connection.  Get it! */

          conn = (struct uip_conn *)dq_remfirst(&g_free_tcp_connections);
        }
    }

  uip_unlock(flags);

  /* Mark the connection allocated */

  if (conn)
    {
      memset(conn, 0, sizeof(struct uip_conn));
      conn->tcpstateflags = UIP_ALLOCATED;
    }

  return conn;
}
Example #26
void uip_tcpfree(struct uip_conn *conn)
{
  FAR struct uip_callback_s *cb;
  FAR struct uip_callback_s *next;
#ifdef CONFIG_NET_TCP_READAHEAD
  FAR struct uip_readahead_s *readahead;
#endif
#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
  FAR struct uip_wrbuffer_s *wrbuffer;
#endif
  uip_lock_t flags;

  /* Because g_free_tcp_connections is accessed from both user level and
   * interrupt level code, it is necessary to keep interrupts disabled during
   * this operation.
   */

  DEBUGASSERT(conn->crefs == 0);
  flags = uip_lock();

  /* Free the remaining callbacks; actually, there should be only the close
   * callback left.
   */

  for (cb = conn->list; cb; cb = next)
    {
      next = cb->flink;
      uip_tcpcallbackfree(conn, cb);
    }

  /* UIP_ALLOCATED means that the connection is not in the active list
   * yet.
   */

  if (conn->tcpstateflags != UIP_ALLOCATED)
    {
      /* Remove the connection from the active list */

      dq_rem(&conn->node, &g_active_tcp_connections);
    }

#ifdef CONFIG_NET_TCP_READAHEAD
  /* Release any read-ahead buffers attached to the connection */

  while ((readahead = (struct uip_readahead_s *)sq_remfirst(&conn->readahead)) != NULL)
    {
      uip_tcpreadahead_release(readahead);
    }
#endif

#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
  /* Release any write buffers attached to the connection */

  while ((wrbuffer = (struct uip_wrbuffer_s *)sq_remfirst(&conn->write_q)) != NULL)
    {
      uip_tcpwrbuffer_release(wrbuffer);
    }

  while ((wrbuffer = (struct uip_wrbuffer_s *)sq_remfirst(&conn->unacked_q)) != NULL)
    {
      uip_tcpwrbuffer_release(wrbuffer);
    }
#endif

#ifdef CONFIG_NET_TCPBACKLOG
  /* Remove any backlog attached to this connection */

  if (conn->backlog)
    {
      uip_backlogdestroy(conn);
    }

  /* If this connection is, itself, backlogged, then remove it from the
   * parent connection's backlog list.
   */

  if (conn->blparent)
    {
      uip_backlogdelete(conn->blparent, conn);
    }
#endif

  /* Mark the connection available and put it into the free list */

  conn->tcpstateflags = UIP_CLOSED;
  dq_addlast(&conn->node, &g_free_tcp_connections);
  uip_unlock(flags);
}
Example #27
ssize_t psock_sendto(FAR struct socket *psock, FAR const void *buf,
                     size_t len, int flags, FAR const struct sockaddr *to,
                     socklen_t tolen)
{
#ifdef CONFIG_NET_UDP
  FAR struct uip_udp_conn *conn;
#ifdef CONFIG_NET_IPv6
  FAR const struct sockaddr_in6 *into = (const struct sockaddr_in6 *)to;
#else
  FAR const struct sockaddr_in *into = (const struct sockaddr_in *)to;
#endif
  struct sendto_s state;
  uip_lock_t save;
  int ret;
#endif
  int err;

  /* If to is NULL or tolen is zero, then this function is the same as send
   * (for connected socket types)
   */

  if (!to || !tolen)
    {
#ifdef CONFIG_NET_TCP
      return psock_send(psock, buf, len, flags);
#else
      err = EINVAL;
      goto errout;
#endif
    }

  /* Verify that a valid address has been provided */

#ifdef CONFIG_NET_IPv6
  if (to->sa_family != AF_INET6 || tolen < sizeof(struct sockaddr_in6))
#else
  if (to->sa_family != AF_INET || tolen < sizeof(struct sockaddr_in))
#endif
    {
      err = EBADF;
      goto errout;
    }

  /* Verify that the psock corresponds to valid, allocated socket */

  if (!psock || psock->s_crefs <= 0)
    {
      err = EBADF;
      goto errout;
    }

  /* If this is a connected socket, then return EISCONN */

  if (psock->s_type != SOCK_DGRAM)
    {
      err = EISCONN;
      goto errout;
    }

  /* Perform the UDP sendto operation */

#ifdef CONFIG_NET_UDP
  /* Set the socket state to sending */

  psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_SEND);

  /* Initialize the state structure.  This is done with interrupts
   * disabled because we don't want anything to happen until we
   * are ready.
   */

  save = uip_lock();
  memset(&state, 0, sizeof(struct sendto_s));
  sem_init(&state.st_sem, 0, 0);
  state.st_buflen = len;
  state.st_buffer = buf;

  /* Set the initial time for calculating timeouts */

#ifdef CONFIG_NET_SENDTO_TIMEOUT
  state.st_sock = psock;
  state.st_time = clock_systimer();
#endif

  /* Setup the UDP socket */

  conn = (struct uip_udp_conn *)psock->s_conn;
  ret = uip_udpconnect(conn, into);
  if (ret < 0)
    {
      uip_unlock(save);
      err = -ret;
      goto errout;
    }

  /* Set up the callback in the connection */

  state.st_cb = uip_udpcallbackalloc(conn);
  if (state.st_cb)
    {
      state.st_cb->flags   = UIP_POLL;
      state.st_cb->priv    = (void*)&state;
      state.st_cb->event   = sendto_interrupt;

      /* Notify the device driver of the availability of TX data */

      netdev_txnotify(conn->ripaddr);

      /* Wait for either the send to complete or for an error/timeout to occur.
       * NOTES:  (1) uip_lockedwait will also terminate if a signal is received, (2)
       * interrupts may be disabled!  They will be re-enabled while the task sleeps
       * and automatically re-disabled when the task restarts.
       */

      uip_lockedwait(&state.st_sem);

      /* Make sure that no further interrupts are processed */

      uip_udpcallbackfree(conn, state.st_cb);
    }
  uip_unlock(save);

  sem_destroy(&state.st_sem);

  /* Set the socket state to idle */

  psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_IDLE);

  /* Check for errors */

  if (state.st_sndlen < 0)
    {
      err = -state.st_sndlen;
      goto errout;
    }

  /* Success */

  return state.st_sndlen;
#else
  err = ENOSYS;
#endif

errout:
  set_errno(err);
  return ERROR;
}
Example #28
File: accept.c  Project: 1015472/PX4NuttX
int accept(int sockfd, struct sockaddr *addr, socklen_t *addrlen)
{
  FAR struct socket *psock = sockfd_socket(sockfd);
  FAR struct socket *pnewsock;
  FAR struct uip_conn *conn;
  struct accept_s state;
#ifdef CONFIG_NET_IPv6
  FAR struct sockaddr_in6 *inaddr = (struct sockaddr_in6 *)addr;
#else
  FAR struct sockaddr_in *inaddr = (struct sockaddr_in *)addr;
#endif
  uip_lock_t save;
  int newfd;
  int err;
  int ret;

  /* Verify that the sockfd corresponds to valid, allocated socket */

  if (!psock || psock->s_crefs <= 0)
    {
      /* It is not a valid socket descriptor.  Distinguish between the case
       * where sockfd is simply invalid and the case where it is a valid file
       * descriptor used in the wrong context.
       */

#if CONFIG_NFILE_DESCRIPTORS > 0
      if ((unsigned int)sockfd < CONFIG_NFILE_DESCRIPTORS)
        {
          err = ENOTSOCK;
        }
      else
#endif
        {
          err = EBADF;
        }
      goto errout;
    }

  /* We have a socket descriptor, but is it a stream? */

  if (psock->s_type != SOCK_STREAM)
    {
      err = EOPNOTSUPP;
      goto errout;
    }

  /* Is the socket listening for a connection? */

  if (!_SS_ISLISTENING(psock->s_flags))
    {
      err = EINVAL;
      goto errout;
    }

  /* Verify that a valid memory block has been provided to receive the
   * address
   */

  if (addr)
    {
#ifdef CONFIG_NET_IPv6
      if (*addrlen < sizeof(struct sockaddr_in6))
#else
      if (*addrlen < sizeof(struct sockaddr_in))
#endif
        {
          err = EBADF;
          goto errout;
        }
    }

  /* Allocate a socket descriptor for the new connection now (so that it
   * cannot fail later)
   */

  newfd = sockfd_allocate(0);
  if (newfd < 0)
    {
      err = ENFILE;
      goto errout;
    }

  pnewsock = sockfd_socket(newfd);
  if (!pnewsock)
    {
      err = ENFILE;
      goto errout_with_socket;
    }

  /* Check the backlog to see if there is a connection already pending for
   * this listener.
   */

  save = uip_lock();
  conn = (struct uip_conn *)psock->s_conn;

#ifdef CONFIG_NET_TCPBACKLOG
  state.acpt_newconn = uip_backlogremove(conn);
  if (state.acpt_newconn)
    {
      /* Yes... get the address of the connected client */

      nvdbg("Pending conn=%p\n", state.acpt_newconn);
      accept_tcpsender(state.acpt_newconn, inaddr);
    }

  /* In general, this uIP-based implementation will not support non-blocking
   * socket operations... except in a few cases:  Here for TCP accept with backlog
   * enabled.  If this socket is configured as non-blocking then return EAGAIN
   * if there is no pending connection in the backlog.
   */

  else if (_SS_ISNONBLOCK(psock->s_flags))
    {
      err = EAGAIN;
      goto errout_with_lock;
    }
  else
#endif
    {
      /* Set the socket state to accepting */

      psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_ACCEPT);

      /* Perform the TCP accept operation */

      /* Initialize the state structure.  This is done with interrupts
       * disabled because we don't want anything to happen until we
       * are ready.
       */

      state.acpt_addr       = inaddr;
      state.acpt_newconn    = NULL;
      state.acpt_result     = OK;
      sem_init(&state.acpt_sem, 0, 0);

      /* Set up the callback in the connection */

      conn->accept_private  = (void*)&state;
      conn->accept          = accept_interrupt;

      /* Wait for the accept to complete or an error to occur:  NOTES: (1)
       * uip_lockedwait will also terminate if a signal is received, (2) interrupts
       * may be disabled!  They will be re-enabled while the task sleeps and
       * automatically re-disabled when the task restarts.
       */

      ret = uip_lockedwait(&state.acpt_sem);

      /* Make sure that no further interrupts are processed */

      conn->accept_private = NULL;
      conn->accept         = NULL;

      sem_destroy(&state.acpt_sem);

      /* Set the socket state to idle */

      psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_IDLE);

      /* Check for errors.  Errors are signaled by non-zero values in the
       * accept result.
       */

      if (state.acpt_result != 0)
        {
          err = state.acpt_result;
          goto errout_with_lock;
        }

      /* If uip_lockedwait failed, then we were probably reawakened by a signal. In
       * this case, uip_lockedwait will have set errno appropriately.
       */

      if (ret < 0)
        {
          err = -ret;
          goto errout_with_lock;
        }
    }

  /* Initialize the socket structure and mark the socket as connected.
   * (The reference count on the new connection structure was set in the
   * interrupt handler).
   */

  pnewsock->s_type   = SOCK_STREAM;
  pnewsock->s_conn   = state.acpt_newconn;
  pnewsock->s_flags |= _SF_CONNECTED;
  pnewsock->s_flags &= ~_SF_CLOSED;

  /* Begin monitoring for TCP connection events on the newly connected socket */

  net_startmonitor(pnewsock);
  uip_unlock(save);
  return newfd;

errout_with_lock:
  uip_unlock(save);

errout_with_socket:
  sockfd_release(newfd);

errout:
  errno = err;
  return ERROR;
}
Example #29
int uip_ping(uip_ipaddr_t addr, uint16_t id, uint16_t seqno,
             uint16_t datalen, int dsecs)
{
  struct icmp_ping_s state;
  uip_lock_t save;

  /* Initialize the state structure */

  sem_init(&state.png_sem, 0, 0);
  state.png_ticks  = DSEC2TICK(dsecs); /* System ticks to wait */
  state.png_result = -ENOMEM;          /* Assume allocation failure */
  state.png_addr   = addr;             /* Address of the peer to be ping'ed */
  state.png_id     = id;               /* The ID to use in the ECHO request */
  state.png_seqno  = seqno;            /* The seqno to use in the ECHO request */
  state.png_datlen = datalen;          /* The length of data to send in the ECHO request */
  state.png_sent   = false;            /* ECHO request not yet sent */

  save             = uip_lock();
  state.png_time   = clock_systimer();

  /* Set up the callback */

  state.png_cb = uip_icmpcallbackalloc();
  if (state.png_cb)
    {
      state.png_cb->flags   = UIP_POLL|UIP_ECHOREPLY;
      state.png_cb->priv    = (void*)&state;
      state.png_cb->event   = ping_interrupt;
      state.png_result      = -EINTR; /* Assume sem-wait interrupted by signal */

      /* Notify the device driver of the availability of TX data */

      netdev_txnotify(&state.png_addr);

      /* Wait for either the full round trip transfer to complete or
       * for timeout to occur. (1) uip_lockedwait will also terminate if a
       * signal is received, (2) interrupts may be disabled!  They will
       * be re-enabled while the task sleeps and automatically
       * re-disabled when the task restarts.
       */

      nlldbg("Start time: 0x%08x seqno: %d\n", state.png_time, seqno);
      uip_lockedwait(&state.png_sem);

      uip_icmpcallbackfree(state.png_cb);
    }
  uip_unlock(save);

  /* Return the negated error number in the event of a failure, or the
   * sequence number of the ECHO reply on success.
   */

  if (!state.png_result)
    {
      nlldbg("Return seqno=%d\n", state.png_seqno);
      return (int)state.png_seqno;
    }
  else
    {
      nlldbg("Return error=%d\n", -state.png_result);
      return state.png_result;
    }
}
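uip_ping() blocks until the ECHO reply arrives or the dsecs timeout expires, returning the sequence number of the reply on success or a negated errno value on failure. A hypothetical caller follows; the identifier, sequence number, payload size, and timeout values are illustrative only.

/* Hypothetical caller of uip_ping().  ID 0x1234, sequence number 1,
 * 32 bytes of payload, and a 10 decisecond (1 second) timeout are
 * illustrative values only.
 */

static int sample_ping(uip_ipaddr_t target)
{
  int result = uip_ping(target, 0x1234, 1, 32, 10);

  if (result < 0)
    {
      ndbg("ping failed: %d\n", result);   /* Negated errno value */
    }
  else
    {
      nvdbg("reply seqno: %d\n", result);  /* Sequence number of the reply */
    }

  return result;
}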
Example #30
int ieee80211_output(FAR struct ieee80211_s *ic, FAR struct iob_s *iob,
                     FAR struct sockaddr *dst, uint8_t flags)
{
  FAR struct uip_driver_s *dev;
  FAR struct ieee80211_frame *wh;
  FAR struct m_tag *mtag;
  uip_lock_t uipflags;     /* Saved lock state; 'flags' is already a parameter */
  int error = 0;

  /* Get the driver structure */

  dev = netdev_findbyaddr(ic->ic_ifname);
  if (!dev)
    {
      error = -ENODEV;
      goto bad;
    }

  /* Interface has to be up and running */

  if ((dev->d_flags & (IFF_UP | IFF_RUNNING)) != (IFF_UP | IFF_RUNNING))
    {
      error = -ENETDOWN;
      goto bad;
    }

  /* Try to get the DLT from a buffer tag */

  if ((mtag = m_tag_find(iob, PACKET_TAG_DLT, NULL)) != NULL)
    {
      unsigned int dlt = *(unsigned int *)(mtag + 1);

      /* Fallback to Ethernet for non-802.11 linktypes */

      if (!(dlt == DLT_IEEE802_11 || dlt == DLT_IEEE802_11_RADIO))
        {
          goto fallback;
        }

      if (iob->io_pktlen < sizeof(struct ieee80211_frame_min))
        {
          return -EINVAL;
        }

      wh = (FAR struct ieee80211_frame *)IOB_DATA(iob);
      if ((wh->i_fc[0] & IEEE80211_FC0_VERSION_MASK) != IEEE80211_FC0_VERSION_0)
        {
          return -EINVAL;
        }

      if (!(ic->ic_caps & IEEE80211_C_RAWCTL) &&
          (wh->i_fc[0] & IEEE80211_FC0_TYPE_MASK) == IEEE80211_FC0_TYPE_CTL)
        {
          return -EINVAL;
        }

      /* Queue message on interface without adding any further headers, and
       * start output if interface not yet active.
       */

      uipflags = uip_lock();
      error = ieee80211_ifsend(ic, iob, flags);
      if (error)
        {
          /* buffer is already freed */

          uip_unlock(uipflags);
          ndbg("ERROR: %s: failed to queue raw tx frame\n", ic->ic_ifname);
          return error;
        }

      uip_unlock(uipflags);
      return error;
    }

fallback:
  return ether_output(ic, iob, dst);

bad:
  if (iob)
    {
      iob_free_chain(iob);
    }

  return error;
}