Example #1
void uip_tcpfree(struct uip_conn *conn)
{
#if CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0
  struct uip_readahead_s *readahead;
#endif
  uip_lock_t flags;

  /* Because g_free_tcp_connections is accessed from user level and interrupt
   * level code, it is necessary to keep interrupts disabled during this
   * operation.
   */

  DEBUGASSERT(conn->crefs == 0);
  flags = uip_lock();

  /* UIP_ALLOCATED means that the connection is not in the active list
   * yet.
   */

  if (conn->tcpstateflags != UIP_ALLOCATED)
    {
      /* Remove the connection from the active list */

      dq_rem(&conn->node, &g_active_tcp_connections);
    }

  /* Release any read-ahead buffers attached to the connection */

#if CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0
  while ((readahead = (struct uip_readahead_s *)sq_remfirst(&conn->readahead)) != NULL)
    {
      uip_tcpreadaheadrelease(readahead);
    }
#endif

  /* Remove any backlog attached to this connection */

#ifdef CONFIG_NET_TCPBACKLOG
  if (conn->backlog)
    {
      uip_backlogdestroy(conn);
    }

  /* If this connection is, itself, backlogged, then remove it from the
   * parent connection's backlog list.
   */

  if (conn->blparent)
    {
      uip_backlogdelete(conn->blparent, conn);
    }
#endif

  /* Mark the connection available and put it into the free list */

  conn->tcpstateflags = UIP_CLOSED;
  dq_addlast(&conn->node, &g_free_tcp_connections);
  uip_unlock(flags);
}
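
A minimal sketch of the reference-counting contract implied by the DEBUGASSERT above. The caller name my_conn_release is hypothetical (not from the NuttX sources); the point is that the last holder drops crefs under the uIP lock and only then hands the connection to uip_tcpfree(), which expects crefs == 0 on entry.

static void my_conn_release(FAR struct uip_conn *conn)
{
  uip_lock_t flags;

  flags = uip_lock();
  DEBUGASSERT(conn->crefs > 0);

  if (--conn->crefs == 0)
    {
      /* This was the last reference; uip_tcpfree() asserts crefs == 0 */

      uip_unlock(flags);
      uip_tcpfree(conn);
    }
  else
    {
      uip_unlock(flags);
    }
}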
Example #2
void uip_tcpfree(struct uip_conn *conn)
{
  FAR struct uip_callback_s *cb;
  FAR struct uip_callback_s *next;
#ifdef CONFIG_NET_TCP_READAHEAD
  FAR struct uip_readahead_s *readahead;
#endif
#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
  FAR struct uip_wrbuffer_s *wrbuffer;
#endif
  uip_lock_t flags;

  /* Because g_free_tcp_connections is accessed from user level and interrupt
   * level code, it is necessary to keep interrupts disabled during this
   * operation.
   */

  DEBUGASSERT(conn->crefs == 0);
  flags = uip_lock();

  /* Free the remaining callbacks; at this point there should be only the
   * close callback left.
   */

  for (cb = conn->list; cb; cb = next)
    {
      next = cb->flink;
      uip_tcpcallbackfree(conn, cb);
    }

  /* UIP_ALLOCATED means that the connection is not in the active list
   * yet.
   */

  if (conn->tcpstateflags != UIP_ALLOCATED)
    {
      /* Remove the connection from the active list */

      dq_rem(&conn->node, &g_active_tcp_connections);
    }

#ifdef CONFIG_NET_TCP_READAHEAD
  /* Release any read-ahead buffers attached to the connection */

  while ((readahead = (struct uip_readahead_s *)sq_remfirst(&conn->readahead)) != NULL)
    {
      uip_tcpreadahead_release(readahead);
    }
#endif

#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
  /* Release any write buffers attached to the connection */

  while ((wrbuffer = (struct uip_wrbuffer_s *)sq_remfirst(&conn->write_q)) != NULL)
    {
      uip_tcpwrbuffer_release(wrbuffer);
    }

  while ((wrbuffer = (struct uip_wrbuffer_s *)sq_remfirst(&conn->unacked_q)) != NULL)
    {
      uip_tcpwrbuffer_release(wrbuffer);
    }
#endif

#ifdef CONFIG_NET_TCPBACKLOG
  /* Remove any backlog attached to this connection */

  if (conn->backlog)
    {
      uip_backlogdestroy(conn);
    }

  /* If this connection is, itself, backlogged, then remove it from the
   * parent connection's backlog list.
   */

  if (conn->blparent)
    {
      uip_backlogdelete(conn->blparent, conn);
    }
#endif

  /* Mark the connection available and put it into the free list */

  conn->tcpstateflags = UIP_CLOSED;
  dq_addlast(&conn->node, &g_free_tcp_connections);
  uip_unlock(flags);
}
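
The callback loop at the top of Example #2 uses the standard safe-deletion pattern for a singly linked list: the next pointer is captured before the current node is released, since the node is invalid afterward. A generic, standalone sketch of the same pattern (struct node_s and free_list() are illustrative, not NuttX APIs):

#include <stdlib.h>

struct node_s
{
  struct node_s *flink;   /* Same role as the flink in uip_callback_s */
};

static void free_list(struct node_s *head)
{
  struct node_s *node;
  struct node_s *next;

  for (node = head; node != NULL; node = next)
    {
      next = node->flink;  /* Save before freeing; node is gone after this */
      free(node);
    }
}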
Example #3
int uip_backlogcreate(FAR struct uip_conn *conn, int nblg)
{
  FAR struct uip_backlog_s     *bls = NULL;
  FAR struct uip_blcontainer_s *blc;
  uip_lock_t flags;
  int size;
  int offset;
  int i;

  nllvdbg("conn=%p nblg=%d\n", conn, nblg);

#ifdef CONFIG_DEBUG
  if (!conn)
    {
      return -EINVAL;
    }
#endif

  /* Then allocate the backlog as requested */

  if (nblg > 0)
    {
      /* Align the list of backlog structures to 32-bit boundaries.  This
       * may be excessive on 16- and 24-bit address machines and insufficient
       * on 64-bit address machines -- REVISIT
       */

      offset = (sizeof(struct uip_backlog_s) + 3) & ~3;

      /* Then determine the full size of the allocation, including the
       * uip_backlog_s, a pre-allocated array of struct uip_blcontainer_s,
       * and alignment padding
       */

      size = offset + nblg * sizeof(struct uip_blcontainer_s);

      /* Then allocate that much */

      bls = (FAR struct uip_backlog_s *)kzalloc(size);
      if (!bls)
        {
          nlldbg("Failed to allocate backlog\n");
          return -ENOMEM;
        }

      /* Then add all of the pre-allocated containers to the free list */

      blc = (FAR struct uip_blcontainer_s *)(((FAR uint8_t *)bls) + offset);
      for (i = 0; i < nblg; i++)
        {
          sq_addfirst(&blc->bc_node, &bls->bl_free);
          blc++;
        }
    }

  /* Destroy any existing backlog (shouldn't be any) */

  flags = uip_lock();
  uip_backlogdestroy(conn);

  /* Now install the backlog tear-off in the connection.  NOTE that bls may
   * actually be NULL if nblg is <= 0; in that case, we are disabling backlog
   * support.  Since interrupts are disabled, destroying the old backlog and
   * replacing it with the new one is an atomic operation.
   */

  conn->backlog = bls;
  uip_unlock(flags);
  return OK;
}
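
A standalone sketch of the single-allocation layout built by uip_backlogcreate(). The struct names header_s and container_s are stand-ins with hypothetical contents; the rounding expression matches the (x + 3) & ~3 used above. The header is rounded up to a 4-byte boundary and the container array begins immediately after it, all inside one zeroed allocation (calloc standing in for kzalloc).

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

struct header_s    { void *bl_free; };   /* Stand-in for uip_backlog_s */
struct container_s { void *bc_node; };   /* Stand-in for uip_blcontainer_s */

int main(void)
{
  int    nblg   = 8;
  size_t offset = (sizeof(struct header_s) + 3) & ~(size_t)3;
  size_t size   = offset + nblg * sizeof(struct container_s);

  /* One zeroed allocation: header first, container array right behind it */

  uint8_t *blob = calloc(1, size);
  if (blob == NULL)
    {
      return 1;
    }

  struct header_s    *bls = (struct header_s *)blob;
  struct container_s *blc = (struct container_s *)(blob + offset);

  printf("offset=%zu total=%zu bls=%p blc[0]=%p\n",
         offset, size, (void *)bls, (void *)blc);

  free(blob);
  return 0;
}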