Example #1
int work_queue(int qid, FAR struct work_s *work, worker_t worker,
               FAR void *arg, uint32_t delay)
{
  FAR struct wqueue_s *wqueue = &g_work[qid];
  irqstate_t flags;

  DEBUGASSERT(work != NULL && (unsigned)qid < NWORKERS);

  /* First, initialize the work structure */

  work->worker = worker;           /* Work callback */
  work->arg    = arg;              /* Callback argument */
  work->delay  = delay;            /* Delay until work performed */

  /* Now, time-tag that entry and put it in the work queue.  This must be
   * done with interrupts disabled.  This permits this function to be called
   * from within task logic or interrupt handlers.
   */

  flags        = irqsave();
  work->qtime  = clock_systimer(); /* Time work queued */

  dq_addlast((FAR dq_entry_t *)work, &wqueue->q);
  kill(wqueue->pid, SIGWORK);      /* Wake up the worker thread */

  irqrestore(flags);
  return OK;
}
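
All of the examples in this collection funnel through dq_addlast(). For reference, here is a minimal self-contained sketch of the operation, assuming the usual NuttX layout (these stand-in type definitions normally come from the system <queue.h> headers):

#include <stddef.h>

/* Stand-ins for the NuttX dq types; the real definitions live in the
 * system headers.
 */

typedef struct dq_entry_s
{
  struct dq_entry_s *flink;              /* Forward link */
  struct dq_entry_s *blink;              /* Backward link */
} dq_entry_t;

typedef struct dq_queue_s
{
  dq_entry_t *head;                      /* First entry, or NULL if empty */
  dq_entry_t *tail;                      /* Last entry, or NULL if empty */
} dq_queue_t;

/* Append 'node' at the tail of 'queue' in O(1).  The caller is responsible
 * for locking, which is why the examples wrap the call in
 * irqsave()/irqrestore(), work_lock(), or a semaphore.
 */

static void dq_addlast(dq_entry_t *node, dq_queue_t *queue)
{
  node->flink = NULL;                    /* New node becomes the tail */
  node->blink = queue->tail;

  if (queue->head == NULL)
    {
      /* The queue was empty: node is now both head and tail */

      queue->head = node;
      queue->tail = node;
    }
  else
    {
      /* Link the old tail forward to the new node */

      queue->tail->flink = node;
      queue->tail = node;
    }
}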
Example #2
int hrt_work_queue(struct work_s *work, worker_t worker, void *arg, uint32_t delay)
{
  struct wqueue_s *wqueue = &g_hrt_work;

  /* First, initialize the work structure */

  work->worker = worker;           /* Work callback */
  work->arg    = arg;              /* Callback argument */
  work->delay  = delay;            /* Delay until work performed */

  /* Now, time-tag that entry and put it in the work queue.  This must be
   * done with interrupts disabled.  This permits this function to be called
   * from within task logic or interrupt handlers.
   */

  hrt_work_lock();
  work->qtime  = hrt_absolute_time(); /* Time work queued */
  //PX4_INFO("hrt work_queue adding work delay=%u time=%lu", delay, work->qtime);

  dq_addlast((dq_entry_t *)work, &wqueue->q);
  px4_task_kill(wqueue->pid, SIGALRM);      /* Wake up the worker thread */

  hrt_work_unlock();
  return PX4_OK;
}
Example #3
int work_queue(int qid, struct work_s *work, worker_t worker, void *arg, uint32_t delay)
{
  struct wqueue_s *wqueue = &g_work[qid];

  //DEBUGASSERT(work != NULL && (unsigned)qid < NWORKERS);

  /* First, initialize the work structure */

  work->worker = worker;           /* Work callback */
  work->arg    = arg;              /* Callback argument */
  work->delay  = delay;            /* Delay until work performed */

  /* Now, time-tag that entry and put it in the work queue.  This must be
   * done with interrupts disabled.  This permits this function to be called
   * from within task logic or interrupt handlers.
   */

  work_lock(qid);
  work->qtime  = clock_systimer(); /* Time work queued */

  dq_addlast((dq_entry_t *)work, &wqueue->q);
#ifdef __PX4_QURT
  px4_task_kill(wqueue->pid, SIGALRM);      /* Wake up the worker thread */
#else
  px4_task_kill(wqueue->pid, SIGCONT);      /* Wake up the worker thread */
#endif

  work_unlock(qid);
  return PX4_OK;
}
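
For context, here is a hypothetical caller-side sketch of the work_queue() API shown above. The names sample_worker, g_sample_work, and g_sample_count are illustrative, not taken from any example; the struct work_s and worker_t declarations would come from <nuttx/wqueue.h>, and HPWORK is assumed to be the NuttX high-priority queue ID:

/* Hypothetical usage of work_queue(); all names below are illustrative. */

static struct work_s g_sample_work;      /* Must stay allocated while queued */
static int g_sample_count;

static void sample_worker(FAR void *arg) /* Matches the worker_t signature */
{
  FAR int *count = (FAR int *)arg;
  (*count)++;                            /* Runs on the worker thread */
}

static void sample_schedule(void)
{
  /* Run sample_worker(&g_sample_count) on the high-priority queue
   * (HPWORK) after a delay of 10 system ticks.
   */

  work_queue(HPWORK, &g_sample_work, sample_worker, &g_sample_count, 10);
}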
Example #4
/// @brief Returns a no-longer-needed request to the queue
void
MavlinkFTP::_return_request(Request *req)
{
	_lock_request_queue();
	dq_addlast(&req->work.dq, &_request_queue);
	_unlock_request_queue();
}
Example #5
void sched_addblocked(FAR _TCB *btcb, tstate_t task_state)
{
  /* Make sure that we received a valid blocked state */

  ASSERT(task_state >= FIRST_BLOCKED_STATE &&
         task_state <= LAST_BLOCKED_STATE);

  /* Add the TCB to the blocked task list associated with this state.
   * First, determine if the task is to be added to a prioritized task
   * list
   */

  if (g_tasklisttable[task_state].prioritized)
    {
      /* Add the task to a prioritized list */

      sched_addprioritized(btcb,
                           (FAR dq_queue_t*)g_tasklisttable[task_state].list);
    }
  else
    {
      /* Add the task to a non-prioritized list */

      dq_addlast((FAR dq_entry_t*)btcb,
                 (FAR dq_queue_t*)g_tasklisttable[task_state].list);
    }

  /* Make sure the TCB's state corresponds to the list */

  btcb->task_state = task_state;
}
Example #6
static int work_qqueue(FAR struct usr_wqueue_s *wqueue,
                       FAR struct work_s *work, worker_t worker,
                       FAR void *arg, uint32_t delay)
{
    DEBUGASSERT(work != NULL);

    /* First, initialize the work structure */

    work->worker = worker;           /* Work callback */
    work->arg    = arg;              /* Callback argument */
    work->delay  = delay;            /* Delay until work performed */

    /* Get exclusive access to the work queue */

    while (work_lock() < 0);

    /* Now, time-tag that entry and put it in the work queue. */

    work->qtime  = clock_systimer(); /* Time work queued */

    dq_addlast((FAR dq_entry_t *)work, &wqueue->q);
    kill(wqueue->pid, SIGWORK);   /* Wake up the worker thread */

    work_unlock();
    return OK;
}
Example #7
static void work_qqueue(FAR struct kwork_wqueue_s *wqueue,
                        FAR struct work_s *work, worker_t worker,
                        FAR void *arg, systime_t delay)
{
  irqstate_t flags;

  DEBUGASSERT(work != NULL && worker != NULL);

  /* First, initialize the work structure.  This must be done with interrupts
   * disabled.  This permits this function to be called from within task logic
   * or interrupt handlers.
   */

  flags        = irqsave();
  work->worker = worker;           /* Work callback. non-NULL means queued */
  work->arg    = arg;              /* Callback argument */
  work->delay  = delay;            /* Delay until work performed */

  /* Now, time-tag that entry and put it in the work queue */

  work->qtime  = clock_systimer(); /* Time work queued */

  dq_addlast((FAR dq_entry_t *)work, &wqueue->q);

  irqrestore(flags);
}
Example #8
void uip_tcpfree(struct uip_conn *conn)
{
#if CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0
  struct uip_readahead_s *readahead;
#endif
  uip_lock_t flags;

  /* Because g_free_tcp_connections is accessed from user level and interrupt
   * level code, it is necessary to keep interrupts disabled during this
   * operation.
   */

  DEBUGASSERT(conn->crefs == 0);
  flags = uip_lock();

  /* UIP_ALLOCATED means that the connection is not in the active list
   * yet.
   */

  if (conn->tcpstateflags != UIP_ALLOCATED)
    {
      /* Remove the connection from the active list */

      dq_rem(&conn->node, &g_active_tcp_connections);
    }

  /* Release any read-ahead buffers attached to the connection */

#if CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0
  while ((readahead = (struct uip_readahead_s *)sq_remfirst(&conn->readahead)) != NULL)
    {
      uip_tcpreadaheadrelease(readahead);
    }
#endif

  /* Remove any backlog attached to this connection */

#ifdef CONFIG_NET_TCPBACKLOG
  if (conn->backlog)
    {
      uip_backlogdestroy(conn);
    }

  /* If this connection is, itself, backlogged, then remove it from the
   * parent connection's backlog list.
   */

  if (conn->blparent)
    {
      uip_backlogdelete(conn->blparent, conn);
    }
#endif

  /* Mark the connection available and put it into the free list */

  conn->tcpstateflags = UIP_CLOSED;
  dq_addlast(&conn->node, &g_free_tcp_connections);
  uip_unlock(flags);
}
Example #9
struct uip_conn *uip_tcpaccept(struct uip_tcpip_hdr *buf)
{
  struct uip_conn *conn = uip_tcpalloc();
  if (conn)
    {
      /* Fill in the necessary fields for the new connection. */

      conn->rto           = UIP_RTO;
      conn->timer         = UIP_RTO;
      conn->sa            = 0;
      conn->sv            = 4;
      conn->nrtx          = 0;
      conn->lport         = buf->destport;
      conn->rport         = buf->srcport;
      conn->mss           = UIP_TCP_INITIAL_MSS;
      uip_ipaddr_copy(conn->ripaddr, uip_ip4addr_conv(buf->srcipaddr));
      conn->tcpstateflags = UIP_SYN_RCVD;

      uip_tcpinitsequence(conn->sndseq);
      conn->unacked       = 1;
#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
      conn->expired       = 0;
      conn->isn           = 0;
      conn->sent          = 0;
#endif

      /* rcvseq should be the seqno from the incoming packet + 1. */

      memcpy(conn->rcvseq, buf->seqno, 4);

#ifdef CONFIG_NET_TCP_READAHEAD
      /* Initialize the list of TCP read-ahead buffers */

      sq_init(&conn->readahead);
#endif

#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
      /* Initialize the write buffer lists */

      sq_init(&conn->write_q);
      sq_init(&conn->unacked_q);
#endif

      /* And, finally, put the connection structure into the active list.
       * Interrupts should already be disabled in this context.
       */

      dq_addlast(&conn->node, &g_active_tcp_connections);
    }

  return conn;
}
Example #10
void dq_addafter(FAR dq_entry_t *prev, FAR dq_entry_t *node,
                 dq_queue_t *queue)
{
  if (!queue->head || prev == queue->tail)
    {
      dq_addlast(node, queue);
    }
  else
    {
      FAR dq_entry_t *next = prev->flink;
      node->blink = prev;
      node->flink = next;
      next->blink = node;
      prev->flink = node;
    }
}
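
Example #21 further below also uses dq_addbefore(), the mirror image of dq_addafter(). A sketch consistent with the same flink/blink layout:

/* Insert 'node' immediately before 'next' in 'queue'.  Mirror image of
 * dq_addafter() above; a sketch based on the same flink/blink layout.
 */

void dq_addbefore(FAR dq_entry_t *next, FAR dq_entry_t *node,
                  dq_queue_t *queue)
{
  if (!queue->head || next == queue->head)
    {
      /* Empty queue, or inserting in front of the head */

      node->flink = queue->head;
      node->blink = NULL;

      if (queue->head)
        {
          queue->head->blink = node;
        }
      else
        {
          queue->tail = node;
        }

      queue->head = node;
    }
  else
    {
      FAR dq_entry_t *prev = next->blink;

      node->flink = next;
      node->blink = prev;
      prev->flink = node;
      next->blink = node;
    }
}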
Example #11
void aioc_free(FAR struct aio_container_s *aioc)
{
	DEBUGASSERT(aioc);

	/* Return the container to the free list */

	aio_lock();
	dq_addlast(&aioc->aioc_link, &g_aioc_free);
	aio_unlock();

	/* Then post the counting semaphore, announcing the availability of a
	 * free AIO container.
	 */

	sem_post(&g_aioc_freesem);
}
Example #12
void uip_udpfree(struct uip_udp_conn *conn)
{
  /* The free list is only accessed from user, non-interrupt level and
   * is protected by a semaphore (that behaves like a mutex).
   */

  DEBUGASSERT(conn->crefs == 0);

  _uip_semtake(&g_free_sem);
  conn->lport = 0;

  /* Remove the connection from the active list */

  dq_rem(&conn->node, &g_active_udp_connections);

  /* Free the connection */

  dq_addlast(&conn->node, &g_free_udp_connections);
  _uip_semgive(&g_free_sem);
}
Example #13
void uip_udpinit(void)
{
  int i;

  /* Initialize the queues */

  dq_init(&g_free_udp_connections);
  dq_init(&g_active_udp_connections);
  sem_init(&g_free_sem, 0, 1);

  for (i = 0; i < CONFIG_NET_UDP_CONNS; i++)
    {
      /* Mark the connection closed and move it to the free list */

      g_udp_connections[i].lport = 0;
      dq_addlast(&g_udp_connections[i].node, &g_free_udp_connections);
    }

  g_last_udp_port = 1024;
}
Example #14
void netlink_initialize(void)
{
  int i;

  /* Initialize the queues */

  dq_init(&g_free_netlink_connections);
  dq_init(&g_active_netlink_connections);
  nxsem_init(&g_free_sem, 0, 1);

  for (i = 0; i < CONFIG_NET_NETLINK_CONNS; i++)
    {
      FAR struct netlink_conn_s *conn = &g_netlink_connections[i];

      /* Mark the connection closed and move it to the free list */

      memset(conn, 0, sizeof(*conn));
      dq_addlast(&conn->node, &g_free_netlink_connections);
    }
}
Example #15
void uip_tcpinit(void)
{
  int i;

  /* Initialize the queues */

  dq_init(&g_free_tcp_connections);
  dq_init(&g_active_tcp_connections);

  /* Now initialize each connection structure */

  for (i = 0; i < CONFIG_NET_TCP_CONNS; i++)
    {
      /* Mark the connection closed and move it to the free list */

      g_tcp_connections[i].tcpstateflags = UIP_CLOSED;
      dq_addlast(&g_tcp_connections[i].node, &g_free_tcp_connections);
    }

  g_last_tcp_port = 1024;
}
Example #16
void netlink_free(FAR struct netlink_conn_s *conn)
{
  /* The free list is protected by a semaphore (that behaves like a mutex). */

  DEBUGASSERT(conn->crefs == 0);

  _netlink_semtake(&g_free_sem);

  /* Remove the connection from the active list */

  dq_rem(&conn->node, &g_active_netlink_connections);

  /* Reset structure */

  memset(conn, 0, sizeof(*conn));

  /* Free the connection */

  dq_addlast(&conn->node, &g_free_netlink_connections);
  _netlink_semgive(&g_free_sem);
}
Example #17
FAR struct netlink_conn_s *netlink_alloc(void)
{
  FAR struct netlink_conn_s *conn;

  /* The free list is protected by a semaphore (that behaves like a mutex). */

  _netlink_semtake(&g_free_sem);
  conn = (FAR struct netlink_conn_s *)dq_remfirst(&g_free_netlink_connections);
  if (conn)
    {
      /* Make sure that the connection is marked as uninitialized */

      memset(conn, 0, sizeof(*conn));

      /* Enqueue the connection into the active list */

      dq_addlast(&conn->node, &g_active_netlink_connections);
    }

  _netlink_semgive(&g_free_sem);
  return conn;
}
Example #18
struct uip_udp_conn *uip_udpalloc(void)
{
  struct uip_udp_conn *conn;

  /* The free list is only accessed from user, non-interrupt level and
   * is protected by a semaphore (that behaves like a mutex).
   */

  _uip_semtake(&g_free_sem);
  conn = (struct uip_udp_conn *)dq_remfirst(&g_free_udp_connections);
  if (conn)
    {
      /* Make sure that the connection is marked as uninitialized */

      conn->lport = 0;

      /* Enqueue the connection into the active list */

      dq_addlast(&conn->node, &g_active_udp_connections);
    }
  _uip_semgive(&g_free_sem);
  return conn;
}
Example #19
static int work_qqueue(FAR struct usr_wqueue_s *wqueue,
                       FAR struct work_s *work, worker_t worker,
                       FAR void *arg, clock_t delay)
{
  DEBUGASSERT(work != NULL);

  /* Get exclusive access to the work queue */

  while (work_lock() < 0);

  /* Is there already pending work? */

  if (work->worker != NULL)
    {
      /* Remove the entry from the work queue.  It will be requeued at the
       * end of the work queue.
       */

      dq_rem((FAR dq_entry_t *)work, &wqueue->q);
    }

  /* Initialize the work structure */

  work->worker = worker;           /* Work callback. non-NULL means queued */
  work->arg    = arg;              /* Callback argument */
  work->delay  = delay;            /* Delay until work performed */

  /* Now, time-tag that entry and put it in the work queue. */

  work->qtime  = clock(); /* Time work queued */

  dq_addlast((FAR dq_entry_t *)work, &wqueue->q);
  kill(wqueue->pid, SIGWORK);   /* Wake up the worker thread */

  work_unlock();
  return OK;
}
Example #20
void aio_initialize(void)
{
	int i;

	/* Initialize counting semaphores */

	(void)sem_init(&g_aioc_freesem, 0, CONFIG_FS_NAIOC);
	(void)sem_init(&g_aio_exclsem, 0, 1);

	g_aio_holder = INVALID_PROCESS_ID;

	/* Initialize the container queues */

	dq_init(&g_aioc_free);
	dq_init(&g_aio_pending);

	/* Add all of the pre-allocated AIO containers to the free list */

	for (i = 0; i < CONFIG_FS_NAIOC; i++) {
		/* Add the container to the free list */

		dq_addlast(&g_aioc_alloc[i].aioc_link, &g_aioc_free);
	}
}
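
aioc_alloc(), the counterpart that aio_contain() in Example #22 relies on, is not among these examples. A plausible sketch following the same counting-semaphore-plus-free-list pattern as aioc_free() in Example #11 (treat the details as an assumption):

/* Counterpart to aioc_free() in Example #11: wait until a container is
 * available, then detach it from the free list.  Sketch only; the real
 * implementation may differ in detail.
 */

static FAR struct aio_container_s *aioc_alloc(void)
{
  FAR struct aio_container_s *aioc;

  /* The counting semaphore tracks the number of free containers, so this
   * wait returns only when at least one is available.  Retry if the wait
   * is interrupted by a signal.
   */

  while (sem_wait(&g_aioc_freesem) < 0);

  /* Detach the first free container under the exclusion lock */

  aio_lock();
  aioc = (FAR struct aio_container_s *)dq_remfirst(&g_aioc_free);
  aio_unlock();

  return aioc;
}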
Example #21
static void hrt_usleep_add(struct hrt_s *phrt)
{
  dq_entry_t *pent;
  irqstate_t flags;

  /* Disable MTM2-Ch0 */

  putreg32(0, rMT2OPR);

  hrt_queue_refresh();

  flags = spin_lock_irqsave();

  /* add phrt to hrt_timer_queue */

  for (pent = hrt_timer_queue.head; pent; pent = dq_next(pent))
    {
      struct hrt_s *tmp = container_of(pent, struct hrt_s, ent);
      if (tmp->usec >= phrt->usec)
        {
          break;
        }
    }
  if (pent)
    {
      dq_addbefore(pent, &phrt->ent, &hrt_timer_queue);
    }
  else
    {
      dq_addlast(&phrt->ent, &hrt_timer_queue);
    }

  spin_unlock_irqrestore(flags);

  hrt_usleep_setup();
}
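
The loop above is the standard way to keep a dq sorted: walk to the first entry whose key is not smaller, insert before it, or append if none is found. The same pattern restated generically (struct item_s and its key field are illustrative names):

#include <stdint.h>

/* Generic restatement of the sorted-insert pattern in Example #21.
 * 'struct item_s' and its 'key' field are illustrative.
 */

struct item_s
{
  dq_entry_t ent;            /* Queue entry; first so the cast below works */
  uint32_t   key;            /* Sort key, e.g. an expiry time in usec */
};

static void sorted_insert(FAR struct item_s *item, dq_queue_t *queue)
{
  FAR dq_entry_t *pent;

  /* Find the first entry whose key is >= the new item's key */

  for (pent = queue->head; pent; pent = pent->flink)
    {
      FAR struct item_s *tmp = (FAR struct item_s *)pent;
      if (tmp->key >= item->key)
        {
          break;
        }
    }

  if (pent)
    {
      dq_addbefore(pent, &item->ent, queue);   /* Keep the order */
    }
  else
    {
      dq_addlast(&item->ent, queue);           /* Largest key: append */
    }
}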
Example #22
FAR struct aio_container_s *aio_contain(FAR struct aiocb *aiocbp)
{
  FAR struct aio_container_s *aioc;
  union
  {
#ifdef AIO_HAVE_FILEP
    FAR struct file *filep;
#endif
#ifdef AIO_HAVE_PSOCK
    FAR struct socket *psock;
#endif
    FAR void *ptr;
  } u;
#ifdef CONFIG_PRIORITY_INHERITANCE
  struct sched_param param;
#endif

#if defined(AIO_HAVE_FILEP) && defined(AIO_HAVE_PSOCK)
  if (aiocbp->aio_fildes < CONFIG_NFILE_DESCRIPTORS)
#endif
#ifdef AIO_HAVE_FILEP
    {
      /* Get the file structure corresponding to the file descriptor. */

      u.filep = fs_getfilep(aiocbp->aio_fildes);
      if (!u.filep)
        {
          /* The errno value has already been set */

          return NULL;
        }
    }
#endif
#if defined(AIO_HAVE_FILEP) && defined(AIO_HAVE_PSOCK)
  else
#endif
#ifdef AIO_HAVE_PSOCK
    {
      /* Get the socket structure corresponding to the socket descriptor */

      u.psock = sockfd_socket(aiocbp->aio_fildes);
      if (!u.psock)
        {
          /* Does not set the errno.  EBADF is the most likely explanation. */

          set_errno(EBADF);
          return NULL;
        }
    }
#endif

  /* Allocate the AIO control block container, waiting for one to become
   * available if necessary.  This should never fail.
   */

  aioc = aioc_alloc();
  DEBUGASSERT(aioc);

  /* Initialize the container */

  memset(aioc, 0, sizeof(struct aio_container_s));
  aioc->aioc_aiocbp = aiocbp;
  aioc->u.aioc_filep = u.ptr;
  aioc->aioc_pid = getpid();

#ifdef CONFIG_PRIORITY_INHERITANCE
  DEBUGVERIFY(sched_getparam (aioc->aioc_pid, &param));
  aioc->aioc_prio = param.sched_priority;
#endif

  /* Add the container to the pending transfer list. */

  aio_lock();
  dq_addlast(&aioc->aioc_link, &g_aio_pending);
  aio_unlock();
  return aioc;
}
Example #23
FAR struct tcp_conn_s *tcp_alloc_accept(FAR struct net_driver_s *dev,
                                        FAR struct tcp_hdr_s *tcp)
{
  FAR struct tcp_conn_s *conn;
  uint8_t domain;
  int ret;

  /* Get the appropriate IP domain */

#if defined(CONFIG_NET_IPv4) && defined(CONFIG_NET_IPv6)
  bool ipv6 = IFF_IS_IPv6(dev->d_flags);
  domain = ipv6 ? PF_INET6 : PF_INET;
#elif defined(CONFIG_NET_IPv4)
  domain = PF_INET;
#else /* defined(CONFIG_NET_IPv6) */
  domain = PF_INET6;
#endif

  /* Allocate the connection structure */

  conn = tcp_alloc(domain);
  if (conn)
    {
      /* Set up the local address (laddr) and the remote address (raddr)
       * that describes the TCP connection.
       */

#ifdef CONFIG_NET_IPv6
#ifdef CONFIG_NET_IPv4
      if (ipv6)
#endif
        {
          FAR struct ipv6_hdr_s *ip = IPv6BUF;

          /* Set the IPv6 specific MSS and the IPv6 locally bound address */

          conn->mss = TCP_IPv6_INITIAL_MSS(dev);
          net_ipv6addr_copy(conn->u.ipv6.raddr, ip->srcipaddr);
#ifdef CONFIG_NETDEV_MULTINIC
          net_ipv6addr_copy(conn->u.ipv6.laddr, ip->destipaddr);

          /* We now have to filter all outgoing transfers so that they use
           * only the MSS of this device.
           */

          DEBUGASSERT(conn->dev == NULL || conn->dev == dev);
          conn->dev = dev;
#endif

          /* Find the device that can receive packets on the network
           * associated with this local address.
           */

          ret = tcp_remote_ipv6_device(conn);
        }
#endif /* CONFIG_NET_IPv6 */

#ifdef CONFIG_NET_IPv4
#ifdef CONFIG_NET_IPv6
      else
#endif
        {
          FAR struct ipv4_hdr_s *ip = IPv4BUF;

          /* Set the IPv4 specific MSS and the IPv4 bound remote address. */

          conn->mss = TCP_IPv4_INITIAL_MSS(dev);
          net_ipv4addr_copy(conn->u.ipv4.raddr,
                            net_ip4addr_conv32(ip->srcipaddr));

#ifdef CONFIG_NETDEV_MULTINIC
          /* Set the local address as well */

          net_ipv4addr_copy(conn->u.ipv4.laddr,
                            net_ip4addr_conv32(ip->destipaddr));

          /* We now have to filter all outgoing transfers so that they use
           * only the MSS of this device.
           */

          DEBUGASSERT(conn->dev == NULL || conn->dev == dev);
          conn->dev = dev;
#endif

          /* Find the device that can receive packets on the network
           * associated with this local address.
           */

          ret = tcp_remote_ipv4_device(conn);
        }
#endif /* CONFIG_NET_IPv4 */

      /* Verify that a network device that can provide packets to this
       * local address was found.
       */

      if (ret < 0)
        {
          /* If no device is found, then the address is not reachable.
           * That should be impossible in this context and we should
           * probably really just assert here.
           */

          nerr("ERROR: Failed to find network device: %d\n", ret);
          tcp_free(conn);
          return NULL;
        }

      /* Fill in the necessary fields for the new connection. */

      conn->rto           = TCP_RTO;
      conn->timer         = TCP_RTO;
      conn->sa            = 0;
      conn->sv            = 4;
      conn->nrtx          = 0;
      conn->lport         = tcp->destport;
      conn->rport         = tcp->srcport;
      conn->tcpstateflags = TCP_SYN_RCVD;

      tcp_initsequence(conn->sndseq);
      conn->unacked       = 1;
#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
      conn->expired       = 0;
      conn->isn           = 0;
      conn->sent          = 0;
      conn->sndseq_max    = 0;
#endif

      /* rcvseq should be the seqno from the incoming packet + 1. */

      memcpy(conn->rcvseq, tcp->seqno, 4);

#ifdef CONFIG_NET_TCP_READAHEAD
      /* Initialize the list of TCP read-ahead buffers */

      IOB_QINIT(&conn->readahead);
#endif

#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
      /* Initialize the write buffer lists */

      sq_init(&conn->write_q);
      sq_init(&conn->unacked_q);
#endif

      /* And, finally, put the connection structure into the active list.
       * Interrupts should already be disabled in this context.
       */

      dq_addlast(&conn->node, &g_active_tcp_connections);
    }

  return conn;
}
Example #24
void uip_tcpfree(struct uip_conn *conn)
{
  FAR struct uip_callback_s *cb;
  FAR struct uip_callback_s *next;
#ifdef CONFIG_NET_TCP_READAHEAD
  FAR struct uip_readahead_s *readahead;
#endif
#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
  FAR struct uip_wrbuffer_s *wrbuffer;
#endif
  uip_lock_t flags;

  /* Because g_free_tcp_connections is accessed from user level and interrupt
   * level code, it is necessary to keep interrupts disabled during this
   * operation.
   */

  DEBUGASSERT(conn->crefs == 0);
  flags = uip_lock();

  /* Free remaining callbacks, actually there should be only the close callback
   * left.
   */

  for (cb = conn->list; cb; cb = next)
    {
      next = cb->flink;
      uip_tcpcallbackfree(conn, cb);
    }

  /* UIP_ALLOCATED means that the connection is not in the active list
   * yet.
   */

  if (conn->tcpstateflags != UIP_ALLOCATED)
    {
      /* Remove the connection from the active list */

      dq_rem(&conn->node, &g_active_tcp_connections);
    }

#ifdef CONFIG_NET_TCP_READAHEAD
  /* Release any read-ahead buffers attached to the connection */

  while ((readahead = (struct uip_readahead_s *)sq_remfirst(&conn->readahead)) != NULL)
    {
      uip_tcpreadahead_release(readahead);
    }
#endif

#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
  /* Release any write buffers attached to the connection */

  while ((wrbuffer = (struct uip_wrbuffer_s *)sq_remfirst(&conn->write_q)) != NULL)
    {
      uip_tcpwrbuffer_release(wrbuffer);
    }

  while ((wrbuffer = (struct uip_wrbuffer_s *)sq_remfirst(&conn->unacked_q)) != NULL)
    {
      uip_tcpwrbuffer_release(wrbuffer);
    }
#endif

#ifdef CONFIG_NET_TCPBACKLOG
  /* Remove any backlog attached to this connection */

  if (conn->backlog)
    {
      uip_backlogdestroy(conn);
    }

  /* If this connection is, itself, backlogged, then remove it from the
   * parent connection's backlog list.
   */

  if (conn->blparent)
    {
      uip_backlogdelete(conn->blparent, conn);
    }
#endif

  /* Mark the connection available and put it into the free list */

  conn->tcpstateflags = UIP_CLOSED;
  dq_addlast(&conn->node, &g_free_tcp_connections);
  uip_unlock(flags);
}
Example #25
int inline local_stream_connect(FAR struct local_conn_s *client,
                                FAR struct local_conn_s *server,
                                bool nonblock)
{
  int ret;

  /* Has server backlog been reached?
   * NOTE: The backlog will be zero if listen() has never been called by the
   * server.
   */

  if (server->lc_state != LOCAL_STATE_LISTENING ||
      server->u.server.lc_pending >= server->u.server.lc_backlog)
    {
      net_unlock();
      nerr("ERROR: Server is not listening: lc_state=%d\n",
           server->lc_state);
      nerr("   OR: The backlog limit was reached: %d or %d\n",
           server->u.server.lc_pending, server->u.server.lc_backlog);
      return -ECONNREFUSED;
    }

  /* Increment the number of pending server connections */

  server->u.server.lc_pending++;
  DEBUGASSERT(server->u.server.lc_pending != 0);

  /* Create the FIFOs needed for the connection */

  ret = local_create_fifos(client);
  if (ret < 0)
    {
      nerr("ERROR: Failed to create FIFOs for %s: %d\n",
           client->lc_path, ret);

      net_unlock();
      return ret;
    }

  /* Open the client-side write-only FIFO.  This should not block and should
   * prevent the server-side from blocking as well.
   */

  ret = local_open_client_tx(client, nonblock);
  if (ret < 0)
    {
      nerr("ERROR: Failed to open write-only FIFOs for %s: %d\n",
           client->lc_path, ret);

      net_unlock();
      goto errout_with_fifos;
    }

  DEBUGASSERT(client->lc_outfd >= 0);

  /* Add ourself to the list of waiting connections and notify the server. */

  dq_addlast(&client->lc_node, &server->u.server.lc_waiters);
  client->lc_state = LOCAL_STATE_ACCEPT;
  local_accept_pollnotify(server, POLLIN);
  _local_semgive(&server->lc_waitsem);
  net_unlock();

  /* Wait for the server to accept the connections */

  client->u.client.lc_result = -EBUSY;
  do
    {
      _local_semtake(&client->lc_waitsem);
      ret = client->u.client.lc_result;
    }
  while (ret == -EBUSY);

  /* Did we successfully connect? */

  if (ret < 0)
    {
      nerr("ERROR: Failed to connect: %d\n", ret);
      goto errout_with_outfd;
    }

  /* Yes.. open the read-only FIFO */

  ret = local_open_client_rx(client, nonblock);
  if (ret < 0)
    {
      nerr("ERROR: Failed to open write-only FIFOs for %s: %d\n",
           client->lc_path, ret);
      goto errout_with_outfd;
    }

  DEBUGASSERT(client->lc_infd >= 0);
  client->lc_state = LOCAL_STATE_CONNECTED;
  return OK;

errout_with_outfd:
  (void)close(client->lc_outfd);
  client->lc_outfd = -1;

errout_with_fifos:
  (void)local_release_fifos(client);
  client->lc_state = LOCAL_STATE_BOUND;
  return ret;
}
Example #26
int local_listen(FAR struct local_conn_s *server, int backlog)
{
  net_lock_t state;
  int ret;

  /* Some sanity checks */

  DEBUGASSERT(server);

  if (server->lc_proto != SOCK_STREAM ||
      server->lc_state == LOCAL_STATE_UNBOUND ||
      server->lc_type != LOCAL_TYPE_PATHNAME)
    {
      return -EOPNOTSUPP;
    }

  DEBUGASSERT(server->lc_state == LOCAL_STATE_BOUND ||
              server->lc_state == LOCAL_STATE_LISTENING);

  /* Set the backlog value */

  DEBUGASSERT((unsigned)backlog < 256);
  server->u.server.lc_backlog = backlog;

  /* Is this the first time that listen() has been called since the socket
   * was bound to an address?  If so, the state should be LOCAL_STATE_BOUND.
   */

  if (server->lc_state == LOCAL_STATE_BOUND)
    {
      /* The connection should not reside in any other list */

      DEBUGASSERT(server->lc_node.flink == NULL &&
                  server->lc_node.blink == NULL);

      /* Add the connection structure to the list of listeners */

      state = net_lock();
      dq_addlast(&server->lc_node, &g_local_listeners);
      net_unlock(state);

      /* And change the server state to listening */

      server->lc_state = LOCAL_STATE_LISTENING;
    }

  /* Loop until a connection is requested or we receive a signal */

  while (dq_empty(&server->u.server.lc_waiters))
    {
      /* No.. wait for a connection or a signal */

      ret = sem_wait(&server->lc_waitsem);
      if (ret < 0)
        {
          int errval = errno;
          DEBUGASSERT(errval == EINTR);
          return -errval;
        }
    }

  /* There is a client waiting for the connection */

  return OK;
}
Example #27
int uip_tcpconnect(struct uip_conn *conn, const struct sockaddr_in *addr)
{
  uip_lock_t flags;
  int port;

  /* The connection is expected to be in the UIP_ALLOCATED state.. i.e.,
   * allocated via uip_tcpalloc(), but not yet put into the active connections
   * list.
   */

  if (!conn || conn->tcpstateflags != UIP_ALLOCATED)
    {
      return -EISCONN;
    }

  /* If the TCP port has not already been bound to a local port, then select
   * one now.
   */

  flags = uip_lock();
  port = uip_selectport(ntohs(conn->lport));
  uip_unlock(flags);

  if (port < 0)
    {
      return port;
    }

  /* Initialize and return the connection structure, bind it to the port number */

  conn->tcpstateflags = UIP_SYN_SENT;
  uip_tcpinitsequence(conn->sndseq);

  conn->initialmss = conn->mss = UIP_TCP_MSS;
  conn->unacked    = 1;    /* TCP length of the SYN is one. */
  conn->nrtx       = 0;
  conn->timer      = 1;    /* Send the SYN next time around. */
  conn->rto        = UIP_RTO;
  conn->sa         = 0;
  conn->sv         = 16;   /* Initial value of the RTT variance. */
  conn->lport      = htons((uint16_t)port);

  /* The sockaddr port is 16 bits and already in network order */

  conn->rport = addr->sin_port;

  /* The sockaddr address is 32-bits in network order. */

  uip_ipaddr_copy(conn->ripaddr, addr->sin_addr.s_addr);

  /* Initialize the list of TCP read-ahead buffers */

#if CONFIG_NET_NTCP_READAHEAD_BUFFERS > 0
  sq_init(&conn->readahead);
#endif

  /* And, finally, put the connection structure into the active
   * list. Because g_active_tcp_connections is accessed from user level and
   * interrupt level code, it is necessary to keep interrupts disabled during
   * this operation.
   */

  flags = uip_lock();
  dq_addlast(&conn->node, &g_active_tcp_connections);
  uip_unlock(flags);

  return OK;
}
Example #28
int tcp_connect(FAR struct tcp_conn_s *conn, FAR const struct sockaddr *addr)
{
  int port;
  int ret;

  /* The connection is expected to be in the TCP_ALLOCATED state.. i.e.,
   * allocated via tcp_alloc(), but not yet put into the active connections
   * list.
   */

  if (!conn || conn->tcpstateflags != TCP_ALLOCATED)
    {
      return -EISCONN;
    }

  /* If the TCP port has not already been bound to a local port, then select
   * one now.  We assume that the IP address has been bound to a local device,
   * but the port may still be INPORT_ANY.
   */

  net_lock();

#ifdef CONFIG_NETDEV_MULTINIC
  /* If there are multiple network devices, then we need to pass the local,
   * bound address.  This is needed because port uniqueness is only for a
   * given network.
   *
   * This is complicated by the fact that the local address may be either an
   * IPv4 or an IPv6 address.
   */

#ifdef CONFIG_NET_IPv4
#ifdef CONFIG_NET_IPv6
  if (conn->domain == PF_INET)
#endif
    {
      /* Select a port that is unique for this IPv4 local address (host
       * order).
       */

      port = tcp_selectport(PF_INET,
                            (FAR const union ip_addr_u *)&conn->u.ipv4.laddr,
                            ntohs(conn->lport));
    }
#endif /* CONFIG_NET_IPv4 */

#ifdef CONFIG_NET_IPv6
#ifdef CONFIG_NET_IPv4
  else
#endif
    {
      /* Select a port that is unique for this IPv6 local address (host
       * order).
       */

      port = tcp_selectport(PF_INET6,
                            (FAR const union ip_addr_u *)conn->u.ipv6.laddr,
                            ntohs(conn->lport));
    }
#endif /* CONFIG_NET_IPv6 */

#else /* CONFIG_NETDEV_MULTINIC */
  /* Select the next available port number.  This is only one network device
   * so we do not have to bother with all of the IPv4/IPv6 local address
   * silliness.
   */

  port = tcp_selectport(ntohs(conn->lport));

#endif /* CONFIG_NETDEV_MULTINIC */

  /* Did we have a port assignment? */

  if (port < 0)
    {
      ret = port;
      goto errout_with_lock;
    }

  /* Set up the local address (laddr) and the remote address (raddr) that
   * describes the TCP connection.
   */

#ifdef CONFIG_NET_IPv4
#ifdef CONFIG_NET_IPv6
  if (conn->domain == PF_INET)
#endif
    {
      FAR const struct sockaddr_in *inaddr =
        (FAR const struct sockaddr_in *)addr;

      /* Save MSS and the port from the sockaddr (already in network order) */

      conn->mss    = MIN_IPv4_TCP_INITIAL_MSS;
      conn->rport  = inaddr->sin_port;

      /* The sockaddr address is 32-bits in network order. */

      net_ipv4addr_copy(conn->u.ipv4.raddr, inaddr->sin_addr.s_addr);

      /* Find the device that can receive packets on the network associated
       * with this remote address.
       */

      ret = tcp_remote_ipv4_device(conn);
    }
#endif /* CONFIG_NET_IPv4 */

#ifdef CONFIG_NET_IPv6
#ifdef CONFIG_NET_IPv4
  else
#endif
    {
      FAR const struct sockaddr_in6 *inaddr =
        (FAR const struct sockaddr_in6 *)addr;

      /* Save MSS and the port from the sockaddr (already in network order) */

      conn->mss     = MIN_IPv6_TCP_INITIAL_MSS;
      conn->rport   = inaddr->sin6_port;

      /* The sockaddr address is 128-bits in network order. */

      net_ipv6addr_copy(conn->u.ipv6.raddr, inaddr->sin6_addr.s6_addr16);

      /* Find the device that can receive packets on the network associated
       * with this local address.
       */

      ret = tcp_remote_ipv6_device(conn);
    }
#endif /* CONFIG_NET_IPv6 */

  /* Verify that a network device that can provide packets to this local
   * address was found.
   */

  if (ret < 0)
    {
      /* If no device is found, then the address is not reachable.  That
       * should be impossible in this context and we should probably really
       * just assert here.
       */

      nerr("ERROR: Failed to find network device: %d\n", ret);
      goto errout_with_lock;
    }

  /* Initialize and return the connection structure, bind it to the port
   * number.  At this point, we do not know the size of the initial MSS.  We
   * know the total size of the packet buffer, but we don't yet know the
   * size of link layer header.
   */

  conn->tcpstateflags = TCP_SYN_SENT;
  tcp_initsequence(conn->sndseq);

  conn->unacked    = 1;    /* TCP length of the SYN is one. */
  conn->nrtx       = 0;
  conn->timer      = 1;    /* Send the SYN next time around. */
  conn->rto        = TCP_RTO;
  conn->sa         = 0;
  conn->sv         = 16;   /* Initial value of the RTT variance. */
  conn->lport      = htons((uint16_t)port);
#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
  conn->expired    = 0;
  conn->isn        = 0;
  conn->sent       = 0;
  conn->sndseq_max = 0;
#endif

#ifdef CONFIG_NET_TCP_READAHEAD
  /* Initialize the list of TCP read-ahead buffers */

  IOB_QINIT(&conn->readahead);
#endif

#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
  /* Initialize the TCP write buffer lists */

  sq_init(&conn->write_q);
  sq_init(&conn->unacked_q);
#endif

  /* And, finally, put the connection structure into the active list. */

  dq_addlast(&conn->node, &g_active_tcp_connections);
  ret = OK;

errout_with_lock:
  net_unlock();
  return ret;
}