Example #1
/// @brief Returns an unused Request entry, or NULL if none is available.
MavlinkFTP::Request *
MavlinkFTP::_get_request(void)
{
	_lock_request_queue();
	Request* req = reinterpret_cast<Request *>(dq_remfirst(&_request_queue));
	_unlock_request_queue();
	return req;
}
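The allocator above just pops an entry from a pre-filled free queue under the request-queue lock; releasing an entry would be the mirror image, pushing it back with dq_addlast(). A minimal sketch of that return path, assuming the same lock helpers and the dq_entry_t layout that the reinterpret_cast above relies on (the method name is hypothetical):

/// @brief Returns a Request entry to the free queue.
void
MavlinkFTP::_return_request(Request *req)
{
	_lock_request_queue();
	dq_addlast(reinterpret_cast<dq_entry_t *>(req), &_request_queue);
	_unlock_request_queue();
}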
Example #2
FAR struct aio_container_s *aioc_alloc(void)
{
	FAR struct aio_container_s *aioc;

	/* Take a count from the semaphore, thus guaranteeing that we have an AIO
	 * container set aside for us.
	 */

	while (sem_wait(&g_aioc_freesem) < 0) {
		DEBUGASSERT(get_errno() == EINTR);
	}

	/* Get our AIO container */

	aio_lock();
	aioc = (FAR struct aio_container_s *)dq_remfirst(&g_aioc_free);
	aio_unlock();

	DEBUGASSERT(aioc);
	return aioc;
}
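The counting semaphore above is what guarantees aioc_alloc() a container; releasing one is the reverse sequence. A minimal sketch of that release path, assuming the container is queued through the same dq_entry_t layout that the cast in aioc_alloc() relies on (the real NuttX aioc_free() does additional bookkeeping not shown here):

void aioc_free(FAR struct aio_container_s *aioc)
{
	DEBUGASSERT(aioc != NULL);

	/* Return the container to the free list */

	aio_lock();
	dq_addlast((FAR dq_entry_t *)aioc, &g_aioc_free);
	aio_unlock();

	/* Announce to any waiter in aioc_alloc() that one more container is
	 * available.
	 */

	sem_post(&g_aioc_freesem);
}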
Example #3
FAR struct netlink_conn_s *netlink_alloc(void)
{
  FAR struct netlink_conn_s *conn;

  /* The free list is protected by a semaphore (that behaves like a mutex). */

  _netlink_semtake(&g_free_sem);
  conn = (FAR struct netlink_conn_s *)dq_remfirst(&g_free_netlink_connections);
  if (conn)
    {
      /* Make sure that the connection is marked as uninitialized */

      memset(conn, 0, sizeof(*conn));

      /* Enqueue the connection into the active list */

      dq_addlast(&conn->node, &g_active_netlink_connections);
    }

  _netlink_semgive(&g_free_sem);
  return conn;
}
Example #4
struct uip_udp_conn *uip_udpalloc(void)
{
  struct uip_udp_conn *conn;

  /* The free list is only accessed from user, non-interrupt level and
   * is protected by a semaphore (that behaves like a mutex).
   */

  _uip_semtake(&g_free_sem);
  conn = (struct uip_udp_conn *)dq_remfirst(&g_free_udp_connections);
  if (conn)
    {
      /* Make sure that the connection is marked as uninitialized */

      conn->lport = 0;

      /* Enqueue the connection into the active list */

      dq_addlast(&conn->node, &g_active_udp_connections);
    }
  _uip_semgive(&g_free_sem);
  return conn;
}
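Both allocators above assume that the free list was populated at start-up. A plausible initialization sketch in the same style: string a static array of pre-allocated connections onto g_free_udp_connections with dq_addlast() and create the mutex-like semaphore. The function name, the array g_udp_connections[], and the count UIP_UDP_CONNS are assumptions; only the queue, semaphore, and member names come from the code above.

void uip_udpinit(void)
{
  int i;

  /* Initialize the queues and the free-list semaphore */

  dq_init(&g_free_udp_connections);
  dq_init(&g_active_udp_connections);
  sem_init(&g_free_sem, 0, 1);

  /* Mark each pre-allocated connection unused and put it on the free list */

  for (i = 0; i < UIP_UDP_CONNS; i++)
    {
      g_udp_connections[i].lport = 0;
      dq_addlast(&g_udp_connections[i].node, &g_free_udp_connections);
    }
}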
Example #5
struct uip_conn *uip_tcpalloc(void)
{
  struct uip_conn *conn;
  uip_lock_t flags;

  /* Because this routine is called from both interrupt level and
   * from user level, we have no option but to disable interrupts
   * while accessing g_free_tcp_connections[].
   */

  flags = uip_lock();

  /* Return the entry from the head of the free list */

  conn = (struct uip_conn *)dq_remfirst(&g_free_tcp_connections);

  /* Is the free list empty? */

  if (!conn)
    {
      /* As a fallback, check for connection structures which are not
       * established yet.
       *
       * Search the active connection list for the oldest connection
       * that is not in the UIP_ESTABLISHED state.
       */

      struct uip_conn *tmp = (struct uip_conn *)g_active_tcp_connections.head;
      while (tmp)
        {
          nllvdbg("conn: %p state: %02x\n", tmp, tmp->tcpstateflags);

          /* Is this connection in some state other than UIP_ESTABLISHED
           * state?
           */

          if (tmp->tcpstateflags != UIP_ESTABLISHED)
            {
              /* Yes.. Is it the oldest one we have seen so far? */

              if (!conn || tmp->timer > conn->timer)
                {
                  /* Yes.. remember it */

                  conn = tmp;
                }
            }

          /* Look at the next active connection */

          tmp = (struct uip_conn *)tmp->node.flink;
        }

      /* Did we find a connection that we can re-use? */

      if (conn != NULL)
        {
          nlldbg("Closing unestablished connection: %p\n", conn);

          /* Yes... free it.  This will remove the connection from the list
           * of active connections and release all resources held by the
           * connection.
           *
           * REVISIT:  Could there be any higher level, socket interface
           * that needs to be informed that we did this to them?
           */

          uip_tcpfree(conn);

          /* Now there is guaranteed to be one free connection.  Get it! */

          conn = (struct uip_conn *)dq_remfirst(&g_free_tcp_connections);
        }
    }

  uip_unlock(flags);

  /* Mark the connection allocated */

  if (conn)
    {
      memset(conn, 0, sizeof(struct uip_conn));
      conn->tcpstateflags = UIP_ALLOCATED;
    }

  return conn;
}
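uip_tcpfree(), called in the reuse path above, is what makes the second dq_remfirst() guaranteed to succeed: it must end with the connection back on g_free_tcp_connections. A minimal sketch of that essential queue motion under the same interrupt lock (callback teardown and the list-membership checks of the real function are omitted; the UIP_CLOSED state value is assumed from uip's state set):

void uip_tcpfree(struct uip_conn *conn)
{
  uip_lock_t flags;

  flags = uip_lock();

  /* Detach the connection from the active list (the real code first checks
   * that it is actually there).
   */

  dq_rem(&conn->node, &g_active_tcp_connections);

  /* Mark it closed and return it to the free list so that uip_tcpalloc()
   * can hand it out again.
   */

  conn->tcpstateflags = UIP_CLOSED;
  dq_addlast(&conn->node, &g_free_tcp_connections);

  uip_unlock(flags);
}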
Example #6
static inline bool pg_dequeue(void)
{
  /* Loop until either (1) the TCB of a task that requires a fill is found, OR
   * (2) the g_waitingforfill list becomes empty.
   */

  do
    {
      /* Remove the TCB from the head of the list (if any) */

      g_pftcb = (FAR struct tcb_s *)dq_remfirst((dq_queue_t*)&g_waitingforfill);
      pgllvdbg("g_pftcb: %p\n", g_pftcb);
      if (g_pftcb != NULL)
        {
          /* Call the architecture-specific function up_checkmapping() to see if
           * the page fill still needs to be performed. In certain conditions,
           * the page fault may occur on several threads for the same page and
           * be queued multiple times. In this corner case, the blocked task will
           * simply be restarted.
           */

          if (!up_checkmapping(g_pftcb))
            {
              /* This page needs to be filled.  pg_miss bumps up
               * the priority of the page fill worker thread as each
               * TCB is added to the g_waitingforfill list.  So we
               * may need to also drop the priority of the worker
               * thread as the next TCB comes off of the list.
               *
               * If wtcb->sched_priority > CONFIG_PAGING_DEFPRIO,
               * then the page fill worker thread is executing at
               * an elevated priority that may be reduced.
               *
               * If wtcb->sched_priority > g_pftcb->sched_priority
               * then the page fill worker thread is executing at
               * a higher priority than is appropriate for this
               * fill (this priority can get re-boosted by pg_miss()
               * if a new higher priority fill is required).
               */

              FAR struct tcb_s *wtcb = (FAR struct tcb_s *)g_readytorun.head;
              if (wtcb->sched_priority > CONFIG_PAGING_DEFPRIO &&
                  wtcb->sched_priority > g_pftcb->sched_priority)
                {
                  /* Don't reduce the priority of the page fill
                   * worker thread lower than the configured
                   * minimum.
                   */

                  int priority = g_pftcb->sched_priority;
                  if (priority < CONFIG_PAGING_DEFPRIO)
                    {
                      priority = CONFIG_PAGING_DEFPRIO;
                    }

                  /* Reduce the priority of the page fill worker thread */

                  pgllvdbg("New worker priority. %d->%d\n",
                           wtcb->sched_priority, priority);
                  sched_setpriority(wtcb, priority);
                }

              /* Return with g_pftcb holding the pointer to
           * the TCB associated with the task that requires the page fill.
               */

              return true;
            }

          /* The page needed by this task has already been mapped into the
           * virtual address space -- just restart it.
           */

          pglldbg("Restarting TCB: %p\n", g_pftcb);
          up_unblock_task(g_pftcb);
        }
    }
  while (g_pftcb != NULL);

  return false;
}
Example #7
struct uip_conn *uip_tcpalloc(void)
{
  struct uip_conn *conn;
  uip_lock_t flags;

  /* Because this routine is called from both interrupt level and
   * from user level, we have no option but to disable interrupts
   * while accessing g_free_tcp_connections[].
   */

  flags = uip_lock();

  /* Return the entry from the head of the free list */

  conn = (struct uip_conn *)dq_remfirst(&g_free_tcp_connections);

#if 0 /* Revisit */
  /* Is the free list empty? */

  if (!conn)
    {
      /* As a fallback, check for connection structures in the TIME_WAIT
       * state and, if any are found, take the oldest one.
       */

      struct uip_conn *tmp = (struct uip_conn *)g_active_tcp_connections.head;
      while (tmp)
        {
          /* Is this connection in the UIP_TIME_WAIT state? */

          if (tmp->tcpstateflags == UIP_TIME_WAIT)
            {
              /* Is it the oldest one we have seen so far? */

              if (!conn || tmp->timer > conn->timer)
                {
                  /* Yes.. remember it */

                  conn = tmp;
                }
            }

          /* Look at the next active connection */

          tmp = (struct uip_conn *)tmp->node.flink;
        }

      /* If we found one, remove it from the active connection list */

      if (conn)
        {
          dq_rem(&conn->node, &g_active_tcp_connections);
        }
    }
#endif

  uip_unlock(flags);

  /* Mark the connection allocated */

  if (conn)
    {
      conn->tcpstateflags = UIP_ALLOCATED;
    }

  return conn;
}
Example #8
int psock_local_accept(FAR struct socket *psock, FAR struct sockaddr *addr,
                       FAR socklen_t *addrlen, FAR void **newconn)
{
    FAR struct local_conn_s *server;
    FAR struct local_conn_s *client;
    FAR struct local_conn_s *conn;
    int ret;

    /* Some sanity checks */

    DEBUGASSERT(psock && psock->s_conn);
    server = (FAR struct local_conn_s *)psock->s_conn;

    if (server->lc_proto != SOCK_STREAM ||
            server->lc_state != LOCAL_STATE_LISTENING ||
            server->lc_type  != LOCAL_TYPE_PATHNAME)
    {
        return -EOPNOTSUPP;
    }

    /* Loop as necessary if we have to wait for a connection */

    for (; ; )
    {
        /* Are there pending connections?  If so, remove the client from the
         * head of the waiting list.
         */

        client = (FAR struct local_conn_s *)
                 dq_remfirst(&server->u.server.lc_waiters);

        if (client)
        {
            /* Decrement the number of pending clients */

            DEBUGASSERT(server->u.server.lc_pending > 0);
            server->u.server.lc_pending--;

            /* Create a new connection structure for the server side of the
             * connection.
             */

            conn = local_alloc();
            if (!conn)
            {
                ndbg("ERROR:  Failed to allocate new connection structure\n");
                ret = -ENOMEM;
            }
            else
            {
                /* Initialize the new connection structure */

                conn->lc_crefs  = 1;
                conn->lc_proto  = SOCK_STREAM;
                conn->lc_type   = LOCAL_TYPE_PATHNAME;
                conn->lc_state  = LOCAL_STATE_CONNECTED;

                strncpy(conn->lc_path, client->lc_path, UNIX_PATH_MAX-1);
                conn->lc_path[UNIX_PATH_MAX-1] = '\0';
                conn->lc_instance_id = client->lc_instance_id;

                /* Open the server-side write-only FIFO.  This should not
                 * block.
                 */

                ret = local_open_server_tx(conn,
                                           _SS_ISNONBLOCK(psock->s_flags));
                if (ret < 0)
                {
                    ndbg("ERROR: Failed to open write-only FIFOs for %s: %d\n",
                         conn->lc_path, ret);
                }
            }

            /* Do we have a connection?  Is the write-side FIFO opened? */

            if (ret == OK)
            {
                DEBUGASSERT(conn->lc_outfd >= 0);

                /* Open the server-side read-only FIFO.  This should not
                 * block because the client side has already opened it
                 * for writing.
                 */

                ret = local_open_server_rx(conn,
                                           _SS_ISNONBLOCK(psock->s_flags));
                if (ret < 0)
                {
                    ndbg("ERROR: Failed to open read-only FIFOs for %s: %d\n",
                         conn->lc_path, ret);
                }
            }

            /* Do we have a connection?  Are the FIFOs opened? */

            if (ret == OK)
            {
                DEBUGASSERT(conn->lc_infd >= 0);

                /* Return the address family */

                if (addr)
                {
                    ret = local_getaddr(client, addr, addrlen);
                }
            }

            if (ret == OK)
            {
                /* Return the client connection structure */

                *newconn = (FAR void *)conn;
            }

            /* Signal the client with the result of the connection */

            client->u.client.lc_result = ret;
            sem_post(&client->lc_waitsem);
            return ret;
        }

        /* No.. then there should be no pending connections */

        DEBUGASSERT(server->u.server.lc_pending == 0);

        /* Was the socket opened non-blocking? */

        if (_SS_ISNONBLOCK(psock->s_flags))
        {
            /* Yes.. return EAGAIN */

            return -EAGAIN;
        }

        /* Otherwise, listen for a connection and try again. */

        ret = local_waitlisten(server);
        if (ret < 0)
        {
            return ret;
        }
    }
}
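For context, the client half of this rendezvous has to do the opposite of the dq_remfirst() above: queue itself on the server's waiter list and block until the server posts the result. A rough sketch, using only the members that appear in psock_local_accept(); the function name is hypothetical, and waking the listening server plus all locking are omitted:

static int local_wait_for_accept(FAR struct local_conn_s *client,
                                 FAR struct local_conn_s *server)
{
    /* Enqueue this client and account for it in the pending count that
     * psock_local_accept() decrements.
     */

    dq_addlast((FAR dq_entry_t *)client, &server->u.server.lc_waiters);
    server->u.server.lc_pending++;

    /* Block until psock_local_accept() signals completion via lc_waitsem */

    while (sem_wait(&client->lc_waitsem) < 0)
    {
        DEBUGASSERT(get_errno() == EINTR);
    }

    /* The accept (or error) result was left in lc_result */

    return client->u.client.lc_result;
}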
Example #9
bool sched_removereadytorun(FAR struct tcb_s *rtcb)
{
  FAR dq_queue_t *tasklist;
  bool doswitch = false;
  int cpu;

  /* Which CPU (if any) is the task running on?  Which task list holds the
   * TCB?
   */

  cpu      = rtcb->cpu;
  tasklist = TLIST_HEAD(rtcb->task_state, cpu);

  /* Check if the TCB to be removed is at the head of a ready-to-run list.
   * For the case of SMP, there are two lists involved:  (1) the
   * g_readytorun list that holds non-running tasks that have not been
   * assigned to a CPU, and (2) the g_assignedtasks[] lists which hold
   * tasks assigned to a CPU, including the task that is currently running
   * on that CPU.  Only the latter list contains the currently active task,
   * and only removing the head of that list can result in a context
   * switch.
   *
   * rtcb->blink == NULL will tell us if the TCB is at the head of the
   * ready-to-run list and, hence, a candidate for the new running task.
   *
   * If so, then the tasklist RUNNABLE attribute will inform us if the list
   * holds the currently executing task and, hence, if a context switch
   * should occur.
   */

  if (rtcb->blink == NULL && TLIST_ISRUNNABLE(rtcb->task_state))
    {
      FAR struct tcb_s *nxttcb;
      FAR struct tcb_s *rtrtcb;
      int me;

      /* There must always be at least one task in the list (the IDLE task)
       * after the TCB being removed.
       */

      nxttcb = (FAR struct tcb_s *)rtcb->flink;
      DEBUGASSERT(nxttcb != NULL);

      /* If we are modifying the head of some assigned task list other than
       * our own, we will need to stop that CPU.
       */

      me = this_cpu();
      if (cpu != me)
        {
          DEBUGVERIFY(up_cpu_pause(cpu));
        }

      /* The task is running but the CPU that it was running on has been
       * paused.  We can now safely remove its TCB from the ready-to-run
       * task list.  In the SMP case this may be either the g_readytorun
       * or the g_assignedtasks[cpu] list.
       */

      dq_rem((FAR dq_entry_t *)rtcb, tasklist);

      /* Which task will go at the head of the list?  It will be either the
       * next tcb in the assigned task list (nxttcb) or a TCB in the
       * g_readytorun list.  We can only select a task from that list if
       * the affinity mask includes the current CPU.
       *
       * REVISIT: What should we do, if anything, if pre-emption is locked
       * by another CPU?  Should we just use nxttcb?  Should we select
       * from the pending task list instead of the g_readytorun list?
       */

      for (rtrtcb = (FAR struct tcb_s *)g_readytorun.head;
           rtrtcb != NULL && !CPU_ISSET(cpu, &rtrtcb->affinity);
           rtrtcb = (FAR struct tcb_s *)rtrtcb->flink);

      /* Did we find a task in the g_readytorun list?  Which task should
       * we use?  We decide strictly by the priority of the two tasks:
       * Either (1) the task currently at the head of the g_assignedtasks[cpu]
       * list (nxttcb) or (2) the highest priority task from the
       * g_readytorun list with matching affinity (rtrtcb).
       */

      if (rtrtcb != NULL && rtrtcb->sched_priority >= nxttcb->sched_priority)
        {
          FAR struct tcb_s *tmptcb;

          /* The TCB at the head of the ready to run list has the higher
           * priority.  Remove that task from the head of the g_readytorun
           * list and add to the head of the g_assignedtasks[cpu] list.
           */

          tmptcb = (FAR struct tcb_s *)
            dq_remfirst((FAR dq_queue_t *)&g_readytorun);

          DEBUGASSERT(tmptcb == rtrtcb);

          dq_addfirst((FAR dq_entry_t *)tmptcb, tasklist);

          tmptcb->cpu = cpu;
          nxttcb = tmptcb;
        }

      /* Will pre-emption be disabled after the switch?  If the lockcount is
       * greater than zero, then this task/this CPU holds the scheduler lock.
       */

      if (nxttcb->lockcount > 0)
        {
          /* Yes... make sure that scheduling logic knows about this */

          spin_setbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
                      &g_cpu_schedlock);
        }
      else
        {
          /* No.. we may need to release our hold on the lock. */

          spin_clrbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
                      &g_cpu_schedlock);
        }

      /* Interrupts may be disabled after the switch.  If irqcount is greater
       * than zero, then this task/this CPU holds the IRQ lock
       */

      if (nxttcb->irqcount > 0)
        {
          /* Yes... make sure that scheduling logic knows about this */

          spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                      &g_cpu_irqlock);
        }
      else
        {
          /* No.. we may need to release our hold on the irq state. */

          spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                      &g_cpu_irqlock);
        }

      nxttcb->task_state = TSTATE_TASK_RUNNING;

      /* All done, restart the other CPU (if it was paused). */

      doswitch = true;
      if (cpu != me)
        {
          /* In this case we do not want to report a context switch to this
           * CPU.  Only the other CPU is affected.
           */

          DEBUGVERIFY(up_cpu_resume(cpu));
          doswitch = false;
        }
    }
  else
    {
      /* The task is not running.  Just remove its TCB from the ready-to-run
       * list.  In the SMP case this may be either the g_readytorun or the
       * g_assignedtasks[cpu] list.
       */

      dq_rem((FAR dq_entry_t *)rtcb, tasklist);
    }

  /* Since the TCB is no longer in any list, it is now invalid */

  rtcb->task_state = TSTATE_TASK_INVALID;
  return doswitch;
}
Example #10
FAR struct tcp_conn_s *tcp_alloc(void)
{
  FAR struct tcp_conn_s *conn;
  net_lock_t flags;

  /* Because this routine is called from both interrupt level and
   * from user level, we have no option but to disable interrupts
   * while accessing g_free_tcp_connections[].
   */

  flags = net_lock();

  /* Return the entry from the head of the free list */

  conn = (FAR struct tcp_conn_s *)dq_remfirst(&g_free_tcp_connections);

#ifndef CONFIG_NET_SOLINGER
  /* Is the free list empty? */

  if (!conn)
    {
      /* As a fall-back, check for connection structures which can be stalled.
       *
       * Search the active connection list for the oldest connection
       * that is about to be closed anyway.
       */

      FAR struct tcp_conn_s *tmp =
        (FAR struct tcp_conn_s *)g_active_tcp_connections.head;

      while (tmp)
        {
          nllvdbg("conn: %p state: %02x\n", tmp, tmp->tcpstateflags);

          /* Is this connection in a state we can sacrifice? */

          /* REVISIT: maybe we could check for SO_LINGER but it's buried
           * in the socket layer.
           */

          if (tmp->tcpstateflags == TCP_CLOSING    ||
              tmp->tcpstateflags == TCP_FIN_WAIT_1 ||
              tmp->tcpstateflags == TCP_FIN_WAIT_2 ||
              tmp->tcpstateflags == TCP_TIME_WAIT  ||
              tmp->tcpstateflags == TCP_LAST_ACK)
            {
              /* Yes.. Is it the oldest one we have seen so far? */

              if (!conn || tmp->timer > conn->timer)
                {
                  /* Yes.. remember it */

                  conn = tmp;
                }
            }

          /* Look at the next active connection */

          tmp = (FAR struct tcp_conn_s *)tmp->node.flink;
        }

      /* Did we find a connection that we can re-use? */

      if (conn != NULL)
        {
          nlldbg("Closing unestablished connection: %p\n", conn);

          /* Yes... free it.  This will remove the connection from the list
           * of active connections and release all resources held by the
           * connection.
           *
           * REVISIT:  Could there be any higher level, socket interface
           * that needs to be informed that we did this to them?
           *
           * Actually yes. When CONFIG_NET_SOLINGER is enabled there is a
           * pending callback in netclose_disconnect waiting for getting
           * woken up.  Otherwise there's the callback too, but no one is
           * waiting for it.
           */

          tcp_free(conn);

          /* Now there is guaranteed to be one free connection.  Get it! */

          conn = (FAR struct tcp_conn_s *)dq_remfirst(&g_free_tcp_connections);
        }
    }
#endif

  net_unlock(flags);

  /* Mark the connection allocated */

  if (conn)
    {
      memset(conn, 0, sizeof(struct tcp_conn_s));
      conn->tcpstateflags = TCP_ALLOCATED;
    }

  return conn;
}