Example #1
enum pm_state_e pm_checkstate(void)
{
  uint32_t now;
  irqstate_t flags;

  /* Check for the end of the current time slice.  This must be performed
   * with interrupts disabled so that it does not conflict with the similar
   * logic in pm_activity().
   */

  flags = irqsave();

  /* Check the elapsed time.  In periods of low activity, time slicing is
   * controlled by IDLE loop polling; in periods of higher activity, time
   * slicing is controlled by driver activity.  In either case, the duration
   * of the time slice is only approximate; during times of heavy activity,
   * time slices may become longer and the activity level may be over-
   * estimated.
   */

   now = clock_systimer();
   if (now - g_pmglobals.stime >= TIME_SLICE_TICKS)
    {
       int16_t accum;

       /* Sample the count, reset the time and count, and assess the PM
        * state.  This is an atomic operation because interrupts are
        * still disabled.
        */

       accum             = g_pmglobals.accum;
       g_pmglobals.stime = now;
       g_pmglobals.accum = 0;

       /* Reassessing the PM state may require some computation.  However,
        * the work will actually be performed on a worker thread at a user-
        * controlled priority.
        */

       (void)pm_update(accum);
    }
  irqrestore(flags);

  /* Return the recommended state.  Assuming that we are called from the
   * IDLE thread at the lowest priority level, any updates scheduled on the
   * worker thread above should have already been performed and the recommended
   * state should be current:
   */

  return g_pmglobals.recommended;
}
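
The recommended state computed above is typically consumed from the IDLE loop. Below is a minimal sketch of that polling pattern, assuming the single-domain NuttX PM API used in this example (pm_checkstate()/pm_changestate()); the up_idlepm() hook name is illustrative.

static void up_idlepm(void)
{
  static enum pm_state_e oldstate = PM_NORMAL;
  enum pm_state_e newstate;

  /* Ask the PM subsystem for the recommended state */

  newstate = pm_checkstate();
  if (newstate != oldstate)
    {
      /* Attempt the transition; drivers may refuse it, in which case
       * pm_changestate() returns a non-zero value and we stay put.
       */

      if (pm_changestate(newstate) == 0)
        {
          oldstate = newstate;
        }
    }
}
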
int pingpong_main(int argc, char *argv[])
{
    char* arg = NULL;
    if (argc >= 1)
    {
        arg = argv[1];
        if (arg != NULL)
        {
            // make a copy, because nuttxshell will delete this memory as soon as we return
            if (strcmp(arg, "stop") == 0 && ContainerProcess != NULL)
            {
                PrtStopProcess(ContainerProcess);
                ContainerProcess = NULL;
                return 0;
            }
            else
            {
                int len = strlen(arg);
                if (len > 0)
                {
                    char* copy = malloc(len+1);
                    if (copy != NULL)   // guard against allocation failure
                    {
                        strcpy(copy, arg);
                    }
                    arg = copy;         // NULL is handled like no argument
                } else {
                    arg = NULL;
                }
            }
        }
    }

    if (ContainerProcess != NULL)
    {
        int32_t elapsed = clock_systimer() - start;
        // Multiply before dividing: TICK_PER_SEC may be less than 1000
        // (commonly 100), in which case (TICK_PER_SEC / 1000) would be zero.
        int32_t milliseconds = elapsed * 1000 / TICK_PER_SEC;
        printf("PingPong is running, so far %d steps in %d ms\n", steps, milliseconds);
        return 0;
    }

    // Start the pingpong thread from the worker thread rather than from
    // this thread.  NuttxShell has a waitpid() call waiting for this task
    // to complete; if we started the pingpong thread here, this task would
    // not be considered complete even after this method returns, because
    // it would still have a live child thread running the state machine.
    // Queueing the start on the worker thread lets NuttxShell continue
    // processing input.
    steps = 0;
    work_queue(LPWORK, &_work, (worker_t)StartPingPong, arg, 1);
    return 0;
}
static void CountSteps(void)
{
    steps++;
    intervalSteps++;

    int32_t now = clock_systimer();
    int32_t elapsed = now - intervalStart;
    // Multiply before dividing so a TICK_PER_SEC below 1000 cannot
    // produce a divide-by-zero.
    int32_t milliseconds = elapsed * 1000 / TICK_PER_SEC;
    if (milliseconds > intervalLength)
    {
        PrintReport();
        intervalStart = now;
        intervalSteps = 0;
    }

}
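
PrintReport() is referenced by CountSteps() but not shown. A plausible sketch, using only the steps, intervalSteps, and intervalLength globals visible above (the body is hypothetical):

/* Hypothetical reporting helper: print the step rate for the interval
 * that just ended.  intervalSteps and intervalLength (in ms) are the
 * globals maintained by CountSteps() above.
 */

static void PrintReport(void)
{
    printf("PingPong: %d steps in the last %d ms (total %d)\n",
           intervalSteps, intervalLength, steps);
}
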
Example #4
static void recvfrom_init(FAR struct socket *psock, FAR void *buf, size_t len,
#ifdef CONFIG_NET_IPv6
                          FAR struct sockaddr_in6 *infrom,
#else
                          FAR struct sockaddr_in *infrom,
#endif
                          struct recvfrom_s *pstate)
{
  /* Initialize the state structure. */

  memset(pstate, 0, sizeof(struct recvfrom_s));
  (void)sem_init(&pstate->rf_sem, 0, 0); /* Doesn't really fail */
  pstate->rf_buflen    = len;
  pstate->rf_buffer    = buf;
  pstate->rf_from      = infrom;

  /* Set up the start time for the timeout */

  pstate->rf_sock      = psock;
#if defined(CONFIG_NET_SOCKOPTS) && !defined(CONFIG_DISABLE_CLOCK)
  pstate->rf_starttime = clock_systimer();
#endif
}
Example #5
int uip_ping(uip_ipaddr_t addr, uint16_t id, uint16_t seqno,
             uint16_t datalen, int dsecs)
{
  struct icmp_ping_s state;
  uip_lock_t save;

  /* Initialize the state structure */

  sem_init(&state.png_sem, 0, 0);
  state.png_ticks  = DSEC2TICK(dsecs); /* System ticks to wait */
  state.png_result = -ENOMEM;          /* Assume allocation failure */
  state.png_addr   = addr;             /* Address of the peer to be ping'ed */
  state.png_id     = id;               /* The ID to use in the ECHO request */
  state.png_seqno  = seqno;            /* The seqno to use in the ECHO request */
  state.png_datlen = datalen;          /* The length of data to send in the ECHO request */
  state.png_sent   = false;            /* ECHO request not yet sent */

  save             = uip_lock();
  state.png_time   = clock_systimer();

  /* Set up the callback */

  state.png_cb = uip_icmpcallbackalloc();
  if (state.png_cb)
    {
      state.png_cb->flags   = UIP_POLL|UIP_ECHOREPLY;
      state.png_cb->priv    = (void*)&state;
      state.png_cb->event   = ping_interrupt;
      state.png_result      = -EINTR; /* Assume sem-wait interrupted by signal */

      /* Notify the device driver of the availability of TX data */

      netdev_txnotify(&state.png_addr);

      /* Wait for either the full round trip transfer to complete or
       * for timeout to occur. (1) uip_lockedwait will also terminate if a
       * signal is received, (2) interrupts may be disabled!  They will
       * be re-enabled while the task sleeps and automatically
       * re-enabled when the task restarts.
       */

      nlldbg("Start time: 0x%08x seqno: %d\n", state.png_time, seqno);
      uip_lockedwait(&state.png_sem);

      uip_icmpcallbackfree(state.png_cb);
    }
  uip_unlock(save);

  /* Return the negated error number in the event of a failure, or the
   * sequence number of the ECHO reply on success.
   */

  if (!state.png_result)
    {
      nlldbg("Return seqno=%d\n", state.png_seqno);
      return (int)state.png_seqno;
    }
  else
    {
      nlldbg("Return error=%d\n", -state.png_result);
      return state.png_result;
    }
}
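
A usage sketch for uip_ping(), relying only on the conventions visible above: dsecs is a timeout in deciseconds, and the return value is the echoed sequence number on success or a negated errno on failure. The peer address, ID, and sequence number below are arbitrary examples.

void ping_example(void)
{
  uip_ipaddr_t addr;
  int ret;

  uip_ipaddr(addr, 192, 168, 0, 2);     /* Example peer address */

  ret = uip_ping(addr, 1, 1, 56, 50);   /* 56 data bytes, 5 second timeout */
  if (ret >= 0)
    {
      printf("ECHO reply received: seqno=%d\n", ret);
    }
  else
    {
      printf("ping failed: %d\n", ret);
    }
}
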
ssize_t psock_tcp_send(FAR struct socket *psock,
                       FAR const void *buf, size_t len)
{
  FAR struct tcp_conn_s *conn;
  struct send_s state;
  net_lock_t save;
  int err;
  int ret = OK;

  /* Verify that the sockfd corresponds to a valid, allocated socket */

  if (!psock || psock->s_crefs <= 0)
    {
      ndbg("ERROR: Invalid socket\n");
      err = EBADF;
      goto errout;
    }

  /* If this is an un-connected socket, then return ENOTCONN */

  if (psock->s_type != SOCK_STREAM || !_SS_ISCONNECTED(psock->s_flags))
    {
      ndbg("ERROR: Not connected\n");
      err = ENOTCONN;
      goto errout;
    }

  /* Make sure that we have the IP address mapping */

  conn = (FAR struct tcp_conn_s *)psock->s_conn;
  DEBUGASSERT(conn);

#if defined(CONFIG_NET_ARP_SEND) || defined(CONFIG_NET_ICMPv6_NEIGHBOR)
#ifdef CONFIG_NET_ARP_SEND
#ifdef CONFIG_NET_ICMPv6_NEIGHBOR
  if (psock->s_domain == PF_INET)
#endif
    {
      /* Make sure that the IP address mapping is in the ARP table */

      ret = arp_send(conn->u.ipv4.raddr);
    }
#endif /* CONFIG_NET_ARP_SEND */


#ifdef CONFIG_NET_ICMPv6_NEIGHBOR
#ifdef CONFIG_NET_ARP_SEND
  else
#endif
    {
      /* Make sure that the IP address mapping is in the Neighbor Table */

      ret = icmpv6_neighbor(conn->u.ipv6.raddr);
    }
#endif /* CONFIG_NET_ICMPv6_NEIGHBOR */

  /* Did we successfully get the address mapping? */

  if (ret < 0)
    {
      ndbg("ERROR: Not reachable\n");
      err = ENETUNREACH;
      goto errout;
    }
#endif /* CONFIG_NET_ARP_SEND || CONFIG_NET_ICMPv6_NEIGHBOR */

  /* Set the socket state to sending */

  psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_SEND);

  /* Perform the TCP send operation */

  /* Initialize the state structure.  This is done with interrupts
   * disabled because we don't want anything to happen until we
   * are ready.
   */

  save                = net_lock();
  memset(&state, 0, sizeof(struct send_s));
  (void)sem_init(&state.snd_sem, 0, 0);    /* Doesn't really fail */
  state.snd_sock      = psock;             /* Socket descriptor to use */
  state.snd_buflen    = len;               /* Number of bytes to send */
  state.snd_buffer    = buf;               /* Buffer to send from */

  if (len > 0)
    {
      /* Allocate resources to receive a callback */

      state.snd_cb = tcp_callback_alloc(conn);
      if (state.snd_cb)
        {
          /* Get the initial sequence number that will be used */

          state.snd_isn         = tcp_getsequence(conn->sndseq);

          /* There is no outstanding, unacknowledged data after this
           * initial sequence number.
           */

          conn->unacked         = 0;

          /* Set the initial time for calculating timeouts */

#ifdef CONFIG_NET_SOCKOPTS
          state.snd_time        = clock_systimer();
#endif
          /* Set up the callback in the connection */

          state.snd_cb->flags   = (TCP_ACKDATA | TCP_REXMIT | TCP_POLL |
                                   TCP_DISCONN_EVENTS);
          state.snd_cb->priv    = (FAR void *)&state;
          state.snd_cb->event   = tcpsend_interrupt;

          /* Notify the device driver of the availability of TX data */

          send_txnotify(psock, conn);

          /* Wait for the send to complete or an error to occur:  NOTES: (1)
           * net_lockedwait will also terminate if a signal is received, (2) interrupts
           * may be disabled!  They will be re-enabled while the task sleeps and
           * automatically re-enabled when the task restarts.
           */

          ret = net_lockedwait(&state.snd_sem);

          /* Make sure that no further interrupts are processed */

          tcp_callback_free(conn, state.snd_cb);
        }
    }

  sem_destroy(&state.snd_sem);
  net_unlock(save);

  /* Set the socket state to idle */

  psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_IDLE);

  /* Check for errors.  Errors are signalled by negative errno values
   * for the send length
   */

  if (state.snd_sent < 0)
    {
      err = state.snd_sent;
      goto errout;
    }

  /* If net_lockedwait failed, then we were probably reawakened by a signal. In
   * this case, net_lockedwait will have set errno appropriately.
   */

  if (ret < 0)
    {
      err = -ret;
      goto errout;
    }

  /* Return the number of bytes actually sent */

  return state.snd_sent;

errout:
  set_errno(err);
  return ERROR;
}
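
psock_tcp_send() may legitimately send fewer bytes than requested. A caller that needs all-or-error semantics can loop on the return value; a sketch, assuming only the return convention above (bytes sent, or ERROR with errno set):

ssize_t tcp_send_all(FAR struct socket *psock, FAR const void *buf,
                     size_t len)
{
  FAR const char *ptr = (FAR const char *)buf;
  size_t remaining = len;

  while (remaining > 0)
    {
      ssize_t nsent = psock_tcp_send(psock, ptr, remaining);
      if (nsent < 0)
        {
          return nsent;     /* errno has already been set */
        }

      ptr       += nsent;
      remaining -= nsent;
    }

  return (ssize_t)len;
}
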
Example #7
static inline bool pg_startfill(void)
{
  FAR void *vpage;
  int result;

  /* Remove the TCB at the head of the g_waitingforfill list and check if
   * there is any task waiting for a page fill. pg_dequeue will handle this
   * (plus some corner cases) and will return true if the next page fill
   * TCB was successfully dequeued.
   */

  if (pg_dequeue())
    {
      /* Call up_allocpage(tcb, &vpage). This architecture-specific function
       * will set aside a page in memory and map it to a virtual address
       * (vpage). If all available pages are in-use (the typical case), this
       * function will select a page in-use, un-map it, and make it
       * available.
       */

      pgllvdbg("Call up_allocpage(%p)\n", g_pftcb);
      result = up_allocpage(g_pftcb, &vpage);
      DEBUGASSERT(result == OK);

      /* Start the fill.  The exact way that the fill is started depends upon
       * the nature of the architecture-specific up_fillpage() function -- Is it
       * a blocking or a non-blocking call?
       */
#ifdef CONFIG_PAGING_BLOCKINGFILL
      /* If CONFIG_PAGING_BLOCKINGFILL is defined, then up_fillpage is a
       * blocking call. In this case, up_fillpage() will accept only (1) a
       * reference to the TCB that requires the fill. Architecture-specific
       * context information within the TCB will be sufficient to perform the
       * fill. And (2) the (virtual) address of the allocated page to be
       * filled. The resulting status of the fill will be provided by the
       * return value from up_fillpage().
       */

      pgllvdbg("Call up_fillpage(%p)\n", g_pftcb);
      result = up_fillpage(g_pftcb, vpage);
      DEBUGASSERT(result == OK);
#else
      /* If CONFIG_PAGING_BLOCKINGFILL is not defined, then up_fillpage is a
       * non-blocking call. In this case up_fillpage() will accept an
       * additional argument: the page fill worker thread will provide a
       * callback function, pg_callback.
       *
       * Calling up_fillpage will start an asynchronous page fill.
       * pg_callback will be called when the page fill is finished (or an
       * error occurs). This callback will probably run at interrupt level.
       */

      pgllvdbg("Call up_fillpage(%p)\n", g_pftcb);
      result = up_fillpage(g_pftcb, vpage, pg_callback);
      DEBUGASSERT(result == OK);

      /* Save the time that the fill was started.  This will be used to
       * check for timeouts.
       */

#ifdef CONFIG_PAGING_TIMEOUT_TICKS
      g_starttime = clock_systimer();
#endif

      /* Return and wait to be signaled for the next event -- the fill completion
       * event. While the fill is in progress, other tasks may execute. If
       * another page fault occurs during this time, the faulting task will be
       * blocked, its TCB will be added (in priority order) to g_waitingforfill
       * and the priority of the page worker task may be boosted. But no action
       * will be taken until the current page fill completes. NOTE: The IDLE
       * task must also be fully locked in memory. The IDLE task cannot be
       * blocked. In the case where all tasks are blocked waiting for a page
       * fill, the IDLE task must still be available to run.
       */
#endif /* CONFIG_PAGING_BLOCKINGFILL */

      return true;
    }

  pglldbg("Queue empty\n");
  return false;
}
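
For the non-blocking configuration, the shape of pg_callback is implied but not shown. The sketch below assumes the callback receives the TCB and a result code and merely records the result and wakes the paging worker; the g_fillresult flag, the g_pgworker pid, and the SIGWORK wakeup are illustrative assumptions.

/* Illustrative fill-complete callback for the non-blocking case.  This
 * will likely run at interrupt level, so it does nothing more than
 * save the result and signal the page fill worker thread.
 */

static void pg_callback(FAR struct tcb_s *tcb, int result)
{
  g_fillresult = result;              /* Hypothetical result flag */
  (void)kill(g_pgworker, SIGWORK);    /* Wake the paging worker */
}
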
Example #8
unsigned int sleep(unsigned int seconds)
{
  sigset_t set;
  struct timespec ts;
  struct siginfo value;
  irqstate_t flags;
  uint32_t start;
  int32_t elapsed;
  int32_t remaining = 0;

  /* Don't sleep if seconds == 0 */

  if (seconds)
    {
      /* Set up for the sleep.  Using the empty set means that we are not
       * waiting for any particular signal.  However, any unmasked signal
       * can still awaken sigtimedwait().
       */

      (void)sigemptyset(&set);
      ts.tv_sec  = seconds;
      ts.tv_nsec = 0;

      /* Interrupts are disabled around the following so that it is atomic */
       
      flags = irqsave();
 
      /* Get the current time then sleep for the requested time.  
       * sigtimedwait() cannot succeed.  It should always return error with
       * either (1) EAGAIN meaning that the timeout occurred, or (2) EINTR
       * meaning that some other unblocked signal was caught.
       */

      start = clock_systimer();
      (void)sigtimedwait(&set, &value, &ts);

      /* Calculate the elapsed time (in clock ticks) when we wake up from the sleep.
       * This is really only necessary if we were awakened from the sleep early
       * due to the receipt of a signal.
       */

      elapsed = clock_systimer() - start;
      irqrestore(flags);

      /* Get the remaining, un-waited seconds. Note that this calculation
       * truncates the elapsed seconds in the division.  We may have slept some
       * fraction of a second longer than this!  But if the calculation is less
       * than the 'seconds', we certainly did not sleep for the complete
       * requested interval.
       */

      remaining = (int32_t)seconds - elapsed / TICK_PER_SEC;

      /* Make sure that the remaining time is non-negative (it could be
       * negative if something exceptional happened while we were sleeping --
       * like the clock was reset or we went into a low power mode, OR if we
       * had to wait a long time to run again after calling sigtimedwait(),
       * making 'elapsed' bigger than it should have been).
       */

      if (remaining < 0)
        {
          remaining = 0;
        }
    }

  return (unsigned int)remaining;
}
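
Because sleep() returns the unslept remainder, a caller that must wait out the full interval even when signals cut the sleep short can simply loop on the return value:

/* Sleep for the full interval, resuming after any signal that
 * interrupts the wait.
 */

void sleep_full(unsigned int seconds)
{
  while (seconds > 0)
    {
      seconds = sleep(seconds);   /* Returns the unslept remainder */
    }
}
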
Example #9
void pm_activity(int priority)
{
  uint32_t now;
  uint32_t accum;
  irqstate_t flags;

  /* Just increment the activity count in the current time slice. The priority
   * is simply the number of counts that are added.
   */

  if (priority > 0)
    {
      /* Add the priority to the accumulated counts in a critical section. */

      flags = irqsave();
      accum = (uint32_t)g_pmglobals.accum + priority;

      /* Make sure that we do not overflow the underlying uint16_t representation */

      if (accum > INT16_MAX)
        {
          accum = INT16_MAX;
        }

      /* Save the updated count */

      g_pmglobals.accum = (int16_t)accum;

      /* Check the elapsed time.  In periods of low activity, time slicing is
       * controlled by IDLE loop polling; in periods of higher activity, time
       * slicing is controlled by driver activity.  In either case, the duration
       * of the time slice is only approximate; during times of heavy activity,
       * time slices may become longer and the activity level may be over-
       * estimated.
       */

      now = clock_systimer();
      if (now - g_pmglobals.stime >= TIME_SLICE_TICKS)
        {
          int16_t tmp;

          /* Sample the count, reset the time and count, and assess the PM
           * state.  This is an atomic operation because interrupts are
           * still disabled.
           */

          tmp               = g_pmglobals.accum;
          g_pmglobals.stime = now;
          g_pmglobals.accum = 0;

          /* Reassessing the PM state may require some computation.  However,
           * the work will actually be performed on a worker thread at a user-
           * controlled priority.
           */

          (void)pm_update(tmp);
        }

      irqrestore(flags);
    }
}
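
Drivers feed this accumulator by calling pm_activity() with a weight proportional to the importance of the event. A minimal sketch matching the single-domain API above; the weight of 10 and the button interrupt context are illustrative:

/* Illustrative driver interrupt handler: report activity so that the
 * PM logic above sees the device as busy.  pm_activity() is safe here
 * because it only updates counters inside a critical section.
 */

static int button_interrupt(int irq, FAR void *context)
{
  pm_activity(10);          /* Arbitrary example weight */

  /* ... normal interrupt processing ... */

  return OK;
}
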
Example #10
static uint16_t recvfrom_tcpinterrupt(struct uip_driver_s *dev, void *conn,
                                      void *pvpriv, uint16_t flags)
{
  struct recvfrom_s *pstate = (struct recvfrom_s *)pvpriv;

  nllvdbg("flags: %04x\n", flags);

  /* 'priv' might be null in some race conditions (?) */

  if (pstate)
    {
      /* If new data is available, then complete the read action. */

      if ((flags & UIP_NEWDATA) != 0)
        {
          /* Copy the data from the packet (saving any unused bytes from the
           * packet in the read-ahead buffer).
           */

          recvfrom_newtcpdata(dev, pstate);

          /* Save the sender's address in the caller's 'from' location */

          recvfrom_tcpsender(dev, pstate);

          /* Indicate that the data has been consumed and that an ACK
           * should be sent.
           */

          flags = (flags & ~UIP_NEWDATA) | UIP_SNDACK;

          /* Check for transfer complete.  We will consider the transfer
           * complete in one of two different ways, depending on the setting
           * of CONFIG_NET_TCP_RECVDELAY.
           *
           * 1) If CONFIG_NET_TCP_RECVDELAY == 0 then we will consider the
           *    TCP/IP transfer complete as soon as any data has been received.
           *    This is safe because if any additional data is received, it
           *    will be retained in the TCP/IP read-ahead buffer until the
           *    next receive is performed.
           * 2) CONFIG_NET_TCP_RECVDELAY > 0 may be set to wait a little
           *    bit to determine if more data will be received.  You might
           *    do this if read-ahead buffering is disabled and we want to
           *    minimize the loss of back-to-back packets.  In this case,
           *    the transfer is complete when either (1) the entire user
           *    buffer is full or (2) the receive timeout occurs (below).
           */

#if CONFIG_NET_TCP_RECVDELAY > 0
          if (pstate->rf_buflen == 0)
#else
          if (pstate->rf_recvlen > 0)
#endif
            {
              nllvdbg("TCP resume\n");

              /* The TCP receive buffer is full.  Return now and don't allow
               * any further TCP call backs.
               */

              pstate->rf_cb->flags   = 0;
              pstate->rf_cb->priv    = NULL;
              pstate->rf_cb->event   = NULL;

              /* Wake up the waiting thread, returning the number of bytes
               * actually read.
               */

              sem_post(&pstate->rf_sem);
            }

            /* Reset the timeout.  We will want a short timeout to terminate
             * the TCP receive.
             */

#if defined(CONFIG_NET_SOCKOPTS) && !defined(CONFIG_DISABLE_CLOCK)
            pstate->rf_starttime = clock_systimer();
#endif
        }

      /* Check for a loss of connection.
       *
       * UIP_CLOSE: The remote host has closed the connection
       * UIP_ABORT: The remote host has aborted the connection
       * UIP_TIMEDOUT: Connection aborted due to too many retransmissions.
       */

      else if ((flags & (UIP_CLOSE|UIP_ABORT|UIP_TIMEDOUT)) != 0)
        {
          nllvdbg("error\n");

          /* Stop further callbacks */

          pstate->rf_cb->flags   = 0;
          pstate->rf_cb->priv    = NULL;
          pstate->rf_cb->event   = NULL;

          /* If the peer gracefully closed the connection, then return zero
           * (end-of-file).  Otherwise, report a not-connected error
           */

          if ((flags & UIP_CLOSE) != 0)
            {
              pstate->rf_result = 0;
            }
          else
            {
              pstate->rf_result = -ENOTCONN;
            }

          /* Wake up the waiting thread */

          sem_post(&pstate->rf_sem);
        }

      /* No data has been received -- this is some other event... probably a
       * poll -- check for a timeout.
       */

#if defined(CONFIG_NET_SOCKOPTS) && !defined(CONFIG_DISABLE_CLOCK)
      else if (recvfrom_timeout(pstate))
        {
          /* Yes.. the timeout has elapsed... do not allow any further
           * callbacks
           */

          nllvdbg("TCP timeout\n");

          pstate->rf_cb->flags   = 0;
          pstate->rf_cb->priv    = NULL;
          pstate->rf_cb->event   = NULL;

          /* Report an error only if no data has been received. (If
           * CONFIG_NET_TCP_RECVDELAY == 0, then rf_recvlen should always
           * be zero here.)
           */

#if CONFIG_NET_TCP_RECVDELAY > 0
          if (pstate->rf_recvlen == 0)
#endif
            {
              /* Report the timeout error */

              pstate->rf_result = -EAGAIN;
            }

          /* Wake up the waiting thread, returning either the error -EAGAIN
           * that signals the timeout event or the data received up to the
           * point that the timeout occurred (no error).
           */

          sem_post(&pstate->rf_sem);
        }
#endif /* CONFIG_NET_SOCKOPTS && !CONFIG_DISABLE_CLOCK */
    }
  return flags;
}
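
recvfrom_timeout() is referenced above but not shown. Below is a sketch consistent with the fields initialized in recvfrom_init() (rf_starttime, rf_sock), assuming the socket carries a receive timeout in deciseconds (s_rcvtimeo) with zero meaning "no timeout"; those details are assumptions.

/* Sketch: has the configured receive timeout expired? */

static bool recvfrom_timeout(struct recvfrom_s *pstate)
{
  FAR struct socket *psock = pstate->rf_sock;

  if (psock && psock->s_rcvtimeo != 0)
    {
      /* Compare the elapsed ticks against the timeout */

      return (clock_systimer() - pstate->rf_starttime) >=
             DSEC2TICK(psock->s_rcvtimeo);
    }

  return false;
}
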
Example #11
static void slip_txtask(int argc, FAR char *argv[])
{
  FAR struct slip_driver_s *priv;
  unsigned int index = *(argv[1]) - '0';
  net_lock_t flags;
  systime_t msec_start;
  systime_t msec_now;
  unsigned int hsec;

  nerr("index: %d\n", index);
  DEBUGASSERT(index < CONFIG_NET_SLIP_NINTERFACES);

  /* Get our private data structure instance and wake up the waiting
   * initialization logic.
   */

  priv = &g_slip[index];
  slip_semgive(priv);

  /* Loop forever */

  msec_start = clock_systimer() * MSEC_PER_TICK;
  for (;  ; )
    {
      /* Wait for the timeout to expire (or until we are signaled that new
       * TX data is available).
       */

      slip_semtake(priv);
      if (!priv->txnodelay)
        {
          slip_semgive(priv);
          usleep(SLIP_WDDELAY);
        }
      else
        {
          priv->txnodelay = false;
          slip_semgive(priv);
        }

      /* Is the interface up? */

      if (priv->bifup)
        {
          /* Get exclusive access to the network (if it is already being
           * used by slip_rxtask, then we have to wait).
           */

          slip_semtake(priv);

          /* Poll the networking layer for new XMIT data. */

          flags = net_lock();
          priv->dev.d_buf = priv->txbuf;

          /* Has a half second elapsed since the last timer poll? */

          msec_now = clock_systimer() * MSEC_PER_TICK;
          hsec = (unsigned int)(msec_now - msec_start) / (MSEC_PER_SEC / 2);
          if (hsec)
            {
              /* Yes, perform the timer poll */

              (void)devif_timer(&priv->dev, slip_txpoll);
              msec_start += hsec * (MSEC_PER_SEC / 2);
            }
          else
            {
              /* No, perform the normal TX poll */

              (void)devif_poll(&priv->dev, slip_txpoll);
            }

          net_unlock(flags);
          slip_semgive(priv);
        }
    }
}
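
The "signaled" path above implies a companion TX-available callback that sets txnodelay and knocks the TX task out of its usleep(). One plausible shape is sketched below; the txpid field and the SIGALRM wakeup are assumptions about the rest of the driver.

static int slip_txavail(FAR struct net_driver_s *dev)
{
  FAR struct slip_driver_s *priv =
    (FAR struct slip_driver_s *)dev->d_private;

  if (priv->bifup)
    {
      /* Skip the polling delay and interrupt the TX task's sleep */

      priv->txnodelay = true;
      (void)kill(priv->txpid, SIGALRM);
    }

  return OK;
}
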
Example #12
void work_process(FAR struct usr_wqueue_s *wqueue)
{
  volatile FAR struct work_s *work;
  worker_t  worker;
  FAR void *arg;
  uint32_t elapsed;
  uint32_t remaining;
  uint32_t stick;
  uint32_t ctick;
  uint32_t next;
  int ret;

  /* Then process queued work.  Lock the work queue while we process items
   * in the work list.
   */

  next = wqueue->delay;
  ret = work_lock();
  if (ret < 0)
    {
      /* Break out earlier if we were awakened by a signal */

      return;
    }

  /* Get the time that we started this polling cycle in clock ticks. */

  stick = clock_systimer();

  /* And check each entry in the work queue.  Since we have locked the
   * work queue we know:  (1) we will not be suspended unless we do
   * so ourselves, and (2) there will be no changes to the work queue
   */

  work = (FAR struct work_s *)wqueue->q.head;
  while (work)
    {
      /* Is this work ready?  It is ready if there is no delay or if
       * the delay has elapsed. qtime is the time that the work was added
       * to the work queue.  It will always be greater than or equal to
       * zero.  Therefore a delay of zero will always execute immediately.
       */

      ctick   = clock_systimer();
      elapsed = ctick - work->qtime;
      if (elapsed >= work->delay)
        {
          /* Remove the ready-to-execute work from the list */

          (void)dq_rem((struct dq_entry_s *)work, &wqueue->q);

          /* Extract the work description from the entry (in case the work
           * instance is re-used after it has been de-queued).
           */

          worker = work->worker;

          /* Check for a race condition where the work may be nullified
           * before it is removed from the queue.
           */

          if (worker != NULL)
            {
              /* Extract the work argument (before unlocking the work queue) */

              arg = work->arg;

              /* Mark the work as no longer being queued */

              work->worker = NULL;

              /* Do the work.  Unlock the work queue while the work is being
               * performed... we don't have any idea how long this will take!
               */

              work_unlock();
              worker(arg);

              /* Now, unfortunately, since we unlocked the work queue we don't
               * know the state of the work list and we will have to start
               * back at the head of the list.
               */

              ret = work_lock();
              if (ret < 0)
                {
                  /* Break out earlier if we were awakened by a signal */

                  return;
                }

              work = (FAR struct work_s *)wqueue->q.head;
            }
          else
            {
              /* Cancelled.. Just move to the next work in the list with
               * the work queue still locked.
               */

              work = (FAR struct work_s *)work->dq.flink;
            }
        }
      else /* elapsed < work->delay */
        {
          /* This one is not ready.
           *
           * NOTE that elapsed is relative to the current time,
           * not the time of beginning of this queue processing pass.
           * So it may need an adjustment.
           */

          elapsed += (ctick - stick);
          if (elapsed > work->delay)
            {
              /* The delay has expired while we are processing */

              elapsed = work->delay;
            }

          /* Will it be ready before the next scheduled wakeup interval? */

          remaining = work->delay - elapsed;
          if (remaining < next)
            {
              /* Yes.. Then schedule to wake up when the work is ready */

              next = remaining;
            }

          /* Then try the next in the list. */

          work = (FAR struct work_s *)work->dq.flink;
        }
    }

  /* Get the delay (in clock ticks) since we started the sampling */

  elapsed = clock_systimer() - stick;
  if (elapsed < wqueue->delay && next > 0)
    {
      /* How much time would we need to delay to get to the end of the
       * sampling period?  The amount of time we delay should be the smaller
       * of the time to the end of the sampling period and the time to the
       * next work expiry.
       */

      remaining = wqueue->delay - elapsed;
      next      = MIN(next, remaining);

      /* Wait awhile to check the work list.  We will wait here until
       * either the time elapses or until we are awakened by a signal.
       * Interrupts will be re-enabled while we wait.
       */

      usleep(next * USEC_PER_TICK);
    }

  work_unlock();
}
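
For context, entries reach this queue through work_queue(). A minimal sketch of scheduling a deferred call on the user-mode queue; the handler, argument, and 50 ms delay are illustrative:

static struct work_s g_mywork;    /* Must persist until the work runs */

static void my_handler(FAR void *arg)
{
  printf("deferred work: arg=%p\n", arg);
}

void schedule_example(void)
{
  (void)work_queue(USRWORK, &g_mywork, my_handler, NULL, MSEC2TICK(50));
}
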
Example #13
static void work_process(struct wqueue_s *wqueue, int lock_id)
{
	volatile struct work_s *work;
	worker_t  worker;
	void *arg;
	uint64_t elapsed;
	uint32_t remaining;
	uint32_t next;

	/* Then process queued work.  We need to keep the work queue locked
	 * while we process items in the work list.
	 */

	next  = CONFIG_SCHED_WORKPERIOD;

	work_lock(lock_id);

	work  = (struct work_s *)wqueue->q.head;

	while (work) {
		/* Is this work ready?  It is ready if there is no delay or if
		 * the delay has elapsed. qtime is the time that the work was added
		 * to the work queue.  It will always be greater than or equal to
		 * zero.  Therefore a delay of zero will always execute immediately.
		 */

		elapsed = USEC2TICK(clock_systimer() - work->qtime);

		//printf("work_process: in ticks elapsed=%lu delay=%u\n", elapsed, work->delay);
		if (elapsed >= work->delay) {
			/* Remove the ready-to-execute work from the list */

			(void)dq_rem((struct dq_entry_s *)work, &wqueue->q);

			/* Extract the work description from the entry (in case the work
			 * instance is re-used after it has been de-queued).
			 */

			worker = work->worker;
			arg    = work->arg;

			/* Mark the work as no longer being queued */

			work->worker = NULL;

			/* Do the work.  Unlock the work queue while the work is being
			 * performed... we don't have any idea how long that will take!
			 */

			work_unlock(lock_id);

			if (!worker) {
				PX4_WARN("MESSED UP: worker = 0\n");

			} else {
				worker(arg);
			}

			/* Now, unfortunately, since we unlocked the work queue we don't
			 * know the state of the work list and we will have to start
			 * back at the head of the list.
			 */

			work_lock(lock_id);
			work  = (struct work_s *)wqueue->q.head;

		} else {
			/* This one is not ready.. will it be ready before the next
			 * scheduled wakeup interval?
			 */

			/* Here: elapsed < work->delay */
			remaining = USEC_PER_TICK * (work->delay - elapsed);

			if (remaining < next) {
				/* Yes.. Then schedule to wake up when the work is ready */

				next = remaining;
			}

			/* Then try the next in the list. */

			work = (struct work_s *)work->dq.flink;
		}
	}

	/* Wait awhile to check the work list.  We will wait here until either
	 * the time elapses or until we are awakened by a signal.
	 */
	work_unlock(lock_id);

	usleep(next);
}
Example #14
int nanosleep(FAR const struct timespec *rqtp, FAR struct timespec *rmtp)
{
  irqstate_t flags;
  systime_t starttick;
  sigset_t set;
  struct siginfo value;
  int errval;
#ifdef CONFIG_DEBUG_ASSERTIONS /* Warning avoidance */
  int ret;
#endif

  /* nanosleep() is a cancellation point */

  (void)enter_cancellation_point();

  if (!rqtp || rqtp->tv_nsec < 0 || rqtp->tv_nsec >= 1000000000)
    {
      errval = EINVAL;
      goto errout;
    }

  /* Get the start time of the wait.  Interrupts are disabled to prevent
   * timer interrupts while we do tick-related calculations before and
   * after the wait.
   */

  flags     = enter_critical_section();
  starttick = clock_systimer();

  /* Set up for the sleep.  Using the empty set means that we are not
   * waiting for any particular signal.  However, any unmasked signal can
   * still awaken sigtimedwait().
   */

  (void)sigemptyset(&set);

  /* nanosleep is a simple application of sigtimedwait. */

#ifdef CONFIG_DEBUG_ASSERTIONS /* Warning avoidance */
  ret = sigtimedwait(&set, &value, rqtp);
#else
  (void)sigtimedwait(&set, &value, rqtp);
#endif

  /* sigtimedwait() cannot succeed.  It should always return error with
   * either (1) EAGAIN meaning that the timeout occurred, or (2) EINTR
   * meaning that some other unblocked signal was caught.
   */

  errval = get_errno();
  DEBUGASSERT(ret < 0 && (errval == EAGAIN || errval == EINTR));

  if (errval == EAGAIN)
    {
      /* The timeout "error" is the normal, successful result */

      leave_critical_section(flags);
      leave_cancellation_point();
      return OK;
    }

  /* If we get here, the wait has failed because we were awakened by a
   * signal.  Return the amount of "unwaited" time if rmtp is non-NULL.
   */

  if (rmtp)
    {
      systime_t elapsed;
      systime_t remaining;
      int ticks;

      /* REVISIT: The conversion from time to ticks and back could
       * be avoided.  clock_timespec_subtract() would be used instead
       * to get the time difference.
       */

      /* First get the number of clock ticks that we were requested to
       * wait.
       */

      (void)clock_time2ticks(rqtp, &ticks);

      /* Get the number of ticks that we actually waited */

      elapsed = clock_systimer() - starttick;

      /* The difference between the number of ticks that we were requested
       * to wait and the number of ticks that we actually waited is the
       * amount of time that we failed to wait.
       */

      if (elapsed >= (uint32_t)ticks)
        {
          remaining = 0;
        }
      else
        {
          remaining = (uint32_t)ticks - elapsed;
        }

      (void)clock_ticks2time((int)remaining, rmtp);
    }

  leave_critical_section(flags);

errout:
  set_errno(errval);
  leave_cancellation_point();
  return ERROR;
}
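
Callers that must wait out the full interval can restart an interrupted nanosleep() from the remainder reported through rmtp. A standard POSIX usage sketch:

#include <time.h>
#include <errno.h>

/* Wait the full requested interval, resuming after EINTR */

void nanosleep_full(struct timespec req)
{
  struct timespec rem;

  while (nanosleep(&req, &rem) < 0 && errno == EINTR)
    {
      req = rem;    /* Resume with the un-waited remainder */
    }
}
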
Example #15
static inline int netclose_disconnect(FAR struct socket *psock)
{
  struct tcp_close_s state;
  FAR struct tcp_conn_s *conn;
  net_lock_t flags;
#ifdef CONFIG_NET_SOLINGER
  bool linger;
#endif
  int ret = OK;

  /* Interrupts are disabled here to avoid race conditions */

  flags = net_lock();
  conn = (FAR struct tcp_conn_s *)psock->s_conn;

  /* If we have a semi-permanent write buffer callback in place, then
   * release it now.
   */

#ifdef CONFIG_NET_TCP_WRITE_BUFFERS
  if (psock->s_sndcb)
    {
      tcp_callback_free(conn, psock->s_sndcb);
      psock->s_sndcb = NULL;
    }
#endif

  /* There shouldn't be any callbacks registered. */

  DEBUGASSERT(conn && conn->list == NULL);

  /* Check for the case where the host beat us and disconnected first */

  if (conn->tcpstateflags == TCP_ESTABLISHED &&
      (state.cl_cb = tcp_callback_alloc(conn)) != NULL)
    {
      /* Set up to receive TCP data event callbacks */

      state.cl_cb->flags = (TCP_NEWDATA | TCP_POLL | TCP_CLOSE | TCP_ABORT |
                            TCP_TIMEDOUT);
      state.cl_cb->event = netclose_interrupt;

#ifdef CONFIG_NET_SOLINGER
      /* Check for a lingering close */

      linger = _SO_GETOPT(psock->s_options, SO_LINGER);

      /* Has a lingering close been requested */

      if (linger)
        {
          /* A non-NULL value of the priv field means that lingering is
           * enabled.
           */

          state.cl_cb->priv  = (FAR void *)&state;

          /* Set up for the lingering wait */

          state.cl_psock     = psock;
          state.cl_result    = -EBUSY;
          sem_init(&state.cl_sem, 0, 0);

          /* Record the time that we started the wait (in ticks) */

          state.cl_start = clock_systimer();
        }
      else
#endif /* CONFIG_NET_SOLINGER */

        {
          /* We will close immediately. The NULL priv field signals this */

          state.cl_cb->priv  = NULL;

          /* No further references on the connection */

          conn->crefs = 0;
        }

      /* Notify the device driver of the availability of TX data */

      netdev_txnotify(conn->ripaddr);

#ifdef CONFIG_NET_SOLINGER
      /* Wait only if we are lingering */

      if (linger)
        {
          /* Wait for the disconnect event */

          (void)net_lockedwait(&state.cl_sem);

          /* We are now disconnected */

          sem_destroy(&state.cl_sem);
          tcp_callback_free(conn, state.cl_cb);

          /* Free the connection */

          conn->crefs = 0;          /* No more references on the connection */
          tcp_free(conn);           /* Free uIP resources */

          /* Get the result of the close */

          ret = state.cl_result;
        }
#endif /* CONFIG_NET_SOLINGER */
    }
  else
    {
      tcp_free(conn);
    }

  net_unlock(flags);
  return ret;
}
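
The lingering branch above is selected through the standard SO_LINGER socket option. An application-side sketch (requires CONFIG_NET_SOLINGER; the 5 second limit is an example):

#include <sys/socket.h>
#include <unistd.h>

void close_with_linger(int sd)
{
  struct linger ling;

  ling.l_onoff  = 1;    /* Enable lingering close */
  ling.l_linger = 5;    /* Wait up to 5 seconds */

  (void)setsockopt(sd, SOL_SOCKET, SO_LINGER, &ling, sizeof(ling));
  (void)close(sd);
}
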
Example #16
int work_thread(int argc, char *argv[])
{
  volatile FAR struct work_s *work;
  worker_t  worker;
  FAR void *arg;
  uint32_t elapsed;
  uint32_t remaining;
  uint32_t next;
  int usec;
  irqstate_t flags;

  /* Loop forever */

  usec = CONFIG_SCHED_WORKPERIOD;
  flags = irqsave();
  for (;;)
    {
      /* Wait awhile to check the work list.  We will wait here until either
       * the time elapses or until we are awakened by a signal.
       */

      usleep(usec);
      irqrestore(flags);

      /* First, perform garbage collection.  This cleans-up memory de-allocations
       * that were queued because they could not be freed in that execution
       * context (for example, if the memory was freed from an interrupt handler).
       * NOTE: If the work thread is disabled, this clean-up is performed by
       * the IDLE thread (at a very, very low priority).
       */

      sched_garbagecollection();

      /* Then process queued work.  We need to keep interrupts disabled while
       * we process items in the work list.
       */

      next  = CONFIG_SCHED_WORKPERIOD / USEC_PER_TICK;
      flags = irqsave();
      work  = (FAR struct work_s *)g_work.head;
      while (work)
        {
          /* Is this work ready?  It is ready if there is no delay or if
           * the delay has elapsed. qtime is the time that the work was added
           * to the work queue.  It will always be greater than or equal to
           * zero.  Therefore a delay of zero will always execute immediately.
           */

          elapsed = clock_systimer() - work->qtime;
          if (elapsed >= work->delay)
            {
              /* Remove the ready-to-execute work from the list */

              (void)dq_rem((struct dq_entry_s *)work, &g_work);

              /* Extract the work description from the entry (in case the work
               * instance is re-used after it has been de-queued).
               */

              worker = work->worker;
              arg    = work->arg;

              /* Mark the work as no longer being queued */

              work->worker = NULL;

              /* Do the work.  Re-enable interrupts while the work is being
               * performed... we don't have any idea how long that will take!
               */

              irqrestore(flags);
              worker(arg);

              /* Now, unfortunately, since we re-enabled interrupts we don't
               * know the state of the work list and we will have to start
               * back at the head of the list.
               */

              flags = irqsave();
              work  = (FAR struct work_s *)g_work.head;
            }
          else
            {
              /* This one is not ready.. will it be ready before the next
               * scheduled wakeup interval?
               */

              remaining = work->delay - elapsed;
              if (remaining < next)
                {
                  /* Yes.. Then schedule to wake up when the work is ready */

                  next = remaining;
                }
              
              /* Then try the next in the list. */

              work = (FAR struct work_s *)work->dq.flink;
            }
        }

      /* Now calculate the microsecond delay we should wait */

      usec = next * USEC_PER_TICK;
    }

  return OK; /* To keep some compilers happy */
}
Example #17
int sif_main(int argc, char *argv[])
{
    if (argc >= 2) {	
        if (!strcmp(argv[1], "init")) {
          return sif_init();
        }
        else if (!strcmp(argv[1], "gpio") && argc == 4) {
            vsn_sif.gpio[0] = atoi(argv[2]);
            vsn_sif.gpio[1] = atoi(argv[3]);
            sif_gpio1_update();
            sif_gpio2_update();
            printf("GPIO States: %2x %2x\n", vsn_sif.gpio[0], vsn_sif.gpio[1]);
            return 0;
        }
        else if (!strcmp(argv[1], "pwr") && argc == 3) {
            int val = atoi(argv[2]);
            //STM32_TIM_SETCOMPARE(vsn_sif.tim8, GPIO_OUT_PWRPWM_TIM8_CH, val);
            STM32_TIM_SETCOMPARE(vsn_sif.tim3, GPIO_OUT_PWM_TIM3_CH, val);
            return 0;
        }
        else if (!strcmp(argv[1], "time") && argc == 3) {
            struct timespec t_set;
            t_set.tv_sec = atoi(argv[2]);
            clock_settime(CLOCK_REALTIME, &t_set);
        }
        else if (!strcmp(argv[1], "free")) {
            size_t  page = 0, stpage = 0xFFFF;
            ssize_t status;
            do {
                status = up_progmem_ispageerased(page++);

                /* Is this the beginning of a new free-space section? */
                if (status == 0) {
                    if (stpage == 0xFFFF) stpage = page-1;
                }
                else {
                    if (stpage != 0xFFFF) {
                        printf("Free Range:\t%lu\t-\t%lu\n",
                               (unsigned long)stpage, (unsigned long)(page-2));
                        stpage = 0xFFFF;
                    }
                }
            }
            while (status >= 0);
            return 0;
        }
        else if (!strcmp(argv[1], "erase") && argc == 3) {
            size_t page = atoi(argv[2]);
            printf("Erase result: %d\n", up_progmem_erasepage(page));
            return 0;
        }
        else if (!strcmp(argv[1], "flash") && argc == 3) {
            size_t page = atoi(argv[2]);
            size_t addr = page * up_progmem_pagesize(page);

            printf("Write result: %d (writing to address %lxh)\n",
                up_progmem_write(addr, "Test", 4), (unsigned long)addr);
            return 0;
        }
        else if (!strcmp(argv[1], "i2c") && argc == 3) {
            int val = atoi(argv[2]);

            I2C_SETFREQUENCY(vsn_sif.i2c1, 100000);

            struct lis331dl_dev_s * lis = lis331dl_init(vsn_sif.i2c1, val);

            if (lis) {
                const struct lis331dl_vector_s * a;
                int i;
                uint32_t time_stamp = clock_systimer();

                /* Set to 400 Hz : 3 = 133 Hz/axis */

                lis331dl_setconversion(lis, false, true);

                /* Sample some values */

                for (i=0; i<1000;) {
                    if ((a = lis331dl_getreadings(lis))) {
                        i++;
                        printf("%d %d %d\n", a->x, a->y, a->z);
                    }
                    else if (errno != EAGAIN) {
                        printf("Readings errno %d\n", errno);
                        break;
                    }
                }

                printf("Time diff = %d\n", clock_systimer() - time_stamp);

                lis331dl_deinit(lis);
            }
            else printf("Exit point: errno=%d\n", errno);

            return 0;
        }
        else if (!strcmp(argv[1], "pga")) {
            int gain = atoi(argv[2]);

            gain = vsn_muxbus_setpgagain(gain);

            printf("Gain changed: %d\n", gain);
            return 0;
        }
        else if (!strcmp(argv[1], "cc")) {
            struct cc1101_dev_s * cc;
            uint8_t buf[64];
            int sta;

            cc = cc1101_init(vsn_sif.spi2, CC1101_PIN_GDO0, GPIO_CC1101_GDO0,
                &cc1101_rfsettings_ISM1_868MHzGFSK100kbps);

            if (cc) {

                /* Work-around: enable falling edge, event and interrupt */
                stm32_gpiosetevent(GPIO_CC1101_GDO0, false, true, true, cc1101_eventcb);

                /* Enable clock to ARM PLL, allowing to speed-up to 72 MHz */
                cc1101_setgdo(cc, CC1101_PIN_GDO2, CC1101_GDO_CLK_XOSC3);

                cc1101_setchannel(cc, 0);   /* AV Test Hex, receive on that channel */
                cc1101_receive(cc);          /* Enter RX mode */
                while (1)
                {
                    fflush(stdout);
                    sta = cc1101_read(cc, buf, 64);
                    if (sta > 0) {
                        printf("Received %d bytes: rssi=%d [dBm], LQI=%d (CRC %s)\n",
                            sta, cc1101_calcRSSIdBm(buf[sta-2]), buf[sta-1]&0x7F,
                            (buf[sta-1]&0x80)?"OK":"BAD");

                        cc1101_write(cc, buf, 61);
                        cc1101_send(cc);

                        printf("Packet send back\n");

                        cc1101_receive(cc);
                    }
                }
            }
        }
    }

    fprintf(stderr, "%s:\tinit\n\tgpio\tA B\n\tpwr\tval\n", argv[0]);

    fprintf(stderr, "rtc time = %u, time / systick = %u / %u\n",
        up_rtc_time(), time(NULL), clock_systimer());
    return -1;
}
Example #18
void work_process(FAR struct kwork_wqueue_s *wqueue, systime_t period, int wndx)
{
  volatile FAR struct work_s *work;
  worker_t  worker;
  irqstate_t flags;
  FAR void *arg;
  systime_t elapsed;
  systime_t remaining;
  systime_t stick;
  systime_t ctick;
  systime_t next;

  /* Then process queued work.  We need to keep interrupts disabled while
   * we process items in the work list.
   */

  next  = period;
  flags = irqsave();

  /* Get the time that we started this polling cycle in clock ticks. */

  stick = clock_systimer();

  /* And check each entry in the work queue.  Since we have disabled
   * interrupts we know:  (1) we will not be suspended unless we do
   * so ourselves, and (2) there will be no changes to the work queue
   */

  work = (FAR struct work_s *)wqueue->q.head;
  while (work)
    {
      /* Is this work ready?  It is ready if there is no delay or if
       * the delay has elapsed. qtime is the time that the work was added
       * to the work queue.  It will always be greater than or equal to
       * zero.  Therefore a delay of zero will always execute immediately.
       */

      ctick   = clock_systimer();
      elapsed = ctick - work->qtime;
      if (elapsed >= work->delay)
        {
          /* Remove the ready-to-execute work from the list */

          (void)dq_rem((struct dq_entry_s *)work, &wqueue->q);

          /* Extract the work description from the entry (in case the work
           * instance is re-used after it has been de-queued).
           */

          worker = work->worker;

          /* Check for a race condition where the work may be nullified
           * before it is removed from the queue.
           */

          if (worker != NULL)
            {
              /* Extract the work argument (before re-enabling interrupts) */

              arg = work->arg;

              /* Mark the work as no longer being queued */

              work->worker = NULL;

              /* Do the work.  Re-enable interrupts while the work is being
               * performed... we don't have any idea how long this will take!
               */

              irqrestore(flags);
              worker(arg);

              /* Now, unfortunately, since we re-enabled interrupts we don't
               * know the state of the work list and we will have to start
               * back at the head of the list.
               */

              flags = irqsave();
              work  = (FAR struct work_s *)wqueue->q.head;
            }
          else
            {
              /* Cancelled.. Just move to the next work in the list with
               * interrupts still disabled.
               */

              work = (FAR struct work_s *)work->dq.flink;
            }
        }
      else /* elapsed < work->delay */
        {
          /* This one is not ready.
           *
           * NOTE that elapsed is relative to the current time,
           * not the time of beginning of this queue processing pass.
           * So it may need an adjustment.
           */

          elapsed += (ctick - stick);
          if (elapsed > work->delay)
            {
              /* The delay has expired while we are processing */

              elapsed = work->delay;
            }

          /* Will it be ready before the next scheduled wakeup interval? */

          remaining = work->delay - elapsed;
          if (remaining < next)
            {
              /* Yes.. Then schedule to wake up when the work is ready */

              next = remaining;
            }

          /* Then try the next in the list. */

          work = (FAR struct work_s *)work->dq.flink;
        }
    }

#if defined(CONFIG_SCHED_LPWORK) && CONFIG_SCHED_LPNTHREADS > 0
  /* Value of zero for period means that we should wait indefinitely until
   * signalled.  This option is used only for the case where there are
   * multiple, low-priority worker threads.  In that case, only one of
   * the threads does the poll... the others simply wait to be signalled.
   * In all other cases period will be non-zero and equal to wqueue->delay.
   */

   if (period == 0)
     {
       sigset_t set;

       /* Wait indefinitely until signalled with SIGWORK */

       sigemptyset(&set);
       sigaddset(&set, SIGWORK);

       wqueue->worker[wndx].busy = false;
       DEBUGVERIFY(sigwaitinfo(&set, NULL));
       wqueue->worker[wndx].busy = true;
     }
   else
#endif
    {
      /* Get the delay (in clock ticks) since we started the sampling */

      elapsed = clock_systimer() - stick;
      if (elapsed < period && next > 0)
        {
          /* How much time would we need to delay to get to the end of the
           * sampling period?  The amount of time we delay should be the smaller
           * of the time to the end of the sampling period and the time to the
           * next work expiry.
           */

          remaining = period - elapsed;
          next      = MIN(next, remaining);

          /* Wait awhile to check the work list.  We will wait here until
           * either the time elapses or until we are awakened by a signal.
           * Interrupts will be re-enabled while we wait.
           */

          wqueue->worker[wndx].busy = false;
          usleep(next * USEC_PER_TICK);
          wqueue->worker[wndx].busy = true;
        }
    }

  irqrestore(flags);
}
static ssize_t uptime_read(FAR struct file *filep, FAR char *buffer,
                           size_t buflen)
{
  FAR struct uptime_file_s *attr;
  size_t linesize;
  off_t offset;
  ssize_t ret;

#ifdef CONFIG_SYSTEM_TIME64
  uint64_t ticktime;
#if !defined(CONFIG_HAVE_DOUBLE) || !defined(CONFIG_LIBC_FLOATINGPOINT)
  uint64_t sec;
#endif

#else
  uint32_t ticktime;
#if !defined(CONFIG_HAVE_DOUBLE) || !defined(CONFIG_LIBC_FLOATINGPOINT)
  uint32_t sec;
#endif
#endif

#if defined(CONFIG_HAVE_DOUBLE) && defined(CONFIG_LIBC_FLOATINGPOINT)
  double now;
#else
  unsigned int remainder;
  unsigned int csec;
#endif

  fvdbg("buffer=%p buflen=%d\n", buffer, (int)buflen);

  /* Recover our private data from the struct file instance */

  attr = (FAR struct uptime_file_s *)filep->f_priv;
  DEBUGASSERT(attr);

  /* If f_pos is zero, then sample the system time.  Otherwise, use
   * the cached system time from the previous read().  It is necessary
   * to save the cached value in case, for example, the user is reading
   * the time one byte at a time.  In that case, the time must remain
   * stable throughout the reads.
   */

  if (filep->f_pos == 0)
    {
#ifdef CONFIG_SYSTEM_TIME64
      /* 64-bit timer */

      ticktime = clock_systimer64();
#else
      /* 32-bit timer */

      ticktime = clock_systimer();
#endif

#if defined(CONFIG_HAVE_DOUBLE) && defined(CONFIG_LIBC_FLOATINGPOINT)
      /* Convert the system up time to a seconds + hundredths of seconds string */

      now       = (double)ticktime / (double)CLOCKS_PER_SEC;
      linesize  = snprintf(attr->line, UPTIME_LINELEN, "%10.2f\n", now);

#else
      /* Convert the system up time to seconds + hundredths of seconds */

      sec       = ticktime / CLOCKS_PER_SEC;
      remainder = (unsigned int)(ticktime % CLOCKS_PER_SEC);
      csec      = (100 * remainder + (CLOCKS_PER_SEC / 2)) / CLOCKS_PER_SEC;

      /* Make sure that rounding did not force the hundredths of a second above 99 */

      if (csec > 99)
        {
          sec++;
          csec -= 100;
        }

      /* Convert the seconds + hundredths of seconds to a string */

      linesize = snprintf(attr->line, UPTIME_LINELEN, "%7lu.%02u\n", sec, csec);

#endif
      /* Save the linesize in case we are re-entered with f_pos > 0 */

      attr->linesize = linesize;
    }

  /* Transfer the system up time to user receive buffer */

  offset = filep->f_pos;
  ret    = procfs_memcpy(attr->line, attr->linesize, buffer, buflen, &offset);

  /* Update the file offset */

  if (ret > 0)
    {
      filep->f_pos += ret;
    }

  return ret;
}
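
A consumer of this procfs entry just reads the file like any other. A sketch, assuming procfs is mounted at /proc so the entry appears as /proc/uptime:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

void print_uptime(void)
{
  char line[16];
  ssize_t n;
  int fd;

  fd = open("/proc/uptime", O_RDONLY);
  if (fd >= 0)
    {
      n = read(fd, line, sizeof(line) - 1);
      if (n > 0)
        {
          line[n] = '\0';
          printf("uptime: %s", line);   /* Line already ends in \n */
        }

      (void)close(fd);
    }
}
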
Example #20
static uint16_t ack_interrupt(FAR struct net_driver_s *dev, FAR void *pvconn,
                              FAR void *pvpriv, uint16_t flags)
{
  FAR struct sendfile_s *pstate = (FAR struct sendfile_s *)pvpriv;

  nllvdbg("flags: %04x\n", flags);

  if ((flags & TCP_ACKDATA) != 0)
    {
      FAR struct tcp_hdr_s *tcp;

#ifdef CONFIG_NET_SOCKOPTS
      /* Update the timeout */

      pstate->snd_time = clock_systimer();
#endif

      /* Get the offset address of the TCP header */

#ifdef CONFIG_NET_IPv6
#ifdef CONFIG_NET_IPv4
      if (IFF_IS_IPv6(dev->d_flags))
#endif
        {
          DEBUGASSERT(pstate->snd_sock->s_domain == PF_INET6);
          tcp = TCPIPv6BUF;
        }
#endif /* CONFIG_NET_IPv6 */

#ifdef CONFIG_NET_IPv4
#ifdef CONFIG_NET_IPv6
      else
#endif
        {
          DEBUGASSERT(pstate->snd_sock->s_domain == PF_INET);
          tcp = TCPIPv4BUF;
        }
#endif /* CONFIG_NET_IPv4 */

      /* The current acknowledgement number is the (relative) offset of the
       * next byte needed by the receiver.  The snd_isn is the offset of the
       * first byte to send to the receiver.  The difference is the number
       * of bytes to be acknowledged.
       */

      pstate->snd_acked = tcp_getsequence(tcp->ackno) - pstate->snd_isn;
      nllvdbg("ACK: acked=%d sent=%d flen=%d\n",
             pstate->snd_acked, pstate->snd_sent, pstate->snd_flen);

      dev->d_sndlen = 0;

      flags &= ~TCP_ACKDATA;
    }
  else if ((flags & TCP_REXMIT) != 0)
    {
      nlldbg("REXMIT\n");

      /* Yes.. in this case, reset the number of bytes that have been sent
       * to the number of bytes that have been ACKed.
       */

      pstate->snd_sent = pstate->snd_acked;
    }

  /* Check for a loss of connection */

  else if ((flags & TCP_DISCONN_EVENTS) != 0)
    {
      /* Report not connected */

      nlldbg("Lost connection\n");

      net_lostconnection(pstate->snd_sock, flags);
      pstate->snd_sent = -ENOTCONN;
    }

  /* Wake up the waiting thread */

  sem_post(&pstate->snd_sem);

  return flags;
}
Example #21
static int ft80x_fade(FAR struct ft80x_dev_s *priv,
                      FAR const struct ft80x_fade_s *fade)
{
  systime_t start;
  systime_t elapsed;
  int32_t delay;
  int32_t duty;
  int16_t endduty;
  int16_t delta;

  /* 0% corresponds to the value 0, but 100% corresponds to the value 128 */

  endduty = (uint16_t)((uint16_t)fade->duty << 7) / 100;

  /* Get the change in duty from the current to the terminal duty. */

  startduty = (int32_t)(ft80x_read_byte(priv, FT80X_REG_PWM_DUTY) & 0x7f);
  duty      = startduty;
  delta     = endduty - (int16_t)startduty;

  /* The "smoothness" of the steps will depend on the resolution of the
   * system timer.  The minimum delay is <= 2 * system_clock_period.
   *
   * We will try for a FADE_STEP_MSEC delay, but we will try to adapt to
   * whatever we get if we are working close to the system timer resolution.
   * For human factors reasons, any delay less than 100 ms or so should
   * appear more or less smooth.
   *
   * The delay calculation should never overflow:
   *
   *   Max delay:        16,700 msec (MAX_FADE_DELAY)
   *   Min clock period: 1 usec
   *   Max delay:        16,700,000 ticks
   *   INT32_MAX         2,147,483,647
   */

  delay = MSEC2TICK((int32_t)fade->delay);
  if (delay <= 0)
    {
      delay = 1;
    }

  start = clock_systimer();

  do
    {
      /* Wait for FADE_STEP_MSEC msec (or whatever we get) */

      (void)nxsig_usleep(FADE_STEP_MSEC * 1000);

      /* Get the elapsed time */

      elapsed = clock_systimer() - start;
      if (elapsed > INT32_MAX || (int32_t)elapsed >= delay)
        {
          duty = endduty;
        }
      else
        {
          /* Interpolate to get the next PWM duty in the fade.  This
           * calculation should never overflow:
           *
           *   Max delta:       128
           *   Max elapsed:     16,700,000 ticks
           *   Max numerator:   2,137,600,000
           *   Min denominator: 1
           *   Max duty:        2,137,600,000
           *   INT32_MAX        2,147,483,647
           */

          duty = startduty + ((int32_t)delta * (int32_t)elapsed) / delay;
          if (duty > 128)
            {
              duty = 128;
            }
          else if (duty < 0)
            {
              duty = 0;
            }
        }

      /* Then set the new backlight PWM duty */

      ft80x_write_byte(priv, FT80X_REG_PWM_DUTY, (uint8_t)duty);
    }
  while (duty != endduty);

  return OK;
}
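The interpolation step can be factored out to make the ramp easier to test. A minimal sketch under the same assumptions as above (duty range 0..128, elapsed < delay; the helper name is illustrative):

#include <stdint.h>

/* Linearly interpolate a PWM duty in [0, 128].  startduty is the duty when
 * the fade began, delta is endduty - startduty, and elapsed/delay are in
 * system ticks with 0 <= elapsed < delay.
 */

static int32_t fade_duty(int32_t startduty, int32_t delta,
                         int32_t elapsed, int32_t delay)
{
  int32_t duty = startduty + (delta * elapsed) / delay;

  /* Clamp to the valid PWM duty range */

  if (duty > 128)
    {
      duty = 128;
    }
  else if (duty < 0)
    {
      duty = 0;
    }

  return duty;
}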
Example #22
ssize_t net_sendfile(int outfd, struct file *infile, off_t *offset,
                     size_t count)
{
  FAR struct socket *psock = sockfd_socket(outfd);
  FAR struct tcp_conn_s *conn;
  struct sendfile_s state;
  net_lock_t save;
#if defined(CONFIG_NET_ARP_SEND) || defined(CONFIG_NET_ICMPv6_NEIGHBOR)
  int ret;
#endif
  int err = 0;

  /* Verify that the sockfd corresponds to a valid, allocated socket */

  if (!psock || psock->s_crefs <= 0)
    {
      ndbg("ERROR: Invalid socket\n");
      err = EBADF;
      goto errout;
    }

  /* If this is an un-connected socket, then return ENOTCONN */

  if (psock->s_type != SOCK_STREAM || !_SS_ISCONNECTED(psock->s_flags))
    {
      ndbg("ERROR: Not connected\n");
      err = ENOTCONN;
      goto errout;
    }

  /* Make sure that we have the IP address mapping */

  conn = (FAR struct tcp_conn_s *)psock->s_conn;
  DEBUGASSERT(conn);

#if defined(CONFIG_NET_ARP_SEND) || defined(CONFIG_NET_ICMPv6_NEIGHBOR)
#ifdef CONFIG_NET_ARP_SEND
#ifdef CONFIG_NET_ICMPv6_NEIGHBOR
  if (psock->s_domain == PF_INET)
#endif
    {
      /* Make sure that the IP address mapping is in the ARP table */

      ret = arp_send(conn->u.ipv4.raddr);
    }
#endif /* CONFIG_NET_ARP_SEND */


#ifdef CONFIG_NET_ICMPv6_NEIGHBOR
#ifdef CONFIG_NET_ARP_SEND
  else
#endif
    {
      /* Make sure that the IP address mapping is in the Neighbor Table */

      ret = icmpv6_neighbor(conn->u.ipv6.raddr);
    }
#endif /* CONFIG_NET_ICMPv6_NEIGHBOR */

  /* Did we successfully get the address mapping? */

  if (ret < 0)
    {
      ndbg("ERROR: Not reachable\n");
      err = ENETUNREACH;
      goto errout;
    }
#endif /* CONFIG_NET_ARP_SEND || CONFIG_NET_ICMPv6_NEIGHBOR */

  /* Set the socket state to sending */

  psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_SEND);

  /* Initialize the state structure.  This is done with interrupts
   * disabled because we don't want anything to happen until we
   * are ready.
   */

  save  = net_lock();

  memset(&state, 0, sizeof(struct sendfile_s));
  sem_init(&state.snd_sem, 0, 0);           /* Doesn't really fail */
  state.snd_sock    = psock;                /* Socket descriptor to use */
  state.snd_foffset = offset ? *offset : 0; /* Input file offset */
  state.snd_flen    = count;                /* Number of bytes to send */
  state.snd_file    = infile;               /* File to read from */

  /* Allocate resources to receive a callback */

  state.snd_datacb = tcp_callback_alloc(conn);

  if (state.snd_datacb == NULL)
    {
      nlldbg("Failed to allocate data callback\n");
      err = ENOMEM;
      goto errout_locked;
    }

  state.snd_ackcb = tcp_callback_alloc(conn);

  if (state.snd_ackcb == NULL)
    {
      nlldbg("Failed to allocate ack callback\n");
      err = ENOMEM;
      goto errout_datacb;
    }

  /* Get the initial sequence number that will be used */

  state.snd_isn          = tcp_getsequence(conn->sndseq);

  /* There is no outstanding, unacknowledged data after this
   * initial sequence number.
   */

  conn->unacked          = 0;

#ifdef CONFIG_NET_SOCKOPTS
  /* Set the initial time for calculating timeouts */

  state.snd_time         = clock_systimer();
#endif

  /* Set up the ACK callback in the connection */

  state.snd_ackcb->flags = (TCP_ACKDATA | TCP_REXMIT | TCP_DISCONN_EVENTS);
  state.snd_ackcb->priv  = (FAR void *)&state;
  state.snd_ackcb->event = ack_interrupt;

  /* Perform the TCP send operation */

  do
    {
      state.snd_datacb->flags = TCP_POLL;
      state.snd_datacb->priv  = (FAR void *)&state;
      state.snd_datacb->event = sendfile_interrupt;

      /* Notify the device driver of the availability of TX data */

      sendfile_txnotify(psock, conn);
      net_lockedwait(&state.snd_sem);
    }
  while (state.snd_sent >= 0 && state.snd_acked < state.snd_flen);

  /* Set the socket state to idle */

  psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_IDLE);

  tcp_callback_free(conn, state.snd_ackcb);

errout_datacb:
  tcp_callback_free(conn, state.snd_datacb);

errout_locked:

  sem_destroy(&state.snd_sem);
  net_unlock(save);

errout:

  if (err)
    {
      set_errno(err);
      return ERROR;
    }
  else if (state.snd_sent < 0)
    {
      set_errno(-state.snd_sent);
      return ERROR;
    }
  else
    {
      return state.snd_sent;
    }
}
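The errout labels above follow the usual NuttX socket-layer convention: internal failures carry a positive errno value in err, and the function reports them by setting errno and returning ERROR, while success returns a non-negative count. A minimal sketch of that shape (do_op and its argument are hypothetical):

#include <errno.h>

#define ERROR (-1)

static int do_op(int nbytes)
{
  int err = 0;

  if (nbytes < 0)
    {
      err = EINVAL;
      goto errout;
    }

  return nbytes;  /* Success: a non-negative result */

errout:
  errno = err;    /* Failure: errno holds the positive error code */
  return ERROR;
}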
Example #23
int pg_worker(int argc, char *argv[])
{
  irqstate_t flags;

  /* Loop forever -- Notice that interrupts will be disabled at all times
   * that this thread runs.  That is so that we don't lose signals or have
   * asynchronous page faults.
   *
   * All interrupt logic as well as all page fill worker thread logic must
   * be locked in memory.  Therefore, keeping interrupts disabled here
   * should prevent any concurrent page faults.  Any page faults or page
   * fill completions should occur while this thread sleeps.
   */

  pglldbg("Started\n");
  flags = irqsave();
  for (;;)
    {
      /* Wait awhile.  We will wait here until either the configurable timeout
       * elapses or until we are awakened by a signal (which terminates the
       * usleep with an EINTR error).  Note that interrupts will be re-enabled
       * while this task sleeps.
       *
       * The timeout is a failsafe that will handle any cases where a signal
       * is lost (that would really be a bug and shouldn't happen!) and also
       * supports timeouts for the case of non-blocking, asynchronous fills.
       */

      usleep(CONFIG_PAGING_WORKPERIOD);

      /* The page fill worker thread will be awakened on one of three conditions:
       *
       *   - When signaled by pg_miss(), the page fill worker thread will be
       *     awakened,
       *   - If CONFIG_PAGING_BLOCKINGFILL is not defined, from pg_callback()
       *     after completing a page fill, or
       *   - When a configurable timeout expires with no activity.
       *
       * Interrupts are still disabled.
       */

#ifndef CONFIG_PAGING_BLOCKINGFILL
      /* For the non-blocking up_fillpage(), the page fill worker thread will detect
       * that the page fill is complete when it is awakened with g_pftcb non-NULL
       * and fill completion status from pg_callback.
       */

      if (g_pftcb != NULL)
        {
          /* If it is a real page fill completion event, then the result of the page
           * fill will be in g_fillresult and will not be equal to -EBUSY.
           */

          if (g_fillresult != -EBUSY)
            {
              /* Any value other than OK brings the system down */

              ASSERT(g_fillresult == OK);

              /* Handle the successful page fill complete event by restarting the
               * task that was blocked waiting for this page fill.
               */

              pglldbg("Restarting TCB: %p\n", g_pftcb);
              up_unblock_task(g_pftcb);

              /* Yes .. Start the next asynchronous fill.  Check the return
               * value to see if a fill was actually started (false means
               * that no fill was started).
               */

              pgllvdbg("Calling pg_startfill\n");
              if (!pg_startfill())
                {
                  /* No fill was started.  This can mean only that all
                   * queued page fill actions have been completed and there
                   * is nothing more to do.
                   */

                  pgllvdbg("Call pg_alldone()\n");
                  pg_alldone();
                }
            }

          /* If a configurable timeout period expires with no page fill completion
           * event, then declare a failure.
           */

#ifdef CONFIG_PAGING_TIMEOUT_TICKS
          else
            {
              lldbg("Timeout!\n");
              ASSERT(clock_systimer() - g_starttime < CONFIG_PAGING_TIMEOUT_TICKS);
            }
#endif
        }

      /* Otherwise, this might be a page fill initiation event.  When
       * awakened from pg_miss(), no fill will be in progress and
       * g_pftcb will be NULL.
       */

      else
        {
          /* Are there tasks blocked and waiting for a fill?  If so,
           * pg_startfill() will start the asynchronous fill (and set
           * g_pftcb).
           */

           pgllvdbg("Calling pg_startfill\n");
           (void)pg_startfill();
        }
#else
      /* Are there tasks blocked and waiting for a fill?  Loop until all
       * pending fills have been processed.
       */

      for (;;)
        {
          /* Yes .. Start the fill and block until the fill completes.
           * Check the return value to see if a fill was actually performed
           * (false means that no fill was performed).
           */

          pgllvdbg("Calling pg_startfill\n");
          if (!pg_startfill())
            {
               /* Break out of the loop -- there is nothing more to do */

               break;
            }

          /* Handle the page fill complete event by restarting the
           * task that was blocked waiting for this page fill. In the
           * non-blocking fill case, the page fill worker thread will
           * know that the page fill is complete when pg_startfill()
           * returns true.
           */

          pgllvdbg("Restarting TCB: %p\n", g_pftcb);
          up_unblock_task(g_pftcb);
        }

      /* All queued fills have been processed */

      pgllvdbg("Call pg_alldone()\n");
      pg_alldone();
#endif
    }

  return OK; /* To keep some compilers happy */
}
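The usleep() call above doubles as both the failsafe timeout and the wakeup mechanism: a signal from pg_miss() or pg_callback() terminates the sleep early with EINTR. A standalone sketch of that wait pattern (the period constant is illustrative):

#include <unistd.h>
#include <errno.h>
#include <stdio.h>

#define WORKPERIOD_USEC 500000  /* illustrative work period */

static void wait_for_fill_event(void)
{
  /* usleep() returns -1 with errno == EINTR if a signal arrives before
   * the period elapses; otherwise the timeout acts as a failsafe.
   */

  if (usleep(WORKPERIOD_USEC) < 0 && errno == EINTR)
    {
      printf("awakened by signal\n");
    }
  else
    {
      printf("failsafe timeout\n");
    }
}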
Example #24
int icmp_ping(in_addr_t addr, uint16_t id, uint16_t seqno, uint16_t datalen,
              int dsecs)
{
  struct icmp_ping_s state;
  net_lock_t save;
#ifdef CONFIG_NET_ARP_SEND
  int ret;

  /* Make sure that the IP address mapping is in the ARP table */

  ret = arp_send(addr);
  if (ret < 0)
    {
      ndbg("ERROR: Not reachable\n");
      return -ENETUNREACH;
    }
#endif

  /* Initialize the state structure */

  sem_init(&state.png_sem, 0, 0);
  state.png_ticks  = DSEC2TICK(dsecs); /* System ticks to wait */
  state.png_result = -ENOMEM;          /* Assume allocation failure */
  state.png_addr   = addr;             /* Address of the peer to be ping'ed */
  state.png_id     = id;               /* The ID to use in the ECHO request */
  state.png_seqno  = seqno;            /* The seqno to use in the ECHO request */
  state.png_datlen = datalen;          /* The length of data to send in the ECHO request */
  state.png_sent   = false;            /* ECHO request not yet sent */

  save             = net_lock();
  state.png_time   = clock_systimer();

  /* Set up the callback */

  state.png_cb = icmp_callback_alloc();
  if (state.png_cb)
    {
      state.png_cb->flags   = (ICMP_POLL | ICMP_ECHOREPLY);
      state.png_cb->priv    = (void*)&state;
      state.png_cb->event   = ping_interrupt;
      state.png_result      = -EINTR; /* Assume sem-wait interrupted by signal */

      /* Notify the device driver of the availability of TX data */

#ifdef CONFIG_NETDEV_MULTINIC
      netdev_ipv4_txnotify(g_ipv4_allzeroaddr, state.png_addr);
#else
      netdev_ipv4_txnotify(state.png_addr);
#endif

      /* Wait for either the full round trip transfer to complete or
       * for timeout to occur. (1) net_lockedwait will also terminate if a
       * signal is received, (2) interrupts may be disabled!  They will
       * be re-enabled while the task sleeps and automatically
       * re-enabled when the task restarts.
       */

      nllvdbg("Start time: 0x%08x seqno: %d\n", state.png_time, seqno);
      net_lockedwait(&state.png_sem);

      icmp_callback_free(state.png_cb);
    }

  net_unlock(save);

  /* Return the negated error number in the event of a failure, or the
   * sequence number of the ECHO reply on success.
   */

  if (!state.png_result)
    {
      nllvdbg("Return seqno=%d\n", state.png_seqno);
      return (int)state.png_seqno;
    }
  else
    {
      nlldbg("Return error=%d\n", -state.png_result);
      return state.png_result;
    }
}
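A caller distinguishes the two outcomes by the sign of the return value. A hedged usage sketch (the address, ID, and timeout values are made up, and the address is assumed to already be in the byte order that icmp_ping() expects):

  /* Ping 192.168.1.1 once: 56 bytes of data, 10 decisecond timeout */

  int ret = icmp_ping(HTONL(0xc0a80101), getpid(), 1, 56, 10);
  if (ret >= 0)
    {
      printf("got reply, seqno=%d\n", ret);
    }
  else
    {
      printf("ping failed: errno=%d\n", -ret);
    }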
static uint16_t tcpsend_interrupt(FAR struct net_driver_s *dev,
                                  FAR void *pvconn,
                                  FAR void *pvpriv, uint16_t flags)
{
  FAR struct tcp_conn_s *conn = (FAR struct tcp_conn_s *)pvconn;
  FAR struct send_s *pstate = (FAR struct send_s *)pvpriv;

#ifdef CONFIG_NETDEV_MULTINIC
  /* The TCP socket is connected and, hence, should be bound to a device.
   * Make sure that the polling device is the one that we are bound to.
   */

  DEBUGASSERT(conn->dev != NULL);
  if (dev != conn->dev)
    {
      return flags;
    }
#endif

  nllvdbg("flags: %04x acked: %d sent: %d\n",
          flags, pstate->snd_acked, pstate->snd_sent);

  /* If this packet contains an acknowledgement, then update the count of
   * acknowledged bytes.
   */

  if ((flags & TCP_ACKDATA) != 0)
    {
      FAR struct tcp_hdr_s *tcp;

      /* Update the timeout */

#ifdef CONFIG_NET_SOCKOPTS
      pstate->snd_time = clock_systimer();
#endif

      /* Get the offset address of the TCP header */

#ifdef CONFIG_NET_IPv4
#ifdef CONFIG_NET_IPv6
      if (conn->domain == PF_INET)
#endif
        {
          DEBUGASSERT(IFF_IS_IPv4(dev->d_flags));
          tcp = TCPIPv4BUF;
        }
#endif /* CONFIG_NET_IPv4 */

#ifdef CONFIG_NET_IPv6
#ifdef CONFIG_NET_IPv4
      else
#endif
        {
          DEBUGASSERT(IFF_IS_IPv6(dev->d_flags));
          tcp = TCPIPv6BUF;
        }
#endif /* CONFIG_NET_IPv6 */

      /* The current acknowledgement number is the (relative) offset
       * of the next byte needed by the receiver.  The snd_isn is the
       * offset of the first byte to send to the receiver.  The difference
       * is the number of bytes to be acknowledged.
       */

      pstate->snd_acked = tcp_getsequence(tcp->ackno) - pstate->snd_isn;
      nllvdbg("ACK: acked=%d sent=%d buflen=%d\n",
              pstate->snd_acked, pstate->snd_sent, pstate->snd_buflen);

      /* Have all of the bytes in the buffer been sent and acknowledged? */

      if (pstate->snd_acked >= pstate->snd_buflen)
        {
          /* Yes.  Then pstate->snd_buflen should hold the number of bytes
           * actually sent.
           */

          goto end_wait;
        }

      /* No.. fall through to send more data if necessary */
    }

  /* Check if we are being asked to retransmit data */

  else if ((flags & TCP_REXMIT) != 0)
    {
      /* Yes.. in this case, reset the number of bytes that have been sent
       * to the number of bytes that have been ACKed.
       */

      pstate->snd_sent = pstate->snd_acked;

#if defined(CONFIG_NET_TCP_SPLIT)
      /* Reset the even/odd indicator to even since we need to
       * retransmit.
       */

      pstate->snd_odd = false;
#endif

      /* Fall through to re-send data from the last that was ACKed */
    }

  /* Check for a loss of connection */

  else if ((flags & TCP_DISCONN_EVENTS) != 0)
    {
      /* Report not connected */

      nllvdbg("Lost connection\n");

      net_lostconnection(pstate->snd_sock, flags);
      pstate->snd_sent = -ENOTCONN;
      goto end_wait;
    }

  /* Check if the outgoing packet is available (it may have been claimed
   * by a sendto interrupt serving a different thread).
   */

#if 0 /* We can't really support multiple senders on the same TCP socket */
  else if (dev->d_sndlen > 0)
    {
      /* Another thread has beat us sending data, wait for the next poll */

      return flags;
    }
#endif

  /* We get here if (1) not all of the data has been ACKed, (2) we have been
   * asked to retransmit data, (3) the connection is still healthy, and (4)
   * the outgoing packet is available for our use.  In this case, we are
   * now free to send more data to receiver -- UNLESS the buffer contains
   * unprocessed incoming data.  In that event, we will have to wait for the
   * next polling cycle.
   */

  if ((flags & TCP_NEWDATA) == 0 && pstate->snd_sent < pstate->snd_buflen)
    {
      uint32_t seqno;

      /* Get the amount of data that we can send in the next packet */

      uint32_t sndlen = pstate->snd_buflen - pstate->snd_sent;

#if defined(CONFIG_NET_TCP_SPLIT)

      /* RFC 1122 states that a host may delay ACKing for up to 500 ms but
       * must respond to every second segment.  The logic here will trick
       * the RFC 1122 recipient into responding sooner.  This logic will be
       * activated if:
       *
       *   1. An even number of packets has been sent (where zero is an even
       *      number),
       *   2. There is more data to be sent (more than or equal to
       *      CONFIG_NET_TCP_SPLIT_SIZE), but
       *   3. Not enough data for two packets.
       *
       * Then we will split the remaining, single packet into two partial
       * packets.  This will stimulate the RFC 1122 peer to ACK sooner.
       *
       * Don't try to split very small packets (less than CONFIG_NET_TCP_SPLIT_SIZE).
       * Only the first even packet and the last odd packets could have
       * sndlen less than CONFIG_NET_TCP_SPLIT_SIZE.  The value of sndlen on
       * the last even packet is guaranteed to be at least MSS/2 by the
       * logic below.
       */

      if (sndlen >= CONFIG_NET_TCP_SPLIT_SIZE)
        {
          /* sndlen is the number of bytes remaining to be sent.
           * conn->mss will provide the number of bytes that can be sent
           * in one packet.  The difference, then, is the number of bytes
           * that would be sent in the next packet after this one.
           */

          int32_t next_sndlen = sndlen - conn->mss;

          /*  Is this the even packet in the packet pair transaction? */

          if (!pstate->snd_odd)
            {
              /* next_sndlen <= 0 means that the entire remaining data
               * could fit into this single packet.  This is the condition
               * in which we must do the split.
               */

              if (next_sndlen <= 0)
                {
                  /* Split so that there will be an odd packet.  Here
                   * we know that 0 < sndlen <= MSS
                   */

                  sndlen = (sndlen / 2) + 1;
                }
            }

          /* No... this is the odd packet in the packet pair transaction */

          else
            {
              /* Will there be another (even) packet after this one?
               * (next_sndlen > 0)  Will the split condition occur on that
               * next, even packet? ((next_sndlen - conn->mss) < 0) If
               * so, then perform the split now to avoid the case where the
               * byte count is less than CONFIG_NET_TCP_SPLIT_SIZE on the
               * next pair.
               */

              if (next_sndlen > 0 && (next_sndlen - conn->mss) < 0)
                {
                  /* Here, we know that sndlen must be MSS < sndlen <= 2*MSS
                   * and so (sndlen / 2) is <= MSS.
                   */

                  sndlen /= 2;
                }
            }
        }

      /* Toggle the even/odd indicator */

      pstate->snd_odd ^= true;

#endif /* CONFIG_NET_TCP_SPLIT */

      if (sndlen > conn->mss)
        {
          sndlen = conn->mss;
        }

      /* Check if we have "space" in the window */

      if ((pstate->snd_sent - pstate->snd_acked + sndlen) < conn->winsize)
        {
          /* Set the sequence number for this packet.  NOTE:  The network updates
           * sndseq on receipt of ACK *before* this function is called.  In that
           * case sndseq will point to the next unacknowledged byte (which might
           * have already been sent).  We will overwrite the value of sndseq
           * here before the packet is sent.
           */

          seqno = pstate->snd_sent + pstate->snd_isn;
          nllvdbg("SEND: sndseq %08x->%08x\n", conn->sndseq, seqno);
          tcp_setsequence(conn->sndseq, seqno);

#ifdef NEED_IPDOMAIN_SUPPORT
          /* If both IPv4 and IPv6 support are enabled, then we will need to
           * select which one to use when generating the outgoing packet.
           * If only one domain is selected, then the setup is already in
           * place and we need do nothing.
           */

          tcpsend_ipselect(dev, pstate);
#endif
          /* Then set-up to send that amount of data. (this won't actually
           * happen until the polling cycle completes).
           */

          devif_send(dev, &pstate->snd_buffer[pstate->snd_sent], sndlen);

          /* Check if the destination IP address is in the ARP or Neighbor
           * table.  If not, then the send won't actually make it out... it
           * will be replaced with an ARP request or Neighbor Solicitation.
           */

          if (pstate->snd_sent != 0 || psock_send_addrchck(conn))
            {
              /* Update the amount of data sent (but not necessarily ACKed) */

              pstate->snd_sent += sndlen;
              nllvdbg("SEND: acked=%d sent=%d buflen=%d\n",
                      pstate->snd_acked, pstate->snd_sent, pstate->snd_buflen);

            }
        }
    }

#ifdef CONFIG_NET_SOCKOPTS
  /* All data has been sent and we are just waiting for ACK or re-transmit
   * indications to complete the send.  Check for a timeout.
   */

  if (send_timeout(pstate))
    {
      /* Yes.. report the timeout */

      nlldbg("SEND timeout\n");
      pstate->snd_sent = -ETIMEDOUT;
      goto end_wait;
    }
#endif /* CONFIG_NET_SOCKOPTS */

  /* Continue waiting */

  return flags;

end_wait:
  /* Do not allow any further callbacks */

  pstate->snd_cb->flags   = 0;
  pstate->snd_cb->priv    = NULL;
  pstate->snd_cb->event   = NULL;

  /* There are no outstanding, unacknowledged bytes */

  conn->unacked           = 0;

  /* Wake up the waiting thread */

  sem_post(&pstate->snd_sem);
  return flags;
}
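The even/odd split above is easiest to see with concrete numbers. A runnable sketch that mirrors just the split decision, assuming illustrative values for the MSS and CONFIG_NET_TCP_SPLIT_SIZE:

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>

#define MSS        536  /* illustrative conn->mss */
#define SPLIT_SIZE 40   /* illustrative CONFIG_NET_TCP_SPLIT_SIZE */

/* Compute the number of bytes to send in one packet, given the bytes
 * remaining and the even/odd position in the packet pair.
 */

static uint32_t split_len(uint32_t sndlen, bool odd)
{
  if (sndlen >= SPLIT_SIZE)
    {
      int32_t next_sndlen = (int32_t)sndlen - MSS;

      if (!odd && next_sndlen <= 0)
        {
          sndlen = (sndlen / 2) + 1;  /* force a trailing odd packet */
        }
      else if (odd && next_sndlen > 0 && (next_sndlen - MSS) < 0)
        {
          sndlen /= 2;                /* pre-split the next pair */
        }
    }

  return sndlen > MSS ? MSS : sndlen;
}

int main(void)
{
  printf("%u\n", (unsigned int)split_len(300, false)); /* 151: split */
  printf("%u\n", (unsigned int)split_len(700, false)); /* 536: MSS cap */
  return 0;
}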
Example #26
static uint16_t send_interrupt(struct uip_driver_s *dev, void *pvconn,
                               void *pvpriv, uint16_t flags)
{
  struct uip_conn *conn = (struct uip_conn*)pvconn;
  struct send_s *pstate = (struct send_s *)pvpriv;

  nllvdbg("flags: %04x acked: %d sent: %d\n",
          flags, pstate->snd_acked, pstate->snd_sent);

  /* If this packet contains an acknowledgement, then update the count of
   * acknowledged bytes.
   */

  if ((flags & UIP_ACKDATA) != 0)
    {
      /* The current acknowledgement number is the (relative) offset
       * of the next byte needed by the receiver.  The snd_isn is the
       * offset of the first byte to send to the receiver.  The difference
       * is the number of bytes to be acknowledged.
       */

      pstate->snd_acked = uip_tcpgetsequence(TCPBUF->ackno) - pstate->snd_isn;
      nllvdbg("ACK: acked=%d sent=%d buflen=%d\n",
              pstate->snd_acked, pstate->snd_sent, pstate->snd_buflen);

      /* Have all of the bytes in the buffer been sent and acknowledged? */

      if (pstate->snd_acked >= pstate->snd_buflen)
        {
          /* Yes.  Then pstate->snd_buflen should hold the number of bytes
           * actually sent.
           */

          goto end_wait;
        }

      /* No.. fall through to send more data if necessary */
    }

  /* Check if we are being asked to retransmit data */

  else if ((flags & UIP_REXMIT) != 0)
    {
      /* Yes.. in this case, reset the number of bytes that have been sent
       * to the number of bytes that have been ACKed.
       */

      pstate->snd_sent = pstate->snd_acked;

      /* Fall through to re-send data from the last that was ACKed */
    }

  /* Check for a loss of connection */

  else if ((flags & (UIP_CLOSE|UIP_ABORT|UIP_TIMEDOUT)) != 0)
    {
      /* Report not connected */

      nllvdbg("Lost connection\n");
      pstate->snd_sent = -ENOTCONN;
      goto end_wait;
    }

  /* Check if the outgoing packet is available (it may have been claimed
   * by a sendto interrupt serving a different thread).
   */

#if 0 /* We can't really support multiple senders on the same TCP socket */
  else if (dev->d_sndlen > 0)
    {
      /* Another thread has beat us sending data, wait for the next poll */

      return flags;
    }
#endif

  /* We get here if (1) not all of the data has been ACKed, (2) we have been
   * asked to retransmit data, (3) the connection is still healthy, and (4)
   * the outgoing packet is available for our use.  In this case, we are
   * now free to send more data to receiver -- UNLESS the buffer contains
   * unprocessed incoming data.  In that event, we will have to wait for the
   * next polling cycle.
   */

  if ((flags & UIP_NEWDATA) == 0 && pstate->snd_sent < pstate->snd_buflen)
    {
      uint32_t seqno;

      /* Get the amount of data that we can send in the next packet */

      uint32_t sndlen = pstate->snd_buflen - pstate->snd_sent;
      if (sndlen > uip_mss(conn))
        {
          sndlen = uip_mss(conn);
        }

      /* Set the sequence number for this packet.  NOTE:  uIP updates
       * sndseq on receipt of ACK *before* this function is called.  In that
       * case sndseq will point to the next unacknowledged byte (which might
       * have already been sent).  We will overwrite the value of sndseq
       * here before the packet is sent.
       */

      seqno = pstate->snd_sent + pstate->snd_isn;
      nllvdbg("SEND: sndseq %08x->%08x\n", conn->sndseq, seqno);
      uip_tcpsetsequence(conn->sndseq, seqno);

      /* Then set-up to send that amount of data. (this won't actually
       * happen until the polling cycle completes).
       */

      uip_send(dev, &pstate->snd_buffer[pstate->snd_sent], sndlen);

      /* Check if the destination IP address is in the ARP table.  If not,
       * then the send won't actually make it out... it will be replaced with
       * an ARP request.
       *
       * NOTE 1: This could be an expensive check if there are a lot of
       * entries in the ARP table.  Hence, we only check on the first
       * packet -- when snd_sent is zero.
       *
       * NOTE 2: If we are actually harvesting IP addresses on incoming IP
       * packets, then this check should not be necessary; the MAC mapping
       * should already be in the ARP table.
       */

#if defined(CONFIG_NET_ETHERNET) && defined (CONFIG_NET_ARP_IPIN)
      if (pstate->snd_sent != 0 || uip_arp_find(conn->ripaddr) != NULL)
#endif
        {
          /* Update the amount of data sent (but not necessarily ACKed) */

          pstate->snd_sent += sndlen;
          nllvdbg("SEND: acked=%d sent=%d buflen=%d\n",
                  pstate->snd_acked, pstate->snd_sent, pstate->snd_buflen);

          /* Update the send time */

#if defined(CONFIG_NET_SOCKOPTS) && !defined(CONFIG_DISABLE_CLOCK)
          pstate->snd_time = clock_systimer();
#endif
        }
    }

  /* All data has been sent and we are just waiting for ACK or re-transmit
   * indications to complete the send.  Check for a timeout.
   */

#if defined(CONFIG_NET_SOCKOPTS) && !defined(CONFIG_DISABLE_CLOCK)
  else if (send_timeout(pstate))
    {
      /* Yes.. report the timeout */

      nlldbg("SEND timeout\n");
      pstate->snd_sent = -ETIMEDOUT;
      goto end_wait;
    }
#endif /* CONFIG_NET_SOCKOPTS && !CONFIG_DISABLE_CLOCK */

  /* Continue waiting */

  return flags;

end_wait:
  /* Do not allow any further callbacks */

  pstate->snd_cb->flags   = 0;
  pstate->snd_cb->priv    = NULL;
  pstate->snd_cb->event   = NULL;

  /* There are no outstanding, unacknowledged bytes */

  conn->unacked           = 0;

  /* Wake up the waiting thread */

  sem_post(&pstate->snd_sem);
  return flags;
}
int UavcanNode::run()
{

	get_node().setRestartRequestHandler(&restart_request_handler);

	while (init_indication_controller(get_node()) < 0) {
		::syslog(LOG_INFO, "UAVCAN: Indication controller init failed\n");
		::sleep(1);
	}

	while (init_sim_controller(get_node()) < 0) {
		::syslog(LOG_INFO, "UAVCAN: sim controller init failed\n");
		::sleep(1);
	}

	(void)pthread_mutex_lock(&_node_mutex);

	/*
	 * Set up the time synchronization
	 */

	const int slave_init_res = _time_sync_slave.start();

	if (slave_init_res < 0) {
		warnx("Failed to start time_sync_slave");
		_task_should_exit = true;
	}

	const unsigned PollTimeoutMs = 50;

	const int busevent_fd = ::open(uavcan_stm32::BusEvent::DevName, 0);

	if (busevent_fd < 0) {
		warnx("Failed to open %s", uavcan_stm32::BusEvent::DevName);
		_task_should_exit = true;
	}

	/* If we had an RTC we would call uavcan_stm32::clock::setUtc()
	 * but for now we use adjustUtc with a correction of 0
	 */
//        uavcan_stm32::clock::adjustUtc(uavcan::UtcDuration::fromUSec(0));

	_node.setModeOperational();

	/*
	 * This event is needed to wake up the thread on CAN bus activity (RX/TX/Error).
	 * Please note that with such multiplexing it is no longer possible to rely only on
	 * the value returned from poll() to detect whether actuator control has timed out or not.
	 * Instead, all ORB events need to be checked individually (see below).
	 */
	add_poll_fd(busevent_fd);

	uint32_t start_tick = clock_systimer();

	while (!_task_should_exit) {
		// Mutex is unlocked while the thread is blocked on IO multiplexing
		(void)pthread_mutex_unlock(&_node_mutex);

		const int poll_ret = ::poll(_poll_fds, _poll_fds_num, PollTimeoutMs);


		(void)pthread_mutex_lock(&_node_mutex);

		node_spin_once();  // Non-blocking


		// this would be bad...
		if (poll_ret < 0) {
			PX4_ERR("poll error %d", errno);
			continue;

		} else {
			// Do Something
		}

		if (clock_systimer() - start_tick > TICK_PER_SEC) {
			start_tick = clock_systimer();
			resources("Udate:");
			/*
			 *  Printing the slave status information once a second
			 */
			const bool active = _time_sync_slave.isActive();                      // Whether it can sync with a remote master

			const int master_node_id = _time_sync_slave.getMasterNodeID().get();  // Returns an invalid Node ID if (active == false)

			const long msec_since_last_adjustment = (_node.getMonotonicTime() - _time_sync_slave.getLastAdjustmentTime()).toMSec();
			const uavcan::UtcTime utc = uavcan_stm32::clock::getUtc();
			syslog(LOG_INFO, "Time:%lld\n"
			       "            Time sync slave status:\n"
			       "    Active: %d\n"
			       "    Master Node ID: %d\n"
			       "    Last clock adjustment was %ld ms ago\n",
			       utc.toUSec(), int(active), master_node_id, msec_since_last_adjustment);
			syslog(LOG_INFO, "UTC %lu sec   Rate corr: %fPPM   Jumps: %lu   Locked: %i\n\n",
			       static_cast<unsigned long>(utc.toMSec() / 1000),
			       static_cast<double>(uavcan_stm32::clock::getUtcRateCorrectionPPM()),
			       uavcan_stm32::clock::getUtcJumpCount(),
			       int(uavcan_stm32::clock::isUtcLocked()));
		}

	}

	teardown();
	warnx("exiting.");

	exit(0);
}
Example #28
ssize_t send(int sockfd, const void *buf, size_t len, int flags)
{
  FAR struct socket *psock = sockfd_socket(sockfd);
  struct send_s state;
  uip_lock_t save;
  int err;
  int ret = OK;

  /* Verify that the sockfd corresponds to a valid, allocated socket */

  if (!psock || psock->s_crefs <= 0)
    {
      err = EBADF;
      goto errout;
    }

  /* If this is an un-connected socket, then return ENOTCONN */

  if (psock->s_type != SOCK_STREAM || !_SS_ISCONNECTED(psock->s_flags))
    {
      err = ENOTCONN;
      goto errout;
    }

  /* Set the socket state to sending */

  psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_SEND);

  /* Perform the TCP send operation */

  /* Initialize the state structure.  This is done with interrupts
   * disabled because we don't want anything to happen until we
   * are ready.
   */

  save                = uip_lock();
  memset(&state, 0, sizeof(struct send_s));
  (void)sem_init(&state.snd_sem, 0, 0);   /* Doesn't really fail */
  state.snd_sock      = psock;             /* Socket descriptor to use */
  state.snd_buflen    = len;               /* Number of bytes to send */
  state.snd_buffer    = buf;               /* Buffer to send from */

  if (len > 0)
    {
      struct uip_conn *conn = (struct uip_conn*)psock->s_conn;

      /* Allocate resources to receive a callback */

      state.snd_cb = uip_tcpcallbackalloc(conn);
      if (state.snd_cb)
        {
          /* Get the initial sequence number that will be used */

          state.snd_isn         = uip_tcpgetsequence(conn->sndseq);

          /* There is no outstanding, unacknowledged data after this
           * initial sequence number.
           */

          conn->unacked         = 0;

          /* Update the initial time for calculating timeouts */

#if defined(CONFIG_NET_SOCKOPTS) && !defined(CONFIG_DISABLE_CLOCK)
          state.snd_time        = clock_systimer();
#endif
          /* Set up the callback in the connection */

          state.snd_cb->flags   = UIP_ACKDATA|UIP_REXMIT|UIP_POLL|UIP_CLOSE|UIP_ABORT|UIP_TIMEDOUT;
          state.snd_cb->priv    = (void*)&state;
          state.snd_cb->event   = send_interrupt;

          /* Notify the device driver of the availability of TX data */

          netdev_txnotify(&conn->ripaddr);

          /* Wait for the send to complete or an error to occur:  NOTES: (1)
           * uip_lockedwait will also terminate if a signal is received, (2) interrupts
           * may be disabled!  They will be re-enabled while the task sleeps and
           * automatically re-enabled when the task restarts.
           */

          ret = uip_lockedwait(&state.snd_sem);

          /* Make sure that no further interrupts are processed */

          uip_tcpcallbackfree(conn, state.snd_cb);
        }
    }

  sem_destroy(&state.snd_sem);
  uip_unlock(save);

  /* Set the socket state to idle */

  psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_IDLE);

  /* Check for errors.  Errors are signaled by negative errno values
   * for the send length.
   */

  if (state.snd_sent < 0)
    {
      err = state.snd_sent;
      goto errout;
    }

  /* If uip_lockedwait failed, then we were probably reawakened by a signal. In
   * this case, uip_lockedwait will have set errno appropriately.
   */

  if (ret < 0)
    {
      err = -ret;
      goto errout;
    }

  /* Return the number of bytes actually sent */

  return state.snd_sent;

errout:
  *get_errno_ptr() = err;
  return ERROR;
}
ssize_t psock_sendto(FAR struct socket *psock, FAR const void *buf,
                     size_t len, int flags, FAR const struct sockaddr *to,
                     socklen_t tolen)
{
#ifdef CONFIG_NET_UDP
  FAR struct uip_udp_conn *conn;
#ifdef CONFIG_NET_IPv6
  FAR const struct sockaddr_in6 *into = (const struct sockaddr_in6 *)to;
#else
  FAR const struct sockaddr_in *into = (const struct sockaddr_in *)to;
#endif
  struct sendto_s state;
  uip_lock_t save;
  int ret;
#endif
  int err;

  /* If to is NULL or tolen is zero, then this function is the same as
   * send() (for connected socket types).
   */

  if (!to || !tolen)
    {
#ifdef CONFIG_NET_TCP
      return psock_send(psock, buf, len, flags);
#else
      err = EINVAL;
      goto errout;
#endif
    }

  /* Verify that a valid address has been provided */

#ifdef CONFIG_NET_IPv6
  if (to->sa_family != AF_INET6 || tolen < sizeof(struct sockaddr_in6))
#else
  if (to->sa_family != AF_INET || tolen < sizeof(struct sockaddr_in))
#endif
    {
      err = EBADF;
      goto errout;
    }

  /* Verify that the psock corresponds to a valid, allocated socket */

  if (!psock || psock->s_crefs <= 0)
    {
      err = EBADF;
      goto errout;
    }

  /* If this is a connected socket, then return EISCONN */

  if (psock->s_type != SOCK_DGRAM)
    {
      err = EISCONN;
      goto errout;
    }

  /* Perform the UDP sendto operation */

#ifdef CONFIG_NET_UDP
  /* Set the socket state to sending */

  psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_SEND);

  /* Initialize the state structure.  This is done with interrupts
   * disabled because we don't want anything to happen until we
   * are ready.
   */

  save = uip_lock();
  memset(&state, 0, sizeof(struct sendto_s));
  sem_init(&state.st_sem, 0, 0);
  state.st_buflen = len;
  state.st_buffer = buf;

  /* Set the initial time for calculating timeouts */

#ifdef CONFIG_NET_SENDTO_TIMEOUT
  state.st_sock = psock;
  state.st_time = clock_systimer();
#endif

  /* Setup the UDP socket */

  conn = (struct uip_udp_conn *)psock->s_conn;
  ret = uip_udpconnect(conn, into);
  if (ret < 0)
    {
      uip_unlock(save);
      err = -ret;
      goto errout;
    }

  /* Set up the callback in the connection */

  state.st_cb = uip_udpcallbackalloc(conn);
  if (state.st_cb)
    {
      state.st_cb->flags   = UIP_POLL;
      state.st_cb->priv    = (void*)&state;
      state.st_cb->event   = sendto_interrupt;

      /* Notify the device driver of the availability of TX data */

      netdev_txnotify(conn->ripaddr);

      /* Wait for either the transfer to complete or for an error/timeout to occur.
       * NOTES:  (1) uip_lockedwait will also terminate if a signal is received, (2)
       * interrupts may be disabled!  They will be re-enabled while the task sleeps
       * and automatically re-enabled when the task restarts.
       */

      uip_lockedwait(&state.st_sem);

      /* Make sure that no further interrupts are processed */

      uip_udpcallbackfree(conn, state.st_cb);
    }
  uip_unlock(save);

  sem_destroy(&state.st_sem);

  /* Set the socket state to idle */

  psock->s_flags = _SS_SETSTATE(psock->s_flags, _SF_IDLE);

  /* Check for errors */

  if (state.st_sndlen < 0)
    {
      err = -state.st_sndlen;
      goto errout;
    }

  /* Success */

  return state.st_sndlen;
#else
  err = ENOSYS;
#endif

errout:
  set_errno(err);
  return ERROR;
}
static void work_process(FAR struct wqueue_s *wqueue)
{
  volatile FAR struct work_s *work;
  worker_t  worker;
  irqstate_t flags;
  FAR void *arg;
  uint32_t elapsed;
  uint32_t remaining;
  uint32_t next;

  /* Then process queued work.  We need to keep interrupts disabled while
   * we process items in the work list.
   */

  next  = CONFIG_SCHED_WORKPERIOD / USEC_PER_TICK;
  flags = irqsave();
  work  = (FAR struct work_s *)wqueue->q.head;
  while (work)
    {
      /* Is this work ready?  It is ready if there is no delay or if
       * the delay has elapsed. qtime is the time that the work was added
       * to the work queue.  It will always be greater than or equal to
       * zero.  Therefore a delay of zero will always execute immediately.
       */

      elapsed = clock_systimer() - work->qtime;
      if (elapsed >= work->delay)
        {
          /* Remove the ready-to-execute work from the list */

          (void)dq_rem((struct dq_entry_s *)work, &wqueue->q);

          /* Extract the work description from the entry (in case the work
           * instance is re-used after it has been de-queued).
           */

          worker = work->worker;

          /* Check for a race condition where the work may be nullified
           * before it is removed from the queue.
           */

          if (worker != NULL)
            {
              /* Extract the work argument (before re-enabling interrupts) */

              arg = work->arg;

              /* Mark the work as no longer being queued */

              work->worker = NULL;

              /* Do the work.  Re-enable interrupts while the work is being
               * performed... we don't have any idea how long that will take!
               */

              irqrestore(flags);
              worker(arg);

              /* Now, unfortunately, since we re-enabled interrupts we don't
               * know the state of the work list and we will have to start
               * back at the head of the list.
               */

              flags = irqsave();
              work  = (FAR struct work_s *)wqueue->q.head;
            }
          else
            {
              /* Cancelled.. Just move to the next work in the list with
               * interrupts still disabled.
               */

              work = (FAR struct work_s *)work->dq.flink;
            }
        }
      else
        {
          /* This one is not ready.. will it be ready before the next
           * scheduled wakeup interval?
           */

          remaining = work->delay - elapsed;
          if (remaining < next)
            {
              /* Yes.. Then schedule to wake up when the work is ready */

              next = remaining;
            }

          /* Then try the next in the list. */

          work = (FAR struct work_s *)work->dq.flink;
        }
    }

  /* Wait awhile to check the work list.  We will wait here until either
   * the time elapses or until we are awakened by a signal.
   */

  usleep(next * USEC_PER_TICK);
  irqrestore(flags);
}
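As in the other snippets, the elapsed-time test above depends on unsigned tick arithmetic, and the remaining time until a deferred work item is ready is delay - elapsed. A small standalone sketch (the tick values are made up):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
  /* Hypothetical: the 32-bit tick counter wrapped between enqueue and
   * this check.
   */

  uint32_t qtime   = 0xfffffffe;   /* tick count when the work was queued */
  uint32_t now     = 0x00000003;   /* current tick count */
  uint32_t delay   = 10;           /* requested delay in ticks */

  uint32_t elapsed = now - qtime;  /* wraps modulo 2^32 to 5 ticks */

  if (elapsed >= delay)
    {
      printf("ready\n");
    }
  else
    {
      /* Sleep no longer than the time remaining until this work is due */

      printf("remaining=%u ticks\n", (unsigned int)(delay - elapsed));
    }

  return 0;
}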