Code example #1
File: z180_mmu.c  Project: casro/vrbrain_nuttx
int up_addrenv_create(size_t envsize, FAR task_addrenv_t *addrenv)
{
  FAR struct z180_cbr_s *cbr;
  irqstate_t flags;
  uintptr_t alloc;
  unsigned int npages;
  int ret;

  /* Convert the size from bytes to numbers of pages */

  npages = PHYS_ALIGNUP(envsize);
  if (npages < 1)
    {
      /* No address environment... but I suppose that is not an error */

      sdbg("ERROR: npages is zero\n");
      return OK;
    }
  
  /* Allocate a structure in the common .bss to hold information about the
   * task's address environment.  NOTE that this is not a part of the TCB,
   * but rather a break-away structure that can be shared by the task as
   * well as other threads.  That is necessary because the life of the
   * address environment might be longer than the life of the task.
   */

  flags = irqsave();
  cbr = z180_mmu_alloccbr();
  if (!cbr)
    {
      sdbg("ERROR: No free CBR structures\n");
      ret = -ENOMEM;
      goto errout_with_irq;
    }

  /* Now allocate the physical memory to back up the address environment */

#ifdef CONFIG_GRAN_SINGLE
  alloc = (uintptr_t)gran_alloc(npages);
#else
  alloc = (uintptr_t)gran_alloc(g_physhandle, npages);
#endif
  if (!alloc)
    {
      sdbg("ERROR: Failed to allocate %d pages\n", npages);
      ret = -ENOMEM;
      goto errout_with_cbr;
    }

  /* Save the information in the CBR structure.  Note that alloc is in
   * 4KB pages, already in the right form for the CBR.
   */

  DEBUGASSERT(alloc <= 0xff);

  cbr->cbr     = (uint8_t)alloc;
  cbr->pages   = (uint8_t)npages;
  *addrenv     = (task_addrenv_t)cbr;

  irqrestore(flags);
  return OK;

errout_with_cbr:
  z180_mmu_freecbr(cbr);
  
errout_with_irq:
  irqrestore(flags);
  return ret;
}
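All of the examples collected here follow the same critical-section idiom: capture the interrupt state with irqsave(), do the work that must not race with interrupt handlers or context switches, and restore the saved state with irqrestore() on every exit path, including error paths. The sketch below is not taken from any of the projects above; it only distills that pattern against the classic NuttX irqsave()/irqrestore() API, and the resource type and helper functions are hypothetical.

#include <errno.h>
#include <nuttx/irq.h>  /* irqstate_t, irqsave(), irqrestore() -- classic NuttX API */

/* Hypothetical resource type and helpers, for illustration only */

struct resource_s;
extern FAR struct resource_s *resource_alloc(void);
extern void resource_free(FAR struct resource_s *res);
extern int resource_setup(FAR struct resource_s *res);

int critical_section_example(FAR struct resource_s **out)
{
  FAR struct resource_s *res;
  irqstate_t flags;
  int ret;

  /* Disable interrupts so that an interrupt handler cannot touch the
   * shared state while we allocate and initialize it.
   */

  flags = irqsave();

  res = resource_alloc();
  if (!res)
    {
      ret = -ENOMEM;
      goto errout_with_irq;
    }

  ret = resource_setup(res);
  if (ret < 0)
    {
      goto errout_with_resource;
    }

  *out = res;
  irqrestore(flags);            /* The success path restores interrupts too */
  return 0;

errout_with_resource:
  resource_free(res);

errout_with_irq:
  irqrestore(flags);            /* Every error path restores the saved state */
  return ret;
}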
Code example #2
static int proc_stat(const char *relpath, struct stat *buf)
{
  FAR const struct proc_node_s *node;
  FAR struct tcb_s *tcb;
  unsigned long tmp;
  FAR char *ptr;
  irqstate_t flags;
  pid_t pid;

  /* Two path forms are accepted:
   *
   * "<pid>" - If <pid> refers to a currently active task/thread, then it
   *   is a directory
   * "<pid>/<node>" - If <node> is a recognized node then, then it
   *   is a file or directory.
   */

  ptr = NULL;
  tmp = strtoul(relpath, &ptr, 10);

  if (!ptr)
    {
      fdbg("ERROR: Invalid path \"%s\"\n", relpath);
      return -ENOENT;
    }

  /* A valid PID would be in the range of 0-32767 (0 is reserved for the
   * IDLE thread).
   */

  if (tmp >= 32768)
    {
      fdbg("ERROR: Invalid PID %ld\n", tmp);
      return -ENOENT;
    }

  /* Now verify that a task with this task/thread ID exists */

  pid = (pid_t)tmp;

  flags = irqsave();
  tcb = sched_gettcb(pid);
  irqrestore(flags);

  if (!tcb)
    {
      fdbg("ERROR: PID %d is no longer valid\n", (int)pid);
      return -ENOENT;
    }

  /* Was the <pid> the final element of the path? */

  if (*ptr == '\0' || strcmp(ptr, "/") == 0)
    {
      /* Yes ... It's a read-only directory */

      buf->st_mode = S_IFDIR|S_IROTH|S_IRGRP|S_IRUSR;
    }

  /* Verify that the process ID is followed by a valid path segment delimiter */

  else if (*ptr != '/')
    {
      /* We are required to return -ENOENT for all invalid paths */

      fdbg("ERROR: Bad delimiter '%c' in relpath '%s'\n", *ptr, relpath);
      return -ENOENT;
    }
  else
    {
      /* Otherwise, the second segment of the relpath should be a well
       * known node of the task/thread directory structure.
       */

      /* Skip over the path segment delimiter */

      ptr++;

      /* Lookup the well-known node associated with the relative path. */

      node = proc_findnode(ptr);
      if (!node)
        {
          fdbg("ERROR: Invalid path \"%s\"\n", relpath);
          return -ENOENT;
        }

      /* If the node exists, it is the name for a read-only file or
       * directory.
       */

      if (node->dtype == DTYPE_FILE)
        {
          buf->st_mode = S_IFREG|S_IROTH|S_IRGRP|S_IRUSR;
        }
      else
        {
          buf->st_mode = S_IFDIR|S_IROTH|S_IRGRP|S_IRUSR;
        }
    }

  /* File/directory size, access block size */

  buf->st_size    = 0;
  buf->st_blksize = 0;
  buf->st_blocks  = 0;
  return OK;
}
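proc_stat() is the procfs back end that services stat() on "/proc/<pid>" style paths. A minimal caller-side sketch, assuming procfs is mounted at /proc and that a task with the given PID exists:

#include <stdio.h>
#include <sys/stat.h>

/* Minimal sketch: stat() a task directory under procfs.  The mount point
 * /proc is an assumption of this example.
 */

static int show_proc_entry(int pid)
{
  struct stat buf;
  char path[32];
  int ret;

  snprintf(path, sizeof(path), "/proc/%d", pid);

  ret = stat(path, &buf);        /* Ends up in proc_stat(relpath, &buf) */
  if (ret < 0)
    {
      printf("stat(%s) failed\n", path);
      return ret;
    }

  printf("%s is a %s\n", path, S_ISDIR(buf.st_mode) ? "directory" : "file");
  return 0;
}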
Code example #3
File: buttons_main.c  Project: centurysys/NuttX-SA1xx
int buttons_main(int argc, char *argv[])
{
  uint8_t newset;
  irqstate_t flags;
  int i;

  /* If this example is configured as an NX add-on, then limit the number of
   * samples that we collect before returning.  Otherwise, we never return
   */

#ifdef CONFIG_NSH_BUILTIN_APPS
  long maxbuttons = 1;
  g_nbuttons      = 0;
  if (argc > 1)
    {
      maxbuttons = strtol(argv[1], NULL, 10);
    }
  lowsyslog("maxbuttons: %d\n", maxbuttons);
#endif

  /* Initialize the button GPIOs */

  board_button_initialize();

  /* Register to receive button interrupts */

#ifdef CONFIG_ARCH_IRQBUTTONS
  for (i = CONFIG_EXAMPLES_IRQBUTTONS_MIN; i <= CONFIG_EXAMPLES_IRQBUTTONS_MAX; i++)
    {
      xcpt_t oldhandler = board_button_irq(i, g_buttoninfo[BUTTON_INDEX(i)].handler);

      /* Use lowsyslog() for compatibility with interrupt handler output. */

      lowsyslog("Attached handler at %p to button %d [%s], oldhandler:%p\n",
                g_buttoninfo[BUTTON_INDEX(i)].handler, i,
                g_buttoninfo[BUTTON_INDEX(i)].name, oldhandler);

      /* Some hardware multiplexes different GPIO button sources to the same
       * physical interrupt.  If we register multiple such multiplexed button
       * interrupts, then the second registration will overwrite the first.  In
       * this case, the first button interrupts may be aliased to the second
       * interrupt handler (or worse, could be lost).
       */

      if (oldhandler != NULL)
        {
          lowsyslog("WARNING: oldhandler:%p is not NULL!  "
                    "Button events may be lost or aliased!\n",
                    oldhandler);
        }
    }
#endif

  /* Poll button state */

  g_oldset = board_buttons();
#ifdef CONFIG_NSH_BUILTIN_APPS
  while (g_nbuttons < maxbuttons)
#else
  for (;;)
#endif
    {
      /* Get the set of pressed and released buttons. */

      newset = board_buttons();

      /* Any changes from the last sample? */

      if (newset != g_oldset)
        {
          /* Disable interrupts so that output here will not collide with
           * output from an interrupt handler.
           */

          flags = irqsave();

          /* Use lowsyslog() for compatibility with interrupt handler
           * output.
           */

          lowsyslog("POLL SET:%02x:\n", newset);
          show_buttons(g_oldset, newset);
          g_oldset = newset;
          irqrestore(flags);
        }

      /* Sleep a little... but not long.  This will determine how fast we
       * poll for button changes.
       */

      usleep(150000); /* 150 Milliseconds */
    }

  /* Un-register button handlers */

#if defined(CONFIG_ARCH_IRQBUTTONS) && defined(CONFIG_NSH_BUILTIN_APPS)
  for (i = CONFIG_EXAMPLES_IRQBUTTONS_MIN; i <= CONFIG_EXAMPLES_IRQBUTTONS_MAX; i++)
    {
      (void)board_button_irq(i, NULL);
    }
#endif

  return 0;
}
Code example #4
File: arm_pgalloc.c  Project: cloudyourcar/nuttx
uintptr_t pgalloc(uintptr_t brkaddr, unsigned int npages)
{
  FAR struct tcb_s *tcb = sched_self();
  FAR struct task_group_s *group;
  FAR uint32_t *l2table;
  irqstate_t flags;
  uintptr_t paddr;
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
  uint32_t l1save;
#endif
  unsigned int index;

  DEBUGASSERT(tcb && tcb->group);
  group = tcb->group;

  /* The current implementation only supports extending the user heap
   * region as part of the implementation of user sbrk().  This function
   * needs to be expanded to also handle (1) extending the user stack
   * space and (2) extending the kernel memory regions as well.
   */

  DEBUGASSERT((group->tg_flags & GROUP_FLAG_ADDRENV) != 0);

  /* brkaddr = 0 means that no heap has yet been allocated */

  if (brkaddr == 0)
    {
      brkaddr = CONFIG_ARCH_HEAP_VBASE;
    }

  DEBUGASSERT(brkaddr >= CONFIG_ARCH_HEAP_VBASE && brkaddr < ARCH_HEAP_VEND);
  DEBUGASSERT(MM_ISALIGNED(brkaddr));

  for (; npages > 0; npages--)
    {
      /* Get the physical address of the level 2 page table */

      paddr = get_pgtable(&group->addrenv, brkaddr);
      if (paddr == 0)
        {
          return 0;
        }

      flags = irqsave();

#ifdef CONFIG_ARCH_PGPOOL_MAPPING
      /* Get the virtual address corresponding to the physical page address */

      l2table = (FAR uint32_t *)arm_pgvaddr(paddr);
#else
      /* Temporarily map the level 2 page table into the "scratch" virtual
       * address space
       */

      l1save = mmu_l1_getentry(ARCH_SCRATCH_VBASE);
      mmu_l1_setentry(paddr & ~SECTION_MASK, ARCH_SCRATCH_VBASE, MMU_MEMFLAGS);
      l2table = (FAR uint32_t *)(ARCH_SCRATCH_VBASE | (paddr & SECTION_MASK));
#endif

      /* Back up L2 entry with physical memory */

      paddr = mm_pgalloc(1);
      if (paddr == 0)
        {
#ifndef CONFIG_ARCH_PGPOOL_MAPPING
          mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
          irqrestore(flags);
          return 0;
        }

      /* The table divides a 1Mb address space up into 256 entries, each
       * corresponding to 4Kb of address space.  The page table index is
       * related to the offset from the beginning of 1Mb region.
       */

      index = (brkaddr & 0x000ff000) >> 12;

      /* Map the heap region virtual address to this physical address */

      DEBUGASSERT(l2table[index] == 0);
      l2table[index] = paddr | MMU_L2_UDATAFLAGS;
      brkaddr += MM_PGSIZE;

      /* Make sure that the modified L2 table is flushed to physical
       * memory.
       */

      arch_flush_dcache((uintptr_t)&l2table[index],
                        (uintptr_t)&l2table[index] + sizeof(uint32_t));

#ifndef CONFIG_ARCH_PGPOOL_MAPPING
      /* Restore the scratch L1 page table entry */

      mmu_l1_restore(ARCH_SCRATCH_VBASE, l1save);
#endif
      irqrestore(flags);
    }

  return brkaddr;
}
Code example #5
static int proc_opendir(FAR const char *relpath, FAR struct fs_dirent_s *dir)
{
  FAR struct proc_dir_s *procdir;
  FAR const struct proc_node_s *node;
  FAR struct tcb_s *tcb;
  irqstate_t flags;
  unsigned long tmp;
  FAR char *ptr;
  pid_t pid;

  fvdbg("relpath: \"%s\"\n", relpath ? relpath : "NULL");
  DEBUGASSERT(relpath && dir && !dir->u.procfs);

  /* The relative path must be either:
   *
   *  (1) "<pid>" - The sub-directory of task/thread attributes, or
   *  (2) The name of a directory node under <pid>
   */

  /* In either case, the relative path must begin with a valid task/thread ID */

  ptr = NULL;
  tmp = strtoul(relpath, &ptr, 10);

  if (!ptr || (*ptr != '\0' && *ptr != '/'))
    {
      /* strtoul failed or there is something in the path after the pid */

      fdbg("ERROR: Invalid path \"%s\"\n", relpath);
      return -ENOENT;
    }

  /* A valid PID would be in the range of 0-32767 (0 is reserved for the
   * IDLE thread).
   */

  if (tmp >= 32768)
    {
      fdbg("ERROR: Invalid PID %ld\n", tmp);
      return -ENOENT;
    }

  /* Now verify that a task with this task/thread ID exists */

  pid = (pid_t)tmp;

  flags = irqsave();
  tcb = sched_gettcb(pid);
  irqrestore(flags);

  if (!tcb)
    {
      fdbg("ERROR: PID %d is not valid\n", (int)pid);
      return -ENOENT;
    }

  /* Allocate the directory structure.  Note that the index and procentry
   * pointer are implicitly nullified by kzalloc().  Only the remaining,
   * non-zero entries will need to be initialized.
   */

  procdir = (FAR struct proc_dir_s *)kzalloc(sizeof(struct proc_dir_s));
  if (!procdir)
    {
      fdbg("ERROR: Failed to allocate the directory structure\n");
      return -ENOMEM;
    }

  /* Was the <pid> the final element of the path? */

  if (*ptr != '\0' && strcmp(ptr, "/") != 0)
    {
      /* There is something in the path after the pid.  Skip over the path
       * segment delimiter and see if we can identify the node of interest.
       */

      ptr++;
      node = proc_findnode(ptr);
      if (!node)
        {
          fdbg("ERROR: Invalid path \"%s\"\n", relpath);
          kfree(procdir);
          return -ENOENT;
        }

      /* The node must be a directory, not a file */

      if (node->dtype != DTYPE_DIRECTORY)
        {
          fdbg("ERROR: Path \"%s\" is not a directory\n", relpath);
          kfree(procdir);
          return -ENOTDIR;
        }

      /* This is a second level directory */

      procdir->base.level    = 2;
      procdir->base.nentries = PROC_NGROUPNODES;
      procdir->node          = node;
    }
  else
    {
      /* Use the special level0 node */

      procdir->base.level    = 1;
      procdir->base.nentries = PROC_NLEVEL0NODES;
      procdir->node          = &g_level0node;
    }

  procdir->pid  = pid;
  dir->u.procfs = (FAR void *)procdir;
  return OK;
}
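proc_opendir() backs opendir() on "/proc/<pid>" and "/proc/<pid>/<node>" paths. A minimal sketch of the caller side using the standard dirent API, again assuming a /proc mount point:

#include <stdio.h>
#include <dirent.h>

/* Minimal sketch: list the attribute nodes of one task.  Assumes procfs
 * is mounted at /proc; the PID is only an example value.
 */

static void list_task_nodes(int pid)
{
  char path[32];
  DIR *dirp;
  struct dirent *entry;

  snprintf(path, sizeof(path), "/proc/%d", pid);

  dirp = opendir(path);          /* Reaches proc_opendir(relpath, dir) */
  if (!dirp)
    {
      printf("opendir(%s) failed\n", path);
      return;
    }

  while ((entry = readdir(dirp)) != NULL)
    {
      printf("  %s\n", entry->d_name);
    }

  closedir(dirp);
}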
Code example #6
static void up_idlepm(void)
{
#ifdef CONFIG_RTC_ALARM
  struct timespec alarmtime;
#endif
  static enum pm_state_e oldstate = PM_NORMAL;
  enum pm_state_e newstate;
  irqstate_t flags;
  int ret;

  /* Decide which power saving level can be obtained */

  newstate = pm_checkstate();

  /* Check for state changes */

  if (newstate != oldstate)
    {
      lldbg("newstate= %d oldstate=%d\n", newstate, oldstate);

      flags = irqsave();

      /* Force the global state change */

      ret = pm_changestate(newstate);
      if (ret < 0)
        {
          /* The new state change failed, revert to the preceding state */

          (void)pm_changestate(oldstate);

          /* No state change... */

          goto errout;
        }

      /* Then perform board-specific, state-dependent logic here */

      switch (newstate)
        {
        case PM_NORMAL:
          {
          }
          break;

        case PM_IDLE:
          {
          }
          break;

        case PM_STANDBY:
          {
#ifdef CONFIG_RTC_ALARM
            /* Disable RTC Alarm interrupt */

#warning "missing logic"

            /* Configure the RTC alarm to Auto Wake the system */

#warning "missing logic"

            /* The tv_nsec value must not exceed 1,000,000,000. That
             * would be an invalid time.
             */

#warning "missing logic"

            /* Set the alarm */

#warning "missing logic"
#endif
            /* Call the STM32 stop mode */

            stm32_pmstop(true);

            /* We have been re-awakened by some event:  A button press?
             * An alarm?  Cancel any pending alarm and resume the normal
             * operation.
             */

#ifdef CONFIG_RTC_ALARM
#warning "missing logic"
#endif
            /* Resume normal operation */

            pm_changestate(PM_NORMAL);
            newstate = PM_NORMAL;
          }
          break;

        case PM_SLEEP:
          {
            /* We should not return from standby mode.  The only way out
             * of standby is via the reset path.
             */

            (void)stm32_pmstandby();
          }
          break;

        default:
          break;
        }

      /* Save the new state */

      oldstate = newstate;

errout:
      irqrestore(flags);
    }
}
Code example #7
File: work_thread.c  Project: aliniger/Firmware_orca
int work_thread(int argc, char *argv[])
{
  volatile FAR struct work_s *work;
  worker_t  worker;
  FAR void *arg;
  uint32_t elapsed;
  uint32_t remaining;
  uint32_t next;
  int usec;
  irqstate_t flags;

  /* Loop forever */

  usec = CONFIG_SCHED_WORKPERIOD;
  flags = irqsave();
  for (;;)
    {
      /* Wait awhile to check the work list.  We will wait here until either
       * the time elapses or until we are awakened by a signal.
       */

      usleep(usec);
      irqrestore(flags);

      /* First, perform garbage collection.  This cleans up memory de-allocations
       * that were queued because they could not be freed in that execution
       * context (for example, if the memory was freed from an interrupt handler).
       * NOTE: If the work thread is disabled, this clean-up is performed by
       * the IDLE thread (at a very, very low priority).
       */

      sched_garbagecollection();

      /* Then process queued work.  We need to keep interrupts disabled while
       * we process items in the work list.
       */

      next  = CONFIG_SCHED_WORKPERIOD / USEC_PER_TICK;
      flags = irqsave();
      work  = (FAR struct work_s *)g_work.head;
      while (work)
        {
          /* Is this work ready?  It is ready if there is no delay or if
           * the delay has elapsed. qtime is the time that the work was added
           * to the work queue.  It will always be greater than or equal to
           * zero.  Therefore a delay of zero will always execute immediately.
           */

          elapsed = clock_systimer() - work->qtime;
          if (elapsed >= work->delay)
            {
              /* Remove the ready-to-execute work from the list */

              (void)dq_rem((struct dq_entry_s *)work, &g_work);

              /* Extract the work description from the entry (in case the work
               * instance may be re-used after it has been de-queued).
               */

              worker = work->worker;
              arg    = work->arg;

              /* Mark the work as no longer being queued */

              work->worker = NULL;

              /* Do the work.  Re-enable interrupts while the work is being
               * performed... we don't have any idea how long that will take!
               */

              irqrestore(flags);
              worker(arg);

              /* Now, unfortunately, since we re-enabled interrupts we don't
               * know the state of the work list and we will have to start
               * back at the head of the list.
               */

              flags = irqsave();
              work  = (FAR struct work_s *)g_work.head;
            }
          else
            {
              /* This one is not ready.. will it be ready before the next
               * scheduled wakeup interval?
               */

              remaining = work->delay - elapsed;
              if (remaining < next)
                {
                  /* Yes.. Then schedule to wake up when the work is ready */

                  next = remaining;
                }
              
              /* Then try the next in the list. */

              work = (FAR struct work_s *)work->dq.flink;
            }
        }

      /* Now calculate the microsecond delay we should wait */

      usec = next * USEC_PER_TICK;
    }

  return OK; /* To keep some compilers happy */
}
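work_thread() drains entries that were queued on g_work through the work queue API. The sketch below shows how a driver might defer processing to that thread; the five-argument work_queue() with a queue identifier (HPWORK) matches later NuttX releases, so treat the exact signature as an assumption rather than what the tree quoted above provides.

#include <nuttx/wqueue.h>   /* struct work_s, work_queue(); see the hedge above */

static struct work_s g_mywork;  /* Must remain valid until the worker has run */

/* Worker callback: runs on the work queue thread, not in interrupt context */

static void my_worker(FAR void *arg)
{
  /* ... deferred processing ... */
}

/* Called, for example, from an interrupt handler to defer processing.  The
 * final argument is a delay in system clock ticks; zero means "run on the
 * next pass of the work thread".
 */

static void defer_processing(FAR void *arg)
{
  (void)work_queue(HPWORK, &g_mywork, my_worker, arg, 0);
}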
Code example #8
File: sig_suspend.c  Project: CNCBASHER/Firmware
int sigsuspend(FAR const sigset_t *set)
{
  FAR _TCB       *rtcb = (FAR _TCB*)g_readytorun.head;
  sigset_t        intersection;
  sigset_t        saved_sigprocmask;
  FAR sigpendq_t *sigpend;
  irqstate_t      saved_state;
  int             unblocksigno;

  /* Several operations must be performed below:  We must determine if any
   * signal is pending and, if not, wait for the signal.  Since signals can
   * be posted from the interrupt level, there is a race condition that
   * can only be eliminated by disabling interrupts!
   */

  sched_lock();  /* Not necessary */
  saved_state = irqsave();

  /* Check if there is a pending signal corresponding to one of the
   * signals that will be unblocked by the new sigprocmask.
   */

  intersection = ~(*set) & sig_pendingset(rtcb);
  if (intersection != NULL_SIGNAL_SET)
    {
      /* One or more of the signals in the intersection is sufficient to cause
       * us to not wait.  Pick the lowest numbered signal and mark it not
       * pending.
       */

      unblocksigno = sig_lowest(&intersection);
      sigpend = sig_removependingsignal(rtcb, unblocksigno);
      if (!sigpend)
        {
          PANIC(OSERR_FAILEDTOREMOVESIGNAL);
        }

      sig_releasependingsignal(sigpend);
      irqrestore(saved_state);
    }
  else
    {
      /* It's time to wait.  Save a copy of the old sigprocmask and install
       * the new (temporary) sigprocmask 
       */

      saved_sigprocmask = rtcb->sigprocmask;
      rtcb->sigprocmask = *set;
      rtcb->sigwaitmask = NULL_SIGNAL_SET;

      /* And wait until one of the unblocked signals is posted */

      up_block_task(rtcb, TSTATE_WAIT_SIG);

      /* We are running again, restore the original sigprocmask */

      rtcb->sigprocmask = saved_sigprocmask;
      irqrestore(saved_state);

      /* Now, handle the (rare?) case where (a) a blocked signal was received
       * while the task was suspended but (b) restoring the original
       * sigprocmask will unblock the signal.
       */

      sig_unmaskpendingsignal();
    }

  sched_unlock();
  return ERROR;
}
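sigsuspend() atomically installs a temporary signal mask and blocks until one of the newly unblocked signals is delivered, which is why the implementation above must check for pending signals with interrupts disabled. A typical caller-side pattern follows (standard POSIX usage, sketched; a SIGUSR1 handler is assumed to be installed elsewhere):

#include <signal.h>

/* Keep SIGUSR1 blocked during normal operation, then atomically unblock
 * it and wait for it with sigsuspend().
 */

static void wait_for_sigusr1(void)
{
  sigset_t blockmask;
  sigset_t waitmask;

  /* Block SIGUSR1 so that it can only be delivered inside sigsuspend() */

  sigemptyset(&blockmask);
  sigaddset(&blockmask, SIGUSR1);
  sigprocmask(SIG_BLOCK, &blockmask, NULL);

  /* Fetch the current mask, clear SIGUSR1 in it, and wait.  sigsuspend()
   * returns -1 with errno == EINTR once the handler has run.
   */

  sigprocmask(SIG_SETMASK, NULL, &waitmask);
  sigdelset(&waitmask, SIGUSR1);
  (void)sigsuspend(&waitmask);
}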
Code example #9
File: sam_idle.c  Project: FreddieChopin/NuttX
static void up_idlepm(void)
{
  static enum pm_state_e oldstate = PM_NORMAL;
  enum pm_state_e newstate;
  irqstate_t flags;
  int ret;

  /* Decide which power saving level can be obtained */

  newstate = pm_checkstate();

  /* Check for state changes */

  if (newstate != oldstate)
    {
      flags = irqsave();

      /* Perform board-specific, state-dependent logic here */

      llvdbg("newstate= %d oldstate=%d\n", newstate, oldstate);

      /* Then force the global state change */

      ret = pm_changestate(newstate);
      if (ret < 0)
        {
          /* The new state change failed, revert to the preceding state */

          (void)pm_changestate(oldstate);
        }
      else
        {
          /* Save the new state */

          oldstate = newstate;
        }

      /* MCU-specific power management logic */

      switch (newstate)
        {
        case PM_NORMAL:
          break;

        case PM_IDLE:
          break;

        case PM_STANDBY:
          sam_pmstop(true);
          break;

        case PM_SLEEP:
          (void)sam_pmstandby();
          break;

        default:
          break;
        }

      irqrestore(flags);
    }
}
Code example #10
int sam_oneshot_start(struct sam_oneshot_s *oneshot, oneshot_handler_t handler,
                      void *arg, const struct timespec *ts)
{
  uint64_t usec;
  uint64_t regval;
  irqstate_t flags;

  tcvdbg("handler=%p arg=%p, ts=(%lu, %lu)\n",
         handler, arg, (unsigned long)ts->tv_sec, (unsigned long)ts->tv_nsec);
  DEBUGASSERT(oneshot && handler && ts);

  /* Was the oneshot already running? */

  flags = irqsave();
  if (oneshot->running)
    {
      /* Yes.. then cancel it */

      tcvdbg("Already running... cancelling\n");
      (void)sam_oneshot_cancel(oneshot, NULL);
    }

  /* Save the new handler and its argument */

  oneshot->handler = handler;
  oneshot->arg     = arg;

  /* Express the delay in microseconds */

  usec = (uint64_t)ts->tv_sec * USEC_PER_SEC + (uint64_t)(ts->tv_nsec / NSEC_PER_USEC);

  /* Get the timer counter frequency and determine the number of counts
   * needed to achieve the requested delay.
   *
   *   frequency = ticks / second
   *   ticks     = seconds * frequency
   *             = (usecs * frequency) / USEC_PER_SEC;
   */

  regval = (usec * (uint64_t)sam_tc_divfreq(oneshot->tch)) / USEC_PER_SEC;

  tcvdbg("usec=%llu regval=%08llx\n", usec, regval);
  DEBUGASSERT(regval <= UINT16_MAX);

  /* Set up to receive the callback when the interrupt occurs */

  (void)sam_tc_attach(oneshot->tch, sam_oneshot_handler, oneshot,
                      TC_INT_CPCS);

  /* Set RC so that an event will be triggered when TC_CV register counts
   * up to RC.
   */

  sam_tc_setregister(oneshot->tch, TC_REGC, (uint32_t)regval);

  /* Start the counter */

  sam_tc_start(oneshot->tch);

  /* Enable interrupts.  We should get the callback when the interrupt
   * occurs.
   */

  oneshot->running = true;
  irqrestore(flags);
  return OK;
}
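The delay-to-counter conversion in sam_oneshot_start() follows the formula in its comment: counts = (usecs * frequency) / USEC_PER_SEC. A standalone sketch of just that arithmetic (the frequency in the usage note is only an example value):

#include <stdint.h>

#define USEC_PER_SEC 1000000ull

/* Convert a delay in microseconds into timer counts for a counter clocked
 * at 'frequency' counts per second, as in sam_oneshot_start().  With a
 * 16-bit counter the result must not exceed UINT16_MAX.
 */

static uint64_t usec_to_counts(uint64_t usec, uint32_t frequency)
{
  return (usec * (uint64_t)frequency) / USEC_PER_SEC;
}

/* Example: a 10 ms (10000 us) delay with a 500 kHz timer clock gives
 * usec_to_counts(10000, 500000) == 5000 counts.
 */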
Code example #11
int sam_oneshot_cancel(struct sam_oneshot_s *oneshot, struct timespec *ts)
{
  irqstate_t flags;
  uint64_t usec;
  uint64_t sec;
  uint64_t nsec;
  uint32_t count;
  uint32_t rc;

  /* Was the timer running? */

  flags = irqsave();
  if (!oneshot->running)
    {
      /* No.. Just return zero timer remaining and successful cancellation.
       * This function may execute at a high rate with no timer running
       * (as when pre-emption is enabled and disabled).
       */

      ts->tv_sec  = 0;
      ts->tv_nsec = 0;
      irqrestore(flags);
      return OK;
    }

  /* Yes.. Get the timer counter and rc registers and stop the counter.  If
   * the counter expires while we are doing this, the counter clock will be
   * stopped, but the clock will not be disabled.
   *
   * The expected behavior is that the counter register will freeze at
   * a value equal to the RC register when the timer expires.  The counter
   * should have values between 0 and RC in all other cases.
   *
   * REVISIT:  This does not appear to be the case.
   */

  tcvdbg("Cancelling...\n");

  count = sam_tc_getcounter(oneshot->tch);
  rc    = sam_tc_getregister(oneshot->tch, TC_REGC);

  /* Now we can disable the interrupt and stop the timer. */

  sam_tc_attach(oneshot->tch, NULL, NULL, 0);
  sam_tc_stop(oneshot->tch);

  oneshot->running = false;
  oneshot->handler = NULL;
  oneshot->arg     = NULL;
  irqrestore(flags);

  /* Did the caller provide us with a location to return the time
   * remaining?
   */

  if (ts)
    {
      /* Yes.. then calculate and return the time remaining on the
       * oneshot timer.
       */

      tcvdbg("rc=%lu count=%lu usec=%lu\n",
             (unsigned long)rc, (unsigned long)count, (unsigned long)usec);

      /* REVISIT: I am not certain why the timer counter value sometimes
       * exceeds RC.  Might be a bug, or perhaps the counter does not stop
       * in all cases.
       */

      if (count >= rc)
        {
          /* No time remaining (?) */

          ts->tv_sec  = 0;
          ts->tv_nsec = 0;
        }
      else
        {
          /* The total time remaining is the difference.  Convert that
           * to units of microseconds.
           *
           *   frequency = ticks / second
           *   seconds   = ticks / frequency
           *   usecs     = (ticks * USEC_PER_SEC) / frequency;
           */

          usec        = (((uint64_t)(rc - count)) * USEC_PER_SEC) /
                        sam_tc_divfreq(oneshot->tch);

          /* Return the time remaining in the correct form */

          sec         = usec / USEC_PER_SEC;
          nsec        = ((usec) - (sec * USEC_PER_SEC)) * NSEC_PER_USEC;

          ts->tv_sec  = (time_t)sec;
          ts->tv_nsec = (unsigned long)nsec;
        }

      tcvdbg("remaining (%lu, %lu)\n",
             (unsigned long)ts->tv_sec, (unsigned long)ts->tv_nsec);
    }

  return OK;
}
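The remaining-time computation in sam_oneshot_cancel() is the inverse conversion: usecs = ((RC - count) * USEC_PER_SEC) / frequency, then split into seconds and nanoseconds. A standalone sketch of that arithmetic:

#include <stdint.h>
#include <time.h>

#define USEC_PER_SEC  1000000ull
#define NSEC_PER_USEC 1000ull

/* Convert the counts remaining until the compare value (RC - count) into
 * a timespec, mirroring the arithmetic in sam_oneshot_cancel().
 */

static void counts_to_timespec(uint32_t remaining, uint32_t frequency,
                               struct timespec *ts)
{
  uint64_t usec = ((uint64_t)remaining * USEC_PER_SEC) / frequency;
  uint64_t sec  = usec / USEC_PER_SEC;

  ts->tv_sec  = (time_t)sec;
  ts->tv_nsec = (long)((usec - sec * USEC_PER_SEC) * NSEC_PER_USEC);
}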
Code example #12
File: sig_timedwait.c  Project: rmsilva/nuttx
int sigtimedwait(FAR const sigset_t *set, FAR struct siginfo *info,
                 FAR const struct timespec *timeout)
{
    FAR struct tcb_s *rtcb = (FAR struct tcb_s*)g_readytorun.head;
    sigset_t intersection;
    FAR sigpendq_t *sigpend;
    irqstate_t saved_state;
    int32_t waitticks;
    int ret = ERROR;

    DEBUGASSERT(rtcb->waitdog == NULL);

    sched_lock();  /* Not necessary */

    /* Several operations must be performed below:  We must determine if any
     * signal is pending and, if not, wait for the signal.  Since signals can
     * be posted from the interrupt level, there is a race condition that
     * can only be eliminated by disabling interrupts!
     */

    saved_state = irqsave();

    /* Check if there is a pending signal corresponding to one of the
     * signals in the pending signal set argument.
     */

    intersection = *set & sig_pendingset(rtcb);
    if (intersection != NULL_SIGNAL_SET)
    {
        /* One or more of the signals in the intersection is sufficient to cause
         * us to not wait.  Pick the lowest numbered signal and mark it not
         * pending.
         */

        sigpend = sig_removependingsignal(rtcb, sig_lowest(&intersection));
        ASSERT(sigpend);

        /* Return the signal info to the caller if so requested */

        if (info)
        {
            memcpy(info, &sigpend->info, sizeof(struct siginfo));
        }

        /* The return value is the number of the signal that awakened us */

        ret = sigpend->info.si_signo;

        /* Then dispose of the pending signal structure properly */

        sig_releasependingsignal(sigpend);
        irqrestore(saved_state);
    }

    /* We will have to wait for a signal to be posted to this task. */

    else
    {
        /* Save the set of pending signals to wait for */

        rtcb->sigwaitmask = *set;

        /* Check if we should wait for the timeout */

        if (timeout)
        {
            /* Convert the timespec to system clock ticks, making sure that
             * the resulting delay is greater than or equal to the requested
             * time in nanoseconds.
             */

#ifdef CONFIG_HAVE_LONG_LONG
            uint64_t waitticks64 = ((uint64_t)timeout->tv_sec * NSEC_PER_SEC +
                                    (uint64_t)timeout->tv_nsec + NSEC_PER_TICK - 1) /
                                   NSEC_PER_TICK;
            DEBUGASSERT(waitticks64 <= UINT32_MAX);
            waitticks = (uint32_t)waitticks64;
#else
            uint32_t waitmsec;

            DEBUGASSERT(timeout->tv_sec < UINT32_MAX / MSEC_PER_SEC);
            waitmsec = timeout->tv_sec * MSEC_PER_SEC +
                       (timeout->tv_nsec + NSEC_PER_MSEC - 1) / NSEC_PER_MSEC;
            waitticks = MSEC2TICK(waitmsec);
#endif

            /* Create a watchdog */

            rtcb->waitdog = wd_create();
            DEBUGASSERT(rtcb->waitdog);

            if (rtcb->waitdog)
            {
                /* This little bit of nonsense is necessary for some
                 * processors where sizeof(pointer) < sizeof(uint32_t).
                 * see wdog.h.
                 */

                wdparm_t wdparm;
                wdparm.pvarg = (FAR void *)rtcb;

                /* Start the watchdog */

                wd_start(rtcb->waitdog, waitticks, (wdentry_t)sig_timeout, 1,
                         wdparm.dwarg);

                /* Now wait for either the signal or the watchdog */

                up_block_task(rtcb, TSTATE_WAIT_SIG);

                /* We no longer need the watchdog */

                wd_delete(rtcb->waitdog);
                rtcb->waitdog = NULL;
            }

            /* REVISIT: And do what if there are no watchdog timers?  The wait
             * will fail and we will return something bogus.
             */
        }

        /* No timeout, just wait */

        else
        {
            /* And wait until one of the unblocked signals is posted */

            up_block_task(rtcb, TSTATE_WAIT_SIG);
        }

        /* We are running again, clear the sigwaitmask */

        rtcb->sigwaitmask = NULL_SIGNAL_SET;

        /* When we awaken, the cause will be in the TCB.  Get the signal number
         * (or timeout) that awakened us.
         */

        if (GOOD_SIGNO(rtcb->sigunbinfo.si_signo))
        {
            /* We were awakened by a signal... but is it one of the signals that
             * we were waiting for?
             */

            if (sigismember(set, rtcb->sigunbinfo.si_signo))
            {
                /* Yes.. the return value is the number of the signal that
                 * awakened us.
                 */

                ret = rtcb->sigunbinfo.si_signo;
            }
            else
            {
                /* No... then set EINTR and report an error */

                set_errno(EINTR);
                ret = ERROR;
            }
        }
        else
        {
            /* Otherwise, we must have been awakened by the timeout.  Set EAGAIN
             * and return an error.
             */

            DEBUGASSERT(rtcb->sigunbinfo.si_signo == SIG_WAIT_TIMEOUT);
            set_errno(EAGAIN);
            ret = ERROR;
        }

        /* Return the signal info to the caller if so requested */

        if (info)
        {
            memcpy(info, &rtcb->sigunbinfo, sizeof(struct siginfo));
        }

        irqrestore(saved_state);
    }

    sched_unlock();
    return ret;
}
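From application code, sigtimedwait() is normally called with the signal of interest blocked so that it stays pending rather than invoking a handler, and with a relative timeout. A standard POSIX usage sketch, written as it would look on NuttX (struct siginfo matches the declaration used above):

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <time.h>

/* Wait up to two seconds for SIGUSR1.  The signal is blocked first so
 * that it remains pending instead of invoking a handler.
 */

static int wait_for_signal_with_timeout(void)
{
  struct siginfo info;
  struct timespec timeout;
  sigset_t set;
  int signo;

  sigemptyset(&set);
  sigaddset(&set, SIGUSR1);
  sigprocmask(SIG_BLOCK, &set, NULL);

  timeout.tv_sec  = 2;
  timeout.tv_nsec = 0;

  signo = sigtimedwait(&set, &info, &timeout);
  if (signo < 0)
    {
      printf("sigtimedwait failed: errno=%d (EAGAIN means timeout)\n", errno);
      return -1;
    }

  printf("Got signal %d\n", signo);
  return 0;
}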
Code example #13
File: mq_sndinternal.c  Project: 1015472/PX4NuttX
int mq_dosend(mqd_t mqdes, FAR mqmsg_t *mqmsg, const void *msg, size_t msglen, int prio)
{
  FAR struct tcb_s *btcb;
  FAR msgq_t *msgq;
  FAR mqmsg_t *next;
  FAR mqmsg_t *prev;
  irqstate_t saved_state;

  /* Get a pointer to the message queue */

  sched_lock();
  msgq = mqdes->msgq;

  /* Construct the message header info */

  mqmsg->priority = prio;
  mqmsg->msglen   = msglen;

  /* Copy the message data into the message */

  memcpy((void*)mqmsg->mail, (const void*)msg, msglen);

  /* Insert the new message in the message queue */

  saved_state = irqsave();

  /* Search the message list to find the location to insert the new
   * message.  Each list is maintained in ascending priority order.
   */

  for (prev = NULL, next = (FAR mqmsg_t*)msgq->msglist.head;
       next && prio <= next->priority;
       prev = next, next = next->next);

  /* Add the message at the right place */

  if (prev)
    {
      sq_addafter((FAR sq_entry_t*)prev, (FAR sq_entry_t*)mqmsg,
                  &msgq->msglist);
    }
  else
    {
      sq_addfirst((FAR sq_entry_t*)mqmsg, &msgq->msglist);
    }

  /* Increment the count of messages in the queue */

  msgq->nmsgs++;
  irqrestore(saved_state);

  /* Check if we need to notify any tasks that are attached to the
   * message queue
   */

#ifndef CONFIG_DISABLE_SIGNALS
  if (msgq->ntmqdes)
    {
      /* Remove the message notification data from the message queue. */

#ifdef CONFIG_CAN_PASS_STRUCTS
      union sigval value      = msgq->ntvalue;
#else
      void *sival_ptr         = msgq->ntvalue.sival_ptr;
#endif
      int signo               = msgq->ntsigno;
      int pid                 = msgq->ntpid;

      /* Detach the notification */

      msgq->ntpid             = INVALID_PROCESS_ID;
      msgq->ntsigno           = 0;
      msgq->ntvalue.sival_int = 0;
      msgq->ntmqdes           = NULL;

      /* Queue the signal -- What if this returns an error? */

#ifdef CONFIG_CAN_PASS_STRUCTS
      sig_mqnotempty(pid, signo, value);
#else
      sig_mqnotempty(pid, signo, sival_ptr);
#endif
    }
#endif

  /* Check if any tasks are waiting for the MQ not empty event. */

  saved_state = irqsave();
  if (msgq->nwaitnotempty > 0)
    {
      /* Find the highest priority task that is waiting for
       * this queue to be non-empty in g_waitingformqnotempty
       * list.  sched_lock() should give us sufficient protection since
       * interrupts should never cause a change in this list
       */

      for (btcb = (FAR struct tcb_s*)g_waitingformqnotempty.head;
           btcb && btcb->msgwaitq != msgq;
           btcb = btcb->flink);

      /* If one was found, unblock it */

      ASSERT(btcb);

      btcb->msgwaitq = NULL;
      msgq->nwaitnotempty--;
      up_unblock_task(btcb);
    }

  irqrestore(saved_state);
  sched_unlock();
  return OK;
}
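mq_dosend() is the internal back end reached through the standard POSIX message queue API. A minimal caller-side sketch; the queue name, depth, and message size are example values:

#include <fcntl.h>
#include <mqueue.h>
#include <string.h>

/* Open (or create) a message queue and send one message. */

static int send_one_message(void)
{
  struct mq_attr attr;
  mqd_t mqdes;
  static const char msg[] = "hello";

  attr.mq_maxmsg  = 8;
  attr.mq_msgsize = 32;
  attr.mq_flags   = 0;
  attr.mq_curmsgs = 0;

  mqdes = mq_open("/example", O_WRONLY | O_CREAT, 0666, &attr);
  if (mqdes == (mqd_t)-1)
    {
      return -1;
    }

  /* Priority 1; higher priorities are inserted closer to the queue head,
   * matching the ordered insertion loop in mq_dosend().
   */

  (void)mq_send(mqdes, msg, strlen(msg), 1);
  return mq_close(mqdes);
}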
Code example #14
void up_schedule_sigaction(_TCB *tcb, sig_deliver_t sigdeliver)
{
  /* Refuse to handle nested signal actions */

  sdbg("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver);

  if (!tcb->xcp.sigdeliver)
    {
      irqstate_t flags;

      /* Make sure that interrupts are disabled */

      flags = irqsave();

      /* First, handle some special cases when the signal is
       * being delivered to the currently executing task.
       */

      sdbg("rtcb=0x%p current_regs=0x%p\n", g_readytorun.head, current_regs);

      if (tcb == (_TCB*)g_readytorun.head)
        {
          /* CASE 1:  We are not in an interrupt handler and
           * a task is signalling itself for some reason.
           */

          if (!current_regs)
            {
              /* In this case just deliver the signal now. */

              sigdeliver(tcb);
            }

          /* CASE 2:  We are in an interrupt handler AND the
           * interrupted task is the same as the one that
           * must receive the signal, then we will have to modify
           * the return state as well as the state in the TCB.
           */

          else
            {
              /* Save the return PC and SR and one scratch register
               * These will be restored by the signal trampoline after
               * the signals have been delivered.
               */

              tcb->xcp.sigdeliver   = sigdeliver;
              tcb->xcp.saved_pc     = current_regs[REG_PC];
              tcb->xcp.saved_sr     = current_regs[REG_SR];

              /* Then set up to vector to the trampoline with interrupts
               * disabled
               */

              current_regs[REG_PC]  = (uint32_t)up_sigdeliver;
              current_regs[REG_SR] |= 0x000000f0;

              /* And make sure that the saved context in the TCB
               * is the same as the interrupt return context.
               */

              up_copystate(tcb->xcp.regs, current_regs);
            }
        }

      /* Otherwise, we are (1) signaling a task that is not running
       * from an interrupt handler, or (2) we are not in an
       * interrupt handler and the running task is signalling
       * some non-running task.
       */

      else
        {
          /* Save the return PC and SR and one scratch register
           * These will be restored by the signal trampoline after
           * the signals have been delivered.
           */

          tcb->xcp.sigdeliver    = sigdeliver;
          tcb->xcp.saved_pc      = tcb->xcp.regs[REG_PC];
          tcb->xcp.saved_sr      = tcb->xcp.regs[REG_SR];

          /* Then set up to vector to the trampoline with interrupts
           * disabled
           */

          tcb->xcp.regs[REG_PC]  = (uint32_t)up_sigdeliver;
          tcb->xcp.regs[REG_SR] |= 0x000000f0;
        }

      irqrestore(flags);
    }
}
Code example #15
File: bma180.cpp  Project: Userskii/Firmware
int
BMA180::ioctl(struct file *filp, int cmd, unsigned long arg)
{
	switch (cmd) {

	case SENSORIOCSPOLLRATE: {
			switch (arg) {

				/* switching to manual polling */
			case SENSOR_POLLRATE_MANUAL:
				stop();
				_call_interval = 0;
				return OK;

				/* external signalling not supported */
			case SENSOR_POLLRATE_EXTERNAL:

				/* zero would be bad */
			case 0:
				return -EINVAL;


				/* set default/max polling rate */
			case SENSOR_POLLRATE_MAX:
			case SENSOR_POLLRATE_DEFAULT:
				/* With internal low pass filters enabled, 250 Hz is sufficient */
				return ioctl(filp, SENSORIOCSPOLLRATE, 250);

				/* adjust to a legal polling interval in Hz */
			default: {
					/* do we need to start internal polling? */
					bool want_start = (_call_interval == 0);

					/* convert hz to hrt interval via microseconds */
					unsigned ticks = 1000000 / arg;

					/* check against maximum sane rate */
					if (ticks < 1000)
						return -EINVAL;

					/* update interval for next measurement */
					/* XXX this is a bit shady, but no other way to adjust... */
					_call.period = _call_interval = ticks;

					/* if we need to start the poll state machine, do it */
					if (want_start)
						start();

					return OK;
				}
			}
		}

	case SENSORIOCGPOLLRATE:
		if (_call_interval == 0)
			return SENSOR_POLLRATE_MANUAL;

		return 1000000 / _call_interval;

	case SENSORIOCSQUEUEDEPTH: {
		/* lower bound is mandatory, upper bound is a sanity check */
		if ((arg < 2) || (arg > 100))
			return -EINVAL;
		
		irqstate_t flags = irqsave();
		if (!_reports->resize(arg)) {
			irqrestore(flags);
			return -ENOMEM;
		}
		irqrestore(flags);
		
		return OK;
	}

	case SENSORIOCGQUEUEDEPTH:
		return _reports->size();

	case SENSORIOCRESET:
		/* XXX implement */
		return -EINVAL;

	case ACCELIOCSSAMPLERATE:	/* sensor sample rate is not (really) adjustable */
		return -EINVAL;

	case ACCELIOCGSAMPLERATE:
		return 1200;		/* always operating in low-noise mode */

	case ACCELIOCSLOWPASS:
		return set_lowpass(arg);

	case ACCELIOCGLOWPASS:
		return _current_lowpass;

	case ACCELIOCSSCALE:
		/* copy scale in */
		memcpy(&_accel_scale, (struct accel_scale *) arg, sizeof(_accel_scale));
		return OK;

	case ACCELIOCGSCALE:
		/* copy scale out */
		memcpy((struct accel_scale *) arg, &_accel_scale, sizeof(_accel_scale));
		return OK;

	case ACCELIOCSRANGE:
		return set_range(arg);

	case ACCELIOCGRANGE:
		return _current_range;

	default:
		/* give it to the superclass */
		return SPI::ioctl(filp, cmd, arg);
	}
}
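These PX4 sensor drivers are configured from user space through the same ioctl() interface shown above. Below is a sketch of the caller side that sets the poll rate and report queue depth; the device path, the header location, and the numeric values are assumptions for illustration only.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <drivers/drv_accel.h>   /* SENSORIOCSPOLLRATE et al.; header path is an assumption */

/* Configure an accelerometer from user space.  "/dev/accel" and the
 * numeric values are illustrative only.
 */

static int configure_accel(void)
{
  int fd = open("/dev/accel", O_RDONLY);

  if (fd < 0)
    {
      return -1;
    }

  /* 250 Hz polling and a 10-entry report queue */

  (void)ioctl(fd, SENSORIOCSPOLLRATE, 250);
  (void)ioctl(fd, SENSORIOCSQUEUEDEPTH, 10);

  close(fd);
  return 0;
}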
Code example #16
static void work_process(FAR struct wqueue_s *wqueue)
{
  volatile FAR struct work_s *work;
  worker_t  worker;
  irqstate_t flags;
  FAR void *arg;
  uint32_t elapsed;
  uint32_t remaining;
  uint32_t next;

  /* Then process queued work.  We need to keep interrupts disabled while
   * we process items in the work list.
   */

  next  = CONFIG_SCHED_WORKPERIOD / USEC_PER_TICK;
  flags = irqsave();
  work  = (FAR struct work_s *)wqueue->q.head;
  while (work)
    {
      /* Is this work ready?  It is ready if there is no delay or if
       * the delay has elapsed. qtime is the time that the work was added
       * to the work queue.  It will always be greater than or equal to
       * zero.  Therefore a delay of zero will always execute immediately.
       */

      elapsed = clock_systimer() - work->qtime;
      if (elapsed >= work->delay)
        {
          /* Remove the ready-to-execute work from the list */

          (void)dq_rem((struct dq_entry_s *)work, &wqueue->q);

          /* Extract the work description from the entry (in case the work
           * instance may be re-used after it has been de-queued).
           */

          worker = work->worker;

          /* Check for a race condition where the work may be nullified
           * before it is removed from the queue.
           */

          if (worker != NULL)
            {
              /* Extract the work argument (before re-enabling interrupts) */

              arg = work->arg;

              /* Mark the work as no longer being queued */

              work->worker = NULL;

              /* Do the work.  Re-enable interrupts while the work is being
               * performed... we don't have any idea how long that will take!
               */

              irqrestore(flags);
              worker(arg);

              /* Now, unfortunately, since we re-enabled interrupts we don't
               * know the state of the work list and we will have to start
               * back at the head of the list.
               */

              flags = irqsave();
              work  = (FAR struct work_s *)wqueue->q.head;
            }
          else
            {
              /* Cancelled.. Just move to the next work in the list with
               * interrupts still disabled.
               */

              work = (FAR struct work_s *)work->dq.flink;
            }
        }
      else
        {
          /* This one is not ready.. will it be ready before the next
           * scheduled wakeup interval?
           */

          remaining = work->delay - elapsed;
          if (remaining < next)
            {
              /* Yes.. Then schedule to wake up when the work is ready */

              next = remaining;
            }

          /* Then try the next in the list. */

          work = (FAR struct work_s *)work->dq.flink;
        }
    }

  /* Wait awhile to check the work list.  We will wait here until either
   * the time elapses or until we are awakened by a signal.
   */

  usleep(next * USEC_PER_TICK);
  irqrestore(flags);
}
Code example #17
File: nor_main.c  Project: FreddieChopin/NuttX
int nor_main(int argc, char *argv[])
{
  uint32_t regval;

  /* Here we have an in-memory value we can change in the debugger
   * to begin booting in NOR Flash
   */

  static volatile uint32_t wait = NOR_BOOT_MODE;

  printf("Configuring NOR FLASH on CS0 and %s\n", wait ? "waiting" : "booting");

  /* Make sure that the SMC peripheral is enabled (But of course it is... we
   * are executing from NOR FLASH now).
   */

  sam_hsmc_enableclk();

  /* The SAMA5D3x-EK has 118MB of 16-bit NOR FLASH at CS0.  The NOR FLASH
   * has already been configured by the first level ROM bootloader... we
   * simply need to modify the timing here.
   */

  regval = HSMC_SETUP_NWE_SETUP(1) |  HSMC_SETUP_NCS_WRSETUP(0) |
           HSMC_SETUP_NRD_SETUP(2) | HSMC_SETUP_NCS_RDSETUP(0);
  putreg32(regval, SAM_HSMC_SETUP(HSMC_CS0));

  regval = HSMC_PULSE_NWE_PULSE(10) | HSMC_PULSE_NCS_WRPULSE(10) |
           HSMC_PULSE_NRD_PULSE(11) | HSMC_PULSE_NCS_RDPULSE(11);
  putreg32(regval, SAM_HSMC_PULSE(HSMC_CS0));

  regval = HSMC_CYCLE_NWE_CYCLE(11) | HSMC_CYCLE_NRD_CYCLE(14);
  putreg32(regval, SAM_HSMC_CYCLE(HSMC_CS0));

  regval = HSMC_TIMINGS_TCLR(0) | HSMC_TIMINGS_TADL(0) |
           HSMC_TIMINGS_TAR(0) | HSMC_TIMINGS_TRR(0) |
           HSMC_TIMINGS_TWB(0) | HSMC_TIMINGS_RBNSEL(0);
  putreg32(regval, SAM_HSMC_TIMINGS(HSMC_CS0));

  regval = HSMC_MODE_READMODE | HSMC_MODE_WRITEMODE |
           HSMC_MODE_EXNWMODE_DISABLED | HSMC_MODE_BIT_16 |
           HSMC_MODE_TDFCYCLES(1);
  putreg32(regval, SAM_HSMC_MODE(HSMC_CS0));

  /* Interrupts must be disabled through the following.  In this configuration,
   * there should only be timer interrupts.  Your NuttX configuration must use
   * CONFIG_SERIAL_LOWCONSOLE=y or printf() will hang when the interrupts
   * are disabled!
   */

  (void)irqsave();

  /* Disable MATRIX write protection */

#if 0 /* Disabled on reset */
  putreg32(MATRIX_WPMR_WPKEY, SAM_MATRIX_WPMR);
#endif

  /* Set remap state 1.
   *
   *   Boot state:    ROM is seen at address 0x00000000
   *   Remap State 0: SRAM is seen at address 0x00000000 (through AHB slave
   *                  interface) instead of ROM.
   *   Remap State 1: HEBI is seen at address 0x00000000 (through AHB slave
   *                  interface) instead of ROM for external boot.
   *
   * REVISIT:  This does not work.  No matter what I do, the internal
   * SRAM is always visible at address zero.  I am missing something.
   */

  putreg32(MATRIX_MRCR_RCB0, SAM_MATRIX_MRCR);   /* Enable remap */
  putreg32(AXIMX_REMAP_REMAP1, SAM_AXIMX_REMAP); /* Remap HEBI */

  /* Restore MATRIX write protection */

#if 0 /* Disabled on reset */
  putreg32(MATRIX_WPMR_WPKEY | MATRIX_WPMR_WPEN, SAM_MATRIX_WPMR);
#endif

  /* Disable the caches and the MMU.  Disabling the MMU should be safe here
   * because there is a 1-to-1 identity mapping between the physical and
   * virtual addressing.
   */

  /* NOTE:  This generates crashes and lots of errors, but does leave the
   * system in the proper state to run from NOR:  very ugly but usable.
   * Better than the alternative.
   */

  cp15_disable_mmu();
  cp15_disable_caches();

  /* Invalidate caches and TLBs */

  arch_invalidate_icache();
  arch_invalidate_dcache_all();
  cp15_invalidate_tlbs();

  /* Then jump into NOR flash */

  while (wait)
    {
    }

  NOR_ENTRY();

  return 0; /* We should not get here in either case */
}
Code example #18
File: lsm303d.cpp  Project: JamesxL/Firmware
int
LSM303D::ioctl(struct file *filp, int cmd, unsigned long arg)
{
	switch (cmd) {

	case SENSORIOCSPOLLRATE: {
		switch (arg) {

			/* switching to manual polling */
			case SENSOR_POLLRATE_MANUAL:
				stop();
				_call_accel_interval = 0;
				return OK;

			/* external signalling not supported */
			case SENSOR_POLLRATE_EXTERNAL:

			/* zero would be bad */
			case 0:
				return -EINVAL;

			/* set default/max polling rate */
			case SENSOR_POLLRATE_MAX:
				return ioctl(filp, SENSORIOCSPOLLRATE, 1600);

			case SENSOR_POLLRATE_DEFAULT:
				return ioctl(filp, SENSORIOCSPOLLRATE, LSM303D_ACCEL_DEFAULT_RATE);

				/* adjust to a legal polling interval in Hz */
			default: {
				/* do we need to start internal polling? */
				bool want_start = (_call_accel_interval == 0);

				/* convert hz to hrt interval via microseconds */
				unsigned ticks = 1000000 / arg;

				/* check against maximum sane rate */
				if (ticks < 500)
					return -EINVAL;

				/* adjust filters */
				accel_set_driver_lowpass_filter((float)arg, _accel_filter_x.get_cutoff_freq());

				/* update interval for next measurement */
				/* XXX this is a bit shady, but no other way to adjust... */
				_accel_call.period = _call_accel_interval = ticks;

				/* if we need to start the poll state machine, do it */
				if (want_start)
					start();

				return OK;
			}
		}
	}

	case SENSORIOCGPOLLRATE:
		if (_call_accel_interval == 0)
			return SENSOR_POLLRATE_MANUAL;

		return 1000000 / _call_accel_interval;

	case SENSORIOCSQUEUEDEPTH: {
		/* lower bound is mandatory, upper bound is a sanity check */
		if ((arg < 1) || (arg > 100))
			return -EINVAL;

		irqstate_t flags = irqsave();
		if (!_accel_reports->resize(arg)) {
			irqrestore(flags);
			return -ENOMEM;
		}
		irqrestore(flags);

		return OK;
	}

	case SENSORIOCGQUEUEDEPTH:
		return _accel_reports->size();

	case SENSORIOCRESET:
		reset();
		return OK;

	case ACCELIOCSSAMPLERATE:
		return accel_set_samplerate(arg);

	case ACCELIOCGSAMPLERATE:
		return _accel_samplerate;

	case ACCELIOCSLOWPASS: {
		return accel_set_driver_lowpass_filter((float)_accel_samplerate, (float)arg);
	}

	case ACCELIOCGLOWPASS:
		return _accel_filter_x.get_cutoff_freq();

	case ACCELIOCSSCALE: {
		/* copy scale, but only if off by a few percent */
		struct accel_scale *s = (struct accel_scale *) arg;
		float sum = s->x_scale + s->y_scale + s->z_scale;
		if (sum > 2.0f && sum < 4.0f) {
			memcpy(&_accel_scale, s, sizeof(_accel_scale));
			return OK;
		} else {
			return -EINVAL;
		}
	}

	case ACCELIOCSRANGE:
		/* arg needs to be in G */
		return accel_set_range(arg);

	case ACCELIOCGRANGE:
		/* convert to m/s^2 and return rounded in G */
		return (unsigned long)((_accel_range_m_s2)/LSM303D_ONE_G + 0.5f);

	case ACCELIOCGSCALE:
		/* copy scale out */
		memcpy((struct accel_scale *) arg, &_accel_scale, sizeof(_accel_scale));
		return OK;

	case ACCELIOCSELFTEST:
		return accel_self_test();

	default:
		/* give it to the superclass */
		return SPI::ioctl(filp, cmd, arg);
	}
}
Code example #19
File: hmc5883.cpp  Project: 9510030662/Firmware
int
HMC5883::ioctl(struct file *filp, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SENSORIOCSPOLLRATE: {
		switch (arg) {

			/* switching to manual polling */
		case SENSOR_POLLRATE_MANUAL:
			stop();
			_measure_ticks = 0;
			return OK;

			/* external signalling (DRDY) not supported */
		case SENSOR_POLLRATE_EXTERNAL:

			/* zero would be bad */
		case 0:
			return -EINVAL;

			/* set default/max polling rate */
		case SENSOR_POLLRATE_MAX:
		case SENSOR_POLLRATE_DEFAULT: {
				/* do we need to start internal polling? */
				bool want_start = (_measure_ticks == 0);

				/* set interval for next measurement to minimum legal value */
				_measure_ticks = USEC2TICK(HMC5883_CONVERSION_INTERVAL);

				/* if we need to start the poll state machine, do it */
				if (want_start)
					start();

				return OK;
			}

			/* adjust to a legal polling interval in Hz */
		default: {
				/* do we need to start internal polling? */
				bool want_start = (_measure_ticks == 0);

				/* convert hz to tick interval via microseconds */
				unsigned ticks = USEC2TICK(1000000 / arg);

				/* check against maximum rate */
				if (ticks < USEC2TICK(HMC5883_CONVERSION_INTERVAL))
					return -EINVAL;

				/* update interval for next measurement */
				_measure_ticks = ticks;

				/* if we need to start the poll state machine, do it */
				if (want_start)
					start();

				return OK;
			}
		}
	}

	case SENSORIOCGPOLLRATE:
		if (_measure_ticks == 0)
			return SENSOR_POLLRATE_MANUAL;

		return 1000000/TICK2USEC(_measure_ticks);

	case SENSORIOCSQUEUEDEPTH: {
			/* lower bound is mandatory, upper bound is a sanity check */
			if ((arg < 1) || (arg > 100))
				return -EINVAL;

			irqstate_t flags = irqsave();
			if (!_reports->resize(arg)) {
				irqrestore(flags);
				return -ENOMEM;
			}
			irqrestore(flags);

			return OK;
		}

	case SENSORIOCGQUEUEDEPTH:
		return _reports->size();

	case SENSORIOCRESET:
		return reset();

	case MAGIOCSSAMPLERATE:
		/* same as pollrate because device is in single measurement mode*/
		return ioctl(filp, SENSORIOCSPOLLRATE, arg);

	case MAGIOCGSAMPLERATE:
		/* same as pollrate because device is in single measurement mode*/
		return 1000000/TICK2USEC(_measure_ticks);

	case MAGIOCSRANGE:
		return set_range(arg);

	case MAGIOCGRANGE:
		return _range_ga;

	case MAGIOCSLOWPASS:
	case MAGIOCGLOWPASS:
		/* not supported, no internal filtering */
		return -EINVAL;

	case MAGIOCSSCALE:
		/* set new scale factors */
		memcpy(&_scale, (mag_scale *)arg, sizeof(_scale));
		/* check calibration, but do not actually return an error */
		(void)check_calibration();
		return 0;

	case MAGIOCGSCALE:
		/* copy out scale factors */
		memcpy((mag_scale *)arg, &_scale, sizeof(_scale));
		return 0;

	case MAGIOCCALIBRATE:
		return calibrate(filp, arg);

	case MAGIOCEXSTRAP:
		return set_excitement(arg);

	case MAGIOCSELFTEST:
		return check_calibration();

	case MAGIOCGEXTERNAL:
		if (_bus == PX4_I2C_BUS_EXPANSION)
			return 1;
		else
			return 0;

	default:
		/* give it to the superclass */
		return I2C::ioctl(filp, cmd, arg);
	}
}
Code example #20
File: lsm303d.cpp  Project: JamesxL/Firmware
int
LSM303D::mag_ioctl(struct file *filp, int cmd, unsigned long arg)
{
	switch (cmd) {

	case SENSORIOCSPOLLRATE: {
		switch (arg) {

			/* switching to manual polling */
			case SENSOR_POLLRATE_MANUAL:
				stop();
				_call_mag_interval = 0;
				return OK;

			/* external signalling not supported */
			case SENSOR_POLLRATE_EXTERNAL:

			/* zero would be bad */
			case 0:
				return -EINVAL;

			/* set default/max polling rate */
			case SENSOR_POLLRATE_MAX:
			case SENSOR_POLLRATE_DEFAULT:
				/* 100 Hz is max for mag */
				return mag_ioctl(filp, SENSORIOCSPOLLRATE, 100);

			/* adjust to a legal polling interval in Hz */
			default: {
					/* do we need to start internal polling? */
					bool want_start = (_call_mag_interval == 0);

					/* convert hz to hrt interval via microseconds */
					unsigned ticks = 1000000 / arg;

					/* check against maximum sane rate */
					if (ticks < 1000)
						return -EINVAL;

					/* update interval for next measurement */
					/* XXX this is a bit shady, but no other way to adjust... */
					_mag_call.period = _call_mag_interval = ticks;

					/* if we need to start the poll state machine, do it */
					if (want_start)
						start();

					return OK;
				}
			}
		}

	case SENSORIOCGPOLLRATE:
		if (_call_mag_interval == 0)
			return SENSOR_POLLRATE_MANUAL;

		return 1000000 / _call_mag_interval;
	
	case SENSORIOCSQUEUEDEPTH: {
		/* lower bound is mandatory, upper bound is a sanity check */
		if ((arg < 1) || (arg > 100))
			return -EINVAL;

		irqstate_t flags = irqsave();
		if (!_mag_reports->resize(arg)) {
			irqrestore(flags);
			return -ENOMEM;
		}
		irqrestore(flags);

		return OK;
	}

	case SENSORIOCGQUEUEDEPTH:
		return _mag_reports->size();

	case SENSORIOCRESET:
		reset();
		return OK;

	case MAGIOCSSAMPLERATE:
		return mag_set_samplerate(arg);

	case MAGIOCGSAMPLERATE:
		return _mag_samplerate;

	case MAGIOCSLOWPASS:
	case MAGIOCGLOWPASS:
		/* not supported, no internal filtering */
		return -EINVAL;

	case MAGIOCSSCALE:
		/* copy scale in */
		memcpy(&_mag_scale, (struct mag_scale *) arg, sizeof(_mag_scale));
		return OK;

	case MAGIOCGSCALE:
		/* copy scale out */
		memcpy((struct mag_scale *) arg, &_mag_scale, sizeof(_mag_scale));
		return OK;

	case MAGIOCSRANGE:
		return mag_set_range(arg);

	case MAGIOCGRANGE:
		return _mag_range_ga;

	case MAGIOCSELFTEST:
		return mag_self_test();

	case MAGIOCGEXTERNAL:
		/* no external mag board yet */
		return 0;

	default:
		/* give it to the superclass */
		return SPI::ioctl(filp, cmd, arg);
	}
}
Code example #21
File: px4flow.cpp  Project: 2014matthew/PX4Firmware
int
PX4FLOW::ioctl(struct file *filp, int cmd, unsigned long arg)
{
	switch (cmd) {

	case SENSORIOCSPOLLRATE: {
			switch (arg) {

			/* switching to manual polling */
			case SENSOR_POLLRATE_MANUAL:
				stop();
				_measure_ticks = 0;
				return OK;

			/* external signalling (DRDY) not supported */
			case SENSOR_POLLRATE_EXTERNAL:

			/* zero would be bad */
			case 0:
				return -EINVAL;

			/* set default/max polling rate */
			case SENSOR_POLLRATE_MAX:
			case SENSOR_POLLRATE_DEFAULT: {
					/* do we need to start internal polling? */
					bool want_start = (_measure_ticks == 0);

					/* set interval for next measurement to minimum legal value */
					_measure_ticks = USEC2TICK(PX4FLOW_CONVERSION_INTERVAL);

					/* if we need to start the poll state machine, do it */
					if (want_start) {
						start();
					}

					return OK;
				}

			/* adjust to a legal polling interval in Hz */
			default: {
					/* do we need to start internal polling? */
					bool want_start = (_measure_ticks == 0);

					/* convert hz to tick interval via microseconds */
					unsigned ticks = USEC2TICK(1000000 / arg);

					/* check against maximum rate */
					if (ticks < USEC2TICK(PX4FLOW_CONVERSION_INTERVAL)) {
						return -EINVAL;
					}

					/* update interval for next measurement */
					_measure_ticks = ticks;

					/* if we need to start the poll state machine, do it */
					if (want_start) {
						start();
					}

					return OK;
				}
			}
		}

	case SENSORIOCGPOLLRATE:
		if (_measure_ticks == 0) {
			return SENSOR_POLLRATE_MANUAL;
		}

		return (1000000 / TICK2USEC(_measure_ticks));

	case SENSORIOCSQUEUEDEPTH: {
			/* lower bound is mandatory, upper bound is a sanity check */
			if ((arg < 1) || (arg > 100)) {
				return -EINVAL;
			}

			irqstate_t flags = irqsave();

			if (!_reports->resize(arg)) {
				irqrestore(flags);
				return -ENOMEM;
			}

			irqrestore(flags);

			return OK;
		}

	case SENSORIOCGQUEUEDEPTH:
		return _reports->size();

	case SENSORIOCRESET:
		/* XXX implement this */
		return -EINVAL;

	default:
		/* give it to the superclass */
		return I2C::ioctl(filp, cmd, arg);
	}
}
Code example #22
0
File: mq_waitirq.c Project: nsrango/Firmware
void mq_waitirq(FAR struct tcb_s *wtcb, int errcode)
{
  FAR msgq_t *msgq;
  irqstate_t saved_state;

  /* Disable interrupts.  This is necessary because an interrupt handler may
   * attempt to send a message while we are doing this.
   */

  saved_state = irqsave();

  /* It is possible that an interrupt/context switch beat us to the punch and
   * already changed the task's state.  NOTE:  The operations within the if
   * are safe because interrupts are always disabled with the msgwaitq,
   * nwaitnotempty, and nwaitnotfull fields are modified.
   */

  if (wtcb->task_state == TSTATE_WAIT_MQNOTEMPTY ||
      wtcb->task_state == TSTATE_WAIT_MQNOTFULL)
    {
      /* Get the message queue associated with the waiter from the TCB */

      msgq = wtcb->msgwaitq;
#ifdef CONFIG_DEBUG
      if (!msgq)
        {
          /* In these states there must always be an associated message queue */

          PANIC((uint32_t)OSERR_MQNOWAITER);
        }
#endif
      wtcb->msgwaitq = NULL;

      /* Decrement the count of waiters and cancel the wait */

      if (wtcb->task_state == TSTATE_WAIT_MQNOTEMPTY)
        {
#ifdef CONFIG_DEBUG
          if (msgq->nwaitnotempty <= 0)
            {
              /* In this state, there should be a positive, non-zero waiter
               * count.
               */

               PANIC((uint32_t)OSERR_MQNONEMPTYCOUNT);

            }
#endif
          msgq->nwaitnotempty--;
        }
      else
        {
#ifdef CONFIG_DEBUG
          if (msgq->nwaitnotfull <= 0)
            {
              /* In this state, there should be a positive, non-zero waiter
               * count.
               */

               PANIC((uint32_t)OSERR_MQNOTFULLCOUNT);

            }
#endif
          msgq->nwaitnotfull--;
        }

      /* Mark the errno value for the thread. */

      wtcb->pterrno = errcode;

      /* Restart the task. */

      up_unblock_task(wtcb);
    }

  /* Interrupts may now be enabled. */

  irqrestore(saved_state);
}
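
mq_waitirq() is the kernel-side path that wakes a task blocked on a message queue when a signal or timeout occurs, storing the error code in the task's errno.  From the application's point of view this surfaces as the blocking call failing with EINTR (or ETIMEDOUT).  A minimal sketch, with an arbitrary message size:

/* Sketch: a task blocked in mq_receive() that is interrupted by a signal
 * sees the call fail with errno == EINTR; the error code is delivered
 * through wtcb->pterrno in mq_waitirq() above.
 */

#include <mqueue.h>
#include <errno.h>
#include <stdio.h>

ssize_t receive_or_interrupt(mqd_t mqd)
{
  char msg[32];
  ssize_t nbytes = mq_receive(mqd, msg, sizeof(msg), NULL);

  if (nbytes < 0 && errno == EINTR)
    {
      printf("mq_receive was interrupted by a signal\n");
    }

  return nbytes;
}
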
Code example #23
0
static ssize_t proc_read(FAR struct file *filep, FAR char *buffer,
                         size_t buflen)
{
  FAR struct proc_file_s *procfile;
  FAR struct tcb_s *tcb;
  irqstate_t flags;
  ssize_t ret;

  fvdbg("buffer=%p buflen=%d\n", buffer, (int)buflen);

  /* Recover our private data from the struct file instance */

  procfile = (FAR struct proc_file_s *)filep->f_priv;
  DEBUGASSERT(procfile);

  /* Verify that the thread is still valid */

  flags = irqsave();
  tcb = sched_gettcb(procfile->pid);

  if (!tcb)
    {
      fdbg("ERROR: PID %d is not valid\n", (int)procfile->pid);
      irqrestore(flags);
      return -ENODEV;
    }

  /* Provide the requested data */

  switch (procfile->node->node)
    {
    case PROC_STATUS: /* Task/thread status */
      ret = proc_status(procfile, tcb, buffer, buflen, filep->f_pos);
      break;

    case PROC_CMDLINE: /* Task command line */
      ret = proc_cmdline(procfile, tcb, buffer, buflen, filep->f_pos);
      break;

#ifdef CONFIG_SCHED_CPULOAD
    case PROC_LOADAVG: /* Average CPU utilization */
      ret = proc_loadavg(procfile, tcb, buffer, buflen, filep->f_pos);
      break;
#endif
    case PROC_STACK: /* Task stack info */
      ret = proc_stack(procfile, tcb, buffer, buflen, filep->f_pos);
      break;

    case PROC_GROUP_STATUS: /* Task group status */
      ret = proc_groupstatus(procfile, tcb, buffer, buflen, filep->f_pos);
      break;

    case PROC_GROUP_FD: /* Group file descriptors */
      ret = proc_groupfd(procfile, tcb, buffer, buflen, filep->f_pos);
      break;

     default:
      ret = -EINVAL;
      break;
    }

  irqrestore(flags);

  /* Update the file offset */

  if (ret > 0)
    {
      filep->f_pos += ret;
    }

  return ret;
}
Code example #24
0
xcpt_t kinetis_pinirqattach(uint32_t pinset, xcpt_t pinisr)
{
#ifdef HAVE_PORTINTS
  xcpt_t      *isrtab;
  xcpt_t       oldisr;
  irqstate_t   flags;
  unsigned int port;
  unsigned int pin;

  /* It only makes sense to call this function for input pins that are configured
   * as interrupts.
   */

  DEBUGASSERT((pinset & _PIN_INTDMA_MASK) == _PIN_INTERRUPT);
  DEBUGASSERT((pinset & _PIN_IO_MASK) == _PIN_INPUT);

  /* Get the port number and pin number */

  port = (pinset & _PIN_PORT_MASK) >> _PIN_PORT_SHIFT;
  pin  = (pinset & _PIN_MASK)      >> _PIN_SHIFT;

  /* Get the table associated with this port */

  DEBUGASSERT(port < KINETIS_NPORTS);
  flags = irqsave();
  switch (port)
    {
#ifdef CONFIG_KINETIS_PORTAINTS
      case KINETIS_PORTA :
        isrtab = g_portaisrs;
        break;
#endif
#ifdef CONFIG_KINETIS_PORTBINTS
      case KINETIS_PORTB :
        isrtab = g_portbisrs;
        break;
#endif
#ifdef CONFIG_KINETIS_PORTCINTS
      case KINETIS_PORTC :
        isrtab = g_portcisrs;
        break;
#endif
#ifdef CONFIG_KINETIS_PORTDINTS
      case KINETIS_PORTD :
        isrtab = g_portdisrs;
        break;
#endif
#ifdef CONFIG_KINETIS_PORTEINTS
      case KINETIS_PORTE :
        isrtab = g_porteisrs;
        break;
#endif
      default:
        irqrestore(flags);
        return NULL;
    }

  /* Get the old PIN ISR and set the new PIN ISR */

  oldisr      = isrtab[pin];
  isrtab[pin] = pinisr;

  /* Restore the interrupt state and return the old PIN ISR address */

  irqrestore(flags);
  return oldisr;
#else
   return NULL;
#endif /* HAVE_PORTINTS */
}
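
A typical caller configures the pin, attaches its handler with kinetis_pinirqattach(), and then unmasks the interrupt.  In the sketch below only kinetis_pinirqattach() comes from the listing above; the pinset value and the kinetis_pinconfig()/kinetis_pinirqenable() helpers are assumptions about the surrounding Kinetis port.

/* Sketch only: hook an ISR to a GPIO interrupt pin.  button_pinset is a
 * hypothetical board-specific pin configuration (input + interrupt);
 * kinetis_pinconfig() and kinetis_pinirqenable() are assumed companion
 * helpers.  The chip-specific GPIO header is omitted here.
 */

#include <nuttx/config.h>
#include <nuttx/irq.h>
#include <stdint.h>

static int button_isr(int irq, FAR void *context)
{
  /* Runs in interrupt context -- keep the handler short */

  return 0;
}

static void button_attach(uint32_t button_pinset)
{
  (void)kinetis_pinconfig(button_pinset);                /* configure the pin (assumed helper) */
  (void)kinetis_pinirqattach(button_pinset, button_isr); /* install the ISR                    */
  kinetis_pinirqenable(button_pinset);                   /* unmask the interrupt (assumed)     */
}
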
Code example #25
0
static int proc_readdir(struct fs_dirent_s *dir)
{
  FAR struct proc_dir_s *procdir;
  FAR const struct proc_node_s *node = NULL;
  FAR struct tcb_s *tcb;
  unsigned int index;
  irqstate_t flags;
  pid_t pid;
  int ret;

  DEBUGASSERT(dir && dir->u.procfs);
  procdir = dir->u.procfs;

  /* Have we reached the end of the directory */

  index = procdir->base.index;
  if (index >= procdir->base.nentries)
    {
      /* We signal the end of the directory by returning the special
       * error -ENOENT
       */

      fvdbg("Entry %d: End of directory\n", index);
      ret = -ENOENT;
    }

  /* No, we are not at the end of the directory */

  else
    {
      /* Verify that the pid still refers to an active task/thread */

      pid = procdir->pid;

      flags = irqsave();
      tcb = sched_gettcb(pid);
      irqrestore(flags);

      if (!tcb)
        {
          fdbg("ERROR: PID %d is no longer valid\n", (int)pid);
          return -ENOENT;
        }

      /* The TCB is still valid (or at least was when we entered this function) */
      /* Handle the directory listing by the node type */

      switch (procdir->node->node)
        {
          case PROC_LEVEL0: /* Top level directory */
            DEBUGASSERT(procdir->base.level == 1);
            node = g_level0info[index];
            break;

          case PROC_GROUP:  /* Group sub-directory */
            DEBUGASSERT(procdir->base.level == 2);
            node = g_groupinfo[index];
            break;

          default:
            /* Unknown node type.  Return early: node is still NULL here
             * and must not be dereferenced below.
             */

            return -ENOENT;
        }

      /* Save the filename and file type */

      dir->fd_dir.d_type = node->dtype;
      strncpy(dir->fd_dir.d_name, node->name, NAME_MAX+1);

      /* Set up the next directory entry offset.  NOTE that we could use the
       * standard f_pos instead of our own private index.
       */

      procdir->base.index = index + 1;
      ret = OK;
    }

  return ret;
}
Code example #26
0
File: input_pwm.cpp Project: Sujo1/Firmware
int
InputPWM::ioctl(struct file *filp, int cmd, unsigned long arg)
{
	switch (cmd) {
	case SENSORIOCSPOLLRATE: {
		switch (arg) {

			/* switching to manual polling */
		case SENSOR_POLLRATE_MANUAL:
			stop();
			_measure_ticks = 0;
			return OK;

			/* external signalling (DRDY) not supported */
		case SENSOR_POLLRATE_EXTERNAL:

			/* zero would be bad */
		case 0:
			return -EINVAL;

			/* set default/max polling rate */
		case SENSOR_POLLRATE_MAX:
		case SENSOR_POLLRATE_DEFAULT: {
				/* do we need to start internal polling? */
				bool want_start = (_measure_ticks == 0);

				/* set interval for next measurement to minimum legal value */
				_measure_ticks = USEC2TICK(INPUT_PWM_INTERVAL);

				/* if we need to start the poll state machine, do it */
				if (want_start)
					start();

				return OK;
			}

			/* adjust to a legal polling interval in Hz */
		default: {
				/* do we need to start internal polling? */
				bool want_start = (_measure_ticks == 0);

				/* convert hz to tick interval via microseconds */
				unsigned ticks = USEC2TICK(1000000 / arg);

				/* check against maximum rate */
				if (ticks < USEC2TICK(INPUT_PWM_INTERVAL))
					return -EINVAL;

				/* update interval for next measurement */
				_measure_ticks = ticks;

				/* if we need to start the poll state machine, do it */
				if (want_start)
					start();

				return OK;
			}
		}
	}

	case SENSORIOCGPOLLRATE:
		if (_measure_ticks == 0)
			return SENSOR_POLLRATE_MANUAL;

		return 1000000/TICK2USEC(_measure_ticks);

	case SENSORIOCSQUEUEDEPTH: {
			/* lower bound is mandatory, upper bound is a sanity check */
			if ((arg < 1) || (arg > 100))
				return -EINVAL;

			irqstate_t flags = irqsave();
			if (!_reports->resize(arg)) {
				irqrestore(flags);
				return -ENOMEM;
			}
			irqrestore(flags);

			return OK;
		}

	case SENSORIOCGQUEUEDEPTH:
		return _reports->size();

	case SENSORIOCRESET:
		return OK;

	case RC_INPUT_GET: {
		/* fetch R/C input values into (rc_input_values *)arg */
			struct rc_input_values *report = (rc_input_values *)arg;
			int ret;
			ret = read(0, (char *)report, sizeof(*report));
			if (ret > 0)
				return OK;
			else
				return ret;
		}

	default:
		/* give it to the superclass */
		return CDev::ioctl(filp, cmd, arg);
	}
}
Code example #27
0
static int proc_open(FAR struct file *filep, FAR const char *relpath,
                     int oflags, mode_t mode)
{
  FAR struct proc_file_s *procfile;
  FAR const struct proc_node_s *node;
  FAR struct tcb_s *tcb;
  FAR char *ptr;
  irqstate_t flags;
  unsigned long tmp;
  pid_t pid;

  fvdbg("Open '%s'\n", relpath);

  /* PROCFS is read-only.  Any attempt to open with any kind of write
   * access is not permitted.
   *
   * REVISIT:  Write-able proc files could be quite useful.
   */

  if ((oflags & O_WRONLY) != 0 || (oflags & O_RDONLY) == 0)
    {
      fdbg("ERROR: Only O_RDONLY supported\n");
      return -EACCES;
    }

  /* The first segment of the relative path should be a task/thread ID */

  ptr = NULL;
  tmp = strtoul(relpath, &ptr, 10);

  if (!ptr || *ptr != '/')
    {
      fdbg("ERROR: Invalid path \"%s\"\n", relpath);
      return -ENOENT;
    }

  /* Skip over the slash */

  ptr++;

  /* A valid PID would be in the range of 0-32767 (0 is reserved for the
   * IDLE thread).
   */

  if (tmp >= 32768)
    {
      fdbg("ERROR: Invalid PID %ld\n", tmp);
      return -ENOENT;
    }

  /* Now verify that a task with this task/thread ID exists */

  pid = (pid_t)tmp;

  flags = irqsave();
  tcb = sched_gettcb(pid);
  irqrestore(flags);

  if (!tcb)
    {
      fdbg("ERROR: PID %d is no longer valid\n", (int)pid);
      return -ENOENT;
    }

  /* The remaining segments of the relpath should be a well known node in
   * the task/thread tree.
   */

  node = proc_findnode(ptr);
  if (!node)
    {
      fdbg("ERROR: Invalid path \"%s\"\n", relpath);
      return -ENOENT;
    }

  /* The node must be a file, not a directory */

  if (node->dtype != DTYPE_FILE)
    {
      fdbg("ERROR: Path \"%s\" is a directory\n", relpath);
      return -EISDIR;
    }

  /* Allocate a container to hold the task and node selection */

  procfile = (FAR struct proc_file_s *)kzalloc(sizeof(struct proc_file_s));
  if (!procfile)
    {
      fdbg("ERROR: Failed to allocate file container\n");
      return -ENOMEM;
    }

  /* Initialize the file container */

  procfile->pid  = pid;
  procfile->node = node;

  /* Save the index as the open-specific state in filep->f_priv */

  filep->f_priv = (FAR void *)procfile;
  return OK;
}
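
On the application side these procfs handlers are reached through nothing more than open() and read() on the mounted file system.  A minimal sketch, assuming procfs is mounted at /proc and that a task with PID 1 exists:

/* Sketch: dump /proc/1/status.  proc_open() above validates the path and
 * PID, and proc_read() dispatches to the status node handler.
 */

#include <fcntl.h>
#include <unistd.h>
#include <stdio.h>

int show_task_status(void)
{
  char buffer[128];
  ssize_t nread;
  int fd;

  fd = open("/proc/1/status", O_RDONLY);
  if (fd < 0)
    {
      return -1;
    }

  while ((nread = read(fd, buffer, sizeof(buffer) - 1)) > 0)
    {
      buffer[nread] = '\0';
      fputs(buffer, stdout);
    }

  close(fd);
  return 0;
}
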
Code example #28
0
File: sam_ethernet.c Project: FreddieChopin/NuttX
xcpt_t arch_phy_irq(FAR const char *intf, xcpt_t handler, phy_enable_t *enable)
{
  irqstate_t flags;
  xcpt_t *phandler;
  xcpt_t oldhandler;
  pio_pinset_t pinset;
  phy_enable_t enabler;
  int irq;

  DEBUGASSERT(intf);

  nvdbg("%s: handler=%p\n", intf, handler);
#ifdef CONFIG_SAMA5_EMACA
  phydbg("EMAC: devname=%s\n", SAMA5_EMAC_DEVNAME);
#endif
#ifdef CONFIG_SAMA5_GMAC
  phydbg("GMAC: devname=%s\n", SAMA5_GMAC_DEVNAME);
#endif

#ifdef CONFIG_SAMA5_EMACA
  if (strcmp(intf, SAMA5_EMAC_DEVNAME) == 0)
    {
      phydbg("Select EMAC\n");
      phandler = &g_emac_handler;
      pinset   = PIO_INT_ETH1;
      irq      = IRQ_INT_ETH1;
      enabler  = sam_emac_phy_enable;
    }
  else
#endif
#ifdef CONFIG_SAMA5_GMAC
  if (strcmp(intf, SAMA5_GMAC_DEVNAME) == 0)
    {
      phydbg("Select GMAC\n");
      phandler = &g_gmac_handler;
      pinset   = PIO_INT_ETH0;
      irq      = IRQ_INT_ETH0;
      enabler  = sam_gmac_phy_enable;
    }
  else
#endif
    {
      ndbg("Unsupported interface: %s\n", intf);
      return NULL;
    }

  /* Disable interrupts until we are done.  This guarantees that the
   * following operations are atomic.
   */

  flags = irqsave();

  /* Get the old interrupt handler and save the new one */

  oldhandler = *phandler;
  *phandler = handler;

  /* Configure the interrupt */

  if (handler)
    {
      phydbg("Configure pin: %08x\n", pinset);
      sam_pioirq(pinset);

      phydbg("Attach IRQ%d\n", irq);
      (void)irq_attach(irq, handler);
    }
  else
    {
      phydbg("Detach IRQ%d\n", irq);
      (void)irq_detach(irq);
      enabler = NULL;
    }

  /* Return with the interrupt disabled in either case */

  sam_pioirqdisable(irq);

  /* Return the enabling function pointer */

  if (enable)
    {
      *enable = enabler;
    }

  /* Return the old handler (so that it can be restored) */

  irqrestore(flags);
  return oldhandler;
}
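
Board or driver initialization logic calls arch_phy_irq() to hook a PHY interrupt handler and to obtain the function that later unmasks the interrupt.  A minimal sketch follows; the "eth0" interface name is an assumption, and phy_enable_t is taken to be the usual void (*)(bool) enabler type.

/* Sketch: attach a PHY link-change handler and enable its interrupt. */

#include <nuttx/config.h>
#include <nuttx/arch.h>
#include <nuttx/irq.h>
#include <stdbool.h>
#include <stddef.h>

static xcpt_t g_old_phy_handler;

static int phy_interrupt(int irq, FAR void *context)
{
  /* Link change detected -- defer the real work out of interrupt context */

  return 0;
}

static void phy_hookup(void)
{
  phy_enable_t enabler = NULL;

  g_old_phy_handler = arch_phy_irq("eth0", phy_interrupt, &enabler);

  if (enabler != NULL)
    {
      enabler(true);   /* unmask the PHY interrupt at the PIO controller */
    }
}
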
Code example #29
0
File: pg_worker.c Project: CNCBASHER/Firmware
int pg_worker(int argc, char *argv[])
{
  irqstate_t flags;

  /* Loop forever -- Notice that interrupts will be disabled at all times that
   * this thread runs.  That is so that we don't lose signals or have
   * asynchronous page faults.
   *
   * All interrupt logic as well as all page fill worker thread logic must
   * be locked in memory.  Therefore, keeping interrupts disabled here
   * should prevent any concurrent page faults.  Any page faults or page
   * fill completions should occur while this thread sleeps.
   */

  pglldbg("Started\n");
  flags = irqsave();
  for (;;)
    {
      /* Wait awhile.  We will wait here until either the configurable timeout
       * elapses or until we are awakened by a signal (which terminates the
       * usleep with an EINTR error).  Note that interrupts will be re-enabled
       * while this task sleeps.
       *
       * The timeout is a failsafe that will handle any cases where a signal
       * is lost (that would really be a bug and shouldn't happen!) and also
       * supports timeouts for the case of non-blocking, asynchronous fills.
       */

      usleep(CONFIG_PAGING_WORKPERIOD);

      /* The page fill worker thread will be awakened on one of three conditions:
       *
       *   - When signaled by pg_miss(), the page fill worker thread will be awakened,
       *   - if CONFIG_PAGING_BLOCKINGFILL is not defined, from pg_callback()
       *     after completing a page fill, or
       *   - When a configurable timeout expires with no activity.
       *
       * Interrupts are still disabled.
       */

#ifndef CONFIG_PAGING_BLOCKINGFILL
      /* For the non-blocking up_fillpage(), the page fill worker thread will detect
       * that the page fill is complete when it is awakened with g_pftcb non-NULL
       * and fill completion status from pg_callback.
       */

      if (g_pftcb != NULL)
        {
          /* If it is a real page fill completion event, then the result of the page
           * fill will be in g_fillresult and will not be equal to -EBUSY.
           */

          if (g_fillresult != -EBUSY)
            {
              /* Any value other than OK, brings the system down */

              ASSERT(g_fillresult == OK);

              /* Handle the successful page fill complete event by restarting the
               * task that was blocked waiting for this page fill.
               */

              pglldbg("Restarting TCB: %p\n", g_pftcb);
              up_unblock_task(g_pftcb);

              /* Yes .. Start the next asynchronous fill.  Check the return
               * value to see if a fill was actually started (false means that
               * no fill was started).
               */

              pgllvdbg("Calling pg_startfill\n");
              if (!pg_startfill())
                {
                  /* No fill was started.  This can only mean that all queued
                   * page fill actions have been completed and there is
                   * nothing more to do.
                   */

                  pgllvdbg("Call pg_alldone()\n");
                  pg_alldone();
                }
            }

          /* If a configurable timeout period expires with no page fill completion
           * event, then declare a failure.
           */

#ifdef CONFIG_PAGING_TIMEOUT_TICKS
          else
            {
              lldbg("Timeout!\n");
              ASSERT(clock_systimer() - g_starttime < CONFIG_PAGING_TIMEOUT_TICKS);
            }
#endif
        }

      /* Otherwise, this might be a page fill initiation event.  When
       * awakened from pg_miss(), no fill will be in progress and
       * g_pftcb will be NULL.
       */

      else
        {
          /* Are there tasks blocked and waiting for a fill?  If so,
           * pg_startfill() will start the asynchronous fill (and set
           * g_pftcb).
           */

           pgllvdbg("Calling pg_startfill\n");
           (void)pg_startfill();
        }
#else
      /* Are there tasks blocked and waiting for a fill?  Loop until all
       * pending fills have been processed.
       */

      for (;;)
        {
          /* Yes .. Start the fill and block until the fill completes.
           * Check the return value to see if a fill was actually performed
           * (false means that no fill was performed).
           */

          pgllvdbg("Calling pg_startfill\n");
          if (!pg_startfill())
            {
               /* Break out of the loop -- there is nothing more to do */

               break;
            }

          /* Handle the page fill complete event by restarting the
           * task that was blocked waiting for this page fill. In the
           * non-blocking fill case, the page fill worker thread will
           * know that the page fill is  complete when pg_startfill()
           * returns true.
           */

          pgllvdbg("Restarting TCB: %p\n", g_pftcb);
          up_unblock_task(g_pftcb);
        }

      /* All queued fills have been processed */

      pgllvdbg("Call pg_alldone()\n");
      pg_alldone();
#endif
    }

  return OK; /* To keep some compilers happy */
}
Code example #30
0
void work_process(FAR struct kwork_wqueue_s *wqueue, systime_t period, int wndx)
{
  volatile FAR struct work_s *work;
  worker_t  worker;
  irqstate_t flags;
  FAR void *arg;
  systime_t elapsed;
  systime_t remaining;
  systime_t stick;
  systime_t ctick;
  systime_t next;

  /* Then process queued work.  We need to keep interrupts disabled while
   * we process items in the work list.
   */

  next  = period;
  flags = irqsave();

  /* Get the time that we started this polling cycle in clock ticks. */

  stick = clock_systimer();

  /* And check each entry in the work queue.  Since we have disabled
   * interrupts we know:  (1) we will not be suspended unless we do
   * so ourselves, and (2) there will be no changes to the work queue
   */

  work = (FAR struct work_s *)wqueue->q.head;
  while (work)
    {
      /* Is this work ready?  It is ready if there is no delay or if
       * the delay has elapsed. qtime is the time that the work was added
       * to the work queue.  It will always be greater than or equal to
       * zero.  Therefore a delay of zero will always execute immediately.
       */

      ctick   = clock_systimer();
      elapsed = ctick - work->qtime;
      if (elapsed >= work->delay)
        {
          /* Remove the ready-to-execute work from the list */

          (void)dq_rem((struct dq_entry_s *)work, &wqueue->q);

          /* Extract the work description from the entry (in case the work
           * instance is re-used after it has been de-queued).
           */

          worker = work->worker;

          /* Check for a race condition where the work may be nullified
           * before it is removed from the queue.
           */

          if (worker != NULL)
            {
              /* Extract the work argument (before re-enabling interrupts) */

              arg = work->arg;

              /* Mark the work as no longer being queued */

              work->worker = NULL;

              /* Do the work.  Re-enable interrupts while the work is being
               * performed... we don't have any idea how long this will take!
               */

              irqrestore(flags);
              worker(arg);

              /* Now, unfortunately, since we re-enabled interrupts we don't
               * know the state of the work list and we will have to start
               * back at the head of the list.
               */

              flags = irqsave();
              work  = (FAR struct work_s *)wqueue->q.head;
            }
          else
            {
              /* Cancelled.. Just move to the next work in the list with
               * interrupts still disabled.
               */

              work = (FAR struct work_s *)work->dq.flink;
            }
        }
      else /* elapsed < work->delay */
        {
          /* This one is not ready.
           *
           * NOTE that elapsed is relative to the current time, not the
           * time at the beginning of this queue processing pass.
           * So it may need an adjustment.
           */

          elapsed += (ctick - stick);
          if (elapsed > work->delay)
            {
              /* The delay has expired while we are processing */

              elapsed = work->delay;
            }

          /* Will it be ready before the next scheduled wakeup interval? */

          remaining = work->delay - elapsed;
          if (remaining < next)
            {
              /* Yes.. Then schedule to wake up when the work is ready */

              next = remaining;
            }

          /* Then try the next in the list. */

          work = (FAR struct work_s *)work->dq.flink;
        }
    }

#if defined(CONFIG_SCHED_LPWORK) && CONFIG_SCHED_LPNTHREADS > 0
  /* A value of zero for period means that we should wait indefinitely until
   * signalled.  This option is used only for the case where there are
   * multiple, low-priority worker threads.  In that case, only one of
   * the threads does the poll... the others simply wait to be signalled.
   * In all other cases, period will be non-zero and equal to wqueue->delay.
   */

   if (period == 0)
     {
       sigset_t set;

       /* Wait indefinitely until signalled with SIGWORK */

       sigemptyset(&set);
       sigaddset(&set, SIGWORK);

       wqueue->worker[wndx].busy = false;
       DEBUGVERIFY(sigwaitinfo(&set, NULL));
       wqueue->worker[wndx].busy = true;
     }
   else
#endif
    {
      /* Get the delay (in clock ticks) since we started the sampling */

      elapsed = clock_systimer() - stick;
      if (elapsed < period && next > 0)
        {
          /* How much time would we need to delay to get to the end of the
           * sampling period?  The amount of time we delay should be the smaller
           * of the time to the end of the sampling period and the time to the
           * next work expiry.
           */

          remaining = period - elapsed;
          next      = MIN(next, remaining);

          /* Wait awhile to check the work list.  We will wait here until
           * either the time elapses or until we are awakened by a signal.
           * Interrupts will be re-enabled while we wait.
           */

          wqueue->worker[wndx].busy = false;
          usleep(next * USEC_PER_TICK);
          wqueue->worker[wndx].busy = true;
        }
    }

  irqrestore(flags);
}
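
Entries land on the queue that work_process() drains by way of work_queue().  A minimal sketch using that interface (the choice of the HPWORK queue and the 100 ms delay are arbitrary):

/* Sketch: defer my_worker() onto the high-priority work queue.  The
 * struct work_s instance must remain valid until the worker has run.
 */

#include <nuttx/config.h>
#include <nuttx/clock.h>
#include <nuttx/wqueue.h>
#include <stddef.h>

static struct work_s g_mywork;

static void my_worker(FAR void *arg)
{
  /* Runs on the worker thread with interrupts enabled */
}

static int schedule_deferred_work(void)
{
  /* Run my_worker() on the HPWORK queue roughly 100 ms from now */

  return work_queue(HPWORK, &g_mywork, my_worker, NULL, MSEC2TICK(100));
}
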