Example #1
void _exit(int status)
{
  FAR struct tcb_s *tcb;

  sinfo("TCB=%p exiting\n", tcb);

  /* Destroy the task at the head of the ready to run list. */

  (void)task_exit();

  /* Now, perform the context switch to the new ready-to-run task at the
   * head of the list.
   */

  tcb = this_task();
  sinfo("New Active Task TCB=%p\n", tcb);

  /* The way that we handle signals in the simulation is kind of
   * a kludge.  This would be unsafe in a truly multi-threaded, interrupt
   * driven environment.
   */

  if (tcb->xcp.sigdeliver)
    {
      sinfo("Delivering signals TCB=%p\n", tcb);
      ((sig_deliver_t)tcb->xcp.sigdeliver)(tcb);
      tcb->xcp.sigdeliver = NULL;
    }

  /* Then switch contexts */

  up_longjmp(tcb->xcp.regs, 1);
}
Example #2
void _exit(int status)
{
    struct tcb_s *tcb;

    /* Disable interrupts.  They will be restored when the next task is
     * started.
     */

    (void)up_irq_save();

    sinfo("TCB=%p exiting\n", this_task());

#ifdef CONFIG_DUMP_ON_EXIT
    sinfo("Other tasks:\n");
    sched_foreach(_xtensa_dumponexit, NULL);
#endif

#if XCHAL_CP_NUM > 0
    /* Disable co-processor support for the task that is exiting. */

    tcb = this_task();
    xtensa_coproc_disable(&tcb->xcp.cpstate, XTENSA_CP_ALLSET);
#endif

    /* Destroy the task at the head of the ready to run list. */

    (void)task_exit();

    /* Now, perform the context switch to the new ready-to-run task at the
     * head of the list.
     */

    tcb = this_task();

#if XCHAL_CP_NUM > 0
    /* Set up the co-processor state for the newly started thread. */

    xtensa_coproc_restorestate(&tcb->xcp.cpstate);
#endif

#ifdef CONFIG_ARCH_ADDRENV
    /* Make sure that the address environment for the previously running
     * task is closed down gracefully (data caches dump, MMU flushed) and
     * set up the address environment for the new thread at the head of
     * the ready-to-run list.
     */

    (void)group_addrenv(tcb);
#endif

    /* Then switch contexts */

    xtensa_context_restore(tcb->xcp.regs);

    /* xtensa_context_restore() should not return but could if the software
     * interrupts are disabled.
     */

    PANIC();
}
Example #3
void up_sigdeliver(void)
{
#ifndef CONFIG_DISABLE_SIGNALS
  struct tcb_s  *rtcb = this_task();
  uint32_t regs[XCPTCONTEXT_REGS];
  sig_deliver_t sigdeliver;

  /* Save the errno.  This must be preserved throughout the signal handling
   * so that the user code finally gets the correct errno value (probably
   * EINTR).
   */

  int saved_errno = rtcb->pterrno;

  board_autoled_on(LED_SIGNAL);

  sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n",
        rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head);
  ASSERT(rtcb->xcp.sigdeliver != NULL);

  /* Save the real return state on the stack. */

  up_copystate(regs, rtcb->xcp.regs);
  regs[REG_PC] = rtcb->xcp.saved_pc;
  regs[REG_SR] = rtcb->xcp.saved_sr;

  /* Get a local copy of the sigdeliver function pointer.  We do this so
   * that we can nullify the sigdeliver function pointer in the TCB and
   * accept more signal deliveries while processing the current pending
   * signals.
   */

  sigdeliver           = rtcb->xcp.sigdeliver;
  rtcb->xcp.sigdeliver = NULL;

  /* Then restore the task interrupt state. */

  up_irq_restore(regs[REG_SR] & 0x000000f0);

  /* Deliver the signals */

  sigdeliver(rtcb);

  /* Output any debug messages BEFORE restoring errno (because they may
   * alter errno), then disable interrupts again and restore the original
   * errno that is needed by the user logic (it is probably EINTR).
   */

  sinfo("Resuming\n");
  (void)up_irq_save();
  rtcb->pterrno = saved_errno;

  /* Then restore the correct state for this thread of execution. */

  board_autoled_off(LED_SIGNAL);
  up_fullcontextrestore(regs);
#endif
}
Example #4
int pthread_cond_broadcast(FAR pthread_cond_t *cond)
{
  int ret = OK;
  int sval;

  sinfo("cond=0x%p\n", cond);

  if (!cond)
    {
      ret = EINVAL;
    }
  else
    {
      /* Disable pre-emption until all of the waiting threads have been
       * restarted. This is necessary to assure that the sval behaves as
       * expected in the following while loop
       */

      sched_lock();

      /* Get the current value of the semaphore */

      if (sem_getvalue((FAR sem_t *)&cond->sem, &sval) != OK)
        {
          ret = EINVAL;
        }
      else
        {
          /* Loop until all of the waiting threads have been restarted. */

          while (sval < 0)
            {
              /* If the value is less than zero (meaning that one or more
               * threads are waiting), then post the condition semaphore.
               * Only the highest priority waiting thread will get to execute.
               */

              ret = pthread_givesemaphore((FAR sem_t *)&cond->sem);

              /* Increment the semaphore count (as was done by the
               * above post).
               */

              sval++;
            }
        }

      /* Now we can let the restarted threads run */

      sched_unlock();
    }

  sinfo("Returning %d\n", ret);
  return ret;
}
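
For context, a minimal sketch (not from the source) of the wait/broadcast pairing that this function serves; g_mutex, g_cond, and g_ready are hypothetical names. Because broadcast posts the semaphore once per waiter, each waiter must re-check its predicate in a loop:

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t  g_cond  = PTHREAD_COND_INITIALIZER;
static bool            g_ready = false;

static void *waiter(void *arg)
{
  pthread_mutex_lock(&g_mutex);
  while (!g_ready)                  /* Re-check: wakeups may be spurious */
    {
      pthread_cond_wait(&g_cond, &g_mutex);
    }

  pthread_mutex_unlock(&g_mutex);
  return NULL;
}

static void release_all(void)
{
  pthread_mutex_lock(&g_mutex);
  g_ready = true;
  pthread_cond_broadcast(&g_cond);  /* Wake every thread blocked in waiter() */
  pthread_mutex_unlock(&g_mutex);
}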
Example #5
int pthread_mutex_init(FAR pthread_mutex_t *mutex, FAR const pthread_mutexattr_t *attr)
{
  int pshared = 0;
#ifdef CONFIG_MUTEX_TYPES
  uint8_t type  = PTHREAD_MUTEX_DEFAULT;
#endif
  int ret       = OK;
  int status;

  sinfo("mutex=0x%p attr=0x%p\n", mutex, attr);

  if (!mutex)
    {
      ret = EINVAL;
    }
  else
    {
      /* Were attributes specified?  If so, use them */

      if (attr)
        {
          pshared = attr->pshared;
#ifdef CONFIG_MUTEX_TYPES
          type    = attr->type;
#endif
        }

      /* Indicate that the semaphore is not held by any thread. */

      mutex->pid = -1;

      /* Initialize the mutex like a semaphore with initial count = 1 */

      status = sem_init((FAR sem_t *)&mutex->sem, pshared, 1);
      if (status != OK)
        {
          ret = EINVAL;
        }

      /* Set up attributes unique to the mutex type */

#ifdef CONFIG_MUTEX_TYPES
      mutex->type   = type;
      mutex->nlocks = 0;
#endif
    }

  sinfo("Returning %d\n", ret);
  return ret;
}
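
A hedged usage sketch (names hypothetical): passing an attributes object to select a mutex type. Under NuttX the type is honored only when CONFIG_MUTEX_TYPES is enabled, and a NULL attr yields the defaults shown above:

#include <pthread.h>

static pthread_mutex_t g_lock;

static int init_recursive_lock(void)
{
  pthread_mutexattr_t attr;
  int ret;

  pthread_mutexattr_init(&attr);

  /* Honored only if the OS was built with CONFIG_MUTEX_TYPES */

  pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);

  ret = pthread_mutex_init(&g_lock, &attr);
  pthread_mutexattr_destroy(&attr);
  return ret;
}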
Example #6
static void _up_dumponexit(FAR struct tcb_s *tcb, FAR void *arg)
{
#if CONFIG_NFILE_DESCRIPTORS > 0
  FAR struct filelist *filelist;
#if CONFIG_NFILE_STREAMS > 0
  FAR struct streamlist *streamlist;
#endif
  int i;
#endif

  sinfo("  TCB=%p name=%s pid=%d\n", tcb, tcb->argv[0], tcb->pid);
  sinfo("    priority=%d state=%d\n", tcb->sched_priority, tcb->task_state);

#if CONFIG_NFILE_DESCRIPTORS > 0
  filelist = tcb->group->tg_filelist;
  for (i = 0; i < CONFIG_NFILE_DESCRIPTORS; i++)
    {
      struct inode *inode = filelist->fl_files[i].f_inode;
      if (inode)
        {
          sinfo("      fd=%d refcount=%d\n",
                i, inode->i_crefssinfo);
        }
    }
#endif

#if CONFIG_NFILE_STREAMS > 0
  streamlist = tcb->group->tg_streamlist;
  for (i = 0; i < CONFIG_NFILE_STREAMS; i++)
    {
      struct file_struct *filep = &streamlist->sl_streams[i];
      if (filep->fs_fd >= 0)
        {
#ifndef CONFIG_STDIO_DISABLE_BUFFERING
          if (filep->fs_bufstart != NULL)
            {
              sinfo("      fd=%d nbytes=%d\n",
                    filep->fs_fd,
                    filep->fs_bufpos - filep->fs_bufstart);
            }
          else
#endif
            {
              sinfo("      fd=%d\n", filep->fs_fd);
            }
        }
    }
#endif
}
Example #7
static void wait_for_state(int fd, void* data) {
    std::unique_ptr<state_info> sinfo(reinterpret_cast<state_info*>(data));

    D("wait_for_state %d", sinfo->state);

    while (true) {
        bool is_ambiguous = false;
        std::string error = "unknown error";
        const char* serial = sinfo->serial.length() ? sinfo->serial.c_str() : NULL;
        atransport* t = acquire_one_transport(sinfo->transport_type, serial, &is_ambiguous, &error);

        if (t != nullptr && t->connection_state == sinfo->state) {
            SendOkay(fd);
            break;
        } else if (!is_ambiguous) {
            adb_sleep_ms(1000);
            // Try again...
        } else {
            SendFail(fd, error);
            break;
        }
    }

    adb_close(fd);
    D("wait_for_state is done");
}
Example #8
void _exit(int status)
{
  struct tcb_s *tcb;

  /* Disable interrupts.  They will be restored when the next
   * task is started.
   */

  (void)up_irq_save();

  sinfo("TCB=%p exiting\n", this_task());

#ifdef CONFIG_DUMP_ON_EXIT
  sinfo("Other tasks:\n");
  sched_foreach(_up_dumponexit, NULL);
#endif

  /* Destroy the task at the head of the ready to run list. */

  (void)task_exit();

  /* Now, perform the context switch to the new ready-to-run task at the
   * head of the list.
   */

  tcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
  /* Make sure that the address environment for the previously running
   * task is closed down gracefully (data caches dump, MMU flushed) and
   * set up the address environment for the new thread at the head of
   * the ready-to-run list.
   */

  (void)group_addrenv(tcb);
#endif

  /* Then switch contexts */

  up_fullcontextrestore(tcb->xcp.regs);

  /* up_fullcontextrestore() should not return but could if the software
   * interrupts are disabled.
   */

  PANIC();
}
Example #9
static inline int spawn_close(FAR struct spawn_close_file_action_s *action)
{
  /* The return value from close() is ignored */

  sinfo("Closing fd=%d\n", action->fd);

  (void)close(action->fd);
  return OK;
}
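
spawn_close() executes a close action that user code registered through the standard posix_spawn interface. A minimal sketch of registering one such action (the descriptor number is illustrative):

#include <spawn.h>

static int close_fd3_in_child(posix_spawn_file_actions_t *actions)
{
  /* Request that descriptor 3 be closed in the spawned child */

  return posix_spawn_file_actions_addclose(actions, 3);
}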
Example #10
/*--------------------------------------------------------------*
 *  ss_m::_create_index()                                        *
 *--------------------------------------------------------------*/
rc_t
ss_m::_create_index(
    vid_t                   vid, 
    ndx_t                   ntype, 
    store_property_t        property,
    const char*             key_desc,
    concurrency_t           cc, // = t_cc_kvl
    stid_t&                 stid
    )
{
    FUNC(ss_m::_create_index);

    DBG(<<" vid " << vid);
    uint4_t count = max_keycomp;
    key_type_s kcomp[max_keycomp];
    lpid_t root;

    W_DO( key_type_s::parse_key_type(key_desc, count, kcomp) );
    {
        DBG(<<"vid " << vid);
        W_DO( io->create_store(vid, 100/*unused*/, _make_store_flag(property), stid) );
    DBG(<<" stid " << stid);
    }

    // Note: theoretically, some other thread could destroy
    //       the above store before the following lock request
    //       is granted.  The only foreseeable way for this to
    //       happen would be due to a bug in a vas causing
    //       it to destroy the wrong store.  We make no attempt
    //       to prevent this.
    W_DO(lm->lock(stid, EX, t_long, WAIT_SPECIFIED_BY_XCT));

    if( (cc != t_cc_none) && (cc != t_cc_file) &&
        (cc != t_cc_kvl) && (cc != t_cc_modkvl) &&
        (cc != t_cc_im)
        ) return RC(eBADCCLEVEL);

    switch (ntype)  {
    case t_btree:
    case t_uni_btree:
        // compress prefixes only if the first part is compressed
        W_DO( bt->create(stid, root, kcomp[0].compressed != 0) );

        break;
    default:
        return RC(eBADNDXTYPE);
    }
    sinfo_s sinfo(stid.store, t_index, 100/*unused*/, 
                  ntype,
                  cc,
                  root.page, 
                  count, kcomp);
    W_DO( dir->insert(stid, sinfo) );

    return RCOK;
}
Example #11
int pthread_mutex_destroy(FAR pthread_mutex_t *mutex)
{
  int ret = OK;
  int status;

  sinfo("mutex=0x%p\n", mutex);

  if (!mutex)
    {
      ret = EINVAL;
    }
  else
    {
      /* Make sure the semaphore is stable while we make the following
       * checks
       */

      sched_lock();

      /* Is the semaphore available? */

      if (mutex->pid != -1)
        {
          ret = EBUSY;
        }
      else
        {
          /* Destroy the semaphore */

          status = sem_destroy((FAR sem_t *)&mutex->sem);
          if (status != OK)
            {
              ret = EINVAL;
            }
        }

      sched_unlock();
    }

  sinfo("Returning %d\n", ret);
  return ret;
}
Example #12
void _exit(int status)
{
  struct tcb_s *tcb;

  /* Make sure that we are in a critical section with local interrupts
   * disabled.  The IRQ state will be restored when the next task is started.
   */

  (void)enter_critical_section();

  sinfo("TCB=%p exiting\n", this_task());

#ifdef CONFIG_DUMP_ON_EXIT
  sinfo("Other tasks:\n");
  sched_foreach(_up_dumponexit, NULL);
#endif

  /* Destroy the task at the head of the ready to run list. */

  (void)task_exit();

  /* Now, perform the context switch to the new ready-to-run task at the
   * head of the list.
   */

  tcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
  /* Make sure that the address environment for the previously running
   * task is closed down gracefully (data caches dump, MMU flushed) and
   * set up the address environment for the new thread at the head of
   * the ready-to-run list.
   */

  (void)group_addrenv(tcb);
#endif

  /* Then switch contexts */

  up_fullcontextrestore(tcb->xcp.regs);
}
Example #13
static inline int spawn_open(FAR struct spawn_open_file_action_s *action)
{
  int fd;
  int ret = OK;

  /* Open the file */

  sinfo("Open'ing path=%s oflags=%04x mode=%04x\n",
        action->path, action->oflags, action->mode);

  fd = open(action->path, action->oflags, action->mode);
  if (fd < 0)
    {
      ret = get_errno();
      serr("ERROR: open failed: %d\n", ret);
    }

  /* Does the return file descriptor happen to match the required file
   * descriptor number?
   */

  else if (fd != action->fd)
    {
      /* No.. dup2 to get the correct file number */

      sinfo("Dup'ing %d->%d\n", fd, action->fd);

      ret = dup2(fd, action->fd);
      if (ret < 0)
        {
          ret = get_errno();
          serr("ERROR: dup2 failed: %d\n", ret);
        }

      sinfo("Closing fd=%d\n", fd);
      close(fd);
    }

  return ret;
}
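
The fd-matching dance above implements the semantics of posix_spawn_file_actions_addopen(): open a path at a required descriptor number. A hedged sketch of the user-side registration (path, flags, and mode are illustrative):

#include <fcntl.h>
#include <spawn.h>

static int redirect_child_stdout(posix_spawn_file_actions_t *actions)
{
  /* Open /tmp/child.log as descriptor 1 (stdout) in the spawned child */

  return posix_spawn_file_actions_addopen(actions, 1, "/tmp/child.log",
                                          O_WRONLY | O_CREAT | O_TRUNC, 0644);
}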
Example #14
int pthread_completejoin(pid_t pid, FAR void *exit_value)
{
  FAR struct task_group_s *group = task_getgroup(pid);
  FAR struct join_s *pjoin;

  sinfo("pid=%d exit_value=%p group=%p\n", pid, exit_value, group);
  DEBUGASSERT(group);

  /* First, find thread's structure in the private data set. */

  (void)pthread_takesemaphore(&group->tg_joinsem);
  pjoin = pthread_findjoininfo(group, pid);
  if (!pjoin)
    {
      serr("ERROR: Could not find join info, pid=%d\n", pid);
      (void)pthread_givesemaphore(&group->tg_joinsem);
      return ERROR;
    }
  else
    {
      bool waiters;

      /* Save the return exit value in the thread structure. */

      pjoin->terminated = true;
      pjoin->exit_value = exit_value;

      /* Notify waiters of the availability of the exit value */

      waiters = pthread_notifywaiters(pjoin);

      /* If there are no waiters and if the thread is marked as detached,
       * then discard the join information now.  Otherwise, the pthread
       * join logic will call pthread_destroyjoin() when all of the threads
       * have sampled the exit value.
       */

      if (!waiters && pjoin->detached)
        {
           pthread_destroyjoin(group, pjoin);
        }

      /* Giving the following semaphore will allow the waiters
       * to call pthread_destroyjoin.
       */

      (void)pthread_givesemaphore(&group->tg_joinsem);
    }

  return OK;
}
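
pthread_completejoin() is the producer side of the join handshake; the consumer side is ordinary pthread_join(). A minimal sketch (illustrative names) of how the exit value saved above is eventually sampled:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

static void *worker(void *arg)
{
  /* The value passed here is what pthread_completejoin() records */

  pthread_exit((void *)(intptr_t)42);
}

static void join_example(void)
{
  pthread_t tid;
  void *value;

  pthread_create(&tid, NULL, worker, NULL);
  pthread_join(tid, &value);
  printf("worker exited with %d\n", (int)(intptr_t)value);
}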
Example #15
asocket* host_service_to_socket(const char* name, const char* serial) {
    if (!strcmp(name,"track-devices")) {
        return create_device_tracker();
    } else if (android::base::StartsWith(name, "wait-for-")) {
        name += strlen("wait-for-");

        std::unique_ptr<state_info> sinfo(new state_info);
        if (sinfo == nullptr) {
            fprintf(stderr, "couldn't allocate state_info: %s", strerror(errno));
            return nullptr;
        }

        if (serial) sinfo->serial = serial;

        if (android::base::StartsWith(name, "local")) {
            name += strlen("local");
            sinfo->transport_type = kTransportLocal;
        } else if (android::base::StartsWith(name, "usb")) {
            name += strlen("usb");
            sinfo->transport_type = kTransportUsb;
        } else if (android::base::StartsWith(name, "any")) {
            name += strlen("any");
            sinfo->transport_type = kTransportAny;
        } else {
            return nullptr;
        }

        if (!strcmp(name, "-device")) {
            sinfo->state = kCsDevice;
        } else if (!strcmp(name, "-recovery")) {
            sinfo->state = kCsRecovery;
        } else if (!strcmp(name, "-sideload")) {
            sinfo->state = kCsSideload;
        } else if (!strcmp(name, "-bootloader")) {
            sinfo->state = kCsBootloader;
        } else {
            return nullptr;
        }

        int fd = create_service_thread(wait_for_state, sinfo.release());
        return create_local_socket(fd);
    } else if (!strncmp(name, "connect:", 8)) {
        char* host = strdup(name + 8);
        int fd = create_service_thread(connect_service, host);
        return create_local_socket(fd);
    }
    return NULL;
}
Example #16
int work_lpstart(void)
{
  pid_t pid;
  int wndx;

  /* Initialize work queue data structures */

  memset(&g_lpwork, 0, sizeof(struct kwork_wqueue_s));

  g_lpwork.delay = CONFIG_SCHED_LPWORKPERIOD / USEC_PER_TICK;
  dq_init(&g_lpwork.q);

  /* Don't permit any of the threads to run until we have fully initialized
   * g_lpwork.
   */

  sched_lock();

  /* Start the low-priority, kernel mode worker thread(s) */

  sinfo("Starting low-priority kernel worker thread(s)\n");

  for (wndx = 0; wndx < CONFIG_SCHED_LPNTHREADS; wndx++)
    {
      pid = kernel_thread(LPWORKNAME, CONFIG_SCHED_LPWORKPRIORITY,
                          CONFIG_SCHED_LPWORKSTACKSIZE,
                          (main_t)work_lpthread,
                          (FAR char * const *)NULL);

      DEBUGASSERT(pid > 0);
      if (pid < 0)
        {
          int errcode = errno;
          DEBUGASSERT(errcode > 0);

          serr("ERROR: kernel_thread %d failed: %d\n", wndx, errcode);
          sched_unlock();
          return -errcode;
        }

      g_lpwork.worker[wndx].pid  = pid;
      g_lpwork.worker[wndx].busy = true;
    }

  sched_unlock();
  return g_lpwork.worker[0].pid;
}
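
Once the low-priority queue is running, work is handed to it with work_queue(). A hedged sketch using the NuttX work queue interface (the worker, argument, and delay below are illustrative):

#include <nuttx/wqueue.h>

static struct work_s g_work;   /* Must persist until the worker runs */

static void my_worker(FAR void *arg)
{
  /* Executes on one of the low-priority kernel worker threads */
}

static int schedule_lp_work(void)
{
  /* Run my_worker on the LP queue after a 10-tick delay */

  return work_queue(LPWORK, &g_work, my_worker, NULL, 10);
}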
Example #17
void pthread_destroyjoin(FAR struct task_group_s *group,
                         FAR struct join_s *pjoin)
{
  sinfo("pjoin=0x%p\n", pjoin);

  /* Remove the join info from the set of joins */

  pthread_removejoininfo(group, (pid_t)pjoin->thread);

  /* Destroy its semaphores */

  (void)sem_destroy(&pjoin->data_sem);
  (void)sem_destroy(&pjoin->exit_sem);

  /* And deallocate the pjoin structure */

  sched_kfree(pjoin);
}
Example #18
static inline int spawn_dup2(FAR struct spawn_dup2_file_action_s *action)
{
  int ret;

  /* Perform the dup */

  sinfo("Dup'ing %d->%d\n", action->fd1, action->fd2);

  ret = dup2(action->fd1, action->fd2);
  if (ret < 0)
    {
      int errcode = get_errno();

      serr("ERROR: dup2 failed: %d\n", errcode);
      return -errcode;
    }

  return OK;
}
Example #19
static bool pthread_notifywaiters(FAR struct join_s *pjoin)
{
  int ntasks_waiting;
  int status;

  sinfo("pjoin=0x%p\n", pjoin);

  /* Are any tasks waiting for our exit value? */

  status = sem_getvalue(&pjoin->exit_sem, &ntasks_waiting);
  if (status == OK && ntasks_waiting < 0)
    {
      /* Set the data semaphore so that this thread will be
       * awakened when all waiting tasks receive the data
       */

      (void)sem_init(&pjoin->data_sem, 0, (ntasks_waiting+1));

      /* Post the semaphore to restart each thread that is waiting
       * on the semaphore
       */

      do
        {
          status = pthread_givesemaphore(&pjoin->exit_sem);
          if (status == OK)
            {
              status = sem_getvalue(&pjoin->exit_sem, &ntasks_waiting);
            }
        }
      while (ntasks_waiting < 0 && status == OK);

      /* Now wait for all these restarted tasks to obtain the return
       * value.
       */

      (void)pthread_takesemaphore(&pjoin->data_sem);
      return true;
    }

  return false;
}
Example #20
void Tracer::info(const char* method, const char* message, ...) const
{
    // immediately return if the current set tracelevel is higher
    if (m_tracelevel > LEVEL_INFO) {
        return;
    }

    ///@todo With Qt4 the following code can be used:
    /*
    va_list ap;
    va_start(ap, message);
    QString str;
    str.vsprintf(message, ap);
    va_end(ap);

    sinfo(method) << str << endl;
    */

    va_list ap;

    int maxLength = 1024;
    int result = maxLength;
    char* str;
    while (result >= maxLength) {
        va_start(ap, message);
        str = (char*)malloc(maxLength);
        result = vsnprintf(str, maxLength-1, message, ap);
        va_end(ap);

        if (result >= maxLength) {
            delete str;
            maxLength = 2 * maxLength;
            result = maxLength;
        }
    }

    sinfo(method) << str << endl;
    delete str;
}
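
The grow-and-retry loop above can be avoided entirely: C99 vsnprintf() returns the length the output would need, so the buffer can be sized exactly with a measuring pass. A sketch of that alternative in C (the helper name is illustrative):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

static char *format_message(const char *fmt, ...)
{
  va_list ap;
  char *buf;
  int len;

  va_start(ap, fmt);
  len = vsnprintf(NULL, 0, fmt, ap);   /* Pass 1: measure only */
  va_end(ap);

  if (len < 0 || (buf = malloc((size_t)len + 1)) == NULL)
    {
      return NULL;
    }

  va_start(ap, fmt);
  vsnprintf(buf, (size_t)len + 1, fmt, ap);  /* Pass 2: format for real */
  va_end(ap);
  return buf;                          /* Caller must free() */
}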
Example #21
int mod_findsection(FAR struct mod_loadinfo_s *loadinfo,
                    FAR const char *sectname)
{
  FAR const Elf32_Shdr *shdr;
  int ret;
  int i;

  /* Search through the shdr[] array in loadinfo for a section named 'sectname' */

  for (i = 0; i < loadinfo->ehdr.e_shnum; i++)
    {
      /* Get the name of this section */

      shdr = &loadinfo->shdr[i];
      ret  = mod_sectname(loadinfo, shdr);
      if (ret < 0)
        {
          serr("ERROR: mod_sectname failed: %d\n", ret);
          return ret;
        }

      /* Check if the name of this section is 'sectname' */

      sinfo("%d. Comparing \"%s\" and .\"%s\"\n",
            i, loadinfo->iobuffer, sectname);

      if (strcmp((FAR const char *)loadinfo->iobuffer, sectname) == 0)
        {
          /* We found it... return the index */

          return i;
        }
    }

  /* We failed to find a section with this name. */

  return -ENOENT;
}
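
A hedged sketch of calling this helper during module load; it assumes a mod_loadinfo_s that mod_initialize()/mod_load() have already populated:

static int report_data_section(FAR struct mod_loadinfo_s *loadinfo)
{
  int idx = mod_findsection(loadinfo, ".data");
  if (idx < 0)
    {
      serr("ERROR: No .data section: %d\n", idx);
      return idx;
    }

  sinfo(".data is %d bytes\n", loadinfo->shdr[idx].sh_size);
  return OK;
}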
Example #22
/*--------------------------------------------------------------*
 *  ss_m::_create_md_index()                                    *
 *--------------------------------------------------------------*/
rc_t
ss_m::_create_md_index(
    vid_t                 vid, 
    ndx_t                 ntype, 
    store_property_t      property,
    stid_t&               stid, 
    int2_t                dim
    )
{
    W_DO( io->create_store(vid, 100/*unused*/, 
                           _make_store_flag(property), stid) );

    lpid_t root;

    // Note: theoretically, some other thread could destroy
    //       the above store before the following lock request
    //       is granted.  The only foreseeable way for this to
    //       happen would be due to a bug in a vas causing
    //       it to destroy the wrong store.  We make no attempt
    //       to prevent this.
    W_DO(lm->lock(stid, EX, t_long, WAIT_SPECIFIED_BY_XCT));

    switch (ntype)  {
    case t_rtree:
        W_DO( rt->create(stid, root, dim) );
        break;
    default:
        return RC(eBADNDXTYPE);
    }

    sinfo_s sinfo(stid.store, t_index, 100/*unused*/,
                  ntype, t_cc_none, // cc not used for md indexes
                  root.page,
                  0, 0);
    W_DO( dir->insert(stid, sinfo) );

    return RCOK;
}
Example #23
int pthread_getaffinity_np(pthread_t thread, size_t cpusetsize,
                           FAR cpu_set_t *cpuset)
{
  int ret;

  sinfo("thread ID=%d cpusetsize=%d cpuset=%p\n",
        (int)thread, (int)cpusetsize, cpusetsize);

  DEBUGASSERT(thread > 0 && cpusetsize == sizeof(cpu_set_t) &&
              cpuset != NULL);

  /* Let sched_getaffinity do all of the work */

  ret = sched_getaffinity((pid_t)thread, cpusetsize, cpuset);
  if (ret < 0)
    {
      /* If sched_getaffinity() fails, return the errno */

      ret = get_errno();
      DEBUGASSERT(ret > 0);
    }

  return ret;
}
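
A usage sketch under the NuttX SMP configuration; CONFIG_SMP_NCPUS and the CPU_ISSET() macro are assumed from that environment:

#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static void show_my_affinity(void)
{
  cpu_set_t cpuset;
  int cpu;

  if (pthread_getaffinity_np(pthread_self(), sizeof(cpu_set_t), &cpuset) == 0)
    {
      for (cpu = 0; cpu < CONFIG_SMP_NCPUS; cpu++)
        {
          printf("CPU%d: %s\n", cpu, CPU_ISSET(cpu, &cpuset) ? "yes" : "no");
        }
    }
}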
Example #24
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
       PANIC();
    }
  else
    {
      FAR struct tcb_s *rtcb = this_task();
      bool switch_needed;

      sinfo("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * removed the head of the ready-to-run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Add the task back to the ready-to-run task list.
       * sched_addreadytorun will return true if the task was
       * added at the head of the list.  We will need to perform a
       * context switch only if the exclusive OR of the two calls is
       * non-zero (i.e., one and only one of the calls changed the
       * head of the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run
           * task list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Copy the exception context into the TCB at the (old) head of the
           * ready-to-run task list.  If up_setjmp returns a non-zero
           * value, then this is really the previously running task restarting!
           */

          if (!up_setjmp(rtcb->xcp.regs))
            {
              /* Restore the exception context of the rtcb at the (new) head
               * of the ready-to-run task list.
               */

              rtcb = this_task();
              sinfo("New Active Task TCB=%p\n", rtcb);

              /* The way that we handle signals in the simulation is kind of
               * a kludge.  This would be unsafe in a truly multi-threaded, interrupt
               * driven environment.
               */

              if (rtcb->xcp.sigdeliver)
                {
                  sinfo("Delivering signals TCB=%p\n", rtcb);
                  ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb);
                  rtcb->xcp.sigdeliver = NULL;
                }

              /* Update scheduler parameters */

              sched_resume_scheduler(rtcb);

              /* Then switch contexts */

              up_longjmp(rtcb->xcp.regs, 1);
            }
        }
    }
}
Example #25
void up_sigdeliver(void)
{
  struct tcb_s *rtcb = this_task();
  uint32_t regs[XCPTCONTEXT_REGS];
  sig_deliver_t sigdeliver;

  /* Save the errno.  This must be preserved throughout the signal handling
   * so that the user code finally gets the correct errno value (probably
   * EINTR).
   */

  int saved_errno = rtcb->pterrno;

  board_autoled_on(LED_SIGNAL);

  sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n",
        rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head);
  ASSERT(rtcb->xcp.sigdeliver != NULL);

  /* Save the real return state on the stack. */

  up_copystate(regs, rtcb->xcp.regs);
  regs[REG_EPC]        = rtcb->xcp.saved_epc;
  regs[REG_STATUS]     = rtcb->xcp.saved_status;

  /* Get a local copy of the sigdeliver function pointer. We do this so that
   * we can nullify the sigdeliver function pointer in the TCB and accept
   * more signal deliveries while processing the current pending signals.
   */

  sigdeliver           = rtcb->xcp.sigdeliver;
  rtcb->xcp.sigdeliver = NULL;

  /* Then restore the task interrupt state */

  up_irq_restore((irqstate_t)regs[REG_STATUS]);

  /* Deliver the signals */

  sigdeliver(rtcb);

  /* Output any debug messages BEFORE restoring errno (because they may
   * alter errno), then disable interrupts again and restore the original
   * errno that is needed by the user logic (it is probably EINTR).
   */

  sinfo("Resuming EPC: %08x STATUS: %08x\n", regs[REG_EPC], regs[REG_STATUS]);

  (void)up_irq_save();
  rtcb->pterrno = saved_errno;

  /* Then restore the correct state for this thread of
   * execution.
   */

  board_autoled_off(LED_SIGNAL);
  up_fullcontextrestore(regs);

  /* up_fullcontextrestore() should not return but could if the software
   * interrupts are disabled.
   */

  PANIC();
}
Example #26
void up_sigdeliver(void)
{
  struct tcb_s *rtcb = this_task();
#if 0
  uint32_t regs[XCPTCONTEXT_REGS+3];  /* Why +3? See below */
#else
  uint32_t regs[XCPTCONTEXT_REGS];
#endif
  sig_deliver_t sigdeliver;

  /* Save the errno.  This must be preserved throughout the signal handling
   * so that the user code finally gets the correct errno value (probably EINTR).
   */

  int saved_errno = rtcb->pterrno;

  board_autoled_on(LED_SIGNAL);

  sinfo("rtcb=%p sigdeliver=%p sigpendactionq.head=%p\n",
        rtcb, rtcb->xcp.sigdeliver, rtcb->sigpendactionq.head);
  DEBUGASSERT(rtcb->xcp.sigdeliver != NULL);

  /* Save the real return state on the stack. */

  up_copystate(regs, rtcb->xcp.regs);
  regs[REG_PC]         = rtcb->xcp.saved_pc;
  regs[REG_SR]         = rtcb->xcp.saved_sr;

  /* Get a local copy of the sigdeliver function pointer. We do this so that
   * we can nullify the sigdeliver function pointer in the TCB and accept
   * more signal deliveries while processing the current pending signals.
   */

  sigdeliver           = rtcb->xcp.sigdeliver;
  rtcb->xcp.sigdeliver = NULL;

#ifndef CONFIG_SUPPRESS_INTERRUPTS
  /* Then make sure that interrupts are enabled.  Signal handlers must always
   * run with interrupts enabled.
   */

  up_irq_enable();
#endif

  /* Deliver the signals */

  sigdeliver(rtcb);

  /* Output any debug messages BEFORE restoring errno (because they may
   * alter errno), then disable interrupts again and restore the original
   * errno that is needed by the user logic (it is probably EINTR).
   */

  sinfo("Resuming\n");
  (void)up_irq_save();
  rtcb->pterrno = saved_errno;

  /* Then restore the correct state for this thread of execution. This is an
   * unusual case that must be handled by up_fullcontextrestore. This case is
   * unusual in two ways:
   *
   *   1. It is not a context switch between threads.  Rather, up_fullcontextrestore
   *      must behave more like a longjmp within the same task, using
   *      the same stack.
   *   2. In this case, up_fullcontextrestore is called with r12 pointing to
   *      a register save area on the stack to be destroyed.  This is
   *      dangerous because there is the very real possibility that the new
   *      stack pointer might overlap with the register save area and that
   *      stack usage in up_fullcontextrestore might corrupt the register save
   *      data before the state is restored.  At present, there does not appear
   *      to be any stack overlap problems.  If there were, then adding 3 words
   *      to the size of the register save structure would protect its contents.
   */

  board_autoled_off(LED_SIGNAL);
  up_fullcontextrestore(regs);
}
Example #27
pid_t task_vforkstart(FAR struct task_tcb_s *child)
{
  struct tcb_s *parent = this_task();
  pid_t pid;
  int rc;
  int ret;

  sinfo("Starting Child TCB=%p, parent=%p\n", child, this_task());
  DEBUGASSERT(child);

  /* Duplicate the original argument list in the forked child TCB */

  ret = vfork_argsetup(parent, child);
  if (ret < 0)
    {
      task_vforkabort(child, -ret);
      return ERROR;
    }

  /* Now we have enough in place that we can join the group */

#ifdef HAVE_TASK_GROUP
  ret = group_initialize(child);
  if (ret < 0)
    {
      task_vforkabort(child, -ret);
      return ERROR;
    }
#endif

  /* Get the assigned pid before we start the task */

  pid = (int)child->cmn.pid;

  /* Eliminate a race condition by disabling pre-emption.  The child task
   * can be instantiated, but cannot run until we call waitpid().  This
   * assures us that we cannot miss the death-of-child signal (only
   * needed in the SMP case).
   */

  sched_lock();

  /* Activate the task */

  ret = task_activate((FAR struct tcb_s *)child);
  if (ret < OK)
    {
      task_vforkabort(child, -ret);
      sched_unlock();
      return ERROR;
    }

  /* The child task has not yet run because pre-emption is disabled.
   * The child task has the same priority as the parent task, so that
   * would typically be the case anyway.  However, in the SMP
   * configuration, the child thread might already have run on
   * another CPU if pre-emption were not disabled.
   *
   * It is a requirement that the parent environment be stable while
   * vfork runs; the child thread is still dependent on things in the
   * parent thread... like the pointers into parent thread's stack
   * which will still appear in the child's registers and environment.
   *
   * We assure that by waiting for the child thread to exit before
   * returning to the parent thread.  NOTE that pre-emption will be
   * re-enabled while we are waiting, giving the child thread the
   * opportunity to run.
   */

  rc = 0;

#ifdef CONFIG_DEBUG_FEATURES
  ret = waitpid(pid, &rc, 0);
  if (ret < 0)
    {
      serr("ERROR: waitpid failed: %d\n", errno);
    }
#else
  (void)waitpid(pid, &rc, 0);
#endif

  sched_unlock();
  return pid;
}
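
task_vforkstart() is the backend behind the classic vfork() contract: the child borrows the parent's stack, so the parent must not resume until the child has exec'd or exited. A minimal user-side sketch (the binary path is illustrative):

#include <unistd.h>

static pid_t start_child(void)
{
  pid_t pid = vfork();
  if (pid == 0)
    {
      /* Child: per the vfork contract, only exec or _exit from here */

      execl("/bin/hello", "hello", (char *)NULL);
      _exit(127);   /* Reached only if the exec failed */
    }

  return pid;       /* Parent: resumes after the child has run (see above) */
}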
Example #28
static void mod_dumploadinfo(FAR struct mod_loadinfo_s *loadinfo)
{
  int i;

  sinfo("LOAD_INFO:\n");
  sinfo("  textalloc:    %08lx\n", (long)loadinfo->textalloc);
  sinfo("  datastart:    %08lx\n", (long)loadinfo->datastart);
  sinfo("  textsize:     %ld\n",   (long)loadinfo->textsize);
  sinfo("  datasize:     %ld\n",   (long)loadinfo->datasize);
  sinfo("  filelen:      %ld\n",   (long)loadinfo->filelen);
  sinfo("  filfd:        %d\n",    loadinfo->filfd);
  sinfo("  symtabidx:    %d\n",    loadinfo->symtabidx);
  sinfo("  strtabidx:    %d\n",    loadinfo->strtabidx);

  sinfo("ELF Header:\n");
  sinfo("  e_ident:      %02x %02x %02x %02x\n",
    loadinfo->ehdr.e_ident[0], loadinfo->ehdr.e_ident[1],
    loadinfo->ehdr.e_ident[2], loadinfo->ehdr.e_ident[3]);
  sinfo("  e_type:       %04x\n",  loadinfo->ehdr.e_type);
  sinfo("  e_machine:    %04x\n",  loadinfo->ehdr.e_machine);
  sinfo("  e_version:    %08x\n",  loadinfo->ehdr.e_version);
  sinfo("  e_entry:      %08lx\n", (long)loadinfo->ehdr.e_entry);
  sinfo("  e_phoff:      %d\n",    loadinfo->ehdr.e_phoff);
  sinfo("  e_shoff:      %d\n",    loadinfo->ehdr.e_shoff);
  sinfo("  e_flags:      %08x\n" , loadinfo->ehdr.e_flags);
  sinfo("  e_ehsize:     %d\n",    loadinfo->ehdr.e_ehsize);
  sinfo("  e_phentsize:  %d\n",    loadinfo->ehdr.e_phentsize);
  sinfo("  e_phnum:      %d\n",    loadinfo->ehdr.e_phnum);
  sinfo("  e_shentsize:  %d\n",    loadinfo->ehdr.e_shentsize);
  sinfo("  e_shnum:      %d\n",    loadinfo->ehdr.e_shnum);
  sinfo("  e_shstrndx:   %d\n",    loadinfo->ehdr.e_shstrndx);

  if (loadinfo->shdr && loadinfo->ehdr.e_shnum > 0)
    {
      for (i = 0; i < loadinfo->ehdr.e_shnum; i++)
        {
          FAR Elf32_Shdr *shdr = &loadinfo->shdr[i];
          sinfo("Sections %d:\n", i);
          sinfo("  sh_name:      %08x\n", shdr->sh_name);
          sinfo("  sh_type:      %08x\n", shdr->sh_type);
          sinfo("  sh_flags:     %08x\n", shdr->sh_flags);
          sinfo("  sh_addr:      %08x\n", shdr->sh_addr);
          sinfo("  sh_offset:    %d\n",   shdr->sh_offset);
          sinfo("  sh_size:      %d\n",   shdr->sh_size);
          sinfo("  sh_link:      %d\n",   shdr->sh_link);
          sinfo("  sh_info:      %d\n",   shdr->sh_info);
          sinfo("  sh_addralign: %d\n",   shdr->sh_addralign);
          sinfo("  sh_entsize:   %d\n",   shdr->sh_entsize);
        }
    }
}
Example #29
int insmod(FAR const char *filename, FAR const char *modulename)
{
  struct mod_loadinfo_s loadinfo;
  FAR struct module_s *modp;
  mod_initializer_t initializer;
  int ret;

  DEBUGASSERT(filename != NULL && modulename != NULL);
  sinfo("Loading file: %s\n", filename);

  /* Get exclusive access to the module registry */

  mod_registry_lock();

  /* Check if this module is already installed */

  if (mod_registry_find(modulename) != NULL)
    {
      ret = -EEXIST;
      goto errout_with_lock;  /* errout_with_lock releases the registry */
    }

  /* Initialize the ELF library to load the program binary. */

  ret = mod_initialize(filename, &loadinfo);
  mod_dumploadinfo(&loadinfo);
  if (ret != 0)
    {
      serr("ERROR: Failed to initialize to load module: %d\n", ret);
      goto errout_with_lock;
    }

  /* Allocate a module registry entry to hold the module data */

  modp = (FAR struct module_s *)kmm_zalloc(sizeof(struct module_s));
  if (modp == NULL)
    {
      serr("ERROR: Failed to allocate struct module_s\n");
      ret = -ENOMEM;
      goto errout_with_loadinfo;
    }

  /* Save the module name in the registry entry */

  strncpy(modp->modulename, modulename, MODULENAME_MAX);

  /* Load the program binary */

  ret = mod_load(&loadinfo);
  mod_dumploadinfo(&loadinfo);
  if (ret != 0)
    {
      sinfo("Failed to load ELF program binary: %d\n", ret);
      goto errout_with_registry_entry;
    }

  /* Bind the program to the kernel symbol table */

  ret = mod_bind(&loadinfo);
  if (ret != 0)
    {
      sinfo("Failed to bind symbols program binary: %d\n", ret);
      goto errout_with_load;
    }

  /* Return the load information */

  modp->alloc       = (FAR void *)loadinfo.textalloc;
#if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MODULE)
  modp->textsize    = loadinfo.textsize;
  modp->datasize    = loadinfo.datasize;
#endif

  /* Get the module initializer entry point */

  initializer = (mod_initializer_t)(loadinfo.textalloc + loadinfo.ehdr.e_entry);
#if defined(CONFIG_FS_PROCFS) && !defined(CONFIG_FS_PROCFS_EXCLUDE_MODULE)
  modp->initializer = initializer;
#endif
  mod_dumpinitializer(initializer, &loadinfo);

  /* Call the module initializer */

  ret = initializer(&modp->uninitializer, &modp->arg);
  if (ret < 0)
    {
      sinfo("Failed to initialize the module: %d\n", ret);
      goto errout_with_load;
    }

  /* Add the new module entry to the registry */

  mod_registry_add(modp);

  mod_uninitialize(&loadinfo);
  mod_registry_unlock();
  return OK;

errout_with_load:
  mod_unload(&loadinfo);
errout_with_registry_entry:
  kmm_free(modp);
errout_with_loadinfo:
  mod_uninitialize(&loadinfo);
errout_with_lock:
  mod_registry_unlock();
  set_errno(-ret);
  return ERROR;
}
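
A hedged sketch of installing and later removing a module; the ELF path and module name are illustrative, and rmmod() is assumed to be the matching removal interface in this OS:

static int load_driver(void)
{
  int ret = insmod("/mnt/romfs/mydriver", "mydriver");
  if (ret < 0)
    {
      serr("ERROR: insmod failed: %d\n", get_errno());
      return ret;
    }

  /* ... use the module, then remove it by name ... */

  return rmmod("mydriver");
}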
Example #30
void up_reprioritize_rtr(struct tcb_s *tcb, uint8_t priority)
{
  /* Verify that the caller is sane */

  if (tcb->task_state < FIRST_READY_TO_RUN_STATE ||
      tcb->task_state > LAST_READY_TO_RUN_STATE
#if SCHED_PRIORITY_MIN > 0
      || priority < SCHED_PRIORITY_MIN
#endif
#if SCHED_PRIORITY_MAX < UINT8_MAX
      || priority > SCHED_PRIORITY_MAX
#endif
    )
    {
       DEBUGPANIC();
    }
  else
    {
      struct tcb_s *rtcb = this_task();
      bool switch_needed;

      sinfo("TCB=%p PRI=%d\n", tcb, priority);

      /* Remove the tcb task from the ready-to-run list.
       * sched_removereadytorun will return true if we just
       * removed the head of the ready-to-run list.
       */

      switch_needed = sched_removereadytorun(tcb);

      /* Set up the new task priority */

      tcb->sched_priority = (uint8_t)priority;

      /* Add the task back to the ready-to-run task list.
       * sched_addreadytorun will return true if the task was
       * added at the head of the list.  We will need to perform a
       * context switch only if the exclusive OR of the two calls is
       * non-zero (i.e., one and only one of the calls changed the
       * head of the ready-to-run list).
       */

      switch_needed ^= sched_addreadytorun(tcb);

      /* Now, perform the context switch if one is needed */

      if (switch_needed)
        {
          /* If we are going to do a context switch, then now is the right
           * time to add any pending tasks back into the ready-to-run
           * task list.
           */

          if (g_pendingtasks.head)
            {
              sched_mergepending();
            }

          /* Update scheduler parameters */

          sched_suspend_scheduler(rtcb);

          /* Are we in an interrupt handler? */

          if (g_current_regs)
            {
              /* Yes, then we have to do things differently.
               * Just copy the g_current_regs into the OLD rtcb.
               */

               up_savestate(rtcb->xcp.regs);

              /* Restore the exception context of the rtcb at the (new) head
               * of the ready-to-run task list.
               */

              rtcb = this_task();

              /* Update scheduler parameters */

              sched_resume_scheduler(rtcb);

              /* Then switch contexts.  Any necessary address environment
               * changes will be made when the interrupt returns.
               */

              up_restorestate(rtcb->xcp.regs);
            }

          /* No, then we will need to perform the user context switch */

          else
            {
              /* Switch context to the context of the task at the head of the
               * ready to run list.
               */

              struct tcb_s *nexttcb = this_task();

#ifdef CONFIG_ARCH_ADDRENV
              /* Make sure that the address environment for the previously
               * running task is closed down gracefully (data caches dump,
               * MMU flushed) and set up the address environment for the new
               * thread at the head of the ready-to-run list.
               */

              (void)group_addrenv(nexttcb);
#endif
              /* Update scheduler parameters */

              sched_resume_scheduler(nexttcb);

              /* Then switch contexts */

              up_switchcontext(rtcb->xcp.regs, nexttcb->xcp.regs);

              /* up_switchcontext forces a context switch to the task at the
               * head of the ready-to-run list.  It does not 'return' in the
               * normal sense.  When it does return, it is because the blocked
               * task is again ready to run and has execution priority.
               */
            }
        }
    }
}