Example #1
void up_release_stack(FAR struct tcb_s *dtcb, uint8_t ttype)
{
  /* Is there a stack allocated? */

  if (dtcb->stack_alloc_ptr)
    {
#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
      /* Use the kernel allocator if this is a kernel thread */

      if (ttype == TCB_FLAG_TTYPE_KERNEL)
        {
          sched_kfree(dtcb->stack_alloc_ptr);
        }
      else
#endif
        {
          /* Use the user-space allocator if this is a task or pthread */

          sched_ufree(dtcb->stack_alloc_ptr);
        }

      /* Mark the stack freed */

      dtcb->stack_alloc_ptr = NULL;
    }

  /* The size of the allocated stack is now zero */

  dtcb->adj_stack_size = 0;
}
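For context, the free above must match the allocator used when the stack was created. Below is a minimal allocation-side sketch under the same configuration; the helper name is hypothetical, and kmm_malloc()/kumm_malloc() are assumed to be the matching kernel/user heap allocators.

static int my_create_stack(FAR struct tcb_s *tcb, size_t size, uint8_t ttype)
{
#if defined(CONFIG_NUTTX_KERNEL) && defined(CONFIG_MM_KERNEL_HEAP)
  /* Kernel threads get their stacks from the kernel heap */

  if (ttype == TCB_FLAG_TTYPE_KERNEL)
    {
      tcb->stack_alloc_ptr = kmm_malloc(size);
    }
  else
#endif
    {
      /* Tasks and pthreads get their stacks from the user heap */

      tcb->stack_alloc_ptr = kumm_malloc(size);
    }

  return tcb->stack_alloc_ptr ? OK : -ENOMEM;
}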
Example #2
static inline void timer_free(struct posix_timer_s *timer)
{
  irqstate_t flags;

  /* Remove the timer from the allocated list */

  flags = irqsave();
  sq_rem((FAR sq_entry_t *)timer, (FAR sq_queue_t *)&g_alloctimers);

  /* Return it to the free list if it is one of the preallocated timers */

#if CONFIG_PREALLOC_TIMERS > 0
  if ((timer->pt_flags & PT_FLAGS_PREALLOCATED) != 0)
    {
      sq_addlast((FAR sq_entry_t *)timer, (FAR sq_queue_t *)&g_freetimers);
      irqrestore(flags);
    }
  else
#endif
    {
      /* Otherwise, return it to the heap */

      irqrestore(flags);
      sched_kfree(timer);
    }
}
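This free-to-pool-or-heap pattern recurs below (Examples #7, #8, and #10). Reduced to its essentials it looks like the following sketch, in which the struct, flag, and free-list names are hypothetical: preallocated objects go back on a free list with interrupts disabled, while everything else goes to sched_kfree(), which is safe to call even from an interrupt handler.

struct obj_s
{
  sq_entry_t entry;                /* Must be first: permits the sq_entry_t cast */
  uint8_t flags;                   /* OBJ_FLAG_PREALLOCATED, etc. */
};

#define OBJ_FLAG_PREALLOCATED (1 << 0)

static sq_queue_t g_objfreelist;   /* Pool of pre-allocated objects */

static void obj_free(FAR struct obj_s *obj)
{
  irqstate_t flags;

  if ((obj->flags & OBJ_FLAG_PREALLOCATED) != 0)
    {
      /* Guard the free list against concurrent access from interrupt
       * handlers, then return the object to the pool.
       */

      flags = irqsave();
      sq_addlast((FAR sq_entry_t *)obj, &g_objfreelist);
      irqrestore(flags);
    }
  else
    {
      /* Heap-allocated: sched_kfree() defers the actual deallocation
       * if we are running in an interrupt handler.
       */

      sched_kfree(obj);
    }
}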
Example #3
int wd_delete(WDOG_ID wdog)
{
  irqstate_t state;

  DEBUGASSERT(wdog);

  /* The following steps are atomic... the watchdog must not be active when
   * it is being deallocated.
   */

  state = irqsave();

  /* Check if the watchdog has been started. */

  if (WDOG_ISACTIVE(wdog))
    {
      /* Yes.. stop it */

      wd_cancel(wdog);
    }

  /* Did this watchdog come from the pool of pre-allocated timers?  Or, was
   * it allocated from the heap?
   */

  if (WDOG_ISALLOCED(wdog))
    {
      /* It was allocated from the heap.  Use sched_kfree() to release the
       * memory.  If the timer was released from an interrupt handler,
       * sched_kfree() will defer the actual deallocation of the memory
       * until a more appropriate time.
       *
       * We don't need interrupts disabled to do this.
       */

      irqrestore(state);
      sched_kfree(wdog);
    }

  /* This was a pre-allocated timer.  This function should not be called for
   * statically allocated timers.
   */

  else if (!WDOG_ISSTATIC(wdog))
    {
      /* Put the timer back on the free list and increment the count of free
       * timers, all with interrupts disabled.
       */

      sq_addlast((FAR sq_entry_t *)wdog, &g_wdfreelist);
      g_wdnfree++;
      DEBUGASSERT(g_wdnfree <= CONFIG_PREALLOC_WDOGS);
      irqrestore(state);
    }

  /* Return success */

  return OK;
}
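The comment in wd_delete() explains why sched_kfree() appears throughout these examples instead of a direct kmm_free(): it may be called from an interrupt handler, in which case the memory is queued and the actual deallocation is deferred to a safer context. A minimal sketch of that situation, with a hypothetical ISR and request structure:

struct my_req_s
{
  int result;
};

static FAR struct my_req_s *g_current_req;

static int my_interrupt(int irq, FAR void *context)
{
  FAR struct my_req_s *req = g_current_req;

  /* ... complete the request using req->result ... */

  /* A direct kmm_free() is not safe here, but sched_kfree() simply
   * defers the deallocation until the memory manager is available.
   */

  sched_kfree(req);
  g_current_req = NULL;
  return OK;
}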
Example #4
File: irq.c  Project: OpenAMP/libmetal
static int metal_cntr_irq_handler(int irq, void *context, void *data)
{
	if (context != NULL)
		return metal_irq_handle(data, irq);

	/* context == NULL means unregister */
	irqchain_detach(irq, metal_cntr_irq_handler, data);
	sched_kfree(data);
	return 0;
}
Example #5
void lib_stream_release(FAR struct task_group_s *group)
{
  FAR struct streamlist *list;
#if CONFIG_STDIO_BUFFER_SIZE > 0
  int i;
#endif

#if (defined(CONFIG_BUILD_PROTECTED) || defined(CONFIG_BUILD_KERNEL)) && \
     defined(CONFIG_MM_KERNEL_HEAP)
  DEBUGASSERT(group && group->tg_streamlist);
  list = group->tg_streamlist;
#else
  DEBUGASSERT(group);
  list = &group->tg_streamlist;
#endif

  /* Destroy the semaphore and release the filelist */

  (void)sem_destroy(&list->sl_sem);

  /* Release each stream in the list */

#if CONFIG_STDIO_BUFFER_SIZE > 0
  for (i = 0; i < CONFIG_NFILE_STREAMS; i++)
    {
      /* Destroy the semaphore that protects the IO buffer */

      (void)sem_destroy(&list->sl_streams[i].fs_sem);

      /* Release the IO buffer */

      if (list->sl_streams[i].fs_bufstart)
        {
#ifndef CONFIG_BUILD_KERNEL
          /* Release memory from the user heap */

          sched_ufree(list->sl_streams[i].fs_bufstart);
#else
          /* If the exiting group is unprivileged, then it has an address
           * environment.  Don't bother to release the memory in this case...
           * There is no point since the memory lies in the user heap which
           * will be destroyed anyway.  But if this is a privileged group,
           * then we still have to release the memory using the kernel
           * allocator.
           */

          if ((group->tg_flags & GROUP_FLAG_PRIVILEGED) != 0)
            {
              sched_kfree(list->sl_streams[i].fs_bufstart);
            }
#endif
        }
    }
#endif
}
Example #6
void group_delwaiter(FAR struct task_group_s *group)
{
  DEBUGASSERT(group->tg_nwaiters > 0);
  group->tg_nwaiters--;
  if (group->tg_nwaiters == 0 && (group->tg_flags & GROUP_FLAG_DELETED) != 0)
    {
      /* Release the group container (all other resources have already been
       * freed).
       */

      sched_kfree(group);
    }
}
Example #7
void mq_msgfree(FAR mqmsg_t *mqmsg)
{
  irqstate_t saved_state;

  /* If this is a generally available pre-allocated message,
   * then just put it back in the free list.
   */

  if (mqmsg->type == MQ_ALLOC_FIXED)
    {
      /* Make sure we avoid concurrent access to the free
       * list from interrupt handlers.
       */

      saved_state = irqsave();
      sq_addlast((FAR sq_entry_t*)mqmsg, &g_msgfree);
      irqrestore(saved_state);
    }

  /* If this is a message pre-allocated for interrupts,
   * then put it back in the correct free list.
   */

  else if (mqmsg->type == MQ_ALLOC_IRQ)
    {
      /* Make sure we avoid concurrent access to the free
       * list from interrupt handlers.
       */

      saved_state = irqsave();
      sq_addlast((FAR sq_entry_t*)mqmsg, &g_msgfreeirq);
      irqrestore(saved_state);
    }

  /* Otherwise, deallocate it.  Note:  interrupt handlers
   * will never deallocate messages because they will not
   * receive them.
   */

  else if (mqmsg->type == MQ_ALLOC_DYN)
    {
      sched_kfree(mqmsg);
    }
  else
    {
      PANIC();
    }
}
Example #8
void igmp_grpfree(FAR struct net_driver_s *dev, FAR struct igmp_group_s *group)
{
  net_lock_t flags;

  grplldbg("Free: %p flags: %02x\n", group, group->flags);

  /* Cancel the wdog */

  flags = net_lock();
  wd_cancel(group->wdog);

  /* Remove the group structure from the group list in the device structure */

  sq_rem((FAR sq_entry_t *)group, &dev->grplist);

  /* Destroy the wait semaphore */

  (void)sem_destroy(&group->sem);

  /* Destroy the wdog */

  wd_delete(group->wdog);

  /* Then release the group structure resources.  Check first if this is one
   * of the pre-allocated group structures that we will retain in a free list.
   */

#if CONFIG_PREALLOC_IGMPGROUPS > 0
  if (IS_PREALLOCATED(group->flags))
    {
      grplldbg("Put back on free list\n");
      sq_addlast((FAR sq_entry_t *)group, &g_freelist);
      net_unlock(flags);
    }
  else
#endif
    {
      /* No.. deallocate the group structure.  Use sched_kfree() just in case
       * this function is executing within an interrupt handler.
       */

      net_unlock(flags);
      grplldbg("Call sched_kfree()\n");
      sched_kfree(group);
    }
}
Example #9
void pthread_destroyjoin(FAR struct task_group_s *group,
                         FAR struct join_s *pjoin)
{
  sinfo("pjoin=0x%p\n", pjoin);

  /* Remove the join info from the set of joins */

  pthread_removejoininfo(group, (pid_t)pjoin->thread);

  /* Destroy its semaphores */

  (void)sem_destroy(&pjoin->data_sem);
  (void)sem_destroy(&pjoin->exit_sem);

  /* And deallocate the pjoin structure */

  sched_kfree(pjoin);
}
Example #10
void sig_releasependingsignal(FAR sigpendq_t *sigpend)
{
  irqstate_t saved_state;

   * If this is a generally available pre-allocated structure,
   * then just put it back in the free list.
   */

  if (sigpend->type == SIG_ALLOC_FIXED)
    {
      /* Make sure we avoid concurrent access to the free
       * list from interrupt handlers.
       */

      saved_state = irqsave();
      sq_addlast((FAR sq_entry_t*)sigpend, &g_sigpendingsignal);
      irqrestore(saved_state);
    }

  /* If this is a message pre-allocated for interrupts,
   * then put it back in the correct free list.
   */

  else if (sigpend->type == SIG_ALLOC_IRQ)
    {
      /* Make sure we avoid concurrent access to the free
       * list from interrupt handlers.
       */

      saved_state = irqsave();
      sq_addlast((FAR sq_entry_t*)sigpend, &g_sigpendingirqsignal);
      irqrestore(saved_state);
    }

  /* Otherwise, deallocate it.  Note:  interrupt handlers
   * will never deallocate signals because they will not
   * receive them.
   */

  else if (sigpend->type == SIG_ALLOC_DYN)
    {
      sched_kfree(sigpend);
    }
}
Example #11
File: mq_msgqfree.c  Project: a1ien/nuttx
void mq_msgqfree(FAR struct mqueue_inode_s *msgq)
{
  FAR struct mqueue_msg_s *curr;
  FAR struct mqueue_msg_s *next;

  /* Deallocate any stranded messages in the message queue. */

  curr = (FAR struct mqueue_msg_s *)msgq->msglist.head;
  while (curr)
    {
      /* Deallocate the message structure. */

      next = curr->next;
      mq_msgfree(curr);
      curr = next;
    }

  /* Then deallocate the message queue itself */

  sched_kfree(msgq);
}
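mq_msgqfree() shows the standard idiom for freeing a linked list: capture the next link before freeing the current node, because the node's memory (including its next pointer) is invalid after the free. Reduced to a sketch with a hypothetical node type:

struct node_s
{
  FAR struct node_s *next;
  /* ... payload ... */
};

static void node_list_free(FAR struct node_s *head)
{
  FAR struct node_s *curr = head;
  FAR struct node_s *next;

  while (curr)
    {
      next = curr->next;           /* Save the link first... */
      sched_kfree(curr);           /* ...then the node may be freed */
      curr = next;
    }
}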
Example #12
static inline void group_release(FAR struct task_group_s *group)
{
  /* Free all un-reaped child exit status */

#if defined(CONFIG_SCHED_HAVE_PARENT) && defined(CONFIG_SCHED_CHILD_STATUS)
  group_removechildren(group);
#endif

#ifndef CONFIG_DISABLE_SIGNALS
  /* Release pending signals */

  sig_release(group);
#endif

#ifndef CONFIG_DISABLE_PTHREAD
  /* Release pthread resources */

  pthread_release(group);
#endif

#if CONFIG_NFILE_DESCRIPTORS > 0
  /* Free all file-related resources now.  We really need to close files as
   * soon as possible while we still have a functioning task.
   */

  /* Free resources held by the file descriptor list */

  files_releaselist(&group->tg_filelist);

#if CONFIG_NFILE_STREAMS > 0
  /* Free resource held by the stream list */

  lib_stream_release(group);

#endif /* CONFIG_NFILE_STREAMS */
#endif /* CONFIG_NFILE_DESCRIPTORS */

#if CONFIG_NSOCKET_DESCRIPTORS > 0
  /* Free resource held by the socket list */

  net_releaselist(&group->tg_socketlist);
#endif /* CONFIG_NSOCKET_DESCRIPTORS */

#ifndef CONFIG_DISABLE_ENVIRON
  /* Release all shared environment variables */

  env_release(group);
#endif

#ifndef CONFIG_DISABLE_MQUEUE
  /* Close message queues opened by members of the group */

  mq_release(group);
#endif

#if defined(CONFIG_BUILD_KERNEL) && defined(CONFIG_MM_SHM)
  /* Release any resource held by shared memory virtual page allocator */

  (void)shm_group_release(group);
#endif

#ifdef CONFIG_ARCH_ADDRENV
  /* Destroy the group address environment */

  (void)up_addrenv_destroy(&group->tg_addrenv);

  /* Mark no address environment */

  g_gid_current = 0;
#endif

#if defined(HAVE_GROUP_MEMBERS) || defined(CONFIG_ARCH_ADDRENV)
  /* Remove the group from the list of groups */

  group_remove(group);
#endif

#ifdef HAVE_GROUP_MEMBERS
  /* Release the members array */

  if (group->tg_members)
    {
      sched_kfree(group->tg_members);
      group->tg_members = NULL;
    }
#endif

#if CONFIG_NFILE_STREAMS > 0 && defined(CONFIG_MM_KERNEL_HEAP)
  /* In a flat, single-heap build, the stream list is part of the group
   * structure and, hence, will be freed when the group structure is freed.
   * Otherwise, it is separately allocated and must be freed here.
   */

#  if defined(CONFIG_BUILD_PROTECTED)
  /* In the protected build, the task's stream list is always allocated
   * and freed from the single, global user allocator.
   */

  sched_ufree(group->tg_streamlist);

#  elif defined(CONFIG_BUILD_KERNEL)
  /* In the kernel build, the unprivileged process' stream list will be
   * allocated from within its per-process, private user heap.  But in
   * that case, there is no reason to do anything here:  That allocation
   * resides in the user heap, which will be completely freed when we
   * destroy the process' address environment.
   */

  if ((group->tg_flags & GROUP_FLAG_PRIVILEGED) != 0)
    {
      /* But kernel threads are different in this build configuration: Their
       * stream lists were allocated from the common, global kernel heap and
       * must be explicitly freed here.
       */

      sched_kfree(group->tg_streamlist);
    }

#  endif
#endif

#if defined(CONFIG_SCHED_WAITPID) && !defined(CONFIG_SCHED_HAVE_PARENT)
  /* If there are threads waiting for this group to be freed, then we cannot
   * yet free the memory resources.  Instead, just mark the group deleted
   * and wait for those threads to complete their waits.
   */

  if (group->tg_nwaiters > 0)
    {
      group->tg_flags |= GROUP_FLAG_DELETED;
    }
  else
#endif
    {
      /* Release the group container itself */

      sched_kfree(group);
    }
}
Example #13
File: mq_open.c  Project: KimMui/i2sTest
mqd_t mq_open(const char *mq_name, int oflags, ...)
{
  FAR struct tcb_s *rtcb = (FAR struct tcb_s*)g_readytorun.head;
  FAR msgq_t *msgq;
  mqd_t mqdes = NULL;
  va_list arg;                  /* Points to each un-named argument */
  struct mq_attr *attr;         /* MQ creation attributes */
  int namelen;                  /* Length of MQ name */

  /* Make sure that a non-NULL name is supplied */

  if (mq_name)
    {
      sched_lock();
      namelen = strlen(mq_name);
      if (namelen > 0)
        {
          /* See if the message queue already exists */

          msgq = mq_findnamed(mq_name);
          if (msgq)
            {
              /* It does.  Check if the caller wanted to create a new
               * message queue with this name (i.e., O_CREAT|O_EXCL)
               */

              if ((oflags & O_CREAT) == 0 || (oflags & O_EXCL) == 0)
                {
                  /* Create a message queue descriptor for the TCB */

                  mqdes = mq_descreate(rtcb, msgq, oflags);
                  if (mqdes)
                    {
                      /* Allow a new connection to the message queue */

                      msgq->nconnect++;
                    }
                }
            }

          /* It doesn't exist.  Should we create one? */

          else if ((oflags & O_CREAT) != 0)
            {
              /* Allocate memory for the new message queue.  The size to
               * allocate is the size of the msgq_t header plus the size
               * of the message queue name+1.
               */

              msgq = (FAR msgq_t*)kmm_zalloc(SIZEOF_MQ_HEADER + namelen + 1);
              if (msgq)
                {
                  /* Create a message queue descriptor for the TCB */

                  mqdes = mq_descreate(rtcb, msgq, oflags);
                  if (mqdes)
                    {
                      /* Set up to get the optional arguments needed to create
                       * a message queue.
                       */

                      va_start(arg, oflags);
                      (void)va_arg(arg, mode_t); /* MQ creation mode parameter (ignored) */
                      attr = va_arg(arg, struct mq_attr*);

                      /* Initialize the new named message queue */

                      sq_init(&msgq->msglist);
                      if (attr)
                        {
                          msgq->maxmsgs = (int16_t)attr->mq_maxmsg;
                          if (attr->mq_msgsize <= MQ_MAX_BYTES)
                            {
                              msgq->maxmsgsize = (int16_t)attr->mq_msgsize;
                            }
                          else
                            {
                              msgq->maxmsgsize = MQ_MAX_BYTES;
                            }
                        }
                      else
                        {
                          msgq->maxmsgs = MQ_MAX_MSGS;
                          msgq->maxmsgsize = MQ_MAX_BYTES;
                        }

                      msgq->nconnect = 1;
#ifndef CONFIG_DISABLE_SIGNALS
                      msgq->ntpid    = INVALID_PROCESS_ID;
#endif
                      strcpy(msgq->name, mq_name);

                      /* Add the new message queue to the list of
                       * message queues
                       */

                      sq_addlast((FAR sq_entry_t*)msgq, &g_msgqueues);

                      /* Clean-up variable argument stuff */

                      va_end(arg);
                    }
                  else
                    {
                      /* Deallocate the msgq structure.  Since it is
                       * uninitialized, mq_deallocate() is not used.
                       */

                      sched_kfree(msgq);
                    }
                }
            }
Example #14
int pthread_create(FAR pthread_t *thread, FAR const pthread_attr_t *attr,
                   pthread_startroutine_t start_routine, pthread_addr_t arg)
{
  FAR struct pthread_tcb_s *ptcb;
  FAR struct join_s *pjoin;
  struct sched_param param;
  int policy;
  int errcode;
  pid_t pid;
  int ret;
#ifdef HAVE_TASK_GROUP
  bool group_joined = false;
#endif

  /* If attributes were not supplied, use the default attributes */

  if (!attr)
    {
      attr = &g_default_pthread_attr;
    }

  /* Allocate a TCB for the new task. */

  ptcb = (FAR struct pthread_tcb_s *)kmm_zalloc(sizeof(struct pthread_tcb_s));
  if (!ptcb)
    {
      sdbg("ERROR: Failed to allocate TCB\n");
      return ENOMEM;
    }

#ifdef HAVE_TASK_GROUP
  /* Bind the parent's group to the new TCB (we have not yet joined the
   * group).
   */

  ret = group_bind(ptcb);
  if (ret < 0)
    {
      errcode = ENOMEM;
      goto errout_with_tcb;
    }
#endif

#ifdef CONFIG_ARCH_ADDRENV
  /* Share the address environment of the parent task group. */

  ret = up_addrenv_attach(ptcb->cmn.group,
                          (FAR struct tcb_s *)g_readytorun.head);
  if (ret < 0)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }
#endif

  /* Allocate a detachable structure to support pthread_join logic */

  pjoin = (FAR struct join_s *)kmm_zalloc(sizeof(struct join_s));
  if (!pjoin)
    {
      sdbg("ERROR: Failed to allocate join\n");
      errcode = ENOMEM;
      goto errout_with_tcb;
    }

  /* Allocate the stack for the TCB */

  ret = up_create_stack((FAR struct tcb_s *)ptcb, attr->stacksize,
                        TCB_FLAG_TTYPE_PTHREAD);
  if (ret != OK)
    {
      errcode = ENOMEM;
      goto errout_with_join;
    }

  /* Should we use the priority and scheduler specified in the pthread
   * attributes?  Or should we use the current thread's priority and
   * scheduler?
   */

  if (attr->inheritsched == PTHREAD_INHERIT_SCHED)
    {
      /* Get the priority (and any other scheduling parameters) for this
       * thread.
       */

      ret = sched_getparam(0, &param);
      if (ret == ERROR)
        {
          errcode = get_errno();
          goto errout_with_join;
        }

      /* Get the scheduler policy for this thread */

      policy = sched_getscheduler(0);
      if (policy == ERROR)
        {
          errcode = get_errno();
          goto errout_with_join;
        }
    }
  else
    {
      /* Use the scheduler policy and priority from the attributes */

      policy                             = attr->policy;
      param.sched_priority               = attr->priority;

#ifdef CONFIG_SCHED_SPORADIC
      param.sched_ss_low_priority        = attr->low_priority;
      param.sched_ss_max_repl            = attr->max_repl;
      param.sched_ss_repl_period.tv_sec  = attr->repl_period.tv_sec;
      param.sched_ss_repl_period.tv_nsec = attr->repl_period.tv_nsec;
      param.sched_ss_init_budget.tv_sec  = attr->budget.tv_sec;
      param.sched_ss_init_budget.tv_nsec = attr->budget.tv_nsec;
#endif
    }

#ifdef CONFIG_SCHED_SPORADIC
  if (policy == SCHED_SPORADIC)
    {
      FAR struct sporadic_s *sporadic;
      int repl_ticks;
      int budget_ticks;

      /* Convert timespec values to system clock ticks */

      (void)clock_time2ticks(&param.sched_ss_repl_period, &repl_ticks);
      (void)clock_time2ticks(&param.sched_ss_init_budget, &budget_ticks);

      /* The replenishment period must be greater than or equal to the
       * budget period.
       */

      if (repl_ticks < budget_ticks)
        {
          errcode = EINVAL;
          goto errout_with_join;
        }

      /* Initialize the sporadic policy */

      ret = sched_sporadic_initialize(&ptcb->cmn);
      if (ret >= 0)
        {
          sporadic               = ptcb->cmn.sporadic;
          DEBUGASSERT(sporadic != NULL);

          /* Save the sporadic scheduling parameters */

          sporadic->hi_priority  = param.sched_priority;
          sporadic->low_priority = param.sched_ss_low_priority;
          sporadic->max_repl     = param.sched_ss_max_repl;
          sporadic->repl_period  = repl_ticks;
          sporadic->budget       = budget_ticks;

          /* And start the first replenishment interval */

          ret = sched_sporadic_start(&ptcb->cmn);
        }

      /* Handle any failures */

      if (ret < 0)
        {
          errcode = -ret;
          goto errout_with_join;
        }
    }
#endif

  /* Initialize the task control block */

  ret = pthread_schedsetup(ptcb, param.sched_priority, pthread_start,
                           start_routine);
  if (ret != OK)
    {
      errcode = EBUSY;
      goto errout_with_join;
    }

  /* Configure the TCB for a pthread receiving one parameter
   * passed by value
   */

  pthread_argsetup(ptcb, arg);

#ifdef HAVE_TASK_GROUP
  /* Join the parent's task group */

  ret = group_join(ptcb);
  if (ret < 0)
    {
      errcode = ENOMEM;
      goto errout_with_join;
    }

  group_joined = true;
#endif

  /* Attach the join info to the TCB. */

  ptcb->joininfo = (FAR void *)pjoin;

  /* Set the appropriate scheduling policy in the TCB */

  ptcb->cmn.flags &= ~TCB_FLAG_POLICY_MASK;
  switch (policy)
    {
      default:
        DEBUGPANIC();
      case SCHED_FIFO:
        ptcb->cmn.flags    |= TCB_FLAG_SCHED_FIFO;
        break;

#if CONFIG_RR_INTERVAL > 0
      case SCHED_RR:
        ptcb->cmn.flags    |= TCB_FLAG_SCHED_RR;
        ptcb->cmn.timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
        break;
#endif

#ifdef CONFIG_SCHED_SPORADIC
      case SCHED_SPORADIC:
        ptcb->cmn.flags    |= TCB_FLAG_SCHED_SPORADIC;
        break;
#endif

#if 0 /* Not supported */
      case SCHED_OTHER:
        ptcb->cmn.flags    |= TCB_FLAG_SCHED_OTHER;
        break;
#endif
    }

  /* Get the assigned pid before we start the task (who knows what
   * could happen to ptcb after this!).  Copy this ID into the join structure
   * as well.
   */

  pid = (int)ptcb->cmn.pid;
  pjoin->thread = (pthread_t)pid;

  /* Initialize the semaphores in the join structure to zero. */

  ret = sem_init(&pjoin->data_sem, 0, 0);
  if (ret == OK)
    {
      ret = sem_init(&pjoin->exit_sem, 0, 0);
    }

  /* Activate the task */

  sched_lock();
  if (ret == OK)
    {
      ret = task_activate((FAR struct tcb_s *)ptcb);
    }

  if (ret == OK)
    {
      /* Wait for the task to actually get running and to register
       * its join structure.
       */

      (void)pthread_takesemaphore(&pjoin->data_sem);

      /* Return the thread information to the caller */

      if (thread)
       {
         *thread = (pthread_t)pid;
       }

      if (!pjoin->started)
        {
          ret = EINVAL;
        }

      sched_unlock();
      (void)sem_destroy(&pjoin->data_sem);
    }
  else
    {
      sched_unlock();
      dq_rem((FAR dq_entry_t *)ptcb, (FAR dq_queue_t *)&g_inactivetasks);
      (void)sem_destroy(&pjoin->data_sem);
      (void)sem_destroy(&pjoin->exit_sem);

      errcode = EIO;
      goto errout_with_join;
    }

  return ret;

errout_with_join:
  sched_kfree(pjoin);
  ptcb->joininfo = NULL;

errout_with_tcb:
#ifdef HAVE_TASK_GROUP
  /* Clear group binding */

  if (ptcb && !group_joined)
    {
      ptcb->cmn.group = NULL;
    }
#endif

  sched_releasetcb((FAR struct tcb_s *)ptcb, TCB_FLAG_TTYPE_PTHREAD);
  return errcode;
}
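pthread_create() also illustrates the error-unwind ladder used throughout this code base: each failure jumps to the label that releases everything acquired so far, in reverse order of acquisition, so there is exactly one cleanup path per resource. A stripped-down sketch of the shape follows; the resource types and do_step() are hypothetical.

struct res_a_s { int x; };
struct res_b_s { int y; };

/* A hypothetical step that can fail */

static int do_step(FAR struct res_a_s *a, FAR struct res_b_s *b)
{
  a->x = 1;
  b->y = 2;
  return OK;
}

static int create_thing(void)
{
  FAR struct res_a_s *a;
  FAR struct res_b_s *b;
  int errcode;

  a = (FAR struct res_a_s *)kmm_zalloc(sizeof(struct res_a_s));
  if (!a)
    {
      return ENOMEM;
    }

  b = (FAR struct res_b_s *)kmm_zalloc(sizeof(struct res_b_s));
  if (!b)
    {
      errcode = ENOMEM;
      goto errout_with_a;
    }

  if (do_step(a, b) < 0)
    {
      errcode = EIO;
      goto errout_with_b;
    }

  return OK;

errout_with_b:
  sched_kfree(b);

errout_with_a:
  sched_kfree(a);
  return errcode;
}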