/* thread_create: Common helper that creates a new task or kernel thread.
 *
 * Allocates a zeroed TCB, binds it to a task group, associates file/socket
 * descriptors, allocates the stack, initializes scheduling state and the
 * argument list, joins the group, and finally activates the new thread.
 * On any failure the TCB (and everything attached to it so far) is released
 * via sched_releasetcb() and errno is set.
 *
 * Input Parameters:
 *   name       - Name of the new task (passed through to task_argsetup)
 *   ttype      - Thread type bits (task vs. kernel thread privileges)
 *   priority   - Priority of the new task
 *   stack_size - Size (in bytes) of the stack to allocate
 *   entry      - Entry point of the new task
 *   argv       - NULL-terminated argument list (may be NULL)
 *
 * Returned Value:
 *   The PID of the new task on success; ERROR on failure with errno set.
 */

static int thread_create(FAR const char *name, uint8_t ttype, int priority,
                         int stack_size, main_t entry,
                         FAR char * const argv[])
{
  FAR struct task_tcb_s *tcb;
  pid_t pid;
  int errcode;
  int ret;

  /* Allocate a TCB for the new task.  kmm_zalloc() zeroes the TCB so all
   * fields start in a known state.
   */

  tcb = (FAR struct task_tcb_s *)kmm_zalloc(sizeof(struct task_tcb_s));
  if (!tcb)
    {
      sdbg("ERROR: Failed to allocate TCB\n");
      errcode = ENOMEM;
      goto errout;
    }

  /* Allocate a new task group with privileges appropriate for the parent
   * thread type.  Negative return values are negated errno codes.
   */

#ifdef HAVE_TASK_GROUP
  ret = group_allocate(tcb, ttype);
  if (ret < 0)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }
#endif

  /* Associate file descriptors with the new task */

#if CONFIG_NFILE_DESCRIPTORS > 0 || CONFIG_NSOCKET_DESCRIPTORS > 0
  ret = group_setuptaskfiles(tcb);
  if (ret < OK)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }
#endif

  /* Allocate the stack for the TCB */

  ret = up_create_stack((FAR struct tcb_s *)tcb, stack_size, ttype);
  if (ret < OK)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }

  /* Initialize the task control block (priority, entry point, initial
   * register state via up_initial_state()).
   */

  ret = task_schedsetup(tcb, priority, task_start, entry, ttype);
  if (ret < OK)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }

  /* Setup to pass parameters to the new task */

  (void)task_argsetup(tcb, name, argv);

  /* Now we have enough in place that we can join the group */

#ifdef HAVE_TASK_GROUP
  ret = group_initialize(tcb);
  if (ret < 0)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }
#endif

  /* Get the assigned pid before we start the task.  Once activated the
   * new thread may run and exit, making the TCB invalid to dereference.
   */

  pid = (int)tcb->cmn.pid;

  /* Activate the task */

  ret = task_activate((FAR struct tcb_s *)tcb);
  if (ret < OK)
    {
      errcode = get_errno();

      /* The TCB was added to the active task list by task_schedsetup() */

      dq_rem((FAR dq_entry_t*)tcb, (dq_queue_t*)&g_inactivetasks);
      goto errout_with_tcb;
    }

  return pid;

errout_with_tcb:
  /* Releases the TCB, its group reference, descriptors, and stack */

  sched_releasetcb((FAR struct tcb_s *)tcb, ttype);

errout:
  set_errno(errcode);
  return ERROR;
}
/* up_vfork: ARM implementation of the architecture-specific vfork() support.
 *
 * Creates a child task that is a near-clone of the parent: a new TCB and
 * stack are allocated, the used portion of the parent's stack is copied to
 * the child's stack, and the parent's callee-saved registers (captured in
 * 'context' by the vfork() assembly trampoline) are installed in the
 * child's initial register state.  The child resumes at the parent's
 * return address (lr).
 *
 * Input Parameters:
 *   context - Caller context (r4-r10, fp, sp, lr) saved at the vfork() call
 *
 * Returned Value:
 *   The child's PID on success (parent's return); ERROR on failure.
 */

pid_t up_vfork(const struct vfork_s *context)
{
  struct tcb_s *parent = (FAR struct tcb_s *)g_readytorun.head;
  struct task_tcb_s *child;
  size_t stacksize;
  uint32_t newsp;
  uint32_t newfp;
  uint32_t stackutil;
  int ret;

  svdbg("vfork context [%p]:\n", context);
  svdbg(" r4:%08x r5:%08x r6:%08x r7:%08x\n",
        context->r4, context->r5, context->r6, context->r7);
  svdbg(" r8:%08x r9:%08x r10:%08x\n",
        context->r8, context->r9, context->r10);
  svdbg(" fp:%08x sp:%08x lr:%08x\n",
        context->fp, context->sp, context->lr);

  /* Allocate and initialize a TCB for the child task.  The low bit of lr
   * is masked off because it is the Thumb state bit, not part of the
   * return address.
   */

  child = task_vforksetup((start_t)(context->lr & ~1));
  if (!child)
    {
      sdbg("ERROR: task_vforksetup failed\n");
      return (pid_t)ERROR;
    }

  svdbg("TCBs: Parent=%p Child=%p\n", parent, child);

  /* Get the size of the parent task's stack.  Due to alignment operations,
   * the adjusted stack size may be smaller than the stack size originally
   * requested.
   */

  stacksize = parent->adj_stack_size + CONFIG_STACK_ALIGNMENT - 1;

  /* Allocate the stack for the TCB */

  ret = up_create_stack((FAR struct tcb_s *)child, stacksize,
                        parent->flags & TCB_FLAG_TTYPE_MASK);
  if (ret != OK)
    {
      sdbg("ERROR: up_create_stack failed: %d\n", ret);
      task_vforkabort(child, -ret);
      return (pid_t)ERROR;
    }

  /* How much of the parent's stack was utilized?  The ARM uses
   * a push-down stack so that the current stack pointer should
   * be lower than the initial, adjusted stack pointer.  The
   * stack usage should be the difference between those two.
   */

  DEBUGASSERT((uint32_t)parent->adj_stack_ptr > context->sp);
  stackutil = (uint32_t)parent->adj_stack_ptr - context->sp;

  svdbg("Parent: stacksize:%d stackutil:%d\n", stacksize, stackutil);

  /* Make some feeble effort to preserve the stack contents.  This is
   * feeble because the stack surely contains invalid pointers and other
   * content that will not work in the child context.  However, if the
   * user follows all of the caveats of vfork() usage, even this feeble
   * effort is overkill.
   */

  newsp = (uint32_t)child->cmn.adj_stack_ptr - stackutil;
  memcpy((void *)newsp, (const void *)context->sp, stackutil);

  /* Was there a frame pointer in place before?  If fp pointed into the
   * parent's stack, rebase it into the child's copy at the same offset;
   * otherwise pass it through unchanged.
   */

  if (context->fp <= (uint32_t)parent->adj_stack_ptr &&
      context->fp >= (uint32_t)parent->adj_stack_ptr - stacksize)
    {
      uint32_t frameutil = (uint32_t)parent->adj_stack_ptr - context->fp;
      newfp = (uint32_t)child->cmn.adj_stack_ptr - frameutil;
    }
  else
    {
      newfp = context->fp;
    }

  svdbg("Parent: stack base:%08x SP:%08x FP:%08x\n",
        parent->adj_stack_ptr, context->sp, context->fp);
  svdbg("Child: stack base:%08x SP:%08x FP:%08x\n",
        child->cmn.adj_stack_ptr, newsp, newfp);

  /* Update the stack pointer, frame pointer, and volatile registers.  When
   * the child TCB was initialized, all of the values were set to zero.
   * up_initial_state() altered a few values, but the return value in R0
   * should be cleared to zero, providing the indication to the newly started
   * child thread.
   */

  child->cmn.xcp.regs[REG_R4] = context->r4;   /* Volatile register r4 */
  child->cmn.xcp.regs[REG_R5] = context->r5;   /* Volatile register r5 */
  child->cmn.xcp.regs[REG_R6] = context->r6;   /* Volatile register r6 */
  child->cmn.xcp.regs[REG_R7] = context->r7;   /* Volatile register r7 */
  child->cmn.xcp.regs[REG_R8] = context->r8;   /* Volatile register r8 */
  child->cmn.xcp.regs[REG_R9] = context->r9;   /* Volatile register r9 */
  child->cmn.xcp.regs[REG_R10] = context->r10; /* Volatile register r10 */
  child->cmn.xcp.regs[REG_FP] = newfp;         /* Frame pointer */
  child->cmn.xcp.regs[REG_SP] = newsp;         /* Stack pointer */

#ifdef CONFIG_LIB_SYSCALL
  /* If we got here via a syscall, then we are going to have to setup some
   * syscall return information as well.
   */

  if (parent->xcp.nsyscalls > 0)
    {
      int index;
      for (index = 0; index < parent->xcp.nsyscalls; index++)
        {
          child->cmn.xcp.syscall[index].sysreturn =
            parent->xcp.syscall[index].sysreturn;

          /* REVISIT: This logic is *not* common.  Which saved state exists
           * depends on the ARM profile (A vs. M) of the build.
           */

#if defined(CONFIG_ARCH_CORTEXA5) || defined(CONFIG_ARCH_CORTEXA8)
# ifdef CONFIG_BUILD_KERNEL
          child->cmn.xcp.syscall[index].cpsr =
            parent->xcp.syscall[index].cpsr;
# endif
#elif defined(CONFIG_ARCH_CORTEXM3) || defined(CONFIG_ARCH_CORTEXM4) || \
      defined(CONFIG_ARCH_CORTEXM0) || defined(CONFIG_ARCH_CORTEXM7)
          child->cmn.xcp.syscall[index].excreturn =
            parent->xcp.syscall[index].excreturn;
#else
# error Missing logic
#endif
        }

      child->cmn.xcp.nsyscalls = parent->xcp.nsyscalls;
    }
#endif

  /* And, finally, start the child task.  On a failure, task_vforkstart()
   * will discard the TCB by calling task_vforkabort().
   */

  return task_vforkstart(child);
}
/* pthread_create: Create a new pthread (legacy _TCB implementation).
 *
 * Allocates a TCB and join structure, sets up descriptors, environment,
 * stack, and scheduling parameters, then activates the thread and waits
 * on pjoin->data_sem until the new thread has registered its join info.
 *
 * Input Parameters:
 *   thread        - Receives the ID of the new thread (may be NULL)
 *   attr          - Thread attributes; NULL selects g_default_pthread_attr
 *   start_routine - Thread entry point
 *   arg           - Argument passed to start_routine
 *
 * Returned Value:
 *   Zero (OK) on success; an errno value on failure (pthread_create
 *   reports errors via the return value, not via errno).
 *
 * Fixes relative to the previous revision:
 *   - "&param" had been corrupted to a mojibake sequence; restored.
 *   - The final "return OK;" discarded 'status', so a thread that failed
 *     to start (!pjoin->started => status = ERROR) was reported as
 *     success.  Now returns EINVAL in that case, matching the behavior
 *     of the later pthread_create revision.
 */

int pthread_create(FAR pthread_t *thread, FAR pthread_attr_t *attr,
                   pthread_startroutine_t start_routine, pthread_addr_t arg)
{
  FAR _TCB *ptcb;
  FAR join_t *pjoin;
  int status;
  int priority;
#if CONFIG_RR_INTERVAL > 0
  int policy;
#endif
  pid_t pid;

  /* If attributes were not supplied, use the default attributes */

  if (!attr)
    {
      attr = &g_default_pthread_attr;
    }

  /* Allocate a TCB for the new task. */

  ptcb = (FAR _TCB*)kzalloc(sizeof(_TCB));
  if (!ptcb)
    {
      return ENOMEM;
    }

  /* Associate file descriptors with the new task */

  status = sched_setuppthreadfiles(ptcb);
  if (status != OK)
    {
      sched_releasetcb(ptcb);
      return status;
    }

  /* Share the parent's environment */

  (void)env_share(ptcb);

  /* Allocate a detachable structure to support pthread_join logic */

  pjoin = (FAR join_t*)kzalloc(sizeof(join_t));
  if (!pjoin)
    {
      sched_releasetcb(ptcb);
      return ENOMEM;
    }

  /* Allocate the stack for the TCB */

  status = up_create_stack(ptcb, attr->stacksize);
  if (status != OK)
    {
      sched_releasetcb(ptcb);
      sched_free(pjoin);
      return ENOMEM;
    }

  /* Should we use the priority and scheduler specified in the
   * pthread attributes?  Or should we use the current thread's
   * priority and scheduler?
   */

  if (attr->inheritsched == PTHREAD_INHERIT_SCHED)
    {
      /* Get the priority for this thread. */

      struct sched_param param;
      status = sched_getparam(0, &param);
      if (status == OK)
        {
          priority = param.sched_priority;
        }
      else
        {
          /* NOTE(review): SCHED_FIFO is a policy constant, not a priority;
           * using it as a fallback priority looks suspicious but is
           * preserved here -- TODO confirm intended default.
           */

          priority = SCHED_FIFO;
        }

      /* Get the scheduler policy for this thread */

#if CONFIG_RR_INTERVAL > 0
      policy = sched_getscheduler(0);
      if (policy == ERROR)
        {
          policy = SCHED_FIFO;
        }
#endif
    }
  else
    {
      /* Use the priority and scheduler from the attributes */

      priority = attr->priority;
#if CONFIG_RR_INTERVAL > 0
      policy = attr->policy;
#endif
    }

  /* Mark this task as a pthread (this setting will be needed in
   * task_schedsetup() when up_initial_state() is called).
   */

  ptcb->flags |= TCB_FLAG_TTYPE_PTHREAD;

  /* Initialize the task control block */

  status = task_schedsetup(ptcb, priority, pthread_start,
                           (main_t)start_routine);
  if (status != OK)
    {
      sched_releasetcb(ptcb);
      sched_free(pjoin);
      return EBUSY;
    }

  /* Configure the TCB for a pthread receiving on parameter
   * passed by value
   */

  pthread_argsetup(ptcb, arg);

  /* Attach the join info to the TCB. */

  ptcb->joininfo = (void*)pjoin;

  /* If round robin scheduling is selected, set the appropriate flag
   * in the TCB.
   */

#if CONFIG_RR_INTERVAL > 0
  if (policy == SCHED_RR)
    {
      ptcb->flags |= TCB_FLAG_ROUND_ROBIN;
      ptcb->timeslice = CONFIG_RR_INTERVAL / MSEC_PER_TICK;
    }
#endif

  /* Get the assigned pid before we start the task (who knows what
   * could happen to ptcb after this!).  Copy this ID into the join
   * structure as well.
   */

  pid = (int)ptcb->pid;
  pjoin->thread = (pthread_t)pid;

  /* Initialize the semaphores in the join structure to zero. */

  status = sem_init(&pjoin->data_sem, 0, 0);
  if (status == OK)
    {
      status = sem_init(&pjoin->exit_sem, 0, 0);
    }

  /* Activate the task */

  sched_lock();
  if (status == OK)
    {
      status = task_activate(ptcb);
    }

  if (status == OK)
    {
      /* Wait for the task to actually get running and to register
       * its join_t
       */

      (void)pthread_takesemaphore(&pjoin->data_sem);

      /* Return the thread information to the caller */

      if (thread)
        {
          *thread = (pthread_t)pid;
        }

      if (!pjoin->started)
        {
          status = ERROR;
        }

      sched_unlock();
      (void)sem_destroy(&pjoin->data_sem);
    }
  else
    {
      sched_unlock();
      dq_rem((FAR dq_entry_t*)ptcb, (dq_queue_t*)&g_inactivetasks);
      (void)sem_destroy(&pjoin->data_sem);
      (void)sem_destroy(&pjoin->exit_sem);
      sched_releasetcb(ptcb);
      sched_free(pjoin);
      return EIO;
    }

  /* Report failure-to-start to the caller instead of silently
   * returning success (previously 'status' was discarded here).
   */

  return (status == OK) ? OK : EINVAL;
}
/* thread_create: Common task creation helper (legacy _TCB implementation).
 *
 * NOTE(review): The trailing "#endif" after the parameter list shows this
 * signature is one branch of a preprocessor conditional whose other branch
 * (presumably the !CONFIG_CUSTOM_STACK variant declaring 'stack_size') is
 * outside this view; the body references 'stack_size' inside the
 * #ifndef CONFIG_CUSTOM_STACK region accordingly -- confirm against the
 * full file.
 *
 * Input Parameters:
 *   name     - Name of the new task
 *   type     - Thread type flag bits OR'ed into tcb->flags
 *   priority - Priority of the new task
 *   entry    - Entry point of the new task
 *   argv     - NULL-terminated argument list
 *
 * Returned Value:
 *   The PID of the new task on success; ERROR on failure with errno set.
 *   NOTE(review): errno is unconditionally set to ENOMEM on every failure
 *   path, even for non-memory errors from sched_setuptaskfiles() or
 *   task_activate() -- verify this is intentional.
 */

static int thread_create(const char *name, uint8_t type, int priority,
                         main_t entry, const char **argv)
#endif
{
  FAR _TCB *tcb;
  pid_t pid;
  int ret;

  /* Allocate a TCB for the new task. */

  tcb = (FAR _TCB*)kzalloc(sizeof(_TCB));
  if (!tcb)
    {
      goto errout;
    }

  /* Associate file descriptors with the new task */

#if CONFIG_NFILE_DESCRIPTORS > 0 || CONFIG_NSOCKET_DESCRIPTORS > 0
  ret = sched_setuptaskfiles(tcb);
  if (ret != OK)
    {
      goto errout_with_tcb;
    }
#endif

  /* Clone the parent's task environment */

  (void)env_dup(tcb);

  /* Allocate the stack for the TCB */

#ifndef CONFIG_CUSTOM_STACK
  ret = up_create_stack(tcb, stack_size);
  if (ret != OK)
    {
      goto errout_with_tcb;
    }
#endif

  /* Mark the type of this thread (this setting will be needed in
   * task_schedsetup() when up_initial_state() is called).
   */

  tcb->flags |= type;

  /* Initialize the task control block */

  ret = task_schedsetup(tcb, priority, task_start, entry);
  if (ret != OK)
    {
      goto errout_with_tcb;
    }

  /* Setup to pass parameters to the new task */

  (void)task_argsetup(tcb, name, argv);

  /* Get the assigned pid before we start the task; once activated the
   * new thread may run and exit, invalidating tcb.
   */

  pid = (int)tcb->pid;

  /* Activate the task */

  ret = task_activate(tcb);
  if (ret != OK)
    {
      /* The TCB was added to the active task list by task_schedsetup() */

      dq_rem((FAR dq_entry_t*)tcb, (dq_queue_t*)&g_inactivetasks);
      goto errout_with_tcb;
    }

  return pid;

errout_with_tcb:
  sched_releasetcb(tcb);

errout:
  errno = ENOMEM;
  return ERROR;
}
/* up_vfork: ARM vfork() support (legacy _TCB implementation).
 *
 * Creates a child task cloned from the current (parent) task: allocates a
 * TCB and stack, copies the used portion of the parent's stack into the
 * child's stack, rebases the frame pointer if it pointed into the parent's
 * stack, and installs the parent's saved registers in the child's initial
 * context.  The child resumes at the parent's return address (lr).
 *
 * Input Parameters:
 *   context - Caller context (r4-r10, fp, sp, lr) saved at the vfork() call
 *
 * Returned Value:
 *   The child's PID on success (parent's return); ERROR on failure.
 */

pid_t up_vfork(const struct vfork_s *context)
{
  _TCB *parent = (FAR _TCB *)g_readytorun.head;
  _TCB *child;
  size_t stacksize;
  uint32_t newsp;
  uint32_t newfp;
  uint32_t stackutil;
  int ret;

  svdbg("r4:%08x r5:%08x r6:%08x r7:%08x\n",
        context->r4, context->r5, context->r6, context->r7);
  svdbg("r8:%08x r9:%08x r10:%08x\n",
        context->r8, context->r9, context->r10);
  svdbg("fp:%08x sp:%08x lr:%08x\n",
        context->fp, context->sp, context->lr);

  /* Allocate and initialize a TCB for the child task.  The low bit of lr
   * (the Thumb state bit) is masked off to form the start address.
   */

  child = task_vforksetup((start_t)(context->lr & ~1));
  if (!child)
    {
      sdbg("task_vforksetup failed\n");
      return (pid_t)ERROR;
    }

  svdbg("Parent=%p Child=%p\n", parent, child);

  /* Get the size of the parent task's stack.  Due to alignment operations,
   * the adjusted stack size may be smaller than the stack size originally
   * requested.
   */

  stacksize = parent->adj_stack_size + CONFIG_STACK_ALIGNMENT - 1;

  /* Allocate the stack for the TCB */

  ret = up_create_stack(child, stacksize);
  if (ret != OK)
    {
      sdbg("up_create_stack failed: %d\n", ret);
      task_vforkabort(child, -ret);
      return (pid_t)ERROR;
    }

  /* How much of the parent's stack was utilized?  The ARM uses
   * a push-down stack so that the current stack pointer should
   * be lower than the initial, adjusted stack pointer.  The
   * stack usage should be the difference between those two.
   */

  DEBUGASSERT((uint32_t)parent->adj_stack_ptr > context->sp);
  stackutil = (uint32_t)parent->adj_stack_ptr - context->sp;

  svdbg("stacksize:%d stackutil:%d\n", stacksize, stackutil);

  /* Make some feeble effort to preserve the stack contents.  This is
   * feeble because the stack surely contains invalid pointers and other
   * content that will not work in the child context.  However, if the
   * user follows all of the caveats of vfork() usage, even this feeble
   * effort is overkill.
   */

  newsp = (uint32_t)child->adj_stack_ptr - stackutil;
  memcpy((void *)newsp, (const void *)context->sp, stackutil);

  /* Was there a frame pointer in place before?  If fp pointed into the
   * parent's stack, rebase it to the same offset in the child's stack;
   * otherwise pass it through unchanged.
   */

  if (context->fp <= (uint32_t)parent->adj_stack_ptr &&
      context->fp >= (uint32_t)parent->adj_stack_ptr - stacksize)
    {
      uint32_t frameutil = (uint32_t)parent->adj_stack_ptr - context->fp;
      newfp = (uint32_t)child->adj_stack_ptr - frameutil;
    }
  else
    {
      newfp = context->fp;
    }

  svdbg("Old stack base:%08x SP:%08x FP:%08x\n",
        parent->adj_stack_ptr, context->sp, context->fp);
  svdbg("New stack base:%08x SP:%08x FP:%08x\n",
        child->adj_stack_ptr, newsp, newfp);

  /* Update the stack pointer, frame pointer, and volatile registers.  When
   * the child TCB was initialized, all of the values were set to zero.
   * up_initial_state() altered a few values, but the return value in R0
   * should be cleared to zero, providing the indication to the newly started
   * child thread.
   */

  child->xcp.regs[REG_R4] = context->r4;   /* Volatile register r4 */
  child->xcp.regs[REG_R5] = context->r5;   /* Volatile register r5 */
  child->xcp.regs[REG_R6] = context->r6;   /* Volatile register r6 */
  child->xcp.regs[REG_R7] = context->r7;   /* Volatile register r7 */
  child->xcp.regs[REG_R8] = context->r8;   /* Volatile register r8 */
  child->xcp.regs[REG_R9] = context->r9;   /* Volatile register r9 */
  child->xcp.regs[REG_R10] = context->r10; /* Volatile register r10 */
  child->xcp.regs[REG_FP] = newfp;         /* Frame pointer */
  child->xcp.regs[REG_SP] = newsp;         /* Stack pointer */

  /* And, finally, start the child task.  On a failure, task_vforkstart()
   * will discard the TCB by calling task_vforkabort().
   */

  return task_vforkstart(child);
}
pid_t up_vfork(const struct vfork_s *context) { struct tcb_s *parent = (FAR struct tcb_s *)g_readytorun.head; struct task_tcb_s *child; size_t stacksize; uint32_t newsp; #if CONFIG_MIPS32_FRAMEPOINTER uint32_t newfp; #endif uint32_t stackutil; int ret; svdbg("s0:%08x s1:%08x s2:%08x s3:%08x s4:%08x\n", context->s0, context->s1, context->s2, context->s3, context->s4); #if CONFIG_MIPS32_FRAMEPOINTER svdbg("s5:%08x s6:%08x s7:%08x\n", context->s5, context->s6, context->s7); #ifdef MIPS32_SAVE_GP svdbg("fp:%08x sp:%08x ra:%08x gp:%08x\n", context->fp, context->sp, context->ra, context->gp); #else svdbg("fp:%08x sp:%08x ra:%08x\n", context->fp context->sp, context->ra); #endif #else svdbg("s5:%08x s6:%08x s7:%08x s8:%08x\n", context->s5, context->s6, context->s7, context->s8); #ifdef MIPS32_SAVE_GP svdbg("sp:%08x ra:%08x gp:%08x\n", context->sp, context->ra, context->gp); #else svdbg("sp:%08x ra:%08x\n", context->sp, context->ra); #endif #endif /* Allocate and initialize a TCB for the child task. */ child = task_vforksetup((start_t)context->ra); if (!child) { sdbg("task_vforksetup failed\n"); return (pid_t)ERROR; } svdbg("Parent=%p Child=%p\n", parent, child); /* Get the size of the parent task's stack. Due to alignment operations, * the adjusted stack size may be smaller than the stack size originally * requrested. */ stacksize = parent->adj_stack_size + CONFIG_STACK_ALIGNMENT - 1; /* Allocate the stack for the TCB */ ret = up_create_stack((FAR struct tcb_s *)child, stacksize, parent->flags & TCB_FLAG_TTYPE_MASK); if (ret != OK) { sdbg("up_create_stack failed: %d\n", ret); task_vforkabort(child, -ret); return (pid_t)ERROR; } /* How much of the parent's stack was utilized? The MIPS uses * a push-down stack so that the current stack pointer should * be lower than the initial, adjusted stack pointer. The * stack usage should be the difference between those two. 
*/ DEBUGASSERT((uint32_t)parent->adj_stack_ptr > context->sp); stackutil = (uint32_t)parent->adj_stack_ptr - context->sp; svdbg("stacksize:%d stackutil:%d\n", stacksize, stackutil); /* Make some feeble effort to perserve the stack contents. This is * feeble because the stack surely contains invalid pointers and other * content that will not work in the child context. However, if the * user follows all of the caveats of vfork() usage, even this feeble * effort is overkill. */ newsp = (uint32_t)child->cmn.adj_stack_ptr - stackutil; memcpy((void *)newsp, (const void *)context->sp, stackutil); /* Was there a frame pointer in place before? */ #if CONFIG_MIPS32_FRAMEPOINTER if (context->fp <= (uint32_t)parent->adj_stack_ptr && context->fp >= (uint32_t)parent->adj_stack_ptr - stacksize) { uint32_t frameutil = (uint32_t)parent->adj_stack_ptr - context->fp; newfp = (uint32_t)child->cmn.adj_stack_ptr - frameutil; } else { newfp = context->fp; } svdbg("Old stack base:%08x SP:%08x FP:%08x\n", parent->adj_stack_ptr, context->sp, context->fp); svdbg("New stack base:%08x SP:%08x FP:%08x\n", child->cmn.adj_stack_ptr, newsp, newfp); #else svdbg("Old stack base:%08x SP:%08x\n", parent->adj_stack_ptr, context->sp); svdbg("New stack base:%08x SP:%08x\n", child->cmn.adj_stack_ptr, newsp); #endif /* Update the stack pointer, frame pointer, global pointer and saved * registers. When the child TCB was initialized, all of the values * were set to zero. up_initial_state() altered a few values, but the * return value in v0 should be cleared to zero, providing the * indication to the newly started child thread. 
*/ child->cmn.xcp.regs[REG_S0] = context->s0; /* Saved register s0 */ child->cmn.xcp.regs[REG_S1] = context->s1; /* Saved register s1 */ child->cmn.xcp.regs[REG_S2] = context->s2; /* Saved register s2 */ child->cmn.xcp.regs[REG_S3] = context->s3; /* Volatile register s3 */ child->cmn.xcp.regs[REG_S4] = context->s4; /* Volatile register s4 */ child->cmn.xcp.regs[REG_S5] = context->s5; /* Volatile register s5 */ child->cmn.xcp.regs[REG_S6] = context->s6; /* Volatile register s6 */ child->cmn.xcp.regs[REG_S7] = context->s7; /* Volatile register s7 */ #if CONFIG_MIPS32_FRAMEPOINTER child->cmn.xcp.regs[REG_FP] = newfp; /* Frame pointer */ #else child->cmn.xcp.regs[REG_S8] = context->s8; /* Volatile register s8 */ #endif child->cmn.xcp.regs[REG_SP] = newsp; /* Stack pointer */ #if MIPS32_SAVE_GP child->cmn.xcp.regs[REG_GP] = newsp; /* Global pointer */ #endif /* And, finally, start the child task. On a failure, task_vforkstart() * will discard the TCB by calling task_vforkabort(). */ return task_vforkstart(child); }
/* pthread_create: Create a new pthread.
 *
 * Allocates a TCB and join structure, binds the parent's task group and
 * (optionally) address environment, allocates the stack, resolves the
 * scheduling policy/parameters (inherited or from 'attr'), initializes
 * sporadic scheduling if selected, then activates the thread and waits on
 * pjoin->data_sem until the new thread has registered its join structure.
 *
 * Input Parameters:
 *   thread        - Receives the ID of the new thread (may be NULL)
 *   attr          - Thread attributes; NULL selects g_default_pthread_attr
 *   start_routine - Thread entry point
 *   arg           - Argument passed to start_routine
 *
 * Returned Value:
 *   Zero (OK) on success; an errno value on failure (pthread_create
 *   reports errors via the return value, not via errno).
 *
 * Fix relative to the previous revision: three occurrences of "&param"
 * had been corrupted to a mojibake sequence (in the sched_getparam and
 * clock_time2ticks calls); restored.
 */

int pthread_create(FAR pthread_t *thread, FAR const pthread_attr_t *attr,
                   pthread_startroutine_t start_routine, pthread_addr_t arg)
{
  FAR struct pthread_tcb_s *ptcb;
  FAR struct join_s *pjoin;
  struct sched_param param;
  int policy;
  int errcode;
  pid_t pid;
  int ret;
#ifdef HAVE_TASK_GROUP
  bool group_joined = false;
#endif

  /* If attributes were not supplied, use the default attributes */

  if (!attr)
    {
      attr = &g_default_pthread_attr;
    }

  /* Allocate a TCB for the new task. */

  ptcb = (FAR struct pthread_tcb_s *)kmm_zalloc(sizeof(struct pthread_tcb_s));
  if (!ptcb)
    {
      sdbg("ERROR: Failed to allocate TCB\n");
      return ENOMEM;
    }

#ifdef HAVE_TASK_GROUP
  /* Bind the parent's group to the new TCB (we have not yet joined the
   * group).
   */

  ret = group_bind(ptcb);
  if (ret < 0)
    {
      errcode = ENOMEM;
      goto errout_with_tcb;
    }
#endif

#ifdef CONFIG_ARCH_ADDRENV
  /* Share the address environment of the parent task group. */

  ret = up_addrenv_attach(ptcb->cmn.group,
                          (FAR struct tcb_s *)g_readytorun.head);
  if (ret < 0)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }
#endif

  /* Allocate a detachable structure to support pthread_join logic */

  pjoin = (FAR struct join_s *)kmm_zalloc(sizeof(struct join_s));
  if (!pjoin)
    {
      sdbg("ERROR: Failed to allocate join\n");
      errcode = ENOMEM;
      goto errout_with_tcb;
    }

  /* Allocate the stack for the TCB */

  ret = up_create_stack((FAR struct tcb_s *)ptcb, attr->stacksize,
                        TCB_FLAG_TTYPE_PTHREAD);
  if (ret != OK)
    {
      errcode = ENOMEM;
      goto errout_with_join;
    }

  /* Should we use the priority and scheduler specified in the pthread
   * attributes?  Or should we use the current thread's priority and
   * scheduler?
   */

  if (attr->inheritsched == PTHREAD_INHERIT_SCHED)
    {
      /* Get the priority (and any other scheduling parameters) for this
       * thread.
       */

      ret = sched_getparam(0, &param);
      if (ret == ERROR)
        {
          errcode = get_errno();
          goto errout_with_join;
        }

      /* Get the scheduler policy for this thread */

      policy = sched_getscheduler(0);
      if (policy == ERROR)
        {
          errcode = get_errno();
          goto errout_with_join;
        }
    }
  else
    {
      /* Use the scheduler policy and parameters from the attributes */

      policy = attr->policy;
      param.sched_priority = attr->priority;

#ifdef CONFIG_SCHED_SPORADIC
      param.sched_ss_low_priority = attr->low_priority;
      param.sched_ss_max_repl = attr->max_repl;
      param.sched_ss_repl_period.tv_sec = attr->repl_period.tv_sec;
      param.sched_ss_repl_period.tv_nsec = attr->repl_period.tv_nsec;
      param.sched_ss_init_budget.tv_sec = attr->budget.tv_sec;
      param.sched_ss_init_budget.tv_nsec = attr->budget.tv_nsec;
#endif
    }

#ifdef CONFIG_SCHED_SPORADIC
  if (policy == SCHED_SPORADIC)
    {
      FAR struct sporadic_s *sporadic;
      int repl_ticks;
      int budget_ticks;

      /* Convert timespec values to system clock ticks */

      (void)clock_time2ticks(&param.sched_ss_repl_period, &repl_ticks);
      (void)clock_time2ticks(&param.sched_ss_init_budget, &budget_ticks);

      /* The replenishment period must be greater than or equal to the
       * budget period.
       */

      if (repl_ticks < budget_ticks)
        {
          errcode = EINVAL;
          goto errout_with_join;
        }

      /* Initialize the sporadic policy */

      ret = sched_sporadic_initialize(&ptcb->cmn);
      if (ret >= 0)
        {
          sporadic = ptcb->cmn.sporadic;
          DEBUGASSERT(sporadic != NULL);

          /* Save the sporadic scheduling parameters */

          sporadic->hi_priority = param.sched_priority;
          sporadic->low_priority = param.sched_ss_low_priority;
          sporadic->max_repl = param.sched_ss_max_repl;
          sporadic->repl_period = repl_ticks;
          sporadic->budget = budget_ticks;

          /* And start the first replenishment interval */

          ret = sched_sporadic_start(&ptcb->cmn);
        }

      /* Handle any failures */

      if (ret < 0)
        {
          errcode = -ret;
          goto errout_with_join;
        }
    }
#endif

  /* Initialize the task control block */

  ret = pthread_schedsetup(ptcb, param.sched_priority, pthread_start,
                           start_routine);
  if (ret != OK)
    {
      errcode = EBUSY;
      goto errout_with_join;
    }

  /* Configure the TCB for a pthread receiving on parameter
   * passed by value
   */

  pthread_argsetup(ptcb, arg);

#ifdef HAVE_TASK_GROUP
  /* Join the parent's task group */

  ret = group_join(ptcb);
  if (ret < 0)
    {
      errcode = ENOMEM;
      goto errout_with_join;
    }

  group_joined = true;
#endif

  /* Attach the join info to the TCB. */

  ptcb->joininfo = (FAR void *)pjoin;

  /* Set the appropriate scheduling policy in the TCB */

  ptcb->cmn.flags &= ~TCB_FLAG_POLICY_MASK;
  switch (policy)
    {
      default:
        DEBUGPANIC();
        /* fallthrough -- unknown policies degrade to FIFO */

      case SCHED_FIFO:
        ptcb->cmn.flags |= TCB_FLAG_SCHED_FIFO;
        break;

#if CONFIG_RR_INTERVAL > 0
      case SCHED_RR:
        ptcb->cmn.flags |= TCB_FLAG_SCHED_RR;
        ptcb->cmn.timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
        break;
#endif

#ifdef CONFIG_SCHED_SPORADIC
      case SCHED_SPORADIC:
        ptcb->cmn.flags |= TCB_FLAG_SCHED_SPORADIC;
        break;
#endif

#if 0 /* Not supported */
      case SCHED_OTHER:
        ptcb->cmn.flags |= TCB_FLAG_SCHED_OTHER;
        break;
#endif
    }

  /* Get the assigned pid before we start the task (who knows what
   * could happen to ptcb after this!).  Copy this ID into the join
   * structure as well.
   */

  pid = (int)ptcb->cmn.pid;
  pjoin->thread = (pthread_t)pid;

  /* Initialize the semaphores in the join structure to zero. */

  ret = sem_init(&pjoin->data_sem, 0, 0);
  if (ret == OK)
    {
      ret = sem_init(&pjoin->exit_sem, 0, 0);
    }

  /* Activate the task */

  sched_lock();
  if (ret == OK)
    {
      ret = task_activate((FAR struct tcb_s *)ptcb);
    }

  if (ret == OK)
    {
      /* Wait for the task to actually get running and to register
       * its join structure.
       */

      (void)pthread_takesemaphore(&pjoin->data_sem);

      /* Return the thread information to the caller */

      if (thread)
        {
          *thread = (pthread_t)pid;
        }

      if (!pjoin->started)
        {
          ret = EINVAL;
        }

      sched_unlock();
      (void)sem_destroy(&pjoin->data_sem);
    }
  else
    {
      sched_unlock();
      dq_rem((FAR dq_entry_t *)ptcb, (FAR dq_queue_t *)&g_inactivetasks);
      (void)sem_destroy(&pjoin->data_sem);
      (void)sem_destroy(&pjoin->exit_sem);
      errcode = EIO;
      goto errout_with_join;
    }

  return ret;

errout_with_join:
  sched_kfree(pjoin);
  ptcb->joininfo = NULL;

errout_with_tcb:
#ifdef HAVE_TASK_GROUP
  /* Clear group binding */

  if (ptcb && !group_joined)
    {
      ptcb->cmn.group = NULL;
    }
#endif

  sched_releasetcb((FAR struct tcb_s *)ptcb, TCB_FLAG_TTYPE_PTHREAD);
  return errcode;
}