int main(int argc, char *argv[])
{
    // setup COM2/terminal
    bwsetfifo(COM2, OFF);

    // setup peripherals and handlers
    kernel_init();

    // create the idle task
    struct task *idle_task = task_create(&idle_task_entry, PRIORITY_IDLE, -1);
    scheduler_add(idle_task);

    // bootstrap the first user task
    struct task *first_task = task_create(&first_task_entry, PRIORITY_HIGHEST, -1);
    scheduler_add(first_task);

    long idleTicks = 0;
    long userTicks = 0; // accumulates *all* ticks (idle included), so this is really total time
    struct task *current_task;
    int request;
    long elapsed;

    while (1) {
        current_task = scheduler_get_next();
        if (current_task == 0) {
            break;
        }

ACTIVATE:
        // time the execution of the task
        elapsed = rtc_get_ticks();

        // run the user task
        request = task_activate(current_task);

        elapsed = rtc_get_ticks() - elapsed;
        if (current_task == idle_task) {
            idleTicks += elapsed;
        }
        userTicks += elapsed;

        switch (__interrupt_type) {
        case INT_TYPE_HWI:
            // handle the interrupt request and determine if we should
            // preempt the running task
            if (interrupts_handle_irq(current_task) == HWI_CONTINUE
                    && current_task != idle_task) {
                goto ACTIVATE;
            } else {
                scheduler_add(current_task);
            }
            break;

        case INT_TYPE_SWI:
            // handle the system call request and determine if we should shut down
            if (sys_handle_request(current_task, request) == -1) {
                goto shutdown;
            }
            break;

        default:
            KASSERT(0, "We got a weird interrupt type... %d", __interrupt_type);
            break;
        }
    }

shutdown:
    kernel_shutdown();

    // reset the scrolling window, in case it exists!
    bwprintf(COM2, CURSOR_SCROLL, 0, 999);
    bwprintf(COM2, CURSOR_CLEAR_SCREEN);

    // int i;
    // for (i = 0; i < MAX_NUM_TASKS; i++) {
    //     task_print(COM2, i);
    // }
    printSensorTicks();
    bwprintf(COM2, "Idle time: %d%%\r\n", (idleTicks * 100) / userTicks);
    bwprintf(COM2, "Kernel exiting.\r\n");

    return 0;
}
int task_restart(pid_t pid)
{
  FAR struct tcb_s *rtcb;
  FAR struct task_tcb_s *tcb;
  FAR dq_queue_t *tasklist;
  irqstate_t state;
  int status;

  /* Make sure this task does not become ready-to-run while
   * we are futzing with its TCB
   */

  sched_lock();

  /* Check if the task to restart is the calling task */

  rtcb = this_task();
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      set_errno(ENOSYS);
      sched_unlock();
      return ERROR;
    }

#ifdef CONFIG_SMP
  /* There is currently no capability to restart a task that is actively
   * running on another CPU either.  This is not the calling task, so if
   * it is running, then it could only be running on a different CPU.
   *
   * Also, we will need some interlocks to assure that no tasks are
   * rescheduled on any other CPU while we do this.
   */

#warning Missing SMP logic
  if (rtcb->task_state == TSTATE_TASK_RUNNING)
    {
      /* Not implemented */

      set_errno(ENOSYS);
      sched_unlock();
      return ERROR;
    }
#endif

  /* We are restarting some other task than ourselves */
  /* Find the TCB associated with the matching pid */

  tcb = (FAR struct task_tcb_s *)sched_gettcb(pid);
#ifndef CONFIG_DISABLE_PTHREAD
  if (!tcb || (tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
#else
  if (!tcb)
#endif
    {
      /* There is no TCB with this pid or, if there is, it is not a task. */

      set_errno(ESRCH);
      sched_unlock();
      return ERROR;
    }

  /* Try to recover from any bad states */

  task_recover((FAR struct tcb_s *)tcb);

  /* Kill any children of this thread */

#ifdef HAVE_GROUP_MEMBERS
  (void)group_killchildren(tcb);
#endif

  /* Remove the TCB from whatever list it is in.  After this point, the TCB
   * should no longer be accessible to the system
   */

#ifdef CONFIG_SMP
  tasklist = TLIST_HEAD(tcb->cmn.task_state, tcb->cmn.cpu);
#else
  tasklist = TLIST_HEAD(tcb->cmn.task_state);
#endif

  state = irqsave();
  dq_rem((FAR dq_entry_t *)tcb, tasklist);
  tcb->cmn.task_state = TSTATE_TASK_INVALID;
  irqrestore(state);

  /* Deallocate anything left in the TCB's queues */

  sig_cleanup((FAR struct tcb_s *)tcb); /* Deallocate Signal lists */

  /* Reset the current task priority */

  tcb->cmn.sched_priority = tcb->init_priority;

  /* Reset the base task priority and the number of pending
   * reprioritizations
   */

#ifdef CONFIG_PRIORITY_INHERITANCE
  tcb->cmn.base_priority = tcb->init_priority;
# if CONFIG_SEM_NNESTPRIO > 0
  tcb->cmn.npend_reprio = 0;
# endif
#endif

  /* Re-initialize the processor-specific portion of the TCB.  This will
   * reset the entry point and the start-up parameters
   */

  up_initial_state((FAR struct tcb_s *)tcb);

  /* Add the task to the inactive task list */

  dq_addfirst((FAR dq_entry_t *)tcb, (FAR dq_queue_t *)&g_inactivetasks);
  tcb->cmn.task_state = TSTATE_TASK_INACTIVE;

  /* Activate the task */

  status = task_activate((FAR struct tcb_s *)tcb);
  if (status != OK)
    {
      (void)task_delete(pid);
      set_errno(-status);
      sched_unlock();
      return ERROR;
    }

  sched_unlock();
  return OK;
}
int task_restart(pid_t pid)
{
  FAR _TCB *rtcb;
  FAR _TCB *tcb;
  int status;
  irqstate_t state;

  /* Make sure this task does not become ready-to-run while
   * we are futzing with its TCB
   */

  sched_lock();

  /* Check if the task to restart is the calling task */

  rtcb = (FAR _TCB *)g_readytorun.head;
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      sched_unlock();
      return ERROR;
    }

  /* We are restarting some other task than ourselves */

  else
    {
      /* Find the TCB associated with the matching pid */

      tcb = sched_gettcb(pid);
      if (!tcb)
        {
          /* There is no TCB with this pid */

          sched_unlock();
          return ERROR;
        }

      /* Remove the TCB from whatever list it is in.  At this point, the
       * TCB should no longer be accessible to the system
       */

      state = irqsave();
      dq_rem((FAR dq_entry_t *)tcb,
             (dq_queue_t *)g_tasklisttable[tcb->task_state].list);
      tcb->task_state = TSTATE_TASK_INVALID;
      irqrestore(state);

      /* Deallocate anything left in the TCB's queues */

      sig_cleanup(tcb); /* Deallocate Signal lists */

      /* Reset the current task priority */

      tcb->sched_priority = tcb->init_priority;

      /* Reset the base task priority and the number of pending
       * reprioritizations
       */

#ifdef CONFIG_PRIORITY_INHERITANCE
      tcb->base_priority = tcb->init_priority;
# if CONFIG_SEM_NNESTPRIO > 0
      tcb->npend_reprio = 0;
# endif
#endif

      /* Re-initialize the processor-specific portion of the TCB.
       * This will reset the entry point and the start-up parameters
       */

      up_initial_state(tcb);

      /* Add the task to the inactive task list */

      dq_addfirst((FAR dq_entry_t *)tcb, (dq_queue_t *)&g_inactivetasks);
      tcb->task_state = TSTATE_TASK_INACTIVE;

      /* Activate the task */

      status = task_activate(tcb);
      if (status != OK)
        {
          dq_rem((FAR dq_entry_t *)tcb, (dq_queue_t *)&g_inactivetasks);
          sched_releasetcb(tcb);
          sched_unlock();
          return ERROR;
        }
    }

  sched_unlock();
  return OK;
}
int exec_module(FAR const struct binary_s *binp)
{
  FAR _TCB *tcb;
#ifndef CONFIG_CUSTOM_STACK
  FAR uint32_t *stack;
#endif
  pid_t pid;
  int err;
  int ret;

  /* Sanity checking */

#ifdef CONFIG_DEBUG
  if (!binp || !binp->entrypt || binp->stacksize <= 0)
    {
      err = EINVAL;
      goto errout;
    }
#endif

  bdbg("Executing %s\n", binp->filename);

  /* Allocate a TCB for the new task. */

  tcb = (FAR _TCB *)kzalloc(sizeof(_TCB));
  if (!tcb)
    {
      err = ENOMEM;
      goto errout;
    }

  /* Allocate the stack for the new task */

#ifndef CONFIG_CUSTOM_STACK
  stack = (FAR uint32_t *)kmalloc(binp->stacksize);
  if (!stack)
    {
      err = ENOMEM;
      goto errout_with_tcb;
    }

  /* Initialize the task */

  ret = task_init(tcb, binp->filename, binp->priority, stack,
                  binp->stacksize, binp->entrypt, binp->argv);
#else
  /* Initialize the task (no stack arguments in the CONFIG_CUSTOM_STACK
   * configuration)
   */

  ret = task_init(tcb, binp->filename, binp->priority,
                  binp->entrypt, binp->argv);
#endif
  if (ret < 0)
    {
      err = errno;
      bdbg("task_init() failed: %d\n", err);
      goto errout_with_stack;
    }

  /* Note that tcb->flags are not modified.  0=normal task */
  /* tcb->flags |= TCB_FLAG_TTYPE_TASK; */

  /* Add the D-Space address as the PIC base address.  By convention, this
   * must be the first allocated address space.
   */

#ifdef CONFIG_PIC
  tcb->dspace = binp->alloc[0];

  /* Re-initialize the task's initial state to account for the new PIC base */

  up_initial_state(tcb);
#endif

  /* Assign the address environment to the task */

#ifdef CONFIG_ADDRENV
  ret = up_addrenv_assign(binp->addrenv, tcb);
  if (ret < 0)
    {
      err = -ret;
      bdbg("up_addrenv_assign() failed: %d\n", ret);
      goto errout_with_stack;
    }
#endif

  /* Get the assigned pid before we start the task */

  pid = tcb->pid;

  /* Execute all of the C++ static constructors */

#ifdef CONFIG_BINFMT_CONSTRUCTORS
  ret = exec_ctors(binp);
  if (ret < 0)
    {
      err = -ret;
      bdbg("exec_ctors() failed: %d\n", ret);
      goto errout_with_stack;
    }
#endif

  /* Then activate the task at the provided priority */

  ret = task_activate(tcb);
  if (ret < 0)
    {
      err = errno;
      bdbg("task_activate() failed: %d\n", err);
      goto errout_with_stack;
    }

  return (int)pid;

errout_with_stack:
#ifndef CONFIG_CUSTOM_STACK
  tcb->stack_alloc_ptr = NULL;
  sched_releasetcb(tcb);
  kfree(stack);
#else
  sched_releasetcb(tcb);
#endif
  goto errout;

errout_with_tcb:
  kfree(tcb);

errout:
  errno = err;
  bdbg("returning errno: %d\n", err);
  return ERROR;
}
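/* Usage sketch (not from the original sources): a minimal, hypothetical
 * caller of exec_module() above.  run_program() is an illustrative name;
 * only binary_s members that exec_module() itself references are used,
 * and the loader that fills them in is assumed to have run already.
 */

static int run_program(FAR struct binary_s *bin)
{
  int pid;

  /* bin->entrypt, bin->stacksize, bin->priority, bin->filename, and
   * bin->argv are assumed to be populated by the binary loader.
   */

  pid = exec_module(bin);
  if (pid < 0)
    {
      /* exec_module() returned ERROR and left the cause in errno */

      bdbg("exec_module failed: %d\n", errno);
      return ERROR;
    }

  /* On success the new task is already active; pid identifies it */

  return pid;
}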
int task_restart(pid_t pid)
{
  FAR struct tcb_s *rtcb;
  FAR struct task_tcb_s *tcb;
  FAR dq_queue_t *tasklist;
  irqstate_t flags;
  int errcode;
#ifdef CONFIG_SMP
  int cpu;
#endif
  int ret;

  /* Check if the task to restart is the calling task */

  rtcb = this_task();
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      errcode = ENOSYS;
      goto errout;
    }

  /* We are restarting some other task than ourselves.  Make sure that the
   * task does not change its state while we are executing.  In the single
   * CPU case this could be done by disabling pre-emption.  But we will use
   * a little stronger medicine in the SMP case:  The task may be running
   * on another CPU.
   */

  flags = enter_critical_section();

  /* Find the TCB associated with the matching pid */

  tcb = (FAR struct task_tcb_s *)sched_gettcb(pid);
#ifndef CONFIG_DISABLE_PTHREAD
  if (!tcb || (tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
#else
  if (!tcb)
#endif
    {
      /* There is no TCB with this pid or, if there is, it is not a task. */

      errcode = ESRCH;
      goto errout_with_lock;
    }

#ifdef CONFIG_SMP
  /* If the task is running on another CPU, then pause that CPU.  We can
   * then manipulate the TCB of the restarted task and, when we resume
   * that CPU, the restart will take effect.
   */

  cpu = sched_cpu_pause(&tcb->cmn);
#endif /* CONFIG_SMP */

  /* Try to recover from any bad states */

  task_recover((FAR struct tcb_s *)tcb);

  /* Kill any children of this thread */

#ifdef HAVE_GROUP_MEMBERS
  (void)group_killchildren(tcb);
#endif

  /* Remove the TCB from whatever list it is in.  After this point, the TCB
   * should no longer be accessible to the system
   */

#ifdef CONFIG_SMP
  tasklist = TLIST_HEAD(tcb->cmn.task_state, tcb->cmn.cpu);
#else
  tasklist = TLIST_HEAD(tcb->cmn.task_state);
#endif

  dq_rem((FAR dq_entry_t *)tcb, tasklist);
  tcb->cmn.task_state = TSTATE_TASK_INVALID;

  /* Deallocate anything left in the TCB's queues */

  sig_cleanup((FAR struct tcb_s *)tcb); /* Deallocate Signal lists */

  /* Reset the current task priority */

  tcb->cmn.sched_priority = tcb->cmn.init_priority;

  /* The task should restart with pre-emption disabled and not in a
   * critical section.
   */

  tcb->cmn.lockcount = 0;
#ifdef CONFIG_SMP
  tcb->cmn.irqcount = 0;
#endif

  /* Reset the base task priority and the number of pending
   * reprioritizations
   */

#ifdef CONFIG_PRIORITY_INHERITANCE
  tcb->cmn.base_priority = tcb->cmn.init_priority;
# if CONFIG_SEM_NNESTPRIO > 0
  tcb->cmn.npend_reprio = 0;
# endif
#endif

  /* Re-initialize the processor-specific portion of the TCB.  This will
   * reset the entry point and the start-up parameters
   */

  up_initial_state((FAR struct tcb_s *)tcb);

  /* Add the task to the inactive task list */

  dq_addfirst((FAR dq_entry_t *)tcb, (FAR dq_queue_t *)&g_inactivetasks);
  tcb->cmn.task_state = TSTATE_TASK_INACTIVE;

#ifdef CONFIG_SMP
  /* Resume the paused CPU (if any) */

  if (cpu >= 0)
    {
      ret = up_cpu_resume(cpu);
      if (ret < 0)
        {
          errcode = -ret;
          goto errout_with_lock;
        }
    }
#endif /* CONFIG_SMP */

  leave_critical_section(flags);

  /* Activate the task. */

  ret = task_activate((FAR struct tcb_s *)tcb);
  if (ret != OK)
    {
      (void)task_terminate(pid, true);
      errcode = -ret;

      /* Note that the critical section has already been left, so we must
       * not leave it a second time.
       */

      goto errout;
    }

  return OK;

errout_with_lock:
  leave_critical_section(flags);

errout:
  set_errno(errcode);
  return ERROR;
}
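/* Usage sketch (not part of the original sources): restarting a task by
 * pid and reporting the failure cause.  restart_worker() and worker_pid
 * are hypothetical names; the errno values are the ones set by
 * task_restart() above (ENOSYS, ESRCH, ...).
 */

void restart_worker(pid_t worker_pid)
{
  /* task_restart() returns OK on success; on failure it returns ERROR
   * with the cause available via errno.
   */

  if (task_restart(worker_pid) != OK)
    {
      serr("ERROR: task_restart(%d) failed: %d\n",
           (int)worker_pid, get_errno());
    }
}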
pid_t task_vforkstart(FAR struct task_tcb_s *child)
{
  struct tcb_s *parent = this_task();
  pid_t pid;
  int rc;
  int ret;

  sinfo("Starting Child TCB=%p, parent=%p\n", child, parent);
  DEBUGASSERT(child);

  /* Duplicate the original argument list in the forked child TCB */

  ret = vfork_argsetup(parent, child);
  if (ret < 0)
    {
      task_vforkabort(child, -ret);
      return ERROR;
    }

  /* Now we have enough in place that we can join the group */

#ifdef HAVE_TASK_GROUP
  ret = group_initialize(child);
  if (ret < 0)
    {
      task_vforkabort(child, -ret);
      return ERROR;
    }
#endif

  /* Get the assigned pid before we start the task */

  pid = (int)child->cmn.pid;

  /* Eliminate a race condition by disabling pre-emption.  The child task
   * can be instantiated, but cannot run until we call waitpid().  This
   * assures us that we cannot miss the death-of-child signal (only
   * needed in the SMP case).
   */

  sched_lock();

  /* Activate the task */

  ret = task_activate((FAR struct tcb_s *)child);
  if (ret < OK)
    {
      task_vforkabort(child, -ret);
      sched_unlock();
      return ERROR;
    }

  /* The child task has not yet run because pre-emption is disabled.
   * The child task has the same priority as the parent task, so that
   * would typically be the case anyway.  However, in the SMP
   * configuration, the child thread might already have run on
   * another CPU if pre-emption were not disabled.
   *
   * It is a requirement that the parent environment be stable while
   * vfork runs; the child thread is still dependent on things in the
   * parent thread... like the pointers into the parent thread's stack
   * which will still appear in the child's registers and environment.
   *
   * We assure that by waiting for the child thread to exit before
   * returning to the parent thread.  NOTE that pre-emption will be
   * re-enabled while we are waiting, giving the child thread the
   * opportunity to run.
   */

  rc = 0;

#ifdef CONFIG_DEBUG_FEATURES
  ret = waitpid(pid, &rc, 0);
  if (ret < 0)
    {
      serr("ERROR: waitpid failed: %d\n", errno);
    }
#else
  (void)waitpid(pid, &rc, 0);
#endif

  sched_unlock();
  return pid;
}
static int thread_create(FAR const char *name, uint8_t ttype, int priority,
                         int stack_size, main_t entry,
                         FAR char * const argv[])
{
  FAR struct task_tcb_s *tcb;
  pid_t pid;
  int errcode;
  int ret;

  /* Allocate a TCB for the new task. */

  tcb = (FAR struct task_tcb_s *)kmm_zalloc(sizeof(struct task_tcb_s));
  if (!tcb)
    {
      sdbg("ERROR: Failed to allocate TCB\n");
      errcode = ENOMEM;
      goto errout;
    }

  /* Allocate a new task group with privileges appropriate for the parent
   * thread type.
   */

#ifdef HAVE_TASK_GROUP
  ret = group_allocate(tcb, ttype);
  if (ret < 0)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }
#endif

  /* Associate file descriptors with the new task */

#if CONFIG_NFILE_DESCRIPTORS > 0 || CONFIG_NSOCKET_DESCRIPTORS > 0
  ret = group_setuptaskfiles(tcb);
  if (ret < OK)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }
#endif

  /* Allocate the stack for the TCB */

  ret = up_create_stack((FAR struct tcb_s *)tcb, stack_size, ttype);
  if (ret < OK)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }

  /* Initialize the task control block */

  ret = task_schedsetup(tcb, priority, task_start, entry, ttype);
  if (ret < OK)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }

  /* Setup to pass parameters to the new task */

  (void)task_argsetup(tcb, name, argv);

  /* Now we have enough in place that we can join the group */

#ifdef HAVE_TASK_GROUP
  ret = group_initialize(tcb);
  if (ret < 0)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }
#endif

  /* Get the assigned pid before we start the task */

  pid = (int)tcb->cmn.pid;

  /* Activate the task */

  ret = task_activate((FAR struct tcb_s *)tcb);
  if (ret < OK)
    {
      errcode = get_errno();

      /* The TCB was added to the inactive task list by task_schedsetup() */

      dq_rem((FAR dq_entry_t *)tcb, (dq_queue_t *)&g_inactivetasks);
      goto errout_with_tcb;
    }

  return pid;

errout_with_tcb:
  sched_releasetcb((FAR struct tcb_s *)tcb, ttype);

errout:
  set_errno(errcode);
  return ERROR;
}
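/* Usage sketch (an assumption, not necessarily the library's actual
 * wrapper): a public task_create()-style entry point would plausibly call
 * the static thread_create() above with the "normal task" thread type.
 * TCB_FLAG_TTYPE_TASK is referenced in a comment in exec_module() earlier;
 * the wrapper body here is illustrative only.
 */

int task_create(FAR const char *name, int priority, int stack_size,
                main_t entry, FAR char * const argv[])
{
  return thread_create(name, TCB_FLAG_TTYPE_TASK, priority, stack_size,
                       entry, argv);
}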
int task_restart(pid_t pid)
{
  FAR struct tcb_s *rtcb;
  FAR struct task_tcb_s *tcb;
  irqstate_t state;
  int status;

  /* Make sure this task does not become ready-to-run while
   * we are futzing with its TCB
   */

  sched_lock();

  /* Check if the task to restart is the calling task */

  rtcb = (FAR struct tcb_s *)g_readytorun.head;
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      set_errno(ENOSYS);
      sched_unlock();
      return ERROR;
    }

  /* We are restarting some other task than ourselves */

  else
    {
      /* Find the TCB associated with the matching pid */

      tcb = (FAR struct task_tcb_s *)sched_gettcb(pid);
#ifndef CONFIG_DISABLE_PTHREAD
      if (!tcb || (tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
#else
      if (!tcb)
#endif
        {
          /* There is no TCB with this pid or, if there is, it is not a
           * task.
           */

          set_errno(ESRCH);
          sched_unlock();
          return ERROR;
        }

      /* Try to recover from any bad states */

      task_recover((FAR struct tcb_s *)tcb);

      /* Kill any children of this thread */

#ifdef HAVE_GROUP_MEMBERS
      (void)group_killchildren(tcb);
#endif

      /* Remove the TCB from whatever list it is in.  At this point, the
       * TCB should no longer be accessible to the system
       */

      state = irqsave();
      dq_rem((FAR dq_entry_t *)tcb,
             (dq_queue_t *)g_tasklisttable[tcb->cmn.task_state].list);
      tcb->cmn.task_state = TSTATE_TASK_INVALID;
      irqrestore(state);

      /* Deallocate anything left in the TCB's queues */

      sig_cleanup((FAR struct tcb_s *)tcb); /* Deallocate Signal lists */

      /* Reset the current task priority */

      tcb->cmn.sched_priority = tcb->init_priority;

      /* Reset the base task priority and the number of pending
       * reprioritizations
       */

#ifdef CONFIG_PRIORITY_INHERITANCE
      tcb->cmn.base_priority = tcb->init_priority;
# if CONFIG_SEM_NNESTPRIO > 0
      tcb->cmn.npend_reprio = 0;
# endif
#endif

      /* Re-initialize the processor-specific portion of the TCB.
       * This will reset the entry point and the start-up parameters
       */

      up_initial_state((FAR struct tcb_s *)tcb);

      /* Add the task to the inactive task list */

      dq_addfirst((FAR dq_entry_t *)tcb, (dq_queue_t *)&g_inactivetasks);
      tcb->cmn.task_state = TSTATE_TASK_INACTIVE;

      /* Activate the task */

      status = task_activate((FAR struct tcb_s *)tcb);
      if (status != OK)
        {
          (void)task_delete(pid);
          set_errno(-status);
          sched_unlock();
          return ERROR;
        }
    }

  sched_unlock();
  return OK;
}
int main(void)
{
	struct task *next;

	/* Set the CPU speed */
	uint32_t skuid = read32(DEVICEID_BASE + DEVICEID_SKUID_OFFSET);
	uint32_t cpuspeed_id = skuid & DEVICEID_SKUID_CPUSPEED_MASK;
	uint32_t clksel_val = (1 << 19) | 12;
	if (cpuspeed_id == DEVICEID_SKUID_CPUSPEED_720)
		clksel_val |= (720 << 8);
	else if (cpuspeed_id == DEVICEID_SKUID_CPUSPEED_600)
		clksel_val |= (600 << 8);
	else
		panic("Unsupported CPU!");
	write32(CM_MPU_BASE + PRM_CLKSEL1_PLL_MPU_OFFSET, clksel_val);

	/* Basic hardware initialization */
	init_cpumodes();  // set up CPU modes for interrupt handling
	intc_init();      // initialize interrupt controller
	gpio_init();      // initialize gpio interrupt system

	/* Start up hardware */
	timers_init();    // must come first, since it initializes the watchdog
	eth_init();
	uart_init();

	/* For some reason, turning on the caches causes the kernel to hang
	   after finishing the third invocation. Maybe we have to clear the
	   caches here, or enable the MMU. */
	printk("mmu init\n");
	prep_pagetable();
	init_mmu();
	printk("cache init\n");
	init_cache();

	/* Initialize other interrupts */
	init_interrupts();

	/* Initialize task queues */
	init_tasks();

	/* Initialize idle task */
	syscall_spawn(NULL, 7, idle_task, NULL, 0, SPAWN_DAEMON);

	pmu_enable();
	trace_init();

	printk("userspace init\n");
	/* Initialize first user program */
	syscall_spawn(NULL, 6, init_task, NULL, 0, 0);

	/* Run the scheduler loop until only daemon tasks remain */
	while (nondaemon_count > 0) {
		next = schedule();
		task_activate(next);
		check_stack(next);
	}

	pmu_disable();
	intc_reset();
	eth_deinit();
	deinit_mmu();
	return 0;
}
int pthread_create(FAR pthread_t *thread, FAR pthread_attr_t *attr,
                   pthread_startroutine_t start_routine, pthread_addr_t arg)
{
  FAR _TCB *ptcb;
  FAR join_t *pjoin;
  int status;
  int priority;
#if CONFIG_RR_INTERVAL > 0
  int policy;
#endif
  pid_t pid;

  /* If attributes were not supplied, use the default attributes */

  if (!attr)
    {
      attr = &g_default_pthread_attr;
    }

  /* Allocate a TCB for the new task. */

  ptcb = (FAR _TCB *)kzalloc(sizeof(_TCB));
  if (!ptcb)
    {
      return ENOMEM;
    }

  /* Associate file descriptors with the new task */

  status = sched_setuppthreadfiles(ptcb);
  if (status != OK)
    {
      sched_releasetcb(ptcb);
      return status;
    }

  /* Share the parent's environment */

  (void)env_share(ptcb);

  /* Allocate a detachable structure to support pthread_join logic */

  pjoin = (FAR join_t *)kzalloc(sizeof(join_t));
  if (!pjoin)
    {
      sched_releasetcb(ptcb);
      return ENOMEM;
    }

  /* Allocate the stack for the TCB */

  status = up_create_stack(ptcb, attr->stacksize);
  if (status != OK)
    {
      sched_releasetcb(ptcb);
      sched_free(pjoin);
      return ENOMEM;
    }

  /* Should we use the priority and scheduler specified in the
   * pthread attributes?  Or should we use the current thread's
   * priority and scheduler?
   */

  if (attr->inheritsched == PTHREAD_INHERIT_SCHED)
    {
      /* Get the priority for this thread. */

      struct sched_param param;
      status = sched_getparam(0, &param);
      if (status == OK)
        {
          priority = param.sched_priority;
        }
      else
        {
          /* Fall back to the default thread priority */

          priority = SCHED_PRIORITY_DEFAULT;
        }

      /* Get the scheduler policy for this thread */

#if CONFIG_RR_INTERVAL > 0
      policy = sched_getscheduler(0);
      if (policy == ERROR)
        {
          policy = SCHED_FIFO;
        }
#endif
    }
  else
    {
      /* Use the priority and scheduler from the attributes */

      priority = attr->priority;
#if CONFIG_RR_INTERVAL > 0
      policy = attr->policy;
#endif
    }

  /* Mark this task as a pthread (this setting will be needed in
   * task_schedsetup() when up_initial_state() is called).
   */

  ptcb->flags |= TCB_FLAG_TTYPE_PTHREAD;

  /* Initialize the task control block */

  status = task_schedsetup(ptcb, priority, pthread_start,
                           (main_t)start_routine);
  if (status != OK)
    {
      sched_releasetcb(ptcb);
      sched_free(pjoin);
      return EBUSY;
    }

  /* Configure the TCB for a pthread receiving on parameter
   * passed by value
   */

  pthread_argsetup(ptcb, arg);

  /* Attach the join info to the TCB. */

  ptcb->joininfo = (void *)pjoin;

  /* If round robin scheduling is selected, set the appropriate flag
   * in the TCB.
   */

#if CONFIG_RR_INTERVAL > 0
  if (policy == SCHED_RR)
    {
      ptcb->flags |= TCB_FLAG_ROUND_ROBIN;
      ptcb->timeslice = CONFIG_RR_INTERVAL / MSEC_PER_TICK;
    }
#endif

  /* Get the assigned pid before we start the task (who knows what
   * could happen to ptcb after this!).  Copy this ID into the join
   * structure as well.
   */

  pid = (int)ptcb->pid;
  pjoin->thread = (pthread_t)pid;

  /* Initialize the semaphores in the join structure to zero. */

  status = sem_init(&pjoin->data_sem, 0, 0);
  if (status == OK)
    {
      status = sem_init(&pjoin->exit_sem, 0, 0);
    }

  /* Activate the task */

  sched_lock();
  if (status == OK)
    {
      status = task_activate(ptcb);
    }

  if (status == OK)
    {
      /* Wait for the task to actually get running and to register
       * its join_t
       */

      (void)pthread_takesemaphore(&pjoin->data_sem);

      /* Return the thread information to the caller */

      if (thread)
        {
          *thread = (pthread_t)pid;
        }

      if (!pjoin->started)
        {
          status = ERROR;
        }

      sched_unlock();
      (void)sem_destroy(&pjoin->data_sem);
    }
  else
    {
      sched_unlock();
      dq_rem((FAR dq_entry_t *)ptcb, (dq_queue_t *)&g_inactivetasks);
      (void)sem_destroy(&pjoin->data_sem);
      (void)sem_destroy(&pjoin->exit_sem);
      sched_releasetcb(ptcb);
      sched_free(pjoin);
      return EIO;
    }

  return OK;
}
#ifndef CONFIG_CUSTOM_STACK
static int thread_create(const char *name, uint8_t type, int priority,
                         int stack_size, main_t entry, const char **argv)
#else
static int thread_create(const char *name, uint8_t type, int priority,
                         main_t entry, const char **argv)
#endif
{
  FAR _TCB *tcb;
  pid_t pid;
  int ret;

  /* Allocate a TCB for the new task. */

  tcb = (FAR _TCB *)kzalloc(sizeof(_TCB));
  if (!tcb)
    {
      goto errout;
    }

  /* Associate file descriptors with the new task */

#if CONFIG_NFILE_DESCRIPTORS > 0 || CONFIG_NSOCKET_DESCRIPTORS > 0
  ret = sched_setuptaskfiles(tcb);
  if (ret != OK)
    {
      goto errout_with_tcb;
    }
#endif

  /* Clone the parent's task environment */

  (void)env_dup(tcb);

  /* Allocate the stack for the TCB */

#ifndef CONFIG_CUSTOM_STACK
  ret = up_create_stack(tcb, stack_size);
  if (ret != OK)
    {
      goto errout_with_tcb;
    }
#endif

  /* Mark the type of this thread (this setting will be needed in
   * task_schedsetup() when up_initial_state() is called).
   */

  tcb->flags |= type;

  /* Initialize the task control block */

  ret = task_schedsetup(tcb, priority, task_start, entry);
  if (ret != OK)
    {
      goto errout_with_tcb;
    }

  /* Setup to pass parameters to the new task */

  (void)task_argsetup(tcb, name, argv);

  /* Get the assigned pid before we start the task */

  pid = (int)tcb->pid;

  /* Activate the task */

  ret = task_activate(tcb);
  if (ret != OK)
    {
      /* The TCB was added to the inactive task list by task_schedsetup() */

      dq_rem((FAR dq_entry_t *)tcb, (dq_queue_t *)&g_inactivetasks);
      goto errout_with_tcb;
    }

  return pid;

errout_with_tcb:
  sched_releasetcb(tcb);

errout:
  errno = ENOMEM;
  return ERROR;
}
int pthread_create(FAR pthread_t *thread, FAR const pthread_attr_t *attr,
                   pthread_startroutine_t start_routine, pthread_addr_t arg)
{
  FAR struct pthread_tcb_s *ptcb;
  FAR struct join_s *pjoin;
  struct sched_param param;
  int policy;
  int errcode;
  pid_t pid;
  int ret;
#ifdef HAVE_TASK_GROUP
  bool group_joined = false;
#endif

  /* If attributes were not supplied, use the default attributes */

  if (!attr)
    {
      attr = &g_default_pthread_attr;
    }

  /* Allocate a TCB for the new task. */

  ptcb = (FAR struct pthread_tcb_s *)kmm_zalloc(sizeof(struct pthread_tcb_s));
  if (!ptcb)
    {
      sdbg("ERROR: Failed to allocate TCB\n");
      return ENOMEM;
    }

#ifdef HAVE_TASK_GROUP
  /* Bind the parent's group to the new TCB (we have not yet joined the
   * group).
   */

  ret = group_bind(ptcb);
  if (ret < 0)
    {
      errcode = ENOMEM;
      goto errout_with_tcb;
    }
#endif

#ifdef CONFIG_ARCH_ADDRENV
  /* Share the address environment of the parent task group. */

  ret = up_addrenv_attach(ptcb->cmn.group,
                          (FAR struct tcb_s *)g_readytorun.head);
  if (ret < 0)
    {
      errcode = -ret;
      goto errout_with_tcb;
    }
#endif

  /* Allocate a detachable structure to support pthread_join logic */

  pjoin = (FAR struct join_s *)kmm_zalloc(sizeof(struct join_s));
  if (!pjoin)
    {
      sdbg("ERROR: Failed to allocate join\n");
      errcode = ENOMEM;
      goto errout_with_tcb;
    }

  /* Allocate the stack for the TCB */

  ret = up_create_stack((FAR struct tcb_s *)ptcb, attr->stacksize,
                        TCB_FLAG_TTYPE_PTHREAD);
  if (ret != OK)
    {
      errcode = ENOMEM;
      goto errout_with_join;
    }

  /* Should we use the priority and scheduler specified in the pthread
   * attributes?  Or should we use the current thread's priority and
   * scheduler?
   */

  if (attr->inheritsched == PTHREAD_INHERIT_SCHED)
    {
      /* Get the priority (and any other scheduling parameters) for this
       * thread.
       */

      ret = sched_getparam(0, &param);
      if (ret == ERROR)
        {
          errcode = get_errno();
          goto errout_with_join;
        }

      /* Get the scheduler policy for this thread */

      policy = sched_getscheduler(0);
      if (policy == ERROR)
        {
          errcode = get_errno();
          goto errout_with_join;
        }
    }
  else
    {
      /* Use the scheduler policy and parameters from the attributes */

      policy               = attr->policy;
      param.sched_priority = attr->priority;

#ifdef CONFIG_SCHED_SPORADIC
      param.sched_ss_low_priority        = attr->low_priority;
      param.sched_ss_max_repl            = attr->max_repl;
      param.sched_ss_repl_period.tv_sec  = attr->repl_period.tv_sec;
      param.sched_ss_repl_period.tv_nsec = attr->repl_period.tv_nsec;
      param.sched_ss_init_budget.tv_sec  = attr->budget.tv_sec;
      param.sched_ss_init_budget.tv_nsec = attr->budget.tv_nsec;
#endif
    }

#ifdef CONFIG_SCHED_SPORADIC
  if (policy == SCHED_SPORADIC)
    {
      FAR struct sporadic_s *sporadic;
      int repl_ticks;
      int budget_ticks;

      /* Convert timespec values to system clock ticks */

      (void)clock_time2ticks(&param.sched_ss_repl_period, &repl_ticks);
      (void)clock_time2ticks(&param.sched_ss_init_budget, &budget_ticks);

      /* The replenishment period must be greater than or equal to the
       * budget period.
       */

      if (repl_ticks < budget_ticks)
        {
          errcode = EINVAL;
          goto errout_with_join;
        }

      /* Initialize the sporadic policy */

      ret = sched_sporadic_initialize(&ptcb->cmn);
      if (ret >= 0)
        {
          sporadic = ptcb->cmn.sporadic;
          DEBUGASSERT(sporadic != NULL);

          /* Save the sporadic scheduling parameters */

          sporadic->hi_priority  = param.sched_priority;
          sporadic->low_priority = param.sched_ss_low_priority;
          sporadic->max_repl     = param.sched_ss_max_repl;
          sporadic->repl_period  = repl_ticks;
          sporadic->budget       = budget_ticks;

          /* And start the first replenishment interval */

          ret = sched_sporadic_start(&ptcb->cmn);
        }

      /* Handle any failures */

      if (ret < 0)
        {
          errcode = -ret;
          goto errout_with_join;
        }
    }
#endif

  /* Initialize the task control block */

  ret = pthread_schedsetup(ptcb, param.sched_priority, pthread_start,
                           start_routine);
  if (ret != OK)
    {
      errcode = EBUSY;
      goto errout_with_join;
    }

  /* Configure the TCB for a pthread receiving on parameter
   * passed by value
   */

  pthread_argsetup(ptcb, arg);

#ifdef HAVE_TASK_GROUP
  /* Join the parent's task group */

  ret = group_join(ptcb);
  if (ret < 0)
    {
      errcode = ENOMEM;
      goto errout_with_join;
    }

  group_joined = true;
#endif

  /* Attach the join info to the TCB. */

  ptcb->joininfo = (FAR void *)pjoin;

  /* Set the appropriate scheduling policy in the TCB */

  ptcb->cmn.flags &= ~TCB_FLAG_POLICY_MASK;
  switch (policy)
    {
      default:
        DEBUGPANIC();
      case SCHED_FIFO:
        ptcb->cmn.flags |= TCB_FLAG_SCHED_FIFO;
        break;

#if CONFIG_RR_INTERVAL > 0
      case SCHED_RR:
        ptcb->cmn.flags    |= TCB_FLAG_SCHED_RR;
        ptcb->cmn.timeslice = MSEC2TICK(CONFIG_RR_INTERVAL);
        break;
#endif

#ifdef CONFIG_SCHED_SPORADIC
      case SCHED_SPORADIC:
        ptcb->cmn.flags |= TCB_FLAG_SCHED_SPORADIC;
        break;
#endif

#if 0 /* Not supported */
      case SCHED_OTHER:
        ptcb->cmn.flags |= TCB_FLAG_SCHED_OTHER;
        break;
#endif
    }

  /* Get the assigned pid before we start the task (who knows what
   * could happen to ptcb after this!).  Copy this ID into the join
   * structure as well.
   */

  pid = (int)ptcb->cmn.pid;
  pjoin->thread = (pthread_t)pid;

  /* Initialize the semaphores in the join structure to zero. */

  ret = sem_init(&pjoin->data_sem, 0, 0);
  if (ret == OK)
    {
      ret = sem_init(&pjoin->exit_sem, 0, 0);
    }

  /* Activate the task */

  sched_lock();
  if (ret == OK)
    {
      ret = task_activate((FAR struct tcb_s *)ptcb);
    }

  if (ret == OK)
    {
      /* Wait for the task to actually get running and to register
       * its join structure.
       */

      (void)pthread_takesemaphore(&pjoin->data_sem);

      /* Return the thread information to the caller */

      if (thread)
        {
          *thread = (pthread_t)pid;
        }

      if (!pjoin->started)
        {
          ret = EINVAL;
        }

      sched_unlock();
      (void)sem_destroy(&pjoin->data_sem);
    }
  else
    {
      sched_unlock();
      dq_rem((FAR dq_entry_t *)ptcb, (FAR dq_queue_t *)&g_inactivetasks);
      (void)sem_destroy(&pjoin->data_sem);
      (void)sem_destroy(&pjoin->exit_sem);

      errcode = EIO;
      goto errout_with_join;
    }

  return ret;

errout_with_join:
  sched_kfree(pjoin);
  ptcb->joininfo = NULL;

errout_with_tcb:
#ifdef HAVE_TASK_GROUP
  /* Clear group binding */

  if (ptcb && !group_joined)
    {
      ptcb->cmn.group = NULL;
    }
#endif

  sched_releasetcb((FAR struct tcb_s *)ptcb, TCB_FLAG_TTYPE_PTHREAD);
  return errcode;
}
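/* Usage sketch (not from the original sources): creating a joinable
 * pthread with explicit, non-inherited scheduling attributes.  worker(),
 * start_worker(), and the priority value 100 are hypothetical; the attr
 * fields set directly here (policy, priority, inheritsched) are the same
 * fields pthread_create() above reads.
 */

static pthread_addr_t worker(pthread_addr_t arg)
{
  /* ... thread body ... */

  return NULL;
}

static int start_worker(void)
{
  pthread_t tid;
  pthread_attr_t attr;
  int ret;

  (void)pthread_attr_init(&attr);
  attr.policy       = SCHED_FIFO;            /* Fixed-priority scheduling */
  attr.priority     = 100;                   /* Hypothetical priority */
  attr.inheritsched = PTHREAD_EXPLICIT_SCHED;

  /* pthread_create() returns 0 on success or an errno value on failure */

  ret = pthread_create(&tid, &attr, worker, NULL);
  if (ret != 0)
    {
      return ret;
    }

  /* Wait for the worker to run to completion */

  return pthread_join(tid, NULL);
}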