int do_the_work()
{
  /* Main daemon loop:  optionally fork into the background, install signal
   * handlers and the PID file, then run keepalive sessions until the daemon
   * state becomes inactive.  Returns 0 on normal shutdown.
   */

  if (dstate_dofork(gDaemonState))
    {
      daemonize();
    }

  sig_init();
  pid_file_init(gPidFileName);

  msg_note("-- NHT Login Daemon version %s started --\n", gDaemonVersion);

  while (dstate_active(gDaemonState))
    {
      msg_note("Starting keepalive session with timeout interval of %d\n",
               gEventTimeout);

      if (run_keepalive())
        {
          sleep(5); /* don't zap all the processor's time */
        }
    }

  /* FIX: the finish banner previously said "HNT" while the start banner
   * said "NHT"; unify on "NHT" so log scrapers see a consistent name.
   */

  msg_note("-- NHT Login Daemon version %s finished --\n", gDaemonVersion);

  pid_file_cleanup(gPidFileName);
  sig_cleanup();
  return 0;
}
void task_exithook(FAR _TCB *tcb, int status)
{
  /* Common exit-time processing:  run registered exit callbacks, leave the
   * task group, wake waiters, flush streams, and release signal resources.
   *
   * Under certain conditions, task_exithook() can be called multiple times.
   * A bit in the TCB was set the first time this function was called.  If
   * that bit is set, then just exit, doing nothing more.
   */

  if ((tcb->flags & TCB_FLAG_EXIT_PROCESSING) != 0)
    {
      return;
    }

  /* If exit function(s) were registered, call them now before we do any un-
   * initialization.  NOTE:  In the case of task_delete(), the exit function
   * will *not* be called on the thread execution of the task being deleted!
   */

  task_atexit(tcb);

  /* Call any registered on_exit function(s) */

  task_onexit(tcb, status);

  /* Leave the task group */

  task_leavegroup(tcb, status);

  /* Wakeup any tasks waiting for this task to exit */

  task_exitwakeup(tcb, status);

  /* Flush all streams (File descriptors will be closed when
   * the TCB is deallocated).
   */

#if CONFIG_NFILE_STREAMS > 0
  (void)lib_flushall(&tcb->group->tg_streamlist);
#endif

  /* Leave the task group.  Perhaps discarding any un-reaped child
   * status (no zombies here!)
   */

#ifdef HAVE_TASK_GROUP
  group_leave(tcb);
#endif

  /* Deallocate anything left in the TCB's queues */

#ifndef CONFIG_DISABLE_SIGNALS
  sig_cleanup(tcb); /* Deallocate Signal lists */
#endif

  /* This function can be re-entered in certain cases.  Set a flag
   * bit in the TCB to note that we have already completed this exit
   * processing.
   */

  tcb->flags |= TCB_FLAG_EXIT_PROCESSING;
}
void task_exithook(FAR _TCB *tcb, int status)
{
  /* Exit-time processing:  notify the instrumentation layer, flush streams,
   * release signal resources, wake any waitpid() waiters (passing them the
   * exit status), and call a registered atexit function.
   */

  /* Inform the instrumentation layer that the task has stopped */

  sched_note_stop(tcb);

  /* Flush all streams (File descriptors will be closed when
   * the TCB is deallocated).
   */

#if CONFIG_NFILE_STREAMS > 0
  (void)lib_flushall(tcb->streams);
#endif

  /* Deallocate anything left in the TCB's queues */

#ifndef CONFIG_DISABLE_SIGNALS
  sig_cleanup(tcb); /* Deallocate Signal lists */
#endif

  /* Wakeup any tasks waiting for this task to exit */

#ifdef CONFIG_SCHED_WAITPID /* Experimental */
  while (tcb->exitsem.semcount < 0)
    {
      /* "If more than one thread is suspended in waitpid() awaiting
       * termination of the same process, exactly one thread will return
       * the process status at the time of the target process termination."
       * Hmmm.. what do we return to the others?
       */

      if (tcb->stat_loc)
        {
          /* Status is stored in waitpid() format:  exit code in bits 8-15 */

          *tcb->stat_loc = status << 8;
          tcb->stat_loc = NULL;
        }

      /* Wake up the thread */

      sem_post(&tcb->exitsem);
    }
#endif

  /* If an exit function was registered, call it now.  NOTE:  In the case
   * of task_delete(), the exit function will *not* be called on the thread
   * execution of the task being deleted!
   */

#ifdef CONFIG_SCHED_ATEXIT
  if (tcb->exitfunc)
    {
      (*tcb->exitfunc)();
    }
#endif
}
void task_exithook(FAR _TCB *tcb, int status)
{
  /* Exit-time processing:  run exit callbacks, notify the parent via
   * SIGCHLD, wake waiters, flush streams, discard un-reaped child status,
   * release file and signal resources.
   *
   * If exit function(s) were registered, call them now before we do any un-
   * initialization.  NOTE:  In the case of task_delete(), the exit function
   * will *not* be called on the thread execution of the task being deleted!
   */

  task_atexit(tcb);

  /* Call any registered on_exit function(s) */

  task_onexit(tcb, status);

  /* Send SIGCHLD to the parent of the exit-ing task */

  task_sigchild(tcb, status);

  /* Wakeup any tasks waiting for this task to exit */

  task_exitwakeup(tcb, status);

  /* Flush all streams (File descriptors will be closed when
   * the TCB is deallocated).
   */

#if CONFIG_NFILE_STREAMS > 0
  (void)lib_flushall(tcb->streams);
#endif

  /* Discard any un-reaped child status (no zombies here!) */

#if defined(CONFIG_SCHED_HAVE_PARENT) && defined(CONFIG_SCHED_CHILD_STATUS)
  task_removechildren(tcb);
#endif

  /* Free all file-related resources now.  This gets called again just to
   * be certain when the TCB is deallocated.  However, we really need to
   * close files as soon as possible while we still have a functioning task.
   */

  (void)sched_releasefiles(tcb);

  /* Deallocate anything left in the TCB's queues */

#ifndef CONFIG_DISABLE_SIGNALS
  sig_cleanup(tcb); /* Deallocate Signal lists */
#endif
}
int task_restart(pid_t pid)
{
  /* Restart the task associated with pid:  remove its TCB from whatever
   * task list it is in, reset its priorities and processor state, and
   * re-activate it from its original entry point.
   *
   * Returns OK on success; ERROR on failure.  Restarting the calling task
   * (pid == 0 or pid of the caller) is not implemented.
   *
   * FIX: the original code returned ERROR from several paths without
   * calling sched_unlock(), leaving pre-emption disabled forever.  All
   * error paths now release the scheduler lock.
   */

  FAR _TCB  *rtcb;
  FAR _TCB  *tcb;
  int        status;
  irqstate_t state;

  /* Make sure this task does not become ready-to-run while
   * we are futzing with its TCB
   */

  sched_lock();

  /* Check if the task to restart is the calling task */

  rtcb = (FAR _TCB*)g_readytorun.head;
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      sched_unlock();
      return ERROR;
    }

  /* We are restarting some other task than ourselves */

  else
    {
      /* Find for the TCB associated with matching pid */

      tcb = sched_gettcb(pid);
      if (!tcb)
        {
          /* There is no TCB with this pid */

          sched_unlock();
          return ERROR;
        }

      /* Remove the TCB from whatever list it is in.  At this point, the
       * TCB should no longer be accessible to the system
       */

      state = irqsave();
      dq_rem((FAR dq_entry_t*)tcb,
             (dq_queue_t*)g_tasklisttable[tcb->task_state].list);
      tcb->task_state = TSTATE_TASK_INVALID;
      irqrestore(state);

      /* Deallocate anything left in the TCB's queues */

      sig_cleanup(tcb); /* Deallocate Signal lists */

      /* Reset the current task priority */

      tcb->sched_priority = tcb->init_priority;

      /* Reset the base task priority and the number of pending
       * reprioritizations
       */

#ifdef CONFIG_PRIORITY_INHERITANCE
      tcb->base_priority = tcb->init_priority;
#  if CONFIG_SEM_NNESTPRIO > 0
      tcb->npend_reprio = 0;
#  endif
#endif

      /* Re-initialize the processor-specific portion of the TCB.  This
       * will reset the entry point and the start-up parameters
       */

      up_initial_state(tcb);

      /* Add the task to the inactive task list */

      dq_addfirst((FAR dq_entry_t*)tcb, (dq_queue_t*)&g_inactivetasks);
      tcb->task_state = TSTATE_TASK_INACTIVE;

      /* Activate the task */

      status = task_activate(tcb);
      if (status != OK)
        {
          dq_rem((FAR dq_entry_t*)tcb, (dq_queue_t*)&g_inactivetasks);
          sched_releasetcb(tcb);

          sched_unlock();
          return ERROR;
        }
    }

  sched_unlock();
  return OK;
}
int task_restart(pid_t pid)
{
  /* Restart the task associated with pid:  recover its state, kill its
   * children, remove it from its task list, reset priorities and processor
   * state, and re-activate it.
   *
   * Returns OK on success; ERROR with errno set (ENOSYS for the calling
   * task or an SMP-running task, ESRCH for an unknown/non-task pid, or
   * the negated task_activate() error).
   *
   * FIX: the original code returned ERROR from several paths without
   * calling sched_unlock(), leaving pre-emption disabled forever.  All
   * error paths now release the scheduler lock.
   */

  FAR struct tcb_s *rtcb;
  FAR struct task_tcb_s *tcb;
  FAR dq_queue_t *tasklist;
  irqstate_t state;
  int status;

  /* Make sure this task does not become ready-to-run while
   * we are futzing with its TCB
   */

  sched_lock();

  /* Check if the task to restart is the calling task */

  rtcb = this_task();
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      set_errno(ENOSYS);
      sched_unlock();
      return ERROR;
    }

#ifdef CONFIG_SMP
  /* There is currently no capability to restart a task that is actively
   * running on another CPU either.  This is not the calling case so if it
   * is running, then it could only be running on a different CPU.
   *
   * Also, will need some interlocks to assure that no tasks are rescheduled
   * on any other CPU while we do this.
   */

#warning Missing SMP logic
  if (rtcb->task_state == TSTATE_TASK_RUNNING)
    {
      /* Not implemented */

      set_errno(ENOSYS);
      sched_unlock();
      return ERROR;
    }
#endif

  /* We are restarting some other task than ourselves */

  /* Find for the TCB associated with matching pid */

  tcb = (FAR struct task_tcb_s *)sched_gettcb(pid);
#ifndef CONFIG_DISABLE_PTHREAD
  if (!tcb || (tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
#else
  if (!tcb)
#endif
    {
      /* There is no TCB with this pid or, if there is, it is not a task. */

      set_errno(ESRCH);
      sched_unlock();
      return ERROR;
    }

  /* Try to recover from any bad states */

  task_recover((FAR struct tcb_s *)tcb);

  /* Kill any children of this thread */

#ifdef HAVE_GROUP_MEMBERS
  (void)group_killchildren(tcb);
#endif

  /* Remove the TCB from whatever list it is in.  After this point, the TCB
   * should no longer be accessible to the system
   */

#ifdef CONFIG_SMP
  tasklist = TLIST_HEAD(tcb->cmn.task_state, tcb->cmn.cpu);
#else
  tasklist = TLIST_HEAD(tcb->cmn.task_state);
#endif

  state = irqsave();
  dq_rem((FAR dq_entry_t *)tcb, tasklist);
  tcb->cmn.task_state = TSTATE_TASK_INVALID;
  irqrestore(state);

  /* Deallocate anything left in the TCB's queues */

  sig_cleanup((FAR struct tcb_s *)tcb); /* Deallocate Signal lists */

  /* Reset the current task priority */

  tcb->cmn.sched_priority = tcb->init_priority;

  /* Reset the base task priority and the number of pending
   * reprioritizations
   */

#ifdef CONFIG_PRIORITY_INHERITANCE
  tcb->cmn.base_priority = tcb->init_priority;
#  if CONFIG_SEM_NNESTPRIO > 0
  tcb->cmn.npend_reprio = 0;
#  endif
#endif

  /* Re-initialize the processor-specific portion of the TCB.  This will
   * reset the entry point and the start-up parameters
   */

  up_initial_state((FAR struct tcb_s *)tcb);

  /* Add the task to the inactive task list */

  dq_addfirst((FAR dq_entry_t *)tcb, (FAR dq_queue_t *)&g_inactivetasks);
  tcb->cmn.task_state = TSTATE_TASK_INACTIVE;

  /* Activate the task */

  status = task_activate((FAR struct tcb_s *)tcb);
  if (status != OK)
    {
      (void)task_delete(pid);
      set_errno(-status);
      sched_unlock();
      return ERROR;
    }

  sched_unlock();
  return OK;
}
int task_restart(pid_t pid)
{
  /* Restart the task associated with pid:  recover its state, kill its
   * children, remove it from its task list, reset priorities and processor
   * state, and re-activate it.  The whole TCB manipulation is performed
   * inside a critical section (with the running CPU paused on SMP).
   *
   * Returns OK on success; ERROR with errno set on failure.
   *
   * FIX: the original task_activate() failure path jumped to
   * errout_with_lock AFTER leave_critical_section() had already been
   * called, causing the critical section to be left twice.  That path
   * now jumps to errout instead.
   */

  FAR struct tcb_s *rtcb;
  FAR struct task_tcb_s *tcb;
  FAR dq_queue_t *tasklist;
  irqstate_t flags;
  int errcode;
#ifdef CONFIG_SMP
  int cpu;
#endif
  int ret;

  /* Check if the task to restart is the calling task */

  rtcb = this_task();
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      errcode = ENOSYS;
      goto errout;
    }

  /* We are restarting some other task than ourselves.  Make sure that the
   * task does not change its state while we are executing.  In the single
   * CPU case this could be done by disabling pre-emption.  But we will use
   * a little stronger medicine in the SMP case:  The task may be running
   * on another CPU.
   */

  flags = enter_critical_section();

  /* Find for the TCB associated with matching pid */

  tcb = (FAR struct task_tcb_s *)sched_gettcb(pid);
#ifndef CONFIG_DISABLE_PTHREAD
  if (!tcb || (tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
#else
  if (!tcb)
#endif
    {
      /* There is no TCB with this pid or, if there is, it is not a task. */

      errcode = ESRCH;
      goto errout_with_lock;
    }

#ifdef CONFIG_SMP
  /* If the task is running on another CPU, then pause that CPU.  We can
   * then manipulate the TCB of the restarted task and when we resume the
   * that CPU, the restart take effect.
   */

  cpu = sched_cpu_pause(&tcb->cmn);
#endif /* CONFIG_SMP */

  /* Try to recover from any bad states */

  task_recover((FAR struct tcb_s *)tcb);

  /* Kill any children of this thread */

#ifdef HAVE_GROUP_MEMBERS
  (void)group_killchildren(tcb);
#endif

  /* Remove the TCB from whatever list it is in.  After this point, the TCB
   * should no longer be accessible to the system
   */

#ifdef CONFIG_SMP
  tasklist = TLIST_HEAD(tcb->cmn.task_state, tcb->cmn.cpu);
#else
  tasklist = TLIST_HEAD(tcb->cmn.task_state);
#endif

  dq_rem((FAR dq_entry_t *)tcb, tasklist);
  tcb->cmn.task_state = TSTATE_TASK_INVALID;

  /* Deallocate anything left in the TCB's queues */

  sig_cleanup((FAR struct tcb_s *)tcb); /* Deallocate Signal lists */

  /* Reset the current task priority */

  tcb->cmn.sched_priority = tcb->cmn.init_priority;

  /* The task should restart with pre-emption disabled and not in a critical
   * section.
   */

  tcb->cmn.lockcount = 0;
#ifdef CONFIG_SMP
  tcb->cmn.irqcount = 0;
#endif

  /* Reset the base task priority and the number of pending
   * reprioritizations
   */

#ifdef CONFIG_PRIORITY_INHERITANCE
  tcb->cmn.base_priority = tcb->cmn.init_priority;
#  if CONFIG_SEM_NNESTPRIO > 0
  tcb->cmn.npend_reprio = 0;
#  endif
#endif

  /* Re-initialize the processor-specific portion of the TCB.  This will
   * reset the entry point and the start-up parameters
   */

  up_initial_state((FAR struct tcb_s *)tcb);

  /* Add the task to the inactive task list */

  dq_addfirst((FAR dq_entry_t *)tcb, (FAR dq_queue_t *)&g_inactivetasks);
  tcb->cmn.task_state = TSTATE_TASK_INACTIVE;

#ifdef CONFIG_SMP
  /* Resume the paused CPU (if any) */

  if (cpu >= 0)
    {
      ret = up_cpu_resume(cpu);
      if (ret < 0)
        {
          errcode = -ret;
          goto errout_with_lock;
        }
    }
#endif /* CONFIG_SMP */

  leave_critical_section(flags);

  /* Activate the task. */

  ret = task_activate((FAR struct tcb_s *)tcb);
  if (ret != OK)
    {
      (void)task_terminate(pid, true);
      errcode = -ret;

      /* The critical section was already left above; do not leave it
       * twice.
       */

      goto errout;
    }

  return OK;

errout_with_lock:
  leave_critical_section(flags);

errout:
  set_errno(errcode);
  return ERROR;
}
void task_exithook(FAR struct tcb_s *tcb, int status, bool nonblocking)
{
  /* Common exit-time processing.  When nonblocking is true (the _exit()
   * path), any step that might block — exit callbacks, stream flushing —
   * is skipped.
   *
   * Under certain conditions, task_exithook() can be called multiple times.
   * A bit in the TCB was set the first time this function was called.  If
   * that bit is set, then just exit doing nothing more.
   */

  if ((tcb->flags & TCB_FLAG_EXIT_PROCESSING) != 0)
    {
      return;
    }

#if defined(CONFIG_SCHED_ATEXIT) || defined(CONFIG_SCHED_ONEXIT)
  /* If exit function(s) were registered, call them now before we do any un-
   * initialization.
   *
   * NOTES:
   *
   * 1. In the case of task_delete(), the exit function will *not* be called
   *    on the thread execution of the task being deleted!  That is probably
   *    a bug.
   * 2. We cannot call the exit functions if nonblocking is requested:  These
   *    functions might block.
   * 3. This function will only be called with non-blocking == true
   *    when called through _exit().  _exit() behavior requires that
   *    the exit functions *not* be called.
   */

  if (!nonblocking)
    {
      task_atexit(tcb);

      /* Call any registered on_exit function(s) */

      task_onexit(tcb, status);
    }
#endif

  /* If the task was terminated by another task, it may be in an unknown
   * state.  Make some feeble effort to recover the state.
   */

  task_recover(tcb);

  /* Send the SIGCHILD signal to the parent task group */

  task_signalparent(tcb, status);

  /* Wakeup any tasks waiting for this task to exit */

  task_exitwakeup(tcb, status);

  /* If this is the last thread in the group, then flush all streams (File
   * descriptors will be closed when the TCB is deallocated).
   *
   * NOTES:
   * 1. We cannot flush the buffered I/O if nonblocking is requested.
   *    That might cause this logic to block.
   * 2. This function will only be called with non-blocking == true
   *    when called through _exit().  _exit() behavior does not
   *    require that the streams be flushed
   */

  if (!nonblocking)
    {
      task_flushstreams(tcb);
    }

#ifdef HAVE_TASK_GROUP
  /* Leave the task group.  Perhaps discarding any un-reaped child
   * status (no zombies here!)
   */

  group_leave(tcb);
#endif

#ifndef CONFIG_DISABLE_SIGNALS
  /* Deallocate anything left in the TCB's queues */

  sig_cleanup(tcb); /* Deallocate Signal lists */
#endif

  /* This function can be re-entered in certain cases.  Set a flag
   * bit in the TCB to note that we have already completed this exit
   * processing.
   */

  tcb->flags |= TCB_FLAG_EXIT_PROCESSING;
}
int task_restart(pid_t pid)
{
  /* Restart the task associated with pid:  recover its state, kill its
   * children, remove it from its task list, reset priorities and processor
   * state, and re-activate it.
   *
   * Returns OK on success; ERROR with errno set (ENOSYS for the calling
   * task, ESRCH for an unknown/non-task pid, or the negated
   * task_activate() error).
   *
   * FIXES:
   * - All error-return paths now call sched_unlock(); the original code
   *   returned with pre-emption still disabled.
   * - "#if HAVE_GROUP_MEMBERS" changed to "#ifdef HAVE_GROUP_MEMBERS" for
   *   consistency with the rest of the file (the bare #if evaluates to 0
   *   if the macro is undefined, silently skipping group_killchildren()).
   */

  FAR struct tcb_s *rtcb;
  FAR struct task_tcb_s *tcb;
  irqstate_t state;
  int status;

  /* Make sure this task does not become ready-to-run while
   * we are futzing with its TCB
   */

  sched_lock();

  /* Check if the task to restart is the calling task */

  rtcb = (FAR struct tcb_s *)g_readytorun.head;
  if ((pid == 0) || (pid == rtcb->pid))
    {
      /* Not implemented */

      set_errno(ENOSYS);
      sched_unlock();
      return ERROR;
    }

  /* We are restarting some other task than ourselves */

  else
    {
      /* Find for the TCB associated with matching pid */

      tcb = (FAR struct task_tcb_s *)sched_gettcb(pid);
#ifndef CONFIG_DISABLE_PTHREAD
      if (!tcb || (tcb->cmn.flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
#else
      if (!tcb)
#endif
        {
          /* There is no TCB with this pid or, if there is, it is not a
           * task.
           */

          set_errno(ESRCH);
          sched_unlock();
          return ERROR;
        }

      /* Try to recover from any bad states */

      task_recover((FAR struct tcb_s *)tcb);

      /* Kill any children of this thread */

#ifdef HAVE_GROUP_MEMBERS
      (void)group_killchildren(tcb);
#endif

      /* Remove the TCB from whatever list it is in.  At this point, the
       * TCB should no longer be accessible to the system
       */

      state = irqsave();
      dq_rem((FAR dq_entry_t*)tcb,
             (dq_queue_t*)g_tasklisttable[tcb->cmn.task_state].list);
      tcb->cmn.task_state = TSTATE_TASK_INVALID;
      irqrestore(state);

      /* Deallocate anything left in the TCB's queues */

      sig_cleanup((FAR struct tcb_s *)tcb); /* Deallocate Signal lists */

      /* Reset the current task priority */

      tcb->cmn.sched_priority = tcb->init_priority;

      /* Reset the base task priority and the number of pending
       * reprioritizations
       */

#ifdef CONFIG_PRIORITY_INHERITANCE
      tcb->cmn.base_priority = tcb->init_priority;
#  if CONFIG_SEM_NNESTPRIO > 0
      tcb->cmn.npend_reprio = 0;
#  endif
#endif

      /* Re-initialize the processor-specific portion of the TCB.  This
       * will reset the entry point and the start-up parameters
       */

      up_initial_state((FAR struct tcb_s *)tcb);

      /* Add the task to the inactive task list */

      dq_addfirst((FAR dq_entry_t*)tcb, (dq_queue_t*)&g_inactivetasks);
      tcb->cmn.task_state = TSTATE_TASK_INACTIVE;

      /* Activate the task */

      status = task_activate((FAR struct tcb_s *)tcb);
      if (status != OK)
        {
          (void)task_delete(pid);
          set_errno(-status);
          sched_unlock();
          return ERROR;
        }
    }

  sched_unlock();
  return OK;
}
static inline void task_atexit(FAR _TCB *tcb)
{
  /* Call (then nullify) any function(s) registered via atexit().
   * NOTE(review):  the enclosing "#ifdef CONFIG_SCHED_ATEXIT" opens before
   * this chunk; the matching "#else/#define" appears below.
   */

#if defined(CONFIG_SCHED_ATEXIT_MAX) && CONFIG_SCHED_ATEXIT_MAX > 1
  int index;

  /* Call each atexit function in reverse order of registration.  atexit()
   * functions are registered from lower to higher array indices; they must
   * be called in the reverse order of registration when the task exits,
   * i.e., from higher to lower indices.
   */

  for (index = CONFIG_SCHED_ATEXIT_MAX-1; index >= 0; index--)
    {
      if (tcb->atexitfunc[index])
        {
          /* Call the atexit function */

          (*tcb->atexitfunc[index])();

          /* Nullify the atexit function.  task_exithook may be called more
           * than once in most task exit scenarios.  Nullifying the atexit
           * function pointer will assure that the callback is performed
           * only once.
           */

          tcb->atexitfunc[index] = NULL;
        }
    }
#else
  if (tcb->atexitfunc)
    {
      /* Call the atexit function */

      (*tcb->atexitfunc)();

      /* Nullify the atexit function.  task_exithook may be called more
       * than once in most task exit scenarios.  Nullifying the atexit
       * function pointer will assure that the callback is performed only
       * once.
       */

      tcb->atexitfunc = NULL;
    }
#endif
#else
# define task_atexit(tcb)
#endif

/****************************************************************************
 * Name: task_onexit
 *
 * Description:
 *   Call any registered on_exit function(s)
 *
 ****************************************************************************/

#ifdef CONFIG_SCHED_ONEXIT
static inline void task_onexit(FAR _TCB *tcb, int status)
{
#if defined(CONFIG_SCHED_ONEXIT_MAX) && CONFIG_SCHED_ONEXIT_MAX > 1
  int index;

  /* Call each on_exit function in reverse order of registration.  on_exit()
   * functions are registered from lower to higher array indices; they must
   * be called in the reverse order of registration when the task exits,
   * i.e., from higher to lower indices.
   */

  for (index = CONFIG_SCHED_ONEXIT_MAX-1; index >= 0; index--)
    {
      if (tcb->onexitfunc[index])
        {
          /* Call the on_exit function */

          (*tcb->onexitfunc[index])(status, tcb->onexitarg[index]);

          /* Nullify the on_exit function.  task_exithook may be called
           * more than once in most task exit scenarios.  Nullifying the
           * on_exit function pointer will assure that the callback is
           * performed only once.
           */

          tcb->onexitfunc[index] = NULL;
        }
    }
#else
  if (tcb->onexitfunc)
    {
      /* Call the on_exit function */

      (*tcb->onexitfunc)(status, tcb->onexitarg);

      /* Nullify the on_exit function.  task_exithook may be called more
       * than once in most task exit scenarios.  Nullifying the on_exit
       * function pointer will assure that the callback is performed only
       * once.
       */

      tcb->onexitfunc = NULL;
    }
#endif
#else
# define task_onexit(tcb,status)
#endif

/****************************************************************************
 * Name: task_exitwakeup
 *
 * Description:
 *   Wakeup any tasks waiting for this task to exit
 *
 ****************************************************************************/

#ifdef CONFIG_SCHED_WAITPID
static inline void task_exitwakeup(FAR _TCB *tcb, int status)
{
  /* Wakeup any tasks waiting for this task to exit */

  while (tcb->exitsem.semcount < 0)
    {
      /* "If more than one thread is suspended in waitpid() awaiting
       * termination of the same process, exactly one thread will return
       * the process status at the time of the target process termination."
       * Hmmm.. what do we return to the others?
       */

      if (tcb->stat_loc)
        {
          /* Status is stored in waitpid() format:  exit code in
           * bits 8-15
           */

          *tcb->stat_loc = status << 8;
          tcb->stat_loc = NULL;
        }

      /* Wake up the thread */

      sem_post(&tcb->exitsem);
    }
}
#else
# define task_exitwakeup(tcb, status)
#endif

/****************************************************************************
 * Public Functions
 ****************************************************************************/

/****************************************************************************
 * Name: task_exithook
 *
 * Description:
 *   This function implements some of the internal logic of exit() and
 *   task_delete().  This function performs some cleanup and other actions
 *   required when a task exits:
 *
 *   - All open streams are flushed and closed.
 *   - All functions registered with atexit() and on_exit() are called, in
 *     the reverse order of their registration.
 *
 *   When called from exit(), the tcb still resides at the head of the ready-
 *   to-run list.  The following logic is safe because we will not be
 *   returning from the exit() call.
 *
 *   When called from task_delete() we are operating on a different thread;
 *   on the thread that called task_delete().  In this case, task_delete
 *   will have already removed the tcb from the ready-to-run list to prevent
 *   any further action on this task.
 *
 ****************************************************************************/

void task_exithook(FAR _TCB *tcb, int status)
{
  /* If exit function(s) were registered, call them now before we do any un-
   * initialization.  NOTE:  In the case of task_delete(), the exit function
   * will *not* be called on the thread execution of the task being deleted!
   */

  task_atexit(tcb);

  /* Call any registered on_exit function(s) */

  task_onexit(tcb, status);

  /* Wakeup any tasks waiting for this task to exit */

  task_exitwakeup(tcb, status);

  /* Flush all streams (File descriptors will be closed when
   * the TCB is deallocated).
   */

#if CONFIG_NFILE_STREAMS > 0
  (void)lib_flushall(tcb->streams);
#endif

  /* Free all file-related resources now.  This gets called again just to
   * be certain when the TCB is deallocated.  However, we really need to
   * close files as soon as possible while we still have a functioning
   * task.
   */

  (void)sched_releasefiles(tcb);

  /* Deallocate anything left in the TCB's queues */

#ifndef CONFIG_DISABLE_SIGNALS
  sig_cleanup(tcb); /* Deallocate Signal lists */
#endif
}