int task_activate(FAR _TCB *tcb)
{
  /* Make a new or restarted task ready-to-run.  Runs with interrupts
   * disabled so the state transition is atomic.
   */

  irqstate_t irqstate = irqsave();

#ifdef CONFIG_SCHED_INSTRUMENTATION
  /* A TCB that is not in the INACTIVE state is being re-started; tell
   * the instrumentation layer that the previous instance stopped before
   * reporting the new start.
   */

  if (tcb->task_state != TSTATE_TASK_INACTIVE)
    {
      sched_note_stop(tcb);
    }

  /* Report that the task has (re-)started */

  sched_note_start(tcb);
#endif

  up_unblock_task(tcb);
  irqrestore(irqstate);
  return OK;
}
static void sig_timeout(int argc, uint32_t itcb)
{
  /* Watchdog expiration handler for a timed signal wait.  On many small
   * machines pointers are encoded and cannot simply be cast from
   * uint32_t to struct tcb_s *; a union (same trick as wdogparm_t)
   * performs the conversion safely.  This could be conditioned on
   * CONFIG_CAN_CAST_POINTERS, but the union is cheap in any case.
   */

  union
  {
    FAR struct tcb_s *wtcb;
    uint32_t itcb;
  } arg;

  arg.itcb = itcb;
  ASSERT(arg.wtcb);

  /* The task may already have been released by the time the watchdog
   * fires -- only act if it is still blocked waiting for a signal.
   */

  if (arg.wtcb->task_state == TSTATE_WAIT_SIG)
    {
      /* Report the wakeup as a timeout, not a delivered signal */

      arg.wtcb->sigunbinfo.si_signo           = SIG_WAIT_TIMEOUT;
      arg.wtcb->sigunbinfo.si_code            = SI_TIMER;
      arg.wtcb->sigunbinfo.si_value.sival_int = 0;
#ifdef CONFIG_SCHED_HAVE_PARENT
      arg.wtcb->sigunbinfo.si_pid             = 0;  /* Not applicable */
      arg.wtcb->sigunbinfo.si_status          = OK;
#endif
      up_unblock_task(arg.wtcb);
    }
}
/* Post-process a message removed from a message queue: copy the payload
 * (and optionally its priority) to the caller, free the internal message
 * structure, and wake the highest-priority task (if any) waiting for the
 * queue to become not-full.
 *
 * mqdes   - Message queue descriptor the message came from
 * mqmsg   - The internal message (consumed and freed here)
 * ubuffer - Caller's buffer to receive the message data
 * prio    - If non-NULL, receives the message priority
 *
 * Returns the length of the message copied into ubuffer.
 *
 * NOTE(review): ubuffer is assumed large enough for mqmsg->msglen bytes --
 * presumably validated by the caller before this point.
 */

ssize_t mq_doreceive(mqd_t mqdes, mqmsg_t *mqmsg, void *ubuffer, int *prio)
{
  FAR struct tcb_s *btcb;
  irqstate_t saved_state;
  FAR msgq_t *msgq;
  ssize_t rcvmsglen;

  /* Get the length of the message (also the return value) */

  rcvmsglen = mqmsg->msglen;

  /* Copy the message into the caller's buffer */

  memcpy(ubuffer, (const void*)mqmsg->mail, rcvmsglen);

  /* Copy the message priority as well (if a buffer is provided) */

  if (prio)
    {
      *prio = mqmsg->priority;
    }

  /* We are done with the message.  Deallocate it now. */

  mq_msgfree(mqmsg);

  /* Check if any tasks are waiting for the MQ not full event. */

  msgq = mqdes->msgq;
  if (msgq->nwaitnotfull > 0)
    {
      /* Find the highest priority task that is waiting for
       * this queue to be not-full in g_waitingformqnotfull list.
       * This must be performed in a critical section because
       * messages can be sent from interrupt handlers.
       */

      saved_state = irqsave();
      for (btcb = (FAR struct tcb_s*)g_waitingformqnotfull.head;
           btcb && btcb->msgwaitq != msgq;
           btcb = btcb->flink);

      /* If one was found, unblock it.  NOTE:  There is a race
       * condition here: the queue might be full again by the
       * time the task is unblocked.
       */

      ASSERT(btcb);

      btcb->msgwaitq = NULL;
      msgq->nwaitnotfull--;
      up_unblock_task(btcb);
      irqrestore(saved_state);
    }

  /* Return the length of the message transferred to the user buffer */

  return rcvmsglen;
}
/* Handle a completed page fill: make the task whose fault triggered the
 * fill (g_pftcb) ready-to-run again.  Assumes g_pftcb is non-NULL --
 * i.e. a fill was actually in progress.
 */

static inline void pg_fillcomplete(void)
{
  /* Call up_unblocktask(g_pftcb) to make the task that just
   * received the fill ready-to-run.
   */

  pglldbg("Restarting TCB: %p\n", g_pftcb);
  up_unblock_task(g_pftcb);
}
/* Cancel a task's pending wait on a message queue (e.g. because a signal
 * arrived or the wait timed out), set the task's errno to errcode, and
 * make the task ready-to-run.  Safe to call even if the task's state has
 * already changed -- the function re-checks under disabled interrupts.
 *
 * wtcb    - TCB of the (possibly) waiting task
 * errcode - errno value to report to the awakened task
 */

void mq_waitirq(FAR struct tcb_s *wtcb, int errcode)
{
  FAR struct mqueue_inode_s *msgq;
  irqstate_t saved_state;

  /* Disable interrupts.  This is necessary because an interrupt handler may
   * attempt to send a message while we are doing this.
   */

  saved_state = irqsave();

  /* It is possible that an interrupt/context switch beat us to the punch and
   * already changed the task's state.  NOTE:  The operations within the if
   * are safe because interrupts are always disabled when the msgwaitq,
   * nwaitnotempty, and nwaitnotfull fields are modified.
   */

  if (wtcb->task_state == TSTATE_WAIT_MQNOTEMPTY ||
      wtcb->task_state == TSTATE_WAIT_MQNOTFULL)
    {
      /* Get the message queue associated with the waiter from the TCB */

      msgq = wtcb->msgwaitq;
      DEBUGASSERT(msgq);

      wtcb->msgwaitq = NULL;

      /* Decrement the count of waiters and cancel the wait */

      if (wtcb->task_state == TSTATE_WAIT_MQNOTEMPTY)
        {
          DEBUGASSERT(msgq->nwaitnotempty > 0);
          msgq->nwaitnotempty--;
        }
      else
        {
          DEBUGASSERT(msgq->nwaitnotfull > 0);
          msgq->nwaitnotfull--;
        }

      /* Mark the errno value for the thread. */

      wtcb->pterrno = errcode;

      /* Restart the task. */

      up_unblock_task(wtcb);
    }

  /* Interrupts may now be enabled. */

  irqrestore(saved_state);
}
int pg_worker(int argc, char *argv[]) { irqstate_t flags; /* Loop forever -- Notice that interrupts will be disable at all times that * this thread runs. That is so that we con't lose signals or have * asynchronous page faults. * * All interrupt logic as well as all page fill worker thread logic must * be locked in memory. Therefore, keeping interrupts disabled here * should prevent any concurrent page faults. Any page faults or page * fill completions should occur while this thread sleeps. */ pglldbg("Started\n"); flags = irqsave(); for (;;) { /* Wait awhile. We will wait here until either the configurable timeout * elapses or until we are awakened by a signal (which terminates the * usleep with an EINTR error). Note that interrupts will be re-enabled * while this task sleeps. * * The timeout is a failsafe that will handle any cases where a single * is lost (that would really be a bug and shouldn't happen!) and also * supports timeouts for case of non-blocking, asynchronous fills. */ usleep(CONFIG_PAGING_WORKPERIOD); /* The page fill worker thread will be awakened on one of three conditions: * * - When signaled by pg_miss(), the page fill worker thread will be awakenend, * - if CONFIG_PAGING_BLOCKINGFILL is not defined, from pg_callback() * after completing a page fill, or * - On a configurable timeout expires with no activity. * * Interrupts are still disabled. */ #ifndef CONFIG_PAGING_BLOCKINGFILL /* For the non-blocking up_fillpage(), the page fill worker thread will detect * that the page fill is complete when it is awakened with g_pftcb non-NULL * and fill completion status from pg_callback. */ if (g_pftcb != NULL) { /* If it is a real page fill completion event, then the result of the page * fill will be in g_fillresult and will not be equal to -EBUSY. 
*/ if (g_fillresult != -EBUSY) { /* Any value other than OK, brings the system down */ ASSERT(g_fillresult == OK); /* Handle the successful page fill complete event by restarting the * task that was blocked waiting for this page fill. */ pglldbg("Restarting TCB: %p\n", g_pftcb); up_unblock_task(g_pftcb);; /* Yes .. Start the next asynchronous fill. Check the return * value to see a fill was actually started (false means that * no fill was started). */ pgllvdbg("Calling pg_startfill\n"); if (!pg_startfill()) { /* No fill was started. This can mean only that all queued * page fill actions have and been completed and there is * nothing more to do. */ pgllvdbg("Call pg_alldone()\n"); pg_alldone(); } } /* If a configurable timeout period expires with no page fill completion * event, then declare a failure. */ #ifdef CONFIG_PAGING_TIMEOUT_TICKS else { lldbg("Timeout!\n"); ASSERT(clock_systimer() - g_starttime < CONFIG_PAGING_TIMEOUT_TICKS); } #endif } /* Otherwise, this might be a page fill initiation event. When * awakened from pg_miss(), no fill will be in progress and * g_pftcb will be NULL. */ else { /* Are there tasks blocked and waiting for a fill? If so, * pg_startfill() will start the asynchronous fill (and set * g_pftcb). */ pgllvdbg("Calling pg_startfill\n"); (void)pg_startfill(); } #else /* Are there tasks blocked and waiting for a fill? Loop until all * pending fills have been processed. */ for (;;) { /* Yes .. Start the fill and block until the fill completes. * Check the return value to see a fill was actually performed. * (false means that no fill was perforemd). */ pgllvdbg("Calling pg_startfill\n"); if (!pg_startfill()) { /* Break out of the loop -- there is nothing more to do */ break; } /* Handle the page fill complete event by restarting the * task that was blocked waiting for this page fill. In the * non-blocking fill case, the page fill worker thread will * know that the page fill is complete when pg_startfill() * returns true. 
*/ pgllvdbg("Restarting TCB: %p\n", g_pftcb); up_unblock_task(g_pftcb);; } /* All queued fills have been processed */ pgllvdbg("Call pg_alldone()\n"); pg_alldone(); #endif } return OK; /* To keep some compilers happy */ }
/* Remove the next task requiring a page fill from g_waitingforfill and
 * leave it in g_pftcb.  Tasks whose page has already been mapped (by a
 * concurrent fault on the same page) are simply restarted and skipped.
 * May also lower the page fill worker thread's priority to match the
 * priority of the task being serviced.
 *
 * Returns true if g_pftcb now points at a task that needs a fill;
 * false if the wait queue is empty.
 */

static inline bool pg_dequeue(void)
{
  /* Loop until either (1) the TCB of a task that requires a fill is found, OR
   * (2) the g_watingforfill list becomes empty.
   */

  do
    {
      /* Remove the TCB from the head of the list (if any) */

      g_pftcb = (FAR struct tcb_s *)dq_remfirst((dq_queue_t*)&g_waitingforfill);
      pgllvdbg("g_pftcb: %p\n", g_pftcb);
      if (g_pftcb != NULL)
        {
          /* Call the architecture-specific function up_checkmapping() to see if
           * the page fill still needs to be performed.  In certain conditions,
           * the page fault may occur on several threads for the same page and
           * be queued multiple times.  In this corner case, the blocked task
           * will simply be restarted.
           */

          if (!up_checkmapping(g_pftcb))
            {
              /* This page needs to be filled.  pg_miss bumps up
               * the priority of the page fill worker thread as each
               * TCB is added to the g_waitingforfill list.  So we
               * may need to also drop the priority of the worker
               * thread as the next TCB comes off of the list.
               *
               * If wtcb->sched_priority > CONFIG_PAGING_DEFPRIO,
               * then the page fill worker thread is executing at
               * an elevated priority that may be reduced.
               *
               * If wtcb->sched_priority > g_pftcb->sched_priority
               * then the page fill worker thread is executing at
               * a higher priority than is appropriate for this
               * fill (this priority can get re-boosted by pg_miss()
               * if a new higher priority fill is required).
               */

              /* NOTE(review): g_readytorun.head is presumably the currently
               * running thread, i.e. the page fill worker itself.
               */

              FAR struct tcb_s *wtcb = (FAR struct tcb_s *)g_readytorun.head;
              if (wtcb->sched_priority > CONFIG_PAGING_DEFPRIO &&
                  wtcb->sched_priority > g_pftcb->sched_priority)
                {
                  /* Don't reduce the priority of the page fill
                   * worker thread lower than the configured
                   * minimum.
                   */

                  int priority = g_pftcb->sched_priority;
                  if (priority < CONFIG_PAGING_DEFPRIO)
                    {
                      priority = CONFIG_PAGING_DEFPRIO;
                    }

                  /* Reduce the priority of the page fill worker thread */

                  pgllvdbg("New worker priority. %d->%d\n",
                           wtcb->sched_priority, priority);
                  sched_setpriority(wtcb, priority);
                }

              /* Return with g_pftcb holding the pointer to
               * the TCB associated with task that requires the page fill.
               */

              return true;
            }

          /* The page needed by this task has already been mapped into the
           * virtual address space -- just restart it.
           */

          pglldbg("Restarting TCB: %p\n", g_pftcb);
          up_unblock_task(g_pftcb);
        }
    }
  while (g_pftcb != NULL);

  return false;
}
/* Insert a fully-constructed message into a message queue in priority
 * order, deliver any attached mq_notify() notification, and wake the
 * highest-priority task (if any) waiting for the queue to become
 * non-empty.
 *
 * mqdes  - Descriptor of the destination message queue
 * mqmsg  - Pre-allocated internal message (ownership transfers to queue)
 * msg    - Message payload to copy into mqmsg
 * msglen - Length of the payload in bytes
 * prio   - Priority of the message
 *
 * Returns OK.
 */

int mq_dosend(mqd_t mqdes, FAR struct mqueue_msg_s *mqmsg, FAR const char *msg,
              size_t msglen, int prio)
{
  FAR struct tcb_s *btcb;
  FAR struct mqueue_inode_s *msgq;
  FAR struct mqueue_msg_s *next;
  FAR struct mqueue_msg_s *prev;
  irqstate_t saved_state;

  /* Get a pointer to the message queue */

  sched_lock();
  msgq = mqdes->msgq;

  /* Construct the message header info */

  mqmsg->priority = prio;
  mqmsg->msglen   = msglen;

  /* Copy the message data into the message */

  memcpy((FAR void *)mqmsg->mail, (FAR const void *)msg, msglen);

  /* Insert the new message in the message queue */

  saved_state = irqsave();

  /* Search the message list to find the location to insert the new
   * message.  Each list is maintained in ascending priority order.
   */

  for (prev = NULL, next = (FAR struct mqueue_msg_s *)msgq->msglist.head;
       next && prio <= next->priority;
       prev = next, next = next->next);

  /* Add the message at the right place */

  if (prev)
    {
      sq_addafter((FAR sq_entry_t *)prev, (FAR sq_entry_t *)mqmsg,
                  &msgq->msglist);
    }
  else
    {
      sq_addfirst((FAR sq_entry_t *)mqmsg, &msgq->msglist);
    }

  /* Increment the count of messages in the queue */

  msgq->nmsgs++;
  irqrestore(saved_state);

  /* Check if we need to notify any tasks that are attached to the
   * message queue
   */

#ifndef CONFIG_DISABLE_SIGNALS
  if (msgq->ntmqdes)
    {
      struct sigevent event;
      pid_t pid;

      /* Remove the message notification data from the message queue. */

      memcpy(&event, &msgq->ntevent, sizeof(struct sigevent));
      pid = msgq->ntpid;

      /* Detach the notification (mq_notify is one-shot: it must be
       * re-armed after each delivery).
       */

      memset(&msgq->ntevent, 0, sizeof(struct sigevent));
      msgq->ntpid   = INVALID_PROCESS_ID;
      msgq->ntmqdes = NULL;

      /* Notify the client via signal? */

      if (event.sigev_notify == SIGEV_SIGNAL)
        {
          /* Yes... Queue the signal -- What if this returns an error? */

#ifdef CONFIG_CAN_PASS_STRUCTS
          DEBUGVERIFY(sig_mqnotempty(pid, event.sigev_signo,
                                     event.sigev_value));
#else
          DEBUGVERIFY(sig_mqnotempty(pid, event.sigev_signo,
                                     event.sigev_value.sival_ptr));
#endif
        }

#ifdef CONFIG_SIG_EVTHREAD
      /* Notify the client via a function call */

      else if (event.sigev_notify == SIGEV_THREAD)
        {
          DEBUGVERIFY(sig_notification(pid, &event));
        }
#endif
    }
#endif

  /* Check if any tasks are waiting for the MQ not empty event. */

  saved_state = irqsave();
  if (msgq->nwaitnotempty > 0)
    {
      /* Find the highest priority task that is waiting for
       * this queue to be non-empty in g_waitingformqnotempty
       * list. sched_lock() should give us sufficient protection since
       * interrupts should never cause a change in this list
       */

      for (btcb = (FAR struct tcb_s *)g_waitingformqnotempty.head;
           btcb && btcb->msgwaitq != msgq;
           btcb = btcb->flink);

      /* If one was found, unblock it */

      ASSERT(btcb);

      btcb->msgwaitq = NULL;
      msgq->nwaitnotempty--;
      up_unblock_task(btcb);
    }

  irqrestore(saved_state);
  sched_unlock();
  return OK;
}
/* Cancel a task's pending wait on a message queue, set the task's errno
 * to errcode, and make the task ready-to-run.  This variant performs
 * explicit sanity checks (PANIC) when CONFIG_DEBUG is enabled.  Safe to
 * call even if the task's state has already changed -- the state is
 * re-checked under disabled interrupts.
 *
 * wtcb    - TCB of the (possibly) waiting task
 * errcode - errno value to report to the awakened task
 */

void mq_waitirq(FAR struct tcb_s *wtcb, int errcode)
{
  FAR msgq_t *msgq;
  irqstate_t saved_state;

  /* Disable interrupts.  This is necessary because an interrupt handler may
   * attempt to send a message while we are doing this.
   */

  saved_state = irqsave();

  /* It is possible that an interrupt/context switch beat us to the punch and
   * already changed the task's state.  NOTE:  The operations within the if
   * are safe because interrupts are always disabled when the msgwaitq,
   * nwaitnotempty, and nwaitnotfull fields are modified.
   */

  if (wtcb->task_state == TSTATE_WAIT_MQNOTEMPTY ||
      wtcb->task_state == TSTATE_WAIT_MQNOTFULL)
    {
      /* Get the message queue associated with the waiter from the TCB */

      msgq = wtcb->msgwaitq;
#ifdef CONFIG_DEBUG
      if (!msgq)
        {
          /* In these states there must always be an associated message queue */

          PANIC((uint32_t)OSERR_MQNOWAITER);
        }
#endif
      wtcb->msgwaitq = NULL;

      /* Decrement the count of waiters and cancel the wait */

      if (wtcb->task_state == TSTATE_WAIT_MQNOTEMPTY)
        {
#ifdef CONFIG_DEBUG
          if (msgq->nwaitnotempty <= 0)
            {
              /* In this state, there should be a positive, non-zero waiter
               * count.
               */

              PANIC((uint32_t)OSERR_MQNONEMPTYCOUNT);
            }
#endif
          msgq->nwaitnotempty--;
        }
      else
        {
#ifdef CONFIG_DEBUG
          if (msgq->nwaitnotfull <= 0)
            {
              /* In this state, there should be a positive, non-zero waiter
               * count.
               */

              PANIC((uint32_t)OSERR_MQNOTFULLCOUNT);
            }
#endif
          msgq->nwaitnotfull--;
        }

      /* Mark the errno value for the thread. */

      wtcb->pterrno = errcode;

      /* Restart the task. */

      up_unblock_task(wtcb);
    }

  /* Interrupts may now be enabled. */

  irqrestore(saved_state);
}
/* Unlock (post) a semaphore.  If the post leaves the count non-positive,
 * the highest-priority task waiting on this semaphore is unblocked.
 * Callable from interrupt handlers.
 *
 * sem - The semaphore to post
 *
 * Returns OK on success; ERROR if sem is NULL.
 */

int sem_post(FAR sem_t *sem)
{
  FAR _TCB *stcb = NULL;
  int ret = ERROR;
  irqstate_t saved_state;

  /* Make sure we were supplied with a valid semaphore. */

  if (sem)
    {
      /* The following operations must be performed with interrupts
       * disabled because sem_post() may be called from an interrupt
       * handler.
       */

      saved_state = irqsave();

      /* Perform the semaphore unlock operation. */

      ASSERT(sem->semcount < SEM_VALUE_MAX);
      sem_releaseholder(sem);
      sem->semcount++;

      /* If the result of the semaphore unlock is non-positive, then
       * there must be some task waiting for the semaphore.
       */

#ifdef CONFIG_PRIORITY_INHERITANCE
      /* Don't let it run until we complete the priority restoration
       * steps.
       */

      sched_lock();
#endif
      if (sem->semcount <= 0)
        {
          /* Check if there are any tasks in the waiting for semaphore
           * task list that are waiting for this semaphore.  This is a
           * prioritized list so the first one we encounter is the one
           * that we want.
           */

          for (stcb = (FAR _TCB*)g_waitingforsemaphore.head;
               (stcb && stcb->waitsem != sem);
               stcb = stcb->flink);

          if (stcb)
            {
              /* It is, let the task take the semaphore */

              stcb->waitsem = NULL;

              /* Restart the waiting task. */

              up_unblock_task(stcb);
            }
        }

      /* Check if we need to drop the priority of any threads holding
       * this semaphore.  The priority could have been boosted while they
       * held the semaphore.
       */

#ifdef CONFIG_PRIORITY_INHERITANCE
      sem_restorebaseprio(stcb, sem);
      sched_unlock();
#endif
      ret = OK;

      /* Interrupts may now be enabled. */

      irqrestore(saved_state);
    }

  return ret;
}
/* Deliver a signal to the specified task: queue it as pending if masked,
 * otherwise schedule its sigaction and unblock the task from any
 * signal/semaphore/message-queue wait it is in.
 *
 * stcb - TCB of the task receiving the signal
 * info - The signal information to deliver
 *
 * Returns OK (or the result of sig_queueaction() for an unmasked
 * signal); ERROR if stcb or info is NULL.
 *
 * FIX: the debug trace formerly executed BEFORE the NULL check below and
 * dereferenced both info and stcb, so a NULL argument caused a crash
 * before the guard could reject it.  The trace now runs after validation.
 */

int sig_received(FAR _TCB *stcb, siginfo_t *info)
{
  irqstate_t saved_state;
  int ret = ERROR;

  if (stcb && info)
    {
      sdbg("TCB=0x%08x signo=%d code=%d value=%d mask=%08x\n",
           stcb, info->si_signo, info->si_code, info->si_value.sival_int,
           stcb->sigprocmask);

      ret = OK;

      /****************** MASKED SIGNAL HANDLING ******************/

      /* Check if the signal is masked -- if it is, it will be added to the
       * list of pending signals.
       */

      if (sigismember(&stcb->sigprocmask, info->si_signo))
        {
          /* Check if the task is waiting for this pending signal.  If so,
           * then unblock it.  This must be performed in a critical section
           * because signals can be queued from the interrupt level.
           */

          saved_state = irqsave();
          if (stcb->task_state == TSTATE_WAIT_SIG &&
              sigismember(&stcb->sigwaitmask, info->si_signo))
            {
              memcpy(&stcb->sigunbinfo, info, sizeof(siginfo_t));
              stcb->sigwaitmask = NULL_SIGNAL_SET;
              up_unblock_task(stcb);
              irqrestore(saved_state);
            }

          /* Its not one we are waiting for... Add it to the list of pending
           * signals.
           */

          else
            {
              irqrestore(saved_state);
              if (!sig_addpendingsignal(stcb, info))
                {
                  PANIC(OSERR_FAILEDTOADDSIGNAL);
                }
            }
        }

      /****************** UNMASKED SIGNAL HANDLING ******************/

      else
        {
          /* Queue any sigaction's requested by this task. */

          ret = sig_queueaction(stcb, info);

          /* Then schedule execution of the signal handling action on
           * the recipient's thread.
           */

          up_schedule_sigaction(stcb, sig_deliver);

          /* Check if the task is waiting for an unmasked signal.  If so,
           * then unblock it.  This must be performed in a critical section
           * because signals can be queued from the interrupt level.
           */

          saved_state = irqsave();
          if (stcb->task_state == TSTATE_WAIT_SIG)
            {
              memcpy(&stcb->sigunbinfo, info, sizeof(siginfo_t));
              stcb->sigwaitmask = NULL_SIGNAL_SET;
              up_unblock_task(stcb);
            }

          irqrestore(saved_state);

          /* If the task neither was waiting for the signal nor had a signal
           * handler attached to the signal, then the default action is
           * simply to ignore the signal
           */

          /****************** OTHER SIGNAL HANDLING ******************/

          /* If the task is blocked waiting for a semaphore, then that
           * task must be unblocked when a signal is received.
           */

          if (stcb->task_state == TSTATE_WAIT_SEM)
            {
              sem_waitirq(stcb);
            }

          /* If the task is blocked waiting on a message queue, then that
           * task must be unblocked when a signal is received.
           */

#ifndef CONFIG_DISABLE_MQUEUE
          if (stcb->task_state == TSTATE_WAIT_MQNOTEMPTY ||
              stcb->task_state == TSTATE_WAIT_MQNOTFULL)
            {
              mq_waitirq(stcb);
            }
#endif
        }
    }

  return ret;
}
/* Dispatch a signal to the specified task: queue it as pending if
 * masked, otherwise schedule its sigaction and unblock the task from any
 * signal/semaphore/message-queue wait it is in (the interrupted wait
 * reports EINTR).
 *
 * stcb - TCB of the task receiving the signal
 * info - The signal information to deliver
 *
 * Returns OK (or the result of sig_queueaction() for an unmasked
 * signal).
 *
 * FIX: DEBUGASSERT(stcb && info) formerly ran AFTER the debug trace had
 * already dereferenced both pointers; arguments are now asserted before
 * first use.
 */

int sig_tcbdispatch(FAR struct tcb_s *stcb, siginfo_t *info)
{
  irqstate_t saved_state;
  int ret = OK;

  DEBUGASSERT(stcb && info);

  sdbg("TCB=0x%08x signo=%d code=%d value=%d mask=%08x\n",
       stcb, info->si_signo, info->si_code, info->si_value.sival_int,
       stcb->sigprocmask);

  /************************* MASKED SIGNAL HANDLING ************************/

  /* Check if the signal is masked -- if it is, it will be added to the list
   * of pending signals.
   */

  if (sigismember(&stcb->sigprocmask, info->si_signo))
    {
      /* Check if the task is waiting for this pending signal.  If so, then
       * unblock it.  This must be performed in a critical section because
       * signals can be queued from the interrupt level.
       */

      saved_state = irqsave();
      if (stcb->task_state == TSTATE_WAIT_SIG &&
          sigismember(&stcb->sigwaitmask, info->si_signo))
        {
          memcpy(&stcb->sigunbinfo, info, sizeof(siginfo_t));
          stcb->sigwaitmask = NULL_SIGNAL_SET;
          up_unblock_task(stcb);
          irqrestore(saved_state);
        }

      /* Its not one we are waiting for... Add it to the list of pending
       * signals.
       */

      else
        {
          irqrestore(saved_state);
          FAR sigpendq_t *sigpend = sig_addpendingsignal(stcb, info);
          ASSERT(sigpend);
#ifdef __clang_analyzer__
          /* clang analyzer does not understand memory ownership transfer */

          free(sigpend);
#endif
        }
    }

  /************************ UNMASKED SIGNAL HANDLING ***********************/

  else
    {
      /* Queue any sigaction's requested by this task. */

      ret = sig_queueaction(stcb, info);

      /* Then schedule execution of the signal handling action on the
       * recipient's thread.
       */

      up_schedule_sigaction(stcb, sig_deliver);

      /* Check if the task is waiting for an unmasked signal.  If so, then
       * unblock it.  This must be performed in a critical section because
       * signals can be queued from the interrupt level.
       */

      saved_state = irqsave();
      if (stcb->task_state == TSTATE_WAIT_SIG)
        {
          memcpy(&stcb->sigunbinfo, info, sizeof(siginfo_t));
          stcb->sigwaitmask = NULL_SIGNAL_SET;
          up_unblock_task(stcb);
        }

      irqrestore(saved_state);

      /* If the task neither was waiting for the signal nor had a signal
       * handler attached to the signal, then the default action is
       * simply to ignore the signal
       */

      /*********************** OTHER SIGNAL HANDLING ***********************/

      /* If the task is blocked waiting for a semaphore, then that task must
       * be unblocked when a signal is received.
       */

      if (stcb->task_state == TSTATE_WAIT_SEM)
        {
          sem_waitirq(stcb, EINTR);
        }

      /* If the task is blocked waiting on a message queue, then that task
       * must be unblocked when a signal is received.
       */

#ifndef CONFIG_DISABLE_MQUEUE
      if (stcb->task_state == TSTATE_WAIT_MQNOTEMPTY ||
          stcb->task_state == TSTATE_WAIT_MQNOTFULL)
        {
          mq_waitirq(stcb, EINTR);
        }
#endif
    }

  return ret;
}