irqstate_t enter_critical_section(void)
{
  FAR struct tcb_s *rtcb;

  /* Do nothing if called from an interrupt handler */

  if (up_interrupt_context())
    {
      /* The value returned does not matter.  We assume only that it is a
       * scalar here.
       */

      return (irqstate_t)0;
    }

  /* Do we already have interrupts disabled? */

  rtcb = this_task();
  DEBUGASSERT(rtcb != NULL);

  if (rtcb->irqcount > 0)
    {
      /* Yes... make sure that the spinlock is set and increment the IRQ
       * lock count.
       */

      DEBUGASSERT(g_cpu_irqlock == SP_LOCKED && rtcb->irqcount < INT16_MAX);
      rtcb->irqcount++;
    }
  else
    {
      /* No... take the spinlock to get exclusive access and set the lock
       * count to 1.
       *
       * We must avoid the case where a context switch occurs between
       * taking the g_cpu_irqlock and disabling interrupts.  Also,
       * interrupt disables must follow a stacked order.  We cannot allow
       * other context switches to re-order the enabling/disabling of
       * interrupts.
       *
       * The scheduler accomplishes this by treating the irqcount like the
       * lockcount:  Both will disable pre-emption.
       */

      spin_setbit(&g_cpu_irqset, this_cpu(), &g_cpu_irqsetlock,
                  &g_cpu_irqlock);
      rtcb->irqcount = 1;

#ifdef CONFIG_SCHED_INSTRUMENTATION_CSECTION
      /* Note that we have entered the critical section */

      sched_note_csection(rtcb, true);
#endif
    }

  /* Then disable interrupts (they may already be disabled, but we need to
   * return a valid interrupt status in any event).
   */

  return up_irq_save();
}
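For context, here is a minimal sketch of how this API is typically consumed from driver code. The mydev_s structure and mydev_post_event() function are hypothetical illustrations; only enter_critical_section() and leave_critical_section() are the real NuttX interfaces. Note that the call pairs nest safely via the per-task irqcount shown above.

/* Hypothetical driver snippet:  the critical section brackets access to a
 * structure shared with an interrupt handler.
 */

static void mydev_post_event(FAR struct mydev_s *priv, uint32_t event)
{
  irqstate_t flags;

  flags = enter_critical_section(); /* Disable interrupts (and take
                                     * g_cpu_irqlock in SMP) */
  priv->pending |= event;           /* Data shared with the ISR */
  leave_critical_section(flags);    /* Restore the saved interrupt state */
}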
void up_schedule_sigaction(struct tcb_s *tcb, sig_deliver_t sigdeliver)
{
  irqstate_t flags;
  int cpu;
  int me;

  sinfo("tcb=0x%p sigdeliver=0x%p\n", tcb, sigdeliver);

  /* Make sure that interrupts are disabled */

  flags = enter_critical_section();

  /* Refuse to handle nested signal actions */

  if (!tcb->xcp.sigdeliver)
    {
      /* First, handle some special cases when the signal is being
       * delivered to a task that is currently executing on any CPU.
       */

      sinfo("rtcb=0x%p CURRENT_REGS=0x%p\n", this_task(), CURRENT_REGS);

      if (tcb->task_state == TSTATE_TASK_RUNNING)
        {
          me  = this_cpu();
          cpu = tcb->cpu;

          /* CASE 1: We are not in an interrupt handler and a task is
           * signaling itself for some reason.
           */

          if (cpu == me && !CURRENT_REGS)
            {
              /* In this case just deliver the signal now.
               * REVISIT: Signal handler will run in a critical section!
               */

              sigdeliver(tcb);
            }

          /* CASE 2: The task that needs to receive the signal is running.
           * This could happen if the task is running on another CPU OR if
           * we are in an interrupt handler and the task is running on this
           * CPU.  In the former case, we will have to PAUSE the other CPU
           * first.  But in either case, we will have to modify the return
           * state as well as the state in the TCB.
           */

          else
            {
              /* If we are signaling a task running on the other CPU, we
               * have to PAUSE the other CPU.
               */

              if (cpu != me)
                {
                  /* Pause the CPU */

                  up_cpu_pause(cpu);

                  /* Wait while the pause request is pending */

                  while (up_cpu_pausereq(cpu))
                    {
                    }

                  /* Now the TCB on the other CPU can be accessed safely */

                  /* Copy tcb->xcp.regs to tcb->xcp.saved.  These will be
                   * restored by the signal trampoline after the signal has
                   * been delivered.
                   */

                  tcb->xcp.sigdeliver    = (FAR void *)sigdeliver;
                  tcb->xcp.saved_pc      = tcb->xcp.regs[REG_PC];
#ifdef CONFIG_ARMV7M_USEBASEPRI
                  tcb->xcp.saved_basepri = tcb->xcp.regs[REG_BASEPRI];
#else
                  tcb->xcp.saved_primask = tcb->xcp.regs[REG_PRIMASK];
#endif
                  tcb->xcp.saved_xpsr    = tcb->xcp.regs[REG_XPSR];
#ifdef CONFIG_BUILD_PROTECTED
                  tcb->xcp.saved_lr      = tcb->xcp.regs[REG_LR];
#endif

                  /* Then set up to vector to the trampoline with
                   * interrupts disabled.  We must already be in privileged
                   * thread mode to be here.
                   */

                  tcb->xcp.regs[REG_PC]      = (uint32_t)up_sigdeliver;
#ifdef CONFIG_ARMV7M_USEBASEPRI
                  tcb->xcp.regs[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY;
#else
                  tcb->xcp.regs[REG_PRIMASK] = 1;
#endif
                  tcb->xcp.regs[REG_XPSR]    = ARMV7M_XPSR_T;
#ifdef CONFIG_BUILD_PROTECTED
                  tcb->xcp.regs[REG_LR]      = EXC_RETURN_PRIVTHR;
#endif
                }
              else
                {
                  /* tcb is running on the same CPU */

                  /* Save the return PC, xPSR and either the BASEPRI or
                   * PRIMASK registers (and perhaps also the LR).  These
                   * will be restored by the signal trampoline after the
                   * signal has been delivered.
                   */

                  tcb->xcp.sigdeliver    = (FAR void *)sigdeliver;
                  tcb->xcp.saved_pc      = CURRENT_REGS[REG_PC];
#ifdef CONFIG_ARMV7M_USEBASEPRI
                  tcb->xcp.saved_basepri = CURRENT_REGS[REG_BASEPRI];
#else
                  tcb->xcp.saved_primask = CURRENT_REGS[REG_PRIMASK];
#endif
                  tcb->xcp.saved_xpsr    = CURRENT_REGS[REG_XPSR];
#ifdef CONFIG_BUILD_PROTECTED
                  tcb->xcp.saved_lr      = CURRENT_REGS[REG_LR];
#endif

                  /* Then set up to vector to the trampoline with
                   * interrupts disabled.  The kernel-space trampoline must
                   * run in privileged thread mode.
                   */

                  CURRENT_REGS[REG_PC]      = (uint32_t)up_sigdeliver;
#ifdef CONFIG_ARMV7M_USEBASEPRI
                  CURRENT_REGS[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY;
#else
                  CURRENT_REGS[REG_PRIMASK] = 1;
#endif
                  CURRENT_REGS[REG_XPSR]    = ARMV7M_XPSR_T;
#ifdef CONFIG_BUILD_PROTECTED
                  CURRENT_REGS[REG_LR]      = EXC_RETURN_PRIVTHR;
#endif

                  /* And make sure that the saved context in the TCB is the
                   * same as the interrupt return context.
                   */

                  up_savestate(tcb->xcp.regs);
                }

              /* Increment the IRQ lock count so that when the task is
               * restarted, it will hold the IRQ spinlock.
               */

              DEBUGASSERT(tcb->irqcount < INT16_MAX);
              tcb->irqcount++;

              /* In an SMP configuration, the interrupt disable logic also
               * involves spinlocks that are configured per the TCB
               * irqcount field.  This is logically equivalent to
               * enter_critical_section().  The matching call to
               * leave_critical_section() will be performed in
               * up_sigdeliver().
               */

              spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                          &g_cpu_irqlock);

              /* RESUME the other CPU if it was PAUSED */

              if (cpu != me)
                {
                  up_cpu_resume(cpu);
                }
            }
        }

      /* Otherwise, we are (1) signaling a task that is not running from an
       * interrupt handler or (2) we are not in an interrupt handler and
       * the running task is signaling some other non-running task.
       */

      else
        {
          /* Save the return PC, xPSR and either the BASEPRI or PRIMASK
           * registers (and perhaps also the LR).  These will be restored
           * by the signal trampoline after the signal has been delivered.
           */

          tcb->xcp.sigdeliver    = (FAR void *)sigdeliver;
          tcb->xcp.saved_pc      = tcb->xcp.regs[REG_PC];
#ifdef CONFIG_ARMV7M_USEBASEPRI
          tcb->xcp.saved_basepri = tcb->xcp.regs[REG_BASEPRI];
#else
          tcb->xcp.saved_primask = tcb->xcp.regs[REG_PRIMASK];
#endif
          tcb->xcp.saved_xpsr    = tcb->xcp.regs[REG_XPSR];
#ifdef CONFIG_BUILD_PROTECTED
          tcb->xcp.saved_lr      = tcb->xcp.regs[REG_LR];
#endif

          /* Increment the IRQ lock count so that when the task is
           * restarted, it will hold the IRQ spinlock.
           */

          DEBUGASSERT(tcb->irqcount < INT16_MAX);
          tcb->irqcount++;

          /* Then set up to vector to the trampoline with interrupts
           * disabled.  We must already be in privileged thread mode to be
           * here.
           */

          tcb->xcp.regs[REG_PC]      = (uint32_t)up_sigdeliver;
#ifdef CONFIG_ARMV7M_USEBASEPRI
          tcb->xcp.regs[REG_BASEPRI] = NVIC_SYSH_DISABLE_PRIORITY;
#else
          tcb->xcp.regs[REG_PRIMASK] = 1;
#endif
          tcb->xcp.regs[REG_XPSR]    = ARMV7M_XPSR_T;
#ifdef CONFIG_BUILD_PROTECTED
          tcb->xcp.regs[REG_LR]      = EXC_RETURN_PRIVTHR;
#endif
        }
    }

  leave_critical_section(flags);
}
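The saved_pc, saved_basepri/saved_primask, saved_xpsr (and saved_lr) fields written above are consumed by the signal trampoline on the other side. The following is a hypothetical, heavily simplified sketch of the trampoline's restore half, under the assumption that delivery and restore happen in the signaled task's context; it is not the actual up_sigdeliver(), which also saves and restores errno, works from a stack copy of the register area, and issues the leave_critical_section() that balances the tcb->irqcount++ above.

/* Simplified sketch (hypothetical):  run the handler, then put back the
 * state that up_schedule_sigaction() saved.
 */

static void sigdeliver_sketch(void)
{
  FAR struct tcb_s *rtcb = this_task();

  /* Run the queued signal handler in the context of the signaled task */

  ((sig_deliver_t)rtcb->xcp.sigdeliver)(rtcb);
  rtcb->xcp.sigdeliver = NULL;      /* Permit a new signal action */

  /* Restore the register state saved by up_schedule_sigaction() so that
   * execution resumes at the interrupted PC with the original interrupt
   * masking.
   */

  rtcb->xcp.regs[REG_PC]      = rtcb->xcp.saved_pc;
#ifdef CONFIG_ARMV7M_USEBASEPRI
  rtcb->xcp.regs[REG_BASEPRI] = rtcb->xcp.saved_basepri;
#else
  rtcb->xcp.regs[REG_PRIMASK] = rtcb->xcp.saved_primask;
#endif
  rtcb->xcp.regs[REG_XPSR]    = rtcb->xcp.saved_xpsr;
#ifdef CONFIG_BUILD_PROTECTED
  rtcb->xcp.regs[REG_LR]      = rtcb->xcp.saved_lr;
#endif

  /* The real trampoline ends with a full context restore from this
   * register array.
   */
}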
bool sched_removereadytorun(FAR struct tcb_s *rtcb)
{
  FAR dq_queue_t *tasklist;
  bool doswitch = false;
  int cpu;

  /* Which CPU (if any) is the task running on?  Which task list holds the
   * TCB?
   */

  cpu      = rtcb->cpu;
  tasklist = TLIST_HEAD(rtcb->task_state, cpu);

  /* Check if the TCB to be removed is at the head of a ready-to-run list.
   * For the case of SMP, there are two lists involved:  (1) the
   * g_readytorun list that holds non-running tasks that have not been
   * assigned to a CPU, and (2) the g_assignedtasks[] lists that hold tasks
   * assigned to a CPU, including the task that is currently running on
   * that CPU.  Only the latter list contains the currently active task,
   * and only removing the head of that list can result in a context
   * switch.
   *
   * rtcb->blink == NULL will tell us if the TCB is at the head of the
   * ready-to-run list and, hence, a candidate for the new running task.
   *
   * If so, then the tasklist RUNNABLE attribute will inform us if the list
   * holds the currently executing task and, hence, if a context switch
   * should occur.
   */

  if (rtcb->blink == NULL && TLIST_ISRUNNABLE(rtcb->task_state))
    {
      FAR struct tcb_s *nxttcb;
      FAR struct tcb_s *rtrtcb;
      int me;

      /* There must always be at least one task in the list (the IDLE task)
       * after the TCB being removed.
       */

      nxttcb = (FAR struct tcb_s *)rtcb->flink;
      DEBUGASSERT(nxttcb != NULL);

      /* If we are modifying the head of some assigned task list other than
       * our own, we will need to stop that CPU.
       */

      me = this_cpu();
      if (cpu != me)
        {
          DEBUGVERIFY(up_cpu_pause(cpu));
        }

      /* The task is running but the CPU that it was running on has been
       * paused.  We can now safely remove its TCB from the ready-to-run
       * task list.  In the SMP case this may be either the g_readytorun
       * or the g_assignedtasks[cpu] list.
       */

      dq_rem((FAR dq_entry_t *)rtcb, tasklist);

      /* Which task will go at the head of the list?  It will be either
       * the next TCB in the assigned task list (nxttcb) or a TCB in the
       * g_readytorun list.  We can only select a task from that list if
       * the affinity mask includes the current CPU.
       *
       * REVISIT: What should we do, if anything, if pre-emption is locked
       * by another CPU?  Should we just use nxttcb?  Should we select
       * from the pending task list instead of the g_readytorun list?
       */

      for (rtrtcb = (FAR struct tcb_s *)g_readytorun.head;
           rtrtcb != NULL && !CPU_ISSET(cpu, &rtrtcb->affinity);
           rtrtcb = (FAR struct tcb_s *)rtrtcb->flink);

      /* Did we find a task in the g_readytorun list?  Which task should
       * we use?  We decide strictly by the priority of the two tasks:
       * Either (1) the task currently at the head of the
       * g_assignedtasks[cpu] list (nxttcb) or (2) the highest priority
       * task from the g_readytorun list with matching affinity (rtrtcb).
       */

      if (rtrtcb != NULL &&
          rtrtcb->sched_priority >= nxttcb->sched_priority)
        {
          FAR struct tcb_s *tmptcb;

          /* The TCB at the head of the ready-to-run list has the higher
           * priority.  Remove that task from the head of the g_readytorun
           * list and add it to the head of the g_assignedtasks[cpu] list.
           */

          tmptcb = (FAR struct tcb_s *)
            dq_remfirst((FAR dq_queue_t *)&g_readytorun);

          DEBUGASSERT(tmptcb == rtrtcb);

          dq_addfirst((FAR dq_entry_t *)tmptcb, tasklist);
          tmptcb->cpu = cpu;
          nxttcb = tmptcb;
        }

      /* Will pre-emption be disabled after the switch?  If the lockcount
       * is greater than zero, then this task/this CPU holds the scheduler
       * lock.
       */

      if (nxttcb->lockcount > 0)
        {
          /* Yes... make sure that scheduling logic knows about this */

          spin_setbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
                      &g_cpu_schedlock);
        }
      else
        {
          /* No...
           * we may need to release our hold on the lock.
           */

          spin_clrbit(&g_cpu_lockset, cpu, &g_cpu_locksetlock,
                      &g_cpu_schedlock);
        }

      /* Interrupts may be disabled after the switch.  If irqcount is
       * greater than zero, then this task/this CPU holds the IRQ lock.
       */

      if (nxttcb->irqcount > 0)
        {
          /* Yes... make sure that scheduling logic knows about this */

          spin_setbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                      &g_cpu_irqlock);
        }
      else
        {
          /* No... we may need to release our hold on the IRQ state. */

          spin_clrbit(&g_cpu_irqset, cpu, &g_cpu_irqsetlock,
                      &g_cpu_irqlock);
        }

      nxttcb->task_state = TSTATE_TASK_RUNNING;

      /* All done, restart the other CPU (if it was paused). */

      doswitch = true;
      if (cpu != me)
        {
          /* In this case we will not want to report a context switch to
           * this CPU.  Only the other CPU is affected.
           */

          DEBUGVERIFY(up_cpu_resume(cpu));
          doswitch = false;
        }
    }
  else
    {
      /* The task is not running.  Just remove its TCB from the
       * ready-to-run list.  In the SMP case this may be either the
       * g_readytorun or the g_assignedtasks[cpu] list.
       */

      dq_rem((FAR dq_entry_t *)rtcb, tasklist);
    }

  /* Since the TCB is no longer in any list, it is now invalid */

  rtcb->task_state = TSTATE_TASK_INVALID;
  return doswitch;
}
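To show how the doswitch return value is meant to be consumed, here is a hypothetical caller sketch. The helper name block_task_sketch() and the inline blocked-list handling are illustrative, not the literal NuttX blocking logic; the point is only that a true return means the running task on this CPU was removed, so a context switch must follow.

/* Hypothetical sketch of the caller's pattern */

static void block_task_sketch(FAR struct tcb_s *tcb, tstate_t task_state)
{
  /* Remove the TCB from the ready-to-run list.  Returns true only if the
   * task at the head of this CPU's g_assignedtasks[] list was removed.
   */

  bool switch_needed = sched_removereadytorun(tcb);

  /* Move the (now invalid) TCB to the list matching its new state,
   * e.g. TSTATE_WAIT_SEM; the real logic delegates this to the blocked
   * task list management.
   */

  tcb->task_state = task_state;

  if (switch_needed)
    {
      /* The head of this CPU's assigned task list changed:  the
       * architecture layer must save the blocked task's context and
       * restore the context of the new head task.
       */
    }
}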