/*
 * Task dispatcher. Basically it may be the same one no matter what
 * scheduling algorithm is used.
 *
 * Hosted flavour: 'regs' is the host (Win32) CONTEXT register frame of the
 * interrupted context. The dispatcher picks the first ready task and copies
 * its saved CPU context into *regs, so that core_LeaveInterrupt() resumes
 * the newly selected task.
 */
void core_Dispatch(CONTEXT *regs)
{
    struct ExecBase *SysBase = *SysBasePtr;
    struct Task *task;
    struct AROSCPUContext *ctx;

    /* Keep (virtual) interrupts off while scheduler state is manipulated */
    Ints_Enabled = 0;

    D(bug("[KRN] core_Dispatch()\n"));

    /*
     * Is the list of ready tasks empty? Well, increment the idle switch count
     * and stop the main thread.
     */
    if (IsListEmpty(&SysBase->TaskReady))
    {
        if (!Sleep_Mode)
        {
            SysBase->IdleCount++;
            SysBase->AttnResched |= ARF_AttnSwitch;

            DSLEEP(bug("[KRN] TaskReady list empty. Sleeping for a while...\n"));
            /* We are entering sleep mode */
            Sleep_Mode = SLEEP_MODE_PENDING;
        }
        /* Nothing to run: leave the interrupt without switching context */
        core_LeaveInterrupt();
        return;
    }

    Sleep_Mode = SLEEP_MODE_OFF;
    SysBase->DispCount++;

    /* Get the first task from the TaskReady list, and populate its settings through SysBase */
    task = (struct Task *)REMHEAD(&SysBase->TaskReady);
    SysBase->ThisTask = task;
    SysBase->Elapsed  = SysBase->Quantum;  /* restart the task's time slice */
    SysBase->SysFlags &= ~0x2000;          /* clear quantum-elapsed flag — 0x2000 is presumably SFF_QuantumOver; TODO confirm against exec headers */
    task->tc_State = TS_RUN;
    SysBase->IDNestCnt = task->tc_IDNestCnt;  /* restore the task's Disable() nesting level */

    DS(bug("[KRN] New task = %p (%s)\n", task, task->tc_Node.ln_Name));

    /* Handle task's flags */
    if (task->tc_Flags & TF_EXCEPT)
        Exception();

    if (task->tc_Flags & TF_LAUNCH)
    {
        /* Per-task launch hook, invoked on every dispatch of this task */
        task->tc_Launch(SysBase);
    }

    /*
     * Restore the task's state: CopyMemory(dst, src, len) — copy the saved
     * AROSCPUContext over the interrupt's CONTEXT frame.
     */
    ctx = (struct AROSCPUContext *)GetIntETask(task)->iet_Context;
    CopyMemory(regs, ctx, sizeof(CONTEXT));
    *LastErrorPtr = ctx->LastError;  /* restore the task's saved host LastError value */

    /* Leave interrupt and jump to the new task */
    core_LeaveInterrupt();
}
/*
 * Task dispatcher. Basically it may be the same one no matter what
 * scheduling algorithm is used.
 *
 * PowerPC flavour: interrupt masking is done via the MSR EE bit
 * (rdmsr/wrmsr), and the idle case spins with interrupts enabled until a
 * task becomes ready. NOTE(review): the function body continues beyond the
 * visible text (context restore is not shown here).
 */
void core_Dispatch(regs_t *regs)
{
    volatile struct ExecBase *SysBase = getSysBase();
    struct Task *task;

    if (SysBase)
    {
        /* Disable interrupts: clear the External interrupt Enable (EE) bit in the MSR */
        wrmsr(rdmsr() & ~MSR_EE);

        /*
         * Is the list of ready tasks empty? Well, increment the idle switch count and halt CPU.
         * It should be extended by some plugin mechanism which would put CPU and whole machine
         * into some more sophisticated sleep states (ACPI?)
         */
        while (IsListEmpty(&SysBase->TaskReady))
        {
//            SysBase->IdleCount++;
            SysBase->AttnResched |= ARF_AttnSwitch;
            //D(bug("[KRN] TaskReady list empty. Sleeping for a while...\n"));

            /* Sleep almost forever ;) — re-enable interrupts so something can wake a task up */
            wrmsr(rdmsr() | MSR_EE);
            asm volatile("sync");
//            wrmsr(rdmsr() | MSR_POW);
//            asm volatile("isync");

            /* Service pending software interrupts while idling */
            if (SysBase->SysFlags & SFF_SoftInt)
            {
                core_Cause(SysBase);
            }
        }

        SysBase->DispCount++;

        /* Get the first task from the TaskReady list, and populate its settings through SysBase */
        task = (struct Task *)REMHEAD(&SysBase->TaskReady);
        SysBase->ThisTask = task;
        SysBase->Elapsed = SysBase->Quantum;  /* restart the task's time slice */
        SysBase->SysFlags &= ~0x2000;         /* clear quantum-elapsed flag — presumably SFF_QuantumOver; TODO confirm */
        task->tc_State = TS_RUN;
        SysBase->IDNestCnt = task->tc_IDNestCnt;  /* restore the task's Disable() nesting level */

        //D(bug("[KRN] New task = %p (%s)\n", task, task->tc_Node.ln_Name));

        /* Handle task's flags */
        if (task->tc_Flags & TF_EXCEPT)
            Exception();

        /* Store the launch time (upper word of the PPC time base register) */
        GetIntETask(task)->iet_private1 = mftbu();

        if (task->tc_Flags & TF_LAUNCH)
        {
            /* Per-task launch hook, called with SysBase in A6 per AROS ABI */
            AROS_UFC1(void, task->tc_Launch,
                AROS_UFCA(struct ExecBase *, SysBase, A6));
        }

        /*
         * Restore the task's state: point 'regs' at the task's saved register
         * frame. If the task had interrupts enabled (IDNestCnt < 0), set EE
         * in its saved MSR (srr1) so they come back on when it resumes.
         */
        regs = task->tc_UnionETask.tc_ETask->et_RegFrame;
        if (SysBase->IDNestCnt < 0)
            regs->srr1 |= MSR_EE;

        /* Copy the fpu, mmx, xmm state */
#warning FIXME: Change to the lazy saving of the FPU state!!!!
#warning TODO: No FPU support yet!!!!!!! Yay, it sucks! :-D
    }
INTERNALS SIGF_SINGLE is used to signal the selected waiting thread. *****************************************************************************/ { AROS_LIBFUNC_INIT struct _Condition *c = (struct _Condition *) cond; struct _CondWaiter *waiter; assert(c != NULL); /* safely remove a waiter from the list */ ObtainSemaphore(&c->lock); waiter = (struct _CondWaiter *) REMHEAD(&c->waiters); if (waiter != NULL) c->count--; ReleaseSemaphore(&c->lock); /* no one waiting */ if (waiter == NULL) return; /* signal the task */ Signal(waiter->task, SIGF_SINGLE); /* all done */ FreeMem(waiter, sizeof(struct _CondWaiter)); AROS_LIBFUNC_EXIT