/*
 *  ======== Clock_walkQueueDynamic ========
 *  Walk the Clock Queue for TickMode_DYNAMIC, optionally servicing a
 *  specific tick
 */
UInt32 Clock_walkQueueDynamic(Bool service, UInt32 thisTick)
{
    UInt32 distance = ~0;
    Queue_Handle clockQ;
    Queue_Elem  *elem;
    Clock_Object *obj;
    UInt32 delta;

    /* Traverse clock queue */
    clockQ = Clock_Module_State_clockQ();
    elem = Queue_head(clockQ);

    while (elem != (Queue_Elem *)(clockQ)) {

        obj = (Clock_Object *)elem;
        elem = Queue_next(elem);

        /* if the object is active ... */
        if (obj->active == TRUE) {

            /* optionally service if tick matches timeout */
            if (service == TRUE) {

                /* if this object is timing out update its state */
                if (obj->currTimeout == thisTick) {

                    if (obj->period == 0) { /* oneshot? */
                        /* mark object idle */
                        obj->active = FALSE;
                    }
                    else {                  /* periodic */
                        /* refresh timeout */
                        obj->currTimeout += obj->period;
                    }

                    Log_write2(Clock_LM_begin, (UArg)obj, (UArg)obj->fxn);

                    /* call handler */
                    obj->fxn(obj->arg);
                }
            }

            /* if object still active update distance to soonest tick */
            if (obj->active == TRUE) {

                delta = obj->currTimeout - thisTick;

                /* if this is the soonest tick update distance to soonest */
                if (delta < distance) {
                    distance = delta;
                }
            }
        }
    }

    return (distance);
}
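The distance computation above relies on unsigned modular arithmetic: obj->currTimeout - thisTick yields the number of ticks remaining even after the 32-bit tick counter wraps. A minimal standalone sketch (plain C, not SYS/BIOS code) of why the subtraction stays correct across a wrap:

#include <stdint.h>
#include <stdio.h>

/* Ticks remaining until 'timeout', measured from 'now'. Because the
 * subtraction is performed in unsigned 32-bit arithmetic, the result
 * is correct modulo 2^32 even after the tick counter wraps past zero. */
static uint32_t ticks_until(uint32_t timeout, uint32_t now)
{
    return timeout - now;
}

int main(void)
{
    /* no wrap: timeout 1000, now 990 -> 10 ticks away */
    printf("%u\n", ticks_until(1000u, 990u));

    /* wrap: now is near UINT32_MAX, timeout has already wrapped to 5;
     * the unsigned difference is still the small forward distance (11) */
    printf("%u\n", ticks_until(5u, 0xFFFFFFFAu));

    return 0;
}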
/*
 *  ======== Task_schedule ========
 *  Find highest priority task and invoke it.
 *
 *  Must be called with interrupts disabled.
 */
Void Task_schedule()
{
    Queue_Handle maxQ;
    Task_Object *prevTask;
    Task_Object *curTask;
#ifndef ti_sysbios_knl_Task_DISABLE_ALL_HOOKS
    Int i;
#endif

    do {
        Task_module->workFlag = 0;

        /* stall until a task is ready */
        while (Task_module->curSet == 0) {
            Task_allBlockedFunction();
        }

        /* Determine current max ready Task priority */
        maxQ = (Queue_Handle)((UInt8 *)(Task_module->readyQ) +
               (UInt)(Intrinsics_maxbit(Task_module->curSet)*(2*sizeof(Ptr))));

        /* if a higher priority task is ready - switch to it */
        if (maxQ > Task_module->curQ) {
            prevTask = Task_module->curTask;
            Task_module->curQ = maxQ;
            Task_module->curTask = Queue_head(maxQ);
            curTask = Task_module->curTask;

            if (Task_checkStackFlag) {
                Task_checkStacks(prevTask, curTask);
            }

#if !defined(ti_sysbios_knl_Task_DISABLE_ALL_HOOKS) \
    || (xdc_runtime_Log_DISABLE_ALL == 0)
            /* It's safe to enable intrs here */
            Hwi_enable();

#ifndef ti_sysbios_knl_Task_DISABLE_ALL_HOOKS
            for (i = 0; i < Task_hooks.length; i++) {
                if (Task_hooks.elem[i].switchFxn != NULL) {
                    Task_hooks.elem[i].switchFxn(prevTask, curTask);
                }
            }
#endif

            Log_write4(Task_LM_switch, (UArg)prevTask, (UArg)prevTask->fxn,
                       (UArg)curTask, (UArg)curTask->fxn);

            /* Hard-disable intrs - this fxn is called with them disabled */
            Hwi_disable();
#endif
            Task_SupportProxy_swap((Ptr)&prevTask->context,
                                   (Ptr)&curTask->context);
        }
    } while (Task_module->workFlag);
}
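The maxQ computation indexes the readyQ array by the position of the highest set bit in curSet (one bit per priority level), scaled by the size of a queue header (two pointers). Intrinsics_maxbit is typically a single target instruction; below is a portable stand-in sketching the same "highest ready priority" selection, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

/* Index of the highest set bit (0..31); portable stand-in for a
 * hardware "max bit" intrinsic. Caller guarantees mask != 0. */
static unsigned maxbit(uint32_t mask)
{
    unsigned pos = 0;
    while (mask >>= 1) {
        pos++;
    }
    return pos;
}

int main(void)
{
    /* hypothetical ready set: tasks ready at priorities 1, 3 and 6 */
    uint32_t curSet = (1u << 1) | (1u << 3) | (1u << 6);

    /* the scheduler always services the highest ready priority first */
    printf("highest ready priority = %u\n", maxbit(curSet)); /* prints 6 */

    return 0;
}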
/*
 *  ======== Task_startup ========
 */
Void Task_startup()
{
    Queue_Handle maxQ;
    Task_Object *prevTask;
    Task_Struct dummyTask;
    Int i;

    Hwi_disable();      /* re-enabled in Task_enter of first task */

    /* Use dummyTask as initial task to swap from */
    prevTask = Task_handle(&dummyTask);

    /* stall until a task is ready */
    while (Task_module->curSet == 0) {
        Task_allBlockedFunc();
    }

    /* Determine current max ready Task priority */
    maxQ = (Queue_Handle)((UInt8 *)(Task_module->readyQ) +
           (UInt)(Intrinsics_maxbit(Task_module->curSet)*(2*sizeof(Ptr))));

    Task_module->curQ = maxQ;
    Task_module->curTask = Queue_head(maxQ);

    /* we've done the scheduler's work */
    Task_module->workFlag = 0;

    /* Signal that we are entering task thread mode */
    BIOS_setThreadType(BIOS_ThreadType_Task);

    /* should be safe to enable intrs here */
    Hwi_enable();

#ifndef ti_sysbios_knl_Task_DISABLE_ALL_HOOKS
    /* Run switch hooks for first real Task */
    for (i = 0; i < Task_hooks.length; i++) {
        if (Task_hooks.elem[i].switchFxn != NULL) {
            Task_hooks.elem[i].switchFxn(NULL, Task_module->curTask);
        }
    }
#endif

    Log_write4(Task_LM_switch, (UArg)0, (UArg)0,
               (UArg)Task_module->curTask,
               (UArg)Task_module->curTask->fxn);

    /* must leave this function with ints disabled */
    Hwi_disable();

    /* inform dispatcher that we're running on task stack */
    Hwi_switchFromBootStack();

    /* start first task by way of enter() */
    Task_SupportProxy_swap((Ptr)&prevTask->context,
                           (Ptr)&Task_module->curTask->context);
}
/*
 *  ======== Event_post ========
 */
Void Event_post(Event_Object *event, UInt eventId)
{
    UInt tskKey, hwiKey;
    Event_PendElem *elem;
    Queue_Handle pendQ;

    Assert_isTrue((eventId != 0), Event_A_nullEventId);

    Log_write3(Event_LM_post, (UArg)event, (UArg)event->postedEvents,
               (UArg)eventId);

    pendQ = Event_Instance_State_pendQ(event);

    /* atomically post this event */
    hwiKey = Hwi_disable();

    /* or in this eventId */
    event->postedEvents |= eventId;

    /* confirm that ANY tasks are pending on this event */
    if (Queue_empty(pendQ)) {
        Hwi_restore(hwiKey);
        return;
    }

    tskKey = Task_disable();

    /* examine pendElem on pendQ */
    elem = (Event_PendElem *)Queue_head(pendQ);

    /* check for match, consume matching eventIds if so. */
    elem->matchingEvents = Event_checkEvents(event, elem->andMask,
                                             elem->orMask);

    if (elem->matchingEvents != 0) {

        /* remove event elem from elem queue */
        Queue_remove((Queue_Elem *)elem);

        /* mark the Event as having been posted */
        elem->pendState = Event_PendState_POSTED;

        /* disable Clock object */
        if (BIOS_clockEnabled && (elem->tpElem.clock != NULL)) {
            Clock_stop(elem->tpElem.clock);
        }

        /* put task back into readyQ */
        Task_unblockI(elem->tpElem.task, hwiKey);
    }

    Hwi_restore(hwiKey);

    /* context switch may occur here */
    Task_restore(tskKey);
}
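Event_checkEvents decides whether the pending task's condition is now satisfied: any posted bit named in orMask matches, or all bits named in andMask must be posted. A minimal plain-C sketch of one way to express that documented matching rule (the real function also consumes the matched bits, which is not shown here):

#include <stdint.h>
#include <stdio.h>

/* Returns the matching events, or 0 if the pend condition is not met:
 * - OR condition:  any posted bit named in orMask matches
 * - AND condition: every bit named in andMask must be posted         */
static uint32_t check_events(uint32_t posted, uint32_t andMask, uint32_t orMask)
{
    uint32_t match = posted & orMask;

    if ((match == 0) && (andMask != 0) && ((posted & andMask) == andMask)) {
        match = andMask;
    }
    return match;
}

int main(void)
{
    printf("0x%x\n", check_events(0x1, 0x0, 0x3)); /* OR satisfied  -> 0x1 */
    printf("0x%x\n", check_events(0x1, 0x3, 0x0)); /* AND not met   -> 0x0 */
    printf("0x%x\n", check_events(0x3, 0x3, 0x0)); /* AND satisfied -> 0x3 */
    return 0;
}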
/*
 *  ======== Power_rebuildConstraint ========
 */
Void Power_rebuildConstraint(Power_Constraint type)
{
    Queue_Handle constraintsQ;
    Queue_Elem * elem;
    UInt value;
    UInt key;

    /* disable scheduling */
    key = Swi_disable();

    /* first, re-initialize the aggregate constraint */
    if (type == Power_DISALLOWED_CPU_SETPOINT_MASK) {
        Power_module->disallowedSetpointsCPU = 0;
    }
    else if (type == Power_DISALLOWED_PER_SETPOINT_MASK) {
        Power_module->disallowedSetpointsPER = 0;
    }
    else if (type == Power_DISALLOWEDSLEEPSTATE_MASK) {
        Power_module->disallowedSleepModes = 0;
    }

    constraintsQ = Power_Module_State_constraintsQ();

    if (!Queue_empty(constraintsQ)) {

        elem = Queue_head(constraintsQ);

        do {
            /* only if constraint 'type' matches... */
            if (((Power_ConstraintObj *)elem)->type == type) {

                /* get the constraint value */
                value = (UInt) ((Power_ConstraintObj *)elem)->value;

                /* update the aggregate constraint */
                if (type == Power_DISALLOWED_CPU_SETPOINT_MASK) {
                    Power_module->disallowedSetpointsCPU |= value;
                }
                else if (type == Power_DISALLOWED_PER_SETPOINT_MASK) {
                    Power_module->disallowedSetpointsPER |= value;
                }
                else if (type == Power_DISALLOWEDSLEEPSTATE_MASK) {
                    Power_module->disallowedSleepModes |= value;
                }
            }

            elem = Queue_next(elem);

        } while (elem != (Queue_Elem *) constraintsQ);
    }

    /* re-enable scheduling */
    Swi_restore(key);
}
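Rebuilding from scratch (zero the aggregate, then OR in every surviving constraint of that type) is what keeps unregistering a single constraint simple: nothing has to remember which client contributed which bits. A tiny plain-C sketch of that aggregation, with hypothetical names:

#include <stdint.h>
#include <stdio.h>

#define NUM_CONSTRAINTS 4

/* registered constraints of one type; 0 marks a freed slot */
static uint32_t constraints[NUM_CONSTRAINTS] = { 0x1, 0x4, 0x0, 0x4 };

/* recompute the aggregate disallowed mask from the surviving entries */
static uint32_t rebuild_aggregate(void)
{
    uint32_t aggregate = 0;
    int i;

    for (i = 0; i < NUM_CONSTRAINTS; i++) {
        aggregate |= constraints[i];
    }
    return aggregate;
}

int main(void)
{
    printf("disallowed mask = 0x%x\n", rebuild_aggregate()); /* prints 0x5 */
    return 0;
}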
/*
 *  ======== Task_deleteTerminatedTasksFunc ========
 */
Void Task_deleteTerminatedTasksFunc()
{
    UInt key;
    Task_Handle tsk;

    key = Hwi_disable();

    if (!Queue_empty(Task_Module_State_terminatedQ())) {
        tsk = Queue_head(Task_Module_State_terminatedQ());
        Hwi_restore(key);
        Task_delete(&tsk);
    }
    else {
        Hwi_restore(key);
    }
}
/*
 *  ======== Task_deleteTerminatedTasksFunc ========
 */
Void Task_deleteTerminatedTasksFunc()
{
    UInt hwiKey, taskKey;
    Task_Handle tsk;

    taskKey = Task_disable();
    hwiKey = Hwi_disable();

    if (!Queue_empty(Task_Module_State_terminatedQ())) {
        tsk = Queue_head(Task_Module_State_terminatedQ());
        Hwi_restore(hwiKey);
        tsk->readyQ = NULL;
        Task_delete(&tsk);
    }
    else {
        Hwi_restore(hwiKey);
    }

    Task_restore(taskKey);
}
/*
 *  ======== Power_serviceNotifyQ ========
 */
Power_NotifyResponse Power_serviceNotifyQ(Power_Event eventType)
{
    Power_NotifyResponse returnStatus = Power_NOTIFYDONE;
    Power_NotifyResponse clientStatus;
    Queue_Handle notifyQ;
    Queue_Elem * elem;
    Fxn notifyFxn;
    UArg clientArg;

    notifyQ = Power_Module_State_notifyQ();

    /* point to first client notify object */
    elem = Queue_head(notifyQ);

    /* walk the queue and notify each registered client of the event */
    do {
        if (((Power_NotifyObj *)elem)->eventTypes & eventType) {

            /* pull params from notify object */
            notifyFxn = ((Power_NotifyObj *)elem)->notifyFxn;
            clientArg = ((Power_NotifyObj *)elem)->clientArg;

            /* call the client's notification function */
            clientStatus = (Power_NotifyResponse) (*(Fxn)notifyFxn)(
                eventType, clientArg);

            /* if client declared error stop all further notifications */
            if (clientStatus == Power_NOTIFYERROR) {
                return (Power_NOTIFYERROR);
            }
        }

        /* get next element in the notification queue */
        elem = Queue_next(elem);

    } while (elem != (Queue_Elem *) notifyQ);

    return (returnStatus);
}
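The queue walk above is a plain callback registry: each element carries an event mask, a function pointer, and a client argument, and the walk stops early if any client reports an error. A standalone sketch of the same pattern, with hypothetical names and an array in place of the intrusive queue:

#include <stdio.h>

typedef enum { NOTIFY_DONE, NOTIFY_ERROR } NotifyResponse;
typedef NotifyResponse (*NotifyFxn)(unsigned event, void *clientArg);

typedef struct {
    unsigned  eventTypes;   /* bitmask of events this client cares about */
    NotifyFxn notifyFxn;
    void     *clientArg;
} NotifyObj;

static NotifyResponse clientA(unsigned event, void *arg)
{
    (void)arg;
    printf("clientA got event 0x%x\n", event);
    return NOTIFY_DONE;
}

static NotifyObj registry[] = {
    { 0x1, clientA, NULL },
    { 0x2, clientA, NULL },
};

/* notify every registered client of 'event'; stop on the first error */
static NotifyResponse service_notify(unsigned event)
{
    unsigned i;

    for (i = 0; i < sizeof(registry) / sizeof(registry[0]); i++) {
        if (registry[i].eventTypes & event) {
            if (registry[i].notifyFxn(event, registry[i].clientArg) ==
                    NOTIFY_ERROR) {
                return NOTIFY_ERROR;
            }
        }
    }
    return NOTIFY_DONE;
}

int main(void)
{
    service_notify(0x1);   /* only the first registry entry is called */
    return 0;
}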
/*
 *  ======== GateMutexPri_insertPri ========
 *  Inserts the element in order by priority, with higher priority
 *  elements at the head of the queue.
 */
Void GateMutexPri_insertPri(Queue_Object* queue, Queue_Elem* newElem,
                            Int newPri)
{
    Queue_Elem* qelem;

    /* Iterate over the queue. */
    for (qelem = Queue_head(queue); qelem != (Queue_Elem *)queue;
         qelem = Queue_next(qelem)) {

        /* Tasks of equal priority will be FIFO, so '>', not '>='. */
        if (newPri > Task_getPri((Task_Handle)qelem)) {
            /* Place the new element in front of the current qelem. */
            Queue_insert(qelem, newElem);
            return;
        }
    }

    /*
     * Put the task at the back of the queue if:
     *   1. The queue was empty.
     *   2. The task had the lowest priority in the queue.
     */
    Queue_enqueue(queue, newElem);
}
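To make the insert policy concrete, here is a self-contained plain-C sketch of the same priority-ordered insertion into an intrusive doubly-linked list with a dummy head node (hypothetical names, not the SYS/BIOS Queue API). The '>' comparison keeps equal-priority waiters in FIFO order, exactly as the listing above notes:

#include <stdio.h>

/* minimal intrusive doubly-linked queue with a dummy head node */
typedef struct Elem {
    struct Elem *next;
    struct Elem *prev;
    int pri;
} Elem;

static void queue_init(Elem *q) { q->next = q->prev = q; }

/* insert newElem immediately in front of qelem */
static void queue_insert(Elem *qelem, Elem *newElem)
{
    newElem->next = qelem;
    newElem->prev = qelem->prev;
    qelem->prev->next = newElem;
    qelem->prev = newElem;
}

/* higher priority toward the head; '>' (not '>=') keeps FIFO order
 * among equal priorities */
static void insert_pri(Elem *q, Elem *newElem)
{
    Elem *qelem;

    for (qelem = q->next; qelem != q; qelem = qelem->next) {
        if (newElem->pri > qelem->pri) {
            queue_insert(qelem, newElem);
            return;
        }
    }
    queue_insert(q, newElem);   /* empty queue or lowest priority: tail */
}

int main(void)
{
    Elem q, a = {0, 0, 2}, b = {0, 0, 5}, c = {0, 0, 5}, d = {0, 0, 1};
    Elem *e;

    queue_init(&q);
    insert_pri(&q, &a);
    insert_pri(&q, &b);
    insert_pri(&q, &c);
    insert_pri(&q, &d);

    /* expected order: 5 (b), 5 (c, FIFO after b), 2, 1 */
    for (e = q.next; e != &q; e = e->next) {
        printf("%d ", e->pri);
    }
    printf("\n");
    return 0;
}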
/*
 *  ======== Power_notify ========
 *
 *  Note: when this function is called, Swi and Task scheduling are disabled,
 *  but interrupts are enabled.
 */
Power_Status Power_notify(Power_Event eventType, UInt timeout,
    Power_SigType sigType, UArg extArg1, UArg extArg2)
{
    Power_NotifyResponse clientStatus;
    UInt32 notifyStartTime;
    Queue_Handle notifyQ;
    Queue_Elem * elem;
    Arg eventArg1;
    Arg eventArg2;
    UInt clients = 0;
    Fxn notifyFxn;
    Arg clientArg;
    UInt key;

    /* determine the appropriate notification queue */
    notifyQ = (Queue_Handle)((UInt8 *)(Power_module->notifyQ) +
        (UInt)(eventType * (2 * sizeof(Ptr))));

    /* if queue is empty, return immediately */
    if (Queue_empty(notifyQ)) {
        return (Power_SOK);
    }

    /* reset the count of clients doing delayed completion */
    ti_sysbios_family_c674_Power_notifyWaitCount[(UInt)eventType] = 0;

    /* grab notification start time (# ticks) */
    notifyStartTime = Clock_getTicks();

    /* point to first client notify object */
    elem = Queue_head(notifyQ);

    /* walk the queue and notify each registered client of the event */
    do {
        clients++;      /* count each registered client being notified */

        /* pull params from notify object */
        notifyFxn = ((Power_NotifyObj *)elem)->notifyFxn;
        clientArg = ((Power_NotifyObj *)elem)->clientArg;

        /* determine the event arguments... */

        /* if event triggered internally then Power determines event args: */
        if (sigType == Power_SigType_INTERNAL) {
            if (eventType == Power_PENDING_CPU_SETPOINTCHANGE) {
                eventArg1 = (Arg) Power_module->currentSetpointCPU;
                eventArg2 = (Arg) Power_module->nextSP;
            }
            else if (eventType == Power_DONE_CPU_SETPOINTCHANGE) {
                eventArg1 = (Arg) Power_module->previousSP;
                eventArg2 = (Arg) Power_module->currentSetpointCPU;
            }
            else if (eventType == Power_PENDING_PER_SETPOINTCHANGE) {
                eventArg1 = (Arg) Power_module->currentSetpointPER;
                eventArg2 = (Arg) Power_module->nextSPPER;
            }
            else if (eventType == Power_DONE_PER_SETPOINTCHANGE) {
                eventArg1 = (Arg) Power_module->previousSPPER;
                eventArg2 = (Arg) Power_module->currentSetpointPER;
            }
            else {
                eventArg1 = NULL;
                eventArg2 = NULL;
            }
        }
        /* else for externally triggered events use client-specified args: */
        else {
            eventArg1 = (Arg) extArg1;
            eventArg2 = (Arg) extArg2;
        }

        asm(" .global _Power_ntfy");
        asm("_Power_ntfy:");

        clientStatus = (Power_NotifyResponse) (*(Fxn)notifyFxn)(eventType,
            eventArg1, eventArg2, clientArg);

        /* if client said not done, increment count of clients to wait for */
        if (clientStatus == Power_NOTIFYNOTDONE) {
            key = Hwi_disable();
            ti_sysbios_family_c674_Power_notifyWaitCount[(UInt)eventType] += 1;
            Hwi_restore(key);
        }
        else if (clientStatus == Power_NOTIFYERROR) {
            return (Power_EFAIL);
        }

        /* get next element in this notify queue */
        elem = Queue_next(elem);

    } while (elem != (Queue_Elem *) notifyQ);

    /* if no timeout and a client said not done, return immediately */
    if ((timeout == 0) &&
        (ti_sysbios_family_c674_Power_notifyWaitCount[(UInt)eventType] != 0)) {
        return (Power_ETIMEOUT);
    }

    /* if any client said not done: wait until they signal completion */
    while (ti_sysbios_family_c674_Power_notifyWaitCount[(UInt)eventType] != 0) {
        if ((Clock_getTicks() - notifyStartTime) > timeout) {
            return (Power_ETIMEOUT);
        }
    }

    return (Power_SOK);
}
/*
 *  ======== Clock_workFunc ========
 *  Service Clock Queue for TickMode_PERIODIC
 */
Void Clock_workFunc(UArg arg0, UArg arg1)
{
    Queue_Elem *elem;
    UInt hwiKey, count;
    UInt32 time, compare;
    Clock_Object *obj;
    Queue_Handle clockQ;

    hwiKey = Hwi_disable();
    time = Clock_module->ticks;
    count = Clock_module->swiCount;
    Clock_module->swiCount = 0;
    Hwi_restore(hwiKey);

    /* Log when count > 1, meaning Clock_swi is delayed */
    if (count > 1) {
        Log_write1(Clock_LW_delayed, (UArg)count);
    }

    compare = time - count;

    /*
     * Here count can be zero. When Clock_tick() runs it increments
     * swiCount and posts the Clock_workFunc. In Clock_workFunc we
     * get the value of swiCount atomically. Before we read swiCount, an
     * interrupt could occur, Clock_tick() will post the swi again.
     * That post is unnecessary as we are getting ready to process that
     * tick. The next time this swi runs the count will be zero.
     */
    while (count) {
        compare = compare + 1;
        count = count - 1;

        /* Traverse clock queue */
        clockQ = Clock_Module_State_clockQ();
        elem = Queue_head(clockQ);

        while (elem != (Queue_Elem *)(clockQ)) {
            obj = (Clock_Object *)elem;
            elem = Queue_next(elem);

            /* if event has timed out */
            if ((obj->active == TRUE) && (obj->currTimeout == compare)) {

                if (obj->period == 0) { /* oneshot? */
                    /* mark object idle */
                    obj->active = FALSE;
                }
                else {                  /* periodic */
                    /* refresh timeout */
                    obj->currTimeout += obj->period;
                }

                Log_write2(Clock_LM_begin, (UArg)obj, (UArg)obj->fxn);

                /* call handler */
                obj->fxn(obj->arg);
            }
        }
    }
}
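The while (count) loop replays every tick that accumulated while the Swi was delayed: compare starts at the oldest unserviced tick (time - count) and advances one tick per pass, so no timeout match is skipped. A tiny plain-C sketch of that catch-up arithmetic:

#include <stdint.h>
#include <stdio.h>

/* Replay the ticks that accumulated while servicing was delayed.
 * 'time' is the current tick count, 'count' is how many ticks are
 * still unserviced; each pass services exactly one tick value.    */
static void catch_up(uint32_t time, uint32_t count)
{
    uint32_t compare = time - count;

    while (count) {
        compare = compare + 1;
        count = count - 1;
        printf("servicing tick %u\n", compare);
    }
}

int main(void)
{
    /* Swi was delayed: ticks 98, 99 and 100 must all be serviced */
    catch_up(100u, 3u);
    return 0;
}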
/*
 *  ======== Semaphore_pend ========
 */
Bool Semaphore_pend(Semaphore_Object *sem, UInt timeout)
{
    UInt hwiKey, tskKey;
    Semaphore_PendElem elem;
    Queue_Handle pendQ;
    Clock_Struct clockStruct;

    Log_write3(Semaphore_LM_pend, (IArg)sem, (UArg)sem->count,
               (IArg)((Int)timeout));

    /*
     * Consider fast path check for count != 0 here!!!
     */

    /*
     * elem is filled in entirely before interrupts are disabled.
     * This significantly reduces latency.
     */

    /* add Clock event if timeout is not FOREVER nor NO_WAIT */
    if (BIOS_clockEnabled &&
            (timeout != BIOS_WAIT_FOREVER) &&
            (timeout != BIOS_NO_WAIT)) {
        Clock_Params clockParams;
        Clock_Params_init(&clockParams);
        clockParams.arg = (UArg)&elem;
        clockParams.startFlag = FALSE;  /* will start when necessary, thankyou */
        Clock_construct(&clockStruct, (Clock_FuncPtr)Semaphore_pendTimeout,
                        timeout, &clockParams);
        elem.tpElem.clock = Clock_handle(&clockStruct);
        elem.pendState = Semaphore_PendState_CLOCK_WAIT;
    }
    else {
        elem.tpElem.clock = NULL;
        elem.pendState = Semaphore_PendState_WAIT_FOREVER;
    }

    pendQ = Semaphore_Instance_State_pendQ(sem);

    hwiKey = Hwi_disable();

    /* check semaphore count */
    if (sem->count == 0) {

        if (timeout == BIOS_NO_WAIT) {
            Hwi_restore(hwiKey);
            return (FALSE);
        }

        Assert_isTrue((BIOS_getThreadType() == BIOS_ThreadType_Task),
                      Semaphore_A_badContext);

        /* lock task scheduler */
        tskKey = Task_disable();

        /* get task handle and block tsk */
        elem.tpElem.task = Task_self();

        /* leave a pointer for Task_delete() */
        elem.tpElem.task->pendElem = (Task_PendElem *)&(elem);

        Task_blockI(elem.tpElem.task);

        if (((UInt)sem->mode & 0x2) != 0) {     /* if PRIORITY bit is set */
            Semaphore_PendElem *tmpElem;
            Task_Handle tmpTask;
            UInt selfPri;

            tmpElem = Queue_head(pendQ);
            selfPri = Task_getPri(elem.tpElem.task);

            while (tmpElem != (Semaphore_PendElem *)pendQ) {
                tmpTask = tmpElem->tpElem.task;
                /* use '>' here so tasks wait FIFO for same priority */
                if (selfPri > Task_getPri(tmpTask)) {
                    break;
                }
                else {
                    tmpElem = Queue_next((Queue_Elem *)tmpElem);
                }
            }

            Queue_insert((Queue_Elem *)tmpElem, (Queue_Elem *)&elem);
        }
        else {
            /* put task at the end of the pendQ */
            Queue_enqueue(pendQ, (Queue_Elem *)&elem);
        }

        /* start Clock if appropriate */
        if (BIOS_clockEnabled &&
                (elem.pendState == Semaphore_PendState_CLOCK_WAIT)) {
            Clock_startI(elem.tpElem.clock);
        }

        Hwi_restore(hwiKey);

        Task_restore(tskKey);   /* the calling task will block here */

        /* Here on unblock due to Semaphore_post or timeout */

        if (Semaphore_supportsEvents && (sem->event != NULL)) {
            /* synchronize Event state */
            hwiKey = Hwi_disable();
            Semaphore_eventSync(sem->event, sem->eventId, sem->count);
            Hwi_restore(hwiKey);
        }

        /* deconstruct Clock if appropriate */
        if (BIOS_clockEnabled && (elem.tpElem.clock != NULL)) {
            Clock_destruct(Clock_struct(elem.tpElem.clock));
        }

        elem.tpElem.task->pendElem = NULL;

        return ((Bool)(elem.pendState));
    }
    else {
        --sem->count;

        if (Semaphore_supportsEvents && (sem->event != NULL)) {
            /* synchronize Event state */
            Semaphore_eventSync(sem->event, sem->eventId, sem->count);
        }

        Hwi_restore(hwiKey);

        /* deconstruct Clock if appropriate */
        if (BIOS_clockEnabled && (elem.tpElem.clock != NULL)) {
            Clock_destruct(Clock_struct(elem.tpElem.clock));
        }

        return (TRUE);
    }
}
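From the caller's side, the three timeout cases map onto the paths above: BIOS_NO_WAIT returns FALSE immediately when the count is zero, a finite timeout constructs a Clock object and may return FALSE when it expires, and BIOS_WAIT_FOREVER blocks with no Clock at all. A hedged usage sketch, assuming a Semaphore_Handle named mySem created elsewhere (the handle name and task function are hypothetical; the headers and calls are the standard SYS/BIOS API):

#include <xdc/std.h>
#include <ti/sysbios/BIOS.h>
#include <ti/sysbios/knl/Semaphore.h>

/* mySem is assumed to be created elsewhere (e.g. in main() or the .cfg) */
extern Semaphore_Handle mySem;

Void consumerTask(UArg arg0, UArg arg1)
{
    /* 1. poll: returns immediately with FALSE if count == 0 */
    if (Semaphore_pend(mySem, BIOS_NO_WAIT)) {
        /* got the semaphore without blocking */
    }

    /* 2. bounded wait: blocks for at most 100 system ticks;
     *    FALSE means the Clock-driven timeout fired first     */
    if (!Semaphore_pend(mySem, 100)) {
        /* handle the timeout */
    }

    /* 3. unbounded wait: no Clock object is constructed */
    Semaphore_pend(mySem, BIOS_WAIT_FOREVER);
}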