/*
 *  ======== Clock_walkQueueDynamic ========
 *  Walk the Clock Queue for TickMode_DYNAMIC, optionally servicing a
 *  specific tick.
 *
 *  Returns the (unsigned) number of ticks from 'thisTick' to the
 *  soonest remaining timeout, or ~0 if no active objects remain.
 */
UInt32 Clock_walkQueueDynamic(Bool service, UInt32 thisTick)
{
    UInt32 soonest = ~0;     /* min ticks until the next active timeout */
    UInt32 remain;
    Queue_Handle clockQ;
    Queue_Elem *qElem;
    Clock_Object *clock;

    /* walk every element on the module's clock queue */
    clockQ = Clock_Module_State_clockQ();
    qElem = Queue_head(clockQ);

    while (qElem != (Queue_Elem *)(clockQ)) {
        clock = (Clock_Object *)qElem;

        /* advance before servicing; the handler may alter this object */
        qElem = Queue_next(qElem);

        if (clock->active != TRUE) {
            continue;   /* idle objects contribute nothing */
        }

        /* optionally service if this tick matches the object's timeout */
        if ((service == TRUE) && (clock->currTimeout == thisTick)) {
            if (clock->period == 0) {
                /* one-shot: mark object idle */
                clock->active = FALSE;
            }
            else {
                /* periodic: re-arm for the next interval */
                clock->currTimeout += clock->period;
            }

            Log_write2(Clock_LM_begin, (UArg)clock, (UArg)clock->fxn);

            /* call the handler */
            clock->fxn(clock->arg);
        }

        /* if still active, fold its remaining time into the minimum */
        if (clock->active == TRUE) {
            remain = clock->currTimeout - thisTick;
            if (remain < soonest) {
                soonest = remain;
            }
        }
    }

    return (soonest);
}
/*
 *  ======== Task_exit ========
 *  Terminate the calling task.
 *
 *  Runs any configured exit hooks (outside the kernel lock), then blocks
 *  the task, marks it TERMINATED, and — if automatic cleanup of
 *  dynamically-created tasks is enabled — parks it on the module's
 *  terminated queue so it can be deleted later.
 *
 *  Never returns to the caller; Task_restore() lets the scheduler pick
 *  the next ready task.
 */
Void Task_exit()
{
    UInt tskKey, hwiKey;
    Task_Object *tsk;
#ifndef ti_sysbios_knl_Task_DISABLE_ALL_HOOKS
    Int i;
#endif

    tsk = Task_self();

#ifndef ti_sysbios_knl_Task_DISABLE_ALL_HOOKS
    /*
     * Process Task_exit hooks. Should be called outside the Task kernel
     * (i.e. before Task_disable), since hooks may themselves interact
     * with the scheduler.
     */
    for (i = 0; i < Task_hooks.length; i++) {
        if (Task_hooks.elem[i].exitFxn != NULL) {
            Task_hooks.elem[i].exitFxn(tsk);
        }
    }
#endif

    Log_write2(Task_LD_exit, (UArg)tsk, (UArg)tsk->fxn);

    /* lock the scheduler, then interrupts, for the state transition */
    tskKey = Task_disable();
    hwiKey = Hwi_disable();

    /* pull this task off its ready queue */
    Task_blockI(tsk);

    tsk->mode = Task_Mode_TERMINATED;

    /* may abort the system if this was the last "vital" task */
    Task_processVitalTaskFlag(tsk);

    Hwi_restore(hwiKey);

    /* detach the task's queue element from any list */
    Queue_elemClear((Queue_Elem *)tsk);

    /* add to terminated task list if it was dynamically created */
    if (Task_deleteTerminatedTasks == TRUE) {
        Task_Handle dynTask;

        /*
         * Only dynamically-created tasks appear in the Task object list;
         * a match here confirms 'tsk' was dynamically created.
         */
        dynTask = Task_Object_first();

        while (dynTask) {
            if (tsk == dynTask) {
                tsk->readyQ = Task_Module_State_terminatedQ();
                Queue_put(tsk->readyQ, (Queue_Elem *)tsk);
                break;
            }
            else {
                dynTask = Task_Object_next(dynTask);
            }
        }
    }

    /* unlock the scheduler; switches away from this task for good */
    Task_restore(tskKey);
}
/*
 *  ======== Rta_writeMask ========
 *  Write a new diags mask value on behalf of a host-side command,
 *  then acknowledge the command.
 */
Void Rta_writeMask(Rta_ResponsePacket *resp, UArg addr, UArg val)
{
    Bits16 **indirect;
    Bits16 *mask;

    /*
     * 'addr' is the address of diagsMask__C, which holds the address of
     * the actual diagsMask in the module state structure — so it must
     * be dereferenced twice to reach the mask itself.
     */
    indirect = (Bits16 **) xdc_uargToPtr(addr);
    mask = *indirect;

    /* the diagsMask is a Bits16 */
    *mask = (Bits16) val;

    Log_write2(Rta_LD_writeMask, (UArg) mask, (Bits16) val);

    /* acknowledge the command */
    Rta_acknowledgeCmd(resp);
}
/*
 *  ======== Semaphore_post ========
 *  Post (signal) a semaphore.
 *
 *  If no task is pending, the count is bumped (or pinned at 1 for a
 *  binary semaphore) and any associated Event is posted. Otherwise the
 *  head pender is dequeued, marked POSTED, and made ready to run.
 */
Void Semaphore_post(Semaphore_Object *sem)
{
    UInt tskKey, hwiKey;
    Semaphore_PendElem *elem;
    Queue_Handle pendQ;

    /* Event_post will do a Log_write, should we do one here too? */
    Log_write2(Semaphore_LM_post, (UArg)sem, (UArg)sem->count);

    pendQ = Semaphore_Instance_State_pendQ(sem);

    hwiKey = Hwi_disable();

    /* no pending tasks: just update the count and return */
    if (Queue_empty(pendQ)) {
        if (((UInt)sem->mode & 0x1) != 0) {  /* if BINARY bit is set */
            /* binary semaphore count saturates at 1 */
            sem->count = 1;
        }
        else {
            sem->count++;
            Assert_isTrue((sem->count != 0), Semaphore_A_overflow);
        }

        Hwi_restore(hwiKey);

        /* forward the post to the associated Event, if any */
        if (Semaphore_supportsEvents && (sem->event != NULL)) {
            Semaphore_eventPost(sem->event, sem->eventId);
        }

        return;
    }

    /* lock task scheduler */
    tskKey = Task_disable();

    /* dequeue tsk from semaphore queue */
    elem = (Semaphore_PendElem *)Queue_dequeue(pendQ);

    /* mark the Semaphore as having been posted */
    elem->pendState = Semaphore_PendState_POSTED;

    /* put task back into readyQ */
    Task_unblockI(elem->tpElem.task, hwiKey);

    Hwi_restore(hwiKey);

    /* unlock the scheduler; may immediately switch to the unblocked task */
    Task_restore(tskKey);
}
/*
 *  ======== Task_blockI ========
 *  Block a task.
 *
 *  Remove a task from its ready list and mark it BLOCKED.
 *  Must be called within Task_disable/Task_restore block
 *  and with interrupts disabled.
 */
Void Task_blockI(Task_Object *tsk)
{
    Queue_Object *ready = tsk->readyQ;
    UInt priMask = tsk->mask;
    UInt set = Task_module->curSet;

    Log_write2(Task_LD_block, (UArg)tsk, (UArg)tsk->fxn);

    /* pull the task off its ready queue */
    Queue_remove((Queue_Elem *)tsk);

    /* last task at this priority? clear its bit in curSet */
    if (Queue_empty(ready)) {
        Task_module->curSet = set & ~priMask;
    }

    /* blocking the running task must force a Task_switch() */
    if (Task_module->curTask == tsk) {
        Task_module->curQ = NULL;
    }

    tsk->mode = Task_Mode_BLOCKED;

    /* tell the scheduler there is work to do */
    Task_module->workFlag = 1;
}
/*
 *  ======== Clock_workFunc ========
 *  Service Clock Queue for TickMode_PERIODIC.
 *
 *  Swi body that processes all ticks accumulated in swiCount since the
 *  last run; each pending tick gets its own pass over the clock queue
 *  so that no timeout is skipped even when the Swi ran late.
 */
Void Clock_workFunc(UArg arg0, UArg arg1)
{
    Queue_Elem *elem;
    UInt hwiKey, count;
    UInt32 time, compare;
    Clock_Object *obj;
    Queue_Handle clockQ;

    /* atomically snapshot the tick count and claim all pending ticks */
    hwiKey = Hwi_disable();
    time = Clock_module->ticks;
    count = Clock_module->swiCount;
    Clock_module->swiCount = 0;
    Hwi_restore(hwiKey);

    /* Log when count > 1, meaning Clock_swi is delayed */
    if (count > 1) {
        Log_write1(Clock_LW_delayed, (UArg)count);
    }

    /* rewind to the first unserviced tick; replay forward from there */
    compare = time - count;

    /*
     * Here count can be zero. When Clock_tick() runs it increments
     * swiCount and posts the Clock_workFunc. In Clock_workFunc we
     * get the value of swiCount atomically. Before we read swiCount, an
     * interrupt could occur, Clock_tick() will post the swi again.
     * That post is unnecessary as we are getting ready to process that
     * tick. The next time this swi runs the count will be zero.
     */
    while (count) {
        compare = compare + 1;
        count = count - 1;

        /* Traverse clock queue */
        clockQ = Clock_Module_State_clockQ();
        elem = Queue_head(clockQ);

        while (elem != (Queue_Elem *)(clockQ)) {
            obj = (Clock_Object *)elem;
            /* advance first: the handler may modify this object */
            elem = Queue_next(elem);

            /* if event has timed out */
            if ((obj->active == TRUE) && (obj->currTimeout == compare)) {
                if (obj->period == 0) { /* oneshot? */
                    /* mark object idle */
                    obj->active = FALSE;
                }
                else {                  /* periodic */
                    /* refresh timeout */
                    obj->currTimeout += obj->period;
                }

                Log_write2(Clock_LM_begin, (UArg)obj, (UArg)obj->fxn);

                /* call handler */
                obj->fxn(obj->arg);
            }
        }
    }
}