/*
 *  ======== ThreadSupport_getPriority ========
 *  Map the underlying BIOS 6 Task priority of obj->task onto the
 *  abstract ThreadSupport_Priority scale.
 *
 *  Returns ThreadSupport_Priority_INVALID when the OS priority does
 *  not match any of the five configured priority levels.
 *  (eb is accepted for interface consistency; no error is raised here.)
 */
ThreadSupport_Priority ThreadSupport_getPriority(ThreadSupport_Handle obj, Error_Block* eb)
{
    Int osPri = Task_getPri(obj->task);

    /* Compare against each configured level; first match wins. */
    if (osPri == ThreadSupport_lowestPriority) {
        return (ThreadSupport_Priority_LOWEST);
    }
    if (osPri == ThreadSupport_belowNormalPriority) {
        return (ThreadSupport_Priority_BELOW_NORMAL);
    }
    if (osPri == ThreadSupport_normalPriority) {
        return (ThreadSupport_Priority_NORMAL);
    }
    if (osPri == ThreadSupport_aboveNormalPriority) {
        return (ThreadSupport_Priority_ABOVE_NORMAL);
    }
    if (osPri == ThreadSupport_highestPriority) {
        return (ThreadSupport_Priority_HIGHEST);
    }

    /* No configured level matched the task's current priority. */
    return (ThreadSupport_Priority_INVALID);
}
/*
 *  ======== ti_sdo_ipc_Ipc_procSyncFinish ========
 *  Each processor writes its reserve memory address in SharedRegion 0
 *  to let the other processors know its finished the process of
 *  synchronization.
 *
 *  The lower-numbered processor of the pair acts as the "slave" and
 *  spins (at lowest Task priority, if called from Task context) until
 *  the remote side reports FINISH or DETACH.  Always returns
 *  Ipc_S_SUCCESS.
 */
Int ti_sdo_ipc_Ipc_procSyncFinish(UInt16 remoteProcId, Ptr sharedAddr)
{
    volatile ti_sdo_ipc_Ipc_Reserved *self, *remote;
    SizeT reservedSize = ti_sdo_ipc_Ipc_reservedSizePerProc();
    Bool cacheEnabled = SharedRegion_isCacheEnabled(0);
    UInt oldPri;

    /* don't do any synchronization if procSync is NONE */
    if (ti_sdo_ipc_Ipc_procSync == ti_sdo_ipc_Ipc_ProcSync_NONE) {
        return (Ipc_S_SUCCESS);
    }

    /*
     *  determine self and remote pointers
     *
     *  The processor with the smaller id uses the "slave" slot for its
     *  own flag and reads the master's slot, and vice versa.
     */
    if (MultiProc_self() < remoteProcId) {
        self = Ipc_getSlaveAddr(remoteProcId, sharedAddr);
        remote = ti_sdo_ipc_Ipc_getMasterAddr(remoteProcId, sharedAddr);
    }
    else {
        self = ti_sdo_ipc_Ipc_getMasterAddr(remoteProcId, sharedAddr);
        remote = Ipc_getSlaveAddr(remoteProcId, sharedAddr);
    }

    /* set my processor's reserved key to finish */
    self->startedKey = ti_sdo_ipc_Ipc_PROCSYNCFINISH;

    /*
     *  write back my processor's reserve key
     *
     *  Must happen before waiting on the remote flag so the remote side
     *  can observe our FINISH key in shared memory.
     */
    if (cacheEnabled) {
        Cache_wbInv((Ptr)self, reservedSize, Cache_Type_ALL, TRUE);
    }

    /* if slave processor, wait for remote to finish sync */
    if (MultiProc_self() < remoteProcId) {
        /* oldPri is only read below under the same Task-context guard */
        if (BIOS_getThreadType() == BIOS_ThreadType_Task) {
            oldPri = Task_getPri(Task_self());
        }

        /* wait for remote processor to finish */
        while (remote->startedKey != ti_sdo_ipc_Ipc_PROCSYNCFINISH &&
                remote->startedKey != ti_sdo_ipc_Ipc_PROCSYNCDETACH) {
            /* Set self priority to 1 [lowest] and yield cpu */
            if (BIOS_getThreadType() == BIOS_ThreadType_Task) {
                Task_setPri(Task_self(), 1);
                Task_yield();
            }

            /*
             *  Check the remote's sync flag
             *
             *  Invalidate each iteration so the spin reads fresh shared
             *  memory rather than a stale cached copy.
             */
            if (cacheEnabled) {
                Cache_inv((Ptr)remote, reservedSize, Cache_Type_ALL, TRUE);
            }
        }

        /* Restore self priority */
        if (BIOS_getThreadType() == BIOS_ThreadType_Task) {
            Task_setPri(Task_self(), oldPri);
        }
    }

    return (Ipc_S_SUCCESS);
}
/*
 *  ======== pthread_setschedparam ========
 *  Set the scheduling priority of a pthread.
 *
 *  policy is accepted but not examined by this implementation.
 *  Returns EINVAL if param->sched_priority is 0, less than -1, or not
 *  below Task_numPriorities; returns 0 on success.
 *
 *  NOTE(review): priority -1 appears to be deliberately allowed
 *  (presumably the SYS/BIOS "inactive" task priority) — confirm against
 *  the Task module documentation.
 */
int pthread_setschedparam(pthread_t pthread, int policy,
        const struct sched_param *param)
{
    pthread_Obj *thread = (pthread_Obj *)pthread;
    Task_Handle task = thread->task;
    UInt oldPri;
    int priority = param->sched_priority;
    UInt key;
#if ti_sysbios_posix_Settings_supportsMutexPriority__D
    int maxPri;
#endif

    if ((priority >= Task_numPriorities) || ((priority == 0)) ||
            (priority < -1)) {
        /* Bad priority value */
        return (EINVAL);
    }

    /* Lock out other tasks while the priority bookkeeping is updated */
    key = Task_disable();

    oldPri = Task_getPri(task);

    /* Record the requested priority even if it is not applied below */
    thread->priority = priority;

#if ti_sysbios_posix_Settings_supportsMutexPriority__D
    /*
     *  If the thread is holding a PTHREAD_PRIO_PROTECT or
     *  PTHREAD_PRIO_INHERIT mutex and running at its ceiling, we don't
     *  want to set its priority to a lower value.  Instead, we save the
     *  new priority to set it to, once the mutexes of higher priority
     *  ceilings are released.
     */
    if (!Queue_empty(Queue_handle(&(thread->mutexList)))) {
        maxPri = _pthread_getMaxPrioCeiling(thread);

        /* Only raise above the ceiling; never drop below it here */
        if (priority > maxPri) {
            Task_setPri(task, priority);
        }
    }
    else {
        /* The thread owns no mutexes */
        oldPri = Task_setPri(task, priority);
    }
#else
    oldPri = Task_setPri(task, priority);
#endif

    Task_restore(key);

    /* Suppress warning about oldPri not being used. */
    (void)oldPri;

    return (0);
}
/*
 *  ======== GateMutexPri_insertPri ========
 *  Inserts the element in order by priority, with higher priority
 *  elements at the head of the queue.
 *
 *  newElem is placed ahead of the first queued element whose task
 *  priority is strictly lower than newPri; equal priorities therefore
 *  queue FIFO.  If no such element exists (empty queue, or newElem has
 *  the lowest priority), newElem goes to the tail.
 */
Void GateMutexPri_insertPri(Queue_Object* queue, Queue_Elem* newElem,
        Int newPri)
{
    Queue_Elem* cur = Queue_head(queue);

    /* Walk until the queue wraps back to its header element. */
    while (cur != (Queue_Elem *)queue) {
        /* Strict '<' keeps equal-priority tasks in FIFO order. */
        if (Task_getPri((Task_Handle)cur) < newPri) {
            /* Found a lower-priority waiter; insert in front of it. */
            Queue_insert(cur, newElem);
            return;
        }
        cur = Queue_next(cur);
    }

    /*
     *  Reached the end without finding a lower-priority element:
     *  either the queue was empty or newElem is lowest priority,
     *  so append at the tail.
     */
    Queue_enqueue(queue, newElem);
}
/*
 *  ======== Semaphore_pend ========
 *  Decrement the semaphore, blocking the calling Task (up to timeout
 *  system ticks) when the count is zero.
 *
 *  Returns TRUE when the semaphore was acquired; FALSE on timeout or
 *  BIOS_NO_WAIT with a zero count.  Blocking is only legal from Task
 *  context (asserted below).
 */
Bool Semaphore_pend(Semaphore_Object *sem, UInt timeout)
{
    UInt hwiKey, tskKey;
    Semaphore_PendElem elem;
    Queue_Handle pendQ;
    Clock_Struct clockStruct;

    Log_write3(Semaphore_LM_pend, (IArg)sem, (UArg)sem->count,
            (IArg)((Int)timeout));

    /*
     * Consider fast path check for count != 0 here!!!
     */

    /*
     *  elem is filled in entirely before interrupts are disabled.
     *  This significantly reduces latency.
     */

    /* add Clock event if timeout is not FOREVER nor NO_WAIT */
    if (BIOS_clockEnabled
            && (timeout != BIOS_WAIT_FOREVER)
            && (timeout != BIOS_NO_WAIT)) {
        Clock_Params clockParams;
        Clock_Params_init(&clockParams);
        clockParams.arg = (UArg)&elem;
        clockParams.startFlag = FALSE;  /* will start when necessary, thankyou */
        /* clockStruct lives on this stack frame; destructed before return */
        Clock_construct(&clockStruct, (Clock_FuncPtr)Semaphore_pendTimeout,
                timeout, &clockParams);
        elem.tpElem.clock = Clock_handle(&clockStruct);
        elem.pendState = Semaphore_PendState_CLOCK_WAIT;
    }
    else {
        elem.tpElem.clock = NULL;
        elem.pendState = Semaphore_PendState_WAIT_FOREVER;
    }

    pendQ = Semaphore_Instance_State_pendQ(sem);

    hwiKey = Hwi_disable();

    /* check semaphore count */
    if (sem->count == 0) {

        /* zero count + NO_WAIT: fail immediately without blocking */
        if (timeout == BIOS_NO_WAIT) {
            Hwi_restore(hwiKey);
            return (FALSE);
        }

        Assert_isTrue((BIOS_getThreadType() == BIOS_ThreadType_Task),
                Semaphore_A_badContext);

        /* lock task scheduler */
        tskKey = Task_disable();

        /* get task handle and block tsk */
        elem.tpElem.task = Task_self();

        /* leave a pointer for Task_delete() */
        elem.tpElem.task->pendElem = (Task_PendElem *)&(elem);

        Task_blockI(elem.tpElem.task);

        if (((UInt)sem->mode & 0x2) != 0) {  /* if PRIORITY bit is set */
            Semaphore_PendElem *tmpElem;
            Task_Handle tmpTask;
            UInt selfPri;

            /* find insertion point so pendQ stays ordered by priority */
            tmpElem = Queue_head(pendQ);
            selfPri = Task_getPri(elem.tpElem.task);

            while (tmpElem != (Semaphore_PendElem *)pendQ) {
                tmpTask = tmpElem->tpElem.task;
                /* use '>' here so tasks wait FIFO for same priority */
                if (selfPri > Task_getPri(tmpTask)) {
                    break;
                }
                else {
                    tmpElem = Queue_next((Queue_Elem *)tmpElem);
                }
            }

            /* insert ahead of the first lower-priority waiter (or at tail) */
            Queue_insert((Queue_Elem *)tmpElem, (Queue_Elem *)&elem);
        }
        else {
            /* put task at the end of the pendQ */
            Queue_enqueue(pendQ, (Queue_Elem *)&elem);
        }

        /* start Clock if appropriate */
        if (BIOS_clockEnabled &&
                (elem.pendState == Semaphore_PendState_CLOCK_WAIT)) {
            Clock_startI(elem.tpElem.clock);
        }

        Hwi_restore(hwiKey);

        Task_restore(tskKey);  /* the calling task will block here */

        /* Here on unblock due to Semaphore_post or timeout */

        if (Semaphore_supportsEvents && (sem->event != NULL)) {
            /* synchronize Event state */
            hwiKey = Hwi_disable();
            Semaphore_eventSync(sem->event, sem->eventId, sem->count);
            Hwi_restore(hwiKey);
        }

        /* deconstruct Clock if appropriate */
        if (BIOS_clockEnabled && (elem.tpElem.clock != NULL)) {
            Clock_destruct(Clock_struct(elem.tpElem.clock));
        }

        /* clear the Task_delete() back-pointer before elem goes out of scope */
        elem.tpElem.task->pendElem = NULL;

        /* pendState was set by the poster/timeout: nonzero on success */
        return ((Bool)(elem.pendState));
    }
    else {
        /* count > 0: take the semaphore without blocking */
        --sem->count;

        if (Semaphore_supportsEvents && (sem->event != NULL)) {
            /* synchronize Event state */
            Semaphore_eventSync(sem->event, sem->eventId, sem->count);
        }

        Hwi_restore(hwiKey);

        /* deconstruct Clock if appropriate */
        if (BIOS_clockEnabled && (elem.tpElem.clock != NULL)) {
            Clock_destruct(Clock_struct(elem.tpElem.clock));
        }

        return (TRUE);
    }
}
/*
 *  ======== ThreadSupport_getOsPriority ========
 *  Return the raw BIOS 6 Task priority of the thread's underlying task.
 *  (eb is accepted for interface consistency; no error is raised here.)
 */
Int ThreadSupport_getOsPriority(ThreadSupport_Handle obj, Error_Block* eb)
{
    Int osPri;

    osPri = Task_getPri(obj->task);

    return (osPri);
}
/*
 *  ======== GateMutexPri_Gate ========
 *  Returns FIRST_ENTER when it gets the gate, returns NESTED_ENTER
 *  on nested calls.
 *
 *  Implements priority inheritance: a higher-priority task blocking on
 *  the gate donates its priority to the current owner, and waiters are
 *  queued in priority order so the highest-priority waiter acquires the
 *  gate next.
 */
IArg GateMutexPri_enter(GateMutexPri_Object *obj)
{
    Task_Handle tsk;
    UInt tskKey;
    Int tskPri;
    Task_PendElem elem;
    Queue_Handle pendQ;

    /* make sure we're not calling from Hwi or Swi context */
    Assert_isTrue(((BIOS_getThreadType() == BIOS_ThreadType_Task) ||
            (BIOS_getThreadType() == BIOS_ThreadType_Main)),
            GateMutexPri_A_badContext);

    pendQ = GateMutexPri_Instance_State_pendQ(obj);

    tsk = Task_self();

    /*
     *  Prior to tasks starting, Task_self() will return NULL.
     *  Simply return NESTED_ENTER here as, by definition, there is
     *  is only one thread running at this time.
     */
    if (tsk == NULL) {
        return (NESTED_ENTER);
    }

    tskPri = Task_getPri(tsk);

    /*
     *  Gate may only be called from task context, so Task_disable is
     *  sufficient protection.
     */
    tskKey = Task_disable();

    /* If the gate is free, take it. */
    if (obj->mutexCnt == 1) {
        obj->mutexCnt = 0;
        obj->owner = tsk;
        /* remember pre-donation priority so leave() can restore it */
        obj->ownerOrigPri = tskPri;
        Task_restore(tskKey);
        return (FIRST_ENTER);
    }

    /* At this point, the gate is already held by someone. */

    /* If this is a nested call to gate... */
    if (obj->owner == tsk) {
        Task_restore(tskKey);
        return (NESTED_ENTER);
    }

    /*
     *  Donate priority if necessary.  The owner is guaranteed to have the
     *  highest priority of anyone waiting on the gate, so just compare this
     *  task's priority against the owner's.
     */
    if (tskPri > Task_getPri(obj->owner)) {
        Task_setPri(obj->owner, tskPri);
    }

    /* Remove tsk from ready list. */
    Task_block(tsk);

    /* elem lives on this stack frame until we are unblocked by leave() */
    elem.task = tsk;
    elem.clock = NULL;

    /* leave a pointer for Task_delete() */
    tsk->pendElem = &elem;

    /* Insert tsk in wait queue in order by priority (high pri at head) */
    GateMutexPri_insertPri(pendQ, (Queue_Elem *)&elem, tskPri);

    /* Task_restore will call the scheduler and this task will block. */
    Task_restore(tskKey);

    /* unblocked: leave() made us the owner; clear Task_delete() pointer */
    tsk->pendElem = NULL;

    /*
     *  At this point, tsk has the gate.  Initialization of the gate is
     *  handled by the previous owner's call to leave.
     */
    return (FIRST_ENTER);
}
/*
 *  ======== GateMutexPri_leave ========
 *  Only releases the gate if key == FIRST_ENTER.
 *
 *  Restores the caller's original (pre-donation) priority and, if tasks
 *  are waiting, hands the gate directly to the highest-priority waiter
 *  at the head of the pend queue; otherwise marks the gate free.
 */
Void GateMutexPri_leave(GateMutexPri_Object *obj, IArg key)
{
    UInt tskKey, hwiKey;
    Task_Handle owner;
    Task_Handle newOwner;
    Task_PendElem *elem;
    Queue_Handle pendQ;

    pendQ = GateMutexPri_Instance_State_pendQ(obj);

    owner = Task_self();

    /*
     *  Prior to tasks starting, Task_self() will return NULL.
     *  Simply return here as, by definition, there is
     *  is only one thread running at this time.
     */
    if (owner == NULL) {
        return;
    }

    /*
     *  Gate may only be called from task context, so Task_disable is
     *  sufficient protection.
     */
    tskKey = Task_disable();

    /* Assert that caller is gate owner. */
    // ASSERT(owner == obj->owner);

    /* If this is not the outermost call to leave, just return. */
    if (key != FIRST_ENTER) {
        Task_restore(tskKey);
        return;
    }

    /*
     *  Restore this task's priority.  The if-test is worthwhile because of
     *  the cost of a call to setPri.
     */
    if (obj->ownerOrigPri != Task_getPri(owner)) {
        Task_setPri(owner, obj->ownerOrigPri);
    }

    /* If the list of waiting tasks is not empty... */
    if (!Queue_empty(pendQ)) {
        /*
         *  Get the next owner from the front of the queue (the task with
         *  the highest priority of those waiting on the queue).
         */
        elem = (Task_PendElem *)Queue_dequeue(pendQ);
        newOwner = elem->task;

        /* Setup the gate: new owner's current pri is its original pri */
        obj->owner = newOwner;
        obj->ownerOrigPri = Task_getPri(newOwner);

        /* Task_unblockI must be called with interrupts disabled. */
        hwiKey = Hwi_disable();
        Task_unblockI(newOwner, hwiKey);
        Hwi_restore(hwiKey);
    }
    /* If the gate is to be posted... */
    else {
        obj->mutexCnt = 1;
    }

    Task_restore(tskKey);
}