Example #1
// -----------------------------------------------------------------------------
//! \brief      Critical section entry. Disables the Task scheduler and Hwis
//!
//! \return     _npiCSKey_t   CS Key used to later exit CS
// -----------------------------------------------------------------------------
_npiCSKey_t NPIUtil_EnterCS(void)
{
    _npiCSKey_t key;
    key.taskkey = (uint_least16_t) Task_disable();
    key.hwikey = (uint_least16_t) Hwi_disable();
    return key;
}
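For symmetry, a minimal sketch of the matching exit routine is shown below. It assumes the _npiCSKey_t value returned by NPIUtil_EnterCS() is passed back unchanged, and restores state in the reverse order of the disables above; the name NPIUtil_ExitCS and the casts are assumptions for illustration, not copied from the NPI source.
// -----------------------------------------------------------------------------
//! \brief      Critical section exit (sketch). Restores the Hwi and Task state
//!             saved by NPIUtil_EnterCS()
//!
//! \param      key   CS key returned by NPIUtil_EnterCS()
//!
//! \return     void
// -----------------------------------------------------------------------------
void NPIUtil_ExitCS(_npiCSKey_t key)
{
    Hwi_restore((UInt) key.hwikey);    /* re-enable hardware interrupts first */
    Task_restore((UInt) key.taskkey);  /* then re-enable the task scheduler */
}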
Example #2
/*
 *  ======== pthread_detach ========
 */
int pthread_detach(pthread_t pthread)
{
    pthread_Obj  *thread = (pthread_Obj *)pthread;
    UInt          key;

    key = Task_disable();

    if ((thread->joinThread != NULL) || (thread->detached)) {
        Task_restore(key);

        /*
         *  A thread has already called pthread_join() or
         *  the thread is already detached.
         */
        return (EINVAL);
    }

    /*
     *  pthread_detach() marks the thread as detached.  When
     *  the thread terminates, its resources will automatically
     *  be freed without the need for another thread to call
     *  pthread_join().
     */
    thread->detached = 1;

    Task_restore(key);

    return (0);
}
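A short usage sketch of the standard POSIX pattern this enables: start a fire-and-forget worker and detach it so its resources are reclaimed automatically when it returns. The function and thread names are illustrative.
#include <pthread.h>
#include <stddef.h>

static void *background(void *arg)
{
    /* ... do background work ... */
    return NULL;
}

int start_background(void)
{
    pthread_t tid;

    if (pthread_create(&tid, NULL, background, NULL) != 0) {
        return -1;
    }

    /* no pthread_join() needed; resources are freed when background() returns */
    return pthread_detach(tid);
}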
Example #3
/*
 *  ======== Power_signalEvent ========
 *  Signal a Power event to registered clients.
 *
 */
Power_Status Power_signalEvent(Power_Event eventType, UArg eventArg1,
    UArg eventArg2, UInt notifyTimeout)
{
    Power_Status status;
    UInt key;

    /* check for out of range event type */
    if ((eventType < 0) || (eventType >= Power_INVALIDEVENT)) {
        return (Power_EINVALIDEVENT);
    }

    /* disable task scheduling */
    key = Task_disable();

    /* notify clients registered for this event */
    status = Power_notify(eventType, notifyTimeout, Power_SigType_EXTERNAL, 
        eventArg1, eventArg2);

    /* reenable task scheduling */
    Task_restore(key);

    /* map a non-timeout error to Power_EFAIL */
    if ((status != Power_ETIMEOUT) && (status != Power_SOK)) {
        status = Power_EFAIL;
    }

    return (status);
}
void osSuspendAllTasks(void)
{
   //Make sure the operating system is running
   if(running)
   {
      //Disable the task scheduler
      Task_disable();
   }
}
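The matching resume routine would presumably re-enable the scheduler. A minimal sketch follows, reusing the same running flag and assuming Task_enable() is the intended counterpart to the keyless Task_disable() above.
void osResumeAllTasks(void)
{
   //Make sure the operating system is running
   if(running)
   {
      //Enable the task scheduler
      Task_enable();
   }
}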
/*
 *  ======== GateTask_enter ========
 *  Return the key for Task_disable.
 */
IArg GateTask_enter(GateTask_Object *obj)
{
    /* make sure we're not calling from Hwi or Swi context */
    Assert_isTrue(((BIOS_getThreadType() == BIOS_ThreadType_Task) ||
                   (BIOS_getThreadType() == BIOS_ThreadType_Main)),
                   GateTask_A_badContext);

    return(Task_disable());
}
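A companion sketch for leaving the gate (assumed here, not copied from the module source): simply restore the Task scheduler state captured by GateTask_enter().
/*
 *  ======== GateTask_leave ========
 *  Restore the Task scheduler state saved by GateTask_enter() (sketch).
 */
Void GateTask_leave(GateTask_Object *obj, IArg key)
{
    (void)obj;                 /* no per-object state is needed to leave */
    Task_restore((UInt)key);
}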
/*
 *  ======== Task_exit ========
 */
Void Task_exit()
{
    UInt tskKey, hwiKey;
    Task_Object *tsk;
#ifndef ti_sysbios_knl_Task_DISABLE_ALL_HOOKS
    Int i;
#endif

    tsk = Task_self();

#ifndef ti_sysbios_knl_Task_DISABLE_ALL_HOOKS
    /*
     * Process Task_exit hooks.  Should be called outside the Task kernel.
     */
    for (i = 0; i < Task_hooks.length; i++) {
        if (Task_hooks.elem[i].exitFxn != NULL) {
            Task_hooks.elem[i].exitFxn(tsk);
        }
    }
#endif

    Log_write2(Task_LD_exit, (UArg)tsk, (UArg)tsk->fxn);

    tskKey = Task_disable();
    hwiKey = Hwi_disable();

    Task_blockI(tsk);

    tsk->mode = Task_Mode_TERMINATED;

    Task_processVitalTaskFlag(tsk);

    Hwi_restore(hwiKey);

    Queue_elemClear((Queue_Elem *)tsk);

    /* add to terminated task list if it was dynamically created */
    if (Task_deleteTerminatedTasks == TRUE) {
        Task_Handle dynTask;

        dynTask = Task_Object_first();

        while (dynTask) {
            if (tsk == dynTask) {
                tsk->readyQ = Task_Module_State_terminatedQ();
                Queue_put(tsk->readyQ, (Queue_Elem *)tsk);
                break;
            }
            else {
                dynTask = Task_Object_next(dynTask);
            }
        }
    }

    Task_restore(tskKey);
}
/*
 *  ======== Event_post ========
 */
Void Event_post(Event_Object *event, UInt eventId)
{
    UInt tskKey, hwiKey;
    Event_PendElem *elem;
    Queue_Handle pendQ;

    Assert_isTrue((eventId != 0), Event_A_nullEventId);

    Log_write3(Event_LM_post, (UArg)event, (UArg)event->postedEvents, (UArg)eventId);

    pendQ = Event_Instance_State_pendQ(event);

    /* atomically post this event */
    hwiKey = Hwi_disable();

    /* or in this eventId */
    event->postedEvents |= eventId;

    /* confirm that ANY tasks are pending on this event */
    if (Queue_empty(pendQ)) {
        Hwi_restore(hwiKey);
        return;
    }

    tskKey = Task_disable();

    /* examine pendElem on pendQ */
    elem = (Event_PendElem *)Queue_head(pendQ);

    /* check for match, consume matching eventIds if so. */
    elem->matchingEvents = Event_checkEvents(event, elem->andMask, elem->orMask);

    if (elem->matchingEvents != 0) {

        /* remove event elem from elem queue */
        Queue_remove((Queue_Elem *)elem);

        /* mark the Event as having been posted */
        elem->pendState = Event_PendState_POSTED;

        /* disable Clock object */
        if (BIOS_clockEnabled && (elem->tpElem.clock != NULL)) {
            Clock_stop(elem->tpElem.clock);
        }

        /* put task back into readyQ */
        Task_unblockI(elem->tpElem.task, hwiKey);
    }

    Hwi_restore(hwiKey);

    /* context switch may occur here */
    Task_restore(tskKey);
}
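For context, a caller-side sketch of how a task typically consumes events posted by Event_post(). The handle myEvent and the bit names are assumptions for illustration.
extern Event_Handle myEvent;    /* assumed to be created elsewhere */

#define EVT_RX  Event_Id_00
#define EVT_TX  Event_Id_01

Void consumerTaskFxn(UArg a0, UArg a1)
{
    UInt posted;

    for (;;) {
        /* block until at least one of the bits in the orMask is posted */
        posted = Event_pend(myEvent, Event_Id_NONE, EVT_RX | EVT_TX,
                            BIOS_WAIT_FOREVER);

        if (posted & EVT_RX) {
            /* handle receive work */
        }
        if (posted & EVT_TX) {
            /* handle transmit work */
        }
    }
}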
Example #8
/*
 *  ======== Task_sleep ========
 */
Void Task_sleep(UInt timeout)
{
    Task_PendElem elem;
    UInt hwiKey, tskKey;
    Clock_Struct clockStruct;

    if (timeout == BIOS_NO_WAIT) {
        return;
    }

    Assert_isTrue((timeout != BIOS_WAIT_FOREVER), Task_A_badTimeout);

    /* add Clock event if timeout is not FOREVER */
    if (BIOS_clockEnabled) {
        Clock_Params clockParams;
        Clock_Params_init(&clockParams);
        clockParams.arg = (UArg)&elem;
        clockParams.startFlag = FALSE;  /* will start when necessary, thank you */
        Clock_construct(&clockStruct, (Clock_FuncPtr)Task_sleepTimeout, timeout, &clockParams);
        elem.clock = Clock_handle(&clockStruct);
    }

    hwiKey = Hwi_disable();

    /* lock scheduler */
    tskKey = Task_disable();

    /* get task handle and block tsk */
    elem.task = Task_self();

    Task_blockI(elem.task);

    if (BIOS_clockEnabled) {
        Clock_startI(elem.clock);
    }

    /* Only needed for Task_delete() */
    Queue_elemClear(&elem.qElem);

    elem.task->pendElem = (Ptr)(&elem);

    Hwi_restore(hwiKey);

    Log_write3(Task_LM_sleep, (UArg)elem.task, (UArg)elem.task->fxn, 
               (UArg)timeout);

    Task_restore(tskKey);       /* the calling task will block here */

    /* deconstruct Clock if appropriate */
    if (BIOS_clockEnabled) {
        Clock_destruct(Clock_struct(elem.clock));
    }
}
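A trivial caller-side sketch: a periodic task built on Task_sleep(). The tick count and loop body are illustrative.
Void heartbeatTaskFxn(UArg a0, UArg a1)
{
    for (;;) {
        /* toggle an LED, kick a watchdog, etc. */
        Task_sleep(100);    /* block this task for 100 system ticks */
    }
}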
/*
 *  ======== Task_postInit ========
 *  Function to be called during module startup to complete the
 *  initialization of any statically created or constructed task.
 *  Initialize stack.
 *  Build Initial stack image.
 *  Add task to corresponding ready Queue.
 *
 *  returns (0) and a clean 'eb' on success
 *  returns (0) and an error in 'eb' if Task_SupportProxy_start() fails.
 *  returns (n), the number of successful createFxn() calls, and an error in
 *     'eb' if one of the createFxn() calls fails
 */
Int Task_postInit(Task_Object *tsk, Error_Block *eb)
{
    UInt tskKey, hwiKey;
    Queue_Handle readyQ;
#ifndef ti_sysbios_knl_Task_DISABLE_ALL_HOOKS
    Int i;
#endif

    tsk->context = Task_SupportProxy_start(tsk,
                (Task_SupportProxy_FuncPtr)Task_enter,
                (Task_SupportProxy_FuncPtr)Task_exit,
                eb);

    if (Error_check(eb)) {
        return (0);
    }

    tsk->mode = Task_Mode_READY;

    tsk->pendElem = NULL;

#ifndef ti_sysbios_knl_Task_DISABLE_ALL_HOOKS
    for (i = 0; i < Task_hooks.length; i++) {
        tsk->hookEnv[i] = (Ptr)0;
        if (Task_hooks.elem[i].createFxn != NULL) {
            Task_hooks.elem[i].createFxn(tsk, eb);

            if (Error_check(eb)) {
                return (i);
            }
        }
    }
#endif

    if (tsk->priority < 0) {
        tsk->mask = 0;
        tsk->readyQ = Task_Module_State_inactiveQ();
        Queue_put(tsk->readyQ, (Queue_Elem *)tsk);
    }
    else {
        tsk->mask = 1 << tsk->priority;
        readyQ = Queue_Object_get(Task_module->readyQ, tsk->priority);
        tsk->readyQ = readyQ;

        tskKey = Task_disable();
        hwiKey = Hwi_disable();
        Task_unblock(tsk);
        Hwi_restore(hwiKey);
        Task_restore(tskKey);
    }

    return (0);
}
Example #10
/*
 *  ======== pthread_setschedparam ========
 */
int pthread_setschedparam(pthread_t pthread, int policy,
        const struct sched_param *param)
{
    pthread_Obj        *thread = (pthread_Obj *)pthread;
    Task_Handle         task = thread->task;
    UInt                oldPri;
    int                 priority = param->sched_priority;
    UInt                key;
#if ti_sysbios_posix_Settings_supportsMutexPriority__D
    int                 maxPri;
#endif

    if ((priority >= Task_numPriorities) || ((priority == 0)) ||
            (priority < -1)) {
        /* Bad priority value */
        return (EINVAL);
    }

    key = Task_disable();

    oldPri = Task_getPri(task);
    thread->priority = priority;

#if ti_sysbios_posix_Settings_supportsMutexPriority__D
    /*
     *  If the thread is holding a PTHREAD_PRIO_PROTECT or
     *  PTHREAD_PRIO_INHERIT mutex and running at its ceiling, we don't
     *  want to set its priority to a lower value.  Instead, we save the
     *  new priority to set it to, once the mutexes of higher priority
     *  ceilings are released.
     */
    if (!Queue_empty(Queue_handle(&(thread->mutexList)))) {
        maxPri = _pthread_getMaxPrioCeiling(thread);

        if (priority > maxPri) {
            Task_setPri(task, priority);
        }
    }
    else {
        /* The thread owns no mutexes */
        oldPri = Task_setPri(task, priority);
    }
#else
    oldPri = Task_setPri(task, priority);
#endif
    Task_restore(key);

    /* Suppress warning about oldPri not being used. */
    (void)oldPri;

    return (0);
}
Example #11
/*
 *  ======== pthread_cancel ========
 *  The specification of this API is that it be used as a means for one thread
 *  to terminate the execution of another thread.  There is no mention of
 *  returning an error if the argument, pthread, is the same thread as the
 *  calling thread.
 */
int pthread_cancel(pthread_t pthread)
{
    pthread_Obj  *thread = (pthread_Obj *)pthread;
    UInt          key;

    /*
     *  Cancel the thread.  Only asynchronous cancellation is supported,
     *  since functions that would normally be cancellation points (e.g.,
     *  printf()), are not cancellation points for BIOS.
     */
    key = Task_disable();

    /* Indicate that cancellation is requested. */
    thread->cancelPending = 1;

    if (thread->cancelState == PTHREAD_CANCEL_ENABLE) {
        /* Set this task's priority to -1 to stop it from running. */
        Task_setPri(thread->task, -1);

        Task_restore(key);

        /* Pop and execute the cleanup handlers */
        while (thread->cleanupList != NULL) {
            _pthread_cleanup_pop(thread->cleanupList, 1);
        }

        /* Cleanup any pthread specific data */
        _pthread_removeThreadKeys(pthread);

        if (thread->detached) {
            /* Free memory */
#if ti_sysbios_posix_Settings_supportsMutexPriority__D
            Queue_destruct(&(thread->mutexList));
#endif
            Semaphore_destruct(&(thread->joinSem));
            Task_delete(&(thread->task));

            Memory_free(Task_Object_heap(), thread, sizeof(pthread_Obj));
        }
        else {
            /* pthread_join() will clean up. */
            thread->ret = PTHREAD_CANCELED;
            Semaphore_post(Semaphore_handle(&(thread->joinSem)));
        }
    }
    else {
        Task_restore(key);
    }

    return (0);
}
Example #12
/*
 *  ======== SemaphoreMP_post ========
 */
Void SemaphoreMP_post(SemaphoreMP_Object *obj)
{
    UInt tskKey;
    SemaphoreMP_PendElem *elem;
    IArg gateMPKey;
    Int status;

    /* Enter the gate */
    gateMPKey = GateMP_enter((GateMP_Handle)obj->gate);

    if (ListMP_empty((ListMP_Handle)obj->pendQ)) {
        if (obj->mode == SemaphoreMP_Mode_BINARY) {
            obj->attrs->count = 1;
        }
        else {
            obj->attrs->count++;
        }
        if (obj->cacheEnabled) {
            Cache_wbInv(obj->attrs, sizeof(SemaphoreMP_Attrs), Cache_Type_ALL,
                    TRUE);
        }
            
        /* Leave the gate */
        GateMP_leave((GateMP_Handle)obj->gate, gateMPKey);
        
        return;
    }

    /* lock task scheduler */
    tskKey = Task_disable();

    /* dequeue tsk from semaphore queue */
    elem = (SemaphoreMP_PendElem *)ListMP_getHead((ListMP_Handle)obj->pendQ);
    
    if (elem->procId != MultiProc_self()) {
        /* Unblock remote task */
        status = Notify_sendEvent(elem->procId, 0, SemaphoreMP_notifyEventId, 
                elem->task, TRUE);
        Assert_isTrue(status >= 0, ti_sdo_ipc_Ipc_A_internal);
    }
    else {
        /* put task back into readyQ */
        Task_unblock((Task_Handle)elem->task);
    }

    /* Leave the gate */
    GateMP_leave((GateMP_Handle)obj->gate, gateMPKey);

    Task_restore(tskKey);
}
Example #13
/*
 *  ======== pthread_once ========
 */
int pthread_once(pthread_once_t *once, void (*initFxn)(void))
{
    UInt    key;

    key = Task_disable();

    if (*once == PTHREAD_ONCE_INIT) {
        (*initFxn)();
        *once = ~PTHREAD_ONCE_INIT;
    }

    Task_restore(key);

    return (0);
}
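A usage sketch of the standard pattern this supports: several threads may race into ensure_init(), but module_init() runs exactly once. The names are illustrative.
#include <pthread.h>

static pthread_once_t init_once = PTHREAD_ONCE_INIT;

static void module_init(void)
{
    /* one-time initialization goes here */
}

void ensure_init(void)
{
    (void)pthread_once(&init_once, module_init);
}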
Example #14
/*
 *  ======== _pthread_runStub ========
 */
static void _pthread_runStub(UArg arg0, UArg arg1)
{
    UInt         key;
    Ptr          arg;
    pthread_Obj *thread = (pthread_Obj *)(xdc_uargToPtr(arg1));

    arg = Task_getEnv(thread->task);
    thread->ret = thread->fxn(arg);

    /* Pop and execute the cleanup handlers */
    while (thread->cleanupList != NULL) {
        _pthread_cleanup_pop(thread->cleanupList, 1);
    }

    /* Cleanup any pthread specific data */
    _pthread_removeThreadKeys((pthread_t)thread);

    key = Task_disable();

    if (!thread->detached) {
        Semaphore_post(Semaphore_handle(&(thread->joinSem)));

        /*
         * Set this task's priority to -1 to prevent it from being put
         * on the terminated queue (and deleted if Task.deleteTerminatedTasks
         * is true). pthread_join() will delete the Task object.
         */
        Task_setPri(thread->task, -1);
        Task_restore(key);
    }
    else {
        Task_restore(key);

        /* Free memory */
#if ti_sysbios_posix_Settings_supportsMutexPriority__D
        Queue_destruct(&(thread->mutexList));
#endif
        Semaphore_destruct(&(thread->joinSem));

        Memory_free(Task_Object_heap(), thread, sizeof(pthread_Obj));

        /* The system will have to clean up the Task object */
    }

    /* Task_exit() is called when returning from this function */
}
Example #15
/*
 *  ======== SemaphoreMP_cbFxn ========
 */
Void SemaphoreMP_cbFxn(UInt16 procId, UInt16 lineId, UInt32 eventId, 
                       UArg arg, UInt32 payload)
{
    Task_Handle task;
    UInt tskKey;
    
    task = (Task_Handle)payload;
    
    /* lock task scheduler */
    tskKey = Task_disable();
    
    /* put task back into readyQ */
    Task_unblock(task);
    
    /* Force the task scheduler to run */
    Task_restore(tskKey);
}
Example #16
/*
 *  ======== pthread_join ========
 *  Wait for thread to terminate.
 *
 *  If multiple threads simultaneously try to join with the same
 *  thread, the results are undefined.  We will return an error.
 *
 *  If the thread calling pthread_join() is canceled, then the target
 *  thread will remain joinable (i.e., it will not be detached).
 */
int pthread_join(pthread_t pthread, void **thread_return)
{
    pthread_Obj  *thread = (pthread_Obj *)pthread;
    UInt          key;

    key = Task_disable();

    if ((thread->joinThread != NULL) || (thread->detached != 0)) {
        /*
         *  Error - Another thread has already called pthread_join()
         *  for this thread, or the thread is in the detached state.
         */
        Task_restore(key);
        return (EINVAL);
    }

    if (pthread == pthread_self()) {
        Task_restore(key);
        return (EDEADLK);
    }

    /*
     *  Allow pthread_join() to be called from a BIOS Task.  If we
     *  set joinThread to pthread_self(), we could get NULL if the
     *  Task arg1 is 0.  All we need is a non-NULL value for joinThread.
     */
    thread->joinThread = Task_self();

    Task_restore(key);

    Semaphore_pend(Semaphore_handle(&(thread->joinSem)), BIOS_WAIT_FOREVER);

    if (thread_return) {
        *thread_return = thread->ret;
    }

#if ti_sysbios_posix_Settings_supportsMutexPriority__D
    Queue_destruct(&(thread->mutexList));
#endif
    Semaphore_destruct(&(thread->joinSem));

    Task_delete(&(thread->task));

    Memory_free(Task_Object_heap(), thread, sizeof(pthread_Obj));
    return (0);
}
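A caller-side sketch of the join pattern the code above implements: spawn a worker, then block until it returns and collect its return value. The names are illustrative.
#include <pthread.h>
#include <stddef.h>

static void *worker(void *arg)
{
    /* ... compute something ... */
    return arg;
}

int spawn_and_wait(void)
{
    pthread_t tid;
    void *result;

    if (pthread_create(&tid, NULL, worker, NULL) != 0) {
        return -1;
    }

    /* blocks on the worker's joinSem until worker() returns */
    return pthread_join(tid, &result);
}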
Example #17
/*
 *  ======== Semaphore_post ========
 */
Void Semaphore_post(Semaphore_Object *sem)
{
    UInt tskKey, hwiKey;
    Semaphore_PendElem *elem;
    Queue_Handle pendQ;

    /* Event_post will do a Log_write, should we do one here too? */
    Log_write2(Semaphore_LM_post, (UArg)sem, (UArg)sem->count);

    pendQ = Semaphore_Instance_State_pendQ(sem);

    hwiKey = Hwi_disable();

    if (Queue_empty(pendQ)) {
        if (((UInt)sem->mode & 0x1) != 0) {   /* if BINARY bit is set */
            sem->count = 1;
        }
        else {
            sem->count++;
            Assert_isTrue((sem->count != 0), Semaphore_A_overflow);
        }

        Hwi_restore(hwiKey);
        
        if (Semaphore_supportsEvents && (sem->event != NULL)) {
            Semaphore_eventPost(sem->event, sem->eventId);
        }
        return;
    }
    
    /* lock task scheduler */
    tskKey = Task_disable();

    /* dequeue tsk from semaphore queue */
    elem = (Semaphore_PendElem *)Queue_dequeue(pendQ);

    /* mark the Semaphore as having been posted */
    elem->pendState = Semaphore_PendState_POSTED;

    /* put task back into readyQ */
    Task_unblockI(elem->tpElem.task, hwiKey);

    Hwi_restore(hwiKey);

    Task_restore(tskKey);
}
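A caller-side sketch pairing Semaphore_pend() with the post path above; mySem is assumed to be a Semaphore_Handle created elsewhere.
extern Semaphore_Handle mySem;      /* assumed to be created elsewhere */

Void consumerTaskFxn(UArg a0, UArg a1)
{
    for (;;) {
        /* blocks until the count is nonzero or a Semaphore_post() unblocks us */
        if (Semaphore_pend(mySem, BIOS_WAIT_FOREVER)) {
            /* process one unit of work */
        }
    }
}

Void myHwiFxn(UArg arg)
{
    Semaphore_post(mySem);          /* may be called from Hwi, Swi, or Task */
}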
Example #18
0
void SB_callCallback(SB_State_Transition transition, SB_State state)
{
    UInt taskKey;
    SB_CallbackFunc *current = callbackTable.callbacks;

    if (current == NULL) {
        return;
    }

    // disable context switching for other tasks in this section
    taskKey = Task_disable();
    do {
        if (transition == current->transition && state == current->state &&
                NULL != current->function) {
            current->function(transition, state);
        }

        current = current->next;
    } while (current != NULL);

    // restore context switching for other tasks at this point
    Task_restore(taskKey);
}
/*
 *  ======== Task_yield ========
 */
Void Task_yield()
{
    UInt tskKey, hwiKey;

    tskKey = Task_disable();
    hwiKey = Hwi_disable();

    if (Task_module->curQ) {
        /* move current task to end of curQ */
        Queue_enqueue(Task_module->curQ,
            Queue_dequeue(Task_module->curQ));
    }
    Task_module->curQ = NULL;  /* force a Task_switch() */
    Task_module->workFlag = 1;

    Hwi_restore(hwiKey);

    Log_write3(Task_LM_yield, (UArg)Task_module->curTask, (UArg)(Task_module->curTask->fxn), (UArg)(BIOS_getThreadType()));

    Task_restore(tskKey);
}
/*
 *  ======== Task_deleteTerminatedTasksFunc ========
 */
Void Task_deleteTerminatedTasksFunc()
{
    UInt hwiKey, taskKey;
    Task_Handle tsk;

    taskKey = Task_disable();

    hwiKey = Hwi_disable();

    if (!Queue_empty(Task_Module_State_terminatedQ())) {
        tsk = Queue_head(Task_Module_State_terminatedQ());
        Hwi_restore(hwiKey);
        tsk->readyQ = NULL;
        Task_delete(&tsk);
    }
    else {
        Hwi_restore(hwiKey);
    }

    Task_restore(taskKey);
}
/*
 *  ======== PowerMSP432_schedulerDisable ========
 */
void PowerMSP432_schedulerDisable()
{
    PowerMSP432_taskKey = Task_disable();
    PowerMSP432_swiKey = Swi_disable();
}
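A minimal companion sketch (assumed, not copied from the driver): restore in the reverse order of the disables above, using the keys saved by PowerMSP432_schedulerDisable().
/*
 *  ======== PowerMSP432_schedulerRestore ========
 */
void PowerMSP432_schedulerRestore()
{
    Swi_restore(PowerMSP432_swiKey);
    Task_restore(PowerMSP432_taskKey);
}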
Example #22
/*
 *  ======== Power_sleepDSP ========
 */
Power_Status Power_sleepDSP(UInt sleepCode, UInt sleepArg, UInt notifyTimeout)
{
    Power_Status status = Power_SOK;
    Bool exitNow = FALSE;
    Power_Event preEvent;
    Power_Event postEvent;
    PMI_Sleep sleepMode;
    UInt taskKey;
    UInt swiKey;
    UInt hwiKey;

    /* first validate the sleep code */
    if ( (sleepCode != Power_STANDBY) &&
         (sleepCode != Power_SLEEP) &&
         (sleepCode != Power_DEEPSLEEP) ) {
        status = Power_ENOTIMPLEMENTED;
    }

    /* make sure sleep request doesn't violate a registered constraint */
    else if ( ( (sleepCode == Power_STANDBY) &&
                ((Power_module->disallowedSleepModes & Power_STANDBY) != 0) ) ||
              ( (sleepCode == Power_SLEEP) &&
                ((Power_module->disallowedSleepModes & Power_SLEEP) != 0) ) ||
              ( (sleepCode == Power_DEEPSLEEP) &&
               ((Power_module->disallowedSleepModes & Power_DEEPSLEEP) != 0))) {
        status = Power_ENOTSUPPORTED;
    }

    /* check for valid sleepArg */
    else if (sleepCode == Power_DEEPSLEEP) {
        if ((sleepArg != Power_EXTERNAL) && (sleepArg != Power_RTC_ALARM)) {
            status = Power_EINVALIDVALUE;
        }
    }

    if (status == Power_SOK) {

        /* make sure Power is not still busy with a previous transition */
        hwiKey = Hwi_disable();

        if (Power_module->busy == FALSE) {
            Power_module->busy = TRUE;
        }
        else {
            exitNow = TRUE;
        }

        Hwi_restore(hwiKey);

        if (exitNow == TRUE) {
            status = Power_EBUSY;
        }

        else {

            /* setup sleep vars */
            if (sleepCode == Power_STANDBY) {
                preEvent = Power_GOINGTOSTANDBY;
                postEvent = Power_AWAKEFROMSTANDBY;
                sleepMode = PMI_STANDBY;
            }
            else if (sleepCode == Power_SLEEP) {
                preEvent = Power_GOINGTOSLEEP;
                postEvent = Power_AWAKEFROMSLEEP;
                sleepMode = PMI_SLEEP;
            }
            else {
                preEvent = Power_GOINGTODEEPSLEEP;
                postEvent = Power_AWAKEFROMDEEPSLEEP;
                sleepMode = PMI_DEEPSLEEP;
            }

            /* disable Task scheduling; allow Swis and Hwis for notifications */
            taskKey = Task_disable();

            /* signal all clients registered for pre-sleep notification */
            status = Power_notify(preEvent, notifyTimeout, 
                Power_SigType_INTERNAL, NULL, NULL);

            /* check for timeout or any other error */
            if (status != Power_SOK) {
                Power_module->busy = FALSE;     /* clear busy */
                Task_restore(taskKey);          /* re-enable scheduler */
                return (status);
            }

            /* now disable Swi scheduling */
            swiKey = Swi_disable();

            /* start the sleep sequence */
            hwiKey = Hwi_disable();

            /* call to PMI to go to and wake from sleep... */
            PMI_sleepCPU(sleepMode, Power_module->currentConfig.scaleVoltage,
                (UInt) sleepArg);

            /* when we get here, the CPU has already processed the wakeup interrupt */

            /* restore the previous interrupt enable state */
            Hwi_restore(hwiKey);

            /* re-enable Swi scheduling */
            Swi_restore(swiKey);

            /* signal all clients registered for post-sleep notification */
            status = Power_notify(postEvent, notifyTimeout, 
                Power_SigType_INTERNAL, NULL, NULL);

            /* now clear the busy flag before re-enabling scheduler */
            Power_module->busy = FALSE;

            /* re-enable Task scheduling */
            Task_restore(taskKey);

            /* check for timeout or other notification error */
            if (status != Power_SOK) {
                return (status);
            }
        }
    }

    return (status);
}
Example #23
/*
 *  ======== Power_changeSetpoint ========
 *  Initiate a change to the F/V setpoint of the CPU, or the peripheral domain.
 *
 */
Power_Status Power_changeSetpoint(Power_Domain domain, UInt newSetpoint,
    UInt notifyTimeout)
{
    PSCL_Status statusPSCL = PSCL_CANNOT_CHANGE_SETPOINT;
    Power_Status status = Power_SOK;
    Power_Status statusNotifyCPU = Power_SOK;
    Power_Status statusNotifyPER = Power_SOK;
    Bool notifyCPU = FALSE;
    Bool notifyPER = FALSE;
    UInt willChange = FALSE;
    Bool proceed = FALSE;
    Types_FreqHz cpuFreq;
    UInt previousSPPER;
    UInt previousSP;
    UInt disallowed;
    PSCL_ClkID clk;
    UInt frequency;
    UInt voltage;
    UInt maskSP;
    UInt taskKey;
    UInt swiKey;
    UInt key;

    /* make sure V/F scaling is supported */
    if (!ti_sysbios_family_c674_Power_enableScaling) {
        status = Power_ENOTIMPLEMENTED;
    }

    /* check to make sure V/F scaling initialized OK */
    else if (Power_module->PSCLinitOK == FALSE) {
        status = Power_EINITFAILURE;
    }

    /* check that domain ID is valid */
    else if ((domain != Power_CPU) && (domain != Power_PER)) {
        status = Power_EINVALIDVALUE;
    }

    /* check if setpoint ID is out of range for the specified domain ID */
    else if (((domain == Power_CPU) && (newSetpoint > 
                 (Power_module->numSPCPU - 1))) ||
             ((domain == Power_PER) && (newSetpoint > 
                 (Power_module->numSPPER - 1)))) {
        status = Power_EOUTOFRANGE;
    }

    else {

        /* convert requested setpoint into a mask bit */
        maskSP = 1 << newSetpoint;

        /* go atomic */
        key = Hwi_disable();

        /* now check the requested SP against disallowed SP constraints */
        if (domain == Power_CPU) {
            disallowed = maskSP & Power_module->disallowedSetpointsCPU;
        }
        else {
            disallowed = maskSP & Power_module->disallowedSetpointsPER;
        }

        /* if the SP is *not* disallowed, check and set the 'busy' flag */
        if (disallowed == 0) {

            if (Power_module->busy == FALSE) {
                Power_module->busy = TRUE;
                proceed = TRUE;
            }
            else {
                status = Power_EBUSY;
            }
        }
        else {
            status = Power_ENOTSUPPORTED;
        }

        /* end ATOMIC */
        Hwi_restore(key);

        if (proceed == TRUE) {

            /* determine PSCL clock ID */
            clk = ((PSCL_ClkID)domain == Power_CPU) ? PSCL_CPU_CLK :
                PSCL_PER_CLK;

            /* disable Task scheduling; allow Swi & Hwi for completion */
            taskKey = Task_disable();

            /* set flag for domain to be notified for SP change */
            if (domain == Power_CPU) {
                notifyCPU = TRUE;
            }
            else {
                notifyPER = TRUE;
            }

            /* if voltage scaling enabled, call to PSCL to see if this setpoint
               change will result in a voltage change; if yes, then must notify
               for other clk domain too, as there will be a voltage scale even
               though that clk frequency isn't changing */
            if (Power_module->currentConfig.scaleVoltage == TRUE) {

                PSCL_queryWillChangeVoltage(clk, newSetpoint, &willChange);

                if (willChange == TRUE) {
                    if (domain == Power_CPU) {
                        notifyPER = TRUE;
                    }
                    else {
                        notifyCPU = TRUE;
                    }
                }
            }

            /* now send any pre-notifications for CPU domain */
            if (notifyCPU == TRUE) {

                /* set nextSP to indicate the pending setpoint */
                if (domain == Power_CPU) {
                    Power_module->nextSP = newSetpoint; /* yes, new setpoint */
                    previousSP = Power_module->currentSetpointCPU;
                }
                else {                               /* not new CPU setpoint */
                    Power_module->nextSP = Power_module->currentSetpointCPU; 
                    previousSP = Power_module->currentSetpointCPU;
                }

                /* notify clients registered for pre CPU SP notification */
                statusNotifyCPU = 
                    Power_notify(Power_PENDING_CPU_SETPOINTCHANGE, 
                    notifyTimeout, Power_SigType_INTERNAL, NULL, NULL);
             }

            /* now send any pre-notifications for PER domain */
            if ((statusNotifyCPU == Power_SOK) && (notifyPER == TRUE)) {

                /* set nextSPPER to indicate the pending setpoint */
                if (domain == Power_CPU) {           /* not new PER SP */
                    Power_module->nextSPPER = Power_module->currentSetpointPER;
                    previousSPPER = Power_module->currentSetpointPER;
                }
                else {
                    Power_module->nextSPPER = newSetpoint; /* yes, new PER SP */
                    previousSPPER = Power_module->currentSetpointPER;
                }

                /* notify clients registered for pre PER SP notification */
                statusNotifyPER = 
                    Power_notify(Power_PENDING_PER_SETPOINTCHANGE,
                    notifyTimeout, Power_SigType_INTERNAL, NULL, NULL);
            }


            /* if all 'pre' notifications succeeded... do the scaling op */
            if ((statusNotifyCPU == Power_SOK) &&
                (statusNotifyPER == Power_SOK)) {

                swiKey = Swi_disable();

                /* call to PSCL to change the setpoint */
                statusPSCL = PSCL_changeSetpoint(clk, newSetpoint,
                    Power_module->currentConfig.scaleVoltage,
                    Power_module->currentConfig.waitForVoltageScale,NULL,NULL);

                Swi_restore(swiKey);
            }

            /* if scaling operation successful... */
            if (statusPSCL == PSCL_OK) {

                /* update previous and current SP globals */
                if (notifyCPU == TRUE) {
                    Power_module->currentSetpointCPU = Power_module->nextSP;
                    Power_module->previousSP = previousSP;
                }
                if (notifyPER == TRUE) {
                    Power_module->currentSetpointPER = Power_module->nextSPPER;
                    Power_module->previousSPPER = previousSPPER;
                }

                /* if scaled the CPU frequency... tell BIOS about it */
                if (domain == Power_CPU) {
                    Power_getSetpointInfo(Power_CPU, newSetpoint, &frequency,
                        &voltage);
                    cpuFreq.lo = frequency * 1000; /* convert kHz to Hz */
                    cpuFreq.hi = 0;             
                    BIOS_setCpuFreq(&cpuFreq);
                }

                /* now notify post-notification clients */
                if (notifyCPU == TRUE) {

                    /* notify clients registered of post CPU SP change */
                    statusNotifyCPU = 
                        Power_notify(Power_DONE_CPU_SETPOINTCHANGE, 
                            notifyTimeout, Power_SigType_INTERNAL, NULL, NULL);
                }

                if ((statusNotifyCPU == Power_SOK) && (notifyPER == TRUE)) {

                    /* notify clients registered of post PER SP change */
                    statusNotifyPER = 
                        Power_notify(Power_DONE_PER_SETPOINTCHANGE,
                            notifyTimeout, Power_SigType_INTERNAL, NULL, NULL);
                }
            }

            /* done, so clear Power busy flag */
            Power_module->busy = FALSE;

            /* resume Tasking */
            Task_restore(taskKey);

            /* figure out return code */
            if (statusPSCL == PSCL_INVALID_SETPOINT) {
                status = Power_EOUTOFRANGE;  /* invalid SP according to PSCL */
            }
            else if ((statusNotifyCPU == Power_ETIMEOUT) ||
                     (statusNotifyPER == Power_ETIMEOUT) ) {
                status = Power_ETIMEOUT;     /* notification timeout */
            }
            else if ((status != Power_SOK) || (statusPSCL != PSCL_OK)) {
                status = Power_EFAIL;        /* convert to 'general failure' */
            }
        }
    }

    return (status);
}
/*
 *  ======== Power_sleep ========
 */
Power_Status Power_sleep(Power_SleepState sleepState, UArg arg0, UArg arg1)
{
    Power_Status status = Power_SOK;
    UInt xosc_hf_active = FALSE;
    Power_Event postEventLate;
    UInt32 poweredDomains = 0;
    Bool exitNow = FALSE;
    Power_Event preEvent;
    Power_Event postEvent;
    UInt32 constraints;
    Bool retainCache = FALSE;
    UInt32 modeVIMS;
    UInt taskKey;
    UInt swiKey;
    UInt hwiKey;

    /* first validate the sleep code */
    if ( sleepState != Power_STANDBY) {
        status = Power_EFAIL;
    }

    if (status == Power_SOK) {

        /* make sure Power is not still busy with a previous transition */
        hwiKey = Hwi_disable();

        if (Power_module->state == Power_ACTIVE) {

            /* set transition state to entering sleep */
            Power_module->state = Power_ENTERING_SLEEP;
        }
        else {
            exitNow = TRUE;
        }

        Hwi_restore(hwiKey);

        if (exitNow == TRUE) {
            status = Power_EBUSY;
        }

        else {

            /* setup sleep vars */
            if (sleepState == Power_STANDBY) {
                preEvent = Power_ENTERING_STANDBY;
                postEvent = Power_AWAKE_STANDBY;
                postEventLate = Power_AWAKE_STANDBY_LATE;
            }

            /* disable Task scheduling; allow Swis and Hwis for notifications */
            taskKey = Task_disable();

            /* signal all clients registered for pre-sleep notification */
            status = Power_notify(preEvent);

            /* check for any error */
            if (status != Power_SOK) {
                Power_module->state = Power_ACTIVE;
                Task_restore(taskKey);          /* re-enable scheduler */
                return (status);
            }

            /* now disable Swi scheduling */
            swiKey = Swi_disable();

            /* freeze the IOs on the boundary between MCU and AON */
            AONIOCFreezeEnable();

            /* if XOSC_HF is active, force it off */
            if(OSCClockSourceGet(OSC_SRC_CLK_HF) == OSC_XOSC_HF) {
                xosc_hf_active = TRUE;
                ti_sysbios_family_arm_cc26xx_Power_XOSC_HF(DISABLE);
            }

            /* allow AUX to power down */
            AONWUCAuxWakeupEvent(AONWUC_AUX_ALLOW_SLEEP);

            /* make sure writes take effect */
            SysCtrlAonSync();

            /* invoke specific sequences to activate sleep states... */

            if (sleepState == Power_STANDBY) {

                /* query and save domain states before powering them off */
                if (Power_getDependencyCount(DOMAIN_RFCORE)) {
                    poweredDomains |= PRCM_DOMAIN_RFCORE;
                }
                if (Power_getDependencyCount(DOMAIN_SERIAL)){
                    poweredDomains |= PRCM_DOMAIN_SERIAL;
                }
                if (Power_getDependencyCount(DOMAIN_PERIPH)) {
                    poweredDomains |= PRCM_DOMAIN_PERIPH;
                }

                /* gate running deep sleep clocks for Crypto and DMA */
                if (Power_getDependencyCount(PERIPH_CRYPTO)) {
                    PRCMPeripheralDeepSleepDisable(
                        ti_sysbios_family_arm_cc26xx_Power_db[
                            PERIPH_CRYPTO].driverlibID);
                }
                if (Power_getDependencyCount(PERIPH_UDMA)) {
                    PRCMPeripheralDeepSleepDisable(
                        ti_sysbios_family_arm_cc26xx_Power_db[
                            PERIPH_UDMA].driverlibID);
                }
                /* make sure clock settings take effect */
                PRCMLoadSet();

                /* request power off of domains in the MCU voltage domain */
                PRCMPowerDomainOff(poweredDomains | PRCM_DOMAIN_CPU);

                /* request uLDO during standby */
                PRCMMcuUldoConfigure(true);

                /* query constraints to determine if cache should be retained */
                constraints = Power_getConstraintInfo();
                if ((constraints & Power_SB_VIMS_CACHE_RETAIN) != 0) {
                    retainCache = TRUE;
                }

                /* if don't want retention in standby, disable it now ... */
                if (retainCache == FALSE) {
                    modeVIMS = VIMSModeGet(VIMS_BASE);
                    /* wait if invalidate in progress... */
                    while (modeVIMS == VIMS_MODE_CHANGING) {
                        modeVIMS = VIMSModeGet(VIMS_BASE);
                    }
                    PRCMCacheRetentionDisable();
                    VIMSModeSet(VIMS_BASE, VIMS_MODE_OFF);
                }

                /* setup recharge parameters */
                SysCtrlSetRechargeBeforePowerDown(XoscInHighPowerMode);

                /* make sure all writes have taken effect */
                SysCtrlAonSync();

                /* invoke deep sleep to go to STANDBY */
                PRCMDeepSleep();

                /* if didn't retain cache in standby, re-enable retention now */
                if (retainCache == FALSE) {
                    VIMSModeSet(VIMS_BASE, modeVIMS);
                    PRCMCacheRetentionEnable();
                }

                /* force power on of AUX to keep it on when system is not
                 * sleeping; this also counts as a write to the AON interface
                 * ensuring that a following sync of the AON interface will
                 * force an update of all registers
                 */
                AONWUCAuxWakeupEvent(AONWUC_AUX_WAKEUP);
                while(!(AONWUCPowerStatusGet() & AONWUC_AUX_POWER_ON)) {};

                /* if XOSC_HF was forced off above, initiate switch back */
                if (xosc_hf_active == TRUE) {
                    ti_sysbios_family_arm_cc26xx_Power_XOSC_HF(ENABLE);
                }

                /* restore power domain states in effect before standby */
                PRCMPowerDomainOn(poweredDomains);
                while (PRCMPowerDomainStatus(poweredDomains) !=
                    PRCM_DOMAIN_POWER_ON){};

                /* restore deep sleep clocks of Crypto and DMA */
                if (Power_getDependencyCount(PERIPH_CRYPTO)) {
                    PRCMPeripheralDeepSleepEnable(
                        ti_sysbios_family_arm_cc26xx_Power_db[
                            PERIPH_CRYPTO].driverlibID);
                }
                if (Power_getDependencyCount(PERIPH_UDMA)) {
                    PRCMPeripheralDeepSleepEnable(
                        ti_sysbios_family_arm_cc26xx_Power_db[
                            PERIPH_UDMA].driverlibID);
                }
                /* make sure clock settings take effect */
                PRCMLoadSet();
            }

            /* release request for uLDO */
            PRCMMcuUldoConfigure(false);

            /* set transition state to EXITING_SLEEP */
            Power_module->state = Power_EXITING_SLEEP;

            /*
             * signal clients registered for early post-sleep notification;
             * this should be used to initialize any timing critical or IO
             * dependent hardware
             */
            status = Power_notify(postEvent);

            /* disable IO freeze and ensure RTC shadow value is updated */
            AONIOCFreezeDisable();
            SysCtrlAonSync();

            /* re-enable interrupts */
            CPUcpsie();

            /* signal all clients registered for late post-sleep notification */
            status = Power_notify(postEventLate);

            /* now clear the transition state before re-enabling scheduler */
            Power_module->state = Power_ACTIVE;

            /* re-enable Swi scheduling */
            Swi_restore(swiKey);

            /* adjust recharge parameters */
            SysCtrlAdjustRechargeAfterPowerDown();

            /* re-enable Task scheduling */
            Task_restore(taskKey);

            /* check for any notification error */
            if (status != Power_SOK) {
                return (status);
            }
        }
    }

    return (status);
}
Example #25
/*
 *  ======== Power_suspend ========
 */
UInt Power_suspend(Power_Suspend level)
{
    Bool l1CacheEnabled;
    Bool l2CacheEnabled;
    UInt32 *wordPtr;
    UInt32 taskKey;
    UInt32 swiKey;
    UInt32 hwiKey;
    UInt32 reset;
    UInt32 tmp1;
    UInt32 tmp2;
    UInt32 i;

    /* disable interrupts */
    hwiKey = Hwi_disable();

    /* disable scheduling */
    taskKey = Task_disable();
    swiKey = Swi_disable();

    /* check Unicache state; set 'enabled' flags */
    l1CacheEnabled = Cache_cache.L1_CONFIG & 0x2;
    l2CacheEnabled = Cache_cache.L2_CONFIG & 0x2;

#if _VERBOSE_
    System_printf("Power_suspend\n");
    System_printf("    suspend level = 0x%x\n", level);
    System_printf("    subsystem context = 0x%x\n", &ssContext);
    System_printf("    CPU context = 0x%x\n", 
        &ti_sysbios_family_c64p_tesla_Power_cpuRegs);
    System_printf("    CPU sys regs = 0x%x\n", &ssContext.cpuSysRegs);
    System_printf("    INTC context = 0x%x\n", &ssContext.configINTC);
    System_printf("    SYSC context = 0x%x\n", &ssContext.configSYSC);
    System_printf("    AMMU context = 0x%x\n", &ssContext.configAMMU);
    System_printf("    EDMA context = 0x%x\n", &ssContext.configEDMA);
    System_printf("    TSC flag = 0x%x\n", &ssContext.tscRunning);
    System_printf("    L1 context = 0x%x\n", &ssContext.configL1);
    System_printf("    L1 enabled = 0x%x\n", l1CacheEnabled);
    System_printf("    L2 context = 0x%x\n", &ssContext.configL2);
    System_printf("    L2 enabled = 0x%x\n", l2CacheEnabled);
#endif

    /* = = = = = = = = */
    
    /* if HIBERNATE: save Tesla subsystem context ... */
    if (level == Power_Suspend_HIBERNATE) {

        /* save Unicache config context */
        ssContext.configL1.CONFIG = (UInt32) Cache_cache.L1_CONFIG;
        ssContext.configL1.OCP = (UInt32) Cache_cache.L1_OCP;
        ssContext.configL2.CONFIG = (UInt32) Cache_cache.L2_CONFIG;
        ssContext.configL2.OCP = (UInt32) Cache_cache.L2_OCP;

        /* = = = = = = = = */

        /* save AMMU context */
        for (i = 0; i < AMMU_numLargePages; i++) {
            ssContext.configAMMU.largeAddr[i] = 
                (UInt32) AMMU_mmu.LARGE_ADDR[i];
            ssContext.configAMMU.largePolicy[i] = 
                (UInt32) AMMU_mmu.LARGE_POLICY[i];
        }
        for (i = 0; i < AMMU_numMediumPages; i++) {
            ssContext.configAMMU.medAddr[i] = 
                (UInt32) AMMU_mmu.MEDIUM_ADDR[i];
            ssContext.configAMMU.medPolicy[i] = 
                (UInt32) AMMU_mmu.MEDIUM_POLICY[i];
        }
        for (i = 0; i < AMMU_numSmallPages; i++) {
            ssContext.configAMMU.smallAddr[i] = 
                (UInt32) AMMU_mmu.SMALL_ADDR[i];
            ssContext.configAMMU.smallPolicy[i] = 
                (UInt32) AMMU_mmu.SMALL_POLICY[i];
        }

        /* = = = = = = = = */

        /* save SYSC context */
        ssContext.configSYSC.SYSCONFIG = 
            REG((UInt32)Power_syscRegs + SYSCONFIG_REG_OFFSET);
        ssContext.configSYSC.VBUSM2OCP = 
            REG((UInt32)Power_syscRegs + VBUSM2OCP_REG_OFFSET);
        ssContext.configSYSC.EDMA = 
            REG((UInt32)Power_syscRegs + EDMA_REG_OFFSET);
        ssContext.configSYSC.CORE = 
            REG((UInt32)Power_syscRegs + CORE_REG_OFFSET);
        ssContext.configSYSC.IVA_ICTRL = 
            REG((UInt32)Power_syscRegs + IVA_ICTRL_REG_OFFSET);
        ssContext.configSYSC.IDLEDLY = 
            REG((UInt32)Power_syscRegs + IDLEDLY_REG_OFFSET);

        /* = = = = = = = = */

        /* save INTC context */
        ssContext.configINTC.EVTMASK0 = REG(EVTMASK0_REG);
        ssContext.configINTC.EVTMASK1 = REG(EVTMASK1_REG);
        ssContext.configINTC.EVTMASK2 = REG(EVTMASK2_REG);
        ssContext.configINTC.EVTMASK3 = REG(EVTMASK3_REG);
        ssContext.configINTC.EXPMASK0 = REG(EXPMASK0_REG);
        ssContext.configINTC.EXPMASK1 = REG(EXPMASK1_REG);
        ssContext.configINTC.EXPMASK2 = REG(EXPMASK2_REG);
        ssContext.configINTC.EXPMASK3 = REG(EXPMASK3_REG);
        ssContext.configINTC.INTMUX1 = REG(INTMUX1_REG);
        ssContext.configINTC.INTMUX2 = REG(INTMUX2_REG);
        ssContext.configINTC.INTMUX3 = REG(INTMUX3_REG);
        ssContext.configINTC.AEGMUX0 = REG(AEGMUX0_REG);
        ssContext.configINTC.AEGMUX1 = REG(AEGMUX1_REG);
        ssContext.configINTC.INTDMASK = REG(INTDMASK_REG);

        /* = = = = = = = = */

        /* save EDMA context */
 
        ssContext.configEDMA.CLKGDIS = 
            REG((UInt32)Power_tpccRegs + CLKGDIS_REG_OFFSET);

        /* save DMA chan to PARAM mapping registers */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + DCHMAP0_REG_OFFSET);
        for (i = 0; i < 64; i++) {      
            ssContext.configEDMA.DCHMAP[i] = *wordPtr++;
        }

        /* save QDMA chan to PARAM mapping registers */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + QCHMAP0_REG_OFFSET);
        for (i = 0; i < 8; i++) {       
            ssContext.configEDMA.QCHMAP[i] = *wordPtr++;
        }

        /* save DMA queue mapping registers */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + DMAQNUM0_REG_OFFSET);
        for (i = 0; i < 8; i++) {       
            ssContext.configEDMA.DMAQNUM[i] = *wordPtr++;
        }

        ssContext.configEDMA.QDMAQNUM = 
            REG((UInt32)Power_tpccRegs + QDMAQNUM_REG_OFFSET);
        ssContext.configEDMA.QUETCMAP = 
            REG((UInt32)Power_tpccRegs + QUETCMAP_REG_OFFSET);
        ssContext.configEDMA.QUEPRI = 
            REG((UInt32)Power_tpccRegs + QUEPRI_REG_OFFSET);

        /* save DMA and QDMA region access enable bits */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + DRAEM0_REG_OFFSET);
        for (i = 0; i < (8 * 3); i++) { 
            ssContext.configEDMA.regionAccessBits[i] = *wordPtr++;
        }

        ssContext.configEDMA.QWMTHRA = 
            REG((UInt32)Power_tpccRegs + QWMTHRA_REG_OFFSET);
        ssContext.configEDMA.AETCTL = 
            REG((UInt32)Power_tpccRegs + AETCTL_REG_OFFSET);
        ssContext.configEDMA.IER = 
            REG((UInt32)Power_tpccRegs + IER_REG_OFFSET);
        ssContext.configEDMA.IERH = 
            REG((UInt32)Power_tpccRegs + IERH_REG_OFFSET);
        ssContext.configEDMA.QEER = 
            REG((UInt32)Power_tpccRegs + QEER_REG_OFFSET);

        /* bulk save of all PaRAMs (8 regs * 128 PaRAMs) */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + OPT0_REG_OFFSET);
        for (i = 0; i < (8 * 128); i++) {       
            ssContext.configEDMA.PaRAMs[i] = *wordPtr++;
        }

        /* = = = = = = = = */

        /* save CPU control registers */
        ssContext.cpuSysRegs.AMR = AMR;
        ssContext.cpuSysRegs.CSR = CSR;
        ssContext.cpuSysRegs.IER = IER;
        ssContext.cpuSysRegs.ISTP = ISTP;
        ssContext.cpuSysRegs.IRP = IRP;
        ssContext.cpuSysRegs.SSR = SSR;
        ssContext.cpuSysRegs.GPLYB = GPLYB;
        ssContext.cpuSysRegs.GFPGFR = GFPGFR;
        ssContext.cpuSysRegs.TSR = TSR;
        ssContext.cpuSysRegs.ITSR = ITSR;
        ssContext.cpuSysRegs.IERR = IERR;

    }

    /* sample and set the 'TSC is running' state flag */
    tmp1 = TSCL;
    tmp2 = TSCL;
    if (tmp1 == tmp2) {
        ssContext.tscRunning = 0;
    }
    else {
        ssContext.tscRunning = 1;
    }

    /* if Unicache enabled, prepare for standby ... */
    if (l1CacheEnabled || l2CacheEnabled) {

        /* if HIBERNATE: write back all for L1 and L2 */
        if (level == Power_Suspend_HIBERNATE) { 
            Cache_wbAll();
        }
        /* else, retention, just clean the write buffers */
        else {
            Cache_wb(0, 0, Cache_Type_ALL, TRUE);/* start=end=0 -> clean bufs */
        }

        /* now bypass the caches... */
        if (l1CacheEnabled) {
            Cache_disable(Cache_Type_L1);
        }
        if (l2CacheEnabled) {
            Cache_disable(Cache_Type_L2);
        }
    }

    /* set reset-function-sampled 'doing a resume' flag */
    ti_sysbios_family_c64p_tesla_Power_doResume = 1;

    /* set the ready-to-standby flag (an FYI for the MPU) */
    ti_sysbios_family_c64p_tesla_Power_readyIdle = 1;

    /* setup PDC to put GEM into standby when execute IDLE */
    REG(PDCCMD_REG) = PDCCMD_STANDBY;
    REG(PDCCMD_REG);

    /* make function call to save child-preserved CPU regs and do standby ... */
    reset = ti_sysbios_family_c64p_tesla_Power_standby(
        &ti_sysbios_family_c64p_tesla_Power_cpuRegs);

    /* = = = = = = = = */

    /* NOTE: return here both when woke from IDLE, or resumed after reset */

    /* = = = = = = = = */

    /* note: this symbol is not used, but is defined for debug purposes only */
    asm(" .global ti_sysbios_family_c64p_tesla_Power_suspend_RESUME");
    asm("ti_sysbios_family_c64p_tesla_Power_suspend_RESUME:");

    /* if HIBERNATE and *did* reset: restore all context ... */
    if ((reset != 0) && (level == Power_Suspend_HIBERNATE)) {

        /* restore CPU control registers */
        AMR = ssContext.cpuSysRegs.AMR;
        CSR = ssContext.cpuSysRegs.CSR;
        IER = ssContext.cpuSysRegs.IER;
        ISTP = ssContext.cpuSysRegs.ISTP;
        IRP = ssContext.cpuSysRegs.IRP;
        SSR = ssContext.cpuSysRegs.SSR;
        GPLYB = ssContext.cpuSysRegs.GPLYB;
        GFPGFR = ssContext.cpuSysRegs.GFPGFR;
        TSR = ssContext.cpuSysRegs.TSR;
        ITSR = ssContext.cpuSysRegs.ITSR;
        IERR = ssContext.cpuSysRegs.IERR;

        /* = = = = = = = = */

        /* restore AMMU configuration */
        for (i = 0; i < AMMU_numLargePages; i++) {
            AMMU_mmu.LARGE_ADDR[i] = 
                (Char *) ssContext.configAMMU.largeAddr[i];
            AMMU_mmu.LARGE_POLICY[i] = 
                ssContext.configAMMU.largePolicy[i];
        }
        for (i = 0; i < AMMU_numMediumPages; i++) {
            AMMU_mmu.MEDIUM_ADDR[i] =
                (Char *) ssContext.configAMMU.medAddr[i];
            AMMU_mmu.MEDIUM_POLICY[i] =
                ssContext.configAMMU.medPolicy[i];
        }
        for (i = 0; i < AMMU_numSmallPages; i++) {
            AMMU_mmu.SMALL_ADDR[i] =
                (Char *) ssContext.configAMMU.smallAddr[i];
            AMMU_mmu.SMALL_POLICY[i] =
                ssContext.configAMMU.smallPolicy[i];
        }

        /* = = = = = = = = */

        /* restore Unicache config */
        Cache_cache.L1_OCP = ssContext.configL1.OCP;
        tmp1 = Cache_cache.L1_OCP;      /* read to ensure posted write done */
        Cache_cache.L1_CONFIG = ssContext.configL1.CONFIG;
        tmp1 = Cache_cache.L1_CONFIG;   /* read to ensure posted write done */
        Cache_cache.L2_OCP = ssContext.configL2.OCP;
        tmp1 = Cache_cache.L2_OCP;      /* read to ensure posted write done */
        Cache_cache.L2_CONFIG = ssContext.configL2.CONFIG;
        tmp1 = Cache_cache.L2_CONFIG;   /* read to ensure posted write done */

        /* = = = = = = = = */

        /* restore SYSC context */
        REG((UInt32)Power_syscRegs + SYSCONFIG_REG_OFFSET) =
            ssContext.configSYSC.SYSCONFIG;
        REG((UInt32)Power_syscRegs + VBUSM2OCP_REG_OFFSET) =
            ssContext.configSYSC.VBUSM2OCP;
        REG((UInt32)Power_syscRegs + EDMA_REG_OFFSET) =
            ssContext.configSYSC.EDMA;
        REG((UInt32)Power_syscRegs + CORE_REG_OFFSET) =
            ssContext.configSYSC.CORE;
        REG((UInt32)Power_syscRegs + IVA_ICTRL_REG_OFFSET) =
            ssContext.configSYSC.IVA_ICTRL;
        REG((UInt32)Power_syscRegs + IDLEDLY_REG_OFFSET) =
            ssContext.configSYSC.IDLEDLY;

        /* = = = = = = = = */

        /* restore INTC context */
        REG(EVTMASK0_REG) = ssContext.configINTC.EVTMASK0;
        REG(EVTMASK1_REG) = ssContext.configINTC.EVTMASK1;
        REG(EVTMASK2_REG) = ssContext.configINTC.EVTMASK2;
        REG(EVTMASK3_REG) = ssContext.configINTC.EVTMASK3;
        REG(EXPMASK0_REG) = ssContext.configINTC.EXPMASK0;
        REG(EXPMASK1_REG) = ssContext.configINTC.EXPMASK1;
        REG(EXPMASK2_REG) = ssContext.configINTC.EXPMASK2;
        REG(EXPMASK3_REG) = ssContext.configINTC.EXPMASK3;
        REG(INTMUX1_REG) = ssContext.configINTC.INTMUX1;
        REG(INTMUX2_REG) = ssContext.configINTC.INTMUX2;
        REG(INTMUX3_REG) = ssContext.configINTC.INTMUX3;
        REG(AEGMUX0_REG) = ssContext.configINTC.AEGMUX0;
        REG(AEGMUX1_REG) = ssContext.configINTC.AEGMUX1;
        REG(INTDMASK_REG) = ssContext.configINTC.INTDMASK;

        /* = = = = = = = = */

        /* restore EDMA context */

        REG((UInt32)Power_tpccRegs + CLKGDIS_REG_OFFSET) =
            ssContext.configEDMA.CLKGDIS;

        /* restore DMA chan to PARAM mapping registers */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + DCHMAP0_REG_OFFSET);
        for (i = 0; i < 64; i++) {      
            *wordPtr++ = ssContext.configEDMA.DCHMAP[i];
        }

        /* restore QDMA chan to PARAM mapping registers */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + QCHMAP0_REG_OFFSET);
        for (i = 0; i < 8; i++) {       
            *wordPtr++ = ssContext.configEDMA.QCHMAP[i];
        }

        /* restore DMA queue mapping registers */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + DMAQNUM0_REG_OFFSET);
        for (i = 0; i < 8; i++) {       
            *wordPtr++ = ssContext.configEDMA.DMAQNUM[i];
        }

        REG((UInt32)Power_tpccRegs + QDMAQNUM_REG_OFFSET) =
            ssContext.configEDMA.QDMAQNUM;
        REG((UInt32)Power_tpccRegs + QUETCMAP_REG_OFFSET) =
            ssContext.configEDMA.QUETCMAP;
        REG((UInt32)Power_tpccRegs + QUEPRI_REG_OFFSET) =
            ssContext.configEDMA.QUEPRI;

        /* restore DMA and QDMA region access enable bits */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + DRAEM0_REG_OFFSET);
        for (i = 0; i < (8 * 3); i++) { 
            *wordPtr++ = ssContext.configEDMA.regionAccessBits[i];
        }

        REG((UInt32)Power_tpccRegs + QWMTHRA_REG_OFFSET) =
            ssContext.configEDMA.QWMTHRA;
        REG((UInt32)Power_tpccRegs + AETCTL_REG_OFFSET) =
            ssContext.configEDMA.AETCTL;

        /* restore interrupt enable registers (using IESR and IESRH) */
        REG((UInt32)Power_tpccRegs + IESR_REG_OFFSET) =
            ssContext.configEDMA.IER;
        REG((UInt32)Power_tpccRegs + IESRH_REG_OFFSET) =
            ssContext.configEDMA.IERH;

        /* restore QDMA event enable register (using QEESR) */
        REG((UInt32)Power_tpccRegs + QEESR_REG_OFFSET) = 
            ssContext.configEDMA.QEER;

        /* restore all PaRAMs (8 regs * 128 PaRAMs) */
        wordPtr = (UInt32 *)((UInt32)Power_tpccRegs + OPT0_REG_OFFSET);
        for (i = 0; i < (8 * 128); i++) {       
            *wordPtr++ = ssContext.configEDMA.PaRAMs[i];
        }

#if _VERBOSE_
        System_printf("hibernate: restored context\n");
#endif

    }

    /* Else: Restore caches to their pre-standby enable state.
     * Note: When coming out of retention reset, the caches will always be
     *       enabled, even if they weren't enabled before standby. So we need
     *       to disable them now if they weren't enabled when suspend was
     *       invoked.
     */
    else {
        /* restore the enabled state of the caches ... */
        if (l1CacheEnabled) {
            Cache_enable(Cache_Type_L1);
        }
        else {
            Cache_disable(Cache_Type_L1);
        }
        if (l2CacheEnabled) {
            Cache_enable(Cache_Type_L2);
        }
        else {
            Cache_disable(Cache_Type_L2);
        }
    }

#if _VERBOSE_
    System_printf("reset flag = %d\n", reset);
#endif

    /* if TSC was enabled on entry: start it again */
    if (ssContext.tscRunning == 1) {
        TSCL = 1;       /* write any value to TSC to kick start it */
    }

    /* clear the ready-to-standby flag */
    ti_sysbios_family_c64p_tesla_Power_readyIdle = 0;

    /* clear the reset-sampled 'do resume' flag */
    ti_sysbios_family_c64p_tesla_Power_doResume = 0;

    /* re-enable scheduling */
    Task_restore(taskKey);
    Swi_restore(swiKey);

    /* re-enable interrupts */
    Hwi_restore(hwiKey);

    return (reset);
}
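
A minimal sketch of the Hwi/Swi/Task locking pattern that frames the resume path above; only the restore half appears in the routine itself, so the function name and the ordering of the disable calls here are assumptions for illustration, not part of the module shown.

#include <xdc/std.h>
#include <ti/sysbios/hal/Hwi.h>
#include <ti/sysbios/knl/Swi.h>
#include <ti/sysbios/knl/Task.h>

Void criticalRegionSketch(Void)
{
    UInt hwiKey, swiKey, taskKey;

    /* lock out preemption before touching shared hardware state */
    hwiKey  = Hwi_disable();
    swiKey  = Swi_disable();
    taskKey = Task_disable();

    /* ... save or restore device context here ... */

    /* unlock in the same order used at the end of the routine above;
     * no context switch can occur until Hwi_restore() re-enables interrupts */
    Task_restore(taskKey);
    Swi_restore(swiKey);
    Hwi_restore(hwiKey);
}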
Example #26
0
/*
 *  ======== Semaphore_pend ========
 */
Bool Semaphore_pend(Semaphore_Object *sem, UInt timeout)
{
    UInt hwiKey, tskKey;
    Semaphore_PendElem elem;
    Queue_Handle pendQ;
    Clock_Struct clockStruct;

    Log_write3(Semaphore_LM_pend, (IArg)sem, (UArg)sem->count, (IArg)((Int)timeout));

    /*
     *  Consider fast path check for count != 0 here!!!
     */

    /* 
     *  elem is filled in entirely before interrupts are disabled.
     *  This significantly reduces latency.
     */

    /* add Clock event if timeout is not FOREVER nor NO_WAIT */
    if (BIOS_clockEnabled
            && (timeout != BIOS_WAIT_FOREVER) 
            && (timeout != BIOS_NO_WAIT)) {
        Clock_Params clockParams;
        Clock_Params_init(&clockParams);
        clockParams.arg = (UArg)&elem;
        clockParams.startFlag = FALSE;  /* will start when necessary */
        Clock_construct(&clockStruct, (Clock_FuncPtr)Semaphore_pendTimeout, 
                                        timeout, &clockParams);
        elem.tpElem.clock = Clock_handle(&clockStruct);
        elem.pendState = Semaphore_PendState_CLOCK_WAIT;
    }
    else {
        elem.tpElem.clock = NULL;
        elem.pendState = Semaphore_PendState_WAIT_FOREVER;
    }

    pendQ = Semaphore_Instance_State_pendQ(sem);

    hwiKey = Hwi_disable();

    /* check semaphore count */
    if (sem->count == 0) {

        if (timeout == BIOS_NO_WAIT) {
            Hwi_restore(hwiKey);
            return (FALSE);
        }

        Assert_isTrue((BIOS_getThreadType() == BIOS_ThreadType_Task),
                        Semaphore_A_badContext);

        /* lock task scheduler */
        tskKey = Task_disable();

        /* get task handle and block tsk */
        elem.tpElem.task = Task_self();

        /* leave a pointer for Task_delete() */
        elem.tpElem.task->pendElem = (Task_PendElem *)&(elem);

        Task_blockI(elem.tpElem.task);

        if (((UInt)sem->mode & 0x2) != 0) {    /* if PRIORITY bit is set */
            Semaphore_PendElem *tmpElem;
            Task_Handle tmpTask;
            UInt selfPri;
            
            tmpElem = Queue_head(pendQ);
            selfPri = Task_getPri(elem.tpElem.task);

            while (tmpElem != (Semaphore_PendElem *)pendQ) {
                tmpTask = tmpElem->tpElem.task;
                /* use '>' here so tasks wait FIFO for same priority */
                if (selfPri > Task_getPri(tmpTask)) {
                    break;
                }
                else {
                    tmpElem = Queue_next((Queue_Elem *)tmpElem);
                }
            }
            
            Queue_insert((Queue_Elem *)tmpElem, (Queue_Elem *)&elem);
        }
        else {      
            /* put task at the end of the pendQ */
            Queue_enqueue(pendQ, (Queue_Elem *)&elem);
        }

        /* start Clock if appropriate */
        if (BIOS_clockEnabled && 
                (elem.pendState == Semaphore_PendState_CLOCK_WAIT)) {
            Clock_startI(elem.tpElem.clock);
        }

        Hwi_restore(hwiKey);

        Task_restore(tskKey);   /* the calling task will block here */

        /* Here on unblock due to Semaphore_post or timeout */

        if (Semaphore_supportsEvents && (sem->event != NULL)) {
            /* synchronize Event state */
            hwiKey = Hwi_disable();
            Semaphore_eventSync(sem->event, sem->eventId, sem->count);
            Hwi_restore(hwiKey);
        }

        /* deconstruct Clock if appropriate */
        if (BIOS_clockEnabled && (elem.tpElem.clock != NULL)) {
            Clock_destruct(Clock_struct(elem.tpElem.clock));
        }

        elem.tpElem.task->pendElem = NULL;

        return ((Bool)(elem.pendState));
    }
    else {
        --sem->count;

        if (Semaphore_supportsEvents && (sem->event != NULL)) {
            /* synchronize Event state */
            Semaphore_eventSync(sem->event, sem->eventId, sem->count);
        }

        Hwi_restore(hwiKey);

        /* deconstruct Clock if appropriate */
        if (BIOS_clockEnabled && (elem.tpElem.clock != NULL)) {
            Clock_destruct(Clock_struct(elem.tpElem.clock));
        }

        return (TRUE);
    }
}
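
A hedged usage sketch for the pend call above: a consumer task waits with a finite timeout and checks the Bool return. The semaphore handle name, task name, and tick count are illustrative only.

#include <xdc/std.h>
#include <ti/sysbios/BIOS.h>
#include <ti/sysbios/knl/Semaphore.h>

/* assumed to be created elsewhere, e.g. with Semaphore_create(0, NULL, NULL) */
extern Semaphore_Handle dataSem;

Void consumerTask(UArg a0, UArg a1)
{
    for (;;) {
        /* wait up to 100 system ticks for a post; FALSE means timeout */
        if (Semaphore_pend(dataSem, 100)) {
            /* ... consume the data guarded by dataSem ... */
        }
        else {
            /* timed out: no post arrived within 100 ticks */
        }
    }
}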
/*
 *  ======== Event_pend ========
 */
UInt Event_pend(Event_Object *event, UInt andMask, UInt orMask, UInt32 timeout)
{
    UInt hwiKey, tskKey;
    Event_PendElem elem;
    UInt matchingEvents;
    Queue_Handle pendQ;
    Clock_Struct clockStruct;

    Assert_isTrue(((andMask | orMask) != 0), Event_A_nullEventMasks);

    Log_write5(Event_LM_pend, (UArg)event, (UArg)event->postedEvents,
                (UArg)andMask, (UArg)orMask, (IArg)((Int)timeout));

    /*
     * elem is filled in entirely before interrupts are disabled.
     * This significantly reduces latency at the potential cost of wasted time
     * if it turns out that there is already an event match.
     */

    /* add Clock event if timeout is not FOREVER nor NO_WAIT */
    if (BIOS_clockEnabled
            && (timeout != BIOS_WAIT_FOREVER)
            && (timeout != BIOS_NO_WAIT)) {
        Clock_addI(Clock_handle(&clockStruct), (Clock_FuncPtr)Event_pendTimeout, timeout, (UArg)&elem);
        elem.tpElem.clock = Clock_handle(&clockStruct);
        elem.pendState = Event_PendState_CLOCK_WAIT;
    }
    else {
        elem.tpElem.clock = NULL;
        elem.pendState = Event_PendState_WAIT_FOREVER;
    }

    /* fill in this task's Event_PendElem */
    elem.andMask = andMask;
    elem.orMask = orMask;

    pendQ = Event_Instance_State_pendQ(event);

    /* get task handle */
    elem.tpElem.task = Task_self();

    /* Atomically check for a match and block if none */
    hwiKey = Hwi_disable();

    /* check if events are already available */
    matchingEvents = Event_checkEvents(event, andMask, orMask);

    if (matchingEvents != 0) {
        /* remove Clock object from Clock Q */
        if (BIOS_clockEnabled && (elem.tpElem.clock != NULL)) {
            Clock_removeI(elem.tpElem.clock);
            elem.tpElem.clock = NULL;
        }

        Hwi_restore(hwiKey);

        return (matchingEvents);/* yes, then return with matching bits */
    }

    if (timeout == BIOS_NO_WAIT) {
        Hwi_restore(hwiKey);
        return (0);             /* No match, no wait */
    }

    Assert_isTrue((BIOS_getThreadType() == BIOS_ThreadType_Task),
                        Event_A_badContext);

    /*
     * Verify that THIS core hasn't already disabled the scheduler
     * so that the Task_restore() call below will indeed block
     */
    Assert_isTrue((Task_enabled()),
                        Event_A_pendTaskDisabled);

    /* lock scheduler */
    tskKey = Task_disable();

    /* only one Task allowed!!! */
    Assert_isTrue(Queue_empty(pendQ), Event_A_eventInUse);

    /* leave a pointer for Task_delete() */
    elem.tpElem.task->pendElem = (Task_PendElem *)&(elem);

    /* add it to Event_PendElem queue */
    Queue_enqueue(pendQ, (Queue_Elem *)&elem);

    Task_blockI(elem.tpElem.task);

    if (BIOS_clockEnabled &&
            (elem.pendState == Event_PendState_CLOCK_WAIT)) {
        Clock_startI(elem.tpElem.clock);
    }

    Hwi_restore(hwiKey);

    /* unlock task scheduler and block */
    Task_restore(tskKey);       /* the calling task will switch out here */

    /* Here on unblock due to Event_post or Event_pendTimeout */

    hwiKey = Hwi_disable();

    /* remove Clock object from Clock Q */
    if (BIOS_clockEnabled && (elem.tpElem.clock != NULL)) {
        Clock_removeI(elem.tpElem.clock);
        elem.tpElem.clock = NULL;
    }
        
    elem.tpElem.task->pendElem = NULL;

    Hwi_restore(hwiKey);
    
    /* event match? */
    if (elem.pendState != Event_PendState_TIMEOUT) {
        return (elem.matchingEvents);
    }
    else {
        return (0);             /* timeout */
    }
}
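
A hedged sketch of calling Event_pend() with an OR mask: the task wakes when either of two event IDs is posted and then checks which bits were returned. The handle name and ID choices are assumptions for the example.

#include <xdc/std.h>
#include <ti/sysbios/BIOS.h>
#include <ti/sysbios/knl/Event.h>

/* assumed to be created elsewhere with Event_create(NULL, NULL) */
extern Event_Handle ioEvent;

Void readerTask(UArg a0, UArg a1)
{
    UInt posted;

    for (;;) {
        /* block until Event_Id_00 or Event_Id_01 is posted (OR semantics) */
        posted = Event_pend(ioEvent, Event_Id_NONE,
                            Event_Id_00 + Event_Id_01, BIOS_WAIT_FOREVER);

        if (posted & Event_Id_00) {
            /* ... handle source 0 ... */
        }
        if (posted & Event_Id_01) {
            /* ... handle source 1 ... */
        }
    }
}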
/*
 *  ======== PowerMSP432_policyFxn ========
 */
void PowerMSP432_policyFxn()
{
    UInt constraints;
    Bool slept = false;
    UInt taskKey;
    UInt swiKey;

    /* disable interrupts */
    CPU_cpsid();

    /* disable Swi and Task scheduling */
    swiKey = Swi_disable();
    taskKey = Task_disable();

    /* query the declared constraints */
    constraints = Power_getConstraintMask();

    /*
     *  Check if can go to a sleep state, starting with the deepest level.
     *  Do not go to a sleep state if a lesser sleep state is disallowed.
     */

     /* check if can go to DEEPSLEEP_1 */
    if ((constraints & ((1 << PowerMSP432_DISALLOW_SLEEP) |
                        (1 << PowerMSP432_DISALLOW_DEEPSLEEP_0) |
                        (1 << PowerMSP432_DISALLOW_DEEPSLEEP_1))) == 0) {

        /* go to DEEPSLEEP_1 */
        Power_sleep(PowerMSP432_DEEPSLEEP_1);

        /* set 'slept' to true */
        slept = true;
    }

    /* if didn't sleep yet, now check if can go to DEEPSLEEP_0 */
    if (!slept && ((constraints & ((1 << PowerMSP432_DISALLOW_SLEEP) |
                        (1 << PowerMSP432_DISALLOW_DEEPSLEEP_0))) == 0)) {

        /* go to DEEPSLEEP_0 */
        Power_sleep(PowerMSP432_DEEPSLEEP_0);

        /* set 'slept' to true */
        slept = true;
    }

    /* if didn't sleep yet, now check if can go to SLEEP */
    if (!slept && ((constraints & (1 << PowerMSP432_DISALLOW_SLEEP)) == 0)) {

        /* go to SLEEP */
        Power_sleep(PowerMSP432_SLEEP);

        /* set 'slept' to true */
        slept = true;
    }

    /* re-enable interrupts */
    CPU_cpsie();

    /* restore Swi scheduling */
    Swi_restore(swiKey);

    /* restore Task scheduling */
    Task_restore(taskKey);

    /* if didn't sleep yet, just do WFI */
    if (!slept) {
        __asm(" wfi");
    }
}
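
A hedged sketch of how application code typically interacts with a policy like the one above: declaring and releasing a constraint so the policy skips the disallowed sleep state. The function name and surrounding work are placeholders, and the include paths assume the usual TI Drivers layout.

#include <ti/drivers/Power.h>
#include <ti/drivers/power/PowerMSP432.h>

void doLatencySensitiveWork(void)
{
    /* keep the policy out of DEEPSLEEP_1 while this work is in flight */
    Power_setConstraint(PowerMSP432_DISALLOW_DEEPSLEEP_1);

    /* ... perform the transfer or timing-critical operation ... */

    /* allow DEEPSLEEP_1 again once the work completes */
    Power_releaseConstraint(PowerMSP432_DISALLOW_DEEPSLEEP_1);
}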
Example #29
0
/*
 *  ======== SemaphoreMP_pend ========
 */
Bool SemaphoreMP_pend(SemaphoreMP_Object *obj)
{
    UInt tskKey;
    SemaphoreMP_PendElem *elem;
    IArg gateMPKey;

    /* Check for correct calling context */
    Assert_isTrue((BIOS_getThreadType() == BIOS_ThreadType_Task),
                    SemaphoreMP_A_badContext);

    elem = ThreadLocal_getspecific(SemaphoreMP_pendElemKey);
    if (elem == NULL) {
        /* 
         * Choose region zero (instead of the region that contains the 
         * SemaphoreMP) since region zero is always accessible by all cores
         */
        elem = Memory_alloc(SharedRegion_getHeap(0), 
                sizeof(SemaphoreMP_PendElem), 0, NULL);
        ThreadLocal_setspecific(SemaphoreMP_pendElemKey, elem);
    }
    
    /* Enter the gate */
    gateMPKey = GateMP_enter((GateMP_Handle)obj->gate);

    if (obj->cacheEnabled) {
        Cache_inv(obj->attrs, sizeof(SemaphoreMP_Attrs), Cache_Type_ALL, TRUE);
    }
    
    /* check semaphore count */
    if (obj->attrs->count == 0) {
        /* lock task scheduler */
        tskKey = Task_disable();

        /* get task handle and block tsk */
        elem->task = (Bits32)Task_self();
        elem->procId = MultiProc_self();
        
        Task_block((Task_Handle)elem->task);
        
        if (obj->cacheEnabled) {
            Cache_wbInv(elem, sizeof(SemaphoreMP_PendElem), Cache_Type_ALL, TRUE);
        }

        /* add it to pendQ */
        ListMP_putTail((ListMP_Handle)obj->pendQ, (ListMP_Elem *)elem);

        /* Leave the gate */
        GateMP_leave((GateMP_Handle)obj->gate, gateMPKey);

        Task_restore(tskKey);/* the calling task will switch out here */

        return (TRUE);
    }
    else {
        obj->attrs->count--;
        if (obj->cacheEnabled) {
            Cache_wbInv(obj->attrs, sizeof(SemaphoreMP_Attrs), Cache_Type_ALL, 
                    TRUE);
        }

        /* Leave the gate */
        GateMP_leave((GateMP_Handle)obj->gate, gateMPKey);

        return (TRUE);
    }
}
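
The pend above relies on the Task_disable()/Task_block()/Task_restore() idiom to park the calling task. A stripped-down, single-core, task-context-only sketch of that idiom follows; it is illustrative only and omits the shared-region, cache, and gate handling that SemaphoreMP requires, and all names are hypothetical.

#include <xdc/std.h>
#include <ti/sysbios/knl/Task.h>

/* simplistic single-waiter slot; a real primitive needs a queue and
 * Hwi protection if it can be signaled from interrupt context */
static volatile Task_Handle waiter = NULL;

Void simpleWait(Void)
{
    UInt tskKey;

    tskKey = Task_disable();       /* lock the scheduler */
    waiter = Task_self();
    Task_block(waiter);            /* mark this task blocked */
    Task_restore(tskKey);          /* the calling task switches out here */
}

Void simpleSignal(Void)
{
    UInt tskKey;

    tskKey = Task_disable();
    if (waiter != NULL) {
        Task_unblock(waiter);      /* make the waiter ready again */
        waiter = NULL;
    }
    Task_restore(tskKey);
}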
/*
 *  ======== PowerCC3200_sleepPolicy ========
 */
void PowerCC3200_sleepPolicy()
{
    bool returnFromSleep = FALSE;
    uint32_t constraintMask;
    uint32_t ticks;
    uint64_t time;
    uint64_t match;
    uint64_t curr;
    uint64_t remain;
    uint32_t taskKey;
    uint32_t swiKey;

    /* disable interrupts */
    CPUcpsid();

    /* disable Swi and Task scheduling */
    swiKey = Swi_disable();
    taskKey = Task_disable();

    /* query the declared constraints */
    constraintMask = Power_getConstraintMask();

    /*
     *  Do not go into LPDS if not allowed into DEEPSLEEP.
     *  Check to see if we can go into LPDS (lowest level sleep).
     *  If not allowed, then attempt to go into DEEPSLEEP.
     *  If not allowed in DEEPSLEEP then just SLEEP.
     */

     /* check if we are allowed to go to LPDS */
    if ((constraintMask &
        ((1 << PowerCC3200_DISALLOW_LPDS) |
        (1 << PowerCC3200_DISALLOW_DEEPSLEEP))) == 0) {
        /*
         * Check how many ticks until the next scheduled wakeup.  A value of
         * zero indicates a wakeup will occur as the current Clock tick period
         * expires; a very large value indicates a very large number of Clock
         * tick periods will occur before the next scheduled wakeup.
         */
        /* Get the time remaining for the RTC timer to expire */
        ticks = Clock_getTicksUntilInterrupt();

        /* convert ticks to microseconds */
        time = ticks * Clock_tickPeriod;

        /* check if can go to LPDS */
        if (time > Power_getTransitionLatency(PowerCC3200_LPDS, Power_TOTAL)) {
            /* get the current and match values for RTC */
            match = MAP_PRCMSlowClkCtrMatchGet();
            curr = MAP_PRCMSlowClkCtrGet();
            remain = match - curr -
                (((uint64_t)PowerCC3200_TOTALTIMELPDS * 32768) / 1000000);

            /* set the LPDS wakeup time interval */
            MAP_PRCMLPDSIntervalSet(remain);

            /* enable the wake source to be timer */
            MAP_PRCMLPDSWakeupSourceEnable(PRCM_LPDS_TIMER);

            /* go to LPDS mode */
            Power_sleep(PowerCC3200_LPDS);

            /* set 'returnFromSleep' to TRUE */
            returnFromSleep = TRUE;
        }
    }

    /* check if we are allowed to go to DEEPSLEEP */
    if (((constraintMask & (1 << PowerCC3200_DISALLOW_DEEPSLEEP)) == 0) &&
        (!returnFromSleep)) {
        /*
         * Check how many ticks until the next scheduled wakeup.  A value of
         * zero indicates a wakeup will occur as the current Clock tick period
         * expires; a very large value indicates a very large number of Clock
         * tick periods will occur before the next scheduled wakeup.
         */
        ticks = Clock_getTicksUntilInterrupt();

        /* convert ticks to microseconds */
        time = ticks * Clock_tickPeriod;

        /* check if can go to DEEPSLEEP */
        if (time > Power_getTransitionLatency(PowerCC3200_DEEPSLEEP,
            Power_TOTAL)) {
            /* schedule the wakeup event */
            ticks -= PowerCC3200_RESUMETIMEDEEPSLEEP / Clock_tickPeriod;
            Clock_setTimeout(Clock_handle(&clockObj), ticks);
            Clock_start(Clock_handle(&clockObj));

            /* go to DEEPSLEEP mode */
            Power_sleep(PowerCC3200_DEEPSLEEP);
            Clock_stop(Clock_handle(&clockObj));

            /* set 'returnFromSleep' to TRUE so we don't go to sleep (below) */
            returnFromSleep = TRUE;
        }
    }

    /* re-enable interrupts */
    CPUcpsie();

    /* restore Swi scheduling */
    Swi_restore(swiKey);

    /* restore Task scheduling */
    Task_restore(taskKey);

    /* sleep only if we are not returning from one of the sleep modes above */
    if (!(returnFromSleep)) {
        MAP_PRCMSleepEnter();
    }
}
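
A hedged companion sketch: registering a notification so driver code can re-initialize after the policy above wakes from LPDS. The callback and registration function names follow the TI Drivers Power API, but the event constant, callback signature, and client argument shown here are assumptions that should be checked against the device's Power include file.

#include <stdint.h>
#include <stddef.h>
#include <ti/drivers/Power.h>
#include <ti/drivers/power/PowerCC3200.h>

static Power_NotifyObj lpdsNotifyObj;

/* invoked by the Power manager after waking from LPDS; signature assumed to
 * match the SDK's Power_NotifyFxn typedef */
static int lpdsAwakeFxn(unsigned int eventType, uintptr_t eventArg,
                        uintptr_t clientArg)
{
    /* ... restore any peripheral state lost across LPDS ... */
    return (Power_NOTIFYDONE);
}

void registerLpdsNotify(void)
{
    Power_registerNotify(&lpdsNotifyObj, PowerCC3200_AWAKE_LPDS,
                         lpdsAwakeFxn, (uintptr_t)NULL);
}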