/*
 *  ======== Clock_Instance_init ========
 */
Void Clock_Instance_init(Clock_Object *obj, Clock_FuncPtr func, UInt timeout,
    const Clock_Params *params)
{
    Queue_Handle clockQ;

    Assert_isTrue((BIOS_clockEnabled == TRUE), Clock_A_clockDisabled);

    Assert_isTrue(((BIOS_getThreadType() != BIOS_ThreadType_Hwi) &&
                   (BIOS_getThreadType() != BIOS_ThreadType_Swi)),
                        Clock_A_badThreadType);

    Assert_isTrue(!(params->startFlag && (timeout == 0)), (Assert_Id)NULL);

    obj->timeout = timeout;
    obj->period = params->period;
    obj->fxn = func;
    obj->arg = params->arg;
    obj->active = FALSE;

    /*
     * Clock object is always placed on Clock work Q
     */
    clockQ = Clock_Module_State_clockQ();
    Queue_put(clockQ, &obj->elem);

    if (params->startFlag) {
        Clock_start(obj);
    }
}
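/*
 *  Usage sketch: assuming the standard SYS/BIOS Clock API, a periodic
 *  Clock instance could be created as shown below. The callback
 *  myClockFxn and the tick values are hypothetical. Note that setting
 *  startFlag to TRUE requires a non-zero timeout, matching the assert
 *  above.
 */
#include <ti/sysbios/knl/Clock.h>
#include <xdc/runtime/Error.h>

Void myClockFxn(UArg arg);              /* hypothetical callback */

Void startPeriodicClock(Void)
{
    Clock_Params clkParams;
    Clock_Handle clk;
    Error_Block eb;

    Error_init(&eb);
    Clock_Params_init(&clkParams);
    clkParams.period = 100;             /* reload period in Clock ticks */
    clkParams.startFlag = TRUE;         /* start right after create */
    clkParams.arg = (UArg)0;

    /* first expiration after 100 ticks, then every clkParams.period ticks */
    clk = Clock_create(myClockFxn, 100, &clkParams, &eb);
    if (clk == NULL) {
        /* handle allocation failure */
    }
}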
Example #2
/*
 *  ======== ti_sdo_ipc_Ipc_procSyncFinish ========
 *  Each processor writes its reserved memory address in SharedRegion 0
 *  to let the other processors know it has finished the process of
 *  synchronization.
 */
Int ti_sdo_ipc_Ipc_procSyncFinish(UInt16 remoteProcId, Ptr sharedAddr)
{
    volatile ti_sdo_ipc_Ipc_Reserved *self, *remote;
    SizeT reservedSize = ti_sdo_ipc_Ipc_reservedSizePerProc();
    Bool cacheEnabled = SharedRegion_isCacheEnabled(0);
    UInt oldPri;

    /* don't do any synchronization if procSync is NONE */
    if (ti_sdo_ipc_Ipc_procSync == ti_sdo_ipc_Ipc_ProcSync_NONE) {
        return (Ipc_S_SUCCESS);
    }

    /* determine self and remote pointers */
    if (MultiProc_self() < remoteProcId) {
        self = Ipc_getSlaveAddr(remoteProcId, sharedAddr);
        remote = ti_sdo_ipc_Ipc_getMasterAddr(remoteProcId, sharedAddr);
    }
    else {
        self = ti_sdo_ipc_Ipc_getMasterAddr(remoteProcId, sharedAddr);
        remote = Ipc_getSlaveAddr(remoteProcId, sharedAddr);
    }

    /* set my processor's reserved key to finish */
    self->startedKey = ti_sdo_ipc_Ipc_PROCSYNCFINISH;

    /* write back my processor's reserved key */
    if (cacheEnabled) {
        Cache_wbInv((Ptr)self, reservedSize, Cache_Type_ALL, TRUE);
    }

    /* if slave processor, wait for remote to finish sync */
    if (MultiProc_self() < remoteProcId) {
        if (BIOS_getThreadType() == BIOS_ThreadType_Task) {
            oldPri = Task_getPri(Task_self());
        }

        /* wait for remote processor to finish */
        while (remote->startedKey != ti_sdo_ipc_Ipc_PROCSYNCFINISH &&
                remote->startedKey != ti_sdo_ipc_Ipc_PROCSYNCDETACH) {
            /* Set self priority to 1 [lowest] and yield cpu */
            if (BIOS_getThreadType() == BIOS_ThreadType_Task) {
                Task_setPri(Task_self(), 1);
                Task_yield();
            }

            /* Check the remote's sync flag */
            if (cacheEnabled) {
                Cache_inv((Ptr)remote, reservedSize, Cache_Type_ALL, TRUE);
            }
        }

        /* Restore self priority */
        if (BIOS_getThreadType() == BIOS_ThreadType_Task) {
            Task_setPri(Task_self(), oldPri);
        }
    }

    return (Ipc_S_SUCCESS);
}
/*
 *  ======== GateTask_enter ========
 *  Return the key for Task_disable.
 */
IArg GateTask_enter(GateTask_Object *obj)
{
    /* make sure we're not calling from Hwi or Swi context */
    Assert_isTrue(((BIOS_getThreadType() == BIOS_ThreadType_Task) ||
                   (BIOS_getThreadType() == BIOS_ThreadType_Main)),
                   GateTask_A_badContext);

    return(Task_disable());
}
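/*
 *  Usage sketch: GateTask simply brackets a critical section with
 *  Task_disable()/Task_restore(), so no other Task can preempt while the
 *  gate is held. The module-level handle below is hypothetical and
 *  assumed to have been created elsewhere with GateTask_create().
 */
#include <ti/sysbios/gates/GateTask.h>

static GateTask_Handle taskGate;        /* hypothetical, created at init */

Void updateSharedState(Void)
{
    IArg key;

    key = GateTask_enter(taskGate);     /* disables the Task scheduler */
    /* ... modify data shared only among Tasks ... */
    GateTask_leave(taskGate, key);      /* restores the scheduler state */
}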
/*
 *  ======== Clock_Instance_finalize ========
 */
Void Clock_Instance_finalize(Clock_Object *obj)
{
    UInt key;

    Assert_isTrue(((BIOS_getThreadType() != BIOS_ThreadType_Hwi) &&
                   (BIOS_getThreadType() != BIOS_ThreadType_Swi)),
                        Clock_A_badThreadType);

    key = Hwi_disable();
    Queue_remove(&obj->elem);
    Hwi_restore(key);
}
Example #5
/*
 *  ======== GateMutex_enter ========
 *  Returns FIRST_ENTER when it gets the gate, returns NESTED_ENTER
 *  on nested calls.
 *
 *  During startup, Task_self() returns NULL, so every call to
 *  GateMutex_enter() looks like a nested call and nothing is done.
 *  The corresponding leave() calls do nothing either.
 */
IArg GateMutex_enter(GateMutex_Object *obj)
{
    Semaphore_Handle sem;

    /* make sure we're not calling from Hwi or Swi context */
    Assert_isTrue(((BIOS_getThreadType() == BIOS_ThreadType_Task) ||
                   (BIOS_getThreadType() == BIOS_ThreadType_Main)),
                   GateMutex_A_badContext);

    if (obj->owner != Task_self()) {
        sem = GateMutex_Instance_State_sem(obj);
        Semaphore_pend(sem, BIOS_WAIT_FOREVER);

        obj->owner = Task_self();

        return (FIRST_ENTER);
    }

    return (NESTED_ENTER);
}
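/*
 *  Usage sketch: the FIRST_ENTER/NESTED_ENTER keys let the owning Task
 *  nest enter() calls safely; only the key from the outermost enter()
 *  makes the matching leave() release the underlying semaphore. The
 *  handle and functions below are hypothetical.
 */
#include <ti/sysbios/gates/GateMutex.h>
#include <xdc/runtime/Error.h>

static GateMutex_Handle mutexGate;      /* hypothetical, created at init */

static Void inner(Void)
{
    IArg key = GateMutex_enter(mutexGate);  /* NESTED_ENTER: same owner */
    /* ... */
    GateMutex_leave(mutexGate, key);        /* no-op for a nested key */
}

Void outer(Void)
{
    IArg key = GateMutex_enter(mutexGate);  /* FIRST_ENTER: pends on sem */
    inner();
    GateMutex_leave(mutexGate, key);        /* releases the gate */
}

Void createGate(Void)
{
    Error_Block eb;

    Error_init(&eb);
    mutexGate = GateMutex_create(NULL, &eb);
}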
Example #6
/*
 *  ======== IpcPower_canHibernate ========
 */
Bool IpcPower_canHibernate()
{
#ifndef SMP
    if (IpcPower_hibLocks[0] || IpcPower_hibLocks[1]) {
#else
    if (IpcPower_hibLocks) {
#endif
        return (FALSE);
    }

    return (TRUE);
}

/*
 *  ======== IpcPower_registerCallback ========
 */
#define FXNN "IpcPower_registerCallback"
Int IpcPower_registerCallback(Int event, IpcPower_CallbackFuncPtr cbck,
                              Ptr data)
{
    IArg hwiKey;
    IpcPower_CallbackElem **list, *node;
    BIOS_ThreadType context = BIOS_getThreadType();

    if ((context != BIOS_ThreadType_Task) &&
        (context != BIOS_ThreadType_Main)) {
        Log_print0(Diags_ERROR, FXNN":Invalid context\n");
        return (IpcPower_E_FAIL);
    }

    list = &IpcPower_callbackList;

    /* Allocate and update new element */
    node = Memory_alloc(NULL, sizeof(IpcPower_CallbackElem), 0, NULL);
    if (node == NULL) {
        Log_print0(Diags_ERROR, FXNN":out of memory\n");
        return (IpcPower_E_MEMORY);
    }

    node->next     = NULL;
    node->event    = (IpcPower_Event) event;
    node->callback = cbck;
    node->data     = data;

    hwiKey = Hwi_disable();  /* begin: critical section */
    while (*list != NULL) {
        list = &(*list)->next;
    }
    *list = node;
    Hwi_restore(hwiKey);  /* end: critical section */

    return (IpcPower_S_SUCCESS);
}
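/*
 *  Usage sketch: registering a power-event callback from Task context.
 *  The event name IpcPower_Event_SUSPEND and the (Int event, Ptr data)
 *  callback signature are assumptions based on the typical
 *  ti/pm/IpcPower.h API.
 */
#include <ti/pm/IpcPower.h>

static Void mySuspendCb(Int event, Ptr data)   /* assumed signature */
{
    /* quiesce local peripherals before the core hibernates */
}

Void registerPowerHooks(Void)
{
    /* must be called from Task or Main context, as checked above */
    if (IpcPower_registerCallback(IpcPower_Event_SUSPEND, mySuspendCb, NULL)
            != IpcPower_S_SUCCESS) {
        /* registration failed: bad context or out of memory */
    }
}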
Example #7
static inline Void _FNPROF_STG1_exitHook(VoidFcnPtr fcnAddr)
{
  if ((FNPROF_GBLINFO.curTaskCtx != NULL) &&
      (BIOS_getThreadType() == BIOS_ThreadType_Task)) {
    UInt32 fxnIndex = _FNPROF_STG1_getFxnEntryIndex(fcnAddr);
    
    if (FNPROF_E_ENTRYNOTFOUND != fxnIndex) {
      //_FNPROF_STG1_fxnExitProcess(fxnIndex);
      FNPROF_GBLINFO.curTaskCtx->stg1Ctx->activeFxnIndex--;
      UTILS_assert(FNPROF_GBLINFO.curTaskCtx->stg1Ctx->activeFxnIndex >= 0);
      UTILS_assert(FNPROF_GBLINFO.curTaskCtx->stg1Ctx->activeFxns[
          FNPROF_GBLINFO.curTaskCtx->stg1Ctx->activeFxnIndex] == fxnIndex);
    }
  }
}
Example #8
static inline Void _FNPROF_STG1_entryHook (VoidFcnPtr fcnAddr)
{
  if ((FNPROF_GBLINFO.curTaskCtx != NULL) &&
      (BIOS_getThreadType() == BIOS_ThreadType_Task)) {
    UInt32 fxnIndex = _FNPROF_STG1_getFxnEntryIndex(fcnAddr);
    
    if (FNPROF_E_ENTRYNOTFOUND == fxnIndex) {
      fxnIndex = _FNPROF_STG1_getFreeFxnEntryIndex((VoidFcnPtr)(fcnAddr));
      UTILS_assert ((fxnIndex != FNPROF_E_ENTRYNOTFOUND));
      _FNPROF_STG1_initFxnProfEntry(fxnIndex, (VoidFcnPtr)(fcnAddr));
    }
    FNPROF_GBLINFO.curTaskCtx->stg1Ctx->activeFxns[
        FNPROF_GBLINFO.curTaskCtx->stg1Ctx->activeFxnIndex] = fxnIndex;
    FNPROF_GBLINFO.curTaskCtx->stg1Ctx->activeFxnIndex++;
    _FNPROF_STG1_fxnEntryProcess(fxnIndex);
  } /* if (FNPROF_GBLINFO.curTaskCtx != NULL) */
}
Example #9
/*
 *  ======== IpcPower_unregisterCallback ========
 */
#define FXNN "IpcPower_unregisterCallback"
Int IpcPower_unregisterCallback(Int event, IpcPower_CallbackFuncPtr cbck)
{
    IArg hwiKey;
    IpcPower_CallbackElem **list, *node;
    Int status = IpcPower_E_FAIL;
    BIOS_ThreadType context = BIOS_getThreadType();

    if ((context != BIOS_ThreadType_Task) &&
        (context != BIOS_ThreadType_Main)) {
        Log_print0(Diags_ERROR, FXNN":Invalid context\n");
        return (status);
    }


    list = &IpcPower_callbackList;
    node  = NULL;

    hwiKey = Hwi_disable();  /* begin: critical section */
    while (*list != NULL) {
        if ( ((*list)->callback == cbck) &&
             ((*list)->event == event) ) {
            node   = *list;
            *list  = (*list)->next;
            status = IpcPower_S_SUCCESS;
            break;
        }
        list = &(*list)->next;
    }
    Hwi_restore(hwiKey);  /* end: critical section */

    if (status == IpcPower_S_SUCCESS) {
        if (node != NULL) {
            Memory_free(NULL, node, sizeof(IpcPower_CallbackElem));
        }
        else {
            Log_print0(Diags_ERROR, FXNN":Invalid pointer\n");
        }
    }

    return (status);
}
Example #10
/*
 *  ======== GateMutexPri_enter ========
 *  Returns FIRST_ENTER when it gets the gate, returns NESTED_ENTER
 *  on nested calls.
 */
IArg GateMutexPri_enter(GateMutexPri_Object *obj)
{
    Task_Handle tsk;
    UInt tskKey;
    Int tskPri;
    Task_PendElem elem;
    Queue_Handle pendQ;

    /* make sure we're not calling from Hwi or Swi context */
    Assert_isTrue(((BIOS_getThreadType() == BIOS_ThreadType_Task) ||
                   (BIOS_getThreadType() == BIOS_ThreadType_Main)),
                   GateMutexPri_A_badContext);

    pendQ = GateMutexPri_Instance_State_pendQ(obj);
    
    tsk = Task_self();

    /* 
     * Prior to tasks starting, Task_self() will return NULL.
     * Simply return NESTED_ENTER here as, by definition, there is
     * only one thread running at this time.
     */
    if (tsk == NULL) {
        return (NESTED_ENTER);
    }

    tskPri = Task_getPri(tsk);
    
    /* 
     * Gate may only be called from task context, so Task_disable is sufficient
     * protection.
     */
    tskKey = Task_disable();
    
    /* If the gate is free, take it. */
    if (obj->mutexCnt == 1) {
        obj->mutexCnt = 0;
        obj->owner = tsk;
        obj->ownerOrigPri = tskPri;
   
        Task_restore(tskKey);
        return (FIRST_ENTER);
    }
   
    /* At this point, the gate is already held by someone. */
    
    /* If this is a nested call to gate... */
    if (obj->owner == tsk) {
        Task_restore(tskKey);
        return (NESTED_ENTER);
    }
    
    /*
     * Donate priority if necessary. The owner is guaranteed to have the
     * highest priority of anyone waiting on the gate, so just compare this
     * task's priority against the owner's. 
     */        
    if (tskPri > Task_getPri(obj->owner)) {
        Task_setPri(obj->owner, tskPri);
    }

    /* Remove tsk from ready list. */
    Task_block(tsk);

    elem.task = tsk;
    elem.clock = NULL;
    /* leave a pointer for Task_delete() */
    tsk->pendElem = &elem;

    /* Insert tsk in wait queue in order by priority (high pri at head) */
    GateMutexPri_insertPri(pendQ, (Queue_Elem *)&elem, tskPri);

    /* Task_restore will call the scheduler and this task will block. */
    Task_restore(tskKey);
    
    tsk->pendElem = NULL;

    /* 
     * At this point, tsk has the gate. Initialization of the gate is handled
     * by the previous owner's call to leave. 
     */
    return (FIRST_ENTER);
}
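/*
 *  Usage sketch: GateMutexPri adds priority donation on top of a mutex,
 *  so a low-priority owner is temporarily boosted while a higher-priority
 *  Task blocks in enter(). The gate handle below is hypothetical.
 */
#include <ti/sysbios/gates/GateMutexPri.h>

static GateMutexPri_Handle prioGate;    /* hypothetical shared gate */

Void accessSharedResource(Void)
{
    IArg key;

    key = GateMutexPri_enter(prioGate); /* may donate priority to owner */
    /* ... access the resource guarded by the gate ... */
    GateMutexPri_leave(prioGate, key);  /* restores the owner's priority */
}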
/*
 *  ======== Event_pend ========
 */
UInt Event_pend(Event_Object *event, UInt andMask, UInt orMask, UInt32 timeout)
{
    UInt hwiKey, tskKey;
    Event_PendElem elem;
    UInt matchingEvents;
    Queue_Handle pendQ;
    Clock_Struct clockStruct;

    Assert_isTrue(((andMask | orMask) != 0), Event_A_nullEventMasks);

    Log_write5(Event_LM_pend, (UArg)event, (UArg)event->postedEvents,
                (UArg)andMask, (UArg)orMask, (IArg)((Int)timeout));

    /*
     * elem is filled in entirely before interrupts are disabled.
     * This significantly reduces latency at the potential cost of wasted time
     * if it turns out that there is already an event match.
     */

    /* add Clock event if timeout is not FOREVER nor NO_WAIT */
    if (BIOS_clockEnabled
            && (timeout != BIOS_WAIT_FOREVER)
            && (timeout != BIOS_NO_WAIT)) {
        Clock_addI(Clock_handle(&clockStruct), (Clock_FuncPtr)Event_pendTimeout, timeout, (UArg)&elem);
        elem.tpElem.clock = Clock_handle(&clockStruct);
        elem.pendState = Event_PendState_CLOCK_WAIT;
    }
    else {
        elem.tpElem.clock = NULL;
        elem.pendState = Event_PendState_WAIT_FOREVER;
    }

    /* fill in this task's Event_PendElem */
    elem.andMask = andMask;
    elem.orMask = orMask;

    pendQ = Event_Instance_State_pendQ(event);

    /* get task handle */
    elem.tpElem.task = Task_self();

    /* Atomically check for a match and block if none */
    hwiKey = Hwi_disable();

    /* check if events are already available */
    matchingEvents = Event_checkEvents(event, andMask, orMask);

    if (matchingEvents != 0) {
        /* remove Clock object from Clock Q */
        if (BIOS_clockEnabled && (elem.tpElem.clock != NULL)) {
            Clock_removeI(elem.tpElem.clock);
            elem.tpElem.clock = NULL;
        }

        Hwi_restore(hwiKey);

        return (matchingEvents);/* yes, then return with matching bits */
    }

    if (timeout == BIOS_NO_WAIT) {
        Hwi_restore(hwiKey);
        return (0);             /* No match, no wait */
    }

    Assert_isTrue((BIOS_getThreadType() == BIOS_ThreadType_Task),
                        Event_A_badContext);

    /*
     * Verify that THIS core hasn't already disabled the scheduler
     * so that the Task_restore() call below will indeed block
     */
    Assert_isTrue((Task_enabled()),
                        Event_A_pendTaskDisabled);

    /* lock scheduler */
    tskKey = Task_disable();

    /* only one Task allowed!!! */
    Assert_isTrue(Queue_empty(pendQ), Event_A_eventInUse);

    /* leave a pointer for Task_delete() */
    elem.tpElem.task->pendElem = (Task_PendElem *)&(elem);

    /* add it to Event_PendElem queue */
    Queue_enqueue(pendQ, (Queue_Elem *)&elem);

    Task_blockI(elem.tpElem.task);

    if (BIOS_clockEnabled &&
            (elem.pendState == Event_PendState_CLOCK_WAIT)) {
        Clock_startI(elem.tpElem.clock);
    }

    Hwi_restore(hwiKey);

    /* unlock task scheduler and block */
    Task_restore(tskKey);       /* the calling task will switch out here */

    /* Here on unblock due to Event_post or Event_pendTimeout */

    hwiKey = Hwi_disable();

    /* remove Clock object from Clock Q */
    if (BIOS_clockEnabled && (elem.tpElem.clock != NULL)) {
        Clock_removeI(elem.tpElem.clock);
        elem.tpElem.clock = NULL;
    }
        
    elem.tpElem.task->pendElem = NULL;

    Hwi_restore(hwiKey);
    
    /* event match? */
    if (elem.pendState != Event_PendState_TIMEOUT) {
        return (elem.matchingEvents);
    }
    else {
        return (0);             /* timeout */
    }
}
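/*
 *  Usage sketch: only one Task may pend on a given Event instance (see
 *  the Queue_empty() assert above), but events may be posted from any
 *  context. The handle and functions below are hypothetical.
 */
#include <ti/sysbios/BIOS.h>
#include <ti/sysbios/knl/Event.h>

static Event_Handle evt;                /* hypothetical, created at init */

Void producerIsr(UArg arg)
{
    Event_post(evt, Event_Id_00);       /* may be called from Hwi/Swi */
}

Void consumerTask(UArg a0, UArg a1)
{
    UInt posted;

    for (;;) {
        /* wait forever for Event_Id_00 OR Event_Id_01 */
        posted = Event_pend(evt, Event_Id_NONE,
                            Event_Id_00 + Event_Id_01, BIOS_WAIT_FOREVER);
        if (posted & Event_Id_00) {
            /* handle event 0 */
        }
    }
}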
/*
 *  ======== Task_Instance_init ========
 */
Int Task_Instance_init(Task_Object *tsk, Task_FuncPtr fxn,
                const Task_Params *params, Error_Block *eb)
{
    Int align;
    Int status;
    SizeT stackSize;

    Assert_isTrue((BIOS_taskEnabled == TRUE), Task_A_taskDisabled);

    Assert_isTrue(((BIOS_getThreadType() != BIOS_ThreadType_Hwi) &&
                   (BIOS_getThreadType() != BIOS_ThreadType_Swi)), Task_A_badThreadType);

    Assert_isTrue((((params->priority == -1) || (params->priority > 0)) &&
                   (params->priority < (Int)Task_numPriorities)),
                   Task_A_badPriority);

    tsk->priority = params->priority;

    /* deal with undefined Task_Params defaults */
    if (params->stackHeap == NULL) {
        tsk->stackHeap = Task_defaultStackHeap;
    }
    else {
        tsk->stackHeap = params->stackHeap;
    }

    if (params->stackSize == 0) {
        stackSize = Task_defaultStackSize;
    }
    else {
        stackSize = params->stackSize;
    }

    align = Task_SupportProxy_getStackAlignment();

    if (params->stack != NULL) {
        if (align != 0) {
            UArg stackTemp;
            /* align low address to stackAlignment */
            stackTemp = (UArg)params->stack;
            stackTemp += align - 1;
            stackTemp &= -align;
            tsk->stack = (Ptr)xdc_uargToPtr(stackTemp);

            /* subtract what we removed from the low address from stackSize */
            tsk->stackSize = stackSize - (stackTemp - (UArg)params->stack);

            /* lower the high address as necessary */
            tsk->stackSize &= -align;
        }
        else {
            tsk->stack = params->stack;
            tsk->stackSize = stackSize;
        }
        /* tell Task_delete that stack was provided */
        tsk->stackHeap = (xdc_runtime_IHeap_Handle)(-1);
    }
    else {
        if (BIOS_runtimeCreatesEnabled) {
            if (align != 0) {
                /*
                 * round stackSize up to the nearest multiple of the alignment.
                 */
                tsk->stackSize = (stackSize + align - 1) & -align;
            }
            else {
                tsk->stackSize = stackSize;
            }

            tsk->stack = Memory_alloc(tsk->stackHeap, tsk->stackSize,
                                      align, eb);

            if (tsk->stack == NULL) {
                return (1);
            }
        }
    }

    tsk->fxn = fxn;
    tsk->arg0 = params->arg0;
    tsk->arg1 = params->arg1;

    tsk->env = params->env;

    tsk->vitalTaskFlag = params->vitalTaskFlag;
    if (tsk->vitalTaskFlag == TRUE) {
        Task_module->vitalTasks += 1;
    }

#ifndef ti_sysbios_knl_Task_DISABLE_ALL_HOOKS
    if (Task_hooks.length > 0) {
        tsk->hookEnv = Memory_calloc(Task_Object_heap(),
                Task_hooks.length * sizeof (Ptr), 0, eb);

        if (tsk->hookEnv == NULL) {
            return (2);
        }
    }
#endif

    status = Task_postInit(tsk, eb);

    if (Error_check(eb)) {
        return (3 + status);
    }

    return (0);   /* no failure states */
}
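/*
 *  Usage sketch: creating a Task with an explicit priority and stack
 *  size. Leaving params.stack at NULL makes Task_Instance_init() allocate
 *  the stack from Task_defaultStackHeap, as shown above. workerFxn is a
 *  hypothetical Task function.
 */
#include <ti/sysbios/knl/Task.h>
#include <xdc/runtime/Error.h>

Void workerFxn(UArg a0, UArg a1);       /* hypothetical task function */

Task_Handle spawnWorker(Void)
{
    Task_Params params;
    Error_Block eb;

    Error_init(&eb);
    Task_Params_init(&params);
    params.priority  = 3;               /* -1 (inactive) or 1..numPriorities-1 */
    params.stackSize = 2048;            /* 0 would select Task_defaultStackSize */
    params.arg0      = 42;

    return (Task_create(workerFxn, &params, &eb));
}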
/*
 *  ======== Task_yield ========
 */
Void Task_yield()
{
    UInt tskKey, hwiKey;

    tskKey = Task_disable();
    hwiKey = Hwi_disable();

    if (Task_module->curQ) {
        /* move current task to end of curQ */
        Queue_enqueue(Task_module->curQ,
            Queue_dequeue(Task_module->curQ));
    }
    Task_module->curQ = NULL;  /* force a Task_switch() */
    Task_module->workFlag = 1;

    Hwi_restore(hwiKey);

    Log_write3(Task_LM_yield, (UArg)Task_module->curTask, (UArg)(Task_module->curTask->fxn), (UArg)(BIOS_getThreadType()));

    Task_restore(tskKey);
}
/*
 *  ======== Task_Instance_finalize ========
 */
Void Task_Instance_finalize(Task_Object *tsk, Int status)
{
#ifndef ti_sysbios_knl_Task_DISABLE_ALL_HOOKS
    Int i, cnt;
#endif
    UInt taskKey, hwiKey;

    /*
     * Tasks can only be deleted from main and task threads.
     * Running Tasks cannot be deleted.
     */
    if (status == 0) {
        taskKey = Task_disable();

        /*
         * Bar users from calling Task_delete() on terminated tasks
         * if deleteTerminatedTasks is enabled.
         */
        if ((Task_deleteTerminatedTasks == TRUE)
             && (Task_getMode(tsk) == Task_Mode_TERMINATED)
             && (tsk->readyQ == Task_Module_State_terminatedQ())) {
            Error_raise(NULL, Task_E_deleteNotAllowed, tsk, 0);
        }

        Assert_isTrue((Task_getMode(tsk) != Task_Mode_RUNNING),
                        Task_A_badTaskState);

        Assert_isTrue((BIOS_getThreadType() == BIOS_ThreadType_Main) ||
                      (BIOS_getThreadType() == BIOS_ThreadType_Task),
                        Task_A_badThreadType);

        hwiKey = Hwi_disable();

        if (tsk->mode == Task_Mode_READY) {
            /* remove task from its ready list */
            Queue_remove((Queue_Elem *)tsk);
            /* if last task in readyQ, remove corresponding bit in curSet */
            if (Queue_empty(tsk->readyQ)) {
                Task_module->curSet &= ~tsk->mask;
            }
            
            /* 
             * if task was made ready by a pend timeout but hasn't run yet
             * then its clock object is still on the Clock service Q.
             */
            if (tsk->pendElem != NULL) {
                if (BIOS_clockEnabled && tsk->pendElem->clock) {
                    Clock_removeI(tsk->pendElem->clock);
                }
            }
        }

        if (tsk->mode == Task_Mode_BLOCKED) {
            Assert_isTrue(tsk->pendElem != NULL, Task_A_noPendElem);

            /* Seemingly redundant test in case Asserts are disabled */
            if (tsk->pendElem != NULL) {
                Queue_remove(&(tsk->pendElem->qElem));
                if (BIOS_clockEnabled && tsk->pendElem->clock) {
                    Clock_removeI(tsk->pendElem->clock);
                }
            }
        }

        if (tsk->mode == Task_Mode_TERMINATED) {
            /* remove task from terminated task list */
            Queue_remove((Queue_Elem *)tsk);
        }
        else {
            Task_processVitalTaskFlag(tsk);
        }

        Hwi_restore(hwiKey);

        Task_restore(taskKey);
    }

    /* return if failed before allocating stack */
    if (status == 1) {
        return;
    }

    if (BIOS_runtimeCreatesEnabled) {
        /* free stack if it was allocated dynamically */
        if (tsk->stackHeap != (xdc_runtime_IHeap_Handle)(-1)) {
            Memory_free(tsk->stackHeap, tsk->stack, tsk->stackSize);
        }
    }

    /* return if failed to allocate Hook Env */
    if (status == 2) {
        return;
    }

    /* status == 0 or status == 3 - in both cases create hook was called */

#ifndef ti_sysbios_knl_Task_DISABLE_ALL_HOOKS
    /* free any allocated Hook Envs */
    if (Task_hooks.length > 0) {
        if (status == 0) {
            cnt = Task_hooks.length;
        }
        else {
            cnt = status - 3;   /* # successful createFxn() calls */
        }

        /*
         * only call deleteFxn() if createFxn() was successful
         */
        for (i = 0; i < cnt; i++) {
            if (Task_hooks.elem[i].deleteFxn != NULL) {
                Task_hooks.elem[i].deleteFxn(tsk);
            }
        }

        Memory_free(Task_Object_heap(), tsk->hookEnv,
                Task_hooks.length * sizeof (Ptr));
    }
#endif
}
Example #15
/*
 *  ======== SemaphoreMP_pend ========
 */
Bool SemaphoreMP_pend(SemaphoreMP_Object *obj)
{
    UInt tskKey;
    SemaphoreMP_PendElem *elem;
    IArg gateMPKey;

    /* Check for correct calling context */
    Assert_isTrue((BIOS_getThreadType() == BIOS_ThreadType_Task),
                    SemaphoreMP_A_badContext);

    elem = ThreadLocal_getspecific(SemaphoreMP_pendElemKey);
    if (elem == NULL) {
        /* 
         * Choose region zero (instead of the region that contains the 
         * SemaphoreMP) since region zero is always accessible by all cores
         */
        elem = Memory_alloc(SharedRegion_getHeap(0), 
                sizeof(SemaphoreMP_PendElem), 0, NULL);
        ThreadLocal_setspecific(SemaphoreMP_pendElemKey, elem);
    }
    
    /* Enter the gate */
    gateMPKey = GateMP_enter((GateMP_Handle)obj->gate);

    if (obj->cacheEnabled) {
        Cache_inv(obj->attrs, sizeof(SemaphoreMP_Attrs), Cache_Type_ALL, TRUE);
    }
    
    /* check semaphore count */
    if (obj->attrs->count == 0) {
        /* lock task scheduler */
        tskKey = Task_disable();

        /* get task handle and block tsk */
        elem->task = (Bits32)Task_self();
        elem->procId = MultiProc_self();
        
        Task_block((Task_Handle)elem->task);
        
        if (obj->cacheEnabled) {
            Cache_wbInv(elem, sizeof(SemaphoreMP_PendElem), Cache_Type_ALL, TRUE);
        }

        /* add it to pendQ */
        ListMP_putTail((ListMP_Handle)obj->pendQ, (ListMP_Elem *)elem);

        /* Leave the gate */
        GateMP_leave((GateMP_Handle)obj->gate, gateMPKey);

        Task_restore(tskKey);/* the calling task will switch out here */

        return (TRUE);
    }
    else {
        obj->attrs->count--;
        if (obj->cacheEnabled) {
            Cache_wbInv(obj->attrs, sizeof(SemaphoreMP_Attrs), Cache_Type_ALL, 
                    TRUE);
        }

        /* Leave the gate */
        GateMP_leave((GateMP_Handle)obj->gate, gateMPKey);

        return (TRUE);
    }
}
Example #16
/*
 *  ======== Semaphore_pend ========
 */
Bool Semaphore_pend(Semaphore_Object *sem, UInt timeout)
{
    UInt hwiKey, tskKey;
    Semaphore_PendElem elem;
    Queue_Handle pendQ;
    Clock_Struct clockStruct;

    Log_write3(Semaphore_LM_pend, (IArg)sem, (UArg)sem->count, (IArg)((Int)timeout));

    /*
     *  Consider fast path check for count != 0 here!!!
     */

    /* 
     *  elem is filled in entirely before interrupts are disabled.
     *  This significantly reduces latency.
     */

    /* add Clock event if timeout is not FOREVER nor NO_WAIT */
    if (BIOS_clockEnabled
            && (timeout != BIOS_WAIT_FOREVER) 
            && (timeout != BIOS_NO_WAIT)) {
        Clock_Params clockParams;
        Clock_Params_init(&clockParams);
        clockParams.arg = (UArg)&elem;
        clockParams.startFlag = FALSE;  /* will start when necessary, thank you */
        Clock_construct(&clockStruct, (Clock_FuncPtr)Semaphore_pendTimeout, 
                                        timeout, &clockParams);
        elem.tpElem.clock = Clock_handle(&clockStruct);
        elem.pendState = Semaphore_PendState_CLOCK_WAIT;
    }
    else {
        elem.tpElem.clock = NULL;
        elem.pendState = Semaphore_PendState_WAIT_FOREVER;
    }

    pendQ = Semaphore_Instance_State_pendQ(sem);

    hwiKey = Hwi_disable();

    /* check semaphore count */
    if (sem->count == 0) {

        if (timeout == BIOS_NO_WAIT) {
            Hwi_restore(hwiKey);
            return (FALSE);
        }

        Assert_isTrue((BIOS_getThreadType() == BIOS_ThreadType_Task),
                        Semaphore_A_badContext);

        /* lock task scheduler */
        tskKey = Task_disable();

        /* get task handle and block tsk */
        elem.tpElem.task = Task_self();

        /* leave a pointer for Task_delete() */
        elem.tpElem.task->pendElem = (Task_PendElem *)&(elem);

        Task_blockI(elem.tpElem.task);

        if (((UInt)sem->mode & 0x2) != 0) {    /* if PRIORITY bit is set */
            Semaphore_PendElem *tmpElem;
            Task_Handle tmpTask;
            UInt selfPri;
            
            tmpElem = Queue_head(pendQ);
            selfPri = Task_getPri(elem.tpElem.task);

            while (tmpElem != (Semaphore_PendElem *)pendQ) {
                tmpTask = tmpElem->tpElem.task;
                /* use '>' here so tasks wait FIFO for same priority */
                if (selfPri > Task_getPri(tmpTask)) {
                    break;
                }
                else {
                    tmpElem = Queue_next((Queue_Elem *)tmpElem);
                }
            }
            
            Queue_insert((Queue_Elem *)tmpElem, (Queue_Elem *)&elem);
        }
        else {      
            /* put task at the end of the pendQ */
            Queue_enqueue(pendQ, (Queue_Elem *)&elem);
        }

        /* start Clock if appropriate */
        if (BIOS_clockEnabled && 
                (elem.pendState == Semaphore_PendState_CLOCK_WAIT)) {
            Clock_startI(elem.tpElem.clock);
        }

        Hwi_restore(hwiKey);

        Task_restore(tskKey);   /* the calling task will block here */

        /* Here on unblock due to Semaphore_post or timeout */

        if (Semaphore_supportsEvents && (sem->event != NULL)) {
            /* synchronize Event state */
            hwiKey = Hwi_disable();
            Semaphore_eventSync(sem->event, sem->eventId, sem->count);
            Hwi_restore(hwiKey);
        }

        /* deconstruct Clock if appropriate */
        if (BIOS_clockEnabled && (elem.tpElem.clock != NULL)) {
            Clock_destruct(Clock_struct(elem.tpElem.clock));
        }

        elem.tpElem.task->pendElem = NULL;

        return ((Bool)(elem.pendState));
    }
    else {
        --sem->count;

        if (Semaphore_supportsEvents && (sem->event != NULL)) {
            /* synchronize Event state */
            Semaphore_eventSync(sem->event, sem->eventId, sem->count);
        }

        Hwi_restore(hwiKey);

        /* deconstruct Clock if appropriate */
        if (BIOS_clockEnabled && (elem.tpElem.clock != NULL)) {
            Clock_destruct(Clock_struct(elem.tpElem.clock));
        }

        return (TRUE);
    }
}
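/*
 *  Usage sketch: a counting semaphore posted from an ISR and pended on by
 *  a Task with a finite timeout, which exercises the Clock-based timeout
 *  path constructed above. The handle and functions are hypothetical.
 */
#include <ti/sysbios/BIOS.h>
#include <ti/sysbios/knl/Semaphore.h>

static Semaphore_Handle workSem;        /* hypothetical, created at init */

Void myIsr(UArg arg)
{
    Semaphore_post(workSem);            /* post is allowed from any context */
}

Void workerLoop(UArg a0, UArg a1)
{
    for (;;) {
        /* block for up to 100 ticks; FALSE means the Clock timed out */
        if (Semaphore_pend(workSem, 100)) {
            /* process the work item signalled by the ISR */
        }
        else {
            /* timeout: do periodic housekeeping */
        }
    }
}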
Example #17
/*
 *  ======== Task_Instance_finalize ========
 *  free stack if alloced during create
 */
Void Task_Instance_finalize(Task_Object *tsk, Int status)
{
    Int i, cnt;

    /* 
     * Tasks can only be deleted from main and task threads.
     * Tasks can only be deleted when they are in one of these states:
     *  Task_Mode_TERMINATED
     *  Task_Mode_BLOCKED
     *  Task_Mode_READY (and not the currently running Task)
     */
    if (status == 0) {
        Assert_isTrue((tsk->mode == Task_Mode_TERMINATED) ||
                      (tsk->mode == Task_Mode_BLOCKED) ||
                      ((tsk->mode == Task_Mode_READY) && (tsk != Task_self())),
                        Task_A_badTaskState);

        Assert_isTrue((BIOS_getThreadType() == BIOS_ThreadType_Main) ||
                      (BIOS_getThreadType() == BIOS_ThreadType_Task), 
                        Task_A_badThreadType);

        if (tsk->mode == Task_Mode_READY) {
            /* remove task from its ready list */
            Queue_remove((Queue_Elem *)tsk);
            /* if last task in readyQ, remove corresponding bit in curSet */
            if (Queue_empty(tsk->readyQ)) {
                Task_module->curSet &= ~tsk->mask;
            }
        }

        if (tsk->mode == Task_Mode_BLOCKED) {
            Assert_isTrue(tsk->pendElem != NULL, Task_A_noPendElem);

            if (tsk->pendElem != NULL) {
                Queue_remove(&(tsk->pendElem->qElem));
                if (tsk->pendElem->clock) {
                    Clock_destruct(Clock_struct(tsk->pendElem->clock));
                }
            }
        }

        if (tsk->mode == Task_Mode_TERMINATED) {
            /* remove task from terminated task list */
            Queue_remove((Queue_Elem *)tsk);
        }

    }

    /* return if failed before allocating stack */
    if (status == 1) {
        return;
    }

    /* free stack if it was allocated dynamically */
    if (tsk->stackHeap != (xdc_runtime_IHeap_Handle)(-1)) {
        Memory_free(tsk->stackHeap, tsk->stack, tsk->stackSize);
    }

    /* return if failed to allocate Hook Env */
    if (status == 2) {
        return;
    }

    /* status == 0 or status == 3 - in both cases create hook was called */

#ifndef ti_sysbios_knl_Task_DISABLE_ALL_HOOKS
    /* free any allocated Hook Envs */
    if (Task_hooks.length > 0) {
        if (status == 0) {
            cnt = Task_hooks.length;
        }
        else {
            cnt = status - 3;   /* # successful createFxn() calls */
        }

        /* 
         * only call deleteFxn() if createFxn() was successful
         */
        for (i = 0; i < cnt; i++) {
            if (Task_hooks.elem[i].deleteFxn != NULL) {
                Task_hooks.elem[i].deleteFxn(tsk);
            }
        }

        Memory_free(Task_Object_heap(), tsk->hookEnv,
                Task_hooks.length * sizeof (Ptr));
    }
#endif
}
Example #18
/*
 *  ======== Exception_excHandler ========
 */
Void Exception_excHandler(UInt *excStack, UInt pc)
{
    Exception_ExcContext excContext, *excContextp;
    SizeT stackSize = 0;
    UInt8 *stack = NULL;

    Exception_module->excActive = TRUE;

    if (Exception_module->excContext == NULL) {
        Exception_module->excContext = &excContext;
        excContextp = &excContext;
    }
    else {
        excContextp = Exception_module->excContext;
    }

    /* copy registers from stack to excContext */
    excContextp->r0  = (Ptr)excStack[4];        /* r0 */
    excContextp->r1  = (Ptr)excStack[5];        /* r1 */
    excContextp->r2  = (Ptr)excStack[6];        /* r2 */
    excContextp->r3  = (Ptr)excStack[7];        /* r3 */
    excContextp->r4  = (Ptr)excStack[8];        /* r4 */
    excContextp->r5  = (Ptr)excStack[9];        /* r5 */
    excContextp->r6  = (Ptr)excStack[10];       /* r6 */
    excContextp->r7  = (Ptr)excStack[11];       /* r7 */
    excContextp->r8  = (Ptr)excStack[12];       /* r8 */
    excContextp->r9  = (Ptr)excStack[13];       /* r9 */
    excContextp->r10 = (Ptr)excStack[14];       /* r10 */
    excContextp->r11 = (Ptr)excStack[15];       /* r11 */
    excContextp->r12 = (Ptr)excStack[16];       /* r12 */
    excContextp->sp  = (Ptr)excStack[1];        /* sp */
    excContextp->lr  = (Ptr)excStack[2];        /* lr */
    excContextp->pc  = (Ptr)pc;         /* pc */
    excContextp->psr = (Ptr)excStack[0];        /* psr */

    excContextp->type = (Exception_Type)(excStack[3] &0x1f);    /* psr */

    excContextp->threadType = BIOS_getThreadType();

    switch (excContextp->threadType) {
    case BIOS_ThreadType_Task: {
        if (BIOS_taskEnabled == TRUE) {
            excContextp->threadHandle = (Ptr)Task_self();
            stack = (UInt8 *)(Task_self())->stack;
            stackSize = (Task_self())->stackSize;
        }
        break;
    }
    case BIOS_ThreadType_Swi: {
        if (BIOS_swiEnabled == TRUE) {
            excContextp->threadHandle = (Ptr)Swi_self();
            stack = STACK_BASE;
            stackSize = (SizeT)(&__STACK_SIZE);
        }
        break;
    }
    case BIOS_ThreadType_Hwi: {
        excContextp->threadHandle = NULL;
        stack = STACK_BASE;
        stackSize = (SizeT)(&__STACK_SIZE);
        break;
    }
    case BIOS_ThreadType_Main: {
        excContextp->threadHandle = NULL;
        stack = STACK_BASE;
        stackSize = (SizeT)(&__STACK_SIZE);
        break;
    }
    }

    excContextp->threadStackSize = stackSize;
    excContextp->threadStack = (Ptr)stack;

    /* copy thread's stack contents if user has provided a buffer */
    if (Exception_module->excStackBuffer != NULL) {
        UInt8 *from, *to;
        from = stack;
        to = (UInt8 *)Exception_module->excStackBuffer;
        while (stackSize--) {
            *to++ = *from++;
        }
    }

    /* Force MAIN thread type so we can safely call System_printf */

    BIOS_setThreadType(BIOS_ThreadType_Main);

    if (Exception_enableDecode == TRUE) {
        Exception_excDumpContext(pc);
    }

    /* Call user's exception hook */
    if (Exception_excHookFunc != NULL) {
        Exception_excHookFunc(excContextp);
    }

    /* raise a corresponding Error */
    switch(excContextp->type) {

    case Exception_Type_Supervisor:
        Error_raise(0, Exception_E_swi, pc, excStack[2]);
        break;

    case Exception_Type_PreAbort:
        Error_raise(0, Exception_E_prefetchAbort, pc, excStack[2]);
        break;

    case Exception_Type_DataAbort:
        Error_raise(0, Exception_E_dataAbort, pc, excStack[2]);
        break;

    case Exception_Type_UndefInst:
        Error_raise(0, Exception_E_undefinedInstruction, pc, excStack[2]);
        break;
    }
}
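/*
 *  Usage sketch: Exception_excHandler() above invokes a user hook through
 *  Exception_excHookFunc once the context has been captured. A minimal
 *  hook could look like this; the header path and the way the hook is
 *  wired up (normally via the .cfg script) are assumptions.
 */
#include <ti/sysbios/family/arm/exc/Exception.h>
#include <xdc/runtime/System.h>

Void myExcHook(Exception_ExcContext *ctx)
{
    /* runs after the context dump, before the corresponding Error_raise() */
    System_printf("Exception in thread type %d at pc = 0x%08x\n",
                  ctx->threadType, (UInt)ctx->pc);
}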
Example #19
/*
 *  ======== Exception_excHandler ========
 */
Void Exception_excHandler(UInt *excStack, UInt pc)
{
    Exception_ExcContext excContext, *excContextp;
    SizeT stackSize = 0;
    UInt8 *stack = NULL;
    UInt coreId = 0;

#if (ti_sysbios_BIOS_smpEnabled__D)
    coreId = Core_getId();
#endif

#if defined(ti_sysbios_family_arm_a8_intcps_Hwi_enableAsidTagging__D) && \
    (ti_sysbios_family_arm_a8_intcps_Hwi_enableAsidTagging__D)
    Mmu_switchContext(0, Mmu_getMmuTableAddr());
#elif defined(ti_sysbios_family_arm_gic_Hwi_enableAsidTagging__D) && \
    (ti_sysbios_family_arm_gic_Hwi_enableAsidTagging__D)
    Mmu_switchContext(0, Mmu_getFirstLevelTableAddr());
#endif

    Exception_module->excActive[coreId] = TRUE;

    if (Exception_module->excContext[coreId] == NULL) {
        Exception_module->excContext[coreId] = &excContext;
        excContextp = &excContext;
    }
    else {
        excContextp = Exception_module->excContext[coreId];
    }

    /* copy registers from stack to excContext */
    excContextp->r0  = (Ptr)excStack[8];        /* r0 */
    excContextp->r1  = (Ptr)excStack[9];        /* r1 */
    excContextp->r2  = (Ptr)excStack[10];       /* r2 */
    excContextp->r3  = (Ptr)excStack[11];       /* r3 */
    excContextp->r4  = (Ptr)excStack[12];       /* r4 */
    excContextp->r5  = (Ptr)excStack[13];       /* r5 */
    excContextp->r6  = (Ptr)excStack[14];       /* r6 */
    excContextp->r7  = (Ptr)excStack[15];       /* r7 */
    excContextp->r8  = (Ptr)excStack[16];       /* r8 */
    excContextp->r9  = (Ptr)excStack[17];       /* r9 */
    excContextp->r10 = (Ptr)excStack[18];       /* r10 */
    excContextp->r11 = (Ptr)excStack[19];       /* r11 */
    excContextp->r12 = (Ptr)excStack[20];       /* r12 */
    excContextp->ifar = (Ptr)excStack[4];       /* IFAR */
    excContextp->dfar = (Ptr)excStack[5];       /* DFAR */
    excContextp->ifsr = (Ptr)excStack[6];       /* IFSR */
    excContextp->dfsr = (Ptr)excStack[7];       /* DFSR */
    excContextp->sp  = (Ptr)excStack[1];        /* sp */
    excContextp->lr  = (Ptr)excStack[2];        /* lr */
    excContextp->pc  = (Ptr)pc;                 /* pc */
    excContextp->psr = (Ptr)excStack[0];        /* psr */

    excContextp->type = (Exception_Type)(excStack[3] &0x1f);    /* psr */

    excContextp->threadType = BIOS_getThreadType();

    switch (excContextp->threadType) {
        case BIOS_ThreadType_Task: {
            if (BIOS_taskEnabled == TRUE) {
                excContextp->threadHandle = (Ptr)Task_self();
                stack = (UInt8 *)(Task_self())->stack;
                stackSize = (Task_self())->stackSize;
            }
            break;
        }
        case BIOS_ThreadType_Swi: {
            if (BIOS_swiEnabled == TRUE) {
                excContextp->threadHandle = (Ptr)Swi_self();
                stack = STACK_BASE;
                stackSize = (SizeT)(&__STACK_SIZE);
            }
            break;
        }
        case BIOS_ThreadType_Hwi: {
                excContextp->threadHandle = NULL;
                stack = STACK_BASE;
                stackSize = (SizeT)(&__STACK_SIZE);
            break;
        }
        case BIOS_ThreadType_Main: {
                excContextp->threadHandle = NULL;
                stack = STACK_BASE;
                stackSize = (SizeT)(&__STACK_SIZE);
            break;
        }
    }

    excContextp->threadStackSize = stackSize;
    excContextp->threadStack = (Ptr)stack;

    /* copy thread's stack contents if user has provided a buffer */
    if (Exception_module->excStackBuffers[coreId] != NULL) {
        UInt8 *from, *to;
        from = stack;
        to = (UInt8 *)Exception_module->excStackBuffers[coreId];
        while (stackSize--) {
            *to++ = *from++;
        }
    }

    /* Force MAIN thread type so we can safely call System_printf */

    BIOS_setThreadType(BIOS_ThreadType_Main);

    if (Exception_enableDecode == TRUE) {
        Exception_excDumpContext(pc);
    }

    /* Call user's exception hook */
    if (Exception_excHookFuncs[coreId] != NULL) {
        Exception_excHookFuncs[coreId](excContextp);
    }

    /* raise a corresponding Error */
    switch(excContextp->type) {

        case Exception_Type_Supervisor:
            Error_raise(0, Exception_E_swi, pc, excStack[2]);
            break;

        case Exception_Type_PreAbort:
            Error_raise(0, Exception_E_prefetchAbort, pc, excStack[2]);
            break;

        case Exception_Type_DataAbort:
            Error_raise(0, Exception_E_dataAbort, pc, excStack[2]);
            break;

        case Exception_Type_UndefInst:
            Error_raise(0, Exception_E_undefinedInstruction, pc, excStack[2]);
            break;
    }
}
Example #20
/*
 *  ======== Deh_excHandler ========
 *  Read data from HWI exception handler and print it to crash dump buffer.
 *  Notify host that exception has occurred.
 */
Void Deh_excHandler(UInt *excStack, UInt lr)
{
    Hwi_ExcContext  exc;
    Deh_ExcRegs    *excRegs;
    Char           *ttype;
    UInt            excNum;
    Char           *etype;
    Char           *name;
    UInt           sCnt = 0;

    excRegs = (Deh_ExcRegs *) Deh_module->outbuf;

    /* Copy registers from stack to excContext */
    excRegs->r0  = exc.r0 = (Ptr)excStack[8];      /* r0 */
    excRegs->r1  = exc.r1 = (Ptr)excStack[9];      /* r1 */
    excRegs->r2  = exc.r2 = (Ptr)excStack[10];     /* r2 */
    excRegs->r3  = exc.r3 = (Ptr)excStack[11];     /* r3 */
    excRegs->r4  = exc.r4 = (Ptr)excStack[0];      /* r4 */
    excRegs->r5  = exc.r5 = (Ptr)excStack[1];      /* r5 */
    excRegs->r6  = exc.r6 = (Ptr)excStack[2];      /* r6 */
    excRegs->r7  = exc.r7 = (Ptr)excStack[3];      /* r7 */
    excRegs->r8  = exc.r8 = (Ptr)excStack[4];      /* r8 */
    excRegs->r9  = exc.r9 = (Ptr)excStack[5];      /* r9 */
    excRegs->r10 = exc.r10 = (Ptr)excStack[6];     /* r10 */
    excRegs->r11 = exc.r11 = (Ptr)excStack[7];     /* r11 */
    excRegs->r12 = exc.r12 = (Ptr)excStack[12];    /* r12 */
    excRegs->sp  = exc.sp  = (Ptr)(UInt32)(excStack+16); /* sp */
    excRegs->lr  = exc.lr  = (Ptr)excStack[13];    /* lr */
    excRegs->pc  = exc.pc  = (Ptr)excStack[14];    /* pc */
    excRegs->psr = exc.psr = (Ptr)excStack[15];    /* psr */

    exc.threadType = BIOS_getThreadType();
    switch (exc.threadType) {
        case BIOS_ThreadType_Task:
            if (BIOS_taskEnabled == TRUE) {
                exc.threadHandle = (Ptr)Task_self();
                exc.threadStack = (Task_self())->stack;
                exc.threadStackSize = (Task_self())->stackSize;
            }
            break;
        case BIOS_ThreadType_Swi:
            if (BIOS_swiEnabled == TRUE) {
                exc.threadHandle = (Ptr)Swi_self();
                exc.threadStack = Deh_module->isrStackBase;
                exc.threadStackSize = Deh_module->isrStackSize;
            }
            break;
        case BIOS_ThreadType_Hwi:
        case BIOS_ThreadType_Main:
            exc.threadHandle = NULL;
            exc.threadStack = Deh_module->isrStackBase;
            exc.threadStackSize = Deh_module->isrStackSize;
            break;
        default:
            exc.threadHandle = NULL;
            exc.threadStack = NULL;
            exc.threadStackSize = 0;
            break;
    }

    excRegs->ICSR  = exc.ICSR  = (Ptr)Hwi_nvic.ICSR;
    excRegs->MMFSR = exc.MMFSR = (Ptr)Hwi_nvic.MMFSR;
    excRegs->BFSR  = exc.BFSR  = (Ptr)Hwi_nvic.BFSR;
    excRegs->UFSR  = exc.UFSR  = (Ptr)Hwi_nvic.UFSR;
    excRegs->HFSR  = exc.HFSR  = (Ptr)Hwi_nvic.HFSR;
    excRegs->DFSR  = exc.DFSR  = (Ptr)Hwi_nvic.DFSR;
    excRegs->MMAR  = exc.MMAR  = (Ptr)Hwi_nvic.MMAR;
    excRegs->BFAR  = exc.BFAR  = (Ptr)Hwi_nvic.BFAR;
    excRegs->AFSR  = exc.AFSR  = (Ptr)Hwi_nvic.AFSR;

    /* Force MAIN thread type so we can safely call System_printf */
    BIOS_setThreadType(BIOS_ThreadType_Main);

    excNum = Hwi_nvic.ICSR & 0xff;
    if (Watchdog_isException(excNum)) {
        etype = "Watchdog fired";
    }
    else {
        VirtQueue_postCrashToMailbox();
        etype = "Exception occurred";
    }

    System_printf("%s at (PC) = %08x\n", etype, exc.pc);

    switch (lr) {
        case 0xfffffff1:
            System_printf("CPU context: ISR\n");
            break;
        case 0xfffffff9:
        case 0xfffffffd:
            System_printf("CPU context: thread\n");
            break;
        default:
            System_printf("CPU context: unknown. LR: %08x\n", lr);
            break;
    }

    switch (exc.threadType) {
        case BIOS_ThreadType_Task: {
            ttype = "Task";
            break;
        }
        case BIOS_ThreadType_Swi: {
            ttype = "Swi";
            break;
        }
        case BIOS_ThreadType_Hwi: {
            ttype = "Hwi";
            break;
        }
        case BIOS_ThreadType_Main: {
            ttype = "Main";
            break;
        }
        default:
            ttype = "Invalid!";
            break;
    }

    if (exc.threadHandle) {
        name = Task_Handle_name(exc.threadHandle);
        if (!name) {
            name = "(unnamed)";
        }
    }
    else {
        name = "(null task)";
    }
    System_printf("BIOS %s name: %s handle: 0x%x.\n", ttype, name,
                  exc.threadHandle);

    System_printf("BIOS %s stack base: 0x%x.\n", ttype, exc.threadStack);
    System_printf("BIOS %s stack size: 0x%x.\n", ttype, exc.threadStackSize);

    switch (excNum) {
        case 2:
            ti_sysbios_family_arm_m3_Hwi_excNmi(excStack);
            break;
        case 3:
            ti_sysbios_family_arm_m3_Hwi_excHardFault(excStack);
            break;
        case 4:
            ti_sysbios_family_arm_m3_Hwi_excMemFault(excStack);
            break;
        case 5:
            ti_sysbios_family_arm_m3_Hwi_excBusFault(excStack);
            break;
        case 6:
            ti_sysbios_family_arm_m3_Hwi_excUsageFault(excStack);
            break;
        case 11:
            ti_sysbios_family_arm_m3_Hwi_excSvCall(excStack);
            break;
        case 12:
            ti_sysbios_family_arm_m3_Hwi_excDebugMon(excStack);
            break;
        case 7:
        case 8:
        case 9:
        case 10:
        case 13:
            ti_sysbios_family_arm_m3_Hwi_excReserved(excStack, excNum);
            break;
        default:
            if (!Watchdog_isException(excNum)) {
                ti_sysbios_family_arm_m3_Hwi_excNoIsr(excStack, excNum);
            }
            break;
    }

    System_printf ("R0 = 0x%08x  R8  = 0x%08x\n", exc.r0, exc.r8);
    System_printf ("R1 = 0x%08x  R9  = 0x%08x\n", exc.r1, exc.r9);
    System_printf ("R2 = 0x%08x  R10 = 0x%08x\n", exc.r2, exc.r10);
    System_printf ("R3 = 0x%08x  R11 = 0x%08x\n", exc.r3, exc.r11);
    System_printf ("R4 = 0x%08x  R12 = 0x%08x\n", exc.r4, exc.r12);
    System_printf ("R5 = 0x%08x  SP(R13) = 0x%08x\n", exc.r5, exc.sp);
    System_printf ("R6 = 0x%08x  LR(R14) = 0x%08x\n", exc.r6, exc.lr);
    System_printf ("R7 = 0x%08x  PC(R15) = 0x%08x\n", exc.r7, exc.pc);
    System_printf ("PSR = 0x%08x\n", exc.psr);
    System_printf ("ICSR = 0x%08x\n", Hwi_nvic.ICSR);
    System_printf ("MMFSR = 0x%02x\n", Hwi_nvic.MMFSR);
    System_printf ("BFSR = 0x%02x\n", Hwi_nvic.BFSR);
    System_printf ("UFSR = 0x%04x\n", Hwi_nvic.UFSR);
    System_printf ("HFSR = 0x%08x\n", Hwi_nvic.HFSR);
    System_printf ("DFSR = 0x%08x\n", Hwi_nvic.DFSR);
    System_printf ("MMAR = 0x%08x\n", Hwi_nvic.MMAR);
    System_printf ("BFAR = 0x%08x\n", Hwi_nvic.BFAR);
    System_printf ("AFSR = 0x%08x\n", Hwi_nvic.AFSR);

    System_printf ("Stack trace\n");
    StackDbg_walkStack((UInt)exc.threadStack, (UInt)exc.threadStackSize,
                       (UInt)exc.sp, printStackEntry, &sCnt);

    System_printf ("Stack dump base %08x size %ld sp %08x:\n", exc.threadStack,
                   exc.threadStackSize, exc.sp);
    dump_hex((UInt)exc.threadStack, exc.threadStackSize / sizeof(UInt),
             (UInt)exc.sp);

    System_abort("Terminating execution...\n");

}