Code example #1
File: atomport.c Project: Archer-sys/atomthreads
/**
 * \b thread_shell
 *
 * Shell routine which is used to call all thread entry points.
 *
 * This routine is called whenever a new thread is starting, and
 * provides a simple wrapper around the thread entry point that
 * allows us to carry out any actions we want to do when a thread
 * first starts up, or when it returns after completion.
 *
 * We mainly just want to make sure interrupts are enabled when a
 * thread is run for the first time. This can be done via stack
 * restores when threads are first run, but it's handy to have this
 * wrapper anyway to run some common code if threads run to
 * completion.
 *
 * A thread shell is also handy for providing port users with a place
 * to do any other initialisation that must be done for each thread
 * (e.g. opening stdio files etc).
 *
 * @return None
 */
static void thread_shell (void)
{
    ATOM_TCB *curr_tcb;

    /* Get the TCB of the thread being started */
    curr_tcb = atomCurrentContext();

    /**
     * Open a stdout file descriptor so that the thread has its own stdout.
     * In theory threads could open stdout to different output drivers
     * if syscalls.s supported different output write functions.
     */
    stdout = fopen ("/debuguart", "w");
    setvbuf (stdout, 0, _IONBF, 0);
 
    /**
     * Enable interrupts - these will not be enabled when a thread
     * is first restored.
     */
    contextEnableInterrupts ();

    /* Call the thread entry point */
    if (curr_tcb && curr_tcb->entry_point)
    {
        curr_tcb->entry_point(curr_tcb->entry_param);
    }

    /* Clean up after thread completion */
    fclose (stdout);
    _reclaim_reent (&(curr_tcb->port_priv.reent));

    /* Thread has run to completion: remove it from the ready list */
    curr_tcb->terminated = TRUE;
    atomSched (FALSE);
}
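
The shell itself is private to the port; application code only supplies the entry point that thread_shell() calls through curr_tcb->entry_point (see atomThreadCreate() in example #7 below). A minimal sketch, assuming the usual atomthreads header names; the function name and printf body are illustrative only:

#include <stdio.h>
#include "atom.h"

/* Hypothetical thread entry point: thread_shell() calls this with the
 * entry_param that was given to atomThreadCreate(). */
static void worker_thread_func (uint32_t param)
{
    /* By the time this runs, thread_shell() has already opened stdout and
     * enabled interrupts for this thread. */
    printf ("worker started with param %lu\n", (unsigned long)param);

    /* Simply returning is fine: thread_shell() then closes stdout, reclaims
     * the reentrancy structure and marks the thread as terminated. */
}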
Code example #2
File: atomkernel.c Project: Zuph/nerdmometer
/**
 * \b atomIntExit
 *
 * Interrupt handler exit routine.
 *
 * Must be called at the end of any interrupt handlers that may
 * call an OS primitive and make a thread ready.
 *
 * This is responsible for calling the scheduler at the end of
 * interrupt handlers to determine whether a new thread has now
 * been made ready and should be scheduled in.
 *
 * @param timer_tick TRUE if this is a timer tick
 *
 * @return None
 */
void atomIntExit (uint8_t timer_tick)
{
    /* Decrement the interrupt count */
    atomIntCnt--;

    /* Call the scheduler */
    atomSched (timer_tick);
}
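
Any ISR that can make a thread ready is expected to bracket its body with the matching enter/exit calls. A sketch of a system tick handler, assuming the usual atomIntEnter() and atomTimerTick() kernel calls from the same codebase; the ISR name and how it is hooked to the interrupt vector are port-specific:

#include "atom.h"
#include "atomtimer.h"

/* Hypothetical timer tick ISR body */
void system_tick_isr (void)
{
    /* Tell the kernel we are entering interrupt context */
    atomIntEnter ();

    /* Advance the tick count and run any expired timer callbacks */
    atomTimerTick ();

    /* Leaving interrupt context: TRUE marks this as a timer tick so the
     * scheduler can apply round-robin time-slicing if required */
    atomIntExit (TRUE);
}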
Code example #3
File: atomport.c Project: mxrtos/atomthreads
/**
 * \b thread_shell
 *
 * Shell routine which is used to call all thread entry points.
 *
 * This routine is called whenever a new thread is starting, and
 * provides a simple wrapper around the thread entry point that
 * allows us to carry out any actions we want to do when a thread
 * first starts up, or when it returns after completion.
 *
 * We mainly just want to make sure interrupts are enabled when a
 * thread is run for the first time. This can be done via stack
 * restores when threads are first run, but it's handy to have this
 * wrapper anyway to run some common code if threads run to
 * completion.
 *
 * A thread shell is also handy for providing port users with a place
 * to do any other initialisation that must be done for each thread
 * (e.g. opening stdio files etc).
 *
 * @return None
 */
static void thread_shell (void)
{
    ATOM_TCB *curr_tcb;

    /* Get the TCB of the thread being started */
    curr_tcb = atomCurrentContext();

    /**
     * Enable interrupts - these will not be enabled when a thread
     * is first restored.
     */
    asm("FSET I");

    /* Call the thread entry point */
    if (curr_tcb && curr_tcb->entry_point)
    {
        curr_tcb->entry_point(curr_tcb->entry_param);
    }

    /* Thread has run to completion: remove it from the ready list */
    curr_tcb->suspended = TRUE;
    atomSched (FALSE);
}
Code example #4
File: atomsem.c Project: Zuph/nerdmometer
/**
 * \b atomSemPut
 *
 * Perform a put operation on a semaphore.
 *
 * This increments the current count value for the semaphore and returns.
 *
 * If the count value was previously zero and there are threads blocking on the
 * semaphore, the call will wake up the highest priority thread suspended. Only
 * one thread is woken per call to atomSemPut(). If multiple threads of the
 * same priority are suspended, they are woken in order of suspension (FIFO).
 *
 * This function can be called from interrupt context.
 *
 * @param[in] sem Pointer to semaphore object
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_OVF The semaphore count would have overflowed (>255)
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
 * @retval ATOM_ERR_TIMER Problem cancelling a timeout for a woken thread
 */
uint8_t atomSemPut (ATOM_SEM * sem)
{
    uint8_t status;
    CRITICAL_STORE;
    ATOM_TCB *tcb_ptr;

    /* Check parameters */
    if (sem == NULL)
    {
        /* Bad semaphore pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Protect access to the semaphore object and OS queues */
        CRITICAL_START ();

        /* If any threads are blocking on the semaphore, wake up one */
        if (sem->suspQ)
        {
            /**
             * Threads are woken up in priority order, with a FIFO system
             * used on same priority threads. We always take the head,
             * ordering is taken care of by an ordered list enqueue.
             */
            tcb_ptr = tcbDequeueHead (&sem->suspQ);
            if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
            {
                /* Exit critical region */
                CRITICAL_END ();

                /* There was a problem putting the thread on the ready queue */
                status = ATOM_ERR_QUEUE;
            }
            else
            {
                /* Set OK status to be returned to the waiting thread */
                tcb_ptr->suspend_wake_status = ATOM_OK;

                /* If there's a timeout on this suspension, cancel it */
                if ((tcb_ptr->suspend_timo_cb != NULL)
                    && (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK))
                {
                    /* There was a problem cancelling a timeout on this semaphore */
                    status = ATOM_ERR_TIMER;
                }
                else
                {
                    /* Flag as no timeout registered */
                    tcb_ptr->suspend_timo_cb = NULL;

                    /* Successful */
                    status = ATOM_OK;
                }

                /* Exit critical region */
                CRITICAL_END ();

                /**
                 * The scheduler may now make a policy decision to thread
                 * switch if we are currently in thread context. If we are
                 * in interrupt context it will be handled by atomIntExit().
                 */
                if (atomCurrentContext())
                    atomSched (FALSE);
            }
        }

        /* If no threads waiting, just increment the count and return */
        else
        {
            /* Check for count overflow */
            if (sem->count == 255)
            {
                /* Don't increment, just return error status */
                status = ATOM_ERR_OVF;
            }
            else
            {
                /* Increment the count and return success */
                sem->count++;
                status = ATOM_OK;
            }

            /* Exit critical region */
            CRITICAL_END ();
        }
    }

    return (status);
}
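
A typical producer-side sketch, assuming the semaphore was initialised elsewhere with atomSemCreate(&data_ready_sem, 0); the data_ready_sem and notify_data_ready names are illustrative:

#include "atom.h"
#include "atomsem.h"

/* Hypothetical counting semaphore shared between a producer and a consumer */
static ATOM_SEM data_ready_sem;

/* Signal that one more item is available. Wakes the highest-priority waiter,
 * or just increments the count if no thread is blocked. Also usable from ISR
 * context (between atomIntEnter()/atomIntExit()). */
static void notify_data_ready (void)
{
    if (atomSemPut (&data_ready_sem) == ATOM_ERR_OVF)
    {
        /* More than 255 outstanding signals: this one is lost */
    }
}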
Code example #5
File: atomsem.c Project: Zuph/nerdmometer
/**
 * \b atomSemGet
 *
 * Perform a get operation on a semaphore.
 *
 * This decrements the current count value for the semaphore and returns.
 * If the count value is already zero then the call will block until the
 * count is incremented by another thread, or until the specified \c timeout
 * is reached. Blocking threads will also be woken if the semaphore is
 * deleted by another thread while blocking.
 *
 * Depending on the \c timeout value specified the call will do one of
 * the following if the count value is zero:
 *
 * \c timeout == 0 : Call will block until the count is non-zero \n
 * \c timeout > 0 : Call will block until non-zero up to the specified timeout \n
 * \c timeout == -1 : Return immediately if the count is zero \n
 *
 * If the call needs to block and \c timeout is zero, it will block
 * indefinitely until atomSemPut() or atomSemDelete() is called on the
 * semaphore.
 *
 * If the call needs to block and \c timeout is non-zero, the call will only
 * block for the specified number of system ticks after which time, if the
 * thread was not already woken, the call will return with \c ATOM_TIMEOUT.
 *
 * If the call would normally block and \c timeout is -1, the call will
 * return immediately with \c ATOM_WOULDBLOCK.
 *
 * This function can only be called from interrupt context if the \c timeout
 * parameter is -1 (in which case it does not block).
 *
 * @param[in] sem Pointer to semaphore object
 * @param[in] timeout Max system ticks to block (0 = forever)
 *
 * @retval ATOM_OK Success
 * @retval ATOM_TIMEOUT Semaphore timed out before being woken
 * @retval ATOM_WOULDBLOCK Called with timeout == -1 but count is zero
 * @retval ATOM_ERR_DELETED Semaphore was deleted while suspended
 * @retval ATOM_ERR_CONTEXT Not called in thread context and attempted to block
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting the thread on the suspend queue
 * @retval ATOM_ERR_TIMER Problem registering the timeout
 */
uint8_t atomSemGet (ATOM_SEM *sem, int32_t timeout)
{
    CRITICAL_STORE;
    uint8_t status;
    SEM_TIMER timer_data;
    ATOM_TIMER timer_cb;
    ATOM_TCB *curr_tcb_ptr;

    /* Check parameters */
    if (sem == NULL)
    {
        /* Bad semaphore pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Protect access to the semaphore object and OS queues */
        CRITICAL_START ();

        /* If count is zero, block the calling thread */
        if (sem->count == 0)
        {
            /* If called with timeout >= 0, we should block */
            if (timeout >= 0)
            {
                /* Count is zero, block the calling thread */

                /* Get the current TCB */
                curr_tcb_ptr = atomCurrentContext();

                /* Check we are actually in thread context */
                if (curr_tcb_ptr)
                {
                    /* Add current thread to the suspend list on this semaphore */
                    if (tcbEnqueuePriority (&sem->suspQ, curr_tcb_ptr) != ATOM_OK)
                    {
                        /* Exit critical region */
                        CRITICAL_END ();

                        /* There was an error putting this thread on the suspend list */
                        status = ATOM_ERR_QUEUE;
                    }
                    else
                    {
                        /* Set suspended status for the current thread */
                        curr_tcb_ptr->suspended = TRUE;

                        /* Track errors */
                        status = ATOM_OK;

                        /* Register a timer callback if requested */
                        if (timeout)
                        {
                            /* Fill out the data needed by the callback to wake us up */
                            timer_data.tcb_ptr = curr_tcb_ptr;
                            timer_data.sem_ptr = sem;

                            /* Fill out the timer callback request structure */
                            timer_cb.cb_func = atomSemTimerCallback;
                            timer_cb.cb_data = (POINTER)&timer_data;
                            timer_cb.cb_ticks = timeout;

                            /**
                             * Store the timer details in the TCB so that we can
                             * cancel the timer callback if the semaphore is put
                             * before the timeout occurs.
                             */
                            curr_tcb_ptr->suspend_timo_cb = &timer_cb;

                            /* Register a callback on timeout */
                            if (atomTimerRegister (&timer_cb) != ATOM_OK)
                            {
                                /* Timer registration failed */
                                status = ATOM_ERR_TIMER;

                                /* Clean up and return to the caller */
                                (void)tcbDequeueEntry (&sem->suspQ, curr_tcb_ptr);
                                curr_tcb_ptr->suspended = FALSE;
                                curr_tcb_ptr->suspend_timo_cb = NULL;
                            }
                        }

                        /* Set no timeout requested */
                        else
                        {
                            /* No need to cancel timeouts on this one */
                            curr_tcb_ptr->suspend_timo_cb = NULL;
                        }

                        /* Exit critical region */
                        CRITICAL_END ();

                        /* Check no errors have occurred */
                        if (status == ATOM_OK)
                        {
                            /**
                             * Current thread now blocking, schedule in a new
                             * one. We already know we are in thread context
                             * so can call the scheduler from here.
                             */
                            atomSched (FALSE);

                            /**
                             * Normal atomSemPut() wakeups will set ATOM_OK status,
                             * while timeouts will set ATOM_TIMEOUT and semaphore
                             * deletions will set ATOM_ERR_DELETED.
                             */
                            status = curr_tcb_ptr->suspend_wake_status;

                            /**
                             * If we have been woken up with ATOM_OK then
                             * another thread incremented the semaphore and
                             * handed control to this thread. In theory the
                             * posting thread increments the counter and this
                             * thread decrements it again as soon as it wakes
                             * up. However, to prevent another thread from
                             * preempting us and decrementing the semaphore
                             * before this section is scheduled back in, we
                             * emulate the increment and decrement by not
                             * incrementing in atomSemPut() and not
                             * decrementing here. The count remains zero
                             * throughout, which prevents other threads from
                             * taking the semaphore before we would have
                             * decremented it again.
                             */

                        }
                    }
                }
                else
                {
                    /* Exit critical region */
                    CRITICAL_END ();

                    /* Not currently in thread context, can't suspend */
                    status = ATOM_ERR_CONTEXT;
                }
            }
            else
            {
                /* timeout == -1, requested not to block and count is zero */
                CRITICAL_END();
                status = ATOM_WOULDBLOCK;
            }
        }
        else
        {
            /* Count is non-zero, just decrement it and return to calling thread */
            sem->count--;

            /* Exit critical region */
            CRITICAL_END ();

            /* Successful */
            status = ATOM_OK;
        }
    }

    return (status);
}
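
The consumer side typically blocks with a timeout and distinguishes the wake reasons. A sketch, assuming SYSTEM_TICKS_PER_SEC is provided by the port header; wait_for_data is an illustrative name:

#include "atom.h"
#include "atomsem.h"

/* Wait up to one second for the producer to post the semaphore */
static uint8_t wait_for_data (ATOM_SEM *sem)
{
    uint8_t status;

    status = atomSemGet (sem, SYSTEM_TICKS_PER_SEC);
    if (status == ATOM_TIMEOUT)
    {
        /* Nothing was posted within the timeout */
    }
    else if (status == ATOM_ERR_DELETED)
    {
        /* The semaphore was deleted while we were blocked */
    }

    return (status);
}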
Code example #6
File: atomsem.c Project: Zuph/nerdmometer
/**
 * \b atomSemDelete
 *
 * Deletes a semaphore object.
 *
 * Any threads currently suspended on the semaphore will be woken up with
 * return status ATOM_ERR_DELETED. If called from thread context, the
 * scheduler will be called during this function which may schedule in one
 * of the woken threads depending on relative priorities.
 *
 * This function can be called from interrupt context, but loops internally
 * waking up all threads blocking on the semaphore, so the potential
 * execution cycles cannot be determined in advance.
 *
 * @param[in] sem Pointer to semaphore object
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
 * @retval ATOM_ERR_TIMER Problem cancelling a timeout on a woken thread
 * @retval ATOM_ERR_PARAM Bad parameter
 */
uint8_t atomSemDelete (ATOM_SEM *sem)
{
    uint8_t status;
    CRITICAL_STORE;
    ATOM_TCB *tcb_ptr;
    uint8_t woken_threads = FALSE;

    /* Parameter check */
    if (sem == NULL)
    {
        /* Bad semaphore pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Default to success status unless errors occur during wakeup */
        status = ATOM_OK;

        /* Wake up all suspended tasks */
        while (1)
        {
            /* Enter critical region */
            CRITICAL_START ();

            /* Check if any threads are suspended */
            tcb_ptr = tcbDequeueHead (&sem->suspQ);

            /* A thread is suspended on the semaphore */
            if (tcb_ptr)
            {
                /* Return error status to the waiting thread */
                tcb_ptr->suspend_wake_status = ATOM_ERR_DELETED;

                /* Put the thread on the ready queue */
                if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
                {
                    /* Exit critical region */
                    CRITICAL_END ();

                    /* Quit the loop, returning error */
                    status = ATOM_ERR_QUEUE;
                    break;
                }

                /* If there's a timeout on this suspension, cancel it */
                if (tcb_ptr->suspend_timo_cb)
                {
                    /* Cancel the callback */
                    if (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK)
                    {
                        /* Exit critical region */
                        CRITICAL_END ();

                        /* Quit the loop, returning error */
                        status = ATOM_ERR_TIMER;
                        break;
                    }

                    /* Flag as no timeout registered */
                    tcb_ptr->suspend_timo_cb = NULL;

                }

                /* Exit critical region */
                CRITICAL_END ();

                /* Request a reschedule */
                woken_threads = TRUE;
            }

            /* No more suspended threads */
            else
            {
                /* Exit critical region and quit the loop */
                CRITICAL_END ();
                break;
            }
        }

        /* Call scheduler if any threads were woken up */
        if (woken_threads == TRUE)
        {
            /**
             * Only call the scheduler if we are in thread context, otherwise
             * it will be called on exiting the ISR by atomIntExit().
             */
            if (atomCurrentContext())
                atomSched (FALSE);
        }
    }

    return (status);
}
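
A small tear-down sketch; shutdown_queue is an illustrative name:

#include "atom.h"
#include "atomsem.h"

/* Delete a semaphore during shutdown. Any thread still blocked in atomSemGet()
 * on it is woken first and sees ATOM_ERR_DELETED as its return status. */
static void shutdown_queue (ATOM_SEM *sem)
{
    if (atomSemDelete (sem) != ATOM_OK)
    {
        /* ATOM_ERR_QUEUE or ATOM_ERR_TIMER: a waiter could not be woken cleanly */
    }
}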
Code example #7
File: atomkernel.c Project: Zuph/nerdmometer
/**
 * \b atomThreadCreate
 *
 * Creates and starts a new thread.
 *
 * Callers provide the ATOM_TCB structure storage; it is not obtained
 * from an internal TCB free list.
 *
 * The function puts the new thread on the ready queue and calls the
 * scheduler. If the new thread's priority is higher than the calling
 * thread's, the new thread may be scheduled in before the function returns.
 *
 * Optionally prefills the thread stack with a known value to enable stack
 * usage checking (if the ATOM_STACK_CHECKING macro is defined).
 *
 * @param[in] tcb_ptr Pointer to the thread's TCB storage
 * @param[in] priority Priority of the thread (0 to 255)
 * @param[in] entry_point Thread entry point
 * @param[in] entry_param Parameter passed to thread entry point
 * @param[in] stack_top Top of the stack area
 * @param[in] stack_size Size of the stack area in bytes
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_PARAM Bad parameters
 * @retval ATOM_ERR_QUEUE Error putting the thread on the ready queue
 */
uint8_t atomThreadCreate (ATOM_TCB *tcb_ptr, uint8_t priority, void (*entry_point)(uint32_t), uint32_t entry_param, void *stack_top, uint32_t stack_size)
{
    CRITICAL_STORE;
    uint8_t status;

    if ((tcb_ptr == NULL) || (entry_point == NULL) || (stack_top == NULL)
        || (stack_size == 0))
    {
        /* Bad parameters */
        status = ATOM_ERR_PARAM;
    }
    else
    {

        /* Set up the TCB initial values */
        tcb_ptr->suspended = FALSE;
        tcb_ptr->priority = priority;
        tcb_ptr->prev_tcb = NULL;
        tcb_ptr->next_tcb = NULL;
        tcb_ptr->suspend_timo_cb = NULL;

        /**
         * Store the thread entry point and parameter in the TCB. This may
         * not be necessary for all architecture ports if they put all of
         * this information in the initial thread stack.
         */
        tcb_ptr->entry_point = entry_point;
        tcb_ptr->entry_param = entry_param;

        /**
         * Additional processing only required if stack-checking is
         * enabled. Incurs a slight overhead on each thread creation
         * and uses some additional storage in the TCB, but can be
         * compiled out if not desired.
         */
#ifdef ATOM_STACK_CHECKING

        /* Store the stack details for use by the stack-check function */
        tcb_ptr->stack_top = stack_top;
        tcb_ptr->stack_size = stack_size;

        /**
         * Prefill the stack with a known value. This is used later in
         * calls to atomThreadStackCheck() to get an indication of how
         * much stack has been used during runtime.
         */
        while (stack_size > 0)
        {
            /* Initialise all stack bytes from bottom up to 0x5A */
            *((uint8_t *)stack_top - (stack_size - 1)) = STACK_CHECK_BYTE;
            stack_size--;
        }
#else
        /* Avoid compiler warnings due to unused stack_size variable */
        stack_size = stack_size;
#endif

        /**
         * Call the arch-specific routine to set up the stack. This routine
         * is responsible for creating the context save area necessary for
         * allowing atomThreadSwitch() to schedule it in. The initial
         * archContextSwitch() call when this thread gets scheduled in the
         * first time will then restore the program counter to the thread
         * entry point, and any other necessary register values ready for
         * it to start running.
         */
        archThreadContextInit (tcb_ptr, stack_top, entry_point, entry_param);

        /* Protect access to the OS queue */
        CRITICAL_START ();

        /* Put this thread on the ready queue */
        if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Queue-related error */
            status = ATOM_ERR_QUEUE;
        }
        else
        {
            /* Exit critical region */
            CRITICAL_END ();

            /**
             * If the OS is started and we're in thread context, check if we
             * should be scheduled in now.
             */
            if ((atomOSStarted == TRUE) && atomCurrentContext())
                atomSched (FALSE);

            /* Success */
            status = ATOM_OK;
        }
    }

    return (status);
}
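
Creation sketch, assuming this version's parameter convention: the stack-check prefill above walks downwards from stack_top, so the highest address of the caller-supplied stack array is passed. The WORKER_* names and worker_func are illustrative:

#include "atom.h"

#define WORKER_STACK_SIZE 256
#define WORKER_PRIORITY   16

/* Caller-provided storage: the kernel does not allocate TCBs or stacks */
static ATOM_TCB worker_tcb;
static uint8_t  worker_stack[WORKER_STACK_SIZE];

static void worker_func (uint32_t param)
{
    /* Thread body goes here; returning ends the thread via the port's shell */
}

static uint8_t start_worker (void)
{
    /* Pass the top (highest address) of the stack area and its size in bytes */
    return atomThreadCreate (&worker_tcb, WORKER_PRIORITY, worker_func, 0,
                             &worker_stack[WORKER_STACK_SIZE - 1],
                             WORKER_STACK_SIZE);
}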
Code example #8
File: atomtimer.c Project: simonccn/atomthreads
/**
 * \b atomTimerDelay
 *
 * Suspend a thread for the given number of system ticks.
 *
 * Note that the wakeup time is measured in ticks from the current system
 * tick; for a one-tick delay the thread may therefore be woken at any time
 * between the atomTimerDelay() call and the next system tick. To guarantee
 * a minimum delay, specify the minimum number of ticks + 1.
 *
 * This function can only be called from thread context.
 *
 * @param[in] ticks Number of system ticks to delay (must be > 0)
 *
 * @retval ATOM_OK Successful delay
 * @retval ATOM_ERR_PARAM Bad parameter (ticks must be non-zero)
 * @retval ATOM_ERR_CONTEXT Not called from thread context
 * @retval ATOM_ERR_TIMER Problem registering the timer callback
 */
uint8_t atomTimerDelay (uint32_t ticks)
{
    ATOM_TCB *curr_tcb_ptr;
    ATOM_TIMER timer_cb;
    DELAY_TIMER timer_data;
    CRITICAL_STORE;
    uint8_t status;

    /* Get the current TCB  */
    curr_tcb_ptr = atomCurrentContext();

    /* Parameter check */
    if (ticks == 0)
    {
        /* Return error */
        status = ATOM_ERR_PARAM;
    }

    /* Check we are actually in thread context */
    else if (curr_tcb_ptr == NULL)
    {
        /* Not currently in thread context, can't suspend */
        status = ATOM_ERR_CONTEXT;
    }

    /* Otherwise safe to proceed */
    else
    {
        /* Protect the system queues */
        CRITICAL_START ();

        /* Set suspended status for the current thread */
        curr_tcb_ptr->suspended = TRUE;

        /* Register the timer callback */

        /* Fill out the data needed by the callback to wake us up */
        timer_data.tcb_ptr = curr_tcb_ptr;

        /* Fill out the timer callback request structure */
        timer_cb.cb_func = atomTimerDelayCallback;
        timer_cb.cb_data = (POINTER)&timer_data;
        timer_cb.cb_ticks = ticks;

        /* Store the timeout callback details, though we don't use it */
        curr_tcb_ptr->suspend_timo_cb = &timer_cb;

        /* Register the callback */
        if (atomTimerRegister (&timer_cb) != ATOM_OK)
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Timer registration didn't work, won't get a callback */
            status = ATOM_ERR_TIMER;
        }
        else
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Successful timer registration */
            status = ATOM_OK;

            /* Current thread should now block, schedule in another */
            atomSched (FALSE);
        }
    }

    return (status);
}
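
Usage sketch, again assuming SYSTEM_TICKS_PER_SEC from the port header; pause_half_second is an illustrative name:

#include "atom.h"
#include "atomtimer.h"

/* Sleep for roughly half a second. One extra tick is added because the first
 * tick can arrive almost immediately after the call (see the note above). */
static void pause_half_second (void)
{
    if (atomTimerDelay ((SYSTEM_TICKS_PER_SEC / 2) + 1) != ATOM_OK)
    {
        /* ATOM_ERR_CONTEXT if called from an ISR, otherwise a parameter/timer error */
    }
}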
Code example #9
File: atommutex.c Project: MHageH/atomthreads_custom
/**
 * \b atomMutexPut
 *
 * Give back the lock on a mutex.
 *
 * This checks that the mutex is owned by the calling thread, and decrements
 * the recursive lock count. Once the lock count reaches zero, the lock is
 * considered relinquished and no longer owned by this thread.
 *
 * If the lock is relinquished and there are threads blocking on the mutex, the
 * call will wake up the highest priority thread suspended. Only one thread is
 * woken per call to atomMutexPut(). If multiple threads of the same priority
 * are suspended, they are woken in order of suspension (FIFO).
 *
 * This function can only be called from thread context. A mutex has the
 * concept of an owner thread, so it is never valid to make a mutex call
 * from interrupt context when there is no thread to associate with.
 *
 * @param[in] mutex Pointer to mutex object
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
 * @retval ATOM_ERR_TIMER Problem cancelling a timeout for a woken thread
 * @retval ATOM_ERR_OWNERSHIP Attempt to unlock mutex not owned by this thread
 */
uint8_t atomMutexPut (ATOM_MUTEX * mutex)
{
    uint8_t status;
    CRITICAL_STORE;
    ATOM_TCB *tcb_ptr, *curr_tcb_ptr;

    /* Check parameters */
    if (mutex == NULL)
    {
        /* Bad mutex pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Get the current TCB */
        curr_tcb_ptr = atomCurrentContext();

        /* Protect access to the mutex object and OS queues */
        CRITICAL_START ();

        /* Check if the calling thread owns this mutex */
        if (mutex->owner != curr_tcb_ptr)
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Attempt to unlock by non-owning thread */
            status = ATOM_ERR_OWNERSHIP;
        }
        else
        {
            /* Lock is owned by this thread, decrement the recursive lock count */
            mutex->count--;

            /* Once recursive lock count reaches zero, we relinquish ownership */
            if (mutex->count == 0)
            {
                /* Relinquish ownership */
                mutex->owner = NULL;

                /* If any threads are blocking on this mutex, wake them now */
                if (mutex->suspQ)
                {
                    /**
                     * Threads are woken up in priority order, with a FIFO system
                     * used on same priority threads. We always take the head,
                     * ordering is taken care of by an ordered list enqueue.
                     */
                    tcb_ptr = tcbDequeueHead (&mutex->suspQ);
                    if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
                    {
                        /* Exit critical region */
                        CRITICAL_END ();

                        /* There was a problem putting the thread on the ready queue */
                        status = ATOM_ERR_QUEUE;
                    }
                    else
                    {
                        /* Set OK status to be returned to the waiting thread */
                        tcb_ptr->suspend_wake_status = ATOM_OK;

                        /* Set this thread as the new owner of the mutex */
                        mutex->owner = tcb_ptr;

                        /* If there's a timeout on this suspension, cancel it */
                        if ((tcb_ptr->suspend_timo_cb != NULL)
                            && (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK))
                        {
                            /* There was a problem cancelling a timeout on this mutex */
                            status = ATOM_ERR_TIMER;
                        }
                        else
                        {
                            /* Flag as no timeout registered */
                            tcb_ptr->suspend_timo_cb = NULL;

                            /* Successful */
                            status = ATOM_OK;
                        }

                        /* Exit critical region */
                        CRITICAL_END ();

                        /**
                         * The scheduler may now make a policy decision to
                         * thread switch. We already know we are in thread
                         * context so can call the scheduler from here.
                         */
                        atomSched (FALSE);
                    }
                }
                else
                {
                    /**
                     * Relinquished ownership and no threads waiting.
                     * Nothing to do.
                     */

                    /* Exit critical region */
                    CRITICAL_END ();

                    /* Successful */
                    status = ATOM_OK;
                }
            }
            else
            {
                /**
                 * Decremented lock but still retain ownership due to
                 * recursion. Nothing to do.
                 */

                /* Exit critical region */
                CRITICAL_END ();

                /* Successful */
                status = ATOM_OK;
            }
        }
    }

    return (status);
}
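
Sketch of balanced recursive locking, assuming the mutex was initialised elsewhere with atomMutexCreate(); resource_mutex and update_resource are illustrative names:

#include "atom.h"
#include "atommutex.h"

static ATOM_MUTEX resource_mutex;

static void update_resource (void)
{
    if (atomMutexGet (&resource_mutex, 0) == ATOM_OK)
    {
        /* Re-locking by the owner just bumps the recursive count */
        if (atomMutexGet (&resource_mutex, 0) == ATOM_OK)
        {
            /* ... access the shared resource ... */
            atomMutexPut (&resource_mutex);   /* count 2 -> 1, still owned */
        }
        atomMutexPut (&resource_mutex);       /* count 1 -> 0, ownership released */
    }
}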
Code example #10
File: atommutex.c Project: MHageH/atomthreads_custom
/**
 * \b atomMutexGet
 *
 * Take the lock on a mutex.
 *
 * This takes ownership of a mutex if it is not currently owned. Ownership
 * is held by this thread until a corresponding call to atomMutexPut() by
 * the same thread.
 *
 * Can be called recursively by the original locking thread (owner).
 * Recursive calls are counted, and ownership is not relinquished until
 * the number of unlock (atomMutexPut()) calls by the owner matches the
 * number of lock (atomMutexGet()) calls.
 *
 * While the mutex is locked, no thread other than the owner can lock or
 * unlock it.
 *
 * Depending on the \c timeout value specified the call will do one of
 * the following if the mutex is already locked by another thread:
 *
 * \c timeout == 0 : Call will block until the mutex is available \n
 * \c timeout > 0 : Call will block until available up to the specified timeout \n
 * \c timeout == -1 : Return immediately if mutex is locked by another thread \n
 *
 * If the call needs to block and \c timeout is zero, it will block
 * indefinitely until the owning thread calls atomMutexPut(), or until
 * atomMutexDelete() is called on the mutex.
 *
 * If the call needs to block and \c timeout is non-zero, the call will only
 * block for the specified number of system ticks after which time, if the
 * thread was not already woken, the call will return with \c ATOM_TIMEOUT.
 *
 * If the call would normally block and \c timeout is -1, the call will
 * return immediately with \c ATOM_WOULDBLOCK.
 *
 * This function can only be called from thread context. A mutex has the
 * concept of an owner thread, so it is never valid to make a mutex call
 * from interrupt context when there is no thread to associate with.
 *
 * @param[in] mutex Pointer to mutex object
 * @param[in] timeout Max system ticks to block (0 = forever)
 *
 * @retval ATOM_OK Success
 * @retval ATOM_TIMEOUT Mutex timed out before being woken
 * @retval ATOM_WOULDBLOCK Called with timeout == -1 but the mutex is owned by another thread
 * @retval ATOM_ERR_DELETED Mutex was deleted while suspended
 * @retval ATOM_ERR_CONTEXT Not called in thread context and attempted to block
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting the thread on the suspend queue
 * @retval ATOM_ERR_TIMER Problem registering the timeout
 * @retval ATOM_ERR_OVF The recursive lock count would have overflowed (>255)
 */
uint8_t atomMutexGet (ATOM_MUTEX *mutex, int32_t timeout)
{
    CRITICAL_STORE;
    uint8_t status;
    MUTEX_TIMER timer_data;
    ATOM_TIMER timer_cb;
    ATOM_TCB *curr_tcb_ptr;

    /* Check parameters */
    if (mutex == NULL)
    {
        /* Bad mutex pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Get the current TCB */
        curr_tcb_ptr = atomCurrentContext();

        /* Protect access to the mutex object and OS queues */
        CRITICAL_START ();

        /**
         * Check we are at thread context. Because mutexes have the concept of
         * owner threads, it is never valid to call here from an ISR,
         * regardless of whether we will block.
         */
        if (curr_tcb_ptr == NULL)
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Not currently in thread context, can't suspend */
            status = ATOM_ERR_CONTEXT;
        }

        /* Otherwise if mutex is owned by another thread, block the calling thread */
        else if ((mutex->owner != NULL) && (mutex->owner != curr_tcb_ptr))
        {
            /* If called with timeout >= 0, we should block */
            if (timeout >= 0)
            {
                /* Add current thread to the suspend list on this mutex */
                if (tcbEnqueuePriority (&mutex->suspQ, curr_tcb_ptr) != ATOM_OK)
                {
                    /* Exit critical region */
                    CRITICAL_END ();

                    /* There was an error putting this thread on the suspend list */
                    status = ATOM_ERR_QUEUE;
                }
                else
                {
                    /* Set suspended status for the current thread */
                    curr_tcb_ptr->suspended = TRUE;

                    /* Track errors */
                    status = ATOM_OK;

                    /* Register a timer callback if requested */
                    if (timeout)
                    {
                        /* Fill out the data needed by the callback to wake us up */
                        timer_data.tcb_ptr = curr_tcb_ptr;
                        timer_data.mutex_ptr = mutex;

                        /* Fill out the timer callback request structure */
                        timer_cb.cb_func = atomMutexTimerCallback;
                        timer_cb.cb_data = (POINTER)&timer_data;
                        timer_cb.cb_ticks = timeout;

                        /**
                         * Store the timer details in the TCB so that we can
                         * cancel the timer callback if the mutex is put
                         * before the timeout occurs.
                         */
                        curr_tcb_ptr->suspend_timo_cb = &timer_cb;

                        /* Register a callback on timeout */
                        if (atomTimerRegister (&timer_cb) != ATOM_OK)
                        {
                            /* Timer registration failed */
                            status = ATOM_ERR_TIMER;

                            /* Clean up and return to the caller */
                            (void)tcbDequeueEntry (&mutex->suspQ, curr_tcb_ptr);
                            curr_tcb_ptr->suspended = FALSE;
                            curr_tcb_ptr->suspend_timo_cb = NULL;
                        }
                    }

                    /* Set no timeout requested */
                    else
                    {
                        /* No need to cancel timeouts on this one */
                        curr_tcb_ptr->suspend_timo_cb = NULL;
                    }

                    /* Exit critical region */
                    CRITICAL_END ();

                    /* Check no errors have occurred */
                    if (status == ATOM_OK)
                    {
                        /**
                         * Current thread now blocking, schedule in a new
                         * one. We already know we are in thread context
                         * so can call the scheduler from here.
                         */
                        atomSched (FALSE);

                        /**
                         * Normal atomMutexPut() wakeups will set ATOM_OK status,
                         * while timeouts will set ATOM_TIMEOUT and mutex
                         * deletions will set ATOM_ERR_DELETED. */
                        status = curr_tcb_ptr->suspend_wake_status;

                        /**
                         * If we were woken up by another thread relinquishing
                         * the mutex and handing this thread ownership, then
                         * the relinquishing thread will set status to ATOM_OK
                         * and will make this thread the owner. Setting the
                         * owner before waking the thread ensures that no other
                         * thread can preempt and take ownership of the mutex
                         * between this thread being made ready to run, and
                         * actually being scheduled back in here.
                         */
                        if (status == ATOM_OK)
                        {
                            /**
                             * Since this thread has just gained ownership, the
                             * lock count is zero and should be incremented
                             * once for this call.
                             */
                            mutex->count++;
                        }
                    }
                }
            }
            else
            {
                /* timeout == -1, requested not to block and mutex is owned by another thread */
                CRITICAL_END();
                status = ATOM_WOULDBLOCK;
            }
        }
        else
        {
            /* Mutex is not owned, or is owned by us; we can claim ownership */

            /* Increment the lock count, checking for count overflow */
            if (mutex->count == 255)
            {
                /* Don't increment, just return error status */
                status = ATOM_ERR_OVF;
            }
            else
            {
                /* Increment the count and return to the calling thread */
                mutex->count++;

                /* If the mutex is not locked, mark the calling thread as the new owner */
                if (mutex->owner == NULL)
                {
                    mutex->owner = curr_tcb_ptr;
                }

                /* Successful */
                status = ATOM_OK;
            }

            /* Exit critical region */
            CRITICAL_END ();
        }
    }

    return (status);
}
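
A non-blocking "try lock" sketch using timeout == -1; try_update is an illustrative name:

#include "atom.h"
#include "atommutex.h"

static uint8_t try_update (ATOM_MUTEX *mutex)
{
    uint8_t status;

    /* Returns ATOM_WOULDBLOCK instead of suspending if another thread owns it */
    status = atomMutexGet (mutex, -1);
    if (status == ATOM_OK)
    {
        /* ... brief access to the shared resource ... */
        status = atomMutexPut (mutex);
    }
    else if (status == ATOM_WOULDBLOCK)
    {
        /* Another thread owns the mutex: do something else rather than block */
    }

    return (status);
}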