Example #1
/**
 * \b test_thread_func
 *
 * Entry point for test thread. The same thread entry point is used for all
 * four test threads.
 *
 * @param[in] param Unused (optional thread entry parameter)
 *
 * @return None
 */
static void test_thread_func (uint32_t param)
{
    uint32_t loop_cnt;
    uint8_t status;
    CRITICAL_STORE;

    /* Avoid a compiler warning about the unused parameter */
    param = param;

    /* Run a Get/Put pair many times */
    loop_cnt = NUM_TEST_LOOPS;
    while (loop_cnt--)
    {
        if ((status = atomMutexGet (&mutex1, 0)) != ATOM_OK)
        {
            /* Error getting mutex, notify the status code */
            ATOMLOG (_STR("G%d\n"), status);
            CRITICAL_START ();
            g_failures++;
            CRITICAL_END ();
            break;
        }
        else if ((status = atomMutexPut (&mutex1)) != ATOM_OK)
        {
            /* Error putting mutex, notify the status code */
            ATOMLOG (_STR("P%d\n"), status);
            CRITICAL_START ();
            g_failures++;
            CRITICAL_END ();
            break;
        }
    }

    /* Post sem1 to notify the main thread we're finished */
    if (atomSemPut (&sem1) != ATOM_OK)
    {
        ATOMLOG (_STR("Sem1 putfail\n"));
        CRITICAL_START ();
        g_failures++;
        CRITICAL_END ();
    }

    /* Loop forever */
    while (1)
    {
        atomTimerDelay (SYSTEM_TICKS_PER_SEC);
    }
}
Example #2
/*
 *  Can we assume that by the time the interactive features are in use,
 *  real-time performance no longer matters?
 *  2012.12.12
 */
static void Tty_copy_to_cook(tty_t * tty)
{
    byte_t                  c       = 0;
    CRITICAL_DECLARE(tty->tty_lock);

    CRITICAL_BEGIN();

    /*  Process (cook) input only while the raw read queue holds data  */
    while( !TQ_IS_EMPTY(tty->tty_read_queue)  )
    {
        TQ_GET_CHAR(tty->tty_read_queue,c);

        switch(c)
        {
        case CHAR_BACK:
            if( TQ_IS_EMPTY(tty->tty_second_queue) )
                continue;
            TQ_DEC(tty->tty_second_queue.tq_head);
            break;
        default:
            TQ_PUT_CHAR(tty->tty_second_queue,c);
            break;
        }
    }

    CRITICAL_END();
}
Example #3

/**
 * \b archIntEnable
 *
 * Enable/unmask or disable/mask an interrupt in the interrupt controller.
 * @param[in] int_vector Interrupt vector to enable/disable
 * @param[in] enable TRUE=enable, FALSE=disable
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERROR Error
 */
int archIntEnable (int int_vector, int enable)
{
    CRITICAL_STORE;
    int status;

    /* Check vector is valid */
    if ((int_vector < 0) || (int_vector > DM36X_INTC_MAX_VEC))
    {
        /* Invalid vector number */
        status = ATOM_ERROR;
    }
    else
    {
        /* Valid vector, mask or unmask it using RMW */
        CRITICAL_START();
        if (enable)
        {
            /* Enable/unmask the interrupt */
            INTC_REG(((int_vector >= 32) ? DM36X_INTC_EINT1 : DM36X_INTC_EINT0))
                    |= (1 << ((int_vector >= 32) ? (int_vector - 32) : int_vector));
        }
        else
        {
            /* Disable/mask the interrupt */
            INTC_REG(((int_vector >= 32) ? DM36X_INTC_EINT1 : DM36X_INTC_EINT0))
                    &= ~(1 << ((int_vector >= 32) ? (int_vector - 32) : int_vector));
        }
        CRITICAL_END();
        status = ATOM_OK;
    }

    return (status);
}
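
A minimal usage sketch for the above; DM36X_TIMER0_INT is a hypothetical vector name (the real vector numbers live in the DM36x port headers):

/* Unmask a device interrupt; a failure means the vector was out of range */
if (archIntEnable (DM36X_TIMER0_INT, TRUE) != ATOM_OK)
{
    /* Vector was outside 0..DM36X_INTC_MAX_VEC */
}

/* Later: disable/mask the same vector again */
(void)archIntEnable (DM36X_TIMER0_INT, FALSE);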
Example #4
/**
 * \b atomTimerDelayCallback
 *
 * This is an internal function not for use by application code.
 *
 * Callback for atomTimerDelay() calls. Wakes up the sleeping threads.
 *
 * @param[in] cb_data Callback parameter (DELAY_TIMER ptr for sleeping thread)
 *
 * @return None
 */
static void atomTimerDelayCallback (POINTER cb_data)
{
    DELAY_TIMER *timer_data_ptr;
    CRITICAL_STORE;

    /* Get the DELAY_TIMER structure pointer */
    timer_data_ptr = (DELAY_TIMER *)cb_data;

    /* Check parameter is valid */
    if (timer_data_ptr)
    {
        /* Enter critical region */
        CRITICAL_START ();

        /* Put the thread on the ready queue */
        (void)tcbEnqueuePriority (&tcbReadyQ, timer_data_ptr->tcb_ptr);

        /* Exit critical region */
        CRITICAL_END ();

        /**
         * Don't call the scheduler yet. The ISR exit routine will do this
         * in case there are other callbacks to be made, which may also make
         * threads ready.
         */
    }
}
Example #5
/**
 * \b atomSemTimerCallback
 *
 * This is an internal function not for use by application code.
 *
 * Timeouts on suspended threads are notified by the timer system through
 * this generic callback. The timer system calls us back with a pointer to
 * the relevant \c SEM_TIMER object which is used to retrieve the
 * semaphore details.
 *
 * @param[in] cb_data Pointer to a SEM_TIMER object
 */
static void atomSemTimerCallback (POINTER cb_data)
{
    SEM_TIMER *timer_data_ptr;
    CRITICAL_STORE;

    /* Get the SEM_TIMER structure pointer */
    timer_data_ptr = (SEM_TIMER *)cb_data;

    /* Check parameter is valid */
    if (timer_data_ptr)
    {
        /* Enter critical region */
        CRITICAL_START ();

        /* Set status to indicate to the waiting thread that it timed out */
        timer_data_ptr->tcb_ptr->suspend_wake_status = ATOM_TIMEOUT;

        /* Flag as no timeout registered */
        timer_data_ptr->tcb_ptr->suspend_timo_cb = NULL;

        /* Remove this thread from the semaphore's suspend list */
        (void)tcbDequeueEntry (&timer_data_ptr->sem_ptr->suspQ, timer_data_ptr->tcb_ptr);

        /* Put the thread on the ready queue */
        (void)tcbEnqueuePriority (&tcbReadyQ, timer_data_ptr->tcb_ptr);

        /* Exit critical region */
        CRITICAL_END ();

        /**
         * Note that we don't call the scheduler now as it will be called
         * when we exit the ISR by atomIntExit().
         */
    }
}
Example #6
/*
//////////////////////////////////////////////////////////////////////////////////////////
//  Name     : Tty_read
//
//  Function : Read data from a tty into a caller-supplied buffer.
//
//  Params   :
//      ttyid               : int
//      Meaning: tty number
//
//      buffer              : void *
//      Meaning: destination buffer for the bytes read
//
//      size                : size_t
//      Meaning: maximum number of bytes to read
//
//  Returns  :
//      Type   : int
//      Meaning: number of bytes actually read (-1 on bad parameters)
//
//  Notes    :
//
//  Change log:
//  Date        |    Author     |  Description
//========================================================================================
//  2013-12-05
//  2011-11-11  |               |
//////////////////////////////////////////////////////////////////////////////////////////
*/
int Tty_read(int ttyid, void *buffer, size_t size)
{
    tty_t               *   tty     = tty_pool + ttyid;
    byte_t              *   buf     = buffer;
    byte_t                  c       = 0;
    CRITICAL_DECLARE(tty->tty_lock);

#ifdef _CFG_CHECK_PARAMETER_
    if( ttyid >= TTY_MAX )	return -1;
    if( NULL == buffer )    return -1;
#endif  /*  _CFG_CHECK_PARAMETER_   */

    /*  The loop terminates when the buffer is full  */
    TTY_LOCK(tty);
    while( size )
    {
        CRITICAL_BEGIN();
        if( TQ_IS_EMPTY(tty->tty_second_queue) )
        {   /*  Cooked queue empty: wait for data  */
            Proc_wait_on(&(tty->tty_wait));
            TTY_FREE(tty);  /*  release the tty lock before rescheduling  */
            CRITICAL_END();
            Proc_sched(0);
            TTY_LOCK(tty);
        }
        else
        {
            TQ_GET_CHAR(tty->tty_second_queue,c);
            CRITICAL_END();
            *buf++ = c;
            if( CHAR_CR == c && ( tty->tty_termios.temo_type & TERMIOS_TYPE_TTY ) )
            {   /*  If the TTY is a terminal, a carriage return ends the read;
                    replace the CR with 0  */
                *--buf = 0;
                break;
            }
            --size;
        }
    }
    TTY_FREE(tty);
    return buf - (byte_t *)buffer;
}
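
A short usage sketch, assuming tty 0 exists and has TERMIOS_TYPE_TTY set (so a carriage return terminates the read early and is replaced by 0):

byte_t line[64];
int n;

/* Blocks until a full line (terminal mode) or the buffer fills */
n = Tty_read (0, line, sizeof (line));
if (n < 0)
{
    /* Bad ttyid or NULL buffer (with _CFG_CHECK_PARAMETER_ enabled) */
}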
Example #7
/**
 * \b atomTimerCancel
 *
 * Cancel a timer callback previously registered using atomTimerRegister().
 *
 * This function can be called from interrupt context, but loops internally
 * through the time list, so the potential execution cycles cannot be
 * determined in advance.
 *
 * @param[in] timer_ptr Pointer to timer to cancel
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_PARAM Bad parameters
 * @retval ATOM_ERR_NOT_FOUND Timer registration was not found
 */
uint8_t atomTimerCancel (ATOM_TIMER *timer_ptr)
{
    uint8_t status = ATOM_ERR_NOT_FOUND;
    ATOM_TIMER *prev_ptr, *next_ptr;
    CRITICAL_STORE;

    /* Parameter check */
    if (timer_ptr == NULL)
    {
        /* Return error */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Protect the list */
        CRITICAL_START ();

        /* Walk the list to find the relevant timer */
        prev_ptr = next_ptr = timer_queue;
        while (next_ptr)
        {
            /* Is this entry the one we're looking for? */
            if (next_ptr == timer_ptr)
            {
                if (next_ptr == timer_queue)
                {
                    /* We're removing the list head */
                    timer_queue = next_ptr->next_timer;
                }
                else
                {
                    /* We're removing a mid or tail timer */
                    prev_ptr->next_timer = next_ptr->next_timer;
                }

                /* Successful */
                status = ATOM_OK;
                break;
            }

            /* Move on to the next in the list */
            prev_ptr = next_ptr;
            next_ptr = next_ptr->next_timer;

        }

        /* End of list protection */
        CRITICAL_END ();
    }

    return (status);
}
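
A minimal cancellation sketch. The same ATOM_TIMER structure that was passed to atomTimerRegister() must be used; my_timer here is assumed to be a previously registered descriptor:

if (atomTimerCancel (&my_timer) == ATOM_ERR_NOT_FOUND)
{
    /* Too late: the callback already fired (or was never registered) */
}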
Example #8
void krn_mutex_lock(krn_mutex *mutex)
{
	CRITICAL_STORE;
	CRITICAL_START();
	mutex->flag ++;
	if(mutex->flag > 1) {
		krn_thread_lock(krn_thread_current);
		krn_thread_current->mutex = mutex;
		krn_dispatch();
	}
	else mutex->thread = krn_thread_current;
	CRITICAL_END();
}
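
A hedged usage sketch. The field usage above suggests a zero-initialised krn_mutex starts unlocked (flag == 0); krn_mutex_unlock() appears in Example #16:

static krn_mutex lock;          /* assumed zero-initialised: unlocked */

void shared_update (void)
{
    krn_mutex_lock (&lock);     /* dispatches away if already held */
    /* update shared state here */
    krn_mutex_unlock (&lock);
}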
Example #9
/**
 * \b test_thread
 *
 * Function calling the test function of the Atomthreads test suite.
 *
 */
void
test_thread (uint32_t param)
{
    uint32_t failures ;
    CRITICAL_STORE ;

    failures = test_start ()  ;

    atomTimerDelay (10) ;
    CRITICAL_START() ;
    printf ("%s %s\r\n", ATOMTHREADS_TEST, failures ? "FAIL" : "PASS") ;
    exit (failures) ;
    CRITICAL_END() ;    /* not reached: exit() does not return */
}
Example #10
/**
 * \b atomTimerRegister
 *
 * Register a timer callback.
 *
 * Callers should fill out and pass in a timer descriptor, containing
 * the number of system ticks until they would like a callback, together
 * with a callback function and optional parameter. The number of ticks
 * must be greater than zero.
 *
 * On the relevant system tick count, the callback function will be
 * called.
 *
 * These timers are used by some of the OS library routines, but they
 * can also be used by application code requiring timer facilities at
 * system tick resolution.
 *
 * This function can be called from interrupt context, but loops internally
 * through the time list, so the potential execution cycles cannot be
 * determined in advance.
 *
 * @param[in] timer_ptr Pointer to timer descriptor
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_PARAM Bad parameters
 */
uint8_t atomTimerRegister (ATOM_TIMER *timer_ptr)
{
    uint8_t status;
    CRITICAL_STORE;

    /* Parameter check */
    if ((timer_ptr == NULL) || (timer_ptr->cb_func == NULL)
        || (timer_ptr->cb_ticks == 0))
    {
        /* Return error */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Protect the list */
        CRITICAL_START ();

        /*
         * Enqueue in the list of timers.
         *
         * The list is not ordered, all timers are inserted at the start
         * of the list. On each system tick increment the list is walked
         * and the remaining ticks count for that timer is decremented.
         * Once the remaining ticks reaches zero, the timer callback is
         * made.
         */
        if (timer_queue == NULL)
        {
            /* List is empty, insert new head */
            timer_ptr->next_timer = NULL;
            timer_queue = timer_ptr;
        }
        else
        {
            /* List has at least one entry, enqueue new timer before */
            timer_ptr->next_timer = timer_queue;
            timer_queue = timer_ptr;
        }

        /* End of list protection */
        CRITICAL_END ();

        /* Successful */
        status = ATOM_OK;
    }

    return (status);
}
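
A minimal registration sketch. Because the descriptor is linked directly into timer_queue, it must stay valid (e.g. static) until the callback fires or atomTimerCancel() removes it; my_callback and arm_one_shot are illustrative names:

static ATOM_TIMER my_timer;     /* must outlive the registration */

static void my_callback (POINTER cb_data)
{
    /* Runs from the system tick handler: keep this short */
}

static void arm_one_shot (void)
{
    my_timer.cb_func  = my_callback;
    my_timer.cb_data  = NULL;
    my_timer.cb_ticks = SYSTEM_TICKS_PER_SEC;   /* roughly one second */

    if (atomTimerRegister (&my_timer) != ATOM_OK)
    {
        /* ATOM_ERR_PARAM: NULL pointer/callback or zero tick count */
    }
}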
Example #11
void
win_unmap( segix_t segix, void **pp )
{
	win_t *winp;

	CRITICAL_BEGIN();

	/* verify window mapped
	 */
	ASSERT( segix < tranp->t_segmaplen );
	winp = tranp->t_segmap[segix];
	ASSERT( winp );

	/* validate p
	 */
	ASSERT( pp );
	ASSERT( *pp );
	ASSERT( *pp >= winp->w_p );
	ASSERT( *pp < ( void * )( ( char * )( winp->w_p ) + tranp->t_segsz ));

	/* decrement the reference count. if zero, place at tail of LRU list.
	 */
	ASSERT( winp->w_refcnt > 0 );
	winp->w_refcnt--;
	ASSERT( ! winp->w_prevp );
	ASSERT( ! winp->w_nextp );
	if ( winp->w_refcnt == 0 ) {
		if ( tranp->t_lrutailp ) {
			ASSERT( tranp->t_lruheadp );
			winp->w_prevp = tranp->t_lrutailp;
			tranp->t_lrutailp->w_nextp = winp;
			tranp->t_lrutailp = winp;
		} else {
			ASSERT( ! tranp->t_lruheadp );
			ASSERT( ! winp->w_prevp );
			tranp->t_lruheadp = winp;
			tranp->t_lrutailp = winp;
		}
		ASSERT( ! winp->w_nextp );
	}

	/* zero the caller's pointer
	 */
	*pp = 0;

	CRITICAL_END();
}
Example #12
/* 2012.12.06 */
void *      Tty_echo_hook_set (int ttyid,void (* echo)(byte_t))
{
    tty_t               *   tty         = tty_pool + ttyid;
    void                *   handle      = NULL;
    CRITICAL_DECLARE(tty->tty_lock);

#ifdef _CFG_CHECK_PARAMETER_
    if( ttyid >= TTY_MAX )  return NULL;
    if( NULL == echo )      return NULL;
#endif  /*  _CFG_CHECK_PARAMETER_   */

    CRITICAL_BEGIN();
    handle = tty->tty_echo_hook;
    tty->tty_echo_hook = echo;
    CRITICAL_END();

    return handle;
}
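
A short sketch of installing an echo hook and later restoring the previous one; uart_putc is a hypothetical output routine, and the restore assumes a previous hook existed (the parameter check rejects a NULL hook):

extern void uart_putc (byte_t c);       /* hypothetical echo routine */

void echo_session (void)
{
    void *prev = Tty_echo_hook_set (0, uart_putc);
    /* interactive session runs with input echoed through uart_putc */
    (void)Tty_echo_hook_set (0, (void (*)(byte_t))prev);   /* restore */
}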
Example #13

void RecordEvent(EventType Type, void *arg1, void *arg2, void *arg3)
{
	CRITICAL_STORE;
	CRITICAL_START();
	EventIndex = (EventIndex + 1) % MAX_EVENT;
	EventBuffer[EventIndex].timestamp = GetTime();

	if(EventBuffer[EventIndex].event == Empty || EventBuffer[EventIndex].event == NULL)
	{
		EventBuffer[EventIndex].event = Type;
		EventBuffer[EventIndex].arg1 = arg1;
		EventBuffer[EventIndex].arg2 = arg2;
		EventBuffer[EventIndex].arg3 = arg3;
		EventNumbers++;
	}
	CRITICAL_END();
}
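
A hedged call sketch; EVT_IRQ is a placeholder EventType value, and the three void* arguments carry whatever context the trace point wants logged:

/* Trace an interrupt, logging its number as the first argument */
RecordEvent (EVT_IRQ, (void *)(uintptr_t)irq_num, NULL, NULL);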
Example #14
inline void krn_dispatch()
{
	krn_thread *old;
	CRITICAL_STORE;
	CRITICAL_START();
	if(krn_thread_nearest != 0) {
		if(krn_timer_nearest <= krn_timer_current) {
			krn_thread_move(krn_thread_nearest, krn_thread_current); //for hard realtime
			krn_thread_cont(krn_thread_nearest);
			old = krn_thread_nearest;
			krn_thread_nearest = krn_thread_nearest->t_next;
			if(krn_thread_nearest != 0) {
				/* the expired thread may have been the only one queued */
				krn_thread_nearest->t_prev = 0;
				krn_timer_nearest = krn_thread_nearest->timer;
			}
			old->t_next = 0;
		}
	}
	while (!krn_dispatch_h());
	CRITICAL_END();
}
Example #15
// The sleeping thread is inserted at its proper place in the timer list
void krn_sleep(int16_t ticks)
{
	krn_thread *old, *post;
	CRITICAL_STORE;
	CRITICAL_START();
	post = 0;
	if(krn_thread_nearest) {
		old = krn_thread_nearest;
		do {
			old->timer -= krn_timer_current;
			if(post == 0) {
				if(ticks >= old->timer) {
					if(old->t_next) {
						if((old->t_next->timer - krn_timer_current) > ticks) post = old;
					} else {
						post = old;
					}
				}
			}
			old = old->t_next;
		} while(old);
	}
	krn_timer_current = 0;
	if(post != 0) {
		krn_thread_current->t_next = post->t_next;
		krn_thread_current->t_prev = post;
		if(post->t_next) post->t_next->t_prev = krn_thread_current;
		post->t_next = krn_thread_current;
		krn_thread_current->timer = ticks;
		krn_timer_nearest = krn_thread_nearest->timer;
	} else {
		krn_thread_current->t_next = krn_thread_nearest;
		krn_thread_current->t_prev = 0;
		krn_thread_current->timer = ticks;
		if(krn_thread_nearest) krn_thread_nearest->t_prev = krn_thread_current;
		krn_thread_nearest = krn_thread_current;
		krn_timer_nearest = ticks;
	}
	krn_thread_stop(krn_thread_current);
	CRITICAL_END();
	krn_dispatch();
}
Example #16
void krn_mutex_unlock(krn_mutex *mutex)
{
	krn_thread *first, *cur;
	CRITICAL_STORE;
	CRITICAL_START();
	mutex->flag --;
	cur = first = krn_thread_first;
	do {
		if(cur->mutex == mutex) {
			cur->mutex = 0;
			krn_thread_unlock(cur);
			if(krn_thread_first != cur)	/* was "!= cur | 1", always true due to precedence */
				krn_thread_move(cur, krn_thread_current); //for hard realtime
			krn_dispatch();
			break;
		}
		cur = cur->next;
	} while(cur != first);
	CRITICAL_END();
}
Example #17
/**
 * \b atomMutexGet
 *
 * Take the lock on a mutex.
 *
 * This takes ownership of a mutex if it is not currently owned. Ownership
 * is held by this thread until a corresponding call to atomMutexPut() by
 * the same thread.
 *
 * Can be called recursively by the original locking thread (owner).
 * Recursive calls are counted, and ownership is not relinquished until
 * the number of unlock (atomMutexPut()) calls by the owner matches the
 * number of lock (atomMutexGet()) calls.
 *
 * No thread other than the owner can lock or unlock the mutex while it is
 * locked by another thread.
 *
 * Depending on the \c timeout value specified the call will do one of
 * the following if the mutex is already locked by another thread:
 *
 * \c timeout == 0 : Call will block until the mutex is available \n
 * \c timeout > 0 : Call will block until available up to the specified timeout \n
 * \c timeout == -1 : Return immediately if mutex is locked by another thread \n
 *
 * If the call needs to block and \c timeout is zero, it will block
 * indefinitely until the owning thread calls atomMutexPut() or
 * atomMutexDelete() is called on the mutex.
 *
 * If the call needs to block and \c timeout is non-zero, the call will only
 * block for the specified number of system ticks after which time, if the
 * thread was not already woken, the call will return with \c ATOM_TIMEOUT.
 *
 * If the call would normally block and \c timeout is -1, the call will
 * return immediately with \c ATOM_WOULDBLOCK.
 *
 * This function can only be called from thread context. A mutex has the
 * concept of an owner thread, so it is never valid to make a mutex call
 * from interrupt context when there is no thread to associate with.
 *
 * @param[in] mutex Pointer to mutex object
 * @param[in] timeout Max system ticks to block (0 = forever)
 *
 * @retval ATOM_OK Success
 * @retval ATOM_TIMEOUT Mutex timed out before being woken
 * @retval ATOM_WOULDBLOCK Called with timeout == -1 but count is zero
 * @retval ATOM_ERR_DELETED Mutex was deleted while suspended
 * @retval ATOM_ERR_CONTEXT Not called in thread context and attempted to block
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting the thread on the suspend queue
 * @retval ATOM_ERR_TIMER Problem registering the timeout
 * @retval ATOM_ERR_OVF The recursive lock count would have overflowed (>255)
 */
uint8_t atomMutexGet (ATOM_MUTEX *mutex, int32_t timeout)
{
    CRITICAL_STORE;
    uint8_t status;
    MUTEX_TIMER timer_data;
    ATOM_TIMER timer_cb;
    ATOM_TCB *curr_tcb_ptr;

    /* Check parameters */
    if (mutex == NULL)
    {
        /* Bad mutex pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Get the current TCB */
        curr_tcb_ptr = atomCurrentContext();

        /* Protect access to the mutex object and OS queues */
        CRITICAL_START ();

        /**
         * Check we are at thread context. Because mutexes have the concept of
         * owner threads, it is never valid to call here from an ISR,
         * regardless of whether we will block.
         */
        if (curr_tcb_ptr == NULL)
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Not currently in thread context, can't suspend */
            status = ATOM_ERR_CONTEXT;
        }

        /* Otherwise if mutex is owned by another thread, block the calling thread */
        else if ((mutex->owner != NULL) && (mutex->owner != curr_tcb_ptr))
        {
            /* If called with timeout >= 0, we should block */
            if (timeout >= 0)
            {
                /* Add current thread to the suspend list on this mutex */
                if (tcbEnqueuePriority (&mutex->suspQ, curr_tcb_ptr) != ATOM_OK)
                {
                    /* Exit critical region */
                    CRITICAL_END ();

                    /* There was an error putting this thread on the suspend list */
                    status = ATOM_ERR_QUEUE;
                }
                else
                {
                    /* Set suspended status for the current thread */
                    curr_tcb_ptr->suspended = TRUE;

                    /* Track errors */
                    status = ATOM_OK;

                    /* Register a timer callback if requested */
                    if (timeout)
                    {
                        /* Fill out the data needed by the callback to wake us up */
                        timer_data.tcb_ptr = curr_tcb_ptr;
                        timer_data.mutex_ptr = mutex;

                        /* Fill out the timer callback request structure */
                        timer_cb.cb_func = atomMutexTimerCallback;
                        timer_cb.cb_data = (POINTER)&timer_data;
                        timer_cb.cb_ticks = timeout;

                        /**
                         * Store the timer details in the TCB so that we can
                         * cancel the timer callback if the mutex is put
                         * before the timeout occurs.
                         */
                        curr_tcb_ptr->suspend_timo_cb = &timer_cb;

                        /* Register a callback on timeout */
                        if (atomTimerRegister (&timer_cb) != ATOM_OK)
                        {
                            /* Timer registration failed */
                            status = ATOM_ERR_TIMER;

                            /* Clean up and return to the caller */
                            (void)tcbDequeueEntry (&mutex->suspQ, curr_tcb_ptr);
                            curr_tcb_ptr->suspended = FALSE;
                            curr_tcb_ptr->suspend_timo_cb = NULL;
                        }
                    }

                    /* Set no timeout requested */
                    else
                    {
                        /* No need to cancel timeouts on this one */
                        curr_tcb_ptr->suspend_timo_cb = NULL;
                    }

                    /* Exit critical region */
                    CRITICAL_END ();

                    /* Check no errors have occurred */
                    if (status == ATOM_OK)
                    {
                        /**
                         * Current thread now blocking, schedule in a new
                         * one. We already know we are in thread context
                         * so can call the scheduler from here.
                         */
                        atomSched (FALSE);

                        /**
                         * Normal atomMutexPut() wakeups will set ATOM_OK status,
                         * while timeouts will set ATOM_TIMEOUT and mutex
                         * deletions will set ATOM_ERR_DELETED.
                         */
                        status = curr_tcb_ptr->suspend_wake_status;

                        /**
                         * If we were woken up by another thread relinquishing
                         * the mutex and handing this thread ownership, then
                         * the relinquishing thread will set status to ATOM_OK
                         * and will make this thread the owner. Setting the
                         * owner before waking the thread ensures that no other
                         * thread can preempt and take ownership of the mutex
                         * between this thread being made ready to run, and
                         * actually being scheduled back in here.
                         */
                        if (status == ATOM_OK)
                        {
                            /**
                             * Since this thread has just gained ownership, the
                             * lock count is zero and should be incremented
                             * once for this call.
                             */
                            mutex->count++;
                        }
                    }
                }
            }
            else
            {
                /* timeout == -1, requested not to block and mutex is owned by another thread */
                CRITICAL_END();
                status = ATOM_WOULDBLOCK;
            }
        }
        else
        {
            /* Thread is not owned or is owned by us, we can claim ownership */

            /* Increment the lock count, checking for count overflow */
            if (mutex->count == 255)
            {
                /* Don't increment, just return error status */
                status = ATOM_ERR_OVF;
            }
            else
            {
                /* Increment the count and return to the calling thread */
                mutex->count++;

                /* If the mutex is not locked, mark the calling thread as the new owner */
                if (mutex->owner == NULL)
                {
                    mutex->owner = curr_tcb_ptr;
                }

                /* Successful */
                status = ATOM_OK;
            }

            /* Exit critical region */
            CRITICAL_END ();
        }
    }

    return (status);
}
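
A usage sketch of the three timeout modes described above, reusing the mutex1 object from Example #1:

uint8_t status;

/* Block until available */
status = atomMutexGet (&mutex1, 0);

/* Block for at most one second's worth of ticks */
status = atomMutexGet (&mutex1, SYSTEM_TICKS_PER_SEC);
if (status == ATOM_TIMEOUT)
{
    /* Another thread still owned it when the timeout expired */
}

/* Poll without blocking */
status = atomMutexGet (&mutex1, -1);
if (status == ATOM_WOULDBLOCK)
{
    /* Another thread owns it right now */
}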
Example #18
/**
 * \b atomSemPut
 *
 * Perform a put operation on a semaphore.
 *
 * This increments the current count value for the semaphore and returns.
 *
 * If the count value was previously zero and there are threads blocking on the
 * semaphore, the call will wake up the highest priority thread suspended. Only
 * one thread is woken per call to atomSemPut(). If multiple threads of the
 * same priority are suspended, they are woken in order of suspension (FIFO).
 *
 * This function can be called from interrupt context.
 *
 * @param[in] sem Pointer to semaphore object
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_OVF The semaphore count would have overflowed (>255)
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
 * @retval ATOM_ERR_TIMER Problem cancelling a timeout for a woken thread
 */
uint8_t atomSemPut (ATOM_SEM * sem)
{
    uint8_t status;
    CRITICAL_STORE;
    ATOM_TCB *tcb_ptr;

    /* Check parameters */
    if (sem == NULL)
    {
        /* Bad semaphore pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Protect access to the semaphore object and OS queues */
        CRITICAL_START ();

        /* If any threads are blocking on the semaphore, wake up one */
        if (sem->suspQ)
        {
            /**
             * Threads are woken up in priority order, with a FIFO system
             * used on same priority threads. We always take the head,
             * ordering is taken care of by an ordered list enqueue.
             */
            tcb_ptr = tcbDequeueHead (&sem->suspQ);
            if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
            {
                /* Exit critical region */
                CRITICAL_END ();

                /* There was a problem putting the thread on the ready queue */
                status = ATOM_ERR_QUEUE;
            }
            else
            {
                /* Set OK status to be returned to the waiting thread */
                tcb_ptr->suspend_wake_status = ATOM_OK;

                /* If there's a timeout on this suspension, cancel it */
                if ((tcb_ptr->suspend_timo_cb != NULL)
                    && (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK))
                {
                    /* There was a problem cancelling a timeout on this semaphore */
                    status = ATOM_ERR_TIMER;
                }
                else
                {
                    /* Flag as no timeout registered */
                    tcb_ptr->suspend_timo_cb = NULL;

                    /* Successful */
                    status = ATOM_OK;
                }

                /* Exit critical region */
                CRITICAL_END ();

                /**
                 * The scheduler may now make a policy decision to thread
                 * switch if we are currently in thread context. If we are
                 * in interrupt context it will be handled by atomIntExit().
                 */
                if (atomCurrentContext())
                    atomSched (FALSE);
            }
        }

        /* If no threads waiting, just increment the count and return */
        else
        {
            /* Check for count overflow */
            if (sem->count == 255)
            {
                /* Don't increment, just return error status */
                status = ATOM_ERR_OVF;
            }
            else
            {
                /* Increment the count and return success */
                sem->count++;
                status = ATOM_OK;
            }

            /* Exit critical region */
            CRITICAL_END ();
        }
    }

    return (status);
}
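
Because atomSemPut() never blocks, it is the natural way to signal a worker thread from an ISR. A hedged sketch, assuming io_sem was created with atomSemCreate(&io_sem, 0) and that the port's ISR entry/exit hooks follow the usual atomIntEnter()/atomIntExit() pattern:

static ATOM_SEM io_sem;     /* assumed created with initial count 0 */

void my_device_isr (void)
{
    atomIntEnter ();
    (void)atomSemPut (&io_sem);     /* wake the worker thread */
    atomIntExit (FALSE);            /* scheduler runs on ISR exit */
}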
Example #19
/**
 * \b atomSemGet
 *
 * Perform a get operation on a semaphore.
 *
 * This decrements the current count value for the semaphore and returns.
 * If the count value is already zero then the call will block until the
 * count is incremented by another thread, or until the specified \c timeout
 * is reached. Blocking threads will also be woken if the semaphore is
 * deleted by another thread while blocking.
 *
 * Depending on the \c timeout value specified the call will do one of
 * the following if the count value is zero:
 *
 * \c timeout == 0 : Call will block until the count is non-zero \n
 * \c timeout > 0 : Call will block until non-zero up to the specified timeout \n
 * \c timeout == -1 : Return immediately if the count is zero \n
 *
 * If the call needs to block and \c timeout is zero, it will block
 * indefinitely until atomSemPut() or atomSemDelete() is called on the
 * semaphore.
 *
 * If the call needs to block and \c timeout is non-zero, the call will only
 * block for the specified number of system ticks after which time, if the
 * thread was not already woken, the call will return with \c ATOM_TIMEOUT.
 *
 * If the call would normally block and \c timeout is -1, the call will
 * return immediately with \c ATOM_WOULDBLOCK.
 *
 * This function can only be called from interrupt context if the \c timeout
 * parameter is -1 (in which case it does not block).
 *
 * @param[in] sem Pointer to semaphore object
 * @param[in] timeout Max system ticks to block (0 = forever)
 *
 * @retval ATOM_OK Success
 * @retval ATOM_TIMEOUT Semaphore timed out before being woken
 * @retval ATOM_WOULDBLOCK Called with timeout == -1 but count is zero
 * @retval ATOM_ERR_DELETED Semaphore was deleted while suspended
 * @retval ATOM_ERR_CONTEXT Not called in thread context and attempted to block
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting the thread on the suspend queue
 * @retval ATOM_ERR_TIMER Problem registering the timeout
 */
uint8_t atomSemGet (ATOM_SEM *sem, int32_t timeout)
{
    CRITICAL_STORE;
    uint8_t status;
    SEM_TIMER timer_data;
    ATOM_TIMER timer_cb;
    ATOM_TCB *curr_tcb_ptr;

    /* Check parameters */
    if (sem == NULL)
    {
        /* Bad semaphore pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Protect access to the semaphore object and OS queues */
        CRITICAL_START ();

        /* If count is zero, block the calling thread */
        if (sem->count == 0)
        {
            /* If called with timeout >= 0, we should block */
            if (timeout >= 0)
            {
                /* Count is zero, block the calling thread */

                /* Get the current TCB */
                curr_tcb_ptr = atomCurrentContext();

                /* Check we are actually in thread context */
                if (curr_tcb_ptr)
                {
                    /* Add current thread to the suspend list on this semaphore */
                    if (tcbEnqueuePriority (&sem->suspQ, curr_tcb_ptr) != ATOM_OK)
                    {
                        /* Exit critical region */
                        CRITICAL_END ();

                        /* There was an error putting this thread on the suspend list */
                        status = ATOM_ERR_QUEUE;
                    }
                    else
                    {
                        /* Set suspended status for the current thread */
                        curr_tcb_ptr->suspended = TRUE;

                        /* Track errors */
                        status = ATOM_OK;

                        /* Register a timer callback if requested */
                        if (timeout)
                        {
                            /* Fill out the data needed by the callback to wake us up */
                            timer_data.tcb_ptr = curr_tcb_ptr;
                            timer_data.sem_ptr = sem;

                            /* Fill out the timer callback request structure */
                            timer_cb.cb_func = atomSemTimerCallback;
                            timer_cb.cb_data = (POINTER)&timer_data;
                            timer_cb.cb_ticks = timeout;

                            /**
                             * Store the timer details in the TCB so that we can
                             * cancel the timer callback if the semaphore is put
                             * before the timeout occurs.
                             */
                            curr_tcb_ptr->suspend_timo_cb = &timer_cb;

                            /* Register a callback on timeout */
                            if (atomTimerRegister (&timer_cb) != ATOM_OK)
                            {
                                /* Timer registration failed */
                                status = ATOM_ERR_TIMER;

                                /* Clean up and return to the caller */
                                (void)tcbDequeueEntry (&sem->suspQ, curr_tcb_ptr);
                                curr_tcb_ptr->suspended = FALSE;
                                curr_tcb_ptr->suspend_timo_cb = NULL;
                            }
                        }

                        /* Set no timeout requested */
                        else
                        {
                            /* No need to cancel timeouts on this one */
                            curr_tcb_ptr->suspend_timo_cb = NULL;
                        }

                        /* Exit critical region */
                        CRITICAL_END ();

                        /* Check no errors have occurred */
                        if (status == ATOM_OK)
                        {
                            /**
                             * Current thread now blocking, schedule in a new
                             * one. We already know we are in thread context
                             * so can call the scheduler from here.
                             */
                            atomSched (FALSE);

                            /**
                             * Normal atomSemPut() wakeups will set ATOM_OK status,
                             * while timeouts will set ATOM_TIMEOUT and semaphore
                             * deletions will set ATOM_ERR_DELETED.
                             */
                            status = curr_tcb_ptr->suspend_wake_status;

                            /**
                             * If we have been woken up with ATOM_OK then
                             * another thread incremented the semaphore and
                             * handed control to this thread. In theory the
                             * posting thread increments the counter and
                             * as soon as this thread wakes up we decrement
                             * the counter here, but to prevent another
                             * thread preempting this thread and decrementing
                             * the semaphore before this section was
                             * scheduled back in, we emulate the increment
                             * and decrement by not incrementing in the
                             * atomSemPut() and not decrementing here. The
                             * count remains zero throughout preventing other
                             * threads preempting before we decrement the
                             * count again.
                             */

                        }
                    }
                }
                else
                {
                    /* Exit critical region */
                    CRITICAL_END ();

                    /* Not currently in thread context, can't suspend */
                    status = ATOM_ERR_CONTEXT;
                }
            }
            else
            {
                /* timeout == -1, requested not to block and count is zero */
                CRITICAL_END();
                status = ATOM_WOULDBLOCK;
            }
        }
        else
        {
            /* Count is non-zero, just decrement it and return to calling thread */
            sem->count--;

            /* Exit critical region */
            CRITICAL_END ();

            /* Successful */
            status = ATOM_OK;
        }
    }

    return (status);
}
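
A worker-loop sketch pairing with the atomSemPut() ISR example above; a one-second timeout lets the loop do periodic housekeeping when no events arrive:

while (1)
{
    uint8_t status = atomSemGet (&io_sem, SYSTEM_TICKS_PER_SEC);
    if (status == ATOM_OK)
    {
        /* Process one queued event */
    }
    else if (status == ATOM_TIMEOUT)
    {
        /* No event within one second: housekeeping */
    }
    else
    {
        break;  /* e.g. ATOM_ERR_DELETED if the semaphore was destroyed */
    }
}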
Example #20
/**
 * \b atomSemDelete
 *
 * Deletes a semaphore object.
 *
 * Any threads currently suspended on the semaphore will be woken up with
 * return status ATOM_ERR_DELETED. If called at thread context then the
 * scheduler will be called during this function which may schedule in one
 * of the woken threads depending on relative priorities.
 *
 * This function can be called from interrupt context, but loops internally
 * waking up all threads blocking on the semaphore, so the potential
 * execution cycles cannot be determined in advance.
 *
 * @param[in] sem Pointer to semaphore object
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
 * @retval ATOM_ERR_TIMER Problem cancelling a timeout on a woken thread
 */
uint8_t atomSemDelete (ATOM_SEM *sem)
{
    uint8_t status;
    CRITICAL_STORE;
    ATOM_TCB *tcb_ptr;
    uint8_t woken_threads = FALSE;

    /* Parameter check */
    if (sem == NULL)
    {
        /* Bad semaphore pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Default to success status unless errors occur during wakeup */
        status = ATOM_OK;

        /* Wake up all suspended tasks */
        while (1)
        {
            /* Enter critical region */
            CRITICAL_START ();

            /* Check if any threads are suspended */
            tcb_ptr = tcbDequeueHead (&sem->suspQ);

            /* A thread is suspended on the semaphore */
            if (tcb_ptr)
            {
                /* Return error status to the waiting thread */
                tcb_ptr->suspend_wake_status = ATOM_ERR_DELETED;

                /* Put the thread on the ready queue */
                if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
                {
                    /* Exit critical region */
                    CRITICAL_END ();

                    /* Quit the loop, returning error */
                    status = ATOM_ERR_QUEUE;
                    break;
                }

                /* If there's a timeout on this suspension, cancel it */
                if (tcb_ptr->suspend_timo_cb)
                {
                    /* Cancel the callback */
                    if (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK)
                    {
                        /* Exit critical region */
                        CRITICAL_END ();

                        /* Quit the loop, returning error */
                        status = ATOM_ERR_TIMER;
                        break;
                    }

                    /* Flag as no timeout registered */
                    tcb_ptr->suspend_timo_cb = NULL;

                }

                /* Exit critical region */
                CRITICAL_END ();

                /* Request a reschedule */
                woken_threads = TRUE;
            }

            /* No more suspended threads */
            else
            {
                /* Exit critical region and quit the loop */
                CRITICAL_END ();
                break;
            }
        }

        /* Call scheduler if any threads were woken up */
        if (woken_threads == TRUE)
        {
            /**
             * Only call the scheduler if we are in thread context, otherwise
             * it will be called on exiting the ISR by atomIntExit().
             */
            if (atomCurrentContext())
                atomSched (FALSE);
        }
    }

    return (status);
}
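
Blocked callers observe the deletion as a wake-up status, so a robust waiter checks for it explicitly (a minimal sketch):

uint8_t status = atomSemGet (&sem1, 0);
if (status == ATOM_ERR_DELETED)
{
    /* The semaphore was destroyed while this thread was suspended */
}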
Example #21
/**
 * \b test_thread_func
 *
 * Entry point for test thread.
 *
 * @param[in] param Thread ID (0 to 3)
 *
 * @return None
 */
static void test_thread_func (uint32_t param)
{
    int thread_id, expected_thread;
    int time_error, thread_error;
    uint32_t new_time;
    CRITICAL_STORE;

    /* Pull out thread ID */
    thread_id = (int)param;

    /* Run forever */
    while (1) {
        /* Check if test is currently in operation */
        if (test_started) {
            /*
             * If the system time has ticked over, check that the currently
             * running thread is not the one that was running last tick.
             */

            /* Default to no error this time */
            time_error = thread_error = FALSE;

            /* Do the whole operation with interrupts locked out */
            CRITICAL_START();

            /* Check if time has ticked over */
            new_time = atomTimeGet();

            /* Only perform the check if this is not the first thread to run */
            if ((last_time != 0) && (last_thread_id != -1)) {
                /* Check if the time has ticked over */
                if (new_time != last_time) {
                    /* Check time only ticked over by 1 */
                    if ((new_time - last_time) != 1) {
                        time_error = TRUE;
                    }

                    /*
                     * We are expecting the previous thread to be our thread_id
                     * minus one.
                     */
                    expected_thread = thread_id - 1;
                    if (expected_thread == -1) {
                        expected_thread = 3;
                    }

                    /* Check that the last thread was the expected one */
                    if (last_thread_id != expected_thread) {
                        thread_error = TRUE;
                    }

                    /* Increment the switch count */
                    switch_cnt++;
                }
            }

            /* Store the currently-running thread as the last-running */
            last_thread_id = thread_id;
            last_time = new_time;

            /* Finished with the interrupt lockout */
            CRITICAL_END();

            /* If we got an error above, increment the total failure count */
            if (test_started && (thread_error || time_error)) {
                failure_cnt[thread_id]++;
                ATOMLOG(_STR("T%d\n"), thread_id);
            }
        }
    }
}
Example #22
void
win_map( segix_t segix, void **pp )
{
	off64_t segoff;
	win_t *winp;

	CRITICAL_BEGIN();

#ifdef TREE_DEBUG
	mlog(MLOG_DEBUG | MLOG_TREE | MLOG_NOLOCK,
	     "win_map(segix=%u,addr=%p)\n", segix, pp);
#endif
	/* resize the array if necessary */
	if ( segix >= tranp->t_segmaplen )
		win_segmap_resize( segix );

	/* see if segment already mapped. if ref cnt zero,
	 * remove from LRU list.
	 */
	winp = tranp->t_segmap[segix];
	if ( winp ) {
#ifdef TREE_DEBUG
		mlog(MLOG_DEBUG | MLOG_TREE | MLOG_NOLOCK,
		     "win_map(): requested segment already mapped\n");
#endif
		if ( winp->w_refcnt == 0 ) {
			ASSERT( tranp->t_lruheadp );
			ASSERT( tranp->t_lrutailp );
			if ( tranp->t_lruheadp == winp ) {
				if ( tranp->t_lrutailp == winp ) {
					tranp->t_lruheadp = 0;
					tranp->t_lrutailp = 0;
				} else {
					tranp->t_lruheadp = winp->w_nextp;
					tranp->t_lruheadp->w_prevp = 0;
				}
			} else {
				if ( tranp->t_lrutailp == winp ) {
					tranp->t_lrutailp = winp->w_prevp;
					tranp->t_lrutailp->w_nextp = 0;
				} else {
					winp->w_prevp->w_nextp = winp->w_nextp;
					winp->w_nextp->w_prevp = winp->w_prevp;
				}
			}
			winp->w_prevp = 0;
			winp->w_nextp = 0;
		} else {
			ASSERT( ! winp->w_prevp );
			ASSERT( ! winp->w_nextp );
		}
		winp->w_refcnt++;
		*pp = winp->w_p;
		CRITICAL_END();
		return;
	}

	/* Allocate a new descriptor if we haven't yet hit the maximum,
	 * otherwise reuse any descriptor on the LRU list.
	 */
	if ( tranp->t_wincnt < tranp->t_winmax ) {
#ifdef TREE_DEBUG
		mlog(MLOG_DEBUG | MLOG_TREE | MLOG_NOLOCK,
		     "win_map(): create a new window\n");
#endif
		winp = ( win_t * )calloc( 1, sizeof( win_t ));
		ASSERT( winp );
		tranp->t_wincnt++;
	} else if ( tranp->t_lruheadp ) {
		/* REFERENCED */
		intgen_t rval;
#ifdef TREE_DEBUG
		mlog(MLOG_DEBUG | MLOG_TREE | MLOG_NOLOCK,
		     "win_map(): get head from lru freelist & unmap\n");
#endif
		ASSERT( tranp->t_lrutailp );
		winp = tranp->t_lruheadp;
		tranp->t_lruheadp = winp->w_nextp;
		if ( tranp->t_lruheadp ) {
			tranp->t_lruheadp->w_prevp = 0;
		} else {
			tranp->t_lrutailp = 0;
		}
		tranp->t_segmap[winp->w_segix] = NULL;
		rval = munmap( winp->w_p, tranp->t_segsz );
		ASSERT( ! rval );
		memset( ( void * )winp, 0, sizeof( win_t ));
	} else {
		ASSERT( tranp->t_wincnt == tranp->t_winmax );
		*pp = NULL;
		CRITICAL_END();
		mlog( MLOG_NORMAL | MLOG_WARNING, _(
		      "all map windows in use. Check virtual memory limits\n"));
		return;
	}

	/* calculate offset of segment
	 */
	segoff = segix * ( off64_t )tranp->t_segsz;

	/* map the window
	 */
	ASSERT( tranp->t_segsz >= 1 );
	ASSERT( tranp->t_firstoff
		<=
		OFF64MAX - segoff - ( off64_t )tranp->t_segsz + 1ll );
	ASSERT( ! ( tranp->t_segsz % pgsz ));
	ASSERT( ! ( ( tranp->t_firstoff + segoff ) % ( off64_t )pgsz ));
#ifdef TREE_DEBUG
	mlog(MLOG_DEBUG | MLOG_TREE | MLOG_NOLOCK,
	     "win_map(): mmap segment at %lld, size = %llu\n",
	    ( off64_t )( tranp->t_firstoff + segoff ), tranp->t_segsz);
#endif
	tranp->t_winmmaps++;
	winp->w_p = mmap_autogrow(
			    tranp->t_segsz,
			    tranp->t_fd,
			    ( off64_t )( tranp->t_firstoff + segoff ));
	if ( winp->w_p == (void *)-1 ) {
		int	error = errno;
		mlog( MLOG_NORMAL | MLOG_ERROR, _(
		      "win_map(): unable to map a node segment of size %d at %d: %s\n"),
		      tranp->t_segsz, tranp->t_firstoff + segoff,
		      strerror( error ));

		tranp->t_wincnt--;
		tranp->t_winmax--;
		CRITICAL_END();
		free(winp);

		if (error == ENOMEM && tranp->t_lruheadp) {
			mlog( MLOG_NORMAL | MLOG_ERROR,
		      		_("win_map(): try to select a different win_t\n"));
			win_map(segix, pp);
			return;
		}
		*pp = NULL;
		return;
	}
	winp->w_segix  = segix;
	ASSERT( winp->w_refcnt == 0 );
	winp->w_refcnt++;
	tranp->t_segmap[winp->w_segix] = winp;

	*pp = winp->w_p;

	CRITICAL_END();
}
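
A hedged map/use/unmap sketch; every successful win_map() call is balanced by the win_unmap() shown in Example #11, which drops the reference count and zeroes the caller's pointer:

void *p;

win_map (segix, &p);
if (p == NULL)
{
    /* All map windows in use, or the mmap failed */
}
else
{
    /* Read or write the node segment through p, then release it */
    win_unmap (segix, &p);
}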
Example #23
/**
 * \b atomTimerDelay
 *
 * Suspend a thread for the given number of system ticks.
 *
 * Note that the wakeup time is measured in ticks from the current system
 * tick, so for a one-tick delay the thread may be woken up at any time
 * between the atomTimerDelay() call and the next system tick. To guarantee
 * a minimum delay of N ticks, specify N + 1.
 *
 * This function can only be called from thread context.
 *
 * @param[in] ticks Number of system ticks to delay (must be > 0)
 *
 * @retval ATOM_OK Successful delay
 * @retval ATOM_ERR_PARAM Bad parameter (ticks must be non-zero)
 * @retval ATOM_ERR_CONTEXT Not called from thread context
 */
uint8_t atomTimerDelay (uint32_t ticks)
{
    ATOM_TCB *curr_tcb_ptr;
    ATOM_TIMER timer_cb;
    DELAY_TIMER timer_data;
    CRITICAL_STORE;
    uint8_t status;

    /* Get the current TCB  */
    curr_tcb_ptr = atomCurrentContext();

    /* Parameter check */
    if (ticks == 0)
    {
        /* Return error */
        status = ATOM_ERR_PARAM;
    }

    /* Check we are actually in thread context */
    else if (curr_tcb_ptr == NULL)
    {
        /* Not currently in thread context, can't suspend */
        status = ATOM_ERR_CONTEXT;
    }

    /* Otherwise safe to proceed */
    else
    {
        /* Protect the system queues */
        CRITICAL_START ();

        /* Set suspended status for the current thread */
        curr_tcb_ptr->suspended = TRUE;

        /* Register the timer callback */

        /* Fill out the data needed by the callback to wake us up */
        timer_data.tcb_ptr = curr_tcb_ptr;

        /* Fill out the timer callback request structure */
        timer_cb.cb_func = atomTimerDelayCallback;
        timer_cb.cb_data = (POINTER)&timer_data;
        timer_cb.cb_ticks = ticks;

        /* Store the timeout callback details, though we don't use it */
        curr_tcb_ptr->suspend_timo_cb = &timer_cb;

        /* Register the callback */
        if (atomTimerRegister (&timer_cb) != ATOM_OK)
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Timer registration didn't work, won't get a callback */
            status = ATOM_ERR_TIMER;
        }
        else
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Successful timer registration */
            status = ATOM_OK;

            /* Current thread should now block, schedule in another */
            atomSched (FALSE);
        }
    }

    return (status);
}
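
A short sketch of the minimum-delay rule above: to guarantee at least N ticks, request N + 1. Assuming SYSTEM_TICKS_PER_SEC is 100, a guaranteed 100ms delay needs 11 ticks:

/* 100ms == SYSTEM_TICKS_PER_SEC / 10 ticks; add one for the guarantee */
if (atomTimerDelay ((SYSTEM_TICKS_PER_SEC / 10) + 1) != ATOM_OK)
{
    /* ATOM_ERR_CONTEXT if called from an ISR, ATOM_ERR_PARAM if ticks == 0 */
}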
Example #24
/**
 * \b test_start
 *
 * Start mutex test.
 *
 * Stress-tests mutex Get and Put operations. Four threads are created which are
 * continually Getting and Putting the same mutex, with no time delays between
 * each Get/Put.
 *
 * @return Number of failures detected
 */
uint32_t test_start (void)
{
    CRITICAL_STORE;
    int finish_cnt;

    /* Default to zero g_failures */
    g_failures = 0;

    /* Create mutex to stress */
    if (atomMutexCreate (&mutex1) != ATOM_OK)
    {
        ATOMLOG (_STR("Error creating mutex\n"));
        g_failures++;
    }
    /* Create sem to receive thread-finished notification */
    else if (atomSemCreate (&sem1, 0) != ATOM_OK)
    {
        ATOMLOG (_STR("Error creating sem\n"));
        g_failures++;
    }
    else
    {
        /* Take ownership of the mutex to ensure all threads wait for now */
        if (atomMutexGet (&mutex1, 0) != ATOM_OK)
        {
            ATOMLOG (_STR("Error taking mutex\n"));
            g_failures++;
        }

        /* Create Thread 1 */
        if (atomThreadCreate(&tcb[0], TEST_THREAD_PRIO, test_thread_func, 1,
              &test_thread_stack[0][TEST_THREAD_STACK_SIZE - 1],
              TEST_THREAD_STACK_SIZE) != ATOM_OK)
        {
            /* Fail */
            ATOMLOG (_STR("Error creating test thread\n"));
            CRITICAL_START ();
            g_failures++;
            CRITICAL_END ();
        }

        /* Create Thread 2 */
        if (atomThreadCreate(&tcb[1], TEST_THREAD_PRIO, test_thread_func, 2,
              &test_thread_stack[1][TEST_THREAD_STACK_SIZE - 1],
              TEST_THREAD_STACK_SIZE) != ATOM_OK)
        {
            /* Fail */
            ATOMLOG (_STR("Error creating test thread\n"));
            CRITICAL_START ();
            g_failures++;
            CRITICAL_END ();
        }

        /* Create Thread 3 */
        if (atomThreadCreate(&tcb[2], TEST_THREAD_PRIO, test_thread_func, 3,
              &test_thread_stack[2][TEST_THREAD_STACK_SIZE - 1],
              TEST_THREAD_STACK_SIZE) != ATOM_OK)
        {
            /* Fail */
            ATOMLOG (_STR("Error creating test thread\n"));
            CRITICAL_START ();
            g_failures++;
            CRITICAL_END ();
        }

        /* Create Thread 4 */
        if (atomThreadCreate(&tcb[3], TEST_THREAD_PRIO, test_thread_func, 4,
              &test_thread_stack[3][TEST_THREAD_STACK_SIZE - 1],
              TEST_THREAD_STACK_SIZE) != ATOM_OK)
        {
            /* Fail */
            ATOMLOG (_STR("Error creating test thread\n"));
            CRITICAL_START ();
            g_failures++;
            CRITICAL_END ();
        }

        /* Release ownership of the mutex to kick the threads off */
        if (atomMutexPut (&mutex1) != ATOM_OK)
        {
            ATOMLOG (_STR("Error putting mutex\n"));
            g_failures++;
        }

        /*
         * All four threads will now be performing Gets/Puts on mutex1.
         * When they have finished they will post sem1, so we wait
         * until sem1 is posted four times.
         */
        finish_cnt = 0;
        while (1)
        {
            /*
             * Attempt to Get sem1. When we have managed to get
             * the semaphore four times, it must have been posted
             * by all four threads.
             */
            if (atomSemGet (&sem1, 0) == ATOM_OK)
            {
                /* Increment our count of finished threads */
                finish_cnt++;

                /* Check if all four threads have now posted sem1 */
                if (finish_cnt == 4)
                {
                    break;
                }
            }
        }

        /* Delete OS objects, test finished */
        if (atomMutexDelete (&mutex1) != ATOM_OK)
        {
            ATOMLOG (_STR("Delete failed\n"));
            CRITICAL_START ();
            g_failures++;
            CRITICAL_END ();
        }
        if (atomSemDelete (&sem1) != ATOM_OK)
        {
            ATOMLOG (_STR("Delete failed\n"));
            CRITICAL_START ();
            g_failures++;
            CRITICAL_END ();
        }
    }

    /* Check thread stack usage (if enabled) */
#ifdef ATOM_STACK_CHECKING
    {
        uint32_t used_bytes, free_bytes;
        int thread;

        /* Check all threads */
        for (thread = 0; thread < NUM_TEST_THREADS; thread++)
        {
            /* Check thread stack usage */
            if (atomThreadStackCheck (&tcb[thread], &used_bytes, &free_bytes) != ATOM_OK)
            {
                ATOMLOG (_STR("StackCheck\n"));
                g_failures++;
            }
            else
            {
                /* Check the thread did not use up to the end of stack */
                if (free_bytes == 0)
                {
                    ATOMLOG (_STR("StackOverflow %d\n"), thread);
                    g_failures++;
                }

                /* Log the stack usage */
#ifdef TESTS_LOG_STACK_USAGE
                ATOMLOG (_STR("StackUse:%d\n"), (int)used_bytes);
#endif
            }
        }
    }
#endif

    /* Quit */
    return g_failures;

}
Example #25
/**
 * \b atomSched
 *
 * This is an internal function not for use by application code.
 *
 * This is the main scheduler routine. It is called by the various OS
 * library routines to check if any threads should be scheduled in now.
 * If so, the context will be switched from the current thread to the
 * new one.
 *
 * The scheduler is priority-based with round-robin performed on threads
 * with the same priority. Round-robin is only performed on timer ticks
 * however. During reschedules caused by an OS operation (e.g. after
 * giving or taking a semaphore) we only allow the scheduling in of
 * threads with higher priority than current priority. On timer ticks we
 * also allow the scheduling of same-priority threads - in that case we
 * schedule in the head of the ready list for that priority and put the
 * current thread at the tail.
 *
 * @param[in] timer_tick Should be TRUE when called from the system tick
 *
 * @return None
 */
void atomSched (uint8_t timer_tick)
{
    CRITICAL_STORE;
    ATOM_TCB *new_tcb = NULL;
    int16_t lowest_pri;

    /**
     * Check the OS has actually started. As long as the proper initialisation
     * sequence is followed there should be no calls here until the OS is
     * started, but we check to handle badly-behaved ports.
     */
    if (atomOSStarted == FALSE)
    {
        /* Don't schedule anything in until the OS is started */
        return;
    }

    /* Enter critical section */
    CRITICAL_START ();

    /**
     * If the current thread is going into suspension, then
     * unconditionally dequeue the next thread for execution.
     */
    if (curr_tcb->suspended == TRUE)
    {
        /**
         * Dequeue the next ready to run thread. There will always be
         * at least the idle thread waiting. Note that this could
         * actually be the suspending thread if it was unsuspended
         * before the scheduler was called.
         */
        new_tcb = tcbDequeueHead (&tcbReadyQ);

        /**
         * Don't need to add the current thread to any queue because
         * it was suspended by another OS mechanism and will be
         * sitting on a suspend queue or similar within one of the OS
         * primitive libraries (e.g. semaphore).
         */

        /* Switch to the new thread */
        atomThreadSwitch (curr_tcb, new_tcb);
    }

    /**
     * Otherwise the current thread is still ready, but check
     * if any other threads are ready.
     */
    else
    {
        /* Calculate which priority is allowed to be scheduled in */
        if (timer_tick == TRUE)
        {
            /* Same priority or higher threads can preempt */
            lowest_pri = (int16_t)curr_tcb->priority;
        }
        else if (curr_tcb->priority > 0)
        {
            /* Only higher priority threads can preempt, invalid for 0 (highest) */
            lowest_pri = (int16_t)(curr_tcb->priority - 1);
        }
        else
        {
            /**
             * Current priority is already highest (0), don't allow preempt by
             * threads of any priority because this is not a time-slice.
             */
            lowest_pri = -1;
        }

        /* Check if a reschedule is allowed */
        if (lowest_pri >= 0)
        {
            /* Check for a thread at the given minimum priority level or higher */
            new_tcb = tcbDequeuePriority (&tcbReadyQ, (uint8_t)lowest_pri);

            /* If a thread was found, schedule it in */
            if (new_tcb)
            {
                /* Add the current thread to the ready queue */
                (void)tcbEnqueuePriority (&tcbReadyQ, curr_tcb);

                /* Switch to the new thread */
                atomThreadSwitch (curr_tcb, new_tcb);
            }
        }
    }

    /* Exit critical section */
    CRITICAL_END ();
}
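The timer_tick flag above is what enables time-slicing, so it is worth seeing where TRUE comes from. Below is a minimal sketch of a port's system tick handler; the handler name is hypothetical, but atomIntEnter(), atomTimerTick() and atomIntExit() are the standard kernel entry points, and it is the TRUE passed through atomIntExit() that reaches atomSched(TRUE).

/* Hypothetical system tick ISR (a sketch, not any specific port's handler) */
static void sys_tick_isr (void)
{
    /* Tell the kernel we are in interrupt context */
    atomIntEnter ();

    /* Process the tick count and any expired timer callbacks */
    atomTimerTick ();

    /* Leave interrupt context: TRUE marks this as a timer tick, so the
     * scheduler may time-slice same-priority threads */
    atomIntExit (TRUE);
}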
Exemple #26
0
/**
 * \b atomMutexPut
 *
 * Give back the lock on a mutex.
 *
 * This checks that the mutex is owned by the calling thread, and decrements
 * the recursive lock count. Once the lock count reaches zero, the lock is
 * considered relinquished and no longer owned by this thread.
 *
 * If the lock is relinquished and there are threads blocking on the mutex, the
 * call will wake up the highest-priority suspended thread. Only one thread is
 * woken per call to atomMutexPut(). If multiple threads of the same priority
 * are suspended, they are woken in order of suspension (FIFO).
 *
 * This function can only be called from thread context. A mutex has the
 * concept of an owner thread, so it is never valid to make a mutex call
 * from interrupt context when there is no thread to associate with.
 *
 * @param[in] mutex Pointer to mutex object
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_PARAM Bad parameter
 * @retval ATOM_ERR_QUEUE Problem putting a woken thread on the ready queue
 * @retval ATOM_ERR_TIMER Problem cancelling a timeout for a woken thread
 * @retval ATOM_ERR_OWNERSHIP Attempt to unlock mutex not owned by this thread
 */
uint8_t atomMutexPut (ATOM_MUTEX *mutex)
{
    uint8_t status;
    CRITICAL_STORE;
    ATOM_TCB *tcb_ptr, *curr_tcb_ptr;

    /* Check parameters */
    if (mutex == NULL)
    {
        /* Bad mutex pointer */
        status = ATOM_ERR_PARAM;
    }
    else
    {
        /* Get the current TCB */
        curr_tcb_ptr = atomCurrentContext();

        /* Protect access to the mutex object and OS queues */
        CRITICAL_START ();

        /* Check if the calling thread owns this mutex */
        if (mutex->owner != curr_tcb_ptr)
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Attempt to unlock by non-owning thread */
            status = ATOM_ERR_OWNERSHIP;
        }
        else
        {
            /* Lock is owned by this thread, decrement the recursive lock count */
            mutex->count--;

            /* Once recursive lock count reaches zero, we relinquish ownership */
            if (mutex->count == 0)
            {
                /* Relinquish ownership */
                mutex->owner = NULL;

                /* If any threads are blocking on this mutex, wake them now */
                if (mutex->suspQ)
                {
                    /**
                     * Threads are woken up in priority order, with a FIFO system
                     * used on same priority threads. We always take the head,
                     * ordering is taken care of by an ordered list enqueue.
                     */
                    tcb_ptr = tcbDequeueHead (&mutex->suspQ);
                    if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
                    {
                        /* Exit critical region */
                        CRITICAL_END ();

                        /* There was a problem putting the thread on the ready queue */
                        status = ATOM_ERR_QUEUE;
                    }
                    else
                    {
                        /* Set OK status to be returned to the waiting thread */
                        tcb_ptr->suspend_wake_status = ATOM_OK;

                        /* Set this thread as the new owner of the mutex */
                        mutex->owner = tcb_ptr;

                        /* If there's a timeout on this suspension, cancel it */
                        if ((tcb_ptr->suspend_timo_cb != NULL)
                            && (atomTimerCancel (tcb_ptr->suspend_timo_cb) != ATOM_OK))
                        {
                            /* There was a problem cancelling a timeout on this mutex */
                            status = ATOM_ERR_TIMER;
                        }
                        else
                        {
                            /* Flag as no timeout registered */
                            tcb_ptr->suspend_timo_cb = NULL;

                            /* Successful */
                            status = ATOM_OK;
                        }

                        /* Exit critical region */
                        CRITICAL_END ();

                        /**
                         * The scheduler may now make a policy decision to
                         * thread switch. We already know we are in thread
                         * context so can call the scheduler from here.
                         */
                        atomSched (FALSE);
                    }
                }
                else
                {
                    /**
                     * Relinquished ownership and no threads waiting.
                     * Nothing to do.
                     */

                    /* Exit critical region */
                    CRITICAL_END ();

                    /* Successful */
                    status = ATOM_OK;
                }
            }
            else
            {
                /**
                 * Decremented lock but still retain ownership due to
                 * recursion. Nothing to do.
                 */

                /* Exit critical region */
                CRITICAL_END ();

                /* Successful */
                status = ATOM_OK;
            }
        }
    }

    return (status);
}
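A brief usage sketch may help here, assuming a mutex already created with atomMutexCreate() and a thread-context caller; the function and variable names are illustrative only. It shows the recursive count in action: ownership is relinquished only once every successful Get has been balanced by a Put.

/* Sketch: balanced recursive Get/Put pairs on an existing mutex */
static void mutex_usage_sketch (ATOM_MUTEX *my_mutex)
{
    if (atomMutexGet (my_mutex, 0) == ATOM_OK)  /* count = 1, we are owner */
    {
        (void)atomMutexGet (my_mutex, 0);       /* recursive lock: count = 2 */
        (void)atomMutexPut (my_mutex);          /* count = 1, still owner */
        (void)atomMutexPut (my_mutex);          /* count = 0, ownership released */
    }

    /* A further Put with no matching Get is rejected */
    if (atomMutexPut (my_mutex) == ATOM_ERR_OWNERSHIP)
    {
        ATOMLOG (_STR("Not owner\n"));
    }
}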
Exemple #27
0
/**
 * \b atomThreadCreate
 *
 * Creates and starts a new thread.
 *
 * Callers provide the ATOM_TCB structure storage; TCBs are not obtained
 * from an internal free list.
 *
 * The function puts the new thread on the ready queue and calls the
 * scheduler. If the priority is higher than the current priority, then the
 * new thread may be scheduled in before the function returns.
 *
 * Optionally prefills the thread stack with a known value to enable stack
 * usage checking (if the ATOM_STACK_CHECKING macro is defined).
 *
 * @param[in] tcb_ptr Pointer to the thread's TCB storage
 * @param[in] priority Priority of the thread (0 to 255)
 * @param[in] entry_point Thread entry point
 * @param[in] entry_param Parameter passed to thread entry point
 * @param[in] stack_top Top (highest address) of the stack area
 * @param[in] stack_size Size of the stack area in bytes
 *
 * @retval ATOM_OK Success
 * @retval ATOM_ERR_PARAM Bad parameters
 * @retval ATOM_ERR_QUEUE Error putting the thread on the ready queue
 */
uint8_t atomThreadCreate (ATOM_TCB *tcb_ptr, uint8_t priority, void (*entry_point)(uint32_t), uint32_t entry_param, void *stack_top, uint32_t stack_size)
{
    CRITICAL_STORE;
    uint8_t status;

    if ((tcb_ptr == NULL) || (entry_point == NULL) || (stack_top == NULL)
        || (stack_size == 0))
    {
        /* Bad parameters */
        status = ATOM_ERR_PARAM;
    }
    else
    {

        /* Set up the TCB initial values */
        tcb_ptr->suspended = FALSE;
        tcb_ptr->priority = priority;
        tcb_ptr->prev_tcb = NULL;
        tcb_ptr->next_tcb = NULL;
        tcb_ptr->suspend_timo_cb = NULL;

        /**
         * Store the thread entry point and parameter in the TCB. This may
         * not be necessary for all architecture ports if they put all of
         * this information in the initial thread stack.
         */
        tcb_ptr->entry_point = entry_point;
        tcb_ptr->entry_param = entry_param;

        /**
         * Additional processing only required if stack-checking is
         * enabled. Incurs a slight overhead on each thread creation
         * and uses some additional storage in the TCB, but can be
         * compiled out if not desired.
         */
#ifdef ATOM_STACK_CHECKING

        /* Store the stack details for use by the stack-check function */
        tcb_ptr->stack_top = stack_top;
        tcb_ptr->stack_size = stack_size;

        /**
         * Prefill the stack with a known value. This is used later in
         * calls to atomThreadStackCheck() to get an indication of how
         * much stack has been used during runtime.
         */
        while (stack_size > 0)
        {
            /* Initialise all stack bytes, from the bottom up, to STACK_CHECK_BYTE */
            *((uint8_t *)stack_top - (stack_size - 1)) = STACK_CHECK_BYTE;
            stack_size--;
        }
#else
        /* Avoid compiler warnings due to unused stack_size variable */
        stack_size = stack_size;
#endif

        /**
         * Call the arch-specific routine to set up the stack. This routine
         * is responsible for creating the context save area necessary for
         * allowing atomThreadSwitch() to schedule it in. The initial
         * archContextSwitch() call when this thread gets scheduled in the
         * first time will then restore the program counter to the thread
         * entry point, and any other necessary register values ready for
         * it to start running.
         */
        archThreadContextInit (tcb_ptr, stack_top, entry_point, entry_param);

        /* Protect access to the OS queue */
        CRITICAL_START ();

        /* Put this thread on the ready queue */
        if (tcbEnqueuePriority (&tcbReadyQ, tcb_ptr) != ATOM_OK)
        {
            /* Exit critical region */
            CRITICAL_END ();

            /* Queue-related error */
            status = ATOM_ERR_QUEUE;
        }
        else
        {
            /* Exit critical region */
            CRITICAL_END ();

            /**
             * If the OS is started and we're in thread context, check if we
             * should be scheduled in now.
             */
            if ((atomOSStarted == TRUE) && atomCurrentContext())
                atomSched (FALSE);

            /* Success */
            status = ATOM_OK;
        }
    }

    return (status);
}
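The following sketch shows a typical creation call under stated assumptions: the stack size, priority value and all names are illustrative. Note that stack_top is the highest address of the stack area, matching the prefill loop above, which fills downwards from stack_top.

#define DEMO_STACK_SIZE 256

static ATOM_TCB demo_tcb;
static uint8_t demo_stack[DEMO_STACK_SIZE];

static void demo_thread_func (uint32_t param)
{
    /* Compiler warnings */
    param = param;

    /* Loop forever */
    while (1)
    {
        atomTimerDelay (SYSTEM_TICKS_PER_SEC);
    }
}

/* Called e.g. from main(), after atomOSInit() and before atomOSStart() */
static void demo_create (void)
{
    if (atomThreadCreate (&demo_tcb, 16, demo_thread_func, 0,
                          &demo_stack[DEMO_STACK_SIZE - 1],
                          DEMO_STACK_SIZE) != ATOM_OK)
    {
        ATOMLOG (_STR("Create fail\n"));
    }
}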
Exemple #28
0
/*
//////////////////////////////////////////////////////////////////////////////////////////
//  Name       : Tty_put_char
//
//  Function   : Put one character into a tty's input queue, cooking and
//               echoing it as the terminal settings require
//
//  Parameters :
//      ttyid               : int
//      Meaning: tty number
//
//      c                   : byte_t
//      Meaning: the data to put into the tty
//
//  Return     :
//      Type   : result_t
//      Meaning: RESULT_SUCCEED on success, RESULT_FAILED if the buffer is full
//
//  Note       : called from within an interrupt
//
//  Change log :
//  Date        |    Author     |  Notes
//========================================================================================
//  2013-12-05
//  2011-11-11  |               |
//////////////////////////////////////////////////////////////////////////////////////////
*/
result_t    Tty_put_char(int ttyid,byte_t c)
{
    tty_t               *   tty     = tty_pool + ttyid;
    CRITICAL_DECLARE(tty->tty_lock);

    /* Reject invalid tty numbers */
    if( ttyid < 0 || ttyid >= TTY_MAX )
        return RESULT_FAILED;

    CRITICAL_BEGIN();
    /*
     *  1. An ordinary tty drops the data when its buffer is full, since there is
     *     not necessarily a process waiting for the data.
     *  2. An interactive terminal must keep room for carriage-return and
     *     backspace characters.
     */
    if( TQ_LEFT(tty->tty_second_queue) == 0 ||
        ( tty->tty_termios.temo_type == TERMIOS_TYPE_TTY &&
        !( TQ_LEFT(tty->tty_second_queue) > 4 || CHAR_BACK == c || CHAR_CR == c ) ) )
    {
        CRITICAL_END();
        return RESULT_FAILED;
    }
    TQ_PUT_CHAR( tty->tty_read_queue,c);
    CRITICAL_END();
    /*  If the cook flag is defined, the data must be processed (cooked).  */
    if( tty->tty_termios.temo_iflags & TERMIOS_IFLAG_NEED_COOK )
        Tty_copy_to_cook(tty);
    else
    {
        TQ_INC((tty->tty_read_queue).tq_tail);
        TQ_PUT_CHAR(tty->tty_second_queue,c);
    }

    /*  If the terminal has echo enabled (i.e. real-time output), the received data must be output immediately  */
    if( tty->tty_termios.temo_oflags & TERMIOS_OFLAG_ECHO )
    {
        /*
         *  In password mode, ordinary characters are echoed as '*';
         *  control characters are displayed unchanged.
         */
        if( tty->tty_termios.temo_oflags & TERMIOS_OFLAG_PSW )
        {
            switch(c)
            {
            case CHAR_CR:
            case CHAR_BACK:
                break;
            default:
                c = '*';
                break;
            }
        }
        tty->tty_echo_hook(c);
    }

    switch(tty->tty_termios.temo_type)
    {
    case TERMIOS_TYPE_TTY:
        /*  An interactive terminal wakes up processes waiting for input on carriage return  */
        if( CHAR_CR == c )
            TTY_WAKEUP(tty);
        break;
    default:
        /*  An ordinary tty wakes up waiting processes when its buffer is full  */
        if( TQ_LEFT(tty->tty_second_queue) == 0 )
            TTY_WAKEUP(tty);
        break;
    }

    return RESULT_SUCCEED;
}
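As the header note says, Tty_put_char() is intended to be called from interrupt context. A hypothetical caller might look like the sketch below; Uart_rx_isr and Uart_read_byte are illustrative names, not part of this driver.

/* Sketch: a UART receive interrupt pushing each incoming byte into tty 0 */
void Uart_rx_isr(void)
{
    byte_t c = Uart_read_byte();

    if( Tty_put_char(0,c) != RESULT_SUCCEED )
    {
        /* Buffer full: the character is dropped by design (see comment above) */
    }
}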