Example #1
void k_queue_append_list(struct k_queue *queue, void *head, void *tail)
{
	__ASSERT(head && tail, "invalid head or tail");

	struct k_thread *first_thread, *thread;
	unsigned int key;

	key = irq_lock();

	first_thread = _peek_first_pending_thread(&queue->wait_q);
	while (head && ((thread = _unpend_first_thread(&queue->wait_q)))) {
		prepare_thread_to_run(thread, head);
		head = *(void **)head;
	}

	if (head) {
		sys_slist_append_list(&queue->data_q, head, tail);
	}

	if (first_thread) {
		if (!_is_in_isr() && _must_switch_threads()) {
			(void)_Swap(key);
			return;
		}
	} else {
		if (handle_poll_event(queue)) {
			(void)_Swap(key);
			return;
		}
	}

	irq_unlock(key);
}
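
Most of the Zephyr kernel examples in this listing share one shape: wake a pending thread under irq_lock(), then either context switch via _Swap(key), which also releases the interrupt lock, or fall through to irq_unlock(key). Below is a minimal sketch of that pattern, reusing only helpers that already appear in these examples; it illustrates the idiom and is not a real kernel API:

static void wake_and_reschedule(struct k_queue *queue, void *data)
{
	unsigned int key = irq_lock();
	struct k_thread *thread = _unpend_first_thread(&queue->wait_q);

	if (thread) {
		/* hand the data to the waiter and make it runnable */
		prepare_thread_to_run(thread, data);
		if (!_is_in_isr() && _must_switch_threads()) {
			/* _Swap() releases the IRQ lock once we run again */
			(void)_Swap(key);
			return;
		}
	}

	irq_unlock(key);
}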
Example #2
void k_queue_insert(struct k_queue *queue, void *prev, void *data)
{
	struct k_thread *first_pending_thread;
	unsigned int key;

	key = irq_lock();

	first_pending_thread = _unpend_first_thread(&queue->wait_q);

	if (first_pending_thread) {
		prepare_thread_to_run(first_pending_thread, data);
		if (!_is_in_isr() && _must_switch_threads()) {
			(void)_Swap(key);
			return;
		}
	} else {
		sys_slist_insert(&queue->data_q, prev, data);
		if (handle_poll_event(queue)) {
			(void)_Swap(key);
			return;
		}
	}

	irq_unlock(key);
}
Example #3
int k_msgq_get(struct k_msgq *q, void *data, s32_t timeout)
{
	__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");

	unsigned int key = irq_lock();
	struct k_thread *pending_thread;
	int result;

	if (q->used_msgs > 0) {
		/* take first available message from queue */
		memcpy(data, q->read_ptr, q->msg_size);
		q->read_ptr += q->msg_size;
		if (q->read_ptr == q->buffer_end) {
			q->read_ptr = q->buffer_start;
		}
		q->used_msgs--;

		/* handle first thread waiting to write (if any) */
		pending_thread = _unpend_first_thread(&q->wait_q);
		if (pending_thread) {
			/* add thread's message to queue */
			memcpy(q->write_ptr, pending_thread->base.swap_data,
			       q->msg_size);
			q->write_ptr += q->msg_size;
			if (q->write_ptr == q->buffer_end) {
				q->write_ptr = q->buffer_start;
			}
			q->used_msgs++;

			/* wake up waiting thread */
			_set_thread_return_value(pending_thread, 0);
			_abort_thread_timeout(pending_thread);
			_ready_thread(pending_thread);
			if (!_is_in_isr() && _must_switch_threads()) {
				_Swap(key);
				return 0;
			}
		}
		result = 0;
	} else if (timeout == K_NO_WAIT) {
		/* don't wait for a message to become available */
		result = -ENOMSG;
	} else {
		/* wait for get message success or timeout */
		_pend_current_thread(&q->wait_q, timeout);
		_current->base.swap_data = data;
		return _Swap(key);
	}

	irq_unlock(key);

	return result;
}
Example #4
bool _hsort_sift_down(HSQUIRRELVM v,SQArray *arr, int root, int bottom, SQInteger func)
{
	SQInteger maxChild;
	SQInteger done = 0;
	SQInteger ret;
	SQInteger root2;
	/* sift the value at 'root' down until neither child compares greater */
	while (((root2 = root * 2) <= bottom) && (!done))
	{
		if (root2 == bottom) {
			maxChild = root2;
		}
		else {
			if(!_sort_compare(v,arr->_values[root2],arr->_values[root2 + 1],func,ret))
				return false;
			if (ret > 0) {
				maxChild = root2;
			}
			else {
				maxChild = root2 + 1;
			}
		}

		if(!_sort_compare(v,arr->_values[root],arr->_values[maxChild],func,ret))
			return false;
		if (ret < 0) {
			_Swap(arr->_values[root],arr->_values[maxChild]);
			root = maxChild;
		}
		else {
			done = 1;
		}
	}
	return true;
}
Example #5
void _k_thread_group_op(u32_t groups, void (*func)(struct k_thread *))
{
	unsigned int  key;

	__ASSERT(!_is_in_isr(), "");

	_sched_lock();

	/* Invoke func() on each static thread in the specified group set. */

	_FOREACH_STATIC_THREAD(thread_data) {
		if (is_in_any_group(thread_data, groups)) {
			key = irq_lock();
			func(thread_data->thread);
			irq_unlock(key);
		}
	}

	/*
	 * If the current thread is still in a ready state, then let the
	 * "unlock scheduler" code determine if any rescheduling is needed.
	 */
	if (_is_thread_ready(_current)) {
		k_sched_unlock();
		return;
	}

	/* The current thread is no longer in a ready state--reschedule. */
	key = irq_lock();
	_sched_unlock_no_reschedule();
	_Swap(key);
}
Example #6
uint32_t k_timer_status_sync(struct k_timer *timer)
{
	__ASSERT(!_is_in_isr(), "");

	unsigned int key = irq_lock();
	uint32_t result = timer->status;

	if (result == 0) {
		if (timer->timeout.delta_ticks_from_prev != _INACTIVE) {
			/* wait for timer to expire or stop */
			_pend_current_thread(&timer->wait_q, K_FOREVER);
			_Swap(key);

			/* get updated timer status */
			key = irq_lock();
			result = timer->status;
		} else {
			/* timer is already stopped */
		}
	} else {
		/* timer has already expired at least once */
	}

	timer->status = 0;
	irq_unlock(key);

	return result;
}
Example #7
int k_mem_pool_alloc(struct k_mem_pool *p, struct k_mem_block *block,
		     size_t size, s32_t timeout)
{
	int ret, key;
	s64_t end = 0;

	__ASSERT(!(_is_in_isr() && timeout != K_NO_WAIT), "");

	if (timeout > 0) {
		end = _tick_get() + _ms_to_ticks(timeout);
	}

	while (1) {
		ret = pool_alloc(p, block, size);

		if (ret == 0 || timeout == K_NO_WAIT ||
		    ret == -EAGAIN || (ret && ret != -ENOMEM)) {
			return ret;
		}

		key = irq_lock();
		_pend_current_thread(&p->wait_q, timeout);
		_Swap(key);

		if (timeout != K_FOREVER) {
			timeout = end - _tick_get();

			if (timeout < 0) {
				break;
			}
		}
	}

	return -EAGAIN;
}
Example #8
void *nano_fiber_lifo_get_wait_timeout(struct nano_lifo *lifo,
		int32_t timeout_in_ticks)
{
	unsigned int key = irq_lock_inline();
	void *data;

	if (!lifo->list) {
		if (unlikely(TICKS_NONE == timeout_in_ticks)) {
			irq_unlock_inline(key);
			return NULL;
		}
		if (likely(timeout_in_ticks != TICKS_UNLIMITED)) {
			_nano_timeout_add(_nanokernel.current, &lifo->wait_q,
					timeout_in_ticks);
		}
		_nano_wait_q_put(&lifo->wait_q);
		data = (void *)_Swap(key);
	} else {
		data = lifo->list;
		lifo->list = *(void **)data;
		irq_unlock_inline(key);
	}

	return data;
}
Example #9
int k_msgq_put(struct k_msgq *q, void *data, s32_t timeout)
{
	__ASSERT(!_is_in_isr() || timeout == K_NO_WAIT, "");

	unsigned int key = irq_lock();
	struct k_thread *pending_thread;
	int result;

	if (q->used_msgs < q->max_msgs) {
		/* message queue isn't full */
		pending_thread = _unpend_first_thread(&q->wait_q);
		if (pending_thread) {
			/* give message to waiting thread */
			memcpy(pending_thread->base.swap_data, data,
			       q->msg_size);
			/* wake up waiting thread */
			_set_thread_return_value(pending_thread, 0);
			_abort_thread_timeout(pending_thread);
			_ready_thread(pending_thread);
			if (!_is_in_isr() && _must_switch_threads()) {
				_Swap(key);
				return 0;
			}
		} else {
			/* put message in queue */
			memcpy(q->write_ptr, data, q->msg_size);
			q->write_ptr += q->msg_size;
			if (q->write_ptr == q->buffer_end) {
				q->write_ptr = q->buffer_start;
			}
			q->used_msgs++;
		}
		result = 0;
	} else if (timeout == K_NO_WAIT) {
		/* don't wait for message space to become available */
		result = -ENOMSG;
	} else {
		/* wait for put message success, failure, or timeout */
		_pend_current_thread(&q->wait_q, timeout);
		_current->base.swap_data = data;
		return _Swap(key);
	}

	irq_unlock(key);

	return result;
}
Example #10
/** \brief Remove the minimum element from the heap.
 *
 * \param heap pointer to the heap
 * \param data output: receives the removed minimum value
 * \return HEAP_ERROR if the heap was already empty, HEAP_EMPTY if the
 *         removed element was the last one, HEAP_OK otherwise
 */
Heap_state_t HeapGetMin(Heap_t * heap, unsigned int *data ){
    if( heap->size == 0)    return HEAP_ERROR;
    *data = heap->array[1];
    _Swap(heap->array, 1, heap->size);
    heap->size--;
    _HeapSink(heap, 1);
    if( heap->size == 0)    return HEAP_EMPTY;
    return HEAP_OK;
}
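
In this example _Swap() is a three-argument array-element swap rather than a context switch: HeapGetMin() moves the last element into the root slot before sinking it. A minimal sketch of the helper this code presumably relies on (its actual definition is not part of this listing):

/* Presumed array-element swap used by HeapGetMin() and _HeapSink() */
static void _Swap(unsigned int *array, unsigned int i, unsigned int j){
    unsigned int tmp = array[i];
    array[i] = array[j];
    array[j] = tmp;
}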
Example #11
void task_fiber_wakeup(nano_thread_id_t fiber)
{
	int key = irq_lock();

	/* first verify that the fiber is not waiting on an object */
	if ((fiber->nano_timeout.wait_q) || (_nano_timeout_abort(fiber) < 0)) {
		irq_unlock(key);
	} else {
		_nano_fiber_ready(fiber);
		_Swap(key);
	}
}
Example #12
void k_thread_suspend(struct k_thread *thread)
{
	unsigned int  key = irq_lock();

	_k_thread_single_suspend(thread);

	if (thread == _current) {
		/* we suspended ourselves: switch away immediately */
		_Swap(key);
	} else {
		irq_unlock(key);
	}
}
Example #13
/* This implements a "fair" scheduling policy: at the end of a POSIX
 * thread call that might result in a change of the current maximum
 * priority thread, we always check and context switch if needed.
 * Note that there is significant dispute in the community over the
 * "right" way to do this and different systems do it differently by
 * default.  Zephyr is an RTOS, so we choose latency over
 * throughput.  See here for a good discussion of the broad issue:
 *
 * https://blog.mozilla.org/nfroyd/2017/03/29/on-mutex-performance-part-1/
 */
static void swap_or_unlock(int key)
{
	/* API madness: use __ not _ here.  The latter checks for our
	 * preemption state, but we want to do a switch here even if
	 * we can be preempted.
	 */
	if (!_is_in_isr() && __must_switch_threads()) {
		_Swap(key);
	} else {
		irq_unlock(key);
	}
}
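
A hedged usage sketch for swap_or_unlock(): a caller readies a woken thread under irq_lock() and lets the helper decide whether a context switch is needed. The wait-queue handling below borrows names from the other examples in this listing and is illustrative only:

static void wake_one(struct k_queue *queue)
{
	int key = irq_lock();
	struct k_thread *thread = _unpend_first_thread(&queue->wait_q);

	if (thread) {
		_abort_thread_timeout(thread);
		_ready_thread(thread);
	}

	/* switch if a higher-priority thread is now runnable, else unlock */
	swap_or_unlock(key);
}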
Example #14
/**
 * INTERNAL
 * There exists a separate nano_task_sem_take_wait() implementation since a task
 * context cannot pend on a nanokernel object.  Instead, tasks will poll
 * the semaphore object.
 */
void nano_fiber_sem_take_wait(struct nano_sem *sem)
{
	unsigned int imask;

	imask = irq_lock_inline();
	if (sem->nsig == 0) {
		_nano_wait_q_put(&sem->wait_q);
		_Swap(imask);
	} else {
		sem->nsig--;
		irq_unlock_inline(imask);
	}
}
Example #15
/*********************************************************************
*
*       _BubbleSort
*
* Purpose:
*   Bubble sort algorithm.
*/
static void _BubbleSort(unsigned lb, unsigned ub, SORT_OBJECT * pSortObject) {
    int Swapped;
    do {
        unsigned i;
        Swapped = 0;
        for (i = ub; i > lb; i--) {
            if (_Compare(i - 1, i, pSortObject) > 0) {
                _Swap(i - 1, i, pSortObject);
                Swapped = 1;
            }
        }
    } while (Swapped);
}
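
Here _Compare() and _Swap() address elements by index through a SORT_OBJECT handle. One plausible shape for that indirection, assuming a callback table; the structure below is hypothetical, not the actual emWin definition:

/* Hypothetical layout, for illustration only */
typedef struct {
    void *pData;                                   /* items being sorted */
    int  (*pfCompare)(unsigned i, unsigned j, void *pData);
    void (*pfSwap)   (unsigned i, unsigned j, void *pData);
} SORT_OBJECT_SKETCH;

static void _SwapSketch(unsigned i, unsigned j, SORT_OBJECT_SKETCH *pSortObject) {
    pSortObject->pfSwap(i, j, pSortObject->pData); /* delegate to the callback */
}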
Example #16
/** \brief Sift the element at the given index down until the heap
 *         property is restored.
 *
 * \param heap pointer to the heap
 * \param index position of the element to sift down
 * \return void
 */
static void _HeapSink( Heap_t * heap, unsigned int index ){
    while(2*index <= heap->size){       /* while a left child exists */
        unsigned int j = 2*index;
        if( j+1 <= heap->size && heap->array[j]>heap->array[j+1]){
            j++;                        /* pick the smaller child */
        }
        if( heap->array[j] > heap->array[index]){
            break;                      /* heap property restored */
        }
        _Swap(heap->array, index, j);   /* sink one level down */
        index = j;
    }
}
Example #17
void fiber_sleep(int32_t timeout_in_ticks)
{
    int key;

    if (timeout_in_ticks == TICKS_NONE) {
        fiber_yield();
        return;
    }

    key = irq_lock();
    /* NULL wait queue: the fiber pends on the timeout alone */
    _nano_timeout_add(_nanokernel.current, NULL, timeout_in_ticks);
    _Swap(key);
}
Example #18
static void switch_to_main_thread(void)
{
#ifdef CONFIG_ARCH_HAS_CUSTOM_SWAP_TO_MAIN
	_arch_switch_to_main_thread(_main_thread, _main_stack, MAIN_STACK_SIZE,
				    _main);
#else
	/*
	 * Context switch to main task (entry function is _main()): the
	 * current fake thread is not on a wait queue or ready queue, so it
	 * will never be rescheduled in.
	 */

	_Swap(irq_lock());
#endif
}
Example #19
void *nano_fiber_timer_test(struct nano_timer *timer, int32_t timeout_in_ticks)
{
	int key = irq_lock();
	struct _nano_timeout *t = &timer->timeout_data;
	void *user_data;

	if (_nano_timer_expire_wait(timer, timeout_in_ticks, &user_data)) {
		t->tcs = _nanokernel.current;
		_Swap(key);
		key = irq_lock();
		user_data = timer->user_data;
		timer->user_data = NULL;
	}
	irq_unlock(key);
	return user_data;
}
Example #20
bool _hsort(HSQUIRRELVM v,SQObjectPtr &arr, SQInteger l, SQInteger r,SQInteger func)
{
	SQArray *a = _array(arr);
	SQInteger i;
	SQInteger array_size = a->Size();
	/* heapify: sift every non-leaf node down */
	for (i = (array_size / 2); i >= 0; i--) {
		if(!_hsort_sift_down(v,a, i, array_size - 1,func)) return false;
	}

	/* repeatedly move the current maximum to the end, then re-sift */
	for (i = array_size-1; i >= 1; i--)
	{
		_Swap(a->_values[0],a->_values[i]);
		if(!_hsort_sift_down(v,a, 0, i-1,func)) return false;
	}
	return true;
}
Example #21
static void start_thread(struct k_thread *thread)
{
	int key = irq_lock(); /* protect kernel queues */

	_mark_thread_as_started(thread);

	if (_is_thread_ready(thread)) {
		_add_thread_to_ready_q(thread);
		if (_must_switch_threads()) {
			_Swap(key);
			return;
		}
	}

	irq_unlock(key);
}
Example #22
		void _CheckSize()
		{
			if (_tables.size() == _size)
			{
				size_t NewSize = GetNewSize();

				// Modern idiom: rebuild into a temporary table, then swap
				HashTable<K, V> ht;
				ht._tables.resize(NewSize);
				for (size_t i = 0; i < _tables.size(); i++)
				{
					ht.Insert(_tables[i]->_Key, _tables[i]->_Value);
				}
				_Swap(ht);
			}
		}
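
The _Swap(ht) call at the end is the copy-and-swap rehash idiom: the freshly built table exchanges its internals with *this, and the old buckets are released when ht goes out of scope. A minimal sketch of such a member, assuming only the _tables and _size fields visible above (std::swap comes from <utility>):

		// Belongs inside HashTable<K, V>; a sketch, not the original source.
		void _Swap(HashTable<K, V>& ht)
		{
			_tables.swap(ht._tables);    // O(1) exchange of bucket storage
			std::swap(_size, ht._size);  // keep element counts consistent
		}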
Example #23
void k_thread_abort(k_tid_t thread)
{
	unsigned int key;

	key = irq_lock();

	_k_thread_single_abort(thread);
	_thread_monitor_exit(thread);

	if (_current == thread) {
		_Swap(key);
		CODE_UNREACHABLE;
	}

	/* The abort handler might have altered the ready queue. */
	_reschedule_threads(key);
}
Example #24
void fiber_yield(void)
{
	unsigned int imask = irq_lock();

	if ((_nanokernel.fiber != (struct tcs *)NULL) &&
	    (_nanokernel.current->prio >= _nanokernel.fiber->prio)) {
		/*
		 * Reinsert current thread into the list of runnable threads,
		 * and then swap to the thread at the head of the fiber list.
		 */

		_nano_fiber_ready(_nanokernel.current);
		_Swap(imask);
	} else {
		irq_unlock(imask);
	}
}
Example #25
void _fiber_start(char *pStack,
		unsigned stackSize, /* stack size in bytes */
		nano_fiber_entry_t pEntry,
		int parameter1,
		int parameter2,
		unsigned priority,
		unsigned options)
{
	struct tcs *tcs;
	unsigned int imask;

	tcs = (struct tcs *) pStack;
	_new_thread(pStack,
			stackSize,
			(_thread_entry_t)pEntry,
			(void *)parameter1,
			(void *)parameter2,
			(void *)0,
			priority,
			options);

	/*
	 * _new_thread() has already set the flags depending on the 'options'
	 * and 'priority' parameters passed to it
	 */

	/* lock interrupts to prevent corruption of the runnable fiber list */

	imask = irq_lock();

	/* make the newly crafted TCS a runnable fiber */

	_nano_fiber_ready(tcs);

	/*
	 * Simply return to the caller if the current thread is FIBER,
	 * otherwise swap into the newly created fiber
	 */

	if ((_nanokernel.current->flags & TASK) == TASK) {
		_Swap(imask);
	} else {
		irq_unlock(imask);
	}
}
Example #26
void nano_task_sem_give(struct nano_sem *sem)
{
	tCCS *ccs;
	unsigned int imask;

	imask = irq_lock_inline();
	ccs = _nano_wait_q_remove(&sem->wait_q);
	if (ccs) {
		_nano_timeout_abort(ccs);
		set_sem_available(ccs);
		_Swap(imask);
		return;
	} else {
		sem->nsig++;
	}

	irq_unlock_inline(imask);
}
Example #27
/** INTERNAL
 *
 * There exists a separate nano_task_lifo_get_wait() implementation since a
 * task context cannot pend on a nanokernel object.  Instead, tasks will poll
 * the lifo object.
 */
void *nano_fiber_lifo_get_wait(struct nano_lifo *lifo )
{
	void *data;
	unsigned int imask;

	imask = irq_lock_inline();

	if (!lifo->list) {
		_nano_wait_q_put(&lifo->wait_q);
		data = (void *) _Swap(imask);
	} else {
		data = lifo->list;
		lifo->list = *(void **) data;
		irq_unlock_inline(imask);
	}

	return data;
}
Example #28
void nano_task_lifo_put(struct nano_lifo *lifo, void *data)
{
	tCCS *ccs;
	unsigned int imask;

	imask = irq_lock_inline();
	ccs = _nano_wait_q_remove(&lifo->wait_q);
	if (ccs) {
		_nano_timeout_abort(ccs);
		fiberRtnValueSet(ccs, (unsigned int) data);
		_Swap(imask);
		return;
	} else {
		*(void **) data = lifo->list;
		lifo->list = data;
	}

	irq_unlock_inline(imask);
}
Example #29
void nano_task_lifo_put(struct nano_lifo *lifo, void *data)
{
	struct tcs *tcs;
	unsigned int imask;

	imask = irq_lock();
	tcs = _nano_wait_q_remove(&lifo->wait_q);
	if (tcs) {
		_nano_timeout_abort(tcs);
		fiberRtnValueSet(tcs, (unsigned int) data);
		_Swap(imask);
		return;
	}

	*(void **) data = lifo->list;
	lifo->list = data;

	irq_unlock(imask);
}
Example #30
void *_lifo_get(struct nano_lifo *lifo, int32_t timeout_in_ticks)
{
	void *data = NULL;
	unsigned int imask;

	imask = irq_lock();

	if (likely(lifo->list != NULL)) {
		data = lifo->list;
		lifo->list = *(void **) data;
	} else if (timeout_in_ticks != TICKS_NONE) {
		_NANO_TIMEOUT_ADD(&lifo->wait_q, timeout_in_ticks);
		_nano_wait_q_put(&lifo->wait_q);
		data = (void *) _Swap(imask);
		return data;
	}

	irq_unlock(imask);
	return data;
}