Esempio n. 1
0
/*
 * Set task private pointer.
 */
/*
 * Store a new value into the task's private data pointer.
 * Runs inside a short interrupt-disabled critical section.
 */
void
task_set_private (task_t *t, void *new_private)
{
	arch_state_t saved;

	arch_intr_disable (&saved);
	/* Sanity-check the running task's stack guard word. */
	assert (STACK_GUARD (task_current));
	t->privatep = new_private;
	arch_intr_restore (saved);
}
Esempio n. 2
0
/*
 * Stop listening on the group.
 * Detach slots from the lock->groups linked lists.
 * Use mutex_group_listen() to start listening.
 * Beware, multiple calls to mutex_group_unlisten() will crash the system!
 */
/*
 * Stop listening on the group: detach every slot from its
 * lock->groups linked list.  Counterpart of mutex_group_listen().
 * Beware, calling mutex_group_unlisten() twice in a row will crash
 * the system (the asserts below fire on an already-detached slot).
 */
void
mutex_group_unlisten (mutex_group_t *g)
{
    arch_state_t saved;
    mutex_slot_t *slot;

    gr_assert_good(g);

    arch_intr_disable (&saved);
    assert_task_good_stack(task_current);
    /* Walk the slot array backwards and unhook each slot. */
    slot = g->slot + g->num;
    while (--slot >= g->slot) {
        assert (! list_is_empty (&slot->item));
        list_unlink (&slot->item);
    }
    arch_intr_restore (saved);
}
Esempio n. 3
0
/*
 * Associate IRQ with the lock.
 * On interrupt, the signal message is sent to the lock.
 */
/*
 * Associate a software interrupt with the mutex.
 * When the interrupt fires, the signal message is sent to the mutex
 * and the given handler is invoked with the given argument.
 */
void mutex_attach_swi (mutex_t *m, mutex_irq_t* swi, handler_t func, void *arg)
{
	arch_state_t saved;

	arch_intr_disable (&saved);

	/* Lazily initialize the mutex on first use. */
	if (! m->item.next)
		mutex_init (m);

	m->irq = swi;
	/* The interrupt slot must not already be bound to a mutex. */
	assert (swi->lock == 0);
	swi->lock = m;
	swi->handler = func;
	swi->arg = arg;
	swi->pending = 0;

	arch_intr_restore (saved);
}
Esempio n. 4
0
/*
 * Suspend the current task so that other tasks can run.
 */
/*
 * Voluntarily give up the processor so that other ready tasks can run.
 */
void
task_yield ()
{
	arch_state_t saved;

	arch_intr_disable (&saved);

	/* Re-queue the caller at the tail of the ready list, then let the
	 * scheduler pick the head.  With several tasks of equal priority
	 * this yields round-robin behaviour. */
	list_append (&task_active, &task_current->item);
	task_schedule ();

	arch_intr_restore (saved);
}
Esempio n. 5
0
/** \~russian
 * сбрасывает статус активности с мутехов группы, ожидание активности
 * ведется с этого момента.
 * подключает прослушивание не подключенных мутехов
 */
/**
 * Clear the "active" status on every mutex slot of the group, so that
 * activity waiting starts over from this point.
 * In SMART mode, also re-attach listening for slots whose mutex is
 * assigned but currently detached from its groups list.
 */
void mutex_group_relisten(mutex_group_t* g) {
    arch_state_t saved;
    mutex_slot_t *slot;

    gr_assert_good(g);

    arch_intr_disable (&saved);
    slot = g->slot + g->num;
    while (--slot >= g->slot) {
        slot->active = 0;
#if UOS_MGROUP_SMART > 0
        /* Re-hook a bound but detached slot onto its mutex's list. */
        if (slot->lock != 0 && list_is_empty (&slot->item)) {
            slot->message = 0;
            list_prepend (&slot->lock->groups, &slot->item);
        }
#endif
    }
    arch_intr_restore (saved);
}
Esempio n. 6
0
/*
 * Start listening on all locks in the group.
 * Attach slots to the lock->groups linked list of every lock.
 * Use mutex_group_unlisten() to stop listening.
 * Beware, multiple calls to mutex_group_listen() will crash the system!
 */
/*
 * Start listening on all locks in the group: attach each slot to the
 * lock->groups linked list of its mutex.  Counterpart of
 * mutex_group_unlisten().
 * Beware, calling mutex_group_listen() twice in a row will crash the
 * system (the assert below fires on an already-attached slot).
 */
void
mutex_group_listen (mutex_group_t *g)
{
    arch_state_t saved;
    mutex_slot_t *slot;

    gr_assert_good(g);

    arch_intr_disable (&saved);
    assert_task_good_stack(task_current);
    slot = g->slot + g->num;
    while (--slot >= g->slot) {
#if UOS_MGROUP_SMART > 0
        /* In SMART mode a slot may have no mutex assigned yet. */
        if (slot->lock == 0)
            continue;
#endif
        assert (list_is_empty (&slot->item));
        slot->message = 0;
        slot->active = 0;
        list_prepend (&slot->lock->groups, &slot->item);
    }
    arch_intr_restore (saved);
}
Esempio n. 7
0
/*
 * Busy-loop test task: counts up to RELOAD_LIMIT repeatedly, and after
 * RELOADS_BEFORE_DISABLE reloads it disables interrupts (deliberately
 * never restoring them) to observe system behaviour with interrupts
 * masked.  Prints NVIC ISER0 on every reload.  Never returns.
 *
 * Fixes vs. original: the unused task argument is now explicitly
 * voided (silences -Wunused-parameter), the magic numbers are named
 * constants, and the mixed tab/space indentation is normalized.
 */
void task_test2(void* arg)
{
	enum {
		RELOAD_LIMIT = 10000000,	/* busy-count iterations per reload */
		RELOADS_BEFORE_DISABLE = 5	/* reloads before masking interrupts */
	};
	/* volatile: keep the busy counter from being optimized away */
	volatile int counter1 = 0;
	int counter2 = 0;
	arch_state_t x;

	(void) arg;	/* task argument is unused */

	while (1) {
		counter1++;
		if (counter1 == RELOAD_LIMIT) {
			counter1 = 0;
			counter2++;
			if (counter2 == RELOADS_BEFORE_DISABLE) {
				debug_printf("DISABLING INTERRUPTS\n");
				/* Intentionally never restored: this is the test. */
				arch_intr_disable(&x);
			}
			debug_printf("counter reload, ISER0 = %08X\n", ARM_NVIC_ISER(0));
		}
	}
}
Esempio n. 8
0
/*
 * Detach mutex m from the group: locate the slot bound to m, unlink it
 * from the mutex's groups list and mark the slot free.
 * Returns 1 on success, 0 when m is not a member of the group
 * (always 0 when UOS_MGROUP_SMART is disabled).
 */
bool_t mutex_group_remove (mutex_group_t* g, mutex_t* m)
{
    /* Removal only makes sense with the SMART group feature enabled. */
    assert(UOS_MGROUP_SMART > 0);
    gr_assert_good(g);

#if UOS_MGROUP_SMART > 0
    mutex_slot_t *slot;
    arch_state_t saved;

    arch_intr_disable (&saved);
    /* Scan the slot array backwards for the slot bound to m. */
    for (slot = g->slot + g->num; --slot >= g->slot; ) {
        if (slot->lock != m)
            continue;
        list_unlink (&slot->item);
        slot->lock = 0;
        arch_intr_restore (saved);
        return 1;
    }
    arch_intr_restore (saved);
#endif
    return 0;
}
Esempio n. 9
0
/*
 * Block until either the mutex m is acquired (returns true) or some
 * lock in the group g is signalled (returns false; the signalled lock
 * and message are stored through lock_ptr/msg_ptr by
 * mutex_group_signaled()).  m may be NULL, in which case only the
 * group is waited on.
 * NOTE(review): returns 1/true/false on a bool_t — presumably
 * equivalent in this codebase; confirm bool_t definition.
 */
bool_t
mutex_group_lockwaiting (mutex_t *m, mutex_group_t *g, mutex_t **lock_ptr, void **msg_ptr)
{
    /* Recursive lock already held by this task: nothing to wait for. */
    if (mutex_recurcived_lock(m))
        return 1;

    arch_state_t x;
    struct mg_signal_ctx signaler = {g, lock_ptr, msg_ptr};

    arch_intr_disable (&x);
    assert_task_good_stack(task_current);
    assert (task_current->wait == 0);
    gr_assert_good(g);
    assert (g->num > 0);
    /* Lazily initialize the mutex on first use. */
    if (m != NULL)
        if (! m->item.next)
            mutex_init (m);


    /* Interrupts stay disabled across the loop; task_schedule() /
     * mutex_slaved_yield() suspend the task until something happens. */
    for (;;) {
        /* First preference: try to take the mutex itself. */
        if (m != NULL)
            if (mutex_trylock_in(m)) {
                arch_intr_restore (x);
                return true;
            }
        /* Second: check whether any group member was signalled. */
        if (mutex_group_signaled(&signaler)) {
            arch_intr_restore (x);
            return false;
        }

        if (m != NULL)
            mutex_slaved_yield(m);
        else {
            /* Suspend the task: leave the ready list and reschedule. */
            list_unlink (&task_current->item);
            task_schedule ();
        }
    }
}
Esempio n. 10
0
/*
 * Wait for the signal on any lock in the group.
 * The calling task is blocked until the mutex_signal().
 * Returns the lock and the signalled message.
 */
/*
 * Wait for the signal on any lock in the group.
 * The calling task is blocked until the mutex_signal().
 * Returns the lock and the signalled message through
 * lock_ptr/msg_ptr (filled in by mutex_group_signaled()).
 */
void
mutex_group_wait (mutex_group_t *g, mutex_t **lock_ptr, void **msg_ptr)
{
    arch_state_t x;
    struct mg_signal_ctx signaler = {g, lock_ptr, msg_ptr};

    arch_intr_disable (&x);
    assert_task_good_stack(task_current);
    assert (task_current->wait == 0);
    gr_assert_good(g);
    assert (g->num > 0);

    /* Interrupts stay disabled across the loop; task_schedule()
     * suspends the task until it is made runnable again. */
    for (;;) {
        if (mutex_group_signaled(&signaler)) {
            arch_intr_restore (x);
            return;
        }

        /* Suspend the task: leave the ready list, register ourselves
         * as the group's waiter, and reschedule. */
        list_unlink (&task_current->item);
        g->waiter = task_current;
        task_schedule ();
    }
}