Example #1
void
cancelAllIPC(endpoint_t *epptr)
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
        break;

    default: {
        tcb_t *thread = TCB_PTR(endpoint_ptr_get_epQueue_head(epptr));

        /* Make endpoint idle */
        endpoint_ptr_set_state(epptr, EPState_Idle);
        endpoint_ptr_set_epQueue_head(epptr, 0);
        endpoint_ptr_set_epQueue_tail(epptr, 0);

        /* Set all blocked threads to restart */
        for (; thread; thread = thread->tcbEPNext) {
            setThreadState(thread, ThreadState_Restart);
            SCHED_ENQUEUE(thread);
        }

        rescheduleRequired();
        break;
    }
    }
}
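Every example in this list hinges on SCHED_ENQUEUE. In the seL4 tree these kernel excerpts come from, it is, as far as I can tell, a thin macro over tcbSchedEnqueue, which links the TCB into the ready queue for its priority. A minimal sketch of that idea follows; NUM_PRIORITIES, readyQueues and schedEnqueue are simplified stand-ins, not the kernel's exact definitions (the real code also tracks a "queued" flag in the thread state and selects per-core queues):

#include <stddef.h>

/* Sketch only: simplified stand-ins for the real seL4 types. */
#define NUM_PRIORITIES 256

typedef struct tcb {
    struct tcb *tcbSchedNext;
    struct tcb *tcbSchedPrev;
    unsigned    tcbPriority;
} tcb_t;

typedef struct {
    tcb_t *head;
    tcb_t *end;
} tcb_queue_t;

static tcb_queue_t readyQueues[NUM_PRIORITIES];

static void schedEnqueue(tcb_t *tcb)
{
    tcb_queue_t *q = &readyQueues[tcb->tcbPriority];

    /* Link the thread at the head of its priority's ready queue */
    tcb->tcbSchedPrev = NULL;
    tcb->tcbSchedNext = q->head;
    if (q->head)
        q->head->tcbSchedPrev = tcb;
    else
        q->end = tcb;
    q->head = tcb;
}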
Example #2
/* Immediately switch to a particular process */
static void proc_switchTo(Process *proc)
{
	Process *old_process = current_process;

	/* Put the outgoing process back on the ready queue */
	SCHED_ENQUEUE(current_process);
	/* Start the incoming process with a fresh time slice */
	preempt_reset_quantum();
	current_process = proc;
	proc_context_switch(current_process, old_process);
}
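proc_switchTo is the immediate-switch path: the caller has already decided that proc must run now, so the outgoing process goes back on the ready queue and the context switch happens inline. A hypothetical call site might look like this; proc_wakeup and prio() are illustrative names, not the excerpted BeRTOS API:

/* Hypothetical wakeup path: switch immediately only when the woken
 * process outranks the running one; otherwise just queue it. */
static void proc_wakeup(Process *proc)
{
	if (prio(proc) > prio(current_process))
		proc_switchTo(proc);	/* run it right now */
	else
		SCHED_ENQUEUE(proc);	/* let the scheduler pick it up later */
}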
Example #3
/**
 * Preempt the current task.
 */
void proc_preempt(void)
{
	IRQ_ASSERT_DISABLED();
	ASSERT(current_process);

	/* Perform the kernel preemption */
	LOG_INFO("preempting %p:%s\n", current_process, proc_currentName());
	/* We are inside an IRQ context, so ATOMIC is not needed here */
	SCHED_ENQUEUE(current_process);
	preempt_reset_quantum();
	proc_schedule();
}
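The other half of the mechanism is the timer tick that decides when the quantum has expired. A sketch of that tick-side bookkeeping, under the assumption that a counter is decremented once per tick; preempt_tick, preempt_quantum and CONFIG_KERN_QUANTUM are illustrative names rather than the exact BeRTOS internals:

/* Hypothetical tick hook: burn one unit of the quantum per tick and
 * preempt the running process once it is used up. */
static unsigned preempt_quantum;

void preempt_reset_quantum(void)
{
	preempt_quantum = CONFIG_KERN_QUANTUM;
}

void preempt_tick(void)
{
	IRQ_ASSERT_DISABLED();

	if (preempt_quantum)
		preempt_quantum--;
	else
		proc_preempt();	/* quantum expired: requeue and reschedule */
}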
Example #4
void cancelAllSignals(notification_t *ntfnPtr)
{
    if (notification_ptr_get_state(ntfnPtr) == NtfnState_Waiting) {
        tcb_t *thread = TCB_PTR(notification_ptr_get_ntfnQueue_head(ntfnPtr));

        notification_ptr_set_state(ntfnPtr, NtfnState_Idle);
        notification_ptr_set_ntfnQueue_head(ntfnPtr, 0);
        notification_ptr_set_ntfnQueue_tail(ntfnPtr, 0);

        /* Set all waiting threads to Restart */
        for (; thread; thread = thread->tcbEPNext) {
            setThreadState(thread, ThreadState_Restart);
            SCHED_ENQUEUE(thread);
        }
        rescheduleRequired();
    }
}
Example #5
void
cancelBadgedSends(endpoint_t *epptr, word_t badge)
{
    switch (endpoint_ptr_get_state(epptr)) {
    case EPState_Idle:
    case EPState_Recv:
        break;

    case EPState_Send: {
        tcb_t *thread, *next;
        tcb_queue_t queue = ep_ptr_get_queue(epptr);

        /* this is a de-optimisation for verification
         * reasons. it allows the contents of the endpoint
         * queue to be ignored during the for loop. */
        endpoint_ptr_set_state(epptr, EPState_Idle);
        endpoint_ptr_set_epQueue_head(epptr, 0);
        endpoint_ptr_set_epQueue_tail(epptr, 0);

        for (thread = queue.head; thread; thread = next) {
            word_t b = thread_state_ptr_get_blockingIPCBadge(
                           &thread->tcbState);
            next = thread->tcbEPNext;
            if (b == badge) {
                setThreadState(thread, ThreadState_Restart);
                SCHED_ENQUEUE(thread);
                queue = tcbEPDequeue(thread, queue);
            }
        }
        ep_ptr_set_queue(epptr, queue);

        if (queue.head) {
            endpoint_ptr_set_state(epptr, EPState_Send);
        }

        rescheduleRequired();

        break;
    }

    default:
        fail("invalid EP state");
    }
}
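The badge scan above works because tcbEPDequeue unlinks the matching thread from the endpoint's doubly linked queue while the loop keeps walking via the saved next pointer. From memory of the seL4 sources, the dequeue looks roughly like this; treat it as a sketch rather than a verbatim copy:

/* Unlink a TCB from a doubly linked endpoint queue; the head/end
 * fixups handle threads sitting at either end of the queue. */
tcb_queue_t tcbEPDequeue(tcb_t *tcb, tcb_queue_t queue)
{
    if (tcb->tcbEPPrev) {
        tcb->tcbEPPrev->tcbEPNext = tcb->tcbEPNext;
    } else {
        queue.head = tcb->tcbEPNext;
    }

    if (tcb->tcbEPNext) {
        tcb->tcbEPNext->tcbEPPrev = tcb->tcbEPPrev;
    } else {
        queue.end = tcb->tcbEPPrev;
    }

    return queue;
}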
Example #6
/**
 * Create a new process, starting at the provided entry point.
 *
 * \note The function
 * \code
 * proc_new(entry, data, stacksize, stack)
 * \endcode
 * is a more convenient way to create a process, as you don't have to specify
 * the name.
 *
 * \return Process structure of the newly created process
 *         if successful, NULL otherwise.
 */
struct Process *proc_new_with_name(UNUSED_ARG(const char *, name), void (*entry)(void), iptr_t data, size_t stack_size, cpu_stack_t *stack_base)
{
	Process *proc;
	LOG_INFO("name=%s", name);
#if CONFIG_KERN_HEAP
	bool free_stack = false;

	/*
	 * Free up resources of a zombie process.
	 *
	 * We're implementing a kind of lazy garbage collector here for
	 * efficiency reasons: we avoid burdening another kernel task (e.g.,
	 * idle) with freeing resources, and we introduce no overhead into
	 * the scheduler after a context switch (that would be *very* bad,
	 * because the scheduler runs with IRQs disabled).
	 *
	 * This way we can release the memory of zombie tasks without
	 * disabling IRQs and without adding significant overhead to any
	 * other kernel task.
	 */
	proc_freeZombies();

	/* Did the caller provide a stack for us? */
	if (!stack_base)
	{
		/* Did the caller specify the desired stack size? */
		if (!stack_size)
			stack_size = KERN_MINSTACKSIZE;

		/* Allocate the stack dynamically */
		PROC_ATOMIC(stack_base =
			(cpu_stack_t *)heap_allocmem(&proc_heap, stack_size));
		if (stack_base == NULL)
			return NULL;

		free_stack = true;
	}

#else // CONFIG_KERN_HEAP

	/* Stack must have been provided by the user */
	ASSERT_VALID_PTR(stack_base);
	ASSERT(stack_size);

#endif // CONFIG_KERN_HEAP

#if CONFIG_KERN_MONITOR
	/*
	 * Fill in the stack with a special marker to help debugging.
	 * On 64-bit platforms, CONFIG_KERN_STACKFILLCODE is larger
	 * than an int, so the (int) cast is required to silence the
	 * warning for truncating its size.
	 */
	memset(stack_base, (int)CONFIG_KERN_STACKFILLCODE, stack_size);
#endif

	/* Initialize the process control block */
	if (CPU_STACK_GROWS_UPWARD)
	{
		proc = (Process *)stack_base;
		proc->stack = stack_base + PROC_SIZE_WORDS;
		// On some architectures the stack must be aligned, so align it here.
		proc->stack = (cpu_stack_t *)((uintptr_t)proc->stack + (sizeof(cpu_aligned_stack_t) - ((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t))));
		if (CPU_SP_ON_EMPTY_SLOT)
			proc->stack++;
	}
	else
	{
		proc = (Process *)(stack_base + stack_size / sizeof(cpu_stack_t) - PROC_SIZE_WORDS);
		// On some architectures the stack must be aligned, so align it here.
		proc->stack = (cpu_stack_t *)((uintptr_t)proc - ((uintptr_t)proc % sizeof(cpu_aligned_stack_t)));
		if (CPU_SP_ON_EMPTY_SLOT)
			proc->stack--;
	}
	/* Ensure stack is aligned */
	ASSERT((uintptr_t)proc->stack % sizeof(cpu_aligned_stack_t) == 0);

	stack_size -= PROC_SIZE_WORDS * sizeof(cpu_stack_t);
	proc_initStruct(proc);
	proc->user_data = data;

#if CONFIG_KERN_HEAP || CONFIG_KERN_MONITOR
	proc->stack_base = stack_base;
	proc->stack_size = stack_size;
	#if CONFIG_KERN_HEAP
	if (free_stack)
		proc->flags |= PF_FREESTACK;
	#endif
#endif
	proc->user_entry = entry;
	CPU_CREATE_NEW_STACK(proc->stack);

#if CONFIG_KERN_MONITOR
	monitor_add(proc, name);
#endif

	/* Add to ready list */
	ATOMIC(SCHED_ENQUEUE(proc));

	return proc;
}
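For completeness, a minimal caller. Per the \note in the doc comment, the proc_new shorthand drops the name argument; PROC_DEFINE_STACK is BeRTOS's stack-declaration helper, if memory serves, and worker_entry/spawn_worker are illustrative names:

/* Usage sketch: spawn a worker with a statically allocated stack. */
static PROC_DEFINE_STACK(worker_stack, KERN_MINSTACKSIZE);

static void worker_entry(void)
{
	for (;;)
	{
		/* ... do work, then sleep or signal ... */
	}
}

void spawn_worker(void)
{
	Process *p = proc_new(worker_entry, NULL,
	                      sizeof(worker_stack), worker_stack);
	ASSERT(p);
}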