Example #1
void fpu_handler(void)
{
	task_t* task = per_core(current_task);
	uint32_t core_id = CORE_ID;

	task->flags |= TASK_FPU_USED;

	if (!(task->flags & TASK_FPU_INIT))  {
		// first use of the FPU => initialize its state
		fpu_init(&task->fpu);
		task->flags |= TASK_FPU_INIT;
	}

	if (readyqueues[core_id].fpu_owner == task->id)
		return;

	spinlock_irqsave_lock(&readyqueues[core_id].lock);
	// did another task already use the FPU? => save its FPU state
	if (readyqueues[core_id].fpu_owner) {
		save_fpu_state(&(task_table[readyqueues[core_id].fpu_owner].fpu));
		task_table[readyqueues[core_id].fpu_owner].flags &= ~TASK_FPU_USED;
	}
	readyqueues[core_id].fpu_owner = task->id;
	spinlock_irqsave_unlock(&readyqueues[core_id].lock);

	restore_fpu_state(&task->fpu);
}
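
fpu_handler() implements lazy FPU switching: a task's FPU state is saved and restored only when the task actually touches the FPU. Below is a minimal sketch of the trap glue that could invoke it on x86; the handler name, clts(), and the CR0.TS convention are assumptions, only fpu_handler() comes from the example above.

// Hypothetical device-not-available (#NM) handler. The scheduler is assumed to
// set CR0.TS on every task switch, so the first FPU instruction of the newly
// scheduled task traps here; clts() is an assumed helper that clears TS again.
static void fpu_trap_handler(void)
{
	clts();        // FPU instructions no longer trap for the current task
	fpu_handler(); // save the previous owner's state, restore this task's state
}
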
Example #2
void check_timers(void)
{
	readyqueues_t* readyqueue = &readyqueues[CORE_ID];
	spinlock_irqsave_lock(&readyqueue->lock);

	// since IRQs are disabled, get_clock_tick() won't increase here
	const uint64_t current_tick = get_clock_tick();

	// wakeup tasks whose deadline has expired
	task_t* task;
	while ((task = readyqueue->timers.first) && (task->timeout <= current_tick))
	{
		// wakeup_task() removes the task from the timer queue, so the next iteration sees a new first element
		wakeup_task(task->id);
	}

#ifdef DYNAMIC_TICKS
	task = readyqueue->timers.first;
	if (task) {
		update_timer(task);
	}
#endif

	spinlock_irqsave_unlock(&readyqueue->lock);
}
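
check_timers() is intended to run from the timer interrupt path. A thin sketch of such a caller follows; the handler name and its registration are assumptions, only check_timers() is taken from the example above.

// Hypothetical timer tick handler. Without DYNAMIC_TICKS it fires on every
// periodic tick; with DYNAMIC_TICKS the hardware timer is reprogrammed by
// update_timer() for the next pending deadline.
static void timer_tick_handler(void)
{
	check_timers();   // wake all tasks whose timeout has expired
	// a real handler would typically request a reschedule afterwards
}
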
Example #3
int kputs(const char *str)
{
	int pos, i, len = strlen(str);

	if (early_print != NO_EARLY_PRINT)
		spinlock_irqsave_lock(&olock);

	for(i=0; i<len; i++) {
		pos = atomic_int32_inc(&kmsg_counter);
		kmessages[pos % KMSG_SIZE] = str[i];
#ifdef CONFIG_VGA
		if (early_print & VGA_EARLY_PRINT)
			vga_putchar(str[i]);
#endif
#ifdef CONFIG_UART
		if (early_print & UART_EARLY_PRINT)
			uart_putchar(str[i]);
#endif
	}

	if (early_print != NO_EARLY_PRINT)
		spinlock_irqsave_unlock(&olock);

	return len;
}
Example #4
int block_task(tid_t id)
{
	task_t* task;
	uint32_t core_id;
	int ret = -EINVAL;
	uint8_t flags;

	flags = irq_nested_disable();

	task = &task_table[id];
	core_id = task->last_core;

	if (task->status == TASK_RUNNING) {
		LOG_DEBUG("block task %d on core %d\n", id, core_id);

		task->status = TASK_BLOCKED;

		spinlock_irqsave_lock(&readyqueues[core_id].lock);

		// remove task from ready queue
		readyqueues_remove(core_id, task);

		// reduce the number of ready tasks
		readyqueues[core_id].nr_tasks--;
		LOG_DEBUG("update nr_tasks on core %d to %d\n", core_id, readyqueues[core_id].nr_tasks);

		spinlock_irqsave_unlock(&readyqueues[core_id].lock);

		ret = 0;
	}

	irq_nested_enable(flags);

	return ret;
}
Example #5
void NORETURN do_exit(int arg)
{
	task_t* curr_task = per_core(current_task);
	const uint32_t core_id = CORE_ID;

	LOG_INFO("Terminate task: %u, return value %d\n", curr_task->id, arg);

	uint8_t flags = irq_nested_disable();

	// decrease the number of active tasks
	spinlock_irqsave_lock(&readyqueues[core_id].lock);
	readyqueues[core_id].nr_tasks--;
	spinlock_irqsave_unlock(&readyqueues[core_id].lock);

	// release the thread local storage
	destroy_tls();

	curr_task->status = TASK_FINISHED;

	reschedule();

	irq_nested_enable(flags);

	LOG_ERROR("Kernel panic: scheduler found no valid task\n");
	while(1) {
		HALT;
	}
}
Example #6
tid_t set_idle_task(void)
{
	uint32_t core_id = CORE_ID;
	tid_t id = ~0;

	spinlock_irqsave_lock(&table_lock);

	for(uint32_t i=0; i<MAX_TASKS; i++) {
		if (task_table[i].status == TASK_INVALID) {
			task_table[i].id = id = i;
			task_table[i].status = TASK_IDLE;
			task_table[i].last_core = core_id;
			task_table[i].last_stack_pointer = NULL;
			task_table[i].stack = NULL;
			task_table[i].ist_addr = NULL;
			task_table[i].prio = IDLE_PRIO;
			task_table[i].heap = NULL;
			readyqueues[core_id].idle = task_table+i;
			set_per_core(current_task, readyqueues[core_id].idle);

			break;
		}
	}

	spinlock_irqsave_unlock(&table_lock);

	return id;
}
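
set_idle_task() reserves a slot in task_table for the calling core's idle task and makes it the core's current task; it returns ~0 if no slot is free. A hedged sketch of a per-core boot step that might call it; everything except set_idle_task() and LOG_ERROR() is an assumption.

// Hypothetical per-core initialization; irq_enable() is an assumed helper.
void boot_core(void)
{
	if (set_idle_task() == (tid_t)~0) {
		LOG_ERROR("no free slot in task_table for the idle task\n");
		return;
	}

	irq_enable();
	// ...enter the idle loop; the scheduler can now switch to regular tasks...
}
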
Example #7
void finish_task_switch(void)
{
	task_t* old;
	uint8_t prio;

	spinlock_irqsave_lock(&readyqueues.lock);

	if ((old = readyqueues.old_task) != NULL) {
		if (old->status == TASK_INVALID) {
			old->stack = NULL;
			old->last_stack_pointer = NULL;
			readyqueues.old_task = NULL;
		} else {
			prio = old->prio;
			if (!readyqueues.queue[prio-1].first) {
				old->next = old->prev = NULL;
				readyqueues.queue[prio-1].first = readyqueues.queue[prio-1].last = old;
			} else {
				old->next = NULL;
				old->prev = readyqueues.queue[prio-1].last;
				readyqueues.queue[prio-1].last->next = old;
				readyqueues.queue[prio-1].last = old;
			}
			readyqueues.old_task = NULL;
			readyqueues.prio_bitmap |= (1 << prio);
		}
	}

	spinlock_irqsave_unlock(&readyqueues.lock);

	if (current_task->heap)
		kfree(current_task->heap);
}
Example #8
/** @brief Exit routine invoked by functions that terminate the calling task. */
static void NORETURN do_exit(int arg)
{
  int task_id;
  
  task_t* curr_task = current_task;

  kprintf("Terminate task: %u, return value %d\n", curr_task->id, arg);

  page_map_drop();

  for (task_id = 0; task_id < MAX_TASKS; task_id++) {
    task_t* task = &task_table[task_id];
    if (task->status == TASK_IDLE && task->wait_id == curr_task->id) {
      task->wait_id = -1;
      task->status = TASK_READY;
    }
  }
  // decrease the number of active tasks
  spinlock_irqsave_lock(&readyqueues.lock);
  readyqueues.nr_tasks--;
  spinlock_irqsave_unlock(&readyqueues.lock);

  curr_task->status = TASK_FINISHED;
  reschedule();

  kprintf("Kernel panic: scheduler found no valid task\n");
  while(1) {
    HALT;
  }
}
Example #9
int wakeup_task(tid_t id)
{
	task_t* task;
	uint32_t core_id;
	int ret = -EINVAL;

	spinlock_irqsave_lock(&table_lock);
	task = &task_table[id];
	core_id = task->last_core;

	if (task->status == TASK_BLOCKED) {
		LOG_DEBUG("wakeup task %d on core %d\n", id, core_id);

		task->status = TASK_READY;
		spinlock_irqsave_unlock(&table_lock);

		ret = 0;

		spinlock_irqsave_lock(&readyqueues[core_id].lock);

		// if task is in timer queue, remove it
		if (task->flags & TASK_TIMER) {
			task->flags &= ~TASK_TIMER;

			timer_queue_remove(core_id, task);
		}

		// add task to the ready queue
		readyqueues_push_back(core_id, task);

		// increase the number of ready tasks
		readyqueues[core_id].nr_tasks++;

		// should we wakeup the core?
		if (readyqueues[core_id].nr_tasks == 1)
			wakeup_core(core_id);

		LOG_DEBUG("update nr_tasks on core %d to %d\n", core_id, readyqueues[core_id].nr_tasks);

		spinlock_irqsave_unlock(&readyqueues[core_id].lock);
	} else {
		spinlock_irqsave_unlock(&table_lock);
	}

	return ret;
}
Example #10
static void timer_queue_push(uint32_t core_id, task_t* task)
{
	task_list_t* timer_queue = &readyqueues[core_id].timers;

	spinlock_irqsave_lock(&readyqueues[core_id].lock);

	task_t* first = timer_queue->first;

	if(!first) {
		timer_queue->first = timer_queue->last = task;
		task->next = task->prev = NULL;

#ifdef DYNAMIC_TICKS
		update_timer(task);
#endif
	} else {
		// look up the position where to insert the task
		task_t* tmp = first;
		while(tmp && (task->timeout >= tmp->timeout))
			tmp = tmp->next;

		if(!tmp) {
			// insert at the end of queue
			task->next = NULL;
			task->prev = timer_queue->last;

			// there has to be a last element because there is also a first one
			timer_queue->last->next = task;
			timer_queue->last = task;
		} else {
			task->next = tmp;
			task->prev = tmp->prev;
			tmp->prev = task;

			if(task->prev)
				task->prev->next = task;

			if(timer_queue->first == tmp) {
				timer_queue->first = task;

#ifdef DYNAMIC_TICKS
				update_timer(task);
#endif
			}
		}
	}

	spinlock_irqsave_unlock(&readyqueues[core_id].lock);
}
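
timer_queue_push() keeps the per-core timer queue sorted by timeout and, with DYNAMIC_TICKS, reprograms the hardware timer whenever the queue head changes. Below is a sketch of how a sleep-style primitive might combine it with block_current_task() and the check_timers()/wakeup_task() path from the other examples; the name set_timer() and the exact call order are assumptions.

// Hypothetical sleep primitive: block the current task until 'deadline' ticks.
// TASK_TIMER is the flag that wakeup_task() checks to remove the task from the
// timer queue again.
int set_timer(uint64_t deadline)
{
	task_t* curr = per_core(current_task);
	const uint32_t core_id = CORE_ID;

	curr->timeout = deadline;          // absolute tick compared by check_timers()
	curr->flags |= TASK_TIMER;

	block_current_task();              // leave the ready queue (TASK_BLOCKED)
	timer_queue_push(core_id, curr);   // sorted insert; may reprogram the timer

	reschedule();                      // yield the CPU until we are woken
	return 0;
}
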
Example #11
void finish_task_switch(void)
{
	task_t* old;
	const uint32_t core_id = CORE_ID;

	spinlock_irqsave_lock(&readyqueues[core_id].lock);

	if ((old = readyqueues[core_id].old_task) != NULL) {
		readyqueues[core_id].old_task = NULL;

		if (old->status == TASK_FINISHED) {
			/* cleanup task */
			if (old->stack) {
				//LOG_INFO("Release stack at 0x%zx\n", old->stack);
				destroy_stack(old->stack, DEFAULT_STACK_SIZE);
				old->stack = NULL;
			}

			if (!old->parent && old->heap) {
				kfree(old->heap);
				old->heap = NULL;
			}

			if (old->ist_addr) {
				destroy_stack(old->ist_addr, KERNEL_STACK_SIZE);
				old->ist_addr = NULL;
			}

			old->last_stack_pointer = NULL;

			if (readyqueues[core_id].fpu_owner == old->id)
				readyqueues[core_id].fpu_owner = 0;

			/* signals that this task slot can be reused */
			old->status = TASK_INVALID;
		} else {
			// re-enqueue old task
			readyqueues_push_back(core_id, old);
		}
	}

	spinlock_irqsave_unlock(&readyqueues[core_id].lock);
}
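
finish_task_switch() runs in the context of the newly scheduled task and consumes readyqueues[core_id].old_task: a finished task is torn down and marked reusable, a still-runnable one is re-enqueued. A sketch of the hand-off it completes follows; pick_next_task() and switch_context() are hypothetical names, only old_task and finish_task_switch() come from the example above.

// Sketch of a scheduling step that finish_task_switch() completes.
void schedule_sketch(void)
{
	const uint32_t core_id = CORE_ID;
	task_t* curr = per_core(current_task);
	task_t* next;

	spinlock_irqsave_lock(&readyqueues[core_id].lock);
	next = pick_next_task(core_id);            // hypothetical selection/dequeue step
	if (next && next != curr) {
		readyqueues[core_id].old_task = curr;  // consumed by finish_task_switch()
		set_per_core(current_task, next);
	}
	spinlock_irqsave_unlock(&readyqueues[core_id].lock);

	if (next && next != curr) {
		switch_context(&curr->last_stack_pointer, next->last_stack_pointer); // hypothetical
		// code running right after a switch calls finish_task_switch() to clean
		// up or re-enqueue whatever task was running before
		finish_task_switch();
	}
}
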
Example #12
/** @brief Block current task
 *
 * The current task's status will be changed to TASK_BLOCKED
 *
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure
 */
int block_current_task(void)
{
	tid_t id;
	uint32_t prio;
	int ret = -EINVAL;
	uint8_t flags;

	flags = irq_nested_disable();

	id = current_task->id;
	prio = current_task->prio;

	if (task_table[id].status == TASK_RUNNING) {
		task_table[id].status = TASK_BLOCKED;
		ret = 0;

		spinlock_irqsave_lock(&readyqueues.lock);
		// reduce the number of ready tasks
		readyqueues.nr_tasks--;

		// remove task from queue
		if (task_table[id].prev)
			task_table[id].prev->next = task_table[id].next;
		if (task_table[id].next)
			task_table[id].next->prev = task_table[id].prev;
		if (readyqueues.queue[prio-1].first == task_table+id)
			readyqueues.queue[prio-1].first = task_table[id].next;
		if (readyqueues.queue[prio-1].last == task_table+id) {
			readyqueues.queue[prio-1].last = task_table[id].prev;
			if (!readyqueues.queue[prio-1].last)
				readyqueues.queue[prio-1].last = readyqueues.queue[prio-1].first;
		}

		// No valid task in queue => update prio_bitmap
		if (!readyqueues.queue[prio-1].first)
			readyqueues.prio_bitmap &= ~(1 << prio);
		spinlock_irqsave_unlock(&readyqueues.lock);
	}

	irq_nested_enable(flags);

	return ret;
}
Example #13
/** @brief Wakeup a blocked task
 * @param id The id of the task to wake up
 * @return
 * - 0 on success
 * - -EINVAL (-22) on failure
 */
int wakeup_task(tid_t id)
{
	task_t* task;
	uint32_t prio;
	int ret = -EINVAL;
	uint8_t flags;

	flags = irq_nested_disable();

	task = task_table + id;
	prio = task->prio;

	if (task->status == TASK_BLOCKED) {
		task->status = TASK_READY;
		ret = 0;

		spinlock_irqsave_lock(&readyqueues.lock);
		// increase the number of ready tasks
		readyqueues.nr_tasks++;

		// add task to the runqueue
		if (!readyqueues.queue[prio-1].last) {
			readyqueues.queue[prio-1].last = readyqueues.queue[prio-1].first = task;
			task->next = task->prev = NULL;
			readyqueues.prio_bitmap |= (1 << prio);
		} else {
			task->prev = readyqueues.queue[prio-1].last;
			task->next = NULL;
			readyqueues.queue[prio-1].last->next = task;
			readyqueues.queue[prio-1].last = task;
		}
		spinlock_irqsave_unlock(&readyqueues.lock);
	}

	irq_nested_enable(flags);

	return ret;
}
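
block_current_task() and wakeup_task() form the usual wait/notify pair: a task marks itself blocked and yields, and another context later makes it ready again. A minimal, deliberately simplified sketch follows; the waiting_tid variable and the event functions are illustrative, and no care is taken here about a wakeup racing the block.

// Hypothetical one-shot wait/notify pattern built from the calls shown above.
static tid_t waiting_tid = (tid_t)-1;

void wait_for_event(void)
{
	waiting_tid = current_task->id;
	block_current_task();   // TASK_RUNNING -> TASK_BLOCKED, removed from the ready queue
	reschedule();           // yield; we run again once wakeup_task() re-enqueues us
}

void signal_event(void)
{
	if (waiting_tid != (tid_t)-1) {
		wakeup_task(waiting_tid);  // TASK_BLOCKED -> TASK_READY, back into the run queue
		waiting_tid = (tid_t)-1;
	}
}
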
Example #14
int kputs(const char *str)
{
	int len;

	spinlock_irqsave_lock(&stdio_lock);

	if (is_single_kernel()) {
		len = uart_puts(str);
	} else {
		int pos;

		len = strlen(str);

		for(int i=0; i<len; i++) {
			pos = atomic_int32_inc(&kmsg_counter);
			kmessages[pos % KMSG_SIZE] = str[i];
		}
	}

	spinlock_irqsave_unlock(&stdio_lock);

	return len;
}
Example #15
int kputchar(int c)
{
	int pos;

	if (early_print != NO_EARLY_PRINT)
		spinlock_irqsave_lock(&olock);

	pos = atomic_int32_inc(&kmsg_counter);
	kmessages[pos % KMSG_SIZE] = (unsigned char) c;

#ifdef CONFIG_VGA
	if (early_print & VGA_EARLY_PRINT)
		vga_putchar(c);
#endif
#ifdef CONFIG_UART
	if (early_print & UART_EARLY_PRINT)
		uart_putchar(c);
#endif

	if (early_print != NO_EARLY_PRINT)
		spinlock_irqsave_unlock(&olock);

	return 1;
}
Example #16
int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id)
{
	int ret = -ENOMEM;
	uint32_t i;
	void* stack = NULL;
	void* ist = NULL;
	void* counter = NULL;

	if (BUILTIN_EXPECT(!ep, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio == IDLE_PRIO, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio > MAX_PRIO, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(core_id >= MAX_CORES, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(!readyqueues[core_id].idle, 0))
		return -EINVAL;

	stack = create_stack(DEFAULT_STACK_SIZE);
	if (BUILTIN_EXPECT(!stack, 0))
		return -ENOMEM;

	ist = create_stack(KERNEL_STACK_SIZE);
	if (BUILTIN_EXPECT(!ist, 0)) {
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		return -ENOMEM;
	}

	counter = kmalloc(sizeof(atomic_int64_t));
	if (BUILTIN_EXPECT(!counter, 0)) {
		destroy_stack(ist, KERNEL_STACK_SIZE);
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		return -ENOMEM;
	}
	atomic_int64_set((atomic_int64_t*) counter, 0);

	spinlock_irqsave_lock(&table_lock);

	for(i=0; i<MAX_TASKS; i++) {
		if (task_table[i].status == TASK_INVALID) {
			task_table[i].id = i;
			task_table[i].status = TASK_READY;
			task_table[i].last_core = core_id;
			task_table[i].last_stack_pointer = NULL;
			task_table[i].stack = stack;
			task_table[i].prio = prio;
			task_table[i].heap = NULL;
			task_table[i].start_tick = get_clock_tick();
			task_table[i].last_tsc = 0;
			task_table[i].parent = 0;
			task_table[i].ist_addr = ist;
			task_table[i].tls_addr = 0;
			task_table[i].tls_size = 0;
			task_table[i].lwip_err = 0;
			task_table[i].signal_handler = NULL;

			if (id)
				*id = i;

			ret = create_default_frame(task_table+i, ep, arg, core_id);
			if (ret)
				goto out;

			// add task in the readyqueues
			spinlock_irqsave_lock(&readyqueues[core_id].lock);
			readyqueues[core_id].prio_bitmap |= (1 << prio);
			readyqueues[core_id].nr_tasks++;
			if (!readyqueues[core_id].queue[prio-1].first) {
				task_table[i].next = task_table[i].prev = NULL;
				readyqueues[core_id].queue[prio-1].first = task_table+i;
				readyqueues[core_id].queue[prio-1].last = task_table+i;
			} else {
				task_table[i].prev = readyqueues[core_id].queue[prio-1].last;
				task_table[i].next = NULL;
				readyqueues[core_id].queue[prio-1].last->next = task_table+i;
				readyqueues[core_id].queue[prio-1].last = task_table+i;
			}
			spinlock_irqsave_unlock(&readyqueues[core_id].lock);
			break;
		}
	}

	if (!ret)
		LOG_INFO("start new task %d on core %d with stack address %p\n", i, core_id, stack);

out:
	spinlock_irqsave_unlock(&table_lock);

	if (ret) {
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		destroy_stack(ist, KERNEL_STACK_SIZE);
		kfree(counter);
	}

	return ret;
}
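
A hedged example of a create_task() call site; the entry-point signature int (*)(void*), NORMAL_PRIO, and the wrapper function are assumptions, only create_task() and the logging macros appear in the examples.

// Hypothetical caller that spawns a simple task on the current core.
static int hello_task(void* arg)
{
	kputs("hello from a new task\n");
	return 0;
}

void spawn_hello(void)
{
	tid_t id;
	int err = create_task(&id, hello_task, NULL, NORMAL_PRIO, CORE_ID);

	if (err)
		LOG_ERROR("create_task failed: %d\n", err);
	else
		LOG_INFO("created task %u\n", id);
}
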
Example #17
int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
{
	int ret = -EINVAL;
	uint32_t i;
	void* stack = NULL;
	void* ist = NULL;
	task_t* curr_task;
	uint32_t core_id;

	if (BUILTIN_EXPECT(!ep, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio == IDLE_PRIO, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio > MAX_PRIO, 0))
		return -EINVAL;

	curr_task = per_core(current_task);

	stack = create_stack(DEFAULT_STACK_SIZE);
	if (BUILTIN_EXPECT(!stack, 0))
		return -ENOMEM;

	ist = create_stack(KERNEL_STACK_SIZE);
	if (BUILTIN_EXPECT(!ist, 0)) {
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		return -ENOMEM;
	}

	spinlock_irqsave_lock(&table_lock);

	core_id = get_next_core_id();
	if (BUILTIN_EXPECT(core_id >= MAX_CORES, 0))
	{
		spinlock_irqsave_unlock(&table_lock);
		ret = -EINVAL;
		goto out;
	}

	for(i=0; i<MAX_TASKS; i++) {
		if (task_table[i].status == TASK_INVALID) {
			task_table[i].id = i;
			task_table[i].status = TASK_READY;
			task_table[i].last_core = core_id;
			task_table[i].last_stack_pointer = NULL;
			task_table[i].stack = stack;
			task_table[i].prio = prio;
			task_table[i].heap = curr_task->heap;
			task_table[i].start_tick = get_clock_tick();
			task_table[i].last_tsc = 0;
			task_table[i].parent = curr_task->id;
			task_table[i].tls_addr = curr_task->tls_addr;
			task_table[i].tls_size = curr_task->tls_size;
			task_table[i].ist_addr = ist;
			task_table[i].lwip_err = 0;
			task_table[i].signal_handler = NULL;

			if (id)
				*id = i;

			ret = create_default_frame(task_table+i, ep, arg, core_id);
			if (ret)
				goto out;

			// add task in the readyqueues
			spinlock_irqsave_lock(&readyqueues[core_id].lock);
			readyqueues[core_id].prio_bitmap |= (1 << prio);
			readyqueues[core_id].nr_tasks++;
			if (!readyqueues[core_id].queue[prio-1].first) {
				task_table[i].next = task_table[i].prev = NULL;
				readyqueues[core_id].queue[prio-1].first = task_table+i;
				readyqueues[core_id].queue[prio-1].last = task_table+i;
			} else {
				task_table[i].prev = readyqueues[core_id].queue[prio-1].last;
				task_table[i].next = NULL;
				readyqueues[core_id].queue[prio-1].last->next = task_table+i;
				readyqueues[core_id].queue[prio-1].last = task_table+i;
			}
			// should we wakeup the core?
			if (readyqueues[core_id].nr_tasks == 1)
				wakeup_core(core_id);
			spinlock_irqsave_unlock(&readyqueues[core_id].lock);
			break;
		}
	}

	spinlock_irqsave_unlock(&table_lock);

	if (!ret) {
		LOG_DEBUG("start new thread %d on core %d with stack address %p\n", i, core_id, stack);
	}

out:
	if (ret) {
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		destroy_stack(ist, KERNEL_STACK_SIZE);
	}

	return ret;
}
Example #18
int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
{
	int ret = -ENOMEM;
	uint32_t i;

	if (BUILTIN_EXPECT(!ep, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio == IDLE_PRIO, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio > MAX_PRIO, 0))
		return -EINVAL;

	spinlock_irqsave_lock(&table_lock);

	for(i=0; i<MAX_TASKS; i++) {
		if (task_table[i].status == TASK_INVALID) {
			task_table[i].id = i;
			task_table[i].status = TASK_READY;
			task_table[i].last_stack_pointer = NULL;
			task_table[i].stack = create_stack(i);
			task_table[i].prio = prio;
			spinlock_init(&task_table[i].vma_lock);
			task_table[i].vma_list = NULL;
			task_table[i].heap = NULL;
			task_table[i].wait_id = -1;

			spinlock_irqsave_init(&task_table[i].page_lock);
			atomic_int32_set(&task_table[i].user_usage, 0);

			/* Allocated new PGD or PML4 and copy page table */
			task_table[i].page_map = get_pages(1);
			if (BUILTIN_EXPECT(!task_table[i].page_map, 0))
				goto out;

			/* Copy page tables & user frames of current task to new one */
			page_map_copy(&task_table[i]);

			if (id)
				*id = i;

			ret = create_default_frame(task_table+i, ep, arg);

			// add task in the readyqueues
			spinlock_irqsave_lock(&readyqueues.lock);
			readyqueues.prio_bitmap |= (1 << prio);
			readyqueues.nr_tasks++;
			if (!readyqueues.queue[prio-1].first) {
				task_table[i].next = task_table[i].prev = NULL;
				readyqueues.queue[prio-1].first = task_table+i;
				readyqueues.queue[prio-1].last = task_table+i;
			} else {
				task_table[i].prev = readyqueues.queue[prio-1].last;
				task_table[i].next = NULL;
				readyqueues.queue[prio-1].last->next = task_table+i;
				readyqueues.queue[prio-1].last = task_table+i;
			}
			spinlock_irqsave_unlock(&readyqueues.lock);
			break;
		}
	}

out:
	spinlock_irqsave_unlock(&table_lock);

	return ret;
}