Example #1
int set_timer(uint64_t deadline)
{
	task_t* curr_task;
	uint32_t core_id;
	uint8_t flags;
	int ret = -EINVAL;

	flags = irq_nested_disable();

	curr_task = per_core(current_task);
	core_id = CORE_ID;

	if (curr_task->status == TASK_RUNNING) {
		// block the task and remove it from the ready queue
		block_task(curr_task->id);

		curr_task->flags |= TASK_TIMER;
		curr_task->timeout = deadline;

		timer_queue_push(core_id, curr_task);

		ret = 0;
	} else {
		LOG_INFO("Task is already blocked. No timer will be set!\n");
	}

	irq_nested_enable(flags);

	return ret;
}
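
All of these examples revolve around the per_core()/set_per_core() accessors. As a rough mental model only (not the kernel's actual implementation, which typically resolves per-core variables through an architecture-specific register such as gs on x86-64 or tpidr_el1 on AArch64), they behave like array accesses indexed by CORE_ID:

/* Hypothetical sketch, not the real implementation: per-core
 * variables modeled as plain arrays indexed by CORE_ID. */
#define DECLARE_PER_CORE(type, name)  type name[MAX_CORES]
#define per_core(name)                (name[CORE_ID])
#define set_per_core(name, value)     (name[CORE_ID] = (value))
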
Example #2
void NORETURN do_exit(int arg)
{
	task_t* curr_task = per_core(current_task);
	const uint32_t core_id = CORE_ID;

	LOG_INFO("Terminate task: %u, return value %d\n", curr_task->id, arg);

	uint8_t flags = irq_nested_disable();

	// decrease the number of active tasks
	spinlock_irqsave_lock(&readyqueues[core_id].lock);
	readyqueues[core_id].nr_tasks--;
	spinlock_irqsave_unlock(&readyqueues[core_id].lock);

	// release the thread local storage
	destroy_tls();

	curr_task->status = TASK_FINISHED;

	reschedule();

	irq_nested_enable(flags);

	LOG_ERROR("Kernel panic: scheduler found no valid task\n");
	while(1) {
		HALT;
	}
}
Example #3
void fpu_handler(void)
{
	task_t* task = per_core(current_task);
	uint32_t core_id = CORE_ID;

	task->flags |= TASK_FPU_USED;

	if (!(task->flags & TASK_FPU_INIT)) {
		// first use of the FPU => initialize it
		fpu_init(&task->fpu);
		task->flags |= TASK_FPU_INIT;
	}

	if (readyqueues[core_id].fpu_owner == task->id)
		return;

	spinlock_irqsave_lock(&readyqueues[core_id].lock);
	// did another task already use the FPU? => save its FPU state
	if (readyqueues[core_id].fpu_owner) {
		save_fpu_state(&(task_table[readyqueues[core_id].fpu_owner].fpu));
		task_table[readyqueues[core_id].fpu_owner].flags &= ~TASK_FPU_USED;
	}
	readyqueues[core_id].fpu_owner = task->id;
	spinlock_irqsave_unlock(&readyqueues[core_id].lock);

	restore_fpu_state(&task->fpu);
}
Example #4
static int init_tls(void)
{
	task_t* curr_task = per_core(current_task);

	// do we have thread-local storage?
	if (((size_t) &tls_end - (size_t) &tls_start) > 0) {
		size_t tdata_size = (size_t) &tdata_end - (size_t) &tls_start;

		curr_task->tls_addr = (size_t) &tls_start;
		curr_task->tls_size = (size_t) &tls_end - (size_t) &tls_start;

		thread_block_t* tcb = (thread_block_t*) kmalloc(curr_task->tls_size+sizeof(thread_block_t));
		if (BUILTIN_EXPECT(!tcb, 0)) {
			LOG_ERROR("load_task: heap is missing!\n");
			return -ENOMEM;
		}

		memset((void*) tcb, 0x00, curr_task->tls_size+sizeof(thread_block_t));
		tcb->dtv = (dtv_t*) &tcb[1];
		memcpy((char*) tcb->dtv, (void*) curr_task->tls_addr, tdata_size);

		set_tpidr((size_t) tcb);
		LOG_INFO("TLS of task %d on core %d starts at 0x%zx (tls size 0x%zx)\n", curr_task->id, CORE_ID, get_tpidr(), curr_task->tls_size);
	} else set_tpidr(0); // no TLS => clear tpidr_el0 register

	return 0;
}
Example #5
int sys_getprio(tid_t* id)
{
	task_t* task = per_core(current_task);

	if (!id || (task->id == *id))
		return task->prio;
	return -EINVAL;
}
Example #6
void check_ticks(void)
{
	// do we already know the timer frequency? => if not, ignore this check
	if (!freq_hz)
		return;

	const uint64_t curr_tsc = get_cntpct();
	mb();

	const uint64_t diff_tsc = curr_tsc - per_core(last_tsc);
	const uint64_t diff_ticks = (diff_tsc * (uint64_t) TIMER_FREQ) / freq_hz;

	if (diff_ticks > 0) {
		set_per_core(timer_ticks, per_core(timer_ticks) + diff_ticks);
		set_per_core(last_tsc, curr_tsc);
		rmb();
	}
}
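
To make the conversion in check_ticks concrete with made-up numbers: assuming a 100 MHz counter (freq_hz = 100000000) and TIMER_FREQ = 100, a measured difference of diff_tsc = 2000000 cycles yields diff_ticks = (2000000 * 100) / 100000000 = 2, i.e. two timer ticks are accounted to this core.
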
Example #7
int timer_wait(unsigned int ticks)
{
	uint64_t eticks = per_core(timer_ticks) + ticks;

	task_t* curr_task = per_core(current_task);

	if (curr_task->status == TASK_IDLE)
	{
		/*
		 * This will continuously loop until the given time has
		 * been reached
		 */
		while (per_core(timer_ticks) < eticks) {
			check_workqueues();

			// recheck break condition
			if (per_core(timer_ticks) >= eticks)
				break;

			PAUSE;
		}
	} else if (per_core(timer_ticks) < eticks) {
		check_workqueues();

		if (per_core(timer_ticks) < eticks) {
			set_timer(eticks);
			reschedule();
		}
	}

	return 0;
}
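
Since timer ticks advance at TIMER_FREQ per second (compare the comment in Example #10), a caller sleeps for roughly n seconds by passing n * TIMER_FREQ ticks. A minimal, hypothetical usage sketch:

// Hypothetical usage: block the calling task for about three seconds.
timer_wait(3 * TIMER_FREQ);
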
Example #8
size_t* get_current_stack(void)
{
	task_t* curr_task = per_core(current_task);
	size_t stptr = (size_t) curr_task->stack;

	if (curr_task->status == TASK_IDLE)
		stptr += KERNEL_STACK_SIZE - 0x10;
	else
		stptr = (stptr + DEFAULT_STACK_SIZE - sizeof(size_t)) & ~0x1F;

	//set_tss(stptr, (size_t) curr_task->ist_addr + KERNEL_STACK_SIZE - 0x10);

	return curr_task->last_stack_pointer;
}
Example #9
/*
 * All of our exception-handling interrupt service routines
 * point to this function. It tells us which exception has
 * occurred! Right now, we simply abort the current task.
 * All ISRs disable interrupts while they are being serviced
 * as a 'locking' mechanism to prevent an IRQ from happening
 * and messing up kernel data structures.
 */
static void fault_handler(struct state *s)
{
	if (s->int_no < 32)
		kputs(exception_messages[s->int_no]);
	else
		kprintf("Unknown exception %d", s->int_no);

	kprintf(" Exception (%d) on core %d at %#x:%#lx, fs = %#lx, gs = %#lx, error code = 0x%#lx, task id = %u, rflags = %#x\n",
		s->int_no, CORE_ID, s->cs, s->rip, s->fs, s->gs, s->error, per_core(current_task)->id, s->rflags);
	kprintf("rax %#lx, rbx %#lx, rcx %#lx, rdx %#lx, rbp, %#lx, rsp %#lx rdi %#lx, rsi %#lx, r8 %#lx, r9 %#lx, r10 %#lx, r11 %#lx, r12 %#lx, r13 %#lx, r14 %#lx, r15 %#lx\n",
		s->rax, s->rbx, s->rcx, s->rdx, s->rbp, s->rsp, s->rdi, s->rsi, s->r8, s->r9, s->r10, s->r11, s->r12, s->r13, s->r14, s->r15);

	apic_eoi(s->int_no);
	irq_enable();
	//do_abort();
	sys_exit(-EFAULT);
}
Example #10
/*
 * Handles the timer. In this case, it's very simple: We
 * increment the 'timer_ticks' variable every time the
 * timer fires.
 */
static void timer_handler(struct state *s)
{
#ifndef DYNAMIC_TICKS
	/* Increment our 'tick counter' */
	set_per_core(timer_ticks, per_core(timer_ticks)+1);
	restart_periodic_timer();
#else
	timer_disable();
#endif

#if 0
	/*
	 * Every TIMER_FREQ clocks (approximately 1 second), we will
	 * display a message on the screen
	 */
	if (timer_ticks % TIMER_FREQ == 0) {
		LOG_INFO("One second has passed %d\n", CORE_ID);
	}
#endif
}
Example #11
void check_scheduling(void)
{
	uint32_t prio = get_highest_priority();
	task_t* curr_task = per_core(current_task);

	if (prio > curr_task->prio) {
		reschedule();
#ifdef DYNAMIC_TICKS
	} else if ((prio > 0) && (prio == curr_task->prio)) {
		// another task with the same priority is ready => check whether the
		// current task has already run for one tick (one time slice); if so,
		// reschedule to realize round-robin scheduling

		const uint64_t diff_cycles = get_rdtsc() - curr_task->last_tsc;
		const uint64_t cpu_freq_hz = 1000000ULL * (uint64_t) get_cpu_frequency();

		if (((diff_cycles * (uint64_t) TIMER_FREQ) / cpu_freq_hz) > 0) {
			LOG_DEBUG("Time slice expired for task %d on core %d. New task has priority %u.\n", curr_task->id, CORE_ID, prio);
			reschedule();
		}
#endif
	}
}
Example #12
tid_t sys_getpid(void)
{
	task_t* task = per_core(current_task);

	return task->id;
}
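
For illustration, a hypothetical call sequence that combines this with sys_getprio from Example #5; since the id matches the calling task, sys_getprio returns the task's priority rather than -EINVAL:

// Hypothetical usage: query the calling task's own priority.
tid_t id = sys_getpid();
int prio = sys_getprio(&id);
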
Example #13
int block_current_task(void)
{
	return block_task(per_core(current_task)->id);
}
Example #14
int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
{
	int ret = -EINVAL;
	uint32_t i;
	void* stack = NULL;
	void* ist = NULL;
	task_t* curr_task;
	uint32_t core_id;

	if (BUILTIN_EXPECT(!ep, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio == IDLE_PRIO, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio > MAX_PRIO, 0))
		return -EINVAL;

	curr_task = per_core(current_task);

	stack = create_stack(DEFAULT_STACK_SIZE);
	if (BUILTIN_EXPECT(!stack, 0))
		return -ENOMEM;

	ist = create_stack(KERNEL_STACK_SIZE);
	if (BUILTIN_EXPECT(!ist, 0)) {
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		return -ENOMEM;
	}

	spinlock_irqsave_lock(&table_lock);

	core_id = get_next_core_id();
	if (BUILTIN_EXPECT(core_id >= MAX_CORES, 0))
	{
		spinlock_irqsave_unlock(&table_lock);
		ret = -EINVAL;
		goto out;
	}

	for(i=0; i<MAX_TASKS; i++) {
		if (task_table[i].status == TASK_INVALID) {
			task_table[i].id = i;
			task_table[i].status = TASK_READY;
			task_table[i].last_core = core_id;
			task_table[i].last_stack_pointer = NULL;
			task_table[i].stack = stack;
			task_table[i].prio = prio;
			task_table[i].heap = curr_task->heap;
			task_table[i].start_tick = get_clock_tick();
			task_table[i].last_tsc = 0;
			task_table[i].parent = curr_task->id;
			task_table[i].tls_addr = curr_task->tls_addr;
			task_table[i].tls_size = curr_task->tls_size;
			task_table[i].ist_addr = ist;
			task_table[i].lwip_err = 0;
			task_table[i].signal_handler = NULL;

			if (id)
				*id = i;

			ret = create_default_frame(task_table+i, ep, arg, core_id);
			if (ret) {
				// release the table lock before bailing out to avoid a deadlock
				spinlock_irqsave_unlock(&table_lock);
				goto out;
			}

			// add the task to the ready queue
			spinlock_irqsave_lock(&readyqueues[core_id].lock);
			readyqueues[core_id].prio_bitmap |= (1 << prio);
			readyqueues[core_id].nr_tasks++;
			if (!readyqueues[core_id].queue[prio-1].first) {
				task_table[i].next = task_table[i].prev = NULL;
				readyqueues[core_id].queue[prio-1].first = task_table+i;
				readyqueues[core_id].queue[prio-1].last = task_table+i;
			} else {
				task_table[i].prev = readyqueues[core_id].queue[prio-1].last;
				task_table[i].next = NULL;
				readyqueues[core_id].queue[prio-1].last->next = task_table+i;
				readyqueues[core_id].queue[prio-1].last = task_table+i;
			}
			// should we wakeup the core?
			if (readyqueues[core_id].nr_tasks == 1)
				wakeup_core(core_id);
			spinlock_irqsave_unlock(&readyqueues[core_id].lock);
			break;
		}
	}

	spinlock_irqsave_unlock(&table_lock);

	if (!ret) {
		LOG_DEBUG("start new thread %d on core %d with stack address %p\n", i, core_id, stack);
	}

out:
	if (ret) {
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		destroy_stack(ist, KERNEL_STACK_SIZE);
	}

	return ret;
}
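
A sketch of how clone_task might be called; worker_entry and NORMAL_PRIO are assumptions for illustration (any entry point of type entry_point_t and any priority other than IDLE_PRIO and at most MAX_PRIO would do):

// Hypothetical usage: spawn a kernel thread running worker_entry(NULL).
tid_t id;
int err = clone_task(&id, worker_entry, NULL, NORMAL_PRIO);
if (err)
	LOG_ERROR("clone_task failed: %d\n", err);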