Example #1
int vma_arch_init(void)
{
	int ret = 0;

	if (mb_info) {
		ret = vma_add((size_t)mb_info & PAGE_MASK, ((size_t)mb_info & PAGE_MASK) + PAGE_SIZE,
			VMA_READ|VMA_WRITE|VMA_CACHEABLE);
		if (BUILTIN_EXPECT(ret, 0))
			goto out;

		if ((mb_info->flags & MULTIBOOT_INFO_CMDLINE) && cmdline) {
			LOG_INFO("vma_arch_init: map cmdline %p (size 0x%zd)\n", cmdline, cmdsize);

			size_t i = 0;
			while(((size_t) cmdline + i) < ((size_t) cmdline + cmdsize))
			{
				if ((((size_t)cmdline + i) & PAGE_MASK) != ((size_t) mb_info & PAGE_MASK)) {
					ret = vma_add(((size_t)cmdline + i) & PAGE_MASK, (((size_t)cmdline + i) & PAGE_MASK) + PAGE_SIZE,
						VMA_READ|VMA_WRITE|VMA_CACHEABLE);
					if (BUILTIN_EXPECT(ret, 0))
						goto out;
				}

				i += PAGE_SIZE;
			}
		}
	}

out:
	return ret;
}
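The cmdline loop above registers, page by page, every page the command line touches. A minimal user-space sketch of that page-walking arithmetic, assuming 4 KiB pages (vma_add is a print stub here, and the addresses are made up):

#include <stdio.h>
#include <stddef.h>

#define PAGE_SIZE 4096
#define PAGE_MASK (~((size_t) PAGE_SIZE - 1))

/* stub for the kernel's vma_add(): just report the range */
static int vma_add(size_t start, size_t end, int flags)
{
	(void) flags;
	printf("map [0x%zx - 0x%zx)\n", start, end);
	return 0;
}

int main(void)
{
	size_t cmdline = 0x1000ff0;	/* hypothetical unaligned address */
	size_t cmdsize = 0x20;		/* range crosses a page boundary */

	/* register every page touched by [cmdline, cmdline + cmdsize) */
	size_t first = cmdline & PAGE_MASK;
	size_t last = (cmdline + cmdsize - 1) & PAGE_MASK;
	for (size_t page = first; page <= last; page += PAGE_SIZE)
		vma_add(page, page + PAGE_SIZE, 0);

	return 0;
}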
Example #2
static ssize_t kmsg_read(fildes_t* file, uint8_t* buffer, size_t size)
{
	size_t start, i = 0;

	if (BUILTIN_EXPECT(!buffer, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(!size, 0))
		return 0;

	if (kmessages[(atomic_int32_read(&kmsg_counter) + 1) % KMSG_SIZE] == 0)
		start = 0;
	else
		start = (atomic_int32_read(&kmsg_counter) + 1) % KMSG_SIZE;

	if (((start + file->offset) % KMSG_SIZE) == atomic_int32_read(&kmsg_counter))
		return 0;
	if (file->offset >= KMSG_SIZE)
		return 0;

	for(i=0; i<size; i++, file->offset++) {
		buffer[i] = kmessages[(start + file->offset) % KMSG_SIZE];
		if (((start + file->offset) % KMSG_SIZE) == atomic_int32_read(&kmsg_counter))
			return i;
	}

	return size;
}
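kmsg_read treats kmessages as a ring buffer indexed modulo KMSG_SIZE, starting at the oldest entry and stopping at the write position. The same wrap-around read as a self-contained sketch (names and sizes are illustrative, not the kernel's):

#include <stdio.h>
#include <stddef.h>

#define RING_SIZE 8

static char ring[RING_SIZE];
static size_t head;	/* total number of characters ever written */

static void ring_put(char c)
{
	ring[head++ % RING_SIZE] = c;
}

/* copy up to size bytes, oldest first, starting at offset */
static size_t ring_read(size_t offset, char* buf, size_t size)
{
	size_t avail = (head < RING_SIZE) ? head : RING_SIZE;
	size_t start = head - avail;	/* oldest readable position */
	size_t i;

	for (i = 0; i < size && offset + i < avail; i++)
		buf[i] = ring[(start + offset + i) % RING_SIZE];

	return i;
}

int main(void)
{
	char buf[RING_SIZE + 1] = { 0 };
	const char* msg = "kernel log";

	for (size_t i = 0; msg[i]; i++)
		ring_put(msg[i]);

	/* only the last RING_SIZE characters survive: "rnel log" */
	size_t n = ring_read(0, buf, sizeof(buf) - 1);
	printf("read %zu bytes: \"%s\"\n", n, buf);
	return 0;
}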
Example #3
/* Init Functions */
int kmsg_init(vfs_node_t * node, const char *name)
{
	uint32_t        i, j;
	vfs_node_t*     new_node;
	dir_block_t*    blockdir;
	dirent_t*       dirent;
	block_list_t*   blist;

	if (BUILTIN_EXPECT(!node || !name, 0))
		return -EINVAL;

	if (BUILTIN_EXPECT(node->type != FS_DIRECTORY, 0))
		return -EINVAL;

	if (finddir_fs(node, name))
		return -EINVAL;

	new_node = kmalloc(sizeof(vfs_node_t));
	if (BUILTIN_EXPECT(!new_node, 0))
		return -ENOMEM;

	memset(new_node, 0x00, sizeof(vfs_node_t));
	new_node->type = FS_CHARDEVICE;
	new_node->open = &kmsg_open;
	new_node->close = &kmsg_close;
	new_node->read = &kmsg_read;
	new_node->write = NULL;
	spinlock_init(&new_node->lock);

	blist = &node->block_list;
	do {
		for (i = 0; i < MAX_DATABLOCKS; i++) {
			if (blist->data[i]) {
				blockdir = (dir_block_t *) blist->data[i];
				for (j = 0; j < MAX_DIRENTRIES; j++) {
					dirent = &blockdir->entries[j];
					if (!dirent->vfs_node) {
						dirent->vfs_node = new_node;
						strncpy(dirent->name, name, MAX_FNAME);
						return 0;
					}
				}
			}
		}

		if (!blist->next) {
			blist->next = (block_list_t *) kmalloc(sizeof(block_list_t));
			if (blist->next)
				memset(blist->next, 0x00, sizeof(block_list_t));
		}

		blist = blist->next;	// advance to the (possibly just allocated) next block list
	} while (blist);

	kfree(new_node);

	return -ENOMEM;
}
Example #4
File: memory.c Project: stv0g/eduOS
void* create_stack(tid_t id)
{
	// idle task uses stack, which is defined in entry.asm
	if (BUILTIN_EXPECT(!id, 0))
		return NULL;
	// do we have a valid task id?
	if (BUILTIN_EXPECT(id >= MAX_TASKS, 0))
		return NULL;

	return (void*) stack[id-1];
}
Example #5
int create_default_frame(task_t* task, entry_point_t ep, void* arg, uint32_t core_id)
{
	size_t *stack;
	struct state *stptr;
	size_t state_size;

	if (BUILTIN_EXPECT(!task, 0))
		return -EINVAL;

	if (BUILTIN_EXPECT(!task->stack, 0))
		return -EINVAL;

	LOG_INFO("Task %d uses memory region [%p - %p] as stack\n", task->id, task->stack, (char*) task->stack + DEFAULT_STACK_SIZE - 1);
	LOG_INFO("Task %d uses memory region [%p - %p] as IST1\n", task->id, task->ist_addr, (char*) task->ist_addr + KERNEL_STACK_SIZE - 1);

	memset(task->stack, 0xCD, DEFAULT_STACK_SIZE);

	/* The difference between setting up a task for SW-task-switching
	 * and not for HW-task-switching is setting up a stack and not a TSS.
	 * This is the stack which will be activated and popped off for iret later.
	 */
	stack = (size_t*) (((size_t) task->stack + DEFAULT_STACK_SIZE - sizeof(size_t)) & ~0x1F);	// => stack is 32-byte aligned

	/* Only marker for debugging purposes, ... */
	*stack-- = 0xDEADBEEF;
	*stack-- = 0xDEADBEEF;

	/* Next bunch on the stack is the initial register state.
	 * The stack must look like the stack of a task which was
	 * scheduled away previously. */
	state_size = sizeof(struct state);
	stack = (size_t*) ((size_t) stack - state_size);

	stptr = (struct state *) stack;
	memset(stptr, 0x00, state_size);
	//stptr->sp = (size_t)stack + state_size;
	/* the first-function-to-be-called's arguments, ... */
	stptr->x0 = (size_t) arg;

	/* The procedure link register needs to hold the address of the
	 * first function to be called when returning from switch_context. */
	stptr->elr_el1 = (size_t)thread_entry;
	stptr->x1 = (size_t)ep; // use second argument to transfer the entry point

	/* Zero the condition flags. */
	stptr->spsr_el1 = 0x205;

	/* Set the task's stack pointer entry to the stack we have crafted right now. */
	task->last_stack_pointer = (size_t*)stack;

	return 0;
}
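Everything before the register assignments is plain pointer arithmetic on the freshly allocated stack: align the top down to 32 bytes, push two marker words, then reserve room for a struct state. A hosted sketch of that carving, with struct state reduced to two fields (the real layout lives in the kernel headers):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define STACK_SIZE 4096

struct state {		/* drastically reduced stand-in for the register file */
	size_t x0;
	size_t elr_el1;
};

int main(void)
{
	void* task_stack = malloc(STACK_SIZE);
	if (!task_stack)
		return 1;

	/* top of stack, aligned down to 32 bytes as in create_default_frame() */
	size_t* stack = (size_t*) (((size_t) task_stack + STACK_SIZE - sizeof(size_t)) & ~0x1F);

	*stack-- = 0xDEADBEEF;	/* debug markers */
	*stack-- = 0xDEADBEEF;

	/* the initial register state sits directly below the markers */
	stack = (size_t*) ((size_t) stack - sizeof(struct state));
	struct state* stptr = (struct state*) stack;
	memset(stptr, 0x00, sizeof(*stptr));
	stptr->x0 = 42;		/* argument for the entry function */

	printf("stack base %p, initial state at %p\n", task_stack, (void*) stptr);
	free(task_stack);
	return 0;
}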
Example #6
static ssize_t sys_sbrk(int incr)
{
    task_t* task = current_task;
    vma_t* heap = task->heap;
    ssize_t ret;

    spinlock_lock(&task->vma_lock);

    if (BUILTIN_EXPECT(!heap, 0)) {
        kprintf("sys_sbrk: missing heap!\n");
        abort();
    }

    ret = heap->end;
    heap->end += incr;
    if (heap->end < heap->start)
        heap->end = heap->start;

    // allocation and mapping of new pages for the heap
    // is caught by the page fault handler

    spinlock_unlock(&task->vma_lock);

    return ret;
}
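sys_sbrk only moves the heap's end marker under the VMA lock; actual page allocation is deferred to the page fault handler on first touch. The bump-pointer bookkeeping in isolation (the heap bounds are made up):

#include <stdio.h>
#include <stddef.h>

static size_t heap_start = 0x100000;
static size_t heap_end = 0x100000;

/* grow (or shrink) the heap, returning the old break, sbrk-style */
static size_t my_sbrk(long incr)
{
	size_t ret = heap_end;

	heap_end += incr;
	if (heap_end < heap_start)	/* never shrink below the start */
		heap_end = heap_start;

	return ret;
}

int main(void)
{
	printf("old break 0x%zx\n", my_sbrk(4096));
	printf("old break 0x%zx\n", my_sbrk(4096));
	printf("current end 0x%zx\n", heap_end);
	return 0;
}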
Example #7
static int init_tls(void)
{
	task_t* curr_task = per_core(current_task);

	// do we have a thread local storage?
	if (((size_t) &tls_end - (size_t) &tls_start) > 0) {
		size_t tdata_size = (size_t) &tdata_end - (size_t) &tls_start;

		curr_task->tls_addr = (size_t) &tls_start;
		curr_task->tls_size = (size_t) &tls_end - (size_t) &tls_start;

		thread_block_t* tcb = (thread_block_t*) kmalloc(curr_task->tls_size+sizeof(thread_block_t));
		if (BUILTIN_EXPECT(!tcb, 0)) {
			LOG_ERROR("load_task: heap is missing!\n");
			return -ENOMEM;
		}

		memset((void*) tcb, 0x00, curr_task->tls_size+sizeof(thread_block_t));
		tcb->dtv = (dtv_t*) &tcb[1];
		memcpy((char*) tcb->dtv, (void*) curr_task->tls_addr, tdata_size);

		set_tpidr((size_t) tcb);
		LOG_INFO("TLS of task %d on core %d starts at 0x%zx (tls size 0x%zx)\n", curr_task->id, CORE_ID, get_tpidr(), curr_task->tls_size);
	} else set_tpidr(0); // no TLS => clear tpidr_el0 register

	return 0;
}
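init_tls makes a single allocation holding the thread control block followed by a private copy of the initial TLS image, with tcb->dtv pointing just past the TCB. A hosted mock of that layout (thread_block_t is simplified here; the real dtv_t comes from the kernel headers):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

typedef struct {
	void* dtv;	/* reduced stand-in for the real dtv_t* */
} thread_block_t;

int main(void)
{
	const char tdata[] = "initial TLS image";	/* mock .tdata contents */
	size_t tls_size = sizeof(tdata);

	/* one block: TCB first, TLS data directly behind it */
	thread_block_t* tcb = malloc(sizeof(*tcb) + tls_size);
	if (!tcb)
		return 1;
	memset(tcb, 0x00, sizeof(*tcb) + tls_size);

	tcb->dtv = (void*) &tcb[1];	/* data starts just past the TCB */
	memcpy(tcb->dtv, tdata, tls_size);

	printf("tcb %p, tls data %p: \"%s\"\n", (void*) tcb, tcb->dtv, (char*) tcb->dtv);
	free(tcb);
	return 0;
}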
Example #8
int get_task(tid_t id, task_t** task)
{
	if (BUILTIN_EXPECT(task == NULL, 0)) {
		return -ENOMEM;
	}

	if (BUILTIN_EXPECT(id >= MAX_TASKS, 0)) {
		return -ENOENT;
	}

	if (BUILTIN_EXPECT(task_table[id].status == TASK_INVALID, 0)) {
		return -EINVAL;
	}

	*task = &task_table[id];

	return 0;
}
Example #9
static int sys_write(int fd, const char* buf, size_t len)
{
    //TODO: Currently, we ignore the file descriptor and the length;
    //      kputs() writes the whole NUL-terminated buffer

    if (BUILTIN_EXPECT(!buf, 0))
        return -1;

    kputs(buf);

    return 0;
}
Example #10
size_t strlen(const char *str)
{
	size_t len = 0;

	if (BUILTIN_EXPECT(!str, 0))
		return len;

	while (str[len] != '\0')
		len++;

	return len;
}
Example #11
void *memset(void *dest, int val, size_t count)
{
	size_t i;

	if (BUILTIN_EXPECT(!dest, 0))
		return dest;

	for (i = 0; i < count; i++)
		((char*) dest)[i] = (char) val;

	return dest;
}
Example #12
void *memcpy(void *dest, const void *src, size_t count)
{
	size_t i;

	if (BUILTIN_EXPECT(!dest || !src, 0))
		return dest;

	for (i = 0; i < count; i++)
		((char*) dest)[i] = ((const char*) src)[i];

	return dest;
}
Example #13
char* strcpy(char *dest, const char *src)
{
	size_t i;

	if (BUILTIN_EXPECT(!dest || !src, 0))
		return dest;

	for (i = 0; src[i] != '\0'; i++)
		dest[i] = src[i];
	dest[i] = '\0';

	return dest;
}
Example #14
int strncmp(const char *s1, const char *s2, size_t n)
{
	if (BUILTIN_EXPECT(n == 0, 0))
		return 0;

	while (n-- != 0 && *s1 == *s2) {
		if (n == 0 || *s1 == '\0')
			break;
		s1++;
		s2++;
	}

	return (*(unsigned char *) s1) - (*(unsigned char *) s2);
}
Example #15
int kputchar(int c)
{
	/* add place holder for end of string */
	if (BUILTIN_EXPECT(!c, 0))
		c = '?';

	if (is_single_kernel()) {
		uart_putchar(c);
	} else {
		int pos = atomic_int32_inc(&kmsg_counter);
		kmessages[pos % KMSG_SIZE] = (unsigned char) c;
	}

	return 1;
}
Example #16
char* strncpy(char *dest, const char *src, size_t n)
{
	size_t i;

	if (BUILTIN_EXPECT(!dest || !src || !n, 0))
		return dest;

	for (i = 0 ; i < n && src[i] != '\0' ; i++)
		dest[i] = src[i];
	/* unlike ISO C strncpy, always NUL-terminate the destination */
	if (i < n)
		dest[i] = '\0';
	else
		dest[n-1] = '\0';

	return dest;
}
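Note that this strncpy always NUL-terminates, unlike ISO C strncpy, which pads with zeros but leaves the destination unterminated on truncation. A quick hosted check of that difference (the routine is renamed k_strncpy here so it does not collide with libc):

#include <stdio.h>
#include <string.h>

/* the kernel routine above, renamed for a hosted build */
static char* k_strncpy(char *dest, const char *src, size_t n)
{
	size_t i;

	if (!dest || !src || !n)
		return dest;

	for (i = 0 ; i < n && src[i] != '\0' ; i++)
		dest[i] = src[i];
	if (i < n)
		dest[i] = '\0';
	else
		dest[n-1] = '\0';

	return dest;
}

int main(void)
{
	char buf[8];

	k_strncpy(buf, "truncated string", sizeof(buf));
	printf("\"%s\"\n", buf);	/* "truncat" - terminated despite truncation */

	strncpy(buf, "truncated string", sizeof(buf));
	printf("%.8s...\n", buf);	/* ISO strncpy left no terminator here */
	return 0;
}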
Example #17
int multitasking_init(void)
{
	if (BUILTIN_EXPECT(task_table[0].status != TASK_IDLE, 0)) {
		kputs("Task 0 is not an idle task\n");
		return -ENOMEM;
	}

	task_table[0].prio = IDLE_PRIO;
	task_table[0].stack = (void*) &boot_stack;
	task_table[0].page_map = read_cr3();

	// register idle task
	register_task();

	return 0;
}
Example #18
static void timer_queue_remove(uint32_t core_id, task_t* task)
{
	if(BUILTIN_EXPECT(!task, 0)) {
		return;
	}

	task_list_t* timer_queue = &readyqueues[core_id].timers;

#ifdef DYNAMIC_TICKS
	// if task is first in timer queue, we need to update the oneshot
	// timer for the next task
	if(timer_queue->first == task) {
		update_timer(task->next);
	}
#endif

	task_list_remove_task(timer_queue, task);
}
Example #19
int multitasking_init(void)
{
	uint32_t core_id = CORE_ID;

	if (BUILTIN_EXPECT(task_table[0].status != TASK_IDLE, 0)) {
		LOG_ERROR("Task 0 is not an idle task\n");
		return -ENOMEM;
	}

	task_table[0].prio = IDLE_PRIO;
	task_table[0].stack = NULL; // will be initialized later
	task_table[0].ist_addr = NULL; // will be initialized later
	set_per_core(current_task, task_table+0);

	readyqueues[core_id].idle = task_table+0;

	return 0;
}
Example #20
static uint32_t get_next_core_id(void)
{
	uint32_t i;
	static uint32_t core_id = MAX_CORES;

	if (core_id >= MAX_CORES)
		core_id = CORE_ID;

	// we assume OpenMP applications
	// => number of threads is (normally) equal to the number of cores
	// => search next available core
	for(i=0, core_id=(core_id+1)%MAX_CORES; i<2*MAX_CORES; i++, core_id=(core_id+1)%MAX_CORES)
		if (readyqueues[core_id].idle)
			break;

	if (BUILTIN_EXPECT(!readyqueues[core_id].idle, 0)) {
		LOG_ERROR("BUG: no core available!\n");
		return MAX_CORES;
	}

	return core_id;
}
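get_next_core_id remembers its position in a static variable and scans at most 2*MAX_CORES slots round-robin, so consecutive calls spread threads across the available cores. The same pattern in isolation (the per-core idle availability is mocked with a plain array):

#include <stdio.h>

#define MAX_CORES 4

static int core_available[MAX_CORES] = { 1, 0, 1, 1 };	/* mock: core 1 has no idle task */

static unsigned next_core(void)
{
	static unsigned core_id = MAX_CORES;	/* position survives across calls */
	unsigned i;

	if (core_id >= MAX_CORES)
		core_id = 0;

	for (i = 0, core_id = (core_id + 1) % MAX_CORES; i < 2 * MAX_CORES;
	     i++, core_id = (core_id + 1) % MAX_CORES)
		if (core_available[core_id])
			break;

	return core_available[core_id] ? core_id : MAX_CORES;
}

int main(void)
{
	for (int n = 0; n < 5; n++)
		printf("next core: %u\n", next_core());	/* cycles 2, 3, 0, 2, 3 */
	return 0;
}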
Example #21
int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio, uint32_t core_id)
{
	int ret = -ENOMEM;
	uint32_t i;
	void* stack = NULL;
	void* ist = NULL;
	void* counter = NULL;

	if (BUILTIN_EXPECT(!ep, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio == IDLE_PRIO, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio > MAX_PRIO, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(core_id >= MAX_CORES, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(!readyqueues[core_id].idle, 0))
		return -EINVAL;

	stack = create_stack(DEFAULT_STACK_SIZE);
	if (BUILTIN_EXPECT(!stack, 0))
		return -ENOMEM;

	ist = create_stack(KERNEL_STACK_SIZE);
	if (BUILTIN_EXPECT(!ist, 0)) {
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		return -ENOMEM;
	}

	counter = kmalloc(sizeof(atomic_int64_t));
	if (BUILTIN_EXPECT(!counter, 0)) {
		destroy_stack(ist, KERNEL_STACK_SIZE);
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		return -ENOMEM;
	}
	atomic_int64_set((atomic_int64_t*) counter, 0);

	spinlock_irqsave_lock(&table_lock);

	for(i=0; i<MAX_TASKS; i++) {
		if (task_table[i].status == TASK_INVALID) {
			task_table[i].id = i;
			task_table[i].status = TASK_READY;
			task_table[i].last_core = core_id;
			task_table[i].last_stack_pointer = NULL;
			task_table[i].stack = stack;
			task_table[i].prio = prio;
			task_table[i].heap = NULL;
			task_table[i].start_tick = get_clock_tick();
			task_table[i].last_tsc = 0;
			task_table[i].parent = 0;
			task_table[i].ist_addr = ist;
			task_table[i].tls_addr = 0;
			task_table[i].tls_size = 0;
			task_table[i].lwip_err = 0;
			task_table[i].signal_handler = NULL;

			if (id)
				*id = i;

			ret = create_default_frame(task_table+i, ep, arg, core_id);
			if (ret)
				goto out;

			// add task in the readyqueues
			spinlock_irqsave_lock(&readyqueues[core_id].lock);
			readyqueues[core_id].prio_bitmap |= (1 << prio);
			readyqueues[core_id].nr_tasks++;
			if (!readyqueues[core_id].queue[prio-1].first) {
				task_table[i].next = task_table[i].prev = NULL;
				readyqueues[core_id].queue[prio-1].first = task_table+i;
				readyqueues[core_id].queue[prio-1].last = task_table+i;
			} else {
				task_table[i].prev = readyqueues[core_id].queue[prio-1].last;
				task_table[i].next = NULL;
				readyqueues[core_id].queue[prio-1].last->next = task_table+i;
				readyqueues[core_id].queue[prio-1].last = task_table+i;
			}
			spinlock_irqsave_unlock(&readyqueues[core_id].lock);
			break;
		}
	}

	if (!ret)
		LOG_INFO("start new task %d on core %d with stack address %p\n", i, core_id, stack);

out:
	spinlock_irqsave_unlock(&table_lock);

	if (ret) {
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		destroy_stack(ist, KERNEL_STACK_SIZE);
		kfree(counter);
	}

	return ret;
}
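The readyqueue insertion that create_task and clone_task both repeat is an ordinary tail append on a doubly linked list with first/last pointers. Reduced to its core as a standalone sketch:

#include <stdio.h>

typedef struct node {
	int id;
	struct node* next;
	struct node* prev;
} node_t;

typedef struct {
	node_t* first;
	node_t* last;
} queue_t;

/* tail append, mirroring the readyqueue code above */
static void enqueue(queue_t* q, node_t* n)
{
	if (!q->first) {
		n->next = n->prev = NULL;
		q->first = q->last = n;
	} else {
		n->prev = q->last;
		n->next = NULL;
		q->last->next = n;
		q->last = n;
	}
}

int main(void)
{
	queue_t q = { NULL, NULL };
	node_t tasks[3] = { { .id = 1 }, { .id = 2 }, { .id = 3 } };

	for (int i = 0; i < 3; i++)
		enqueue(&q, &tasks[i]);

	for (node_t* n = q.first; n; n = n->next)
		printf("task %d\n", n->id);
	return 0;
}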
Example #22
int create_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
{
	int ret = -ENOMEM;
	uint32_t i;

	if (BUILTIN_EXPECT(!ep, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio == IDLE_PRIO, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio > MAX_PRIO, 0))
		return -EINVAL;

	spinlock_irqsave_lock(&table_lock);

	for(i=0; i<MAX_TASKS; i++) {
		if (task_table[i].status == TASK_INVALID) {
			task_table[i].id = i;
			task_table[i].status = TASK_READY;
			task_table[i].last_stack_pointer = NULL;
			task_table[i].stack = create_stack(i);
			task_table[i].prio = prio;
			spinlock_init(&task_table[i].vma_lock);
			task_table[i].vma_list = NULL;
			task_table[i].heap = NULL;
			task_table[i].wait_id = -1;

			spinlock_irqsave_init(&task_table[i].page_lock);
			atomic_int32_set(&task_table[i].user_usage, 0);

			/* Allocate a new PGD or PML4 and copy the page table */
			task_table[i].page_map = get_pages(1);
			if (BUILTIN_EXPECT(!task_table[i].page_map, 0)) {
				task_table[i].status = TASK_INVALID;	// release the half-initialized slot
				goto out;
			}

			/* Copy page tables & user frames of current task to new one */
			page_map_copy(&task_table[i]);

			if (id)
				*id = i;

			ret = create_default_frame(task_table+i, ep, arg);
			if (ret) {
				task_table[i].status = TASK_INVALID;	// don't enqueue a task without a valid frame
				goto out;
			}

			// add task in the readyqueues
			spinlock_irqsave_lock(&readyqueues.lock);
			readyqueues.prio_bitmap |= (1 << prio);
			readyqueues.nr_tasks++;
			if (!readyqueues.queue[prio-1].first) {
				task_table[i].next = task_table[i].prev = NULL;
				readyqueues.queue[prio-1].first = task_table+i;
				readyqueues.queue[prio-1].last = task_table+i;
			} else {
				task_table[i].prev = readyqueues.queue[prio-1].last;
				task_table[i].next = NULL;
				readyqueues.queue[prio-1].last->next = task_table+i;
				readyqueues.queue[prio-1].last = task_table+i;
			}
			spinlock_irqsave_unlock(&readyqueues.lock);
			break;
		}
	}

out:
	spinlock_irqsave_unlock(&table_lock);

	return ret;
}
Example #23
int clone_task(tid_t* id, entry_point_t ep, void* arg, uint8_t prio)
{
	int ret = -EINVAL;
	uint32_t i;
	void* stack = NULL;
	void* ist = NULL;
	task_t* curr_task;
	uint32_t core_id;

	if (BUILTIN_EXPECT(!ep, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio == IDLE_PRIO, 0))
		return -EINVAL;
	if (BUILTIN_EXPECT(prio > MAX_PRIO, 0))
		return -EINVAL;

	curr_task = per_core(current_task);

	stack = create_stack(DEFAULT_STACK_SIZE);
	if (BUILTIN_EXPECT(!stack, 0))
		return -ENOMEM;

	ist = create_stack(KERNEL_STACK_SIZE);
	if (BUILTIN_EXPECT(!ist, 0)) {
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		return -ENOMEM;
	}

	spinlock_irqsave_lock(&table_lock);

	core_id = get_next_core_id();
	if (BUILTIN_EXPECT(core_id >= MAX_CORES, 0))
	{
		spinlock_irqsave_unlock(&table_lock);
		ret = -EINVAL;
		goto out;
	}

	for(i=0; i<MAX_TASKS; i++) {
		if (task_table[i].status == TASK_INVALID) {
			task_table[i].id = i;
			task_table[i].status = TASK_READY;
			task_table[i].last_core = core_id;
			task_table[i].last_stack_pointer = NULL;
			task_table[i].stack = stack;
			task_table[i].prio = prio;
			task_table[i].heap = curr_task->heap;
			task_table[i].start_tick = get_clock_tick();
			task_table[i].last_tsc = 0;
			task_table[i].parent = curr_task->id;
			task_table[i].tls_addr = curr_task->tls_addr;
			task_table[i].tls_size = curr_task->tls_size;
			task_table[i].ist_addr = ist;
			task_table[i].lwip_err = 0;
			task_table[i].signal_handler = NULL;

			if (id)
				*id = i;

			ret = create_default_frame(task_table+i, ep, arg, core_id);
			if (ret) {
				// don't leave the table lock held on the error path
				spinlock_irqsave_unlock(&table_lock);
				goto out;
			}

			// add task in the readyqueues
			spinlock_irqsave_lock(&readyqueues[core_id].lock);
			readyqueues[core_id].prio_bitmap |= (1 << prio);
			readyqueues[core_id].nr_tasks++;
			if (!readyqueues[core_id].queue[prio-1].first) {
				task_table[i].next = task_table[i].prev = NULL;
				readyqueues[core_id].queue[prio-1].first = task_table+i;
				readyqueues[core_id].queue[prio-1].last = task_table+i;
			} else {
				task_table[i].prev = readyqueues[core_id].queue[prio-1].last;
				task_table[i].next = NULL;
				readyqueues[core_id].queue[prio-1].last->next = task_table+i;
				readyqueues[core_id].queue[prio-1].last = task_table+i;
			}
			// should we wakeup the core?
			if (readyqueues[core_id].nr_tasks == 1)
				wakeup_core(core_id);
			spinlock_irqsave_unlock(&readyqueues[core_id].lock);
			break;
		}
	}

	spinlock_irqsave_unlock(&table_lock);

	if (!ret) {
		LOG_DEBUG("start new thread %d on core %d with stack address %p\n", i, core_id, stack);
	}

out:
	if (ret) {
		destroy_stack(stack, DEFAULT_STACK_SIZE);
		destroy_stack(ist, KERNEL_STACK_SIZE);
	}

	return ret;
}