// Allocate one physical page for a page-translation object, zero it and
// return its virtual address; the physical address is optionally reported
// through phys_addr.
uint64_t get_zeroed_page_trans_obj(uint64_t *phys_addr) {

    uint64_t phys = alloc_phys_pages(1);
    if(phys_addr) *phys_addr = phys;

    // Virtual address the kernel can use to access the new page
    uint64_t page_vaddr = VIRTUAL_ADDR(phys);

    // Clear the translation object (zeroed entries are not present)
    memset((void*)page_vaddr, 0, SIZEOF_PAGE_TRANS);

    return page_vaddr;
}
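
A minimal usage sketch, assuming the PAGE_TRANS_PRESENT, PAGE_TRANS_READ_WRITE and PAGE_TRANS_NEXT_LEVEL_ADDR macros behave as they do in cow_fork_level() further below; install_next_level(), parent_table and idx are hypothetical names used only for illustration.

// Sketch: install a freshly zeroed next-level table into entry `idx` of an
// existing table and return its virtual address (0 on failure). Only
// get_zeroed_page_trans_obj() and the PAGE_TRANS_* macros come from the
// listings in this section; everything else is illustrative.
static uint64_t install_next_level(uint64_t *parent_table, int idx) {

    uint64_t phys;
    uint64_t table_vaddr = get_zeroed_page_trans_obj(&phys);

    if(!phys)
        return 0;

    // Point the parent entry at the new table and mark it present/writable
    parent_table[idx] = PAGE_TRANS_NEXT_LEVEL_ADDR(phys)
                            | PAGE_TRANS_PRESENT
                            | PAGE_TRANS_READ_WRITE;

    return table_vaddr;
}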
Example #2
/*-----------------------------------------------------------------------------
 *		Create a process from an ELF executable (kernel or user mode)
 *---------------------------------------------------------------------------*/
process_t* exec_proc(char* name, bool kernel)
{
	u32int		pdir_vaddr = 0;			/* Page directory virtual address */
	physaddr_t	page_dir = 0;			/* Page directory physical address */

	size_t		page_count;				/* Number of pages to allocate */
	size_t		stack_page_count;		/* Number of stack pages */
	size_t		seg_page_count;			/* Number of ELF segment pages */
	size_t		heap_page_count;		/* Number of heap pages */
	size_t		blocks_page_count;		/* Number of heap info block pages */

	physaddr_t	tmp_paddr = 0;			/* Temporary physical address */
	physaddr_t	stack_paddr = 0;
	physaddr_t	user_stack_paddr = 0;
	physaddr_t	seg_paddr = 0;
	physaddr_t	heap_paddr = 0;
	physaddr_t	blocks_paddr = 0;

	s8int		err = -1;				/* Error code */
	int			i = 0;
	size_t		sz = 0;					/* Size of data read */
	process_t*	proc = 0;				/* Process handler */
	thread_t*	thread = 0;				/* Thread handler */

	u32int		stack = 0;				/* Stack start address */
	u32int		stack_size = 0x4000;	/* Stack size */

	u32int		usr_stack = 0;

	u32int		eflags = 0;				/* EFLAGS buffer */
	u32int		seg_size = 0;

	heap_t*		heap;

	/* Load ELF info */
	elf_sections_t* elf = load_elf(name);

	/* Check file format */
	if (elf->elf_header->e_type != ET_EXEC)
	{
		print_text("This file is not executable...FAIL\n");
		return NULL;
	}

	/* Check architecture */
	if (elf->elf_header->e_mashine != EM_386)
	{
		print_text("This file is not for i386 architecture...FAIL\n");
		return NULL;
	}

	/* Create page directory */
	page_dir = clone_kernel_dir(&pdir_vaddr);

	/* Allocate pages for ELF segments */
	for (i = 0; i < elf->elf_header->e_phnum; i++)
		seg_size += elf->p_header[i].p_memsz;

	page_count = seg_size / PAGE_SIZE + 1;
	seg_page_count = page_count;

	tmp_paddr = alloc_phys_pages(page_count);
	seg_paddr = tmp_paddr;

	err = map_pages(page_dir,
			        (void*) elf->p_header[0].p_vaddr,
			        tmp_paddr,
			        page_count,
			        0x07);

	if (err == -1)
	{
		print_text("Memory mapping error...FAIL\n");
		return NULL;
	}

	/* kernel stack */
	stack = (u32int) kmalloc(stack_size);

	/* user stack */
	usr_stack = elf->p_header[0].p_vaddr + page_count*PAGE_SIZE;

	page_count = stack_size / PAGE_SIZE;
	tmp_paddr = alloc_phys_pages(page_count);
	user_stack_paddr = tmp_paddr;
	stack_page_count = page_count;

	err = map_pages(page_dir,
				    (void*) usr_stack,
				    tmp_paddr,
				    page_count,
				    0x07);

	if (err == -1)
	{
		print_text("Memory mapping error...FAIL\n");
		return NULL;
	}

	/* Process heap creation */
	page_count = USER_HEAP_SIZE / PAGE_SIZE;
	heap_page_count = page_count;

	tmp_paddr = alloc_phys_pages(page_count);
	heap_paddr = tmp_paddr;

	err = map_pages(page_dir,
				    USER_HEAP_START,
				    tmp_paddr,
				    page_count,
				    0x07);

	if (err == -1)
	{
		print_text("Memory mapping error...FAIL\n");
		return NULL;
	}

	page_count = USER_HEAP_INFO_SIZE / PAGE_SIZE;
	blocks_page_count = page_count;

	tmp_paddr = alloc_phys_pages(page_count);
	blocks_paddr = tmp_paddr;

	err = map_pages(page_dir,
				    USER_HEAP_BLOKS_INFO,
				    tmp_paddr,
				    page_count,
				    0x07);

	if (err == -1)
	{
		print_text("Memory mapping error...FAIL\n");
		return NULL;
	}

	/* Create process */
	proc = (process_t*) kmalloc(sizeof(process_t));

	proc->page_dir = page_dir;
	proc->pid = get_pid();
	proc->list_item.list = NULL;
	strcpy(proc->name, name);
	proc->suspend = false;
	proc->threads_count = 0;

	proc->page_dir_vaddr = (void*) pdir_vaddr;

	proc->stack_paddr = stack_paddr;
	proc->user_stack_vaddr = (void*) usr_stack;
	proc->user_stack_paddr = user_stack_paddr;
	proc->stack_page_count = stack_page_count;
	proc->seg_paddr = seg_paddr;
	proc->seg_page_count = seg_page_count;
	proc->heap_paddr = heap_paddr;
	proc->heap_page_count = heap_page_count;
	proc->blocks_paddr = blocks_paddr;
	proc->blocks_page_count = blocks_page_count;

	add_process(proc);

	/* Create main thread */
	thread = (thread_t*) kmalloc(sizeof(thread_t));

	thread->id = get_thread_id();
	thread->suspend = false;
	thread->process = proc;
	thread->entry_point = elf->elf_header->e_entry;
	thread->list_item.list = NULL;
	thread->stack = (void*) stack;
	thread->stack_size = stack_size;
	thread->stack_top = stack + stack_size;
	thread->esp = stack + stack_size - 28;

	proc->thread_id[proc->threads_count++] = thread->id;

	/* Reserve seven 32-bit slots at the top of the kernel stack;
	 * thread->esp (stack_top - 28) points at the lowest of them, and the
	 * slots are filled below with the initial EFLAGS, entry point and
	 * return address the task switch code is expected to pop on the
	 * first switch to this thread. */
	u32int* esp = (u32int*) (stack + stack_size);

	/* Copy the current EFLAGS and set IF (bit 9) so the new thread
	 * starts with interrupts enabled */
	eflags = read_eflags();

	eflags |= (1 << 9);

	if (kernel)
	{
		esp[-4] = (u32int) &destroy_proc;
		esp[-5] = elf->elf_header->e_entry;
		esp[-7] = eflags;
	}
	else
	{
		esp[-2] = (u32int) proc;
		esp[-3] = (u32int) elf;
		esp[-5] = (u32int) &start_elf;
		esp[-7] = eflags;
	}

	add_thread(thread);

	return proc;
}
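
A minimal usage sketch, assuming exec_proc() is called from kernel context once the ELF loader and scheduler are up; the wrapper start_kernel_proc() is an illustrative name, not part of the original code.

/* Sketch: create a kernel-mode process from an ELF image and report failure */
void start_kernel_proc(char* name)
{
	process_t* proc = exec_proc(name, true);

	if (proc == NULL)
	{
		print_text("Process creation error...FAIL\n");
		return;
	}
}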
static int cow_fork_level(uint64_t *v_src, uint64_t *v_dest, int level) {

    for(int i = 0; i < NUM_PAGE_TRANS_ENTRIES; i++) {

        if(v_src[i] & PAGE_TRANS_PRESENT) {

            if(level == 1) {
                // Mark src as read only
                v_src[i] &= (~PAGE_TRANS_READ_WRITE);

                // Copy to dest
                v_dest[i] = v_src[i];

                // Physical page
                uint64_t phys = PAGE_TRANS_NEXT_LEVEL_ADDR(v_src[i])
                                        | PAGE_TRANS_ADDR_SIGN_EXT(v_src[i]);

                // Get page descriptor
                struct phys_page_t *page_desc = get_phys_page_desc(phys);
                if(!page_desc) {
                    // TODO: Free stuff!
                    return -1;
                }

                // Update refcount, mark COW
                page_desc->refcount++;
                page_desc->flag = PAGE_COW; // TODO: OR instead of assigning

                continue;
            }

            // Shallow copy kernel page table entries
            if(level == 4 && i >= PML4_KERNEL_ENTRY_START &&
                                                i <= PML4_KERNEL_ENTRY_END) {
                v_dest[i] = v_src[i];
                continue;
            }

            // Do not copy self reference entry
            if(level == 4 && i == SELF_REF_ENTRY) {
                continue;
            }

            uint64_t src_phys = PAGE_TRANS_NEXT_LEVEL_ADDR(v_src[i])
                                        | PAGE_TRANS_ADDR_SIGN_EXT(v_src[i]);

            // Create a page translation object
            uint64_t dest_phys = alloc_phys_pages(1);
            if(!dest_phys) {
                // TODO: Free stuff!
                return -1;
            }

            // Copy flags and address of next level page table
            v_dest[i] = PAGE_TRANS_NON_ADDR_FIELDS(v_src[i])
                                        | PAGE_TRANS_NEXT_LEVEL_ADDR(dest_phys);
            if(-1 == cow_fork_level((uint64_t*)VIRTUAL_ADDR(src_phys),
                            (uint64_t*)VIRTUAL_ADDR(dest_phys), level - 1)) {
                // TODO: Free stuff!
                return -1;
            }
        }
    }

    return 0;
}
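
A minimal sketch of how the recursion might be started for a copy-on-write fork, assuming the top level is the PML4 (level 4) and reusing get_zeroed_page_trans_obj() from above; cow_fork_pml4() is a hypothetical wrapper, and the way the self-reference entry is rebuilt is an assumption about the entry layout.

// Sketch: clone an address space copy-on-write and return the physical
// address of the new PML4 (0 on failure). Only cow_fork_level(),
// SELF_REF_ENTRY, get_zeroed_page_trans_obj() and the PAGE_TRANS_* macros
// come from the listings above; everything else is illustrative.
static uint64_t cow_fork_pml4(uint64_t *src_pml4) {

    uint64_t new_pml4_phys;
    uint64_t *new_pml4 = (uint64_t*)get_zeroed_page_trans_obj(&new_pml4_phys);

    if(!new_pml4_phys)
        return 0;

    // Walk all four levels: kernel entries are shared, user pages are
    // marked read-only and COW by cow_fork_level()
    if(cow_fork_level(src_pml4, new_pml4, 4) == -1) {
        // TODO: Free stuff!
        return 0;
    }

    // Rebuild the self-reference entry that cow_fork_level() skips
    // (assumed: present + writable entry pointing at the new PML4)
    new_pml4[SELF_REF_ENTRY] = PAGE_TRANS_NEXT_LEVEL_ADDR(new_pml4_phys)
                                   | PAGE_TRANS_PRESENT
                                   | PAGE_TRANS_READ_WRITE;

    return new_pml4_phys;
}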