void schedule(isr_regs *regs)
{
    int temp_stack[100];

    if (kernel_reenter)
        return;

    /* Save the interrupted context into the current process. */
    memcpy((char *)&cur_process->registers, (char *)regs, sizeof(isr_regs));

    context_t *orig = cur_process;
    do {
        cur_process = cur_process->next;
        if (cur_process == 0)
            cur_process = context_list;
        if (cur_process == orig) {
            /* We've looped the whole list. */
            if (cur_process->status != PROCESS_STATUS_RUNNING) {
                /* Nothing is runnable: idle with interrupts enabled on a
                 * temporary stack until an interrupt wakes something up. */
                kernel_reenter = 1;
                set_kernel_tss_stack((void *)temp_stack);
                __asm__("sti\n"
                        "hlt");
                set_kernel_tss_stack(0);
                __asm__("cli");
                kernel_reenter = 0;
            }
        }
    } while (cur_process->status != PROCESS_STATUS_RUNNING);

    /* Restore the chosen process's context and switch address spaces. */
    memcpy((char *)regs, (char *)&cur_process->registers, sizeof(isr_regs));
    set_cr3(cur_process->space->cr3);
}
ac_bool test_crs(void) {
  ac_bool error = AC_FALSE;

  union cr0_u cr0 = { .raw = get_cr0() };
  // cr1 is reserved
  ac_uint cr2 = get_cr2();
  union cr3_u cr3 = { .raw = get_cr3() };
  union cr4_u cr4 = { .raw = get_cr4() };
  ac_uint cr8 = get_cr8();

  print_cr0("cr0", cr0.raw);
  ac_printf("cr2: 0x%p\n", cr2);
  print_cr3("cr3", cr3.raw);
  print_cr4("cr4", cr4.raw);
  ac_printf("cr8: 0x%p\n", cr8);

  set_cr0(cr0.raw);
  // cr2 is read only
  set_cr3(cr3.raw);
  set_cr4(cr4.raw);
  set_cr8(cr8);

  ac_uint cr0_1 = get_cr0();
  ac_uint cr3_1 = get_cr3();
  ac_uint cr4_1 = get_cr4();
  ac_uint cr8_1 = get_cr8();

  error |= AC_TEST(cr0.raw == cr0_1);
  error |= AC_TEST(cr3.raw == cr3_1);
  error |= AC_TEST(cr4.raw == cr4_1);
  error |= AC_TEST(cr8 == cr8_1);

  return error;
}
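None of the snippets in this collection defines the `get_crN`/`set_crN` accessors they all rely on. For reference, here is a minimal sketch of how such accessors are commonly written with GCC inline assembly on x86; the names mirror the calls above, but each project's real definitions (widths, clobbers, volatility) may differ:

#include <stdint.h>

/* Hypothetical CR0/CR3 accessors, assuming a GCC-style toolchain. */
static inline uintptr_t get_cr3(void) {
    uintptr_t val;
    __asm__ __volatile__("mov %%cr3, %0" : "=r"(val));
    return val;
}

static inline void set_cr3(uintptr_t val) {
    /* Writing CR3 also flushes all non-global TLB entries. */
    __asm__ __volatile__("mov %0, %%cr3" :: "r"(val) : "memory");
}

static inline uintptr_t get_cr0(void) {
    uintptr_t val;
    __asm__ __volatile__("mov %%cr0, %0" : "=r"(val));
    return val;
}

static inline void set_cr0(uintptr_t val) {
    __asm__ __volatile__("mov %0, %%cr0" :: "r"(val));
}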
void ArchCpu::_swap(kernel::sys::sched::task::Task *prev,
                    kernel::sys::sched::task::Task *next)
{
    set_cr3((uint32_t)(next->vmap.pgd));
    cpu_switch(&prev->tsk_context.kregs, &next->tsk_context.kregs);
}
int kernel_start()
{
    int i;

    // point to low address (0GB ~ ...)
    pgd_tmp[PGD_INDEX(0)] = (uint32_t)pte_low | PAGE_PRESENT | PAGE_WRITE;
    // point to high address (3GB ~ 4GB)
    pgd_tmp[PGD_INDEX(PAGE_OFFSET)] = (uint32_t)pte_hig | PAGE_PRESENT | PAGE_WRITE;

    for (i = 0; i < 1024; i++) {
        pte_low[i] = (i << 12) | PAGE_PRESENT | PAGE_WRITE;
    }
    for (i = 0; i < 1024; i++) {
        pte_hig[i] = (i << 12) | PAGE_PRESENT | PAGE_WRITE;
    }

    // set page directory
    set_cr3(pgd_tmp);
    page_on();

    kernel_stack_top = ((uint32_t)kernel_stack + STACK_SIZE) & 0xFFFFFFF0;
    set_esp(kernel_stack_top);

    global_mboot_ptr = (multiboot_t *)((uint32_t)global_mboot_tmp + PAGE_OFFSET);

    kernel_init();
    return 0;
}
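Both directory entries above point the same 4 MB of physical memory into two different virtual windows. Assuming the usual 32-bit definitions `PAGE_OFFSET = 0xC0000000` and `PGD_INDEX(addr) = (addr) >> 22` (the snippet does not show them), the index arithmetic works out as follows:

#include <stdio.h>

/* Assumed values; PAGE_OFFSET and PGD_INDEX are not shown in the snippet. */
#define PAGE_OFFSET 0xC0000000u
#define PGD_INDEX(addr) ((addr) >> 22) /* one directory entry covers 4 MB */

int main(void) {
    /* Entry 0 identity-maps virtual 0..4MB onto physical 0..4MB (pte_low);
     * entry 768 maps virtual 3GB..3GB+4MB onto the same frames (pte_hig),
     * which is how the higher-half kernel stays reachable once paging is on. */
    printf("PGD_INDEX(0)           = %u\n", PGD_INDEX(0u));          /* 0   */
    printf("PGD_INDEX(PAGE_OFFSET) = %u\n", PGD_INDEX(PAGE_OFFSET)); /* 768 */
    return 0;
}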
int create_process()
{
    context_t *new_context, *t;

    new_context = (context_t *)allocate_from_slab(context_slab);
    if (context_list == 0) {
        context_list = new_context;
    } else {
        for (t = context_list; t->next != 0; t = t->next)
            ; /* Get to end */
        t->next = new_context;
    }
    new_context->next = 0;

    new_context->registers.eax = 0xDEADBEEF;
    new_context->registers.gs = 0x23;
    new_context->registers.fs = 0x23;
    new_context->registers.es = 0x23;
    new_context->registers.ds = 0x23;
    new_context->registers.ss = 0x23;
    new_context->registers.eip = 0x40000000;
    new_context->registers.cs = 0x1B;
    new_context->registers.eflags = 0x200; /* IF set */
    new_context->registers.useresp = 0xBFFFFFF0; /* 0xC0000FFF; */

    new_context->space = create_address_space();
    set_cr3(new_context->space->cr3);

    new_context->pid = next_avail_pid;
    next_avail_pid++;
    new_context->status = PROCESS_STATUS_RUNNING;
    return new_context->pid;
}
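The magic selector values above follow the standard x86 segment-selector layout (bits 15:3 table index, bit 2 table indicator, bits 1:0 requested privilege level), so with the usual flat GDT layout they decode to user-mode code and data segments:

#include <stdio.h>

/* Decode the selectors used by create_process():
 * 0x23 -> GDT index 4, TI=0, RPL=3 (typically the user data segment)
 * 0x1B -> GDT index 3, TI=0, RPL=3 (typically the user code segment) */
int main(void) {
    unsigned sel[] = { 0x23, 0x1B };
    for (int i = 0; i < 2; i++)
        printf("0x%02X: index=%u TI=%u RPL=%u\n",
               sel[i], sel[i] >> 3, (sel[i] >> 2) & 1, sel[i] & 3);
    return 0;
}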
/* Initializes paging for the system address space */
void init_mm()
{
    init_table_pages();
    init_frames();
    init_dir_pages();
    set_cr3(get_DIR(&task[0].task));
    set_pe_flag();
}
/* Initializes paging for the system address space */
void init_mm()
{
    init_table_pages();
    init_frames();
    init_dir_pages();
    init_heap_structs();
    allocate_DIR(&task[0].task);
    set_cr3(get_DIR(&task[0].task));
    set_pe_flag();
}
// sets the CR3 register with the start address of the page structure for process # [index]
void set_pdir_base(unsigned int index)
{
    // TODO
    if (index >= NUM_IDS) {
        // process id exceeds the boundary
        return;
    }
    set_cr3(PDirPool[index]);
}
// sets the CR3 register with the start address of the page structure for process # [index]
void set_pdir_base(unsigned int index)
{
    // TODO
    if (index >= NUM_IDS) {
        // process id exceeds the boundary
        dprintf("Process ID:%u is out of boundary", index);
        return;
    }
    set_cr3(PDirPool[index]);
}
/* Initializes paging for the system address space */
void init_mm()
{
    int i;

    init_table_pages();
    init_frames();
    init_dir_pages();

    for (i = 0; i < NR_TASKS; ++i) {
        vdir[i] = 0;
    }

    allocate_DIR(&task[0].task);
    set_cr3(get_DIR(&task[0].task));
    set_pe_flag();
}
/*
** we map more than available
** since we only use 1GB or 2MB pages
*/
static void vmm_pagemem_init()
{
    pml4e_t  *pml4;
    pdpe_t   *pdp;
    pde64_t  *pd;
    cr3_reg_t cr3;
    offset_t  pfn;
    uint32_t  i, j, k;
    offset_t  limit;
    size_t    pdp_nr, pd_nr, pt_nr;
    size_t    pml4e_max, pdpe_max, pde_max;

    pml4 = info->vmm.cpu.pg.pml4;
    limit = info->hrd.mem.top - 1;

    pdp_nr = pdp_nr(limit) + 1;
    pd_nr  = pd64_nr(limit) + 1;
    pt_nr  = pt64_nr(limit) + 1;

    pfn = 0;
    pml4e_max = pdp_nr; /* only one pml4 */

    for (i = 0; i < pml4e_max; i++) {
        pdp = info->vmm.cpu.pg.pdp[i];
        pg_set_entry(&pml4[i], PG_KRN|PG_RW, page_nr(pdp));

        pdpe_max = min(pd_nr, PDPE_PER_PDP);
        pd_nr -= pdpe_max;

        for (j = 0; j < pdpe_max; j++) {
            if (info->vmm.cpu.skillz.pg_1G)
                pg_set_large_entry(&pdp[j], PG_KRN|PG_RW, pfn++);
            else {
                pd = info->vmm.cpu.pg.pd[j];
                pg_set_entry(&pdp[j], PG_KRN|PG_RW, page_nr(pd));

                pde_max = min(pt_nr, PDE64_PER_PD);
                pt_nr -= pde_max;

                for (k = 0; k < pde_max; k++)
                    pg_set_large_entry(&pd[k], PG_KRN|PG_RW, pfn++);
            }
        }
    }

    cr3.raw = 0UL;
    cr3.pml4.addr = page_nr(pml4);
    set_cr3(cr3.raw);
}
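To make the counting concrete: assuming `pdp_nr()`, `pd64_nr()` and `pt64_nr()` are index macros of the usual x86-64 form (shifts by 39, 30 and 21 bits; their real definitions are not shown in the snippet), a machine with 4 GB of RAM needs one PDP table, four 1 GB PDP entries, and 2048 two-megabyte mappings:

#include <stdio.h>
#include <stdint.h>

/* Assumed index macros; the snippet's real definitions are not shown. */
#define pdp_nr(addr)  ((uint64_t)(addr) >> 39) /* 512 GB per PML4 entry */
#define pd64_nr(addr) ((uint64_t)(addr) >> 30) /*   1 GB per PDP  entry */
#define pt64_nr(addr) ((uint64_t)(addr) >> 21) /*   2 MB per PD   entry */

int main(void) {
    uint64_t limit = (4ULL << 30) - 1; /* top of 4 GB of RAM */
    printf("pdp tables : %llu\n", (unsigned long long)pdp_nr(limit) + 1);  /* 1    */
    printf("pd  entries: %llu\n", (unsigned long long)pd64_nr(limit) + 1); /* 4    */
    printf("2MB pages  : %llu\n", (unsigned long long)pt64_nr(limit) + 1); /* 2048 */
    return 0;
}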
void scheduler()
{
    for (;;) {
        struct process *proc = current_proc ? current_proc->next : list_head.next;

        /* Find a runnable process */
        while (proc == &list_head || proc->state != PROC_STATE_RUNNING) {
            if (proc != &list_head && proc->state == PROC_STATE_DEAD) {
                /* Release the dead process */
                struct process *dead = proc;
                proc = proc->next;
                sched_remove(dead);
                proc_free(dead);
            } else {
                proc = proc->next;
            }
        }

        close_int();
        current_proc = proc;

        /* Update TSS */
        tss.ss0 = KERNEL_DATA_SELECTOR;
        tss.esp0 = proc->kernel_stack;
        flush_tss();

        /* Change to process virtual address space */
        set_cr3(CAST_VIRTUAL_TO_PHYSICAL(proc->page_dir));

        if (!proc->context)
            init_context(proc);
        switch_kcontext(&sched_context, proc->context);

        /* Call schedule task */
        if (sched_task) {
            sched_task(current_proc);
            sched_task = NULL;
        }
    }
}
/* Handles the copy from the cbuffer and the problems caused when a
 * process other than the blocked one has to access the blocked
 * process's address space.
 * Reads min(keystoread, size of the cbuffer) elements.
 * - Returns 1:
 *     if the copy could be performed.
 * - Returns 0:
 *     if the copy could not be performed because the process has no
 *     free pages with which to access the blocked process's address
 *     space.
 */
int keyboard_cbuffer_read()
{
    struct list_head *list_blocked = list_first(&keyboardqueue);
    struct task_struct *blocked_pcb = list_head_to_task_struct(list_blocked);
    struct task_struct *current_pcb = current();
    page_table_entry *pt_blocked = get_PT(blocked_pcb);
    page_table_entry *pt_current = get_PT(current_pcb);
    page_table_entry *dir_blocked = get_DIR(blocked_pcb);
    page_table_entry *dir_current = get_DIR(current_pcb);
    char bread;

    if (dir_blocked != dir_current) { // Two independent processes
        int id_pag_buffer = ((int)blocked_pcb->kbinfo.keybuffer & 0x003FF000) >> 12;
        int addr_buffer = ((int)blocked_pcb->kbinfo.keybuffer & 0x00000FFF);

        /* Search for free entries in the page table
         * (bounds check first, so we never read past the table) */
        int free_pag = FIRST_FREE_PAG_P;
        while (free_pag < TOTAL_PAGES && pt_current[free_pag].entry != 0)
            free_pag++;
        if (free_pag == TOTAL_PAGES)
            return 0; // Error case

        set_ss_pag(pt_current, free_pag, pt_blocked[id_pag_buffer].bits.pbase_addr);

        while (!circularbIsEmpty(&cbuffer) && blocked_pcb->kbinfo.keystoread > 0) {
            circularbRead(&cbuffer, &bread);
            copy_to_user(&bread, (void *)((free_pag << 12) + addr_buffer), 1);
            blocked_pcb->kbinfo.keystoread--;
            blocked_pcb->kbinfo.keysread++;
            blocked_pcb->kbinfo.keybuffer++;
            addr_buffer++;

            /* If we have to move on to the next page */
            if (addr_buffer == PAGE_SIZE) {
                addr_buffer = 0; /* restart the offset within the new page */
                id_pag_buffer++;
                set_ss_pag(pt_current, free_pag, pt_blocked[id_pag_buffer].bits.pbase_addr);
                set_cr3(dir_current); /* flush the TLB */
            }
        }
        del_ss_pag(pt_current, free_pag);
    }
int setup_kernel_memory(uint64_t kernmem, uint64_t p_kern_start,
                        uint64_t p_kern_end, uint64_t p_vdo_buff_start,
                        uint32_t *modulep)
{
    struct kernel_mm_struct *mm = get_kernel_mm();

    // Set up vma
    // Kernel virtual memory space
    if(-1 == set_kernel_memory(kernmem, kernmem - p_kern_start + p_kern_end)) {
        return -1;
    }

    // Video buffer memory
    // TODO: Check return value
    uint64_t vdo_start_addr = get_unmapped_area(&(mm->mmap),
                                                kernmem + p_vdo_buff_start,
                                                SIZEOF_PAGE);
    if(-1 == set_video_buffer_memory(vdo_start_addr, vdo_start_addr + SIZEOF_PAGE)) {
        return -1;
    }

    // AHCI memory
    uint64_t ahci_start_addr = get_unmapped_area(&(mm->mmap), kernmem, SIZEOF_PAGE);
    if(-1 == set_ahci_memory(ahci_start_addr, ahci_start_addr + SIZEOF_PAGE)) {
        return -1;
    }

    // Scan physical pages
    struct smap_t {
        uint64_t base, length;
        uint32_t type;
    }__attribute__((packed)) *smap;

    uint64_t phys_end_addr = 0;
    int lower_chunk = 0;
    uint64_t lower_chunk_start = 0;
    uint64_t lower_chunk_end = 0;

    while(modulep[0] != 0x9001)
        modulep += modulep[1]+2;

    for(smap = (struct smap_t*)(modulep+2);
        smap < (struct smap_t*)((char*)modulep + modulep[1] + 2*4);
        ++smap) {
        if (smap->type == 1 && smap->length != 0) {
            if(phys_end_addr < smap->base + smap->length) {
                phys_end_addr = smap->base + smap->length;
            }
            if(!lower_chunk) {
                lower_chunk_start = smap->base;
                lower_chunk_end = smap->base + smap->length;
                lower_chunk++;
            }
            if(!new_chunk(smap->base, smap->base + smap->length)) {
                return -1;
            }
        }
    }

    // TODO: Check return value
    uint64_t phys_mem_offset = get_unmapped_area(&(mm->mmap), kernmem, phys_end_addr);
    if(-1 == set_phys_memory(phys_mem_offset, phys_mem_offset + phys_end_addr)) {
        return -1;
    }

    if(-1 == scan_all_chunks()) {
        return -1;
    }

    // Mark used physical pages
    // The first page - just like that
    if(0 > inc_ref_count_pages(0, SIZEOF_PAGE)) {
        return -1;
    }
    // Video buffer memory - is not part of chunks obtained from modulep. No
    // need to mark.
    // Kernel physical pages
    if(0 > inc_ref_count_pages(p_kern_start, p_kern_end)) {
        return -1;
    }
    // Ignore lower chunk
    if(0 > inc_ref_count_pages(lower_chunk_start, lower_chunk_end)) {
        return -1;
    }

    // Initialize free pages
    if(-1 == init_free_phys_page_manager()) {
        return -1;
    }

    /*
    printf("start kernel: %p\n", mm->start_kernel);
    printf("end kernel  : %p\n", mm->end_kernel);
    printf("start vdo   : %p\n", mm->start_vdo_buff);
    printf("end vdo     : %p\n", mm->end_vdo_buff);
    printf("start phys  : %p\n", mm->start_phys_mem);
    printf("end phys    : %p\n", mm->end_phys_mem);
    printf("start ahci  : %p\n", mm->start_ahci_mem);
    printf("end ahci    : %p\n", mm->end_ahci_mem);
    */

    // Set up page tables
    uint64_t pml4_page = get_selfref_PML4(NULL);
    uint64_t paddr = p_kern_start;
    uint64_t vaddr = kernmem;
    while(paddr < p_kern_end) {
        update_page_table_idmap(pml4_page, paddr, vaddr, PAGE_TRANS_READ_WRITE);
        paddr += SIZEOF_PAGE;
        vaddr += SIZEOF_PAGE;
    }

    // TODO: Remove user supervisor permission from video buffer
    update_page_table_idmap(pml4_page, p_vdo_buff_start, vdo_start_addr,
                            PAGE_TRANS_READ_WRITE | PAGE_TRANS_USER_SUPERVISOR);
    update_page_table_idmap(pml4_page, P_AHCI_START, ahci_start_addr,
                            PAGE_TRANS_READ_WRITE | PAGE_TRANS_USER_SUPERVISOR);
    phys_mem_offset_map(pml4_page, phys_mem_offset);

    // Protect read-only pages from supervisor-level writes
    set_cr0(get_cr0() | CR0_WP);

    // Set cr3
    struct str_cr3 cr3 = get_default_cr3();
    cr3.p_PML4E_4Kb = pml4_page >> 12;
    set_cr3(cr3);

    // Indicate memory set up done
    kmDeviceMemorySetUpDone();

    global_video_vaddr = (void *)vdo_start_addr;
    set_phys_mem_virt_map_base(phys_mem_offset);

    return 0;
}
void load_elf_args(Task *tsk, int argc, char *argv[], char *envp[])
{
    if (tsk->mm->start_stack == 0)
        panic("Task not set up\n");

    char tmp_c;
    pml4_t *kern_cr3;
    pml4_t *task_cr3;
    kern_cr3 = get_cr3();
    task_cr3 = (pml4_t*)tsk->registers.cr3;

    set_cr3(task_cr3);
    uint64_t *new_stack = (uint64_t*)((tsk->mm->start_stack + PAGE_SIZE) & PG_ALIGN);
    if (kmalloc_vma(task_cr3, (uint64_t)new_stack, 1, USER_SETTINGS) == NULL) {
        panic("malloc failed\n");
        return;
    }

    set_cr3(kern_cr3);
    struct vm_area_struct *vma =
        (struct vm_area_struct *)kmalloc_kern(sizeof(struct vm_area_struct));
    vma->vm_prot = 0;
    vma->vm_start = (uint64_t)new_stack;
    vma->vm_end = vma->vm_start + PAGE_SIZE;
    add_vma(tsk->mm, vma);

    set_cr3(task_cr3);
    new_stack = (uint64_t*)tsk->mm->start_stack;
    *new_stack = argc;
    new_stack++;

    //tsk->args.argv = PHYS_TO_VIRT(kmalloc_pg());
    tsk->args.argv = (uint64_t)kmalloc_vma(task_cr3,
        (tsk->mm->start_stack + (4*PAGE_SIZE)) & PG_ALIGN, 1, USER_SETTINGS);
    char *tsk_argv = (char*)tsk->args.argv;

    set_cr3(kern_cr3);
    vma = (struct vm_area_struct *)kmalloc_kern(sizeof(struct vm_area_struct));
    vma->vm_prot = 0;
    vma->vm_start = (uint64_t)tsk_argv;
    vma->vm_end = vma->vm_start + PAGE_SIZE;
    add_vma(tsk->mm, vma);

    set_cr3(task_cr3);
    /* Copy argv byte by byte, swapping CR3 between the kernel and task
     * address spaces around every read and write. */
    for (int i = 0; i < argc; i++, new_stack++) {
        *new_stack = (uint64_t)tsk_argv;
        set_cr3(kern_cr3);
        for (int j = 0; *(argv[i]+j) != '\0'; j++, tsk_argv++) {
            //if(get_pte((pml4_t*)tsk->registers.cr3, (uint64_t)tsk_argv)) panic("VERY BAD!!!\n");
            //printk("char: %c\n", *(argv[i]+j));
            //*tsk_argv = *(argv[i] + j);
            set_cr3(kern_cr3);
            tmp_c = *(argv[i] + j);
            set_cr3(task_cr3);
            *tsk_argv = tmp_c;
            set_cr3(kern_cr3);
            //printk("%c\n", *(argv[i]+j));
            //if(i == 1 && j == 3)halt();
        }
        set_cr3(task_cr3);
        *tsk_argv = '\0';
        tsk_argv++;
    }
    *new_stack = 0;
    new_stack++;

    //tsk->args.envp = PHYS_TO_VIRT(kmalloc_pg());
    tsk->args.envp = (uint64_t)kmalloc_vma((pml4_t*)tsk->registers.cr3,
        (tsk->mm->start_stack + (5*PAGE_SIZE)) & PG_ALIGN, 1, USER_SETTINGS);
    char *tsk_env = (char*)tsk->args.envp;

    set_cr3(kern_cr3);
    vma = (struct vm_area_struct *)kmalloc_kern(sizeof(struct vm_area_struct));
    vma->vm_prot = 0;
    vma->vm_start = (uint64_t)tsk_env;
    vma->vm_end = vma->vm_start + PAGE_SIZE;
    add_vma(tsk->mm, vma);

    set_cr3(task_cr3);
    //printk("adder: %p\n", new_stack);
    set_cr3(kern_cr3);
    /* Copy envp the same way. */
    for (int i = 0; envp[i] != NULL; i++, new_stack++) {
        set_cr3(task_cr3);
        *new_stack = (uint64_t)tsk_env;
        set_cr3(kern_cr3);
        for (int j = 0; *(envp[i]+j) != '\0'; j++, tsk_env++) {
            set_cr3(kern_cr3);
            tmp_c = *(envp[i] + j);
            set_cr3(task_cr3);
            *tsk_env = tmp_c;
            set_cr3(kern_cr3);
        }
        set_cr3(task_cr3);
        *tsk_env = '\0';
        tsk_env++;
        set_cr3(kern_cr3);
    }
    set_cr3(task_cr3);
    *new_stack = 0;
    new_stack++;
    set_cr3(kern_cr3);
}
struct mm_struct* load_elf(char *data, int len, Task *task, pml4_t *proc_pml4)
{
    if (validate_header(data)) {
        pml4_t *kern_pml4 = get_cr3();

        //get the header
        Elf64_Ehdr *hdr = (Elf64_Ehdr*)data;

        //create new mm_struct
        struct mm_struct *mm = (struct mm_struct*)kmalloc_kern(PAGE_SIZE);
        memset(mm, 0, sizeof(struct mm_struct));

        if (hdr->e_shstrndx == 0x00)
            panic("NO STRING TABLE");

        mm->start_code = ((Elf64_Ehdr*)data)->e_entry;

        Elf64_Phdr *prgm_hdr = (Elf64_Phdr*)(data + hdr->e_phoff);
        uint64_t high_addr = 0;
        for (int i = 0; i < hdr->e_phnum; prgm_hdr++, i++) {
            //printk("--------------LOAD-ELF-----------------\n");
            if (prgm_hdr->p_type == PT_LOAD && prgm_hdr->p_filesz > 0) {
                if (prgm_hdr->p_filesz > prgm_hdr->p_memsz) {
                    panic("Bad Elf!!!\n");
                    halt();
                }

                struct vm_area_struct *vma = (struct vm_area_struct*)
                    kmalloc_kern(sizeof(struct vm_area_struct));
                if (kmalloc_vma(proc_pml4, prgm_hdr->p_vaddr, prgm_hdr->p_memsz,
                                USER_SETTINGS) == NULL) {
                    panic("KMALLOC FAILED - elf.c:load_elf:34\n");
                    printk("SIZE: %d\n", prgm_hdr->p_filesz);
                }
                // printk("ELF Virtual memory address: %p\n", prgm_hdr->p_vaddr);

                /* Switch into the process address space to zero and fill the
                 * segment, then switch back. */
                set_cr3(proc_pml4);
                memset((void*)prgm_hdr->p_vaddr, 0, prgm_hdr->p_memsz);
                //printk("memcpy dest: %p src: %p size: %p\n", prgm_hdr->p_vaddr, data + prgm_hdr->p_offset, prgm_hdr->p_filesz);
                memcpy((void*)prgm_hdr->p_vaddr, data + prgm_hdr->p_offset,
                       prgm_hdr->p_filesz);
                //memcpy((void*)prgm_hdr->p_vaddr, data + prgm_hdr->p_offset, prgm_hdr->p_memsz);
                set_cr3(kern_pml4);

                vma->vm_start = prgm_hdr->p_vaddr;
                vma->vm_end = (uint64_t)(prgm_hdr->p_vaddr + prgm_hdr->p_memsz);
                vma->vm_prot = prgm_hdr->p_flags;
                add_vma(mm, vma);
                if (vma->next != NULL) {
                    panic("not null\n");
                    halt();
                }

                if (vma->vm_end > high_addr)
                    high_addr = vma->vm_end;

                if (prgm_hdr->p_vaddr == mm->start_code) {
                    // it's the text section
                    mm->end_code = (uint64_t)(prgm_hdr->p_vaddr + prgm_hdr->p_filesz);
                    mm->start_data = mm->end_code + 1;
                }
            }
        }

        high_addr += PAGE_SIZE;
        high_addr &= PG_ALIGN;
        mm->brk = high_addr;
        mm->start_brk = mm->brk;

        struct vm_area_struct *vma = (struct vm_area_struct*)
            kmalloc_kern(sizeof(struct vm_area_struct));
        vma->vm_start = mm->start_brk;
        vma->vm_end = mm->brk;
        vma->vm_prot = 0;
        add_vma(mm, vma);

        return mm;
    } else {
        return NULL;
    }
}
int sys_fork()
{
    update_user_to_system();
    int PID = -1;
    int i;
    int j;

    // creates the child process
    /*
      a) Get a free task_struct for the process. If there is no space for a
         new process, an error will be returned.
      b) Inherit system data: copy the parent's task_union to the child.
         Determine whether it is necessary to modify the page table of the
         parent to access the child's system data. The copy_data function can
         be used to copy.
      c) Initialize field dir_pages_baseAddr with a new directory to store the
         process address space using the allocate_DIR routine.
      d) Search physical pages in which to map logical pages for data+stack of
         the child process (using the alloc_frames function). If there are not
         enough free pages, an error will be returned.
    */

    // a
    if (list_empty(&freequeue)) {
        update_system_to_user(current());
        return -ENOMEM;
    }
    struct list_head *freequeue_head = list_first(&freequeue);
    struct task_struct *child_struct = list_head_to_task_struct(freequeue_head);
    list_del(freequeue_head); // not in freequeue anymore

    // b
    struct task_struct *current_struct = current();
    union task_union *current_union = (union task_union *)current_struct;
    union task_union *child_union = (union task_union *)child_struct;
    copy_data(current_union, child_union, sizeof(union task_union));
    // TODO determine whether it is necessary to modify the page table of the
    // parent to access the child's system data

    // c
    allocate_DIR(child_struct);

    // d
    int physical_pages[NUM_PAG_DATA];
    for (i = 0; i < NUM_PAG_DATA; ++i) {
        physical_pages[i] = alloc_frame();
        if (physical_pages[i] < 0) {
            /* Roll back the frames allocated so far (was free_frame(j),
             * which freed frame numbers 0..i-1 instead of the frames
             * actually allocated). */
            for (j = i-1; j >= 0; j--) {
                free_frame((unsigned int)physical_pages[j]);
            }
            update_system_to_user(current());
            return -EAGAIN;
        }
    }

    /*
      e) Inherit user data:
         i) Create new address space: Access the page table of the child
            process through the directory field in the task_struct to
            initialize it (the get_PT routine can be used):
            A) Page table entries for the system code and data and for the
               user code can be a copy of the page table entries of the parent
               process (they will be shared).
    */
    page_table_entry *child_pt = get_PT(child_struct);
    page_table_entry *parent_pt = get_PT(current_struct);

    int child_logical_address;
    for (child_logical_address = 0;
         child_logical_address < NUM_PAG_KERNEL + NUM_PAG_CODE;
         child_logical_address++) {
        int physical_parent_frame = get_frame(parent_pt, child_logical_address);
        set_ss_pag(child_pt, child_logical_address, physical_parent_frame);
    }

    /*
            B) Page table entries for the user data+stack have to point to new
               allocated pages which hold this region.
    */
    for (; child_logical_address < NUM_PAG_KERNEL + NUM_PAG_CODE + NUM_PAG_DATA;
         child_logical_address++) {
        set_ss_pag(child_pt, child_logical_address,
                   physical_pages[child_logical_address - (NUM_PAG_KERNEL + NUM_PAG_CODE)]);
    }

    /*
         ii) Copy the user data+stack pages from the parent process to the
             child process. The child's physical pages cannot be directly
             accessed because they are not mapped in the parent's page table.
             In addition, they cannot be mapped directly because the logical
             parent process pages are the same. They must therefore be mapped
             in new entries of the page table temporarily (only for the copy).
             Thus, both pages can be accessed simultaneously as follows:
             A) Use temporary free entries on the page table of the parent.
                Use the set_ss_pag and del_ss_pag functions.
             B) Copy data+stack pages.
             C) Free the temporary entries in the page table and flush the TLB
                to really prevent the parent process from accessing the child
                pages.
    */
    // logical address of system code + system data + user code
    // == logical address of the start of user data
    int parent_log = NUM_PAG_KERNEL + NUM_PAG_CODE;
    for (i = 0; i < NUM_PAG_DATA; ++i) {
        set_ss_pag(parent_pt, parent_log + NUM_PAG_DATA + i, physical_pages[i]);
        copy_data((void *)((parent_log + i) * PAGE_SIZE),
                  (void *)((parent_log + NUM_PAG_DATA + i) * PAGE_SIZE),
                  PAGE_SIZE);
        del_ss_pag(parent_pt, parent_log + NUM_PAG_DATA + i);
    }
    //set_cr3(parent_pt); // flushing the TLB this way failed: it passed the
    // page table instead of the page directory that CR3 expects
    set_cr3(get_DIR(current_struct)); // flush the TLB

    /*
      f) Assign a new PID to the process. The PID must be different from its
         position in the task_array table.
      g) Initialize the fields of the task_struct that are not common to the
         child.
         i) Think about the register or registers that will not be common in
            the returning of the child process and modify its content in the
            system stack so that each one receives its values when the
            context is restored.
      h) Prepare the child stack emulating a call to context_switch and be
         able to restore its context in a known position. The stack of the
         new process must be forged so it can be restored at some point in
         the future by a task_switch. In fact the new process has to restore
         its hardware context and continue the execution of the user process,
         so you can create a routine ret_from_fork which does exactly this.
         And use it as the restore point like in the idle process
         initialization 4.4.
      i) Insert the new process into the ready list: readyqueue. This list
         will contain all processes that are ready to execute but there is no
         processor to run them.
      j) Return the pid of the child process.
    */
    if (_PID == INT_MAX) {
        update_system_to_user(current());
        return -1; // please restart
    }
    child_struct->PID = _PID++;
    PID = child_struct->PID;

    /*
     * 0                 -19
     * ret_from_fork     -18
     * sys_call_handler  -17
     * SAVEALL           -16
     * iret               -5
     */
    child_struct->kernel_esp = (unsigned long *)&child_union->stack[KERNEL_STACK_SIZE-19];
    child_union->stack[KERNEL_STACK_SIZE-19] = 0;
    child_union->stack[KERNEL_STACK_SIZE-18] = (int)ret_from_fork;

    list_add_tail(&child_struct->list, &readyqueue);

    child_struct->stats.elapsed_total_ticks = get_ticks();
    child_struct->stats.user_ticks = 0;
    child_struct->stats.system_ticks = 0;
    child_struct->stats.blocked_ticks = 0;
    child_struct->stats.ready_ticks = 0;
    child_struct->stats.total_trans = 0;
    child_struct->stats.remaining_ticks = 0;
    // last_forked_child = child_struct = 0;

    update_system_to_user(current());
    return PID;
}
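Step ii)C above relies on the fact that rewriting CR3 flushes the whole (non-global) TLB; after `del_ss_pag` the stale translations for the temporary entries must be discarded or the parent could still reach the child's pages. A minimal sketch of the two standard x86 invalidation options follows; `flush_tlb_all`/`flush_tlb_page` are hypothetical helper names, but the instructions are the real ones:

/* Full TLB flush: rewrite CR3 with its current value. This is what
 * set_cr3(get_DIR(current_struct)) achieves in the fork code above. */
static inline void flush_tlb_all(void) {
    unsigned long cr3;
    __asm__ __volatile__("mov %%cr3, %0" : "=r"(cr3));
    __asm__ __volatile__("mov %0, %%cr3" :: "r"(cr3) : "memory");
}

/* Targeted flush: invalidate only the translation for one linear address
 * (invlpg has existed since the i486), cheaper when few pages changed. */
static inline void flush_tlb_page(void *addr) {
    __asm__ __volatile__("invlpg (%0)" :: "r"(addr) : "memory");
}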
// sets the CR3 register with the start address of the page structure for process # [index]
void set_pdir_base(unsigned int index)
{
    set_cr3(PDirPool[index]);
}
/**
 * Set up the Boot Page-Directory and Tables, and enable Paging.
 *
 * This function is executed at the physical Kernel address.
 * Therefore it can not use global variables or switch() statements.
 **/
void paging_init(void)
{
    uint index;
    uint loop;
    uint* table;

    //
    // Boot Page-Directory
    //
    uint* dir = (uint*)0x124000;
    index = 0;

    // The 1st Page-Table.
    dir[index++] = 0x125000 | X86_PAGE_PRESENT | X86_PAGE_WRITE;

    // The rest of the tables till 3GB are not present.
    for (loop = 0; loop < 768 - 1; loop++)
        dir[index++] = 0; // Not present.

    // The 2nd Page-Table.
    dir[index++] = 0x126000 | X86_PAGE_PRESENT | X86_PAGE_WRITE;

    // The rest of the tables are not present.
    for (loop = 0; loop < 256 - 1; loop++)
        dir[index++] = 0;

    //
    // Page-Table 1.
    //
    table = (uint*)0x125000;
    index = 0;

    // Map the first 2MB (512 pages).
    for (loop = 0; loop < 512; loop++)
        table[index++] = (PAGE_SIZE * loop) | X86_PAGE_PRESENT | X86_PAGE_WRITE;

    // The rest is not present.
    for (loop = 0; loop < 512; loop++)
        table[index++] = 0;

    //
    // Page-Table 2.
    //
    table = (uint*)0x126000;
    index = 0;

    // Map the first 2MB (512 pages).
    for (loop = 0; loop < 512; loop++)
        table[index++] = (PAGE_SIZE * loop) | X86_PAGE_PRESENT | X86_PAGE_WRITE;

    // The rest is not present.
    for (loop = 0; loop < 512; loop++)
        table[index++] = 0;

    // Load cr3 (Page-Directory Base Register) with the Page-Directory we are
    // going to use, which is the Page-Directory from the Kernel process.
    set_cr3((uint)dir);

    //
    // Enable paging.
    //
    set_cr0(get_cr0() | 0x80000000); // Set the paging bit.
}
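As a worked example of the mapping this sets up (pure arithmetic on the constants above, assuming standard 32-bit 4 KB paging): a higher-half address such as 0xC0100000 splits into directory index 768, table index 256 and offset 0; entry 768 points at the table at 0x126000, whose entry 256 holds PAGE_SIZE * 256 = 0x00100000, so a kernel linked at 3 GB runs out of the first 2 MB of physical RAM.

#include <stdio.h>
#include <stdint.h>

/* Decompose a 32-bit virtual address the way the boot tables above do. */
int main(void) {
    uint32_t vaddr = 0xC0100000;                /* typical higher-half address */
    uint32_t dir_index = vaddr >> 22;           /* 768 -> table at 0x126000    */
    uint32_t tbl_index = (vaddr >> 12) & 0x3FF; /* 256                         */
    uint32_t offset    = vaddr & 0xFFF;         /* 0                           */
    /* Table 2 entry 256 maps physical frame 0x00100000, so the higher-half
     * address resolves into the second physical megabyte. */
    printf("dir=%u table=%u offset=0x%03X -> phys 0x%08X\n",
           dir_index, tbl_index, offset, (tbl_index << 12) | offset);
    return 0;
}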