/*
 * The implementation of fork(2). Once this works,
 * you're practically home free. This is what the
 * entirety of Weenix has been leading up to.
 * Go forth and conquer.
 *
 * Creates a child process that is a copy-on-write duplicate of the
 * current process, gives it a cloned thread set up to resume in
 * userland, and makes that thread runnable.
 *
 * regs   - saved user-mode register state of the calling thread; the
 *          child's user context is built from it (r_eax forced to 0 so
 *          the child observes fork() returning 0).
 * return - the child's pid (parent's view of fork()).
 */
int do_fork(struct regs *regs)
{
    /* Sanity checks: must be called from a running process with valid regs. */
    KASSERT(regs != NULL);
    dbg(DBG_PRINT, "(GRADING3A 7.a)\n");
    KASSERT(curproc != NULL);
    dbg(DBG_PRINT, "(GRADING3A 7.a)\n");
    KASSERT(curproc->p_state == PROC_RUNNING);
    dbg(DBG_PRINT, "(GRADING3A 7.a)\n");

    proc_t *childproc = proc_create("childproc");
    KASSERT(childproc);

    /* Clone the parent's address map and install it in the child,
     * discarding the empty map proc_create() gave the child. */
    vmmap_t *cmap = vmmap_clone(curproc->p_vmmap);
    cmap->vmm_proc = childproc;
    vmmap_destroy(childproc->p_vmmap); /* release the child's original (empty) map */
    childproc->p_vmmap = cmap;
    childproc->p_status = curproc->p_status;
    /* Child inherits the parent's heap bounds. */
    childproc->p_brk = curproc->p_brk;
    childproc->p_start_brk = curproc->p_start_brk;
    KASSERT(childproc->p_state == PROC_RUNNING);
    dbg(DBG_PRINT, "(GRADING3A 7.a)\n");
    KASSERT(childproc->p_pagedir != NULL);
    dbg(DBG_PRINT, "(GRADING3A 7.a)\n");

    /* Walk parent and child vmarea lists in lockstep. vmmap_clone() is
     * assumed to have produced a child list parallel to the parent's
     * (one child area per parent area, in the same order) — the paired
     * iteration below depends on that. */
    list_link_t *pindex = NULL;
    list_link_t *cindex = NULL;
    for (pindex = (curproc->p_vmmap->vmm_list.l_next),
         cindex = (childproc->p_vmmap->vmm_list.l_next);
         pindex != &(curproc->p_vmmap->vmm_list);
         pindex = pindex->l_next, cindex = cindex->l_next) {
        vmarea_t *temp_pvmarea = list_item(pindex, vmarea_t, vma_plink);
        vmarea_t *tpvmarea1;
        if (temp_pvmarea->vma_flags & MAP_PRIVATE) {
            /* Private mapping: interpose a fresh shadow object on each
             * side (the Weenix copy-on-write mechanism), both shadowing
             * the parent's original object. */
            mmobj_t *shadow_obj = shadow_create();
            shadow_obj->mmo_shadowed = temp_pvmarea->vma_obj;
            shadow_obj->mmo_un.mmo_bottom_obj = temp_pvmarea->vma_obj->mmo_un.mmo_bottom_obj;
            /* Parent's area now points at its shadow; the reference the
             * area held on the old object transfers to shadow_obj. */
            temp_pvmarea->vma_obj = shadow_obj;
            tpvmarea1 = list_item(cindex, vmarea_t, vma_plink);
            mmobj_t *chshadow_obj = shadow_create();
            /* Child's shadow shadows the same underlying object
             * (shadow_obj->mmo_shadowed == the original object). */
            chshadow_obj->mmo_shadowed = temp_pvmarea->vma_obj->mmo_shadowed;
            chshadow_obj->mmo_un.mmo_bottom_obj = temp_pvmarea->vma_obj->mmo_un.mmo_bottom_obj;
            /* Extra reference for the child's shadow chain. */
            chshadow_obj->mmo_shadowed->mmo_ops->ref(chshadow_obj->mmo_shadowed);
            list_insert_tail(&(chshadow_obj->mmo_un.mmo_bottom_obj->mmo_un.mmo_vmas),
                             &tpvmarea1->vma_olink);
            tpvmarea1->vma_obj = chshadow_obj;
        } else {
            /* Shared mapping: child references the same object directly. */
            tpvmarea1 = list_item(cindex, vmarea_t, vma_plink);
            tpvmarea1->vma_obj = temp_pvmarea->vma_obj;
            tpvmarea1->vma_obj->mmo_ops->ref(tpvmarea1->vma_obj);
            list_insert_tail(&(tpvmarea1->vma_obj->mmo_un.mmo_vmas),
                             &tpvmarea1->vma_olink);
        }
    }

    /* Drop all of the parent's user-space translations so future
     * accesses fault and resolve through the new shadow chains. */
    pt_unmap_range(curproc->p_pagedir, USER_MEM_LOW, USER_MEM_HIGH);
    tlb_flush_all();

    /* Clone the calling thread for the child and hook it in. */
    kthread_t *chthread = kthread_clone(curthr);
    chthread->kt_proc = childproc;
    list_insert_tail(&(childproc->p_threads), &(chthread->kt_plink));
    (chthread->kt_ctx).c_pdptr = childproc->p_pagedir;
    (chthread->kt_ctx).c_eip = (uint32_t)(userland_entry);
    /* Child must see fork() return 0: zero r_eax before the regs are
     * copied onto the child's kernel stack by fork_setup_stack(). */
    regs->r_eax = 0;
    (chthread->kt_ctx).c_esp = fork_setup_stack(regs, chthread->kt_kstack);
    (chthread->kt_ctx).c_kstack = (uintptr_t)chthread->kt_kstack;
    (chthread->kt_ctx).c_kstacksz = DEFAULT_STACK_SIZE;
    KASSERT(chthread->kt_kstack != NULL);
    dbg(DBG_PRINT, "(GRADING3A 7.a)\n");

    /* Duplicate the open-file table; each shared file_t gains a ref. */
    int i = 0;
    while (i < NFILES) {
        childproc->p_files[i] = curproc->p_files[i];
        if (childproc->p_files[i] != NULL) {
            fref(childproc->p_files[i]);
        }
        i++;
    }
    /* (cwd inheritance is handled inside proc_create().) */

    sched_make_runnable(chthread);
    /* Parent's view of fork(): the child's pid. */
    return childproc->p_pid;
}
/*
 * Load an ELF32 executable (and, if it requests one, its dynamic
 * interpreter) into a fresh vmmap, swap that map into curproc, and
 * report the initial user eip/esp.
 *
 * filename - name recorded into curproc->p_comm for debugging.
 * fd       - open descriptor for the executable.
 * argv     - NULL-terminated argument vector (kernel addresses).
 * envp     - NULL-terminated environment vector (kernel addresses).
 * eip      - out: user instruction pointer to begin execution at
 *            (program entry, or interpreter entry when dynamic).
 * esp      - out: initial user stack pointer.
 *
 * Returns 0 on success or a negative errno. On failure the current
 * process's address space is untouched; all intermediate resources are
 * released via the goto-cleanup ladder at `done`. On success the OLD
 * vmmap is what gets destroyed at `done` (the map/tempmap swap below).
 */
static int _elf32_load(const char *filename, int fd, char *const argv[],
                       char *const envp[], uint32_t *eip, uint32_t *esp)
{
    int err = 0;
    Elf32_Ehdr header;
    Elf32_Ehdr interpheader;

    /* variables to clean up on failure */
    vmmap_t *map = NULL;
    file_t *file = NULL;
    char *pht = NULL;
    char *interpname = NULL;
    int interpfd = -1;
    file_t *interpfile = NULL;
    char *interppht = NULL;
    Elf32_auxv_t *auxv = NULL;
    char *argbuf = NULL;

    uintptr_t entry;

    file = fget(fd);
    KASSERT(NULL != file);

    /* Load and verify the ELF header */
    if (0 > (err = _elf32_load_ehdr(fd, &header, 0))) {
        goto done;
    }

    if (NULL == (map = vmmap_create())) {
        err = -ENOMEM;
        goto done;
    }

    size_t phtsize = header.e_phentsize * header.e_phnum;
    if (NULL == (pht = kmalloc(phtsize))) {
        err = -ENOMEM;
        goto done;
    }
    /* Read in the program header table */
    if (0 > (err = _elf32_load_phtable(fd, &header, pht, phtsize))) {
        goto done;
    }
    /* Load the segments in the program header table */
    if (0 > (err = _elf32_map_progsegs(file->f_vnode, map, &header, pht, 0))) {
        goto done;
    }

    Elf32_Phdr *phinterp = NULL;
    /* Check if program requires an interpreter */
    if (0 > (err = _elf32_find_phinterp(&header, pht, &phinterp))) {
        goto done;
    }

    /* Calculate program bounds for future reference */
    void *proglow;
    void *proghigh;
    _elf32_calc_progbounds(&header, pht, &proglow, &proghigh);

    entry = (uintptr_t) header.e_entry;

    /* if an interpreter was requested load it */
    if (NULL != phinterp) {
        /* read the file name of the interpreter from the binary */
        if (0 > (err = do_lseek(fd, phinterp->p_offset, SEEK_SET))) {
            goto done;
        } else if (NULL == (interpname = kmalloc(phinterp->p_filesz))) {
            err = -ENOMEM;
            goto done;
        } else if (0 > (err = do_read(fd, interpname, phinterp->p_filesz))) {
            goto done;
        }
        /* a short read means a malformed binary */
        if (err != (int)phinterp->p_filesz) {
            err = -ENOEXEC;
            goto done;
        }

        /* open the interpreter */
        dbgq(DBG_ELF, "ELF Interpreter: %*s\n", phinterp->p_filesz, interpname);
        if (0 > (interpfd = do_open(interpname, O_RDONLY))) {
            err = interpfd;
            goto done;
        }
        kfree(interpname);
        interpname = NULL;

        interpfile = fget(interpfd);
        KASSERT(NULL != interpfile);

        /* Load and verify the interpreter ELF header */
        if (0 > (err = _elf32_load_ehdr(interpfd, &interpheader, 1))) {
            goto done;
        }

        size_t interpphtsize = interpheader.e_phentsize * interpheader.e_phnum;
        if (NULL == (interppht = kmalloc(interpphtsize))) {
            err = -ENOMEM;
            goto done;
        }
        /* Read in the program header table */
        if (0 > (err = _elf32_load_phtable(interpfd, &interpheader,
                                           interppht, interpphtsize))) {
            goto done;
        }

        /* Interpreter shouldn't itself need an interpreter */
        Elf32_Phdr *interpphinterp;
        if (0 > (err = _elf32_find_phinterp(&interpheader, interppht,
                                            &interpphinterp))) {
            goto done;
        }
        if (NULL != interpphinterp) {
            err = -EINVAL;
            goto done;
        }

        /* Calculate the interpreter program size */
        void *interplow;
        void *interphigh;
        _elf32_calc_progbounds(&interpheader, interppht, &interplow, &interphigh);
        uint32_t interpnpages =
            ADDR_TO_PN(PAGE_ALIGN_UP(interphigh)) - ADDR_TO_PN(interplow);

        /* Find space for the interpreter.
         * This is the first pn at which the interpreter will be mapped */
        uint32_t interppagebase =
            (uint32_t) vmmap_find_range(map, interpnpages, VMMAP_DIR_HILO);
        if ((uint32_t) - 1 == interppagebase) {
            err = -ENOMEM;
            goto done;
        }

        /* Base address at which the interpreter begins on that page */
        void *interpbase = (void *)((uintptr_t)PN_TO_ADDR(interppagebase) +
                                    PAGE_OFFSET(interplow));
        /* Offset from "expected base" in number of pages */
        int32_t interpoff = (int32_t) interppagebase - (int32_t) ADDR_TO_PN(interplow);

        /* For a dynamic executable, execution starts in the interpreter. */
        entry = (uintptr_t) interpbase +
                ((uintptr_t) interpheader.e_entry - (uintptr_t) interplow);

        /* Load the interpreter program header and map in its segments */
        if (0 > (err = _elf32_map_progsegs(interpfile->f_vnode, map,
                                           &interpheader, interppht, interpoff))) {
            goto done;
        }

        /* Build the ELF aux table.
         * Need to hold AT_PHDR, AT_PHENT, AT_PHNUM, AT_ENTRY, AT_BASE,
         * AT_PAGESZ, AT_NULL */
        if (NULL == (auxv = (Elf32_auxv_t *) kmalloc(7 * sizeof(Elf32_auxv_t)))) {
            err = -ENOMEM;
            goto done;
        }
        Elf32_auxv_t *auxvent = auxv;

        /* Add all the necessary entries */
        auxvent->a_type = AT_PHDR;
        auxvent->a_un.a_ptr = pht;
        auxvent++;
        auxvent->a_type = AT_PHENT;
        auxvent->a_un.a_val = header.e_phentsize;
        auxvent++;
        auxvent->a_type = AT_PHNUM;
        auxvent->a_un.a_val = header.e_phnum;
        auxvent++;
        auxvent->a_type = AT_ENTRY;
        auxvent->a_un.a_ptr = (void *) header.e_entry;
        auxvent++;
        auxvent->a_type = AT_BASE;
        auxvent->a_un.a_ptr = interpbase;
        auxvent++;
        auxvent->a_type = AT_PAGESZ;
        auxvent->a_un.a_val = PAGE_SIZE;
        auxvent++;
        auxvent->a_type = AT_NULL;
    } else {
        /* Just put AT_NULL (we don't really need this at all) */
        if (NULL == (auxv = (Elf32_auxv_t *) kmalloc(sizeof(Elf32_auxv_t)))) {
            err = -ENOMEM;
            goto done;
        }
        auxv->a_type = AT_NULL;
    }

    /* Allocate a stack. We put the stack immediately below the program text.
     * (in the Intel x86 ELF supplement pp 59 "example stack", that is where the
     * stack is located). I suppose we can add this "extra page for magic data" too */
    uint32_t stack_lopage = ADDR_TO_PN(proglow) - (DEFAULT_STACK_SIZE / PAGE_SIZE) - 1;
    err = vmmap_map(map, NULL, stack_lopage, (DEFAULT_STACK_SIZE / PAGE_SIZE) + 1,
                    PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, 0, 0, NULL);
    KASSERT(0 == err);
    dbg(DBG_ELF, "Mapped stack at low addr 0x%p, size %#x\n",
        PN_TO_ADDR(stack_lopage), DEFAULT_STACK_SIZE + PAGE_SIZE);

    /* Copy out arguments onto the user stack */
    int argc, envc, auxc;
    size_t argsize = _elf32_calc_argsize(argv, envp, auxv, phtsize,
                                         &argc, &envc, &auxc);
    /* Make sure it fits on the stack */
    if (argsize >= DEFAULT_STACK_SIZE) {
        err = -E2BIG;
        goto done;
    }
    /* Copy arguments into kernel buffer */
    if (NULL == (argbuf = (char *) kmalloc(argsize))) {
        err = -ENOMEM;
        goto done;
    }

    /* Calculate where in user space we start putting the args. */
    void *arglow = (void *)((uintptr_t)(((char *) proglow) - argsize) & ~PTR_MASK);

    /* Copy everything into the user address space, modifying addresses in
     * argv, envp, and auxv to be user addresses as we go. */
    _elf32_load_args(map, arglow, argsize, argbuf, argv, envp, auxv,
                     argc, envc, auxc, phtsize);

    dbg(DBG_ELF, "Past the point of no return. Swapping to map at 0x%p, setting brk to 0x%p\n",
        map, proghigh);
    /* the final threshold / What warm unspoken secrets will we learn? / Beyond
     * the point of no return ... */

    /* Give the process the new mappings; swap so the OLD map is what
     * `done` destroys. */
    vmmap_t *tempmap = curproc->p_vmmap;
    curproc->p_vmmap = map;
    map = tempmap; /* So the old maps are cleaned up */
    curproc->p_vmmap->vmm_proc = curproc;
    map->vmm_proc = NULL;

    /* Flush the process pagetables and TLB */
    pt_unmap_range(curproc->p_pagedir, USER_MEM_LOW, USER_MEM_HIGH);
    tlb_flush_all();

    /* Set the process break and starting break (immediately after the mapped-in
     * text/data/bss from the executable) */
    curproc->p_brk = proghigh;
    curproc->p_start_brk = proghigh;

    /* BUGFIX: strncpy does not NUL-terminate when the source is at least
     * PROC_NAME_LEN chars; force termination so p_comm is always a
     * valid C string. */
    strncpy(curproc->p_comm, filename, PROC_NAME_LEN);
    curproc->p_comm[PROC_NAME_LEN - 1] = '\0';

    /* Tell the caller the correct stack pointer and instruction
     * pointer to begin execution in user space */
    *eip = (uint32_t) entry;
    *esp = ((uint32_t) arglow) - 4; /* Space on the user stack for the (garbage) return address */
    /* Note that the return address will be fixed by the userland entry code,
     * whether in static or dynamic */

    /* And we're done */
    err = 0;

done:
    /* Single cleanup path: releases whatever was acquired. On success,
     * `map` is the process's previous vmmap (see swap above). */
    if (NULL != map) {
        vmmap_destroy(map);
    }
    if (NULL != file) {
        fput(file);
    }
    if (NULL != pht) {
        kfree(pht);
    }
    if (NULL != interpname) {
        kfree(interpname);
    }
    if (0 <= interpfd) {
        do_close(interpfd);
    }
    if (NULL != interpfile) {
        fput(interpfile);
    }
    if (NULL != interppht) {
        kfree(interppht);
    }
    if (NULL != auxv) {
        kfree(auxv);
    }
    if (NULL != argbuf) {
        kfree(argbuf);
    }
    return err;
}