/*
 * as_define_stack - define the vm_object for the user-level stack.
 *
 * On success, *stackptr receives the initial user-level stack pointer
 * (USERSTACK). Returns 0 on success, or the error from as_define_region.
 */
int
as_define_stack(struct addrspace *as, vaddr_t *stackptr)
{
	int result;

	/*
	 * Make a stack vm_object.
	 *
	 * The stack is USERSTACKSIZE bytes, which is defined in
	 * machine/vm.h. This is generally quite large, so it is
	 * zerofilled to make swap use efficient and fork reasonably
	 * fast.
	 */
	result = as_define_region(as, USERSTACKBASE, USERSTACKSIZE,
				  USERSTACKREDZONE, 1, 1, 0);
	if (result) {
		return result;
	}

	/* Hand back the initial user-level stack pointer. */
	*stackptr = USERSTACK;
	return 0;
}
/*
 * load_segment_into_vspace - lazily map an ELF segment into a vspace.
 *
 * Defines a region of SEGMENT_SIZE bytes at DST in DEST_AS, then walks the
 * file-backed prefix (FILE_SIZE bytes starting at OFFSET in SRC) one page at
 * a time, attaching an mmap record to each page's frame so the contents are
 * fetched on demand rather than copied now. The (segment_size - file_size)
 * tail is left untouched here; presumably it is zero-filled on fault —
 * TODO(review): confirm against the fault handler.
 *
 * Returns true on success, false on any failure. NOTE(review): pages and
 * frames created before a mid-loop failure are not torn down here; the
 * caller is assumed to destroy the whole address space on error — confirm.
 */
int load_segment_into_vspace(addrspace_t dest_as, seL4_CPtr src,
			     unsigned long offset, unsigned long segment_size,
			     unsigned long file_size, unsigned long dst,
			     unsigned long permissions)
{
	/* The file-backed portion can never exceed the in-memory segment. */
	if (file_size > segment_size) {
		return false;
	}

	struct as_region* reg = as_define_region(dest_as, dst, segment_size,
						 permissions, REGION_GENERIC);
	if (!reg) {
		return false;
	}

	unsigned long pos = 0;
	while (pos < file_size) {
		seL4_Word vpage = PAGE_ALIGN(dst);

		struct pt_entry* page = page_fetch_new(dest_as,
						       reg->attributes,
						       dest_as->pagetable,
						       vpage);
		if (!page) {
			return false;
		}

		page->frame = frame_new();
		if (!page->frame) {
			return false;
		}

		/* Bytes from dst up to the end of its containing page. */
		int nbytes = PAGESIZE - (dst & PAGEMASK);

		/* mmap the page to the file */
		page->frame->file = frame_create_mmap(src, dst - vpage, offset,
						      MIN(nbytes,
							  file_size - pos));
		if (!page->frame->file) {
			/*
			 * BUG FIX: this was assert(page->frame->file), which
			 * panics the whole system on an allocation failure.
			 * Fail gracefully like every other error path here.
			 */
			return false;
		}

		offset += nbytes;
		pos += nbytes;
		dst += nbytes;
	}

	return true;
}
/*
 * sys_sbrk - adjust the current process's heap break by AMOUNT bytes.
 *
 * AMOUNT arrives as a userptr_t but is treated as a signed byte count
 * throughout (hence the (int) casts). On success *retval receives the
 * previous break (the start of the newly mapped region for growth, or the
 * current break for sbrk(0)); on failure *retval is -1 and an errno-style
 * code is returned.
 *
 * NOTE(review): growth assumes as_define_region advances as->as_addrPtr,
 * since it is never updated here — confirm against as_define_region.
 */
int sys_sbrk(userptr_t amount, int32_t* retval)
{
	*retval = 0;

	/* Only page-aligned adjustments are accepted. */
	if ((int) amount % PAGE_SIZE != 0) {
		*retval = -1;
		return EINVAL;
	}

	/* Cap absurd requests: 1024*256 pages = 1 GiB (fits in int). */
	if ((int) amount > 1024 * 256 * PAGE_SIZE) {
		*retval = -1;
		return ENOMEM;
	}

	struct addrspace* as = proc_getas();
	//kprintf("SBRK CALLED WITH PARAMS %x, newregion start = %x\n", (int) amount,
	//	as->as_addrPtr);

	/* Lazily record the heap base on the first sbrk call. */
	if (as->as_heapBase == 0) {
		as->as_heapBase = as->as_addrPtr;
	}

	/* sbrk(0) just reports the current break. */
	if (amount == 0) {
		*retval = as->as_addrPtr;
		return 0;
	}

	if ((int) amount < 0) {
		/* Shrinking must not move the break below the heap base. */
		if ((int) (amount + as->as_addrPtr) < (int) (as->as_heapBase)) {
			*retval = -1;
			return EINVAL;
		}
		return reduceHeapSize(amount, retval, as);
	}

	/* Grow: map a new readable/writable region at the current break. */
	vaddr_t newRegionStart = as->as_addrPtr;
	int err = as_define_region(as, newRegionStart, (int) amount, 1, 1, 0);
	if (err) {
		/*
		 * BUG FIX: the original discarded as_define_region's return
		 * value, reporting success to userland even when the region
		 * could not be created.
		 */
		*retval = -1;
		return err;
	}
	*retval = newRegionStart;
	return 0;
}
/*
 * Load an ELF executable user program into the current address space.
 *
 * Returns the entry point (initial PC) for the program in ENTRYPOINT.
 *
 * Works in two passes over the program headers: the first defines all
 * the address-space regions, the second (after as_prepare_load) copies
 * the segment contents in. Returns 0 on success or an errno-style code.
 */
int
load_elf(struct vnode *v, vaddr_t *entrypoint)
{
	Elf_Ehdr eh;   /* Executable header */
	Elf_Phdr ph;   /* "Program header" = segment header */
	int result, i;
	struct iovec iov;
	struct uio ku;
	struct addrspace *as;

	as = curproc_getas();

	/*
	 * Read the executable header from offset 0 in the file.
	 */
	uio_kinit(&iov, &ku, &eh, sizeof(eh), 0, UIO_READ);
	result = VOP_READ(v, &ku);
	if (result) {
		return result;
	}

	if (ku.uio_resid != 0) {
		/* short read; problem with executable? */
		kprintf("ELF: short read on header - file truncated?\n");
		return ENOEXEC;
	}

	/*
	 * Check to make sure it's a 32-bit ELF-version-1 executable
	 * for our processor type. If it's not, we can't run it.
	 *
	 * Ignore EI_OSABI and EI_ABIVERSION - properly, we should
	 * define our own, but that would require tinkering with the
	 * linker to have it emit our magic numbers instead of the
	 * default ones. (If the linker even supports these fields,
	 * which were not in the original elf spec.)
	 */
	if (eh.e_ident[EI_MAG0] != ELFMAG0 ||
	    eh.e_ident[EI_MAG1] != ELFMAG1 ||
	    eh.e_ident[EI_MAG2] != ELFMAG2 ||
	    eh.e_ident[EI_MAG3] != ELFMAG3 ||
	    eh.e_ident[EI_CLASS] != ELFCLASS32 ||
	    eh.e_ident[EI_DATA] != ELFDATA2MSB ||
	    eh.e_ident[EI_VERSION] != EV_CURRENT ||
	    eh.e_version != EV_CURRENT ||
	    eh.e_type!=ET_EXEC ||
	    eh.e_machine!=EM_MACHINE) {
		return ENOEXEC;
	}

	/*
	 * Go through the list of segments and set up the address space.
	 *
	 * Ordinarily there will be one code segment, one read-only
	 * data segment, and one data/bss segment, but there might
	 * conceivably be more. You don't need to support such files
	 * if it's unduly awkward to do so.
	 *
	 * Note that the expression eh.e_phoff + i*eh.e_phentsize is
	 * mandated by the ELF standard - we use sizeof(ph) to load,
	 * because that's the structure we know, but the file on disk
	 * might have a larger structure, so we must use e_phentsize
	 * to find where the phdr starts.
	 */
	for (i=0; i<eh.e_phnum; i++) {
		off_t offset = eh.e_phoff + i*eh.e_phentsize;
		uio_kinit(&iov, &ku, &ph, sizeof(ph), offset, UIO_READ);

		result = VOP_READ(v, &ku);
		if (result) {
			return result;
		}

		if (ku.uio_resid != 0) {
			/* short read; problem with executable? */
			kprintf("ELF: short read on phdr - file truncated?\n");
			return ENOEXEC;
		}

		switch (ph.p_type) {
		    case PT_NULL: /* skip */ continue;
		    case PT_PHDR: /* skip */ continue;
		    case PT_MIPS_REGINFO: /* skip */ continue;
		    case PT_LOAD: break;
		    default:
			kprintf("loadelf: unknown segment type %d\n",
				ph.p_type);
			return ENOEXEC;
		}

		/* Region permissions come straight from the phdr flags. */
		result = as_define_region(as,
					  ph.p_vaddr, ph.p_memsz,
					  ph.p_flags & PF_R,
					  ph.p_flags & PF_W,
					  ph.p_flags & PF_X);
		if (result) {
			return result;
		}
	}

	result = as_prepare_load(as);
	if (result) {
		return result;
	}

	/*
	 * Now actually load each segment. The phdrs are re-read from
	 * the file because ph was reused in the first pass.
	 */
	for (i=0; i<eh.e_phnum; i++) {
		off_t offset = eh.e_phoff + i*eh.e_phentsize;
		uio_kinit(&iov, &ku, &ph, sizeof(ph), offset, UIO_READ);

		result = VOP_READ(v, &ku);
		if (result) {
			return result;
		}

		if (ku.uio_resid != 0) {
			/* short read; problem with executable? */
			kprintf("ELF: short read on phdr - file truncated?\n");
			return ENOEXEC;
		}

		switch (ph.p_type) {
		    case PT_NULL: /* skip */ continue;
		    case PT_PHDR: /* skip */ continue;
		    case PT_MIPS_REGINFO: /* skip */ continue;
		    case PT_LOAD: break;
		    default:
			kprintf("loadelf: unknown segment type %d\n",
				ph.p_type);
			return ENOEXEC;
		}

		result = load_segment(as, v, ph.p_offset, ph.p_vaddr,
				      ph.p_memsz, ph.p_filesz,
				      ph.p_flags & PF_X);
		if (result) {
			return result;
		}
	}

	result = as_complete_load(as);
	if (result) {
		return result;
	}

	*entrypoint = eh.e_entry;

	return 0;
}
/*
 * Load an ELF executable user program into the current address space.
 *
 * Returns the entry point (initial PC) for the program in ENTRYPOINT.
 *
 * Demand-paging variant: regions are defined in a first pass, then each
 * PT_LOAD segment is brought in via dynamic_load_segment (page by page
 * for segments larger than one page), with the file-less bss tail
 * allocated as zero pages directly. Marks as->loadelf_done when finished.
 */
int
load_elf(struct vnode *v, vaddr_t *entrypoint)
{
	Elf_Ehdr eh;   /* Executable header */
	Elf_Phdr ph;   /* "Program header" = segment header */
	int result, i;
	struct iovec iov;
	struct uio ku;
	struct addrspace *as = curthread->t_addrspace;

	/*
	 * Read the executable header from offset 0 in the file.
	 */
	uio_kinit(&iov, &ku, &eh, sizeof(eh), 0, UIO_READ);
	result = VOP_READ(v, &ku);
	if (result) {
		return result;
	}

	if (ku.uio_resid != 0) {
		/* short read; problem with executable? */
		kprintf("ELF: short read on header - file truncated?\n");
		return ENOEXEC;
	}

	/*
	 * Check to make sure it's a 32-bit ELF-version-1 executable
	 * for our processor type. If it's not, we can't run it.
	 *
	 * Ignore EI_OSABI and EI_ABIVERSION - properly, we should
	 * define our own, but that would require tinkering with the
	 * linker to have it emit our magic numbers instead of the
	 * default ones. (If the linker even supports these fields,
	 * which were not in the original elf spec.)
	 */
	if (eh.e_ident[EI_MAG0] != ELFMAG0 ||
	    eh.e_ident[EI_MAG1] != ELFMAG1 ||
	    eh.e_ident[EI_MAG2] != ELFMAG2 ||
	    eh.e_ident[EI_MAG3] != ELFMAG3 ||
	    eh.e_ident[EI_CLASS] != ELFCLASS32 ||
	    eh.e_ident[EI_DATA] != ELFDATA2MSB ||
	    eh.e_ident[EI_VERSION] != EV_CURRENT ||
	    eh.e_version != EV_CURRENT ||
	    eh.e_type!=ET_EXEC ||
	    eh.e_machine!=EM_MACHINE) {
		return ENOEXEC;
	}

	/*
	 * Go through the list of segments and set up the address space.
	 *
	 * Ordinarily there will be one code segment, one read-only
	 * data segment, and one data/bss segment, but there might
	 * conceivably be more. You don't need to support such files
	 * if it's unduly awkward to do so.
	 *
	 * Note that the expression eh.e_phoff + i*eh.e_phentsize is
	 * mandated by the ELF standard - we use sizeof(ph) to load,
	 * because that's the structure we know, but the file on disk
	 * might have a larger structure, so we must use e_phentsize
	 * to find where the phdr starts.
	 */
	DEBUG(DB_DEMAND,"Num Of Segements:%d\n",eh.e_phnum);
	for (i=0; i<eh.e_phnum; i++) {
		off_t offset = eh.e_phoff + i*eh.e_phentsize;
		uio_kinit(&iov, &ku, &ph, sizeof(ph), offset, UIO_READ);

		result = VOP_READ(v, &ku);
		if (result) {
			return result;
		}

		if (ku.uio_resid != 0) {
			/* short read; problem with executable? */
			kprintf("ELF: short read on phdr - file truncated?\n");
			return ENOEXEC;
		}

		switch (ph.p_type) {
		    case PT_NULL: /* skip */ continue;
		    case PT_PHDR: /* skip */ continue;
		    case PT_MIPS_REGINFO: /* skip */ continue;
		    case PT_LOAD: break;
		    default:
			kprintf("loadelf: unknown segment type %d\n",
				ph.p_type);
			return ENOEXEC;
		}

		DEBUG(DB_DEMAND, "MEM SZ:%d\n",ph.p_memsz);
		DEBUG(DB_DEMAND, "FILE SZ:%d\n", ph.p_filesz);
		DEBUG(DB_DEMAND, "VADDR:%p\n",(void*)ph.p_vaddr);

		result = as_define_region(curthread->t_addrspace,
					  ph.p_vaddr, ph.p_memsz,
					  ph.p_flags & PF_R,
					  ph.p_flags & PF_W,
					  ph.p_flags & PF_X);
		if (result) {
			return result;
		}
	}

	result = as_prepare_load(curthread->t_addrspace);
	if (result) {
		return result;
	}

	/*
	 * Now actually load each segment.
	 */
	for (i=0; i<eh.e_phnum; i++) {
		off_t offset = eh.e_phoff + i*eh.e_phentsize;
		uio_kinit(&iov, &ku, &ph, sizeof(ph), offset, UIO_READ);
		DEBUG(DB_DEMAND, "SEGMENT: %d\n",1);

		result = VOP_READ(v, &ku);
		if (result) {
			return result;
		}

		if (ku.uio_resid != 0) {
			/* short read; problem with executable?
			 */
			kprintf("ELF: short read on phdr - file truncated?\n");
			return ENOEXEC;
		}

		switch (ph.p_type) {
		    case PT_NULL: /* skip */ continue;
		    case PT_PHDR: /* skip */ continue;
		    case PT_MIPS_REGINFO: /* skip */ continue;
		    case PT_LOAD: break;
		    default:
			kprintf("loadelf: unknown segment type %d\n",
				ph.p_type);
			return ENOEXEC;
		}

		/* NOTE(review): this declaration shadows the outer `as`;
		 * both refer to curthread->t_addrspace so the value is the
		 * same, but the shadowing is worth cleaning up. */
		struct addrspace *as = curthread->t_addrspace;
		vaddr_t va = ph.p_vaddr;
		DEBUG(DB_DEMAND, "Original VA: %p\n", (void*) va);
		DEBUG(DB_DEMAND, "Max VA:%p\n", (void*) (va + ph.p_memsz));
		// va &= PAGE_FRAME;

		/* Look up the existing PTE to recover the permissions set
		 * by as_define_region above.
		 * NOTE(review): pgdir_walk is called with create=false and
		 * the result is dereferenced without a NULL check — confirm
		 * that as_prepare_load guarantees the table exists. */
		struct page_table *pt = pgdir_walk(as,va,false);
		int pt_index = VA_TO_PT_INDEX(va);
		int permissions = PTE_TO_PERMISSIONS(pt->table[pt_index]);
		int small_pages = 0;

		if(ph.p_memsz <= PAGE_SIZE) {
			/* Whole segment fits in one page: load it in one go. */
			DEBUG(DB_DEMAND, "Segement Total Size is < 4k\n");
			// pt = pgdir_walk(as,va,false);
			// pt_index = VA_TO_PT_INDEX(va);
			// permissions = PTE_TO_PERMISSIONS(pt->table[pt_index]);
			result = dynamic_load_segment(v, ph.p_offset, va,
						      ph.p_memsz, ph.p_filesz,
						      ph.p_flags & PF_X,permissions);
			if (result) {
				return result;
			}
			// page->state = DIRTY;
			small_pages = 1;
		}
		else {
			/* Multi-page segment: load the file-backed pages one
			 * at a time, then allocate zero pages for the rest
			 * (bss). Sizes are tracked as signed ints so the
			 * loops can run the counters negative. */
			int filesize = (int) ph.p_filesz;
			int memsize = (int) ph.p_memsz;
			off_t cur_offset = ph.p_offset;
			DEBUG(DB_DEMAND, "Loading >1 Segment\n");
			DEBUG(DB_DEMAND, "Original Parameters:\n");
			DEBUG(DB_DEMAND, "File Size: %d\n",filesize);
			DEBUG(DB_DEMAND, "Mem Size: %d\n", memsize);
			DEBUG(DB_DEMAND, "Offset:%d\n", (int) cur_offset);
			DEBUG(DB_DEMAND, "VA:%p\n", (void*) va);
			size_t amt_to_read;
			int pages = 0;
			int blank_pages = 0;
			while(filesize > 0) {
				// struct page *page = page_alloc(as,va & PAGE_FRAME,permissions);
				DEBUG(DB_DEMAND, "loading...\n");
				DEBUG(DB_DEMAND, "File Size: %d\n",filesize);
				DEBUG(DB_DEMAND, "Mem Size: %d\n", memsize);
				DEBUG(DB_DEMAND, "Offset:%d\n", (int) cur_offset);
				DEBUG(DB_DEMAND, "VA:%p\n", (void*) va);
				/* Read a full page, or the remaining tail on
				 * the last iteration. */
				amt_to_read = (filesize - PAGE_SIZE > 0 ?
					       PAGE_SIZE : filesize);
				result = dynamic_load_segment(v, cur_offset, va,
							      PAGE_SIZE, amt_to_read,
							      ph.p_flags & PF_X,permissions);
				if (result) {
					return result;
				}
				filesize -= PAGE_SIZE;
				memsize -= PAGE_SIZE;
				va += PAGE_SIZE;
				cur_offset += PAGE_SIZE;
				// page->state = DIRTY;
				pages++;
			}
			(void)blank_pages;
			DEBUG(DB_DEMAND,"Empty Pages...\n");
			/* Zero-filled pages for the remainder of memsz
			 * (the bss portion beyond filesz). */
			while(memsize > 0) {
				DEBUG(DB_DEMAND,"Mem Size:%d\n",memsize);
				DEBUG(DB_DEMAND,"VA: %p\n",(void*) va);
				bool lock = get_coremap_lock();
				struct page *page = page_alloc(as,va & PAGE_FRAME,permissions);
				release_coremap_lock(lock);
				va += PAGE_SIZE;
				memsize -= PAGE_SIZE;
				KASSERT(page->state == LOCKED);
				page->state = DIRTY;
				blank_pages++;
			}
			DEBUG(DB_DEMAND, "Small Pages: %d\n", small_pages);
			DEBUG(DB_DEMAND, "Pages: %d\n", pages);
			DEBUG(DB_DEMAND, "Blank Pages: %d\n", blank_pages);
		}
		// result = load_segment(v, ph.p_offset, ph.p_vaddr,
		// 			ph.p_memsz, ph.p_filesz,
		// 			ph.p_flags & PF_X);

		/* NOTE(review): this re-checks `result` after the branches
		 * above, but every failing path already returned; if the
		 * else-branch loops run zero iterations, `result` is the
		 * stale (zero) value from VOP_READ, so this is effectively
		 * dead code — consider removing. */
		if (result) {
			return result;
		}
	}

	result = as_complete_load(curthread->t_addrspace);
	if (result) {
		return result;
	}

	*entrypoint = eh.e_entry;

	/* Register load complete in addrspace */
	as->loadelf_done = true;
	DEBUG(DB_VM,"LoadELFDone\n");
	// kprintf("LoadELF done\n");
	return 0;
}
/*
 * load_segment_directly_into_vspace - eagerly copy an ELF segment into a
 * vspace at boot time.
 *
 * Defines a region at DST, then for each page: maps the page into the
 * destination address space, copies the frame cap and maps it into SOS's
 * scratch window (PROCESS_SCRATCH_START + dst), memcpy's the file-backed
 * bytes in, flushes the I-cache, and unmaps/deletes the temporary cap.
 *
 * Intended for boot-time use only: on a page-mapping failure it bails out
 * rather than handling lazy loading/swap (swapping is not yet available
 * that early). Returns 0 on success, 1 on failure.
 *
 * NOTE(review): pages mapped before a mid-loop failure are not torn down;
 * presumably the whole vspace is destroyed by the caller on error — confirm.
 */
static int load_segment_directly_into_vspace(addrspace_t dest_as, char *src,
					     unsigned long segment_size,
					     unsigned long file_size,
					     unsigned long dst,
					     unsigned long permissions)
{
	assert(file_size <= segment_size);

	struct as_region* reg = as_define_region(dest_as, dst, segment_size,
						 permissions, REGION_GENERIC);
	if (!reg) {
		return 1;
	}

	/* We work a page at a time in the destination vspace. */
	unsigned long pos = 0;
	while(pos < segment_size) {
		seL4_CPtr sos_cap, frame_cap;
		seL4_Word vpage, kvpage;
		unsigned long kdst;
		int nbytes;
		int err;

		kdst   = dst + PROCESS_SCRATCH_START;
		vpage  = PAGE_ALIGN(dst);
		kvpage = PAGE_ALIGN(kdst);

		/* Map the page into the destination address space. */
		int status = PAGE_FAILED;
		struct pt_entry* page = page_map(dest_as, reg, vpage,
						 &status, NULL, NULL);
		if (!page || status != PAGE_SUCCESS) {
			/* Boot-time only: no lazy loading/swap fallback. */
			return 1;
		}

		/* Map the frame into SOS as well so we can copy into it.
		 * FIXME: would be much nicer if we just used cur_addrspace -
		 * you will need to create a region in main's init function */
		sos_cap = page->cap;
		assert(sos_cap);

		frame_cap = cspace_copy_cap(cur_cspace, cur_cspace, sos_cap,
					    seL4_AllRights);
		if (!frame_cap) {
			return 1;
		}

		err = map_page(frame_cap, seL4_CapInitThreadPD, kvpage,
			       seL4_AllRights, seL4_ARM_Default_VMAttributes);
		if (err) {
			/* BUG FIX: delete our cap copy rather than leaking
			 * a cslot on every failed mapping. */
			cspace_delete_cap(cur_cspace, frame_cap);
			return 1;
		}

		/* Now copy our data into the destination vspace. */
		nbytes = PAGESIZE - (dst & PAGEMASK);
		if (pos < file_size) {
			memcpy((void*)kdst, (void*)src,
			       MIN(nbytes, file_size - pos));
		}

		/* Not observable to I-cache yet so flush the frame. */
		seL4_ARM_Page_FlushCaches(frame_cap);

		/* Unmap page + delete cap copy. */
		err = seL4_ARM_Page_Unmap(frame_cap);
		if (err) {
			/* BUG FIX: still reclaim the cap copy even when the
			 * unmap fails, instead of leaking it. */
			cspace_delete_cap(cur_cspace, frame_cap);
			return 1;
		}
		cspace_delete_cap(cur_cspace, frame_cap);

		pos += nbytes;
		dst += nbytes;
		src += nbytes;
	}

	return 0;
}