Example #1
/*
 * This function implements the brk(2) system call.
 *
 * This routine manages the calling process's "break" -- the ending address
 * of the process's "dynamic" region (often also referred to as the "heap").
 * The current value of a process's break is maintained in the 'p_brk' member
 * of the proc_t structure that represents the process in question.
 *
 * The 'p_brk' and 'p_start_brk' members of a proc_t struct are initialized
 * by the loader. 'p_start_brk' is subsequently never modified; it always
 * holds the initial value of the break. Note that the starting break is
 * not necessarily page aligned!
 *
 * 'p_start_brk' is the lower limit of 'p_brk' (that is, setting the break
 * to any value less than 'p_start_brk' should be disallowed).
 *
 * The upper limit of 'p_brk' is defined by the minimum of (1) the
 * starting address of the next occurring mapping or (2) USER_MEM_HIGH.
 * That is, growth of the process break is limited only in that it cannot
 * overlap with/expand into an existing mapping or beyond the region of
 * the address space allocated for use by userland. (note the presence of
 * the 'vmmap_is_range_empty' function).
 *
 * The dynamic region should always be represented by at most ONE vmarea.
 * Note that vmareas only have page granularity, you will need to take this
 * into account when deciding how to set the mappings if p_brk or p_start_brk
 * is not page aligned.
 *
 * You are guaranteed that the process data/bss region is non-empty.
 * That is, if the starting brk is not page-aligned, its page has
 * read/write permissions.
 *
 * If addr is NULL, you should NOT fail as the man page says. Instead,
 * "return" the current break. We use this to implement sbrk(0) without writing
 * a separate syscall. Look in user/libc/syscall.c if you're curious.
 *
 * Also, despite the statement on the manpage, you MUST support combined use
 * of brk and mmap in the same process.
 *
 * Note that this function "returns" the new break through the "ret" argument.
 * Return 0 on success, -errno on failure.
 */
int
do_brk(void *addr, void **ret)
{
    if (addr == NULL) {
        *ret = curproc->p_brk;
        return 0;
    }

    uintptr_t start_brk = (uintptr_t)curproc->p_start_brk;
    uintptr_t brk = (uintptr_t)curproc->p_brk;
    uintptr_t vaddr = (uintptr_t)addr;
    uint32_t lopage = ADDR_TO_PN(PAGE_ALIGN_DOWN(start_brk));

    if (vaddr < start_brk) {
        return -ENOMEM;
    }

    if (vaddr >= USER_MEM_HIGH) {
        return -ENOMEM;
    }

    if (vaddr == brk) {
        /* no change requested */
        *ret = addr;
        return 0;
    }

    KASSERT(start_brk <= brk);

    vmarea_t *area = vmmap_lookup(curproc->p_vmmap, lopage);

    if (area == NULL) {
        /* The loader guarantees a non-empty data/bss region, so the page
         * containing the starting break should always be mapped. */
        panic("do_brk: no vmarea contains the starting break\n");
        return -ENOMEM;
    } else {

        uint32_t hiaddr = (uint32_t)(vaddr - 1);
        uint32_t hipage = ADDR_TO_PN(hiaddr);
        if (hipage < area->vma_end) {
            area->vma_end = hipage + 1;
            *ret = addr;
            curproc->p_brk = addr;
            return 0;
        } else {
            if (vmmap_is_range_empty(curproc->p_vmmap, area->vma_end,
                                        hipage - area->vma_end + 1)) {
                area->vma_end = hipage + 1;
                *ret = addr;
                curproc->p_brk = addr;
                return 0;
            } else {
                return -ENOMEM;
            }
        }
    }
}
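The comment above notes that userland builds sbrk() on top of this call, using brk(NULL) to read the current break. A minimal sketch of that idea, assuming a hypothetical stub brk_syscall() that returns the new break or (void *)-1 on error (the real stub lives in user/libc/syscall.c and may differ):

/* Sketch only: brk_syscall() is a hypothetical wrapper around the
 * system call above; it returns the resulting break, or (void *)-1. */
void *brk_syscall(void *addr);

void *sbrk(intptr_t increment)
{
    void *old = brk_syscall(NULL);   /* brk(NULL) "returns" the current break */
    if (old == (void *)-1 || increment == 0)
        return old;
    if (brk_syscall((char *)old + increment) == (void *)-1)
        return (void *)-1;
    return old;                      /* sbrk reports the previous break */
}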
Example #2
static void fix_auxv(Elf_auxv_t *auxv, Elf_Ehdr *elf, void *program_base, void *interp_base){
	int i;
	for(i=0;auxv[i].a_type != 0;++i){
#ifdef LOADER_DEBUG
		myprintf("aux %d : %p -> ",auxv[i].a_type,
			auxv[i].a_un.a_val);
#endif
		switch(auxv[i].a_type){
			case AT_PHDR:
				auxv[i].a_un.a_val = (Elf_Addr)program_base + elf->e_phoff;
				break;
			case AT_ENTRY:
				auxv[i].a_un.a_val = elf->e_entry;
				break;
			case AT_PHNUM:
				auxv[i].a_un.a_val = elf->e_phnum;
				break;
			case AT_BASE:
				auxv[i].a_un.a_val = PAGE_ALIGN_DOWN((Elf_Addr)interp_base);
				break;
		}
#ifdef LOADER_DEBUG
		myprintf("%p\n",auxv[i].a_un.a_val);
#endif
	}
}
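For context, the auxiliary vector that fix_auxv() walks is the AT_NULL-terminated array of (type, value) pairs the kernel leaves on the stack above envp; the loop's a_type != 0 condition is exactly the AT_NULL terminator check. An illustrative stand-in for the entry type (the real Elf_auxv_t comes from the ELF headers):

/* Illustrative only; the actual definition lives in <elf.h> or the
 * loader's own headers. */
typedef struct {
	long a_type;          /* AT_PHDR, AT_ENTRY, AT_PHNUM, AT_BASE, ... */
	union {
		long a_val;   /* the value for this entry */
	} a_un;
} Elf_auxv_t;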
Example #3
/**
 * @brief Return true if the given zone contains the given address.
 */
bool zone_contains(struct page_zone *zone, size_t addr)
{
	size_t page_addr = PAGE_ALIGN_DOWN(addr);

	return ZONE_START_PAGE_ADDR(zone) <= page_addr &&
		ZONE_END_PAGE_ADDR(zone)   >= page_addr;
}
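All of the snippets on this page lean on the same page-rounding helpers. The exact definitions vary between the projects quoted here (and plain PAGE_ALIGN usually rounds up), but for a 4 KiB page they are conventionally along these lines:

/* Typical definitions, assuming a 4 KiB page; each project above has
 * its own variants. */
#define PAGE_SIZE           4096UL
#define PAGE_MASK           (~(PAGE_SIZE - 1))
#define PAGE_ALIGN_DOWN(x)  ((uintptr_t)(x) & PAGE_MASK)
#define PAGE_ALIGN_UP(x)    (((uintptr_t)(x) + PAGE_SIZE - 1) & PAGE_MASK)
#define PAGE_OFFSET(x)      ((uintptr_t)(x) & (PAGE_SIZE - 1))
#define PAGE_ALIGNED(x)     (PAGE_OFFSET(x) == 0)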
Example #4
static int bootm_relocate_fdt(void *addr, struct image_data *data)
{
	if (addr < LINUX_TLB1_MAX_ADDR) {
		/* The kernel is within the boot TLB mapping.
		 * Put the DTB above if there is no space
		 * below.
		 */
		if (addr < (void *)data->oftree->totalsize) {
			addr = (void *)PAGE_ALIGN((phys_addr_t)addr +
					data->os->header.ih_size);
			addr += data->oftree->totalsize;
			if (addr < LINUX_TLB1_MAX_ADDR)
				addr = LINUX_TLB1_MAX_ADDR;
		}
	}

	if (addr > LINUX_TLB1_MAX_ADDR) {
		pr_crit("Unable to relocate DTB to Linux TLB\n");
		return 1;
	}

	addr = (void *)PAGE_ALIGN_DOWN((phys_addr_t)addr -
			data->oftree->totalsize);
	memcpy(addr, data->oftree, data->oftree->totalsize);
	free(data->oftree);
	data->oftree = addr;

	pr_info("Relocating device tree to 0x%p\n", addr);
	return 0;
}
Example #5
void __section(.text_entry) pbl_main_entry(void *fdt, void *fdt_end,
					   u32 ram_size)
{
	u32 pg_start, pg_end, pg_len, fdt_len;
	void *fdt_new;
	void (*barebox)(void *fdt, u32 fdt_len, u32 ram_size);

	puts_ll("pbl_main_entry()\n");

	/* clear bss */
	memset(__bss_start, 0, __bss_stop - __bss_start);

	pg_start = (u32)&input_data;
	pg_end = (u32)&input_data_end;
	pg_len = pg_end - pg_start;

	barebox_uncompress(&input_data, pg_len);

	fdt_len = (u32)fdt_end - (u32)fdt;
	fdt_new = (void *)PAGE_ALIGN_DOWN(TEXT_BASE - MALLOC_SIZE - STACK_SIZE - fdt_len);
	memcpy(fdt_new, fdt, fdt_len);

	barebox = (void *)TEXT_BASE;
	barebox(fdt_new, fdt_len, ram_size);
}
Example #6
int main() {
  // take a read-only snapshot of all events logged so far
  tthread::log log;

  global_var = 1;
  pthread_t thread;
  pthread_create(&thread, NULL, child, NULL);
  pthread_join(thread, NULL);

  // the size of a log instance does not change after instantiation;
  // to get all events that happened after `log` was created, use:
  // (in this case the write to global_var will be logged)
  tthread::log log2(log.end());

  unsigned int llen = log2.length();

  for (unsigned long i = 0; i < llen; i++) {
    tthread::logevent e = log2.get(i);

    const tthread::EventData data = e.getData();

    switch (e.getType()) {
    case tthread::logevent::READ:
    case tthread::logevent::WRITE:
    {
      const char *access = e.getType() ==
                           tthread::logevent::READ ? "read" : "write";
      fprintf(stderr,
              "[%s] threadIndex: %d, address: %p, pageStart: %p, issued at: [%p]\n",
              access,
              e.getThreadId(),
              data.memory.address,
              ((void *)PAGE_ALIGN_DOWN(
                 data.memory.address)),
              e.getReturnAddress());
      break;
    }

    case tthread::logevent::THUNK:
    {
      fprintf(stderr,
              "[thunk] %d, issued at [%p]\n",
              data.thunk.id,
              e.getReturnAddress());
      break;
    }

    case tthread::logevent::FINISH:
      fprintf(stderr, "thread %d finish\n", e.getThreadId());
      break;

    case tthread::logevent::INVALID:
      fprintf(stderr, "[invalid entry]\n");
      break;

    default:
      fprintf(stderr, "[unknown event type]\n");
    }
  }
  return 0;
}
Example #7
/* Zero the parts of the mapped pages that the segment's file data does
 * not cover: the slack before p_vaddr in the first page, the bss between
 * p_filesz and p_memsz, and the slack after p_memsz up to the page end. */
static void clean_pages(char *base, Elf_Phdr *p){
	/* slack between the start of the first page and p_vaddr */
	size_t len = p->p_vaddr - PAGE_ALIGN_DOWN(p->p_vaddr);
	if(len > 0){
		memset(base + PAGE_ALIGN_DOWN(p->p_vaddr), 0, len);
	}

	/* bss: bytes present in memory but not in the file */
	len = p->p_memsz - p->p_filesz;
	if(len > 0){
		memset(base + p->p_vaddr + p->p_filesz, 0, len);
	}

	/* slack between the end of the segment and the end of its last page */
	len = PAGE_ALIGN(p->p_vaddr + p->p_memsz) - (p->p_vaddr + p->p_memsz);
	if(len > 0){
		memset(base + p->p_vaddr + p->p_memsz, 0, len);
	}
}
Example #8
/* given the physical address of an ACPI table this function
 * allocates memory for that table and copies the table into
 * that memory, returning the new virtual address for that table */
static void *_acpi_load_table(uintptr_t paddr)
{
        struct acpi_header *tmp =
                (struct acpi_header *)(pt_phys_tmp_map((uintptr_t)PAGE_ALIGN_DOWN(paddr)) + (PAGE_OFFSET(paddr)));

        /* this function is not designed to handle tables which
         * cross page boundaries */
        KASSERT(PAGE_OFFSET(paddr) + tmp->ah_size < PAGE_SIZE);
        struct acpi_header *table = kmalloc(tmp->ah_size);
        memcpy(table, tmp, tmp->ah_size);
        return (void *)table;
}
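A hypothetical caller sketch: walking the RSDT and loading every table it references. Per the ACPI specification the RSDT body is an array of 32-bit physical table addresses; the entry layout and the helper's signature are assumed from the snippet above.

static void _acpi_load_all(struct acpi_header *rsdt)
{
        /* entries start right after the common header (assumes no padding) */
        uint32_t *entries = (uint32_t *)(rsdt + 1);
        size_t nentries = (rsdt->ah_size - sizeof(*rsdt)) / sizeof(uint32_t);
        size_t i;
        for (i = 0; i < nentries; ++i) {
                struct acpi_header *table = _acpi_load_table(entries[i]);
                /* ... register 'table' by its 4-byte signature ... */
        }
}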
Example #9
static size_t elf_total_size(Elf_Phdr *phs, int n){
	Elf_Phdr *first=NULL, *last=NULL;
	int i;
	size_t totallen;
	for(i=0;i<n;++i){
		if(phs[i].p_type == PT_LOAD){
			if(first == NULL)
				first=&phs[i];
			last=&phs[i];
		}
	}
	if(first == NULL)
		return 0; /* no PT_LOAD segments */
	totallen = last->p_vaddr + last->p_memsz - PAGE_ALIGN_DOWN(first->p_vaddr);
#ifdef LOADER_DEBUG
	myprintf("total len %d %x\n",totallen,totallen);
#endif
	return totallen;
}
Example #10
void g_elf32_loader::loadLoadSegment(elf32_ehdr* header, g_process* process) {

	// Initial values
	uint32_t imageStart = 0xFFFFFFFF;
	uint32_t imageEnd = 0;

	// First find out how much space the image needs in memory
	for (uint32_t i = 0; i < header->e_phnum; i++) {
		elf32_phdr* programHeader = (elf32_phdr*) (((uint32_t) header) + header->e_phoff + (header->e_phentsize * i));
		if (programHeader->p_type != PT_LOAD)
			continue;
		if (programHeader->p_vaddr < imageStart)
			imageStart = programHeader->p_vaddr;
		if (programHeader->p_vaddr + programHeader->p_memsz > imageEnd)
			imageEnd = programHeader->p_vaddr + programHeader->p_memsz;
	}

	// Align the addresses
	imageStart = PAGE_ALIGN_DOWN(imageStart);
	imageEnd = PAGE_ALIGN_UP(imageEnd);

	// Map pages for the executable
	for (uint32_t virt = imageStart; virt < imageEnd; virt += G_PAGE_SIZE) {
		uint32_t phys = g_pp_allocator::allocate();
		g_address_space::map(virt, phys, DEFAULT_USER_TABLE_FLAGS, DEFAULT_USER_PAGE_FLAGS);
		g_pp_reference_tracker::increment(phys);
	}

	// Write the image to memory
	for (uint32_t i = 0; i < header->e_phnum; i++) {
		elf32_phdr* programHeader = (elf32_phdr*) (((uint32_t) header) + header->e_phoff + (header->e_phentsize * i));
		if (programHeader->p_type != PT_LOAD)
			continue;
		g_memory::setBytes((void*) programHeader->p_vaddr, 0, programHeader->p_memsz);
		g_memory::copy((void*) programHeader->p_vaddr, (uint8_t*) (((uint32_t) header) + programHeader->p_offset), programHeader->p_filesz);
	}

	// Set out parameters
	process->imageStart = imageStart;
	process->imageEnd = imageEnd;
}
Example #11
/*
 * This gets called by _pt_fault_handler in mm/pagetable.c The
 * calling function has already done a lot of error checking for
 * us. In particular it has checked that we are not page faulting
 * while in kernel mode. Make sure you understand why an
 * unexpected page fault in kernel mode is bad in Weenix. You
 * should probably read the _pt_fault_handler function to get a
 * sense of what it is doing.
 *
 * Before you can do anything you need to find the vmarea that
 * contains the address that was faulted on. Make sure to check
 * the permissions on the area to see if the process has
 * permission to do [cause]. If either of these checks does not
 * pass kill the offending process, setting its exit status to
 * EFAULT (normally we would send the SIGSEGV signal, however
 * Weenix does not support signals).
 *
 * Now it is time to find the correct page (don't forget
 * about shadow objects, especially copy-on-write magic!). Make
 * sure that if the user writes to the page it will be handled
 * correctly.
 *
 * Finally call pt_map to have the new mapping placed into the
 * appropriate page table.
 *
 * @param vaddr the address that was accessed to cause the fault
 *
 * @param cause this is the type of operation on the memory
 *              address which caused the fault, possible values
 *              can be found in pagefault.h
 */
void
handle_pagefault(uintptr_t vaddr, uint32_t cause)
{
	vmmap_t *map = curproc->p_vmmap;
	vmarea_t *vma = vmmap_lookup(map, ADDR_TO_PN(vaddr));

	if(vma == NULL || !(cause & FAULT_USER)){
		/* XXX permission checks (PROT_READ/PROT_WRITE) still missing */
		curproc->p_status = EFAULT;
		proc_kill(curproc, EFAULT);
		return;
	}
	pframe_t *pf;
	uintptr_t pagenum = ADDR_TO_PN(vaddr) - vma->vma_start + vma->vma_off;
	/* XXX handle shadow objects -- see the sketch after this example */
	pframe_get(vma->vma_obj, pagenum, &pf);
	uintptr_t paddr = pt_virt_to_phys((uintptr_t)pf->pf_addr);
	/* XXX mapping everything writable ignores the vmarea protections */
	uintptr_t pdflags = PD_PRESENT | PD_WRITE | PD_USER;
	uintptr_t ptflags = PT_PRESENT | PT_WRITE | PT_USER;
	/* XXX tlb flush? */
	pt_map(curproc->p_pagedir, (uintptr_t)PAGE_ALIGN_DOWN(vaddr), paddr, pdflags, ptflags);

}
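This version leaves the copy-on-write path as an XXX. Examples 13 and 14 further down show the usual Weenix approach, sketched here against the variables above: request a private copy from pframe_lookup() when the fault is a write and let the mmobj/shadow chain do the rest.

/* Sketch of the missing shadow-object handling, reusing vma, pagenum
 * and pf from the function above; see Examples 13/14 for full versions. */
int forwrite = (cause & FAULT_WRITE) ? 1 : 0;
if (pframe_lookup(vma->vma_obj, pagenum, forwrite, &pf) < 0) {
	proc_kill(curproc, EFAULT);
	return;
}
if (forwrite)
	pframe_dirty(pf);   /* the private copy must be marked dirty */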
Example #12
uint32_t g_loader::findFreeMemory(g_multiboot_information* info, uint32_t start, int count) {

	g_log_info("%! searching for %i free pages (starting at %h)", "loader", count, start);
	g_physical_address location = start;

	while (location < 0xFFFFFFFF) {

		bool notWithinModule = true;

		// For each of the required pages, check if it lies within a module
		for (int i = 0; i < count && notWithinModule; i++) {
			uint32_t pos = location + i * G_PAGE_SIZE;

			// Check if one of the modules contains this position
			for (uint32_t m = 0; m < info->modulesCount; m++) {
				g_multiboot_module* module = (g_multiboot_module*) (info->modulesAddress + sizeof(g_multiboot_module) * m);

				uint32_t moduleStart = PAGE_ALIGN_DOWN(module->moduleStart);
				uint32_t moduleEnd = PAGE_ALIGN_UP(module->moduleEnd);

				if (pos >= moduleStart && pos < moduleEnd) {
					// Conflict: restart the search right behind this module
					notWithinModule = false;
					location = moduleEnd;
					break;
				}
			}
		}

		if (notWithinModule) {
			g_log_info("%# found: %h", location);
			return location;
		}
	}

	panic("%! could not find free memory chunk", "loader");
	return 0;
}
Example #13
/*
 * This gets called by _pt_fault_handler in mm/pagetable.c The
 * calling function has already done a lot of error checking for
 * us. In particular it has checked that we are not page faulting
 * while in kernel mode. Make sure you understand why an
 * unexpected page fault in kernel mode is bad in Weenix. You
 * should probably read the _pt_fault_handler function to get a
 * sense of what it is doing.
 *
 * Before you can do anything you need to find the vmarea that
 * contains the address that was faulted on. Make sure to check
 * the permissions on the area to see if the process has
 * permission to do [cause]. If either of these checks does not
 * pass kill the offending process, setting its exit status to
 * EFAULT (normally we would send the SIGSEGV signal, however
 * Weenix does not support signals).
 *
 * Now it is time to find the correct page (don't forget
 * about shadow objects, especially copy-on-write magic!). Make
 * sure that if the user writes to the page it will be handled
 * correctly.
 *
 * Finally call pt_map to have the new mapping placed into the
 * appropriate page table.
 *
 * @param vaddr the address that was accessed to cause the fault
 *
 * @param cause this is the type of operation on the memory
 *              address which caused the fault, possible values
 *              can be found in pagefault.h
 */
void handle_pagefault(uintptr_t vaddr, uint32_t cause) {
	/*NOT_YET_IMPLEMENTED("VM: handle_pagefault");*/
	vmarea_t *vma;
	pframe_t *pf;
	int pflags = PD_PRESENT | PD_USER;
	int writeflag = 0;
	dbg(DBG_PRINT, "(GRADING3F)\n");
	if ((vma = vmmap_lookup(curproc->p_vmmap, ADDR_TO_PN(vaddr))) == NULL) {
		dbg(DBG_PRINT, "(GRADING3C 1)\n");
		proc_kill(curproc, EFAULT);
		return;
	}
	if (!((cause & FAULT_WRITE) || (cause & FAULT_EXEC))
			&& !(vma->vma_prot & PROT_READ)) {
		dbg(DBG_PRINT, "(GRADING3D 3)\n");
		proc_kill(curproc, EFAULT);
		return;
	}
	if ((cause & FAULT_WRITE) && !(vma->vma_prot & PROT_WRITE)) {
		dbg(DBG_PRINT, "(GRADING3D 3)\n");
		proc_kill(curproc, EFAULT);
		return;
	}

	if (cause & FAULT_WRITE) {
		dbg(DBG_PRINT, "(GRADING3F)\n");
		writeflag = 1;
	}

	if (pframe_lookup(vma->vma_obj,
	ADDR_TO_PN(vaddr) - vma->vma_start + vma->vma_off, writeflag, &pf) < 0) {
		dbg(DBG_PRINT, "(GRADING3D 4)\n");
		proc_kill(curproc, EFAULT);
		return;
	}
	if (cause & FAULT_WRITE) {
		pframe_pin(pf);
		dbg(DBG_PRINT, "(GRADING3F)\n");
		if (pframe_dirty(pf) < 0) {
			dbg(DBG_ERROR, "(GRADING3 10)\n");
			pframe_unpin(pf);
			proc_kill(curproc, EFAULT);
			return;
		}
		pframe_unpin(pf);
		pflags |= PD_WRITE;
	}

	pt_map(curproc->p_pagedir, (uintptr_t) PAGE_ALIGN_DOWN(vaddr),
			pt_virt_to_phys((uintptr_t) pf->pf_addr), pflags, pflags);

}
Example #14
/*
 * This gets called by _pt_fault_handler in mm/pagetable.c The
 * calling function has already done a lot of error checking for
 * us. In particular it has checked that we are not page faulting
 * while in kernel mode. Make sure you understand why an
 * unexpected page fault in kernel mode is bad in Weenix. You
 * should probably read the _pt_fault_handler function to get a
 * sense of what it is doing.
 *
 * Before you can do anything you need to find the vmarea that
 * contains the address that was faulted on. Make sure to check
 * the permissions on the area to see if the process has
 * permission to do [cause]. If either of these checks does not
 * pass kill the offending process, setting its exit status to
 * EFAULT (normally we would send the SIGSEGV signal, however
 * Weenix does not support signals).
 *
 * Now it is time to find the correct page (don't forget
 * about shadow objects, especially copy-on-write magic!). Make
 * sure that if the user writes to the page it will be handled
 * correctly.
 *
 * Finally call pt_map to have the new mapping placed into the
 * appropriate page table.
 *
 * @param vaddr the address that was accessed to cause the fault
 *
 * @param cause this is the type of operation on the memory
 *              address which caused the fault, possible values
 *              can be found in pagefault.h
 */
void
handle_pagefault(uintptr_t vaddr, uint32_t cause)
{

        int forwrite = 0;
        if (vaddr < USER_MEM_LOW || vaddr >= USER_MEM_HIGH) {
                dbg(DBG_PRINT, "(GRADING3D) ADDRESS NOT VALID \n");
                do_exit(EFAULT);
                return;
        }
        vmarea_t *container = vmmap_lookup(curproc->p_vmmap, ADDR_TO_PN(vaddr));
        if (container == NULL) {
                dbg(DBG_PRINT, "(GRADING3D) VMAREA NOT VALID \n");
                do_exit(EFAULT);
                return;
        }
        if (container->vma_prot == PROT_NONE) {
                dbg(DBG_PRINT, "(GRADING3D) PROT NOT VALID \n");
                do_exit(EFAULT);
                return;
        }
        if ((cause & FAULT_WRITE) && !(container->vma_prot & PROT_WRITE)) {
                dbg(DBG_PRINT, "(GRADING3D) CONTAINER PROT NOT VALID \n");
                do_exit(EFAULT);
                return;
        }
        if (!(container->vma_prot & PROT_READ)) {
                dbg(DBG_PRINT, "(GRADING3D) PROT IS NOT PROT READ \n");
                do_exit(EFAULT);
                return;
        }
        int pagenum = ADDR_TO_PN(vaddr) - container->vma_start + container->vma_off;
        pframe_t *pf;
        if ((container->vma_prot & PROT_WRITE) && (cause & FAULT_WRITE)) {
                dbg(DBG_PRINT, "(GRADING3D) prot write fault write \n");
                int pf_res = pframe_lookup(container->vma_obj, pagenum, 1, &pf);
                if (pf_res < 0) {
                        dbg(DBG_PRINT, "(GRADING3D) pframe lookup failed\n");
                        do_exit(EFAULT);
                        return;
                }
                pframe_dirty(pf);
        } else {
                dbg(DBG_PRINT, "(GRADING3D) prot write fault write else \n");
                int pf_res = pframe_lookup(container->vma_obj, pagenum, forwrite, &pf);
                if (pf_res < 0) {
                        dbg(DBG_PRINT, "(GRADING3D) pframe lookup failed \n");
                        do_exit(EFAULT);
                        return;
                }
        }
        KASSERT(pf);
        dbg(DBG_PRINT, "(GRADING3A 5.a) pf is not NULL\n");
        KASSERT(pf->pf_addr);
        dbg(DBG_PRINT, "(GRADING3A 5.a) pf->addr is not NULL\n");
        uint32_t pdflags = PD_PRESENT | PD_USER;
        uint32_t ptflags = PT_PRESENT | PT_USER;
        if(cause & FAULT_WRITE){
          dbg(DBG_PRINT, "(GRADING3D) cause is fault write \n");
            pdflags = pdflags | PD_WRITE;
            ptflags = ptflags | PT_WRITE;
        }
        pt_map(curproc->p_pagedir, (uintptr_t)PAGE_ALIGN_DOWN(vaddr), pt_virt_to_phys((uintptr_t)pf->pf_addr), pdflags, ptflags);
}
Example #15
/* returns NULL if program has no interpreter */
static void *map_loader(Elf_Ehdr *elf, Elf_Phdr *phs, void **interpr_base_p){
	int i;
	char interp[128];
	size_t len;
	char *interp_base = NULL;
	int fd;
	char header[1024];
	void *ret;
#ifdef ARCH_X86
	static char bsd_interp[]="/libexec/ld-elf.so.1";
	static char bsd_interp32[]="/libexec/ld-elf32.so.1";
	int changed_interp=0;
#endif	
	for (i=0; i<elf->e_phnum; ++i){
		if(phs[i].p_type == PT_INTERP)
			break;
	}
	if (i>= elf->e_phnum)
		return NULL;
	len = phs[i].p_filesz;
	if (len > sizeof(interp) -1)
		len = sizeof(interp) -1;
	memcpy(interp, ((char *)elf) + phs[i].p_offset, len);
	interp[len]='\0';
#ifdef ARCH_X86
	/* FreeBSD requires the kernel to modify the loader name when */
	/* running on x86-64. Bad design IMO, here's a workaround     */
	if(memcmp(interp,bsd_interp, sizeof(bsd_interp))==0){
			memcpy(interp,bsd_interp32,sizeof(bsd_interp32));
			changed_interp=1;
	}
#endif
#ifdef LOADER_DEBUG
	myprintf("Opening %s\n",interp);
#endif
	fd = open(interp, O_RDONLY);
#ifdef ARCH_X86
	if (fd < 0 && changed_interp){
#ifdef LOADER_DEBUG
		myprintf("Fallback on %s\n", bsd_interp);
#endif
		memcpy(interp, bsd_interp, sizeof(bsd_interp));
		fd = open(bsd_interp, O_RDONLY);
	}
#endif
	if (fd < 0){
		//myprintf("Could not open interpreter %s (%d), mismatch 32/64 bits ?\n",
			//interp, fd);
		exit(1);
	}
	if (read(fd, header, sizeof(header)) < (int)sizeof(Elf_Ehdr))
		exit(1);
	elf=(Elf_Ehdr *) header;
	phs=(Elf_Phdr *) ((char *)header + elf->e_phoff);
#ifdef LOADER_DEBUG
	myprintf("Elf header at %x\n",header);
	myprintf("%d pheaders starting at %p\n",elf->e_phnum,elf->e_phoff);
#endif

	len = elf_total_size(phs, elf->e_phnum);
	len = PAGE_ALIGN(len);
	interp_base = mmap(NULL, len , PROT_READ|PROT_WRITE,
			MAP_PRIVATE | MP_MAP_ANON, -1, 0);
	if (interp_base == NULL){
		//myprintf("mapping error\n");
		return NULL;
	}
#ifdef LOADER_DEBUG
	myprintf("base mapped at %p\n",interp_base);
#endif

	/* map each PT_LOAD segment at a fixed offset from the reserved base */
	for(i=0;i<elf->e_phnum; ++i){
		if(phs[i].p_type == PT_LOAD){
#ifdef LOADER_DEBUG
				myprintf("pheader %d:%p type %d vaddr %p filesz %p\n", i,
				&phs[i],
				phs[i].p_type, phs[i].p_vaddr, phs[i].p_filesz);
#endif

				ret = mmap(
					interp_base + PAGE_ALIGN_DOWN(phs[i].p_vaddr),
					PAGE_ALIGN((phs[i].p_vaddr & 0xfff) + phs[i].p_memsz),
					PROT_EXEC | PROT_READ | PROT_WRITE,
					MAP_PRIVATE |MAP_FIXED,
					fd, PAGE_ALIGN_DOWN(phs[i].p_offset));
				if (ret == NULL){
					//myprintf("mapping error\n");
					return NULL;
				}
#ifdef LOADER_DEBUG
				myprintf("page mapped at %p\n",ret);
#endif
				clean_pages(interp_base, &phs[i]);
		}
	}
	close(fd);
#ifdef LOADER_DEBUG
	myprintf("interpreter entrypoint at %p\n",interp_base +elf->e_entry);
#endif
	*interpr_base_p = interp_base;
	return (void *)(interp_base + elf->e_entry);
}
Example #16
/* Helper function for the ELF loader. Maps the specified segment
 * of the program header from the given file in to the given address
 * space with the given memory offset (in pages). On success returns 0, otherwise
 * returns a negative error code for the ELF loader to return.
 * Note that since any error returned by this function should
 * cause the ELF loader to give up, it is acceptable for the
 * address space to be modified after returning an error.
 * Note that memoff can be negative */
static int _elf32_map_segment(vmmap_t *map, vnode_t *file, int32_t memoff, const Elf32_Phdr *segment)
{
        uintptr_t addr;
        if (memoff < 0) {
                KASSERT(ADDR_TO_PN(segment->p_vaddr) > (uint32_t) -memoff);
                addr = (uintptr_t)segment->p_vaddr - (uintptr_t)PN_TO_ADDR(-memoff);
        } else {
                addr = (uintptr_t)segment->p_vaddr + (uintptr_t)PN_TO_ADDR(memoff);
        }
        uint32_t off = segment->p_offset;
        uint32_t memsz = segment->p_memsz;
        uint32_t filesz = segment->p_filesz;

        dbg(DBG_ELF, "Mapping program segment: type %#x, offset %#08x,"
            " vaddr %#08x, filesz %#x, memsz %#x, flags %#x, align %#x\n",
            segment->p_type, segment->p_offset, segment->p_vaddr,
            segment->p_filesz, segment->p_memsz, segment->p_flags,
            segment->p_align);

        /* check for bad data in the segment header */
        if (PAGE_SIZE != segment->p_align) {
                dbg(DBG_ELF, "ERROR: segment does not have correct alignment\n");
                return -ENOEXEC;
        } else if (filesz > memsz) {
                dbg(DBG_ELF, "ERROR: segment file size is greater than memory size\n");
                return -ENOEXEC;
        } else if (PAGE_OFFSET(addr) != PAGE_OFFSET(off)) {
                dbg(DBG_ELF, "ERROR: segment address and offset are not aligned correctly\n");
                return -ENOEXEC;
        }

        int perms = 0;
        if (PF_R & segment->p_flags) {
                perms |= PROT_READ;
        }
        if (PF_W & segment->p_flags) {
                perms |= PROT_WRITE;
        }
        if (PF_X & segment->p_flags) {
                perms |= PROT_EXEC;
        }

        if (0 < filesz) {
                /* something needs to be mapped from the file */
                /* start from the starting address and include enough pages to
                 * map all filesz bytes of the file */
                uint32_t lopage = ADDR_TO_PN(addr);
                uint32_t npages = ADDR_TO_PN(addr + filesz - 1) - lopage + 1;
                off_t fileoff = (off_t)PAGE_ALIGN_DOWN(off);

                int ret;
                if (!vmmap_is_range_empty(map, lopage, npages)) {
                        dbg(DBG_ELF, "ERROR: ELF file contains overlapping segments\n");
                        return -ENOEXEC;
                } else if (0 > (ret = vmmap_map(map, file, lopage, npages, perms,
                                                MAP_PRIVATE | MAP_FIXED, fileoff,
                                                0, NULL))) {
                        return ret;
                }
        }

        if (memsz > filesz) {
                /* there is left over memory in the segment which must
                 * be initialized to 0 (anonymously mapped) */
                uint32_t lopage = ADDR_TO_PN(addr + filesz);
                uint32_t npages = ADDR_TO_PN(PAGE_ALIGN_UP(addr + memsz)) - lopage;

                int ret;
                if (npages > 1 && !vmmap_is_range_empty(map, lopage + 1, npages - 1)) {
                        dbg(DBG_ELF, "ERROR: ELF file contains overlapping segments\n");
                        return -ENOEXEC;
                } else if (0 > (ret = vmmap_map(map, NULL, lopage, npages, perms,
                                                MAP_PRIVATE | MAP_FIXED, 0, 0, NULL))) {
                        return ret;
                } else if (!PAGE_ALIGNED(addr + filesz) && filesz > 0) {
                        /* In this case, we have accidentally zeroed too much of memory, as
                         * we zeroed all memory in the page containing addr + filesz.
                         * However, the remaining part of the data is not a full page, so we
                         * should not just map in another page (as there could be garbage
                         * after addr+filesz). For instance, consider the data-bss boundary
                         * (c.f. Intel x86 ELF supplement pp. 82).
                         * To fix this, we need to read in the contents of the file manually
                         * and put them at that user space addr in the anon map we just
                         * added. */
                        void *buf;
                        if (NULL == (buf = page_alloc()))
                                return -ENOMEM;
                        if (!(0 > (ret = file->vn_ops->read(file, (off_t) PAGE_ALIGN_DOWN(off + filesz),
                                                            buf, PAGE_OFFSET(addr + filesz))))) {
                                ret = vmmap_write(map, PAGE_ALIGN_DOWN(addr + filesz),
                                                  buf, PAGE_OFFSET(addr + filesz));
                        }
                        page_free(buf);
                        return ret;
                }
        }
        return 0;
}
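To make the data/bss boundary case above concrete, assume PAGE_SIZE is 4096 and take an illustrative segment with addr = 0x0804a000, filesz = 0x234 and memsz = 0x1000. The file-backed mapping covers the single page at 0x0804a000. The anonymous mapping then starts at ADDR_TO_PN(addr + filesz), which is that same page, so the MAP_FIXED anonymous mapping replaces it and the whole page reads as zeros, including the 0x234 bytes that came from the file. The final read() plus vmmap_write() restores exactly those PAGE_OFFSET(addr + filesz) = 0x234 bytes.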
Example #17
void
handle_pagefault(uintptr_t vaddr, uint32_t cause)
{
        pframe_t *pf;
        int ret_val;
        vmarea_t *vma = vmmap_lookup(curproc->p_vmmap, ADDR_TO_PN(vaddr));
        if (vma == NULL) {
                dbg(DBG_PRINT, "(GRADING3D 1): No vmarea found\n");
                proc_kill(curproc, EFAULT);
                return;
        }

        /* Check the vmarea's protection against the faulting access. */
        int write = (cause & FAULT_WRITE) != 0;
        if ((write && !(vma->vma_prot & PROT_WRITE)) ||
            (!write && !(vma->vma_prot & PROT_READ))) {
                dbg(DBG_PRINT, "(GRADING3D 1): vmarea lacks the required permission\n");
                proc_kill(curproc, EFAULT);
                return;
        }

        /* Look the page up; for a write fault, ask for a private copy so
         * shadow objects handle copy-on-write. */
        ret_val = pframe_lookup(vma->vma_obj,
                                ADDR_TO_PN(vaddr) - vma->vma_start + vma->vma_off,
                                write, &pf);
        if (ret_val < 0) {
                dbg(DBG_PRINT, "(GRADING3D 1): pframe could not be found\n");
                proc_kill(curproc, EFAULT);
                return;
        }

        KASSERT(pf);
        dbg(DBG_PRINT, "(GRADING3A 5.a): pframe is not NULL\n");
        KASSERT(pf->pf_addr);
        dbg(DBG_PRINT, "(GRADING3A 5.a): pf->pf_addr is not NULL\n");

        if (write) {
                /* Mark the frame dirty so it is eventually written back. */
                pframe_dirty(pf);
                pt_map(curproc->p_pagedir, (uintptr_t)PAGE_ALIGN_DOWN(vaddr),
                       pt_virt_to_phys((uintptr_t)pf->pf_addr),
                       (PD_WRITE | PD_PRESENT | PD_USER),
                       (PT_WRITE | PT_PRESENT | PT_USER));
        } else {
                pt_map(curproc->p_pagedir, (uintptr_t)PAGE_ALIGN_DOWN(vaddr),
                       pt_virt_to_phys((uintptr_t)pf->pf_addr),
                       (PD_PRESENT | PD_USER),
                       (PT_PRESENT | PT_USER));
        }
}
Example #18
static void __create_task_mm(task_t *task, int num, init_server_t *srv)
{
  struct bin_map *emap = get_elf_map(task,srv);
  per_task_data_t *ptd;
  vmm_t *vmm = task->task_mm;
  ulong_t entry = get_elf_entry(task,srv);
  struct bin_map *cur = emap;
  ulong_t sseek = 0, psize;
  void *sbss;
  int r, flags, kflags;
  int *argc;
  uintptr_t ustack_top;
  uintptr_t *argv, *envp;
  char *arg1, *envp1;

  if(!emap)
    panic("[Service start] Cannot load ELF map of module %d\n", num);

  /* map image sections */
  while(cur) {
    /* check for overlap with the previous section */
    if(cur->prev && (cur->virt_addr <
                     PAGE_ALIGN(cur->prev->virt_addr + cur->prev->size))) {
      sseek = PAGE_ALIGN(cur->virt_addr) - cur->virt_addr;
      cur->bin_addr += sseek;
      cur->virt_addr = PAGE_ALIGN(cur->virt_addr);
      cur->size -= sseek;

      /* if it's NO_BITS section it should be zeroed */
      if(cur->type == SHT_NOBITS) {
        sbss = user_to_kernel_vaddr(task_get_rpd(task), PAGE_ALIGN_DOWN(cur->virt_addr -
                                                                        sseek));
        memset((sbss + PAGE_SIZE) - sseek, 0, sseek);
      }
    }

    /* create vm range for this region */
    flags = VMR_PRIVATE | VMR_FIXED;
    kflags = 0;
    if(cur->flags & ESH_EXEC) {
      flags |= VMR_EXEC;
      kflags |= KMAP_EXEC;
    }

    flags |= VMR_READ;
    kflags |= KMAP_READ;

    if(cur->flags & ESH_WRITE) {
      flags |= VMR_WRITE;
      kflags |= KMAP_WRITE;
    }
    if((cur->type == SHT_NOBITS) ||
       (cur->flags & ESH_WRITE)) flags |= VMR_POPULATE;

    psize = (cur->size + (cur->virt_addr - PAGE_ALIGN_DOWN(cur->virt_addr)))
      >> PAGE_WIDTH;
    if(psize<<PAGE_WIDTH < (cur->size + (cur->virt_addr -
                                         PAGE_ALIGN_DOWN(cur->virt_addr)))) psize++;

    r = vmrange_map(generic_memobj, vmm, PAGE_ALIGN_DOWN(cur->virt_addr), psize,
                    flags, 0);
    if(!PAGE_ALIGN(r))
      panic("Server [#%d]: Failed to create VM range for section. (ERR = %d)", num, r);

    if(cur->type == SHT_PROGBITS) {
      if(cur->flags & ESH_WRITE) {
        memcpy((void *)user_to_kernel_vaddr(task_get_rpd(task),
                                            PAGE_ALIGN_DOWN(cur->virt_addr)),
               (const void *)pframe_id_to_virt((PAGE_ALIGN_DOWN(cur->bin_addr))>>PAGE_WIDTH),
               psize << PAGE_WIDTH);
        r = 0;
      } else {
        r = mmap_core(vmm, PAGE_ALIGN_DOWN(cur->virt_addr),
                      PAGE_ALIGN_DOWN(cur->bin_addr) >> PAGE_WIDTH, psize, kflags);
      }
      if(r)
        panic("Server [#%d]: Failed to map section. (ERR = %d)", num, r);
    }

/**
 * Reads the GRUB memory map to find out which memory areas are usable and free.
 * Excludes everything before "reservedAreaEnd" and also excludes the locations
 * of the multiboot modules.
 *
 * @param allocator:		the allocator object where mark free addresses
 * @param reservedAreaEnd:	the end address of the reserved area
 */
void MultibootMmapInterpreter::load(BitMapPageAllocator *allocator, uint32_t reservedAreaEnd)
{
	MultibootInformation *mbInfo = EvaLoader::getSetupInformation()->multibootInformation;

	MultibootMmap *map = (MultibootMmap*) mbInfo->memoryMapAddress;
	uint32_t mapListEnd = mbInfo->memoryMapAddress + mbInfo->memoryMapLength;

	// Iterate over the list of memory maps from GRUB
	logInfo("%! memory regions:", "memmap");
	while (((uint32_t) map) < mapListEnd)
	{
		// Check if the map is usable memory
		if (map->type == 1)
		{
			uint64_t areaStart = (uint64_t) map->baseAddressLower | ((uint64_t) map->baseAddressHigher << 32);
			uint64_t areaEnd = areaStart + ((uint64_t) map->lengthLower | ((uint64_t) map->lengthHigher << 32));

			// If this range is outside 32-bit bounds, ignore it
			if (areaStart > 0xFFFFFFFF) logInfo("%# > 0xFFFFFFFF             : not usable");

			else
			{
				logInfon("%#   %h - %h", (uint32_t ) areaStart, (uint32_t ) areaEnd);

				// Make sure that the mapped area lies behind the kernel
				if (areaStart < reservedAreaEnd) areaStart = reservedAreaEnd;

				// End of area above 32bit? Cut off
				if (areaEnd > 0xFFFFFFFF) areaEnd = 0xFFFFFFFF;

				// Page-align
				areaStart = PAGE_ALIGN_UP(areaStart);
				areaEnd = PAGE_ALIGN_DOWN(areaEnd);

				// Mark as free
				uint32_t chunkCount = 0;
				uint32_t inModule = 0;

				if (areaEnd > areaStart)
				{
					// Split into page sized chunks
					while (areaStart + PAGE_SIZE <= areaEnd)
					{
						// Exclude memory within modules
						bool isInModule = false;
						for (uint32_t i = 0; i < mbInfo->modulesCount; i++)
						{
							MultibootModule *module = (MultibootModule*) (mbInfo->modulesAddress + sizeof(MultibootModule) * i);

							if ((areaStart >= PAGE_ALIGN_DOWN(module->moduleStart)) && (areaStart < PAGE_ALIGN_UP(module->moduleEnd)))
							{
								isInModule = true;
								break;
							}
						}

						// If it's not inside a module, mark it as free
						if (isInModule) ++inModule;

						else
						{
							allocator->markFree(areaStart);
							++chunkCount;
						}

						areaStart = areaStart + PAGE_SIZE;
					}
				}

				logInfo(": %i available (%i blocked)", chunkCount, inModule);
			}
		}

		// Skip to the next map entry; the entry's size field does not count itself, hence the extra sizeof(uint32_t) (see the Multiboot docs)
		map = (MultibootMmap*) ((uint32_t) map + map->size + sizeof(uint32_t));
	}
}
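For reference, the entry layout this loop walks matches the Multiboot 1 memory map: the leading size field does not count itself, which is why the iteration advances by map->size + sizeof(uint32_t). A sketch with the field names the snippet uses (the authoritative layout is in the Multiboot specification):

struct MultibootMmap
{
	uint32_t size;              // entry size, NOT counting this field itself
	uint32_t baseAddressLower;  // bits 0..31 of the region base address
	uint32_t baseAddressHigher; // bits 32..63 of the region base address
	uint32_t lengthLower;       // bits 0..31 of the region length
	uint32_t lengthHigher;      // bits 32..63 of the region length
	uint32_t type;              // 1 = available RAM, anything else is reserved
};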