Code example #1
File: vm_descriptor.c Project: samueldotj/AceOS
/*! Enumerator - callback function used by FindVaRange()->EnumerateAvlTree()
	\param node - AVL tree node(vm descriptor)
	\param arg - preferred start and size of the required new region
*/
static int enumerate_descriptor_callback(AVL_TREE_PTR node, void * arg)
{
	ENUMERATE_DESCRIPTOR_ARG_PTR a= (ENUMERATE_DESCRIPTOR_ARG_PTR) arg;
	VM_DESCRIPTOR_PTR descriptor = STRUCT_ADDRESS_FROM_MEMBER(node, VM_DESCRIPTOR, tree_node);
	VADDR va_start, va_end, size;
	
	va_start = PAGE_ALIGN(a->preferred_start);
	va_end = PAGE_ALIGN_UP(a->preferred_start+a->size);
	size = PAGE_ALIGN_UP(a->size)-1;
	a->previous_descriptor_va_end = PAGE_ALIGN_UP(a->previous_descriptor_va_end);
	/*check whether the hole has "preferred" start and required size*/
	if ( RANGE_WITH_IN_RANGE( PAGE_ALIGN_UP(a->previous_descriptor_va_end), PAGE_ALIGN(descriptor->start), va_start, va_end ) )
	{
		/*update the result with correct address*/
		a->result = va_start;
		/*terminate enumeration*/
		return 1;
	}
	/*does the hole at least have the required size?*/
	else if ( (descriptor->start - a->previous_descriptor_va_end) > size )
	{
		a->result = a->previous_descriptor_va_end;
		/*break the enumeration if we passed preferred va range*/
		if ( descriptor->end > a->preferred_start )
			return 1;
	}
	
	a->previous_descriptor_va_start = descriptor->start;
	a->previous_descriptor_va_end = descriptor->end;
	/*continue enumeration*/
	return 0;
}
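
Note on the alignment macros: every snippet on this page leans on project-specific PAGE_ALIGN / PAGE_ALIGN_UP macros (and the Ghost examples on PAGE_ALIGN_DOWN / G_PAGE_SIZE). The exact definitions differ between these kernels, but a minimal sketch of the usual power-of-two pattern, assuming a 4 KiB page size, looks like this:

#include <stdint.h>

/* Assumed page size; each project on this page defines its own PAGE_SIZE / G_PAGE_SIZE. */
#define PAGE_SIZE        4096u

/* Round an address or size down / up to the nearest page boundary. */
#define PAGE_ALIGN(x)    ((uintptr_t)(x) & ~(uintptr_t)(PAGE_SIZE - 1))
#define PAGE_ALIGN_UP(x) (((uintptr_t)(x) + PAGE_SIZE - 1) & ~(uintptr_t)(PAGE_SIZE - 1))

/* For example: PAGE_ALIGN(0x1234)    == 0x1000
 *              PAGE_ALIGN_UP(0x1234) == 0x2000
 *              PAGE_ALIGN_UP(0x2000) == 0x2000 (already-aligned values are unchanged) */
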
Code example #2
gmgr_surface_t *GmgrCreateMemorySurface(const videomode_t *mode, uint32_t flags)
{
	gmgr_surface_t *surf;

	surf = malloc(sizeof(*surf));
	if (surf == NULL)
		return NULL;

	surf->device = NULL;
	surf->flags = flags;
	surf->mode = *mode;
	surf->base = VmmAlloc(PAGE_ALIGN_UP(mode->bytesPerLine * mode->height) / PAGE_SIZE,
		NULL,
		VM_MEM_USER | VM_MEM_READ | VM_MEM_WRITE);
	if (surf->base == NULL)
	{
		free(surf);
		return NULL;
	}

	if (!FramebufCreateSurface(mode, surf->base, &surf->surf))
	{
		VmmFree(surf->base);
		free(surf);
		return NULL;
	}

	return surf;
}
Code example #3
File: elf32_loader.cpp Project: emcifuntik/ghost
void g_elf32_loader::loadTlsMasterCopy(elf32_ehdr* header, g_process* process) {

	uint32_t tlsSize = 0;

	// Map pages for TLS master copy
	for (uint32_t i = 0; i < header->e_phnum; i++) {
		elf32_phdr* programHeader = (elf32_phdr*) (((uint32_t) header) + header->e_phoff + (header->e_phentsize * i));
		if (programHeader->p_type == PT_TLS) {
			tlsSize = PAGE_ALIGN_UP(programHeader->p_memsz);

			uint32_t tlsPages = tlsSize / G_PAGE_SIZE;
			uint32_t tlsStart = process->virtualRanges.allocate(tlsPages);
			uint32_t tlsEnd = tlsStart + tlsSize;

			for (uint32_t virt = tlsStart; virt < tlsEnd; virt += G_PAGE_SIZE) {
				uint32_t phys = g_pp_allocator::allocate();
				g_address_space::map(virt, phys, DEFAULT_USER_TABLE_FLAGS, DEFAULT_USER_PAGE_FLAGS);
				g_pp_reference_tracker::increment(phys);
			}

			g_memory::setBytes((void*) tlsStart, 0, programHeader->p_memsz);
			g_memory::copy((void*) tlsStart, (uint8_t*) (((uint32_t) header) + programHeader->p_offset), programHeader->p_filesz);
			break;
		}
	}

}
Code example #4
void *__morecore(size_t *bytes)
{
	*bytes = max(PAGE_ALIGN_UP(*bytes), 4 * PAGE_SIZE);
	return VmmAlloc(*bytes / PAGE_SIZE,
		0,
		VM_MEM_USER | VM_MEM_READ | VM_MEM_WRITE);
}
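
__morecore is the classic hook a malloc implementation calls when it needs more heap; this version rounds every request up to whole pages and never hands out less than four pages. A hypothetical call, assuming a 4 KiB PAGE_SIZE and that VmmAlloc returns NULL on failure:

	size_t n = 1;               /* the allocator asks for a single byte of fresh heap           */
	void *p = __morecore(&n);   /* n is rewritten to 16384 = max(PAGE_ALIGN_UP(1), 4 * PAGE_SIZE);
	                               p points at the new user-readable/writable region, or NULL   */
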
Code example #5
File: main.c Project: 1tgr/mobius
static void *KeMapPhysicalRange(addr_t low, addr_t high, bool as_normal)
{
    return VmmMap((PAGE_ALIGN_UP(high) - PAGE_ALIGN(low)) / PAGE_SIZE,
        low,
        (void*) low,
        NULL,
        as_normal ? VM_AREA_NORMAL : VM_AREA_MAP,
        (as_normal ? VM_MEM_LITERAL : 0) | VM_MEM_READ | VM_MEM_WRITE | VM_MEM_USER);
}
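
The page count here rounds low down and high up, so every page touched by the [low, high) range gets mapped even when both ends are unaligned. A worked example with hypothetical addresses, assuming a 4 KiB page:

    /* low = 0x12345, high = 0x15001:
     * PAGE_ALIGN(low) = 0x12000, PAGE_ALIGN_UP(high) = 0x16000,
     * (0x16000 - 0x12000) / PAGE_SIZE = 4 pages, covering 0x12000..0x15FFF. */
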
Code example #6
File: vm_descriptor.c Project: samueldotj/AceOS
/*! Creates and initializes a vm descriptor
	\param vmap - virtual map of this descriptor
	\param start - starting va address of this descriptor range
	\param end - ending va address of this descriptor range
	\param vm_unit - vm unit which is backing this descriptor
	\param protection - protection for this range
	\return on success pointer to vm descriptor
			on failure null
*/
VM_DESCRIPTOR_PTR CreateVmDescriptor(VIRTUAL_MAP_PTR vmap, VADDR start, VADDR end, VM_UNIT_PTR vm_unit, VM_PROTECTION_PTR protection)
{
	VM_DESCRIPTOR_PTR vd;
	
	assert( PAGE_ALIGN_UP(end-start) <= PAGE_ALIGN_UP(vm_unit->size) );
	
	SpinLock(&vmap->lock);
	vmap->reference_count++;
	SpinUnlock(&vmap->lock);
	
	start = PAGE_ALIGN(start);
	//end = PAGE_ALIGN_UP(end)-1;
	
	vd = (VM_DESCRIPTOR_PTR)kmalloc(sizeof(VM_DESCRIPTOR), KMEM_NO_FAIL);
	//vd = AllocateBuffer( &vm_descriptor_cache, 0 );
	InitVmDescriptor( vd, vmap, start, end, vm_unit, protection);
	SpinLock(&vm_unit->lock);
	vm_unit->reference_count++;
	SpinUnlock(&vm_unit->lock);
	vd->reference_count++;
	return vd;
}
Code example #7
File: rtl8139.c Project: 1tgr/mobius
void RtlInit(rtl8139_t *rtl)
{
    unsigned i;

    wprintf(L"rtl8139: resetting... ");

    SpinAcquire(&rtl->sem);

    /* Bring the chip out of low-power mode. */
    out(rtl->iobase + Config1, 0x00);

    if (RtlReadEeprom(rtl, 0) != 0xffff)
    {
        unsigned short *ap = (unsigned short*)rtl->station_address;
        for (i = 0; i < 3; i++)
            *ap++ = RtlReadEeprom(rtl, i + 7);
    }
    else
    {
        unsigned char *ap = (unsigned char*)rtl->station_address;
        for (i = 0; i < 6; i++)
            *ap++ = in(rtl->iobase + MAC0 + i);
    }

    rtl->speed10 = (in(rtl->iobase + MediaStatus) & MSRSpeed10) != 0;
    rtl->fullduplex = (in16(rtl->iobase + MII_BMCR) & BMCRDuplex) != 0;
    wprintf(L"rtl8139: %sMbps %s-duplex\n", 
        rtl->speed10 ? L"10" : L"100",
        rtl->fullduplex ? L"full" : L"half");

    rtl->rx_phys = MemAlloc();
    rtl->tx_phys = MemAlloc();

    rtl->rx_ring = sbrk_virtual(RX_BUF_LEN);
    rtl->tx_ring = sbrk_virtual(TX_BUF_SIZE);

    wprintf(L"rtl8139: rx_ring = %p, tx_ring = %p\n",
        rtl->rx_ring, rtl->tx_ring);

    MemMapRange(rtl->rx_ring, 
        rtl->rx_phys, 
        rtl->rx_ring + RX_BUF_LEN,
        PRIV_RD | PRIV_PRES | PRIV_KERN);
    MemMapRange(rtl->tx_ring, 
        rtl->tx_phys, 
        (uint8_t*) PAGE_ALIGN_UP((addr_t) rtl->tx_ring + TX_BUF_SIZE),
        PRIV_WR | PRIV_PRES | PRIV_KERN);

    RtlReset(rtl);
    SpinRelease(&rtl->sem);
}
Code example #8
File: vm_descriptor.c Project: samueldotj/AceOS
/*! Searches the vm descriptor AVL tree for a particular VA range*/
static COMPARISION_RESULT compare_vm_descriptor_with_va(struct binary_tree * node1, struct binary_tree * node2)
{
	VM_DESCRIPTOR_PTR d1, d2;
	assert( node1 != NULL );
	assert( node2 != NULL );
	
	d1 = STRUCT_ADDRESS_FROM_MEMBER(node1, VM_DESCRIPTOR, tree_node.bintree);
	d2 = STRUCT_ADDRESS_FROM_MEMBER(node2, VM_DESCRIPTOR, tree_node.bintree);
	
	if( PAGE_ALIGN(d1->start) <= PAGE_ALIGN(d2->start) && PAGE_ALIGN_UP(d1->end)-1 >= d2->end )
		return EQUAL;

	if ( PAGE_ALIGN(d1->start) > PAGE_ALIGN(d2->start) )
		return LESS_THAN;
	else
		return GREATER_THAN;
		
}
Code example #9
File: elf32_loader.cpp Project: emcifuntik/ghost
void g_elf32_loader::loadLoadSegment(elf32_ehdr* header, g_process* process) {

	// Initial values
	uint32_t imageStart = 0xFFFFFFFF;
	uint32_t imageEnd = 0;

	// First find out how much place the image needs in memory
	for (uint32_t i = 0; i < header->e_phnum; i++) {
		elf32_phdr* programHeader = (elf32_phdr*) (((uint32_t) header) + header->e_phoff + (header->e_phentsize * i));
		if (programHeader->p_type != PT_LOAD)
			continue;
		if (programHeader->p_vaddr < imageStart)
			imageStart = programHeader->p_vaddr;
		if (programHeader->p_vaddr + programHeader->p_memsz > imageEnd)
			imageEnd = programHeader->p_vaddr + programHeader->p_memsz;
	}

	// Align the addresses
	imageStart = PAGE_ALIGN_DOWN(imageStart);
	imageEnd = PAGE_ALIGN_UP(imageEnd);

	// Map pages for the executable
	for (uint32_t virt = imageStart; virt < imageEnd; virt += G_PAGE_SIZE) {
		uint32_t phys = g_pp_allocator::allocate();
		g_address_space::map(virt, phys, DEFAULT_USER_TABLE_FLAGS, DEFAULT_USER_PAGE_FLAGS);
		g_pp_reference_tracker::increment(phys);
	}

	// Write the image to memory
	for (uint32_t i = 0; i < header->e_phnum; i++) {
		elf32_phdr* programHeader = (elf32_phdr*) (((uint32_t) header) + header->e_phoff + (header->e_phentsize * i));
		if (programHeader->p_type != PT_LOAD)
			continue;
		g_memory::setBytes((void*) programHeader->p_vaddr, 0, programHeader->p_memsz);
		g_memory::copy((void*) programHeader->p_vaddr, (uint8_t*) (((uint32_t) header) + programHeader->p_offset), programHeader->p_filesz);
	}

	// Set out parameters
	process->imageStart = imageStart;
	process->imageEnd = imageEnd;
}
Code example #10
File: loader.cpp Project: besiano15/ghost
uint32_t g_loader::findFreeMemory(g_multiboot_information* info, uint32_t start, int count) {

	g_log_info("%! searching for %i free pages (starting at %h)", "loader", count, start);
	g_physical_address location = start;

	while (location < 0xFFFFFFFF) {

		bool notWithinModule = true;

		// For each of the required pages, check if it is within a module
		for (int i = 0; i < count; i++) {
			uint32_t pos = location + i * G_PAGE_SIZE;

			// Check whether one of the modules contains this position
			for (uint32_t i = 0; i < info->modulesCount; i++) {
				g_multiboot_module* module = (g_multiboot_module*) (info->modulesAddress + sizeof(g_multiboot_module) * i);

				uint32_t moduleStart = PAGE_ALIGN_DOWN(module->moduleStart);
				uint32_t moduleEnd = PAGE_ALIGN_UP(module->moduleEnd);

				if (pos >= moduleStart && pos < moduleEnd) {
					notWithinModule = false;
					location = moduleEnd;
					break;
				}
			}
		}

		if (notWithinModule) {
			g_log_info("%# found: %h", location);
			return location;
		}

		location += G_PAGE_SIZE;
	}

	panic("%! could not find free memory chunk", "loader");
	return 0;
}
Code example #11
File: kernel.cpp Project: yonick/ghost
void g_kernel::load_ramdisk(g_multiboot_module* ramdiskModule) {

	int ramdiskPages = PAGE_ALIGN_UP(ramdiskModule->moduleEnd - ramdiskModule->moduleStart) / G_PAGE_SIZE;

	g_virtual_address ramdiskNewLocation = g_kernel_virt_addr_ranges->allocate(ramdiskPages);
	if (ramdiskNewLocation == 0) {
		panic("%! not enough virtual space for ramdisk remapping", "kern");
	}

	for (int i = 0; i < ramdiskPages; i++) {
		g_virtual_address virt = ramdiskNewLocation + i * G_PAGE_SIZE;
		g_physical_address phys = g_address_space::virtual_to_physical(ramdiskModule->moduleStart + i * G_PAGE_SIZE);
		g_address_space::map(virt, phys, DEFAULT_KERNEL_TABLE_FLAGS, DEFAULT_KERNEL_PAGE_FLAGS);
	}

	ramdiskModule->moduleEnd = ramdiskNewLocation + (ramdiskModule->moduleEnd - ramdiskModule->moduleStart);
	ramdiskModule->moduleStart = ramdiskNewLocation;

	g_kernel_ramdisk = new g_ramdisk();
	g_kernel_ramdisk->load(ramdiskModule);
	g_log_info("%! ramdisk loaded", "kern");
}
Code example #12
File: mmap.c Project: AnupreetKJohar/weenix
/*
 * This function implements the munmap(2) syscall.
 *
 * As with do_mmap() it should perform the required error checking,
 * before calling upon vmmap_remove() to do most of the work.
 * Remember to clear the TLB.
 */
int
do_munmap(void *addr, size_t len)
{
    if ((uintptr_t) addr < USER_MEM_LOW || USER_MEM_HIGH - (uint32_t) addr < len){
        return -EINVAL;
    }

    if (len == 0){
        return -EINVAL;
    }

    if (!PAGE_ALIGNED(addr)){
        return -EINVAL; 
    }

    int ret = vmmap_remove(curproc->p_vmmap, ADDR_TO_PN(addr),
            (uint32_t) PAGE_ALIGN_UP(len) / PAGE_SIZE);

    /* no need to unmap range or flush the tlb, since this is done in
     * vmmap_remove() */

    return ret;
}
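
Because addr is required to be page aligned, rounding only len up is enough to turn the byte range into whole pages. A worked example, assuming 4 KiB pages and the usual ADDR_TO_PN(addr) == (uint32_t)addr >> 12:

    /* addr = (void *)0x2000, len = 0x1801:
     * ADDR_TO_PN(addr) = 2, PAGE_ALIGN_UP(len) / PAGE_SIZE = 0x2000 / 0x1000 = 2,
     * so vmmap_remove() drops page numbers 2 and 3, i.e. virtual bytes 0x2000..0x3FFF. */
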
Code example #13
File: init.c Project: d33tah/whitix
initcode static DWORD GetMaxPfn()
{
	/* 
	 * By now all the memory detection methods should have been converted to
	 * sorted E820 entries, so find the end of the highest free entry.
	 */

	DWORD retVal=0;
	int i;
	DWORD currEndPfn;
	
	/* Page-aligned start and end of the memory region. */
	QWORD start,end;

	for (i=0; i<entryCount; i++)
	{
		if (entries[i].rangeType != E820_FREE && entries[i].rangeType != E820_ACPI_RECLAIM)
			continue;

		start=PAGE_ALIGN_UP(entries[i].base);
		end=PAGE_ALIGN(entries[i].base+entries[i].length);

		/* 32-bit addresses only. */
		if (end > 0x100000000ULL)
			continue;

		if (start >= end)
			continue;

		currEndPfn = end >> PAGE_SHIFT;

		if (currEndPfn > retVal)
			retVal = currEndPfn;
	}

	return retVal;
}
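
Note that the rounding direction is the opposite of the mapping examples above: when measuring a free region, the start is aligned up and the end aligned down so that only fully usable pages are counted. A worked example with hypothetical E820 values, assuming a 4 KiB page:

	/* entries[i].base = 0x0FF0, entries[i].length = 0x3020 (raw end 0x4010):
	 * start = PAGE_ALIGN_UP(0x0FF0) = 0x1000, end = PAGE_ALIGN(0x4010) = 0x4000,
	 * currEndPfn = 0x4000 >> PAGE_SHIFT = 4, counting the three whole pages 0x1000..0x3FFF. */
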
Code example #14
File: mmap.c Project: AnupreetKJohar/weenix
/*
 * This function implements the mmap(2) syscall, but only
 * supports the MAP_SHARED, MAP_PRIVATE, MAP_FIXED, and
 * MAP_ANON flags.
 *
 * Add a mapping to the current process's address space.
 * You need to do some error checking; see the ERRORS section
 * of the manpage for the problems you should anticipate.
 * After error checking most of the work of this function is
 * done by vmmap_map(), but remember to clear the TLB.
 */
int
do_mmap(void *addr, size_t len, int prot, int flags,
        int fd, off_t off, void **ret)
{
    if (len == 0){
        return -EINVAL;
    }

    if (!valid_map_type(flags)){
        return -EINVAL;
    }

    if (!PAGE_ALIGNED(off)){
        return -EINVAL;
    }

    if (!(flags & MAP_ANON) && (flags & MAP_FIXED) && !PAGE_ALIGNED(addr)){
        return -EINVAL;
    }

    if (addr != NULL && (uint32_t) addr < USER_MEM_LOW){
        return -EINVAL;
    }

    if (len > USER_MEM_HIGH){
        return -EINVAL;
    }

    if (addr != NULL && len > USER_MEM_HIGH - (uint32_t) addr){
        return -EINVAL;
    }

    if (addr == 0 && (flags & MAP_FIXED)){
        return -EINVAL;
    }

/*    if ((!(flags & MAP_PRIVATE) && !(flags & MAP_SHARED))*/
            /*|| ((flags & MAP_PRIVATE) && (flags & MAP_SHARED)))*/
    /*{*/
        /*return -EINVAL;*/
    /*}*/

    vnode_t *vnode;
      
    if (!(flags & MAP_ANON)){
    
        if (!valid_fd(fd) || curproc->p_files[fd] == NULL){
            return -EBADF;
        }

        file_t *f = curproc->p_files[fd];
        vnode = f->f_vnode;

        if ((flags & MAP_PRIVATE) && !(f->f_mode & FMODE_READ)){
            return -EACCES;
        }

        if ((flags & MAP_SHARED) && (prot & PROT_WRITE) &&
                !((f->f_mode & FMODE_READ) && (f->f_mode & FMODE_WRITE)))
        {
            return -EACCES;
        }

        /*if ((prot & PROT_WRITE) && (f->f_mode & FMODE_APPEND)){*/
            /*return -EACCES;*/
        /*}*/
    } else {
        vnode = NULL;
    }

    vmarea_t *vma;

    int retval = vmmap_map(curproc->p_vmmap, vnode, ADDR_TO_PN(addr),
            (uint32_t) PAGE_ALIGN_UP(len) / PAGE_SIZE, prot, flags, off,
            VMMAP_DIR_HILO, &vma);

    KASSERT(retval == 0 || retval == -ENOMEM);

    if (ret != NULL && retval >= 0){
        *ret = PN_TO_ADDR(vma->vma_start);
    
        pt_unmap_range(curproc->p_pagedir, (uintptr_t) PN_TO_ADDR(vma->vma_start),
               (uintptr_t) PN_TO_ADDR(vma->vma_start)
               + (uintptr_t) PAGE_ALIGN_UP(len));
    
        tlb_flush_range((uintptr_t) PN_TO_ADDR(vma->vma_start),
                (uint32_t) PAGE_ALIGN_UP(len) / PAGE_SIZE);
    }
    


    return retval;
}
Code example #15
File: arm_syscall.cpp Project: BananaSlug/esesc
void target_set_brk_arm(uint32_t addr) {
  target_original_brk = target_brk = addr;
  brk_page = PAGE_ALIGN_UP(target_brk);
  printf("original brk 0x%08x brk_page 0x%08x \n", addr, brk_page);
}
Code example #16
File: arm_syscall.cpp Project: BananaSlug/esesc
uint32_t do_syscall_arm(ProgramBase *prog, uint32_t num, uint32_t arg1,uint32_t
    arg2, uint32_t arg3,uint32_t arg4,uint32_t arg5, uint32_t arg6, FILE *syscallTrace)

{ 

#ifdef DEBUG2
  printf ("syscall %d \n", num);
#endif

  uint32_t ret;  
  void  *p;
#if defined (CONFIG_ESESC_system) || defined (CONFIG_ESESC_user)
  struct timespec startTime;
  clock_gettime(CLOCK_REALTIME,&startTime);
#endif

  char tmp_str[128];
  char pc[32];
  char systrace_syscall_num[32];
  char emul_syscall_num[32];
  int i, j, ret1; 
  regex_t re1;
  regmatch_t match[3];

  if (syscallTrace != NULL) {

    // Example trace file section
    //
    //----------
    //pc: 0x8174
    //syscall: 122
    //Linux
    //masca1
    //3.0.0-1205-omap4
    //#10-Ubuntu SMP PREEMPT Thu Sep 29 03:57:24 UTC 2011
    //armv7l
    //----------
    //pc: 0x8230
    //syscall: 4
    //----------

    //printf("Advance to the section \n");
    fgets(tmp_str, sizeof tmp_str, syscallTrace);
    tmp_str[strlen (tmp_str) - 1] = '\0';
    memset(tmp_str, '\0', 128);

    // read pc
    fgets(pc, sizeof pc, syscallTrace);
    pc[strlen (pc) - 1] = '\0';

    // read syscall number
    fgets(tmp_str, sizeof tmp_str, syscallTrace);
    tmp_str[strlen (tmp_str) - 1] = '\0';
    //printf("pc %s, syscall %s \n", pc, tmp_str);

    ret1 = regcomp(&re1, "syscall: ([0-9]+)", REG_EXTENDED);
    ret1 = regexec(&re1, tmp_str, 2, match, 0);
    if(!ret1) {
      j = 0;
      for(i = match[1].rm_so; i < match[1].rm_eo; i++) {
        systrace_syscall_num[j] = tmp_str[i];
        j++;
      }
      systrace_syscall_num[j] = '\0';
    }else{
      I(0);
    }

    sprintf(emul_syscall_num, "%d", num);

    // Sanity check for syscall number
    if (strcmp(systrace_syscall_num, emul_syscall_num) != 0) {
      printf("Emulator syscall trace and expected syscall trace are out of sync \n");
      printf("  syscall num - Expected: %s, Emulator: %s\n", systrace_syscall_num, emul_syscall_num);
      I(0);
      // exit(0);
    }
  }

  switch(num) {
    case TARGET_NR_exit:
      exit(arg1);
      return 0;
      break;
    case TARGET_NR_exit_group:
      exit(arg1);
      return 0;
      break;
    case TARGET_NR_write: 
      ret = write(arg1, prog->g2h(arg2), arg3);
      return ret;
      break;
    case TARGET_NR_read: 
      if (arg3 == 0) {
        ret = 0;
      }else{
        ret = read(arg1, prog->g2h(arg2), arg3);
      }
      return ret;
      break;
      /* case TARGET_NR_ioctl:
         do_ioctl(prog, arg1, arg2, arg3);
         return 0;
         break;*/
    case TARGET_NR_lseek:
      ret = lseek(arg1, arg2, arg3);
      return ret;
      break;
    case TARGET_NR_gettimeofday:
      {          
        // FIXME: Not correct
        struct timeval tv; 
        gettimeofday(&tv, NULL);
      }  
      return 0;
      break;
    case TARGET_NR_open:
      // FIXME: how to check if this is an open(a,b,c) or open(a,b)
      ret = open((const char *)prog->g2h(arg1), arg2);
      return ret;
      break;        
    case TARGET_NR_close:
      ret = close(arg1);
      return ret;
      break;
    case TARGET_NR_mkdir:
      p = lock_user_string(prog, arg1);
      ret = mkdir((const char *)p, arg2);
      return ret; 
      break;
    case TARGET_NR_rmdir:
      p = lock_user_string(prog, arg1);
      ret = rmdir((const char *)p);
      return ret;                
      break;
    case TARGET_NR_rename:
      {
        void *p2;
        p  = lock_user_string(prog, arg1);
        p2 = lock_user_string(prog, arg2);
        if (!p || !p2)
          ret = -TARGET_EFAULT;
        else
          rename((const char *)p, (const char *)p2);
      }
      return 0;
      break;

    case TARGET_NR_uname:
      // copy 390 bytes from syscalltrace to buf
      //Linux
      //masca1
      //3.0.0-1205-omap4
      //#10-Ubuntu SMP PREEMPT Thu Sep 29 03:57:24 UTC 2011
      {
        struct utsname *uts_buf = (struct utsname *)prog->g2h(arg1);

        if (syscallTrace != NULL) {

          for (i = 0; i < 5; i++) {
            memset(tmp_str, '\0', 65);
            fgets(tmp_str, sizeof tmp_str, syscallTrace);
            tmp_str[strlen (tmp_str) - 1] = '\0';
            if(i == 0)
              strcpy(uts_buf->sysname, tmp_str);
            else if(i == 1)
              strcpy(uts_buf->nodename, tmp_str);
            else if(i == 2)
              strcpy(uts_buf->release, tmp_str);
            else if(i == 3)
              strcpy(uts_buf->version, tmp_str);
            else if(i == 4)
              strcpy(uts_buf->machine, tmp_str);
          }
        } else {
          if (uname(uts_buf) < 0)
            return (-1);
        }
      }
      return 0;
      break;

    case TARGET_NR_brk:
      {

        if (!arg1) {
          return target_brk;
        } else if(arg1 < target_original_brk) { 
          return target_brk; 
        } else {

          if(arg1 < brk_page) {
            if(arg1 > target_brk) {
              // initialize as it may contain garbage due to a previous heap usage
              // (grown then shrunken)
              memset(prog->g2h(target_brk), 0, arg1 - target_brk);
            }
            // deallocation is handled automatically here.
            target_brk = arg1;
            return target_brk;
          } 

          // Need to allocate memory.
          // We can't call the host's brk() system call, since that would change the
          // simulator program's own brk address.
          // What needs to grow here is the simulated virtual-memory data segment,
          // so emulate brk by calling realloc to enlarge the simulator's data buffer.

          uint32_t size_incr = PAGE_ALIGN_UP(arg1 - brk_page); 

          uint32_t old_size;
          uint32_t *data_ptr = NULL;
          data_ptr = prog->get_data_buffer(&old_size);

          uint8_t *new_data_ptr = (uint8_t *)realloc(data_ptr, (old_size + size_incr));

          if (new_data_ptr != NULL) {

            prog->set_data_buffer((old_size + size_incr), new_data_ptr);
            target_brk = arg1;
            brk_page = PAGE_ALIGN_UP(target_brk);

            return target_brk;
          } else {
            return (-1);
          }
        }
      }
      return target_brk;
      break;

    case TARGET_NR_fstat64:
      {
        if (syscallTrace == NULL) {
          ret = fstat(arg1, (struct stat *)prog->g2h(arg2));
          return ret;
        } else {
          // copy 144 bytes (size of stat structure) from systrace file
          // number of lines in the file: total 144 bytes/8 bytes per line = 18 lines
          for (i=0; i < 18; i++) {
            memset(tmp_str, '\0', 128);
            fgets(tmp_str, 128, syscallTrace);
            long long int val = strtoull(tmp_str, NULL, 16);
            memcpy( (prog->g2h(arg2 + (i * 8) )), &val, 8);
          }
        }
      }
      return 0;
      break;

    case TARGET_NR_mmap2:
      {
        if (arg1 != 0x00000000) {
          // FIXME: This is the non-zero address case.
          // The address gives a hint to the kernel about where to map the file.
          // We might still do a malloc but need to remember a mapping of the native
          // address (i.e arg1) to pointer returned by malloc.
          printf("TARGET_NR_mmap2 map address not zero!!! This case is not handled \n");
          exit(-1);
        }
        uint8_t *p = (uint8_t *)malloc(arg2);
        if (p == MAP_FAILED)
          return -1;
        else {
          prog->set_mem_mapped_file_endpts((long)p, arg2);
          return (long)p;
        }
      }
      return 0;
      printf("return address 0x%08x \n", p);
      break;

    case TARGET_NR_getpid:
      getpid();
      return 0;
      break;
    case TARGET_NR_kill:
      kill(arg1, target_to_host_signal(arg2));
      return 0;                 
      break;
    case TARGET_NR_pause:
      pause();
      return 0;
      break;
#ifdef TARGET_NR_shutdown
    case TARGET_NR_shutdown:
      shutdown(arg1, arg2);
      return 0;
      break;
#endif
#ifdef TARGET_NR_semget
    case TARGET_NR_semget:
      semget(arg1, arg2, arg3);
      return 0;       
      break; 
#endif
    case TARGET_NR_setdomainname:
      p = lock_user_string(prog, arg1);
      setdomainname((const char *)p, arg2);
      return 0;      
      break;
    case TARGET_NR_getsid:
      getsid(arg1);
      return 0;
      break;
#if defined(TARGET_NR_fdatasync)
      /* Not on alpha (osf_datasync ?) */
    case TARGET_NR_fdatasync:
      fdatasync(arg1);
      return 0;
      break;
#endif
#ifdef TARGET_NR_getuid32
    case TARGET_NR_getuid32:
      getuid();
      return 0;
      break;
#endif 
    case TARGET_NR_ioctl:
      printf("ioctl syscall not implemented\n");
      return 0;
      break;
    default:
      printf("syscall %d is not implemented\n", num);
      exit(-1);
  }    

}
Code example #17
File: elf32.c Project: Aliced3645/os
/* Helper function for the ELF loader. Maps the specified segment
 * of the program header from the given file in to the given address
 * space with the given memory offset (in pages). On success returns 0, otherwise
 * returns a negative error code for the ELF loader to return.
 * Note that since any error returned by this function should
 * cause the ELF loader to give up, it is acceptable for the
 * address space to be modified after returning an error.
 * Note that memoff can be negative */
static int _elf32_map_segment(vmmap_t *map, vnode_t *file, int32_t memoff, const Elf32_Phdr *segment)
{
        uintptr_t addr;
        if (memoff < 0) {
                KASSERT(ADDR_TO_PN(segment->p_vaddr) > (uint32_t) -memoff);
                addr = (uintptr_t)segment->p_vaddr - (uintptr_t)PN_TO_ADDR(-memoff);
        } else {
                addr = (uintptr_t)segment->p_vaddr + (uintptr_t)PN_TO_ADDR(memoff);
        }
        uint32_t off = segment->p_offset;
        uint32_t memsz = segment->p_memsz;
        uint32_t filesz = segment->p_filesz;

        dbg(DBG_ELF, "Mapping program segment: type %#x, offset %#08x,"
            " vaddr %#08x, filesz %#x, memsz %#x, flags %#x, align %#x\n",
            segment->p_type, segment->p_offset, segment->p_vaddr,
            segment->p_filesz, segment->p_memsz, segment->p_flags,
            segment->p_align);

        /* check for bad data in the segment header */
        if (PAGE_SIZE != segment->p_align) {
                dbg(DBG_ELF, "ERROR: segment does not have correct alignment\n");
                return -ENOEXEC;
        } else if (filesz > memsz) {
                dbg(DBG_ELF, "ERROR: segment file size is greater than memory size\n");
                return -ENOEXEC;
        } else if (PAGE_OFFSET(addr) != PAGE_OFFSET(off)) {
                dbg(DBG_ELF, "ERROR: segment address and offset are not aligned correctly\n");
                return -ENOEXEC;
        }

        int perms = 0;
        if (PF_R & segment->p_flags) {
                perms |= PROT_READ;
        }
        if (PF_W & segment->p_flags) {
                perms |= PROT_WRITE;
        }
        if (PF_X & segment->p_flags) {
                perms |= PROT_EXEC;
        }

        if (0 < filesz) {
                /* something needs to be mapped from the file */
                /* start from the starting address and include enough pages to
                 * map all filesz bytes of the file */
                uint32_t lopage = ADDR_TO_PN(addr);
                uint32_t npages = ADDR_TO_PN(addr + filesz - 1) - lopage + 1;
                off_t fileoff = (off_t)PAGE_ALIGN_DOWN(off);

                int ret;
                if (!vmmap_is_range_empty(map, lopage, npages)) {
                        dbg(DBG_ELF, "ERROR: ELF file contains overlapping segments\n");
                        return -ENOEXEC;
                } else if (0 > (ret = vmmap_map(map, file, lopage, npages, perms,
                                                MAP_PRIVATE | MAP_FIXED, fileoff,
                                                0, NULL))) {
                        return ret;
                }
        }

        if (memsz > filesz) {
                /* there is left over memory in the segment which must
                 * be initialized to 0 (anonymously mapped) */
                uint32_t lopage = ADDR_TO_PN(addr + filesz);
                uint32_t npages = ADDR_TO_PN(PAGE_ALIGN_UP(addr + memsz)) - lopage;

                int ret;
                if (npages > 1 && !vmmap_is_range_empty(map, lopage + 1, npages - 1)) {
                        dbg(DBG_ELF, "ERROR: ELF file contains overlapping segments\n");
                        return -ENOEXEC;
                } else if (0 > (ret = vmmap_map(map, NULL, lopage, npages, perms,
                                                MAP_PRIVATE | MAP_FIXED, 0, 0, NULL))) {
                        return ret;
                } else if (!PAGE_ALIGNED(addr + filesz) && filesz > 0) {
                        /* In this case, we have accidentally zeroed too much of memory, as
                         * we zeroed all memory in the page containing addr + filesz.
                         * However, the remaining part of the data is not a full page, so we
                         * should not just map in another page (as there could be garbage
                         * after addr+filesz). For instance, consider the data-bss boundary
                         * (c.f. Intel x86 ELF supplement pp. 82).
                         * To fix this, we need to read in the contents of the file manually
                         * and put them at that user space addr in the anon map we just
                         * added. */
                        void *buf;
                        if (NULL == (buf = page_alloc()))
                                return -ENOMEM;
                        if (!(0 > (ret = file->vn_ops->read(file, (off_t) PAGE_ALIGN_DOWN(off + filesz),
                                                            buf, PAGE_OFFSET(addr + filesz))))) {
                                ret = vmmap_write(map, PAGE_ALIGN_DOWN(addr + filesz),
                                                  buf, PAGE_OFFSET(addr + filesz));
                        }
                        page_free(buf);
                        return ret;
                }
        }
        return 0;
}
Code example #18
File: elf32.c Project: Aliced3645/os
static int _elf32_load(const char *filename, int fd, char *const argv[],
                       char *const envp[], uint32_t *eip, uint32_t *esp)
{
        int err = 0;
        Elf32_Ehdr header;
        Elf32_Ehdr interpheader;

        /* variables to clean up on failure */
        vmmap_t *map = NULL;
        file_t *file = NULL;
        char *pht = NULL;
        char *interpname = NULL;
        int interpfd = -1;
        file_t *interpfile = NULL;
        char *interppht = NULL;
        Elf32_auxv_t *auxv = NULL;
        char *argbuf = NULL;

        uintptr_t entry;

        file = fget(fd);
        KASSERT(NULL != file);

        /* Load and verify the ELF header */
        if (0 > (err = _elf32_load_ehdr(fd, &header, 0))) {
                goto done;
        }

        if (NULL == (map = vmmap_create())) {
                err = -ENOMEM;
                goto done;
        }

        size_t phtsize = header.e_phentsize * header.e_phnum;
        if (NULL == (pht = kmalloc(phtsize))) {
                err = -ENOMEM;
                goto done;
        }
        /* Read in the program header table */
        if (0 > (err = _elf32_load_phtable(fd, &header, pht, phtsize))) {
                goto done;
        }
        /* Load the segments in the program header table */
        if (0 > (err = _elf32_map_progsegs(file->f_vnode, map, &header, pht, 0))) {
                goto done;
        }

        Elf32_Phdr *phinterp = NULL;
        /* Check if program requires an interpreter */
        if (0 > (err = _elf32_find_phinterp(&header, pht, &phinterp))) {
                goto done;
        }

        /* Calculate program bounds for future reference */
        void *proglow;
        void *proghigh;
        _elf32_calc_progbounds(&header, pht, &proglow, &proghigh);

        entry = (uintptr_t) header.e_entry;

        /* if an interpreter was requested load it */
        if (NULL != phinterp) {
                /* read the file name of the interpreter from the binary */
                if (0 > (err = do_lseek(fd, phinterp->p_offset, SEEK_SET))) {
                        goto done;
                } else if (NULL == (interpname = kmalloc(phinterp->p_filesz))) {
                        err = -ENOMEM;
                        goto done;
                } else if (0 > (err = do_read(fd, interpname, phinterp->p_filesz))) {
                        goto done;
                }
                if (err != (int)phinterp->p_filesz) {
                        err = -ENOEXEC;
                        goto done;
                }

                /* open the interpreter */
                dbgq(DBG_ELF, "ELF Interpreter: %*s\n", phinterp->p_filesz, interpname);
                if (0 > (interpfd = do_open(interpname, O_RDONLY))) {
                        err = interpfd;
                        goto done;
                }
                kfree(interpname);
                interpname = NULL;

                interpfile = fget(interpfd);
                KASSERT(NULL != interpfile);

                /* Load and verify the interpreter ELF header */
                if (0 > (err = _elf32_load_ehdr(interpfd, &interpheader, 1))) {
                        goto done;
                }
                size_t interpphtsize = interpheader.e_phentsize * interpheader.e_phnum;
                if (NULL == (interppht = kmalloc(interpphtsize))) {
                        err = -ENOMEM;
                        goto done;
                }
                /* Read in the program header table */
                if (0 > (err = _elf32_load_phtable(interpfd, &interpheader, interppht, interpphtsize))) {
                        goto done;
                }

                /* Interpreter shouldn't itself need an interpreter */
                Elf32_Phdr *interpphinterp;
                if (0 > (err = _elf32_find_phinterp(&interpheader, interppht, &interpphinterp))) {
                        goto done;
                }
                if (NULL != interpphinterp) {
                        err = -EINVAL;
                        goto done;
                }

                /* Calculate the interpreter program size */
                void *interplow;
                void *interphigh;
                _elf32_calc_progbounds(&interpheader, interppht, &interplow, &interphigh);
                uint32_t interpnpages = ADDR_TO_PN(PAGE_ALIGN_UP(interphigh)) - ADDR_TO_PN(interplow);

                /* Find space for the interpreter */
                /* This is the first pn at which the interpreter will be mapped */
                uint32_t interppagebase = (uint32_t) vmmap_find_range(map, interpnpages, VMMAP_DIR_HILO);
                if ((uint32_t) - 1 == interppagebase) {
                        err = -ENOMEM;
                        goto done;
                }

                /* Base address at which the interpreter begins on that page */
                void *interpbase = (void *)((uintptr_t)PN_TO_ADDR(interppagebase) + PAGE_OFFSET(interplow));

                /* Offset from "expected base" in number of pages */
                int32_t interpoff = (int32_t) interppagebase - (int32_t) ADDR_TO_PN(interplow);

                entry = (uintptr_t) interpbase + ((uintptr_t) interpheader.e_entry - (uintptr_t) interplow);

                /* Load the interpreter program header and map in its segments */
                if (0 > (err = _elf32_map_progsegs(interpfile->f_vnode, map, &interpheader, interppht, interpoff))) {
                        goto done;
                }

                /* Build the ELF aux table */
                /* Need to hold AT_PHDR, AT_PHENT, AT_PHNUM, AT_ENTRY, AT_BASE,
                 * AT_PAGESZ, AT_NULL */
                if (NULL == (auxv = (Elf32_auxv_t *) kmalloc(7 * sizeof(Elf32_auxv_t)))) {
                        err = -ENOMEM;
                        goto done;
                }
                Elf32_auxv_t *auxvent = auxv;

                /* Add all the necessary entries */
                auxvent->a_type = AT_PHDR;
                auxvent->a_un.a_ptr = pht;
                auxvent++;

                auxvent->a_type = AT_PHENT;
                auxvent->a_un.a_val = header.e_phentsize;
                auxvent++;

                auxvent->a_type = AT_PHNUM;
                auxvent->a_un.a_val = header.e_phnum;
                auxvent++;

                auxvent->a_type = AT_ENTRY;
                auxvent->a_un.a_ptr = (void *) header.e_entry;
                auxvent++;

                auxvent->a_type = AT_BASE;
                auxvent->a_un.a_ptr = interpbase;
                auxvent++;

                auxvent->a_type = AT_PAGESZ;
                auxvent->a_un.a_val = PAGE_SIZE;
                auxvent++;

                auxvent->a_type = AT_NULL;

        } else {
                /* Just put AT_NULL (we don't really need this at all) */
                if (NULL == (auxv = (Elf32_auxv_t *) kmalloc(sizeof(Elf32_auxv_t)))) {
                        err = -ENOMEM;
                        goto done;
                }
                auxv->a_type = AT_NULL;
        }

        /* Allocate a stack. We put the stack immediately below the program text.
         * (in the Intel x86 ELF supplement pp 59 "example stack", that is where the
         * stack is located). I suppose we can add this "extra page for magic data" too */
        uint32_t stack_lopage = ADDR_TO_PN(proglow) - (DEFAULT_STACK_SIZE / PAGE_SIZE) - 1;
        err = vmmap_map(map, NULL, stack_lopage, (DEFAULT_STACK_SIZE / PAGE_SIZE) + 1,
                        PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED, 0, 0, NULL);
        KASSERT(0 == err);
        dbg(DBG_ELF, "Mapped stack at low addr 0x%p, size %#x\n",
            PN_TO_ADDR(stack_lopage), DEFAULT_STACK_SIZE + PAGE_SIZE);


        /* Copy out arguments onto the user stack */
        int argc, envc, auxc;
        size_t argsize = _elf32_calc_argsize(argv, envp, auxv, phtsize, &argc, &envc, &auxc);
        /* Make sure it fits on the stack */
        if (argsize >= DEFAULT_STACK_SIZE) {
                err = -E2BIG;
                goto done;
        }
        /* Copy arguments into kernel buffer */
        if (NULL == (argbuf = (char *) kmalloc(argsize))) {
                err = -ENOMEM;
                goto done;
        }
        /* Calculate where in user space we start putting the args. */
        void *arglow = (void *)((uintptr_t)(((char *) proglow) - argsize) & ~PTR_MASK);
        /* Copy everything into the user address space, modifying addresses in
         * argv, envp, and auxv to be user addresses as we go. */
        _elf32_load_args(map, arglow, argsize, argbuf, argv, envp, auxv, argc, envc, auxc, phtsize);

        dbg(DBG_ELF, "Past the point of no return. Swapping to map at 0x%p, setting brk to 0x%p\n", map, proghigh);
        /* the final threshold / What warm unspoken secrets will we learn? / Beyond
         * the point of no return ... */

        /* Give the process the new mappings. */
        vmmap_t *tempmap = curproc->p_vmmap;
        curproc->p_vmmap = map;
        map = tempmap; /* So the old maps are cleaned up */
        curproc->p_vmmap->vmm_proc = curproc;
        map->vmm_proc = NULL;

        /* Flush the process pagetables and TLB */
        pt_unmap_range(curproc->p_pagedir, USER_MEM_LOW, USER_MEM_HIGH);
        tlb_flush_all();

        /* Set the process break and starting break (immediately after the mapped-in
         * text/data/bss from the executable) */
        curproc->p_brk = proghigh;
        curproc->p_start_brk = proghigh;

        strncpy(curproc->p_comm, filename, PROC_NAME_LEN);

        /* Tell the caller the correct stack pointer and instruction
         * pointer to begin execution in user space */
        *eip = (uint32_t) entry;
        *esp = ((uint32_t) arglow) - 4; /* Space on the user stack for the (garbage) return address */
        /* Note that the return address will be fixed by the userland entry code,
         * whether in static or dynamic */

        /* And we're done */
        err = 0;

done:
        if (NULL != map) {
                vmmap_destroy(map);
        }
        if (NULL != file) {
                fput(file);
        }
        if (NULL != pht) {
                kfree(pht);
        }
        if (NULL != interpname) {
                kfree(interpname);
        }
        if (0 <= interpfd) {
                do_close(interpfd);
        }
        if (NULL != interpfile) {
                fput(interpfile);
        }
        if (NULL != interppht) {
                kfree(interppht);
        }
        if (NULL != auxv) {
                kfree(auxv);
        }
        if (NULL != argbuf) {
                kfree(argbuf);
        }
        return err;
}
Code example #19
/**
 * Reads the GRUB memory map to find out which memory areas are usable and free.
 * Excludes everything before "reservedAreaEnd" and also excludes the locations
 * of the multiboot modules.
 *
 * @param allocator:		the allocator object in which to mark free addresses
 * @param reservedAreaEnd:	the end address of the reserved area
 */
void MultibootMmapInterpreter::load(BitMapPageAllocator *allocator, uint32_t reservedAreaEnd)
{
	MultibootInformation *mbInfo = EvaLoader::getSetupInformation()->multibootInformation;

	MultibootMmap *map = (MultibootMmap*) mbInfo->memoryMapAddress;
	uint32_t mapListEnd = mbInfo->memoryMapAddress + mbInfo->memoryMapLength;

	// Iterate over the list of memory maps from GRUB
	logInfo("%! memory regions:", "memmap");
	while (((uint32_t) map) < mapListEnd)
	{
		// Check if the map is usable memory
		if (map->type == 1)
		{
			uint64_t areaStart = (uint64_t) map->baseAddressLower | ((uint64_t) map->baseAddressHigher << 32);
			uint64_t areaEnd = areaStart + ((uint64_t) map->lengthLower | ((uint64_t) map->lengthHigher << 32));

			// If this range is out of 32-bit bounds, ignore it
			if (areaStart > 0xFFFFFFFF) logInfo("%# > 0xFFFFFFFF             : not usable");

			else
			{
				logInfon("%#   %h - %h", (uint32_t ) areaStart, (uint32_t ) areaEnd);

				// Make sure that the usable area starts after the reserved (kernel) area
				if (areaStart < reservedAreaEnd) areaStart = reservedAreaEnd;

				// End of the area above the 32-bit limit? Cut it off
				if (areaEnd > 0xFFFFFFFF) areaEnd = 0xFFFFFFFF;

				// Page-align
				areaStart = PAGE_ALIGN_UP(areaStart);
				areaEnd = PAGE_ALIGN_DOWN(areaEnd);

				// Mark as free
				uint32_t chunkCount = 0;
				uint32_t inModule = 0;

				if (areaEnd > areaStart)
				{
					// Split into page sized chunks
					while (areaStart < areaEnd - PAGE_SIZE)
					{
						// Exclude memory within modules
						bool isInModule = false;
						for (uint32_t i = 0; i < mbInfo->modulesCount; i++)
						{
							MultibootModule *module = (MultibootModule*) (mbInfo->modulesAddress + sizeof(MultibootModule) * i);

							if ((areaStart >= PAGE_ALIGN_DOWN(module->moduleStart)) && (areaStart < PAGE_ALIGN_UP(module->moduleEnd)))
							{
								isInModule = true;
								break;
							}
						}

						// If it's not inside a module, mark it as free
						if (isInModule) ++inModule;

						else
						{
							allocator->markFree(areaStart);
							++chunkCount;
						}

						areaStart = areaStart + PAGE_SIZE;
					}
				}

				logInfo(": %i available (%i blocked)", chunkCount, inModule);
			}
		}

		// Skip to the next map entry (the trailing sizeof(uint32_t) is GRUB-specific; see the Multiboot docs)
		map = (MultibootMmap*) ((uint32_t) map + map->size + sizeof(uint32_t));
	}
}
Code example #20
File: exec.c Project: bajdcc/MiniOS
int exec(char *path, char **argv) {
    int i;
    char *s, *name;
    uint32_t sz, sp, off, argc, pa, ustack[3 + MAX_ARGC + 1];
    pde_t *pgdir, *old_pgdir;
    struct inode *ip;
    struct elf32hdr eh;
    struct proghdr ph;

    pgdir = 0;
    i = off = 0;

    pgdir = (pde_t *)pmm_alloc();
    kvm_init(pgdir);

    // from here on, any failure jumps to bad: so that pgdir gets freed
    if ((ip = p2i(path)) == 0) {
        goto bad;
    }
    ilock(ip);

    // read elf header
    if (iread(ip, (char *)&eh, 0, sizeof(eh)) < (int)sizeof(eh)) {
        goto bad;
    }

    if (eh.magic != ELF_MAGIC) {
        goto bad;
    }

    // load program to memory
    sz = USER_BASE;
    for (i = 0, off = eh.phoff; i < eh.phnum; i++, off += sizeof(ph)) {
        if (iread(ip, (char *)&ph, off, sizeof(ph)) != sizeof(ph)) {
            goto bad;
        }
        if (ph.type != ELF_PROG_LOAD) {
            continue;
        }
        if (ph.memsz < ph.filesz) {
            goto bad;
        }
        if ((sz = uvm_alloc(pgdir, sz, ph.vaddr + ph.memsz)) == 0) {
            goto bad;
        }
        if (uvm_load(pgdir, ph.vaddr, ip, ph.off, ph.filesz) < 0) {
            goto bad;
        }
    }

    iunlockput(ip);
    ip = 0;

    /* build user stack */
    sz = PAGE_ALIGN_UP(sz);
    if ((sz = uvm_alloc(pgdir, sz, sz + 2*PAGE_SIZE)) == 0) {
        goto bad;
    }

    /* leave an inaccessible (kernel-only) guard page below the user stack */
    if (vmm_get_mapping(pgdir, sz - 2*PAGE_SIZE, &pa) == 0) {  // fail if this page is not mapped
        goto bad;
    }
    vmm_map(pgdir, sz - 2*PAGE_SIZE, pa, PTE_K | PTE_P | PTE_W);

    sp = sz;
    if (vmm_get_mapping(pgdir, sz - PAGE_SIZE, &pa) == 0) {  // fail if this page is not mapped
        goto bad;
    }
    pa += PAGE_SIZE;

    for (argc = 0; argv[argc]; argc++) {
        if (argc > MAX_ARGC) {
            goto bad;
        }
        // "+1" leava room for '\0'  "&~3" align 4
        sp = (sp - (strlen(argv[argc]) + 1)) & ~3;    // sync with pa
        pa = (pa - (strlen(argv[argc]) + 1)) & ~3;    

        strcpy((char *)pa, argv[argc]);
        ustack[3+argc] = sp;  // argv[argc]
    }

    ustack[3+argc] = 0;

    ustack[0] = 0xffffffff;
    ustack[1] = argc;   // count of arguments
    ustack[2] = sp - (argc+1)*4;    // pointer of argv[0]

    sp -= (3 + argc + 1)*4;
    pa -= (3 + argc + 1)*4;
    memcpy((void *)pa, ustack, (3 + argc + 1)*4);   // copy fake return address, argc, argv pointer and argv[] onto the user stack

    for (name = s = path; *s; s++) {
        if (*s == '/') {
            name = s + 1;
        }
    }

    cli();
    strncpy(proc->name, name, sizeof(proc->name));

    old_pgdir = proc->pgdir;
    proc->pgdir = pgdir;
    proc->size = sz - USER_BASE;
    proc->fm->eip = eh.entry;
    proc->fm->user_esp = sp;
    uvm_switch(proc);

    uvm_free(old_pgdir);
    old_pgdir  = 0;
    old_pgdir ++;
    sti();

    return 0;

bad:
    if (pgdir) {
        uvm_free(pgdir);
    }
    if (ip) {
        iunlockput(ip);
    }
    return -1;
}
Code example #21
File: v86.c Project: 1tgr/mobius
static void *aligned_alloc(size_t bytes)
{
    return VmmAlloc(PAGE_ALIGN_UP(bytes) / PAGE_SIZE, NULL, VM_MEM_READ | VM_MEM_WRITE);
}
Code example #22
File: brk.c Project: darshanramu/LinuxLikeOS
/*
 * This function implements the brk(2) system call.
 *
 * This routine manages the calling process's "break" -- the ending address
 * of the process's "dynamic" region (often also referred to as the "heap").
 * The current value of a process's break is maintained in the 'p_brk' member
 * of the proc_t structure that represents the process in question.
 *
 * The 'p_brk' and 'p_start_brk' members of a proc_t struct are initialized
 * by the loader. 'p_start_brk' is subsequently never modified; it always
 * holds the initial value of the break. Note that the starting break is
 * not necessarily page aligned!
 *
 * 'p_start_brk' is the lower limit of 'p_brk' (that is, setting the break
 * to any value less than 'p_start_brk' should be disallowed).
 *
 * The upper limit of 'p_brk' is defined by the minimum of (1) the
 * starting address of the next occurring mapping or (2) USER_MEM_HIGH.
 * That is, growth of the process break is limited only in that it cannot
 * overlap with/expand into an existing mapping or beyond the region of
 * the address space allocated for use by userland. (note the presence of
 * the 'vmmap_is_range_empty' function).
 *
 * The dynamic region should always be represented by at most ONE vmarea.
 * Note that vmareas only have page granularity, you will need to take this
 * into account when deciding how to set the mappings if p_brk or p_start_brk
 * is not page aligned.
 *
 * You are guaranteed that the process data/bss region is non-empty.
 * That is, if the starting brk is not page-aligned, its page has
 * read/write permissions.
 *
 * If addr is NULL, you should NOT fail as the man page says. Instead,
 * "return" the current break. We use this to implement sbrk(0) without writing
 * a separate syscall. Look in user/libc/syscall.c if you're curious.
 *
 * Also, despite the statement on the manpage, you MUST support combined use
 * of brk and mmap in the same process.
 *
 * Note that this function "returns" the new break through the "ret" argument.
 * Return 0 on success, -errno on failure.
 */
int do_brk(void *addr, void **ret) {
	/*NOT_YET_IMPLEMENTED("VM: do_brk");*/
	vmarea_t *vmarea;
	/*If addr is NULL, "return" the current break.*/
	dbg(DBG_PRINT, "(GRADING3D 3)\n");
	if (addr == NULL || addr == curproc->p_brk) {
		dbg(DBG_PRINT, "(GRADING3D 3)\n");
		*ret = curproc->p_brk;
		return 0;
	}

	/*check for the address range*/
	if (((uint32_t) addr > USER_MEM_HIGH) || (curproc->p_start_brk > addr)) {
		dbg(DBG_PRINT, "(GRADING3D 3)\n");
		return -ENOMEM;
	}

	/*if p_brk and addr are not page aligned*/
	if (ADDR_TO_PN(
			PAGE_ALIGN_UP(curproc->p_brk)) != ADDR_TO_PN(PAGE_ALIGN_UP(addr))) {
		dbg(DBG_PRINT, "(GRADING3D 3)\n");
		if (addr > curproc->p_brk) {
			dbg(DBG_PRINT, "(GRADING3D 3)\n");
			if (!vmmap_is_range_empty(curproc->p_vmmap,
					ADDR_TO_PN(PAGE_ALIGN_UP(curproc->p_brk)),
					ADDR_TO_PN(
							PAGE_ALIGN_UP(addr)) -ADDR_TO_PN(PAGE_ALIGN_UP(curproc->p_brk)))) {
				dbg(DBG_PRINT, "(GRADING3D 3)\n");
				return -ENOMEM;
			}
			vmarea = vmmap_lookup(curproc->p_vmmap,
					ADDR_TO_PN(PAGE_ALIGN_UP(curproc->p_start_brk)));
			if (vmarea != NULL) {
				dbg(DBG_PRINT, "(GRADING3D 3)\n");
				vmarea->vma_end = ADDR_TO_PN(PAGE_ALIGN_UP(addr));
			} else {
				dbg(DBG_PRINT, "(GRADING3D 3)\n");
				vmmap_map(curproc->p_vmmap,
				NULL, ADDR_TO_PN(PAGE_ALIGN_UP(curproc->p_start_brk)),
						ADDR_TO_PN(
								PAGE_ALIGN_UP(addr)) - ADDR_TO_PN(PAGE_ALIGN_UP(curproc->p_start_brk)),
						PROT_READ | PROT_WRITE, MAP_PRIVATE, 0, VMMAP_DIR_LOHI,
						&vmarea);
			}
		} else {
			dbg(DBG_PRINT, "(GRADING3D 3)\n");
			vmmap_remove(curproc->p_vmmap, ADDR_TO_PN(PAGE_ALIGN_UP(addr)),
					ADDR_TO_PN(
							PAGE_ALIGN_UP(curproc->p_brk)) -ADDR_TO_PN(PAGE_ALIGN_UP(addr)));
		}

		curproc->p_brk = addr;
		*ret = addr;
	} else {
		dbg(DBG_PRINT, "(GRADING3D 3)\n");
		curproc->p_brk = addr;
		*ret = addr;
		return 0;
	}
	return 0;
}
Code example #23
static bool
mm_GetLibraryInfo(const void *libPtr, DynLibInfo &lib)
{
	uintptr_t baseAddr;

	if (libPtr == NULL)
	{
		return false;
	}

#ifdef _WIN32

	MEMORY_BASIC_INFORMATION info;
	IMAGE_DOS_HEADER *dos;
	IMAGE_NT_HEADERS *pe;
	IMAGE_FILE_HEADER *file;
	IMAGE_OPTIONAL_HEADER *opt;

	if (!VirtualQuery(libPtr, &info, sizeof(MEMORY_BASIC_INFORMATION)))
	{
		return false;
	}

	baseAddr = reinterpret_cast<uintptr_t>(info.AllocationBase);

	/* All this is for our insane sanity checks :o */
	dos = reinterpret_cast<IMAGE_DOS_HEADER *>(baseAddr);
	pe = reinterpret_cast<IMAGE_NT_HEADERS *>(baseAddr + dos->e_lfanew);
	file = &pe->FileHeader;
	opt = &pe->OptionalHeader;

	/* Check PE magic and signature */
	if (dos->e_magic != IMAGE_DOS_SIGNATURE || pe->Signature != IMAGE_NT_SIGNATURE || opt->Magic != IMAGE_NT_OPTIONAL_HDR32_MAGIC)
	{
		return false;
	}

	/* Check architecture, which is 32-bit/x86 right now
	* Should change this for 64-bit if Valve gets their act together
	*/
	if (file->Machine != IMAGE_FILE_MACHINE_I386)
	{
		return false;
	}

	/* For our purposes, this must be a dynamic library */
	if ((file->Characteristics & IMAGE_FILE_DLL) == 0)
	{
		return false;
	}

	/* Finally, we can do this */
	lib.memorySize = opt->SizeOfImage;

#elif defined __linux__

	Dl_info info;
	Elf32_Ehdr *file;
	Elf32_Phdr *phdr;
	uint16_t phdrCount;

	if (!dladdr(libPtr, &info))
	{
		return false;
	}

	if (!info.dli_fbase || !info.dli_fname)
	{
		return false;
	}

	/* This is for our insane sanity checks :o */
	baseAddr = reinterpret_cast<uintptr_t>(info.dli_fbase);
	file = reinterpret_cast<Elf32_Ehdr *>(baseAddr);

	/* Check ELF magic */
	if (memcmp(ELFMAG, file->e_ident, SELFMAG) != 0)
	{
		return false;
	}

	/* Check ELF version */
	if (file->e_ident[EI_VERSION] != EV_CURRENT)
	{
		return false;
	}

	/* Check ELF architecture, which is 32-bit/x86 right now
	* Should change this for 64-bit if Valve gets their act together
	*/
	if (file->e_ident[EI_CLASS] != ELFCLASS32 || file->e_machine != EM_386 || file->e_ident[EI_DATA] != ELFDATA2LSB)
	{
		return false;
	}

	/* For our purposes, this must be a dynamic library/shared object */
	if (file->e_type != ET_DYN)
	{
		return false;
	}

	phdrCount = file->e_phnum;
	phdr = reinterpret_cast<Elf32_Phdr *>(baseAddr + file->e_phoff);

	for (uint16_t i = 0; i < phdrCount; i++)
	{
		Elf32_Phdr &hdr = phdr[i];

		/* We only really care about the segment with executable code */
		if (hdr.p_type == PT_LOAD && hdr.p_flags == (PF_X|PF_R))
		{
			/* From glibc, elf/dl-load.c:
			 * c->mapend = ((ph->p_vaddr + ph->p_filesz + GLRO(dl_pagesize) - 1)
			 * & ~(GLRO(dl_pagesize) - 1));
			 *
			 * In glibc, the segment file size is aligned up to the nearest page size and
			 * added to the virtual address of the segment. We just want the size here.
			 */
			lib.memorySize = PAGE_ALIGN_UP(hdr.p_filesz);
			break;
		}
	}

#elif defined __APPLE__

	Dl_info info;
	struct mach_header *file;
	struct segment_command *seg;
	uint32_t cmd_count;

	if (!dladdr(libPtr, &info))
	{
		return false;
	}

	if (!info.dli_fbase || !info.dli_fname)
	{
		return false;
	}

	/* This is for our insane sanity checks :o */
	baseAddr = (uintptr_t)info.dli_fbase;
	file = (struct mach_header *)baseAddr;

	/* Check Mach-O magic */
	if (file->magic != MH_MAGIC)
	{
		return false;
	}

	/* Check architecture (32-bit/x86) */
	if (file->cputype != CPU_TYPE_I386 || file->cpusubtype != CPU_SUBTYPE_I386_ALL)
	{
		return false;
	}

	/* For our purposes, this must be a dynamic library */
	if (file->filetype != MH_DYLIB)
	{
		return false;
	}

	cmd_count = file->ncmds;
	seg = (struct segment_command *)(baseAddr + sizeof(struct mach_header));

	/* Add up memory sizes of mapped segments */
	for (uint32_t i = 0; i < cmd_count; i++)
	{
		if (seg->cmd == LC_SEGMENT)
		{
			lib.memorySize += seg->vmsize;
		}

		seg = (struct segment_command *)((uintptr_t)seg + seg->cmdsize);
	}

#endif

	lib.baseAddress = reinterpret_cast<void *>(baseAddr);

	return true;
}
Code example #24
File: memtest.c Project: Aliced3645/os
static int test_brk_bounds(void)
{
        void *oldbrk, *newbrk;
        int status;

        printf("Testing boundaries and permissions of brk()\n");

        /* "Stabilize" our old brk at a page boundary */
        test_assert((void *) - 1 != (oldbrk = sbrk(0)), NULL);
        oldbrk = PAGE_ALIGN_UP(oldbrk);
        test_assert(0 == brk(oldbrk), NULL);

        /* Look at next page-aligned addr */
        newbrk = (char *)oldbrk + PAGE_SIZE;

        assert_fault(char foo = *(char *)newbrk, "");
        assert_fault(*(char *)newbrk = 'a', "");

        /* Move brk to next page-aligned addr */
        test_assert(0 == brk(newbrk), NULL);

        /* Access the new memory */
        test_assert('\0' == *(char *)oldbrk, NULL);
        test_assert('\0' == *((char *)newbrk - 1), NULL);
        *((char *)newbrk - 1) = 'a';

        assert_fault(char foo = *(char *)newbrk, "");
        assert_fault(*(char *)newbrk = 'a', "");

        /* Move brk up by 1 byte */
        test_assert(0 == brk((char *)newbrk + 1), NULL);

        /* Access the new memory */
        test_assert('\0' == *(char *)newbrk, NULL);
        test_assert('\0' == *((char *)newbrk + PAGE_SIZE - 1), NULL);
        assert_nofault(*(char *)newbrk = 'b', "");

        /* Old memory didn't change */
        test_assert('a' == *((char *)newbrk - 1), NULL);

        /* Move it back */
        test_assert(0 == brk(newbrk), NULL);

        assert_fault(char foo = *(char *)newbrk, "");
        assert_fault(*(char *)newbrk = 'a', "");

        /* Move it up, make sure region wiped. Note that the actual wipe test is
         * 'evil' and is in eviltest. This just checks to make sure the brk region
         * is private mapped (modified in subprocesses) */
        test_assert(0 == brk((char *)newbrk + PAGE_SIZE), NULL);
        test_assert('\0' == *(char *)newbrk, NULL);
        test_assert('\0' == *((char *)newbrk + PAGE_SIZE - 1), NULL);

        /* Move it down by 1 byte */
        test_assert(0 == brk((char *)newbrk - 1), NULL);

        /* Access still-accessible memory */
        test_assert('a' == *((char *)newbrk - 1), NULL);
        *((char *)newbrk - 2) = 'z';

        /* Move brk to multiple addrs on same page, make sure page remains */
        test_assert(0 == brk((char *)newbrk - 1000), NULL);
        test_assert('z' == *((char *)newbrk - 2), NULL);
        test_assert(0 == brk((char *)oldbrk + 1), NULL);
        test_assert('z' == *((char *)newbrk - 2), NULL);
        test_assert(0 == brk((char *)oldbrk + 1000), NULL);
        test_assert('a' == *((char *)newbrk - 1), NULL);

        return 0;
}
コード例 #25
0
ファイル: bot_sigscan.cpp プロジェクト: Deathreus/RCBot2
bool CSignatureFunction::getLibraryInfo(const void *libPtr, DynLibInfo &lib)
{
	uintptr_t baseAddr;

	if (libPtr == NULL)
	{
		return false;
	}

#ifdef _WIN32

	MEMORY_BASIC_INFORMATION info;
	IMAGE_DOS_HEADER *dos;
	IMAGE_NT_HEADERS *pe;
	IMAGE_FILE_HEADER *file;
	IMAGE_OPTIONAL_HEADER *opt;

	if (!VirtualQuery(libPtr, &info, sizeof(MEMORY_BASIC_INFORMATION)))
	{
		return false;
	}

	baseAddr = reinterpret_cast<uintptr_t>(info.AllocationBase);

	// All this is for our insane sanity checks :o 
	dos = reinterpret_cast<IMAGE_DOS_HEADER *>(baseAddr);
	pe = reinterpret_cast<IMAGE_NT_HEADERS *>(baseAddr + dos->e_lfanew);
	file = &pe->FileHeader;
	opt = &pe->OptionalHeader;

	// Check PE magic and signature 
	if (dos->e_magic != IMAGE_DOS_SIGNATURE || pe->Signature != IMAGE_NT_SIGNATURE || opt->Magic != IMAGE_NT_OPTIONAL_HDR32_MAGIC)
	{
		return false;
	}

	// Check architecture, which is 32-bit/x86 right now
	// Should change this for 64-bit if Valve gets their act together
	if (file->Machine != IMAGE_FILE_MACHINE_I386)
	{
		return false;
	}

	// For our purposes, this must be a dynamic library
	if ((file->Characteristics & IMAGE_FILE_DLL) == 0)
	{
		return false;
	}

	// Finally, we can do this
	lib.memorySize = opt->SizeOfImage;

#else
	Dl_info info;
	Elf32_Ehdr *file;
	Elf32_Phdr *phdr;
	uint16_t phdrCount;

	if (!dladdr(libPtr, &info))
	{
		return false;
	}

	if (!info.dli_fbase || !info.dli_fname)
	{
		return false;
	}

	// This is for our insane sanity checks :o 
	baseAddr = reinterpret_cast<uintptr_t>(info.dli_fbase);
	file = reinterpret_cast<Elf32_Ehdr *>(baseAddr);

	// Check ELF magic 
	if (memcmp(ELFMAG, file->e_ident, SELFMAG) != 0)
	{
		return false;
	}

	// Check ELF version 
	if (file->e_ident[EI_VERSION] != EV_CURRENT)
	{
		return false;
	}

	// Check ELF architecture, which is 32-bit/x86 right now
	// Should change this for 64-bit if Valve gets their act together
	if (file->e_ident[EI_CLASS] != ELFCLASS32 || file->e_machine != EM_386 || file->e_ident[EI_DATA] != ELFDATA2LSB)
	{
		return false;
	}

	// For our purposes, this must be a dynamic library/shared object 
	if (file->e_type != ET_DYN)
	{
		return false;
	}

	phdrCount = file->e_phnum;
	phdr = reinterpret_cast<Elf32_Phdr *>(baseAddr + file->e_phoff);

	for (uint16_t i = 0; i < phdrCount; i++)
	{
		Elf32_Phdr &hdr = phdr[i];

		// We only really care about the segment with executable code 
		if (hdr.p_type == PT_LOAD && hdr.p_flags == (PF_X|PF_R))
		{
			// From glibc, elf/dl-load.c:
			// c->mapend = ((ph->p_vaddr + ph->p_filesz + GLRO(dl_pagesize) - 1) 
			//             & ~(GLRO(dl_pagesize) - 1));
			//
			// In glibc, the segment file size is aligned up to the nearest page size and
			// added to the virtual address of the segment. We just want the size here.
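			// PAGE_ALIGN_UP itself is project-defined and not part of this excerpt; a
			// common form is ((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1), so with 4 KiB
			// pages a p_filesz of 0x1234 would round up to 0x2000.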
			
			lib.memorySize = PAGE_ALIGN_UP(hdr.p_filesz);
			break;
		}
	}
#endif

	lib.baseAddress = reinterpret_cast<void *>(baseAddr);

	return true;
}
コード例 #26
0
ファイル: addrspace.c プロジェクト: dwizard/darkside-kernel
/* Allocate regions of a virtual address space */
void *addrspace_alloc(addrspace_t *addrspace, size_t size_reserved, size_t size_committed, int flags)
{
	/* Get the address space pointer */
	addrspace = resolve_addrspace(addrspace);

	/* Round up both the reserved and committed sizes to a page boundary */
	size_reserved = PAGE_ALIGN_UP(size_reserved);
	size_committed = PAGE_ALIGN_UP(size_committed);

	/* Make sure we don't commit more than we reserve */
	if (size_committed > size_reserved)
	{
		size_committed = size_reserved;
	}

	/* Search the address space for a free region of suitable size */
	spinlock_recursive_acquire(&addrspace->lock);
	vad_t *vad = &addrspace->free;
	while (vad)
	{
		/* Move on if it doesn't fit our allocation */
		if (vad->length < size_reserved)
		{
			vad = vad->next;
			continue;
		}

		/* Store the starting address of the allocation */
		vaddr_t address = vad->start;

		/* Create the guard page if requested */
		vaddr_t i = address;
		if (flags & GUARD_BOTTOM)
		{
			vmm_map_page(addrspace->address_space, i, 0, PAGE_INVALID);
			i += PAGE_SIZE;
		}

		/* Commit all the needed pages */
		for (; i < address + size_committed; i += PAGE_SIZE)
		{
			int color = vaddr_cache_color(i, addrspace->numa_domain, 0);
			vmm_map_page(addrspace->address_space, i, pmm_alloc_page(0, addrspace->numa_domain, color), flags);
		}

		/* Modify the free VAD or remove it entirely */
		if (size_reserved < vad->length)
		{
			vad->start += size_reserved;
			vad->length -= size_reserved;
		}
		else
		{
			/* Later VAD */
			if (vad != &addrspace->free)
			{
				/* Readjust the linked list */
				vad->prev->next = vad->next;
				vad->next->prev = vad->prev;

				/* Free the VAD */
				slab_cache_free(vad_cache, vad);
			}
			/* Root VAD */
			else
			{
				/* Copy the next VAD into the root one */
				vad_t *vad_next = vad->next;
				memcpy(vad, vad_next, sizeof(vad_t));

				/* Free the dynamically-allocated VAD */
				slab_cache_free(vad_cache, vad_next);
			}
		}

		/* Record metadata, unless told not to */
		if (!(flags & PAGE_PRIVATE))
		{
			/* Create a new VAD to represent the now-used region */
			vad = slab_cache_alloc(vad_cache);
			vad->start = address;
			vad->length = size_reserved;
			vad->flags = flags;
			vad->left = vad->right = NULL;
			vad->height = 0;

			/* Insert it into the tree */
			addrspace->used_root = vad_tree_insert(addrspace->used_root, vad);
		}

		/* Return the address of the allocated region */
		spinlock_recursive_release(&addrspace->lock);
		return (void*) address;
	}

	/* No free region of the address space available */
	spinlock_recursive_release(&addrspace->lock);
	return NULL;
}
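The excerpts above all lean on a project-specific PAGE_ALIGN_UP macro whose definition is not shown on this page. As a rough reference only, a minimal sketch of the usual round-down/round-up pair for a power-of-two page size (the names, the uintptr_t casts and the 4096-byte PAGE_SIZE are assumptions, not taken from these repositories) could look like this:

#include <stdint.h>

/* Minimal sketch, assuming PAGE_SIZE is a power of two (e.g. 4 KiB). */
#define PAGE_SIZE        4096UL
#define PAGE_ALIGN(x)    ((uintptr_t)(x) & ~(uintptr_t)(PAGE_SIZE - 1))                   /* round down */
#define PAGE_ALIGN_UP(x) (((uintptr_t)(x) + PAGE_SIZE - 1) & ~(uintptr_t)(PAGE_SIZE - 1)) /* round up */

With these definitions PAGE_ALIGN(0x1001) evaluates to 0x1000 and PAGE_ALIGN_UP(0x1001) to 0x2000, which matches how the snippets on this page round region sizes and break values to page boundaries.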
コード例 #27
0
ファイル: loader.cpp プロジェクト: besiano15/ghost
void g_loader::initialize(g_multiboot_information* multibootInformation) {

	// Store multiboot structure
	setupInformation.multibootInformation = multibootInformation;

	// Begin initialization
	g_log_info("%! loader initializing", "loader");

	// End of the loader binary in memory
	uint32_t loaderEndAddress = PAGE_ALIGN_UP((uint32_t) &endAddress);

	// Find free spaces to place the GDT and the bitmap
	uint32_t gdtAreaStart = findFreeMemory(multibootInformation, loaderEndAddress, 1);
	uint32_t gdtAreaEnd = gdtAreaStart + G_PAGE_SIZE;

	uint32_t bitmapStart = findFreeMemory(multibootInformation, gdtAreaEnd, PAGE_ALIGN_UP(G_BITMAP_SIZE) / G_PAGE_SIZE);
	uint32_t bitmapEnd = PAGE_ALIGN_UP(bitmapStart + G_BITMAP_SIZE);
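	// PAGE_ALIGN_UP(G_BITMAP_SIZE) / G_PAGE_SIZE is the number of page frames the bitmap
	// occupies; with a hypothetical G_BITMAP_SIZE of 128 KiB and 4 KiB pages that would
	// be 32 frames.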

	// The "reservedAreaEnd" is the end of the memory (somewhere above 1MiB)
	// that is not occupied by the loader binary or the pages that we split
	// off for use as bitmap and GDT.
	uint32_t reservedAreaEnd = bitmapEnd;

#if G_LOGGING_DEBUG
	// Information output
	g_log_debug("%! available modules:", "mmodule");
	for (uint32_t i = 0; i < multibootInformation->modulesCount; i++) {
		g_multiboot_module* module = (g_multiboot_module*) (multibootInformation->modulesAddress + sizeof(g_multiboot_module) * i);
		g_log_debug("%#   '%s' at %h - %h", module->path, module->moduleStart, module->moduleEnd);
	}

	g_log_debug("%! calculated addresses:", "loader");
	g_log_debug("%#   gdt area:            %h", gdtAreaStart);
	g_log_debug("%#   bitmap:              %h", bitmapStart);
	g_log_debug("%#   reserved area end:   %h", reservedAreaEnd);
#endif

	// Store setup information
	setupInformation.bitmapStart = bitmapStart;
	setupInformation.bitmapEnd = bitmapEnd;

	// Set up the GDT. Here we pass the address of the gdt area, which contains enough space to
	// create the descriptor table and its pointer.
	g_gdt_manager::initialize(gdtAreaStart);

	// Read GRUB map to add free pages to the allocator
	physicalAllocator.initialize((g_bitmap_entry*) bitmapStart);
	g_multiboot_mmap_interpreter::load(&physicalAllocator, reservedAreaEnd);

	// Set up paging, this relocates the multiboot modules
	g_paging_initializer::initialize(reservedAreaEnd, &setupInformation);
	// IMPORTANT: Now the multiboot module location has changed!

	// Load kernel binary
	g_log_info("%! locating kernel binary...", "loader");
	g_multiboot_module* kernelModule = g_multiboot_util::findModule(setupInformation.multibootInformation, "/boot/kernel");
	if (kernelModule) {
		g_log_info("%! found kernel binary at %h, loading...", "loader", kernelModule->moduleStart);
		g_kernel_loader::load(kernelModule);
		g_loader::panic("%! something went wrong during boot process, halting", "loader");
	} else {
		g_loader::panic("%! kernel module not found", "loader");
	}
}