Example #1
void plat_boot(void){
	int i;
	for(i=0;init[i];i++){
		init[i]();
	}
	init_sys_mmu();
	start_mmu();
//	timer_init();

	init_page_map();
	kmalloc_init();

	ramdisk_driver_init();
	romfs_init();

	struct inode *node;

	char *buf=(char *)0x30100000;

	if((node=fs_type[ROMFS]->namei(fs_type[ROMFS],"main.bin"))==(void *)0){
		printk("inode read eror\n");
		goto HALT;
	}
	

	if(fs_type[ROMFS]->device->dout(fs_type[ROMFS]->device,buf,fs_type[ROMFS]->get_daddr(node),node->dsize)){
		printk("dout error\n");
		goto HALT;
	}

	exec(buf);
	
HALT:
	while(1);
}
Example #2
void kmain(s64 magic, s64 info)
{
	//vga_clear(COLOR_BLACK);
    idt_init();
    isr_init();

    serial_init();
	set_debug_traps();
    BREAKPOINT();

	cpuid_print();
	multiboot(magic, info);
	kmem_map();
    page_init();
    kmalloc_init();
    //vesa_init();

    root_init();
    pci_init();
    vm_init();
    syscall_init();
    timer_init();
    kbd_init();
    //mouse_init();

    console_init();

 	create_kthread(NULL, idle_thread, THREAD_PRI_LOW, NULL, NULL);
 	create_kthread(NULL, init_thread, THREAD_PRI_NORMAL, NULL, NULL);

    thread_schedule();
}
Example #3
void plat_boot(void){
	int i;
	for(i=0;init[i];i++){
		init[i]();
	}
	init_sys_mmu();
	start_mmu();
	test_mmu();
	test_printk();
//	timer_init();
	init_page_map();
	kmalloc_init();
	char *p1,*p2,*p3,*p4;
	p1=kmalloc(127);
	printk("the first alloced address is %x\n",p1);
	p2=kmalloc(124);
	printk("the second alloced address is %x\n",p2);
	kfree(p1);
	kfree(p2);
	p3=kmalloc(119);
	printk("the third alloced address is %x\n",p3);
	p4=kmalloc(512);
	printk("the forth alloced address is %x\n",p4);
	while(1);
}
Example #4
void plat_boot(void) {
    int i;
    for(i=0; init[i]; i++) {
        init[i]();
    }
    init_sys_mmu();
    start_mmu();
//	timer_init();

    init_page_map();
    kmalloc_init();

    ramdisk_driver_init();
    romfs_init();

    struct inode *node;
    char buf[128];

    node=fs_type[ROMFS]->namei(fs_type[ROMFS],"number.txt");

    /* Cap the transfer and the printout at the buffer size so a large
       file cannot overflow the 128-byte stack buffer. */
    int len = node->dsize < sizeof(buf) ? node->dsize : sizeof(buf);

    fs_type[ROMFS]->device->dout(fs_type[ROMFS]->device,buf,fs_type[ROMFS]->get_daddr(node),len);

    for(i=0; i<len; i++) {
        printk("%c ",buf[i]);
    }

    while(1);
}
Example #5
//pmm_init - set up a pmm to manage physical memory, build the PDT & PT to enable paging
//         - check the correctness of the pmm & paging mechanism, print the PDT & PT
void
pmm_init(void) {
    //We need to alloc/free physical memory (at a granularity of 4KB or another size),
    //so a framework for a physical memory manager (struct pmm_manager) is defined in pmm.h.
    //First we should init a physical memory manager (pmm) based on that framework;
    //then the pmm can alloc/free physical memory.
    //Now the first_fit/best_fit/worst_fit/buddy_system pmm are available.
    init_pmm_manager();

    // detect physical memory space, reserve already used memory,
    // then use pmm->init_memmap to create free page list
    page_init();

    //use pmm->check to verify the correctness of the alloc/free function in a pmm
    check_alloc_page();

    // create boot_pgdir, an initial page directory (Page Directory Table, PDT)
    boot_pgdir = boot_alloc_page();
    memset(boot_pgdir, 0, PGSIZE);
    boot_cr3 = PADDR(boot_pgdir);

    check_pgdir();


    static_assert(KERNBASE % PTSIZE == 0 && KERNTOP % PTSIZE == 0);

    // recursively insert boot_pgdir in itself
    // to form a virtual page table at virtual address VPT
 //   cprintf("haah1\n");
    // map all physical memory to linear memory with base linear addr KERNBASE
    //linear_addr KERNBASE~KERNBASE+KMEMSIZE = phy_addr 0~KMEMSIZE
    //But this map shouldn't be used until enable_paging() & gdt_init() have finished.
    boot_map_segment(boot_pgdir, 0, KMEMSIZE, 0, PTE_TYPE_URWX_SRWX | PTE_R | PTE_V);
    boot_pgdir[PDX(VPT)] = PADDR(boot_pgdir) | PTE_TYPE_TABLE | PTE_R | PTE_V;
   // pgdir_alloc_page(boot_pgdir, USTACKTOP-PGSIZE , PTE_TYPE_URW_SRW);
    //cprintf("haha2\n");
    //temporary map: 
    //virtual_addr 3G~3G+4M = linear_addr 0~4M = linear_addr 3G~3G+4M = phy_addr 0~4M     
    //boot_pgdir[0] = boot_pgdir[PDX(KERNBASE)];
    //cprintf("OK!\n");
    enable_paging();
//    cprintf("haah\n");
    //reload gdt(third time,the last time) to map all physical memory
    //virtual_addr 0~4G=liear_addr 0~4G
    //then set kernel stack(ss:esp) in TSS, setup TSS in gdt, load TSS
    //gdt_init();

    //disable the map of virtual_addr 0~4M
    //boot_pgdir[0] = 0;

    //now the basic virtual memory map (see memlayout.h) is established.
    //check the correctness of the basic virtual memory map.
    check_boot_pgdir();

    print_pgdir();

    kmalloc_init();

}
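
The comments in Example #5 refer to a physical memory manager framework (struct pmm_manager) declared in pmm.h but not shown here. Below is a minimal sketch of what such an interface looks like; the field set follows the ucore-style convention the comments suggest, so treat the exact names and types as assumptions.

struct Page;    /* per-page bookkeeping structure, defined elsewhere (e.g. memlayout.h) */

struct pmm_manager {
    const char *name;                                 /* e.g. "first_fit_pmm_manager" */
    void (*init)(void);                               /* initialise internal free-list state */
    void (*init_memmap)(struct Page *base, size_t n); /* register a run of n free pages */
    struct Page *(*alloc_pages)(size_t n);            /* allocate n contiguous pages */
    void (*free_pages)(struct Page *base, size_t n);  /* return pages to the free list */
    size_t (*nr_free_pages)(void);                    /* number of free pages left */
    void (*check)(void);                              /* self-test used by check_alloc_page() */
};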
Example #6
void arch_early_init(void) {
	arm_mmu_init();
	kmalloc_init(&__heap, HEAP_SIZE);
	arm_mmu_remap_evt();
	exception_init();
	//clean_user_space();
	show_arch_info();
}
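
Example #6 hands kmalloc_init() a heap start symbol and a size. A minimal sketch of the declarations such a call usually relies on follows; the symbol is typically placed by the linker script, and the prototype and the 1 MB size here are assumptions, not the project's actual definitions.

extern char __heap;                   /* heap start, usually defined in the linker script */
#define HEAP_SIZE (1024 * 1024)       /* assumed heap size: 1 MB */

void kmalloc_init(void *start, unsigned int size);   /* assumed prototype */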
Example #7
/*
 * kmain
 * 
 * This is the first thing that executes when the kernel starts. Any
 * initialisation, e.g. interrupt handlers, must be done at the start
 * of the function. This function should never return.
 */
void kmain(multiboot * mb)
{
	setup_segmentation();
	setup_interrupts();
	kmalloc_init();

	/*
	 * Clear the screen 
	 */
	unsigned int x;
	unsigned int y;
	for (y = 0; y < SCREEN_HEIGHT; y++)
		for (x = 0; x < SCREEN_WIDTH; x++)
			screen[y * SCREEN_WIDTH + x].c = ' ';

	/*
	 * Place the cursor on line 0 to start with 
	 */
	move_cursor(xpos, ypos);

	kprintf("%s\n%s\n%s\n\n\n\n", VERSION, COPYRIGHT, DISCLAIMER);

	assert(1 == mb->mods_count);
	assert(mb->mods_addr[0].mod_end < 2 * MB);
	filesystem = (char *)mb->mods_addr[0].mod_start;
	/*
	 * Check here for the size of the RAM disk. Because we use a
	 * hard-coded value of 2MB for the start of the kernel's private
	 * data area, we can't safely work with filesystems that extend
	 * into this area. This is really just a hack to avoid the
	 * additional complexity of computing the right place to start
	 * the kernel and page memory regions, but suffices for our
	 * purposes.
	 */
	if (mb->mods_addr[0].mod_end >= 2 * MB)
		assert(!"Filesystem goes beyond the 2MB limit. Please use a smaller filesystem.");

	pid_t pid = start_process(launch_shell);
	input_pipe = processes[pid].filedesc[STDIN_FILENO]->p;

	/*
	 * Go in to user mode and enable interrupts
	 */
	enter_user_mode();

	/*
	 * Loop indefinitely... we should never return from this function
	 */

	/*
	 * Pretty soon a context switch will occur, and the processor
	 * will jump out of this loop and start executing the first
	 * scheduled process
	 */
	while (1) ;
}
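
Example #7 clears a screen[] array of character cells. A minimal sketch of the declarations it appears to assume, based on the standard 80x25 VGA text-mode buffer at 0xB8000; the struct and variable names are hypothetical, only the .c member is taken from the example.

#define SCREEN_WIDTH  80
#define SCREEN_HEIGHT 25

struct vga_cell {
	unsigned char c;      /* character byte */
	unsigned char attr;   /* colour attribute byte */
};

/* The VGA text-mode buffer lives at physical address 0xB8000. */
static volatile struct vga_cell *const screen = (volatile struct vga_cell *)0xB8000;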
Example #8
void plat_boot(void) {
    int i;
    for (i=0; init[i]; i++) {
        init[i]();
    }
    init_sys_mmu();
    start_mmu();
//    timer_init();

    init_page_map();
    kmalloc_init();
    ramdisk_driver_init();
    romfs_init();

    struct inode *node;
    struct elf32_phdr *phdr;
    struct elf32_ehdr *ehdr;
    int phnum, pos, dpos;
    char *buf;

    if ((buf = (char *)kmalloc(1024)) == (void *)0) {
        printk("get free pages error\n");
        goto HALT;
    }

    if ((node = fs_type[ROMFS]->namei(fs_type[ROMFS], "main")) == (void *)0) {
        printk("inode read error\n");
        goto HALT;
    }

    if (fs_type[ROMFS]->device->dout(fs_type[ROMFS]->device, buf, \
                fs_type[ROMFS]->get_daddr(node), node->dsize)) {
        printk("dount error\n");
        goto HALT;
    }

    ehdr = (struct elf32_ehdr *)buf;
    phdr = (struct elf32_phdr *)((char *)buf+ehdr->e_phoff);
    
    for (i = 0; i< ehdr->e_phnum; i++) {
        if (CHECK_PT_TYPE_LOAD(phdr)) {
            if (fs_type[ROMFS]->device->dout(fs_type[ROMFS]->device,\
                        (char *)phdr->p_vaddr, \
                        fs_type[ROMFS]->get_daddr(node) + \
                        phdr->p_offset, phdr->p_filesz) < 0) {
                printk("dout error\n");
                goto HALT;
            }
        }
        phdr++;
    }

    exec(ehdr->e_entry);

HALT:
    while (1);
}
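
Example #8 parses an ELF image through elf32_ehdr/elf32_phdr and a CHECK_PT_TYPE_LOAD macro that are not shown. Here is a minimal sketch of the assumed definitions, following the standard ELF32 layout; the macro name comes from the example, but its body below is an assumption.

#define PT_LOAD 1
#define CHECK_PT_TYPE_LOAD(phdr) ((phdr)->p_type == PT_LOAD)   /* assumed definition */

struct elf32_ehdr {
    unsigned char  e_ident[16];
    unsigned short e_type;
    unsigned short e_machine;
    unsigned int   e_version;
    unsigned int   e_entry;      /* entry point handed to exec() */
    unsigned int   e_phoff;      /* file offset of the program header table */
    unsigned int   e_shoff;
    unsigned int   e_flags;
    unsigned short e_ehsize;
    unsigned short e_phentsize;
    unsigned short e_phnum;      /* number of program headers */
    unsigned short e_shentsize;
    unsigned short e_shnum;
    unsigned short e_shstrndx;
};

struct elf32_phdr {
    unsigned int p_type;         /* PT_LOAD marks a loadable segment */
    unsigned int p_offset;       /* segment offset within the file */
    unsigned int p_vaddr;        /* virtual address to load at */
    unsigned int p_paddr;
    unsigned int p_filesz;       /* bytes to copy from the image */
    unsigned int p_memsz;
    unsigned int p_flags;
    unsigned int p_align;
};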
Example #9
void
kernel_main_init(void) {
    //__asm__(".cont:\n\tmov %rsp, %rax\n\tmov %rsp, %rbx\n\tint $34\n\tsub %rsp, %rax\n\tjz .cont\n\thlt");
    InitializeTimer();
    SetTimerEnableMode(ENABLE);

    kmalloc_init ();
    ProcessSys_Initialize();
    Thread_Initialize();
    KeyMan_Initialize();
    RegisterCore(0, NULL);
    CreateThread(ROOT_PID, ThreadPermissionLevel_Kernel, (ThreadEntryPoint)kernel_main, NULL);

    CoreUpdate();  //BSP is core 0
}
Example #10
void kmain( struct multiboot_info * info )
{
	page_init();	/* we start up with a hacked segment base, so */
	gdt_init();		/* get paging enabled and a real GDT installed first. */
	
	vga_clear();
	
	put_status_line( 1, "Paging enabled." );
	put_status_line( 1, "Starting Physical Memory Allocator..." );
	phys_alloc_init( info );
	
	put_status_line( 1, "Starting Kernel Heap Allocator..." );
	kmalloc_init();	
	page_init_finish();
	
	/* install other default handlers */
	
	timer_init( 50 );
	
	/* test the heap allocator */
	int * foo = kmalloc( 240 );
	vga_puts( "Allocation test: " );
	vga_put_hex( (u32) foo );
	vga_puts( "\n" );
	
	*foo = 42;	/* shouldn't die */
	
	put_status_line( 1, "Scanning PCI buses..." );
	pci_enum_devices();
	
	/* finished initializing, so turn on the interrupts */
	enable_interrupts();
	
//	asm volatile( "int $0x3" );
	
	for(;;)
		halt();
}
Example #11
void plat_boot(void) {
    int i;
    for (i=0; init[i]; i++) {
        init[i]();
    }
    init_sys_mmu();
    start_mmu();
    task_init();
    timer_init();

    init_page_map();
    kmalloc_init();
    ramdisk_driver_init();
    romfs_init();
    
    i = do_fork(test_process, (void *)0x1);
    i = do_fork(test_process, (void *)0x2);

    while (1) {
        delay ();
        printk("this is the original process\n");
    }

}
Example #12
int main() {
  vm_page_t *page = pm_alloc(1);

  MALLOC_DEFINE(mp, "testing memory pool");

  kmalloc_init(mp);
  kmalloc_add_arena(mp, page->vaddr, PAGESIZE);

  void *ptr1 = kmalloc(mp, 15, 0);
  assert(ptr1 != NULL);

  void *ptr2 = kmalloc(mp, 23, 0);
  assert(ptr2 != NULL && ptr2 > ptr1);

  void *ptr3 = kmalloc(mp, 7, 0);
  assert(ptr3 != NULL && ptr3 > ptr2);

  void *ptr4 = kmalloc(mp, 2000, 0);
  assert(ptr4 != NULL && ptr4 > ptr3);

  void *ptr5 = kmalloc(mp, 1000, 0);
  assert(ptr5 != NULL);

  kfree(mp, ptr1);
  kfree(mp, ptr2);
  kmalloc_dump(mp);
  kfree(mp, ptr3);
  kfree(mp, ptr5);

  void *ptr6 = kmalloc(mp, 2000, M_NOWAIT);
  assert(ptr6 == NULL);

  pm_free(page);

  return 0;
}
Example #13
void init(void)
{
    TID_t startup_thread;
    int numcpus;

    /* Initialize polling TTY driver for kprintf() usage. */
    polltty_init();

    kwrite("BUENOS is a University Educational Nutshell Operating System\n");
    kwrite("==========================================================\n");
    kwrite("\n");

    kwrite("Copyright (C) 2003-2006  Juha Aatrokoski, Timo Lilja,\n");
    kwrite("  Leena Salmela, Teemu Takanen, Aleksi Virtanen\n");
    kwrite("See the file COPYING for licensing details.\n");
    kwrite("\n");

    kwrite("Initializing memory allocation system\n");
    kmalloc_init();

    kwrite("Reading boot arguments\n");
    bootargs_init();

    /* Seed the random number generator. */
    if (bootargs_get("randomseed") == NULL) {
	_set_rand_seed(0);
    } else {
	int seed = atoi(bootargs_get("randomseed"));
	kprintf("Seeding pseudorandom number generator with %i\n", seed);
	_set_rand_seed(seed);
    }

    numcpus = cpustatus_count();
    kprintf("Detected %i CPUs\n", numcpus);
    KERNEL_ASSERT(numcpus <= CONFIG_MAX_CPUS);

    kwrite("Initializing interrupt handling\n");
    interrupt_init(numcpus);

    kwrite("Initializing threading system\n");
    thread_table_init();

    kwrite("Initializing user process system\n");
    process_init();

    kwrite("Initializing sleep queue\n");
    sleepq_init();

    kwrite("Initializing semaphores\n");
    semaphore_init();

    kwrite("Initializing device drivers\n");
    device_init();

    kprintf("Initializing virtual filesystem\n");
    vfs_init();

    kwrite("Initializing scheduler\n");
    scheduler_init();

    kwrite("Initializing virtual memory\n");
    vm_init();

    kprintf("Creating initialization thread\n");
    startup_thread = thread_create(&init_startup_thread, 0);
    thread_run(startup_thread);

    kprintf("Starting threading system and SMP\n");

    /* Let other CPUs run */
    kernel_bootstrap_finished = 1;
    
    _interrupt_clear_bootstrap();
    _interrupt_enable();

    /* Enter context switch, scheduler will be run automatically,
       since thread_switch() behaviour is identical to timer tick
       (thread timeslice is over). */
    thread_switch();

    /* We should never get here */
    KERNEL_PANIC("Threading system startup failed.");
}
Example #14
/*
 * Bootstrap-CPU start; we came from head.S
 */
void __no_return kernel_start(void)
{
	/* Before anything else, zero the bss section. As said by C99:
	 * “All objects with static storage duration shall be inited
	 * before program startup”, and that the implicit init is done
	 * with zero. Kernel assembly code also assumes a zeroed BSS
	 * space */
	clear_bss();

	/*
	 * Very-early setup: Do not call any code that will use
	 * printk(), `current', per-CPU vars, or a spin lock.
	 */

	setup_idt();

	schedulify_this_code_path(BOOTSTRAP);

	/*
	 * Memory Management init
	 */

	print_info();

	/* First, don't override the ramdisk area (if any) */
	ramdisk_init();

	/* Then discover our physical memory map .. */
	e820_init();

	/* and tokenize the available memory into allocatable pages */
	pagealloc_init();

	/* With the page allocator in place, get rid of our temporary
	 * early-boot page tables and set up dynamic permanent ones */
	vm_init();

	/* MM basics done, enable dynamic heap memory to kernel code
	 * early on .. */
	kmalloc_init();

	/*
	 * Secondary-CPUs startup
	 */

	/* Discover our secondary-CPUs and system IRQs layout before
	 * initializing the local APICs */
	mptables_init();

	/* Remap and mask the PIC; it's just a disturbance */
	serial_init();
	pic_init();

	/* Initialize the APICs (and map their MMIO regs) before enabling
	 * IRQs, and before firing other cores using Inter-CPU Interrupts */
	apic_init();
	ioapic_init();

	/* SMP infrastructure ready, fire the CPUs! */
	smpboot_init();

	keyboard_init();

	/* Startup finished, roll-in the scheduler! */
	sched_init();
	local_irq_enable();

	/*
	 * Second part of kernel initialization (Scheduler is now on!)
	 */

	ext2_init();

	// Signal the secondary cores to run their own test-case code.
	// They've been waiting for us (thread 0) until all kernel
	// subsystems have been properly initialized.  Wait No More!
	smpboot_trigger_secondary_cores_testcases();

	run_test_cases();
	halt();
}
Example #15
void mm_init()
{
	page_alloc_init(STATIC_MEM, 0, __pa(k_reloc_end));
	kmalloc_init();
	paging_init();
}
Example #16
asmlinkage void start_kernel(void)
{
	char * command_line;

/*
 *	This little check will move.
 */

#ifdef __SMP__
	static int first_cpu=1;
	
	if(!first_cpu)
		start_secondary();
	first_cpu=0;
	
#endif	
/*
 * Interrupts are still disabled. Do necessary setups, then
 * enable them
 */
	setup_arch(&command_line, &memory_start, &memory_end);
	memory_start = paging_init(memory_start,memory_end);
	trap_init();
#ifndef	CONFIG_OSFMACH3
	init_IRQ();
#endif	/* CONFIG_OSFMACH3 */
	sched_init();
	time_init();
	parse_options(command_line);
#ifdef CONFIG_MODULES
	init_modules();
#endif
#ifdef CONFIG_PROFILE
	if (!prof_shift)
#ifdef CONFIG_PROFILE_SHIFT
		prof_shift = CONFIG_PROFILE_SHIFT;
#else
		prof_shift = 2;
#endif
#endif
	if (prof_shift) {
		prof_buffer = (unsigned int *) memory_start;
		/* only text is profiled */
		prof_len = (unsigned long) &_etext - (unsigned long) &_stext;
		prof_len >>= prof_shift;
		memory_start += prof_len * sizeof(unsigned int);
		memset(prof_buffer, 0, prof_len * sizeof(unsigned int));
	}
	memory_start = console_init(memory_start,memory_end);
#ifdef CONFIG_PCI
	memory_start = pci_init(memory_start,memory_end);
#endif
	memory_start = kmalloc_init(memory_start,memory_end);
	sti();
	calibrate_delay();
	memory_start = inode_init(memory_start,memory_end);
	memory_start = file_table_init(memory_start,memory_end);
	memory_start = name_cache_init(memory_start,memory_end);
#ifndef	CONFIG_OSFMACH3
#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && initrd_start < memory_start) {
		printk(KERN_CRIT "initrd overwritten (0x%08lx < 0x%08lx) - "
		    "disabling it.\n",initrd_start,memory_start);
		initrd_start = 0;
	}
#endif
#endif	/* CONFIG_OSFMACH3 */
	mem_init(memory_start,memory_end);
	buffer_init();
	sock_init();
#if defined(CONFIG_SYSVIPC) || defined(CONFIG_KERNELD)
	ipc_init();
#endif
	dquot_init();
	arch_syms_export();
	sti();
	check_bugs();

	printk(linux_banner);
#ifdef __SMP__
	smp_init();
#endif
	sysctl_init();
	/* 
	 *	We count on the initial thread going ok 
	 *	Like idlers init is an unlocked kernel thread, which will
	 *	make syscalls (and thus be locked).
	 */
#ifdef	CONFIG_OSFMACH3
	osfmach3_start_init(argv_init, envp_init);
#else	/* CONFIG_OSFMACH3 */
	kernel_thread(init, NULL, 0);
#endif	/* CONFIG_OSFMACH3 */
/*
 * task[0] is meant to be used as an "idle" task: it may not sleep, but
 * it might do some general things like count free pages or it could be
 * used to implement a reasonable LRU algorithm for the paging routines:
 * anything that can be useful, but shouldn't take time from the real
 * processes.
 *
 * Right now task[0] just does an infinite idle loop.
 */
 	cpu_idle(NULL);
}
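
In Example #16 (an early Linux start_kernel), kmalloc_init and several other *_init calls take the current memory_start/memory_end pair and return an updated memory_start, carving their static tables out of the boot-time region before the real allocators exist. A minimal sketch of that calling convention follows; the function name, the table size, and the body are purely illustrative.

/* Illustrative only: each boot-time *_init() claims what it needs from
 * the [memory_start, memory_end) region and returns the new start. */
unsigned long example_table_init(unsigned long memory_start,
                                 unsigned long memory_end)
{
	unsigned long table_size = 4096;            /* assumed size of this table */
	void *table = (void *)memory_start;         /* table lives at the old start */

	memset(table, 0, table_size);               /* initialise the claimed space */
	(void)memory_end;                           /* bound checks omitted in this sketch */
	return memory_start + table_size;           /* hand back the new free start */
}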
Example #17
void kernel_init(multiboot_info_t *mboot_info)
{
	extern char __start_bss[], __stop_bss[];

	memset(__start_bss, 0, __stop_bss - __start_bss);
	/* mboot_info is a physical address.  while some arches currently have the
	 * lower memory mapped, everyone should have it mapped at kernbase by now.
	 * also, it might be in 'free' memory, so once we start dynamically using
	 * memory, we may clobber it. */
	multiboot_kaddr = (struct multiboot_info*)((physaddr_t)mboot_info
                                               + KERNBASE);
	extract_multiboot_cmdline(multiboot_kaddr);

	cons_init();
	print_cpuinfo();

	printk("Boot Command Line: '%s'\n", boot_cmdline);

	exception_table_init();
	cache_init();					// Determine the system's cache properties
	pmem_init(multiboot_kaddr);
	kmem_cache_init();              // Sets up slab allocator
	kmalloc_init();
	hashtable_init();
	radix_init();
	cache_color_alloc_init();       // Inits data structs
	colored_page_alloc_init();      // Allocates colors for agnostic processes
	acpiinit();
	topology_init();
	kthread_init();					/* might need to tweak when this happens */
	vmr_init();
	file_init();
	page_check();
	idt_init();
	kernel_msg_init();
	timer_init();
	vfs_init();
	devfs_init();
	train_timing();
	kb_buf_init(&cons_buf);
	arch_init();
	block_init();
	enable_irq();
	run_linker_funcs();
	/* reset/init devtab after linker funcs 3 and 4.  these run NIC and medium
	 * pre-inits, which need to happen before devether. */
	devtabreset();
	devtabinit();

#ifdef CONFIG_EXT2FS
	mount_fs(&ext2_fs_type, "/dev/ramdisk", "/mnt", 0);
#endif /* CONFIG_EXT2FS */
#ifdef CONFIG_ETH_AUDIO
	eth_audio_init();
#endif /* CONFIG_ETH_AUDIO */
	get_coreboot_info(&sysinfo);
	booting = 0;

#ifdef CONFIG_RUN_INIT_SCRIPT
	if (run_init_script()) {
		printk("Configured to run init script, but no script specified!\n");
		manager();
	}
#else
	manager();
#endif
}
Example #18
int main(void)
{
	char *foo, *bar, *tmp;
	
	if (kmalloc_init(heap_mem, sizeof(heap_mem)) < 0)
	{
		printf("kmalloc_init() returned < 0: some error\n");
		return 1;
	}

	
	foo = kmalloc(strlen(TEST_STRING_1) + 1);
	if (foo == NULL)
	{
		printf("kmalloc() returned a NULL pointer\n");
		return 1;
	}
	strcpy(foo, TEST_STRING_1);
	if (strcmp(foo, TEST_STRING_1) != 0)
	{
		printf("kmalloc() failed\n");
		return 1;
	}
	
	bar = kmalloc(strlen(TEST_STRING_1) + 1);
	if (bar == NULL)
	{
		printf("kmalloc() returned a NULL pointer\n");
		return 1;
	}
	strcpy(bar, TEST_STRING_1);
	if (strcmp(bar, TEST_STRING_1) != 0)
	{
		printf("kmalloc() failed\n");
		return 1;
	}

	printf("%s\n%s\n", foo, bar);


	tmp = krealloc(foo, strlen(TEST_STRING_2) + 1);
	if (tmp == NULL)
	{
		printf("krealloc() returned a NULL pointer\n");
		return 1;
	}
	foo = tmp;
	strcpy(foo, TEST_STRING_2);
	
	printf("%s\n", foo);
	
	tmp = krealloc(foo, 0); /* works like free */
	if (tmp != NULL)
	{
		printf("krealloc(void *, 0) didn't return NULL\n");
		return 1;
	}
	kfree(bar);

	return 0;
}
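
Example #18 uses heap_mem, TEST_STRING_1 and TEST_STRING_2 without showing their definitions. A minimal sketch of the surrounding declarations it appears to assume; the string contents and the 64 KB heap size are made up for illustration.

#include <stdio.h>
#include <string.h>

#define TEST_STRING_1 "hello from kmalloc"            /* assumed contents */
#define TEST_STRING_2 "a somewhat longer test string" /* assumed contents */

static char heap_mem[64 * 1024];   /* static buffer handed to kmalloc_init() */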
Example #19
size_t	rmm_init(struct multiboot_info* mbi) {
  uint64_t	max_contiguous_size = 0;
  multiboot_memory_map_t* mmap = (multiboot_memory_map_t*)mbi->mmap_addr;
  
  // Sanity checks
  if (sizeof(struct rmm_internal) > 4096*1024)
    panic("struct rmm_internal is larger than a chunk");
  if (sizeof(struct rmm_pageinfo) > 4)
    panic("struct rmm_pageinfo is larger than 32 bits");
  
  // Scans the memory map given by Multiboot to find the largest
  // contiguous chunk. We will use this chunk, and only this chunk,
  // for allocation.
  while((uint32_t)mmap < mbi->mmap_addr + mbi->mmap_length) {
    if (mmap->type == MULTIBOOT_MEMORY_AVAILABLE) {
      if (mmap->len > max_contiguous_size) {
	max_contiguous_size = mmap->len;
	rmm_gl_max_physical_addr = mmap->addr + mmap->len;
	rmm_gl_min_physical_addr = mmap->addr;
      }
    }
    mmap = (multiboot_memory_map_t*)((unsigned int)mmap + mmap->size + sizeof(unsigned int));
  }
  if (max_contiguous_size == 0)
    panic("No available physical memory according to Multiboot");

  // Scans the ELF sections to remove our kernel's memory from the
  // available pool (this also includes the kernel's stack)
  Elf32_Shdr* shdr = (Elf32_Shdr*)mbi->u.elf_sec.addr;
  unsigned int num = mbi->u.elf_sec.num;
  for (unsigned int i = 0; i < num; i++) {
    // Case 1 : the section overlaps only the beginning of our memory area
    if (shdr[i].sh_addr <= rmm_gl_min_physical_addr &&
	shdr[i].sh_addr + shdr[i].sh_size > rmm_gl_min_physical_addr)
      rmm_gl_min_physical_addr = shdr[i].sh_addr + shdr[i].sh_size;
    // Case 2 : the section overlaps only the end of our memory area
    else if (shdr[i].sh_addr < rmm_gl_max_physical_addr &&
	     shdr[i].sh_addr + shdr[i].sh_size >= rmm_gl_max_physical_addr)
      rmm_gl_max_physical_addr = shdr[i].sh_addr;
    // Case 3 : the section is totally contained in our memory area
    else if (shdr[i].sh_addr >= rmm_gl_min_physical_addr &&
	     shdr[i].sh_addr + shdr[i].sh_size <= rmm_gl_max_physical_addr) {
      // Case 3a : we have more space AFTER the section
      if (rmm_gl_max_physical_addr - (shdr[i].sh_addr + shdr[i].sh_size) >
	  shdr[i].sh_addr - rmm_gl_min_physical_addr)
	rmm_gl_min_physical_addr = shdr[i].sh_addr + shdr[i].sh_size;
      // Case 3b : we have more space BEFORE the section
      else
	rmm_gl_max_physical_addr = shdr[i].sh_addr;
    }
  }

  // Rounds the boundaries of the available memory to CHUNK_SIZE (4M)
  if (rmm_gl_min_physical_addr & (~0xffc00000))
    rmm_gl_min_physical_addr = (rmm_gl_min_physical_addr + CHUNK_SIZE) & 0xffc00000;
  rmm_gl_max_physical_addr = rmm_gl_max_physical_addr & 0xffc00000;

  // Puts the metadata for RMM at the beginning of the available memory
  rmm_gl_metadata_addr = (struct rmm_internal*)rmm_gl_min_physical_addr;
  rmm_gl_min_physical_addr += CHUNK_SIZE;

  // Puts the kernel kmalloc()'d data after RMM's data
  size_t kmalloc_size = KMALLOC_REQUIRED_SPACE;
  // kmalloc_size must be aligned on CHUNK_SIZE
  if (kmalloc_size & (~0xffc00000))
    kmalloc_size = (kmalloc_size + CHUNK_SIZE) & 0xffc00000;
  kmalloc_size = kmalloc_size & 0xffc00000;  
  kmalloc_init(rmm_gl_min_physical_addr, kmalloc_size);
  rmm_gl_min_physical_addr += kmalloc_size;
  
  // Sanity check - do we still have some memory left ?
  if (rmm_gl_max_physical_addr <= rmm_gl_min_physical_addr)
    panic("No free physical memory");
  
  // Now we can fill in the metadata
  // First we set the whole structure to 0, just in case
  memset(rmm_gl_metadata_addr, '\0', sizeof(struct rmm_internal));
  // Then we set each chunk as having 1024 free pages
  for (uint32_t chunkID = 0; chunkID < 1024; chunkID++)
    rmm_gl_metadata_addr->chunk[chunkID].free_pages_count = 1024;
  // Finally, we iterate on each page
  for (uint32_t pageID = 0; pageID < 1024*1024; pageID++) {
    // Mark all the protected pages as referenced even though no
    // paging context uses them
    if (pageID * PAGE_SIZE < rmm_gl_min_physical_addr) {
      rmm_gl_metadata_addr->page[pageID].ref_count = 1;
      // We also mark the associated chunk as having one less free page
      rmm_gl_metadata_addr->chunk[pageID / 1024].free_pages_count--;
    }
    if (pageID * PAGE_SIZE >= rmm_gl_max_physical_addr) {
      rmm_gl_metadata_addr->page[pageID].ref_count = 1;
      // We also mark the associated chunk as having one less free page
      rmm_gl_metadata_addr->chunk[pageID / 1024].free_pages_count--;
    }
  }
  return rmm_gl_max_physical_addr - rmm_gl_min_physical_addr;
}
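
Example #19 rounds the usable memory boundaries to the 4 MB CHUNK_SIZE with the 0xffc00000 mask (the top 10 bits of a 32-bit address). Below is a small standalone check of that arithmetic, assuming CHUNK_SIZE is 4 MB as the comments state; it mirrors the rounding expressions in rmm_init() but is not part of the original code.

#include <assert.h>
#include <stdint.h>

#define CHUNK_SIZE (4u * 1024 * 1024)   /* 4 MB, matching the 0xffc00000 mask */

/* Round up to the next 4 MB boundary, mirroring the logic in rmm_init(). */
static uint32_t round_up_chunk(uint32_t addr) {
  if (addr & ~0xffc00000u)                       /* low 22 bits set -> not aligned */
    addr = (addr + CHUNK_SIZE) & 0xffc00000u;
  return addr;
}

/* Round down to the previous 4 MB boundary. */
static uint32_t round_down_chunk(uint32_t addr) {
  return addr & 0xffc00000u;
}

int main(void) {
  assert(round_up_chunk(0x00500000u) == 0x00800000u);   /* 5 MB rounds up to 8 MB */
  assert(round_up_chunk(0x00c00000u) == 0x00c00000u);   /* already aligned: unchanged */
  assert(round_down_chunk(0x00ffffffu) == 0x00c00000u); /* rounds down to 12 MB */
  return 0;
}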