Example #1
File: ofd.c Project: jiamacs/rhype
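/*
 * Map the Open Firmware device tree located at 'mem', probe the devices
 * it describes, and return its base address. '*space' is set to the
 * tree's space value, less an 8-byte reserve.
 */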
uval
ofd_devtree_init(uval mem, uval *space)
{
	uval sz;
	uval mapped;

	/* map the first page, there may be more */
	mapped = map_pages(mem, mem, PGSIZE);

	sz = ofd_size(mem);

	*space = ofd_space(mem);

	if (sz > mapped) {
		/* map the rest */
		map_pages(mem + mapped, mem + mapped, sz - mapped);
	}

#ifdef OFD_DEBUG
	ofd_walk((void *)mem, OFD_ROOT, ofd_dump_props, OFD_DUMP_VALUES);
#endif
	/* scan the tree and identify resources */
	ofd_pci_addr(mem);
	ofd_proc_dev_probe((void *)mem);

	if (ofd_platform_probe)
		ofd_platform_probe((void *)mem);

	*space -= 8;
	return mem;
}
Example #2
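/*
 * Copy a partition image of 'isize' bytes at 'istart' into the LPAR's
 * initial memory at 'offset', then flush the copied range from the
 * instruction cache.
 */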
uval
load_in_lpar(struct partition_status* ps, uval offset,
	     uval istart, uval isize)
{
	uval ra;
	uval ea;
	uval size;
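	/* align the target real address down to a chunk boundary and round
	 * the copy size up to whole pages */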
	ra = ALIGN_DOWN(ps->init_mem + offset, 1 << LOG_CHUNKSIZE);
	size = ALIGN_UP(ps->init_mem + offset - ra + isize, PGSIZE);

	ea = ra;

	map_pages(ra, ea, 1 << LOG_CHUNKSIZE);

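	/* advance to the image's copy destination within the mapped chunk */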
	ea += offset;

	hprintf("Copying partition img: 0x%lx[0x%lx] -> 0x%lx\n",
		istart, isize, ea);

	memcpy((void *)ea, (void *)istart, isize);

	isize = ALIGN_UP(isize, PGSIZE);
	while (isize) {
		/* invalidate the i-cache for each page of the copied image */
		icache_invalidate_page(ea >> LOG_PGSIZE, LOG_PGSIZE);
		ea += PGSIZE;
		isize -= PGSIZE;
	}

	return 1;
}
Example #3
void init_interrupt(void){
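    // map the local APIC's MMIO region at the fixed MAP_LAPIC virtual address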
    map_pages(MAP_LAPIC, lapic_base, LAPIC_SIZE);
    
    init_idt();

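    // hand init_lapic the mapped address, preserving the LAPIC's offset within its page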
    init_lapic(MAP_LAPIC + (lapic_base & (PAGE_SIZE - 1)));
}
Example #4
void heap_init(void)
{
  void *heap_pool_phys_addr;

  kprintf("[heap] initializing heap (%d B)\n", CONFIG_KERNEL_HEAP_SIZE);

  if (NULL == (heap_pool_phys_addr = pm_alloc_cont(CONFIG_KERNEL_HEAP_SIZE / PAGE_SIZE))){
    kprintf("[heap] -- failed - can't proceed!\n");
    halt();
  }

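  /* map the physically contiguous pool at the heap's fixed virtual address */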
  map_pages(heap_pool_phys_addr, heap_pool_addr, 0, CONFIG_KERNEL_HEAP_SIZE);

  /* let's create the first block which will span the entire heap, and will be
   * marked unused */
  struct block *initial = heap_pool_addr;

  initial->magic = 0xbabe;
  initial->type = BLOCK_UNUSED;
  /* usable bytes: the whole pool minus the header (the data_size field
   * itself is not counted as header overhead here) */
  initial->data_size = CONFIG_KERNEL_HEAP_SIZE - (sizeof(*initial) - sizeof(initial->data_size));
  initial->next = NULL;

  heap_initialized = true;
}
Example #5
// Get the physical address of the page table for the given
// section index (i.e. multiple of 1MB). If it doesn't exist,
// it's allocated and mapped (mapping is skipped if skip_map
// is true).
// We actually allocate page tables four at a time, because it's too
// fiddly to keep track of partially used pages - page tables are only
// 1 KB on ARM.
physaddr get_page_table(int section_index, int skip_map)
{
  uint32_t *pgd = (uint32_t *)bootdata->page_directory;
  physaddr ptbl = (physaddr)(pgd[section_index] & 0xFFFFFC00);
  if (!ptbl)
  {
    // need to allocate it
    ptbl = alloc_pages_zero(PAGE_SIZE, PAGE_SIZE);

    int start_sec = section_index & ~(PTBLS_PER_PAGE-1);
    for (int i = 0; i < PTBLS_PER_PAGE; ++i)
      pgd[start_sec + i] = (ptbl + (i * PAGETABLE_SIZE)) | PGD_COARSE;

    int pagetable_index = section_index / PTBLS_PER_PAGE;
    virtaddr pgt_virt = (virtaddr)(&__page_tbl_start__)
        + (pagetable_index << PAGE_SHIFT);
    if (!skip_map)
      map_pages(pgt_virt, pgt_virt + PAGE_SIZE,
          ptbl | PTB_RW | PTB_CACHE | PTB_BUFF | PTB_EXT);

    int index_in_page = section_index % PTBLS_PER_PAGE;
    ptbl += index_in_page * PAGETABLE_SIZE;
  }

  return ptbl;
}
Example #6
void	init_page_table()
{
	//Page-table memory is preallocated, occupying physical memory 0x00400000-0x007fffff
	//Page 0x0000 maps virtual addresses 00000000-003FFFFF
	//Page 0x0001 maps virtual addresses 00400000-007FFFFF
	//Page 0x0300 maps virtual addresses C0000000-C03FFFFF (page_dir, which points at the page directory itself)
	//Page 0x03FF maps virtual addresses FFC00000-FFFFFFFF

	//Clear the page tables
	memset((void*)PAGE_TABLE_PHYSICAL_ADDR, 0, PAGE_TABLE_SIZE);

	//Page-directory self-mapping
	//((UINT32*)m_page_dir_physical_addr)[PDE_INDEX(PAGE_TABLE_VIRTUAL_ADDR)] = m_page_dir_physical_addr | PT_PRESENT | PT_WRITABLE;

	//Map all page tables
	for (int i = 0; i < 1024; i++)
	{
		((UINT32*)PAGE_DIR_PHYSICAL_ADDR)[i] = (PAGE_TABLE_PHYSICAL_ADDR + i * PAGE_SIZE) | PT_PRESENT | PT_WRITABLE;
	}
	map_pages(0, 0, BOOT_CODE_SIZE);
	map_pages(KERNEL_STACK_BOTTOM_PHYSICAL_ADDR, KERNEL_STACK_BOTTOM_VIRTUAL_ADDR, KERNEL_STACK_SIZE);//after paging is enabled, this serves as the kernel stack
	map_pages(FRAME_DB_PHYSICAL_ADDR, FRAME_DB_VIRTUAL_ADDR, FRAME_DB_SIZE);
	map_pages(VIDEO_BUF_PHYSICAL_ADDR, VIDEO_BUF_VIRTUAL_ADDR, VIDEO_BUF_SIZE);
	map_pages(PAGE_TABLE_PHYSICAL_ADDR, PAGE_TABLE_VIRTUAL_ADDR, PAGE_TABLE_SIZE);
	map_pages(OS_CODE_PHYSICAL_ADDR, OS_CODE_VIRTUAL_ADDR, OS_CODE_SIZE);
}
Example #7
__init void module_load() {
    kprintf("module - loading modules", module_count);

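    // map each module's physical range and unpack it into the root ramfs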
    for(uint32_t i = 0; i < module_count; i++) {
        kprintf("module - #%u loaded", i + 1);
        uint32_t num_pages = DIV_UP(modules[i].end - modules[i].start, PAGE_SIZE);
        void *virt = map_pages(modules[i].start, num_pages);
        rootramfs_load(virt, modules[i].end - modules[i].start);
        //TODO unmap pages
    }

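    // return the modules' now-unneeded physical pages to the allocator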
    uint32_t freed_pages = 0;

    for(uint32_t i = 0; i < module_count; i++) {
        uint32_t first_page = DIV_UP(modules[i].start, PAGE_SIZE);
        uint32_t last_page = DIV_DOWN(modules[i].end, PAGE_SIZE);
        claim_pages(first_page, last_page - first_page);
        freed_pages += last_page - first_page;
    }

    kprintf("module - %u pages reclaimed", freed_pages);
}
Example #8
void load_driver(ElfHeader* header) {
	bool valid = elf_is_valid(header);
	if(!valid) {
		die("Invalid driver.");
	}
	size_t num_segments = header->num_ph_entries;
	size_t segment_len = header->ph_entry_size;
	ElfSegment* segments = program_header(header);
	for(size_t i = 0; i < num_segments; ++i) {
		ElfSegment* segment = segments + i;

		//TODO: loading for other segment types?
		if(segment->type != PT_LOAD) {
			continue;
		}

		if(segment->memory_size == 0) {
			continue;
		}

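		/* reserve a virtual address range for this segment */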
		void* virt = alloc_driver_segment(0);
		if(!virt) {
			die("Unable to allocate address space for driver segment");
		}

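		/* back the range with freshly allocated physical pages */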
		for(size_t j = 0; j < lsr_round_up(segment->memory_size, PAGE_SIZE_POWER); ++j) {
			void* phys = alloc_phys_page();
			map_pages((uint8_t*)virt + PAGE_SIZE * j, phys, 1);
		}

		memcpy(virt, (uint8_t*)header + segment->offset, segment->memory_size);

		void* start = (uint8_t*)virt + (header->entry - segment->offset);
		((EntryPoint)start)();
		//TODO: clear .bss area?
	}
}
Example #9
/*-----------------------------------------------------------------------------
 *		Execute kernel process
 *---------------------------------------------------------------------------*/
process_t* exec_proc(char* name, bool kernel)
{
	u32int		pdir_vaddr = 0;			/* Page directory virtual address */
	physaddr_t	page_dir = 0;			/* Page directory physical address */

	size_t		page_count;				/* Number of pages to allocate */
	size_t		stack_page_count;		/* Stack page count */
	size_t		seg_page_count;			/* ELF segment page count */
	size_t		heap_page_count;		/* Heap page count */
	size_t		blocks_page_count;		/* Heap info block page count */

	physaddr_t	tmp_paddr = 0;			/* Temporary physical address */
	physaddr_t	stack_paddr = 0;
	physaddr_t	user_stack_paddr = 0;
	physaddr_t	seg_paddr = 0;
	physaddr_t	heap_paddr = 0;
	physaddr_t	blocks_paddr = 0;

	s8int		err = -1;				/* Error code */
	int			i = 0;
	size_t		sz = 0;					/* Reading data size */
	process_t*	proc = 0;				/* Process handler */
	thread_t*	thread = 0;				/* Thread handler */

	u32int		stack = 0;				/* Stack start address */
	u32int		stack_size = 0x4000;	/* Stack size */

	u32int		usr_stack = 0;

	u32int		eflags = 0;				/* EFLAGS buffer */
	u32int		seg_size = 0;

	heap_t*		heap;

	/* Load ELF info */
	elf_sections_t* elf = load_elf(name);

	/* Check file format */
	if (elf->elf_header->e_type != ET_EXEC)
	{
		print_text("This file is not executable...FAIL\n");
		return NULL;
	}

	/* Check architecture */
	if (elf->elf_header->e_mashine != EM_386)
	{
		print_text("This file is not for i386 architecture...FAIL\n");
		return NULL;
	}

	/* Create page directory */
	page_dir = clone_kernel_dir(&pdir_vaddr);

	/* Allocate pages for ELF segments */
	for (i = 0; i < elf->elf_header->e_phnum; i++)
		seg_size += elf->p_header[i].p_memsz;

	page_count = (seg_size + PAGE_SIZE - 1) / PAGE_SIZE;	/* round up to whole pages */
	seg_page_count = page_count;

	tmp_paddr = alloc_phys_pages(page_count);
	seg_paddr = tmp_paddr;

	err = map_pages(page_dir,
			        (void*) elf->p_header[0].p_vaddr,
			        tmp_paddr,
			        page_count,
			        0x07);

	if (err == -1)
	{
		print_text("Memory mapping error...FAIL\n");
		return NULL;
	}

	/* kernel stack */
	stack = (u32int) kmalloc(stack_size);

	/* user stack */
	usr_stack = elf->p_header[0].p_vaddr + page_count * PAGE_SIZE;

	page_count = stack_size / PAGE_SIZE;
	tmp_paddr = alloc_phys_pages(page_count);
	user_stack_paddr = tmp_paddr;
	stack_page_count = page_count;

	err = map_pages(page_dir,
				    (void*) usr_stack,
				    tmp_paddr,
				    page_count,
				    0x07);

	if (err == -1)
	{
		print_text("Memory mapping error...FAIL\n");
		return NULL;
	}

	/* Process heap creation */
	page_count = USER_HEAP_SIZE / PAGE_SIZE;
	heap_page_count = page_count;

	tmp_paddr = alloc_phys_pages(page_count);
	heap_paddr = tmp_paddr;

	err = map_pages(page_dir,
				    USER_HEAP_START,
				    tmp_paddr,
				    page_count,
				    0x07);

	if (err == -1)
	{
		print_text("Memory mapping error...FAIL\n");
		return NULL;
	}

	page_count = USER_HEAP_INFO_SIZE / PAGE_SIZE;
	blocks_page_count = page_count;

	tmp_paddr = alloc_phys_pages(page_count);
	blocks_paddr = tmp_paddr;

	err = map_pages(page_dir,
				    USER_HEAP_BLOKS_INFO,
				    tmp_paddr,
				    page_count,
				    0x07);

	if (err == -1)
	{
		print_text("Memory mapping error...FAIL\n");
		return NULL;
	}

	/* Create process */
	proc = (process_t*) kmalloc(sizeof(process_t));

	proc->page_dir = page_dir;
	proc->pid = get_pid();
	proc->list_item.list = NULL;
	strcpy(proc->name, name);
	proc->suspend = false;
	proc->threads_count = 0;

	proc->page_dir_vaddr = (void*) pdir_vaddr;

	proc->stack_paddr = stack_paddr;
	proc->user_stack_vaddr = (void*) usr_stack;
	proc->user_stack_paddr = user_stack_paddr;
	proc->stack_page_count = stack_page_count;
	proc->seg_paddr = seg_paddr;
	proc->seg_page_count = seg_page_count;
	proc->heap_paddr = heap_paddr;
	proc->heap_page_count = heap_page_count;
	proc->blocks_paddr = blocks_paddr;
	proc->blocks_page_count = blocks_page_count;

	add_process(proc);

	/* Create main thread */
	thread = (thread_t*) kmalloc(sizeof(thread_t));

	thread->id = get_thread_id();
	thread->suspend = false;
	thread->process = proc;
	thread->entry_point = elf->elf_header->e_entry;
	thread->list_item.list = NULL;
	thread->stack = (void*) stack;
	thread->stack_size = stack_size;
	thread->stack_top = stack + stack_size;
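	/* leave room below the stack top for the 7-word (28-byte) initial context */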
	thread->esp = stack + stack_size - 28;

	proc->thread_id[proc->threads_count++] = thread->id;

	u32int* esp = (u32int*) (stack + stack_size);

	eflags = read_eflags();

	eflags |= (1 << 9);	/* set IF so the new thread runs with interrupts enabled */

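	/* build the initial stack frame that the first context switch will pop:
	   return address, entry point and EFLAGS */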
	if (kernel)
	{
		esp[-4] = (u32int) &destroy_proc;
		esp[-5] = elf->elf_header->e_entry;
		esp[-7] = eflags;
	}
	else
	{
		esp[-2] = (u32int) proc;
		esp[-3] = (u32int) elf;
		esp[-5] = (u32int) &start_elf;
		esp[-7] = eflags;
	}

	add_thread(thread);

	return proc;
}
Example #10
/*
	ok, this is pman init stage two. we will execute this code, and then jump to the process 
	manager main processing loop.
	
	What we will do here, is setup the page pool. And initialize System services, along with structures.
	Notice, we are now task 0 on the system.
*/	
void pman_init_stage2()
{
	UINT32 linear, physical; 
	struct pm_thread *pmthr = NULL;
	struct pm_task *pmtsk = NULL;
	int i = 0;
    int init_size = 0;
    
	/* get rid of the init stuff */
	destroy_thread(INIT_THREAD_NUM);
	destroy_task(INIT_TASK_NUM);
	
	/*
	Open used ports
	*/
	for(i = 0; i <= 12; i++)
	{
		open_port(i, 3, PRIV_LEVEL_ONLY);
	}
	
	/*
		Init stage 1 placed the multiboot info at PMAN_MULTIBOOT_PHYS.
		Before initializing the pool we need to know the memory size,
		and that information is there, so let's map it into our page table.
	*/
	linear = PMAN_MULTIBOOT_LINEAR + SARTORIS_PROCBASE_LINEAR;
  	physical = PMAN_MULTIBOOT_PHYS; 

	map_pages(PMAN_TASK, linear, physical, PMAN_MULTIBOOT_PAGES, PGATT_WRITE_ENA, 2);

	/* Relocate the init image */
	init_size = init_reloc();

    pman_print_set_color(0x7);
	pman_print("Mapping Malloc %i pages", PMAN_MALLOC_PAGES);
       
	/* Pagein remaining pages for kmalloc */
	linear = PMAN_MALLOC_LINEAR + SARTORIS_PROCBASE_LINEAR; // placed after multiboot (this invalidates the map src/dest linear
                                                            // address; we cannot use that area anymore, but that's ok - it was
                                                            // only used for the init copy)
  	physical = PMAN_MALLOC_PHYS; 

	map_pages(PMAN_TASK, linear, physical, PMAN_MALLOC_PAGES, PGATT_WRITE_ENA, 2);

	pman_print("Initializing tasks/threads.");

    /* Show MMAP information */
	if(((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR)->flags & MB_INFO_MMAP && ((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR)->mmap_length > 0)
	{		 
		//Calculate multiboot mmap linear address.
		//Sartoris loader left MMAP just after multiboot info structure.
		
		((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR)->mmap_addr = PMAN_MULTIBOOT_LINEAR + sizeof(struct multiboot_info);

		pman_print("Multiboot MMAP Size: %i ", ((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR)->mmap_length);
		pman_print("Multiboot mmap linear address: %x", ((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR)->mmap_addr);

		struct mmap_entry *entry = NULL;
		entry = (struct mmap_entry *)((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR)->mmap_addr;

		int kk = 0, mmlen = ((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR)->mmap_length / entry->size;
		for(kk = 0; kk < mmlen; kk++)
		{
			pman_print("Multiboot entry size: %i start: %x end: %x type: %i", entry->size, (UINT32)entry->start, (UINT32)entry->end, entry->type);		

			entry = (struct mmap_entry *)((UINT32)entry + entry->size);
		}
	}
	else
	{
		pman_print("No MMAP present.");
	}

    /* Initialize vmm subsystem */
	vmm_init((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR, PMAN_INIT_RELOC_PHYS, PMAN_INIT_RELOC_PHYS + init_size);
	
    tsk_init();
	thr_init();

	/* Mark SCHED_THR as taken! */
	pmtsk = tsk_create(PMAN_TASK);
	pmtsk->state = TSK_NORMAL;

    pmthr = thr_create(SCHED_THR, pmtsk);
	pmthr->state = THR_INTHNDL;		// ehm... well... it IS an interrupt handler :D
	pmthr->task_id = PMAN_TASK;
    
	pman_print("Initializing allocator and interrupts.");
    /* Initialize kernel memory allocator */
	kmem_init(PMAN_MALLOC_LINEAR, PMAN_MALLOC_PAGES);
	
	/* get our own interrupt handlers, override microkernel defaults */
	int_init();
	
	/* Initialize Scheduler subsystem */
	sch_init();
    
	pman_print("InitFS2 Service loading...");
	
	/* Load System Services and init Loader */
	loader_init((ADDR)PHYSICAL2LINEAR(PMAN_INIT_RELOC_PHYS));

	//pman_print_clr(7);
	pman_print("Loading finished, return INIT image memory to POOL...");

	/* Put now unused Init-Fs pages onto vmm managed address space again. */
	vmm_add_mem((struct multiboot_info*)PMAN_MULTIBOOT_LINEAR
				,PHYSICAL2LINEAR(PMAN_INIT_RELOC_PHYS)
				,PHYSICAL2LINEAR(PMAN_INIT_RELOC_PHYS + init_size));
	
	pman_print("Signals Initialization...");

	/* Initialize global signals container */
	init_signals();

	pman_print("Commands Initialization...");

	/* Initialize Commands subsystem. */
	cmd_init();

	pman_print_set_color(12);
	pman_print("PMAN: Initialization step 2 completed.");

	/* Create Scheduler int handler */
	if(create_int_handler(32, SCHED_THR, FALSE, 0) < 0)
		pman_print_and_stop("Could not create Scheduler thread.");

	/* This is it, we are finished! */
	process_manager();
}
Example #11
/****************************************************************************
 * lbtable_scan
 *
 * Scan the chunk of memory specified by 'start' and 'end' for a coreboot
 * table.  The first 4 bytes of the table are marked by the signature
 * { 'L', 'B', 'I', 'O' }.  'start' and 'end' indicate the addresses of the
 * first and last bytes of the chunk of memory to be scanned.  For instance,
 * values of 0x10000000 and 0x1000ffff for 'start' and 'end' specify a 64k
 * chunk of memory starting at address 0x10000000.  'start' and 'end' are
 * physical addresses.
 *
 * If a coreboot table is found, return a pointer to it.  Otherwise return
 * NULL.  On return, *bad_header_count and *bad_table_count are set as
 * follows:
 *
 *     *bad_header_count:
 *         Indicates the number of times in which a valid signature was found
 *         but the header checksum was invalid.
 *
 *     *bad_table_count:
 *         Indicates the number of times in which a header with a valid
 *         checksum was found but the table checksum was invalid.
 ****************************************************************************/
static const struct lb_header *lbtable_scan(unsigned long start,
					    unsigned long end,
					    int *bad_header_count,
					    int *bad_table_count)
{
	static const char signature[4] = { 'L', 'B', 'I', 'O' };
	const struct lb_header *table;
	const struct lb_forward *forward;
	unsigned long p;
	uint32_t sig;

	assert(end >= start);
	memcpy(&sig, signature, sizeof(sig));
	table = NULL;
	*bad_header_count = 0;
	*bad_table_count = 0;

	/* Look for signature.  Table is aligned on 16-byte boundary.  Therefore
	 * only check every fourth 32-bit memory word.  As the loop is coded below,
	 * this function will behave in a reasonable manner for ALL possible values
	 * for 'start' and 'end': even weird boundary cases like 0x00000000 and
	 * 0xffffffff on a 32-bit architecture.
	 */
	map_pages(start, end - start + 1);	/* 'end' is the last byte, inclusive */
	for (p = start;
	     (p <= end) &&
	     (end - p >= (sizeof(uint32_t) - 1)); p += 4) {
		if (*(uint32_t*)phystov(p) != sig)
			continue;

		/* We found a valid signature. */
		table = (const struct lb_header *)phystov(p);

		/* validate header checksum */
		if (compute_ip_checksum((void *)table, sizeof(*table))) {
			(*bad_header_count)++;
			continue;
		}

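		/* the table may extend beyond the initially mapped range; map all of it */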
		map_pages(p, table->table_bytes + sizeof(*table));
		/* validate table checksum */
		if (table->table_checksum !=
		    compute_ip_checksum(((char *)table) + sizeof(*table),
					table->table_bytes)) {
			(*bad_table_count)++;
			continue;
		}

		/* checksums are ok: we found it! */
		/* But it may just be a forwarding table, so look if there's a forwarder */
		lbtable = table;
		forward = (struct lb_forward *)find_lbrec(LB_TAG_FORWARD);
		lbtable = NULL;

		if (forward) {
			uint64_t new_phys = forward->forward;
			table = lbtable_scan(new_phys, new_phys + getpagesize(),
					 bad_header_count, bad_table_count);
		}
		return table;
	}

	return NULL;
}
Example #12
0
// Called by _start. Physical addressing is in effect, but this code is
// linked assuming virtual addresses: only PC-relative references actually
// work. A small stack is available.
void boot_start()
{
  // Clear some bootdata values
  bootdata->initrd_size = 0;

  // Work out phys->virt offset
  bootdata->phys_to_virt = (uint32_t)&_start - bootdata->rom_base;

  // Print all the info we received from the assembly entry point
  DBGSTR("Pycorn bootstrap entered\n");
  DBGINT("rom base: ", bootdata->rom_base);
  DBGINT("machine type: ", bootdata->machtype);
  DBGINT("taglist ptr: ", bootdata->taglist_ptr);
  DBGINT("phys->virt: ", bootdata->phys_to_virt);

  // Work out section sizes
  uint32_t text_size = &__text_end__ - &__text_start__;
  uint32_t data_size = &__data_end__ - &__data_start__;
  uint32_t bss_size = &__bss_end__ - &__bss_start__;
  uint32_t heap_size = &__heap_end__ - &__heap_start__;
  uint32_t stack_size = &__stack_end__ - &__stack_start__;
  uint32_t img_size = text_size + data_size;

  // Parse atags
  int r = parse_atags();
  DBGINT("parse_atags returned ", r);
  (void)r;

  // Work out where the first free page after the image is.
  // We will use this as the starting location to allocate pages, so there
  // had better be some megabytes of memory here. This will change later to
  // a less stupid allocator.
  bootdata->next_free_page = bootdata->rom_base + img_size;
  DBGINT("first free page: ", bootdata->next_free_page);

  // Allocate page directory
  DBGSTR("Allocate page directory\n");
  bootdata->page_directory = alloc_pages_zero(PAGEDIR_SIZE, PAGEDIR_SIZE);
  DBGINT("page directory: ", bootdata->page_directory);
  
  // Set MMU base address
  DBGSTR("Set MMU base address\n");
  mmu_set_base(bootdata->page_directory);

  // Allocate and map page table mappings
  // Page tables are placed linearly at a fixed location to make
  // it possible to find them again later without having to remember
  // where they are.
  // This is kinda scary as we are bootstrapping :)
  DBGSTR("Allocate and map page table mappings\n");
  int ptbl_section = (virtaddr)(&__page_tbl_start__) >> SECTION_SHIFT;
  physaddr ptbl_map = get_page_table(ptbl_section, 1);
  virtaddr ptbl_address = (virtaddr)(&__page_tbl_start__)
      + (ptbl_section * PAGETABLE_SIZE);
  map_pages(ptbl_address, ptbl_address + (PAGE_SIZE * PTBLS_PER_PAGE),
      ptbl_map | PTB_RW | PTB_CACHE | PTB_BUFF | PTB_EXT);

  // Map page directory
  DBGSTR("Map page directory\n");
  map_pages((virtaddr)&__page_dir_virt__,
      (virtaddr)(&__page_dir_virt__ + PAGEDIR_SIZE),
      bootdata->page_directory | PTB_RW | PTB_CACHE | PTB_BUFF | PTB_EXT);

  // Map text section of image
  DBGSTR("Map text section\n");
  map_pages((virtaddr)&__text_start__, (virtaddr)&__text_end__,
      bootdata->rom_base | PTB_ROM | PTB_CACHE | PTB_BUFF | PTB_EXT);

  // Map data section of image
  DBGSTR("Map data section\n");
  physaddr data_phys = bootdata->rom_base + text_size;
  map_pages((virtaddr)&__data_start__, (virtaddr)&__data_end__,
      data_phys | PTB_RW | PTB_CACHE | PTB_BUFF | PTB_EXT);

  // Allocate and map bss section
  DBGSTR("Allocate and map bss\n");
  physaddr bss_phys = alloc_pages_zero(bss_size, PAGE_SIZE);
  map_pages((virtaddr)&__bss_start__, (virtaddr)&__bss_end__,
      bss_phys | PTB_RW | PTB_CACHE | PTB_BUFF | PTB_EXT);

  // Allocate and map heap section
  DBGSTR("Allocate and map heap\n");
  physaddr heap_phys = alloc_pages_zero(heap_size, PAGE_SIZE);
  map_pages((virtaddr)&__heap_start__, (virtaddr)&__heap_end__,
      heap_phys | PTB_RW | PTB_CACHE | PTB_BUFF | PTB_EXT);

  // Allocate and map stack section
  DBGSTR("Allocate and map stack\n");
  physaddr stack_phys = alloc_pages_zero(stack_size, PAGE_SIZE);
  map_pages((virtaddr)&__stack_start__, (virtaddr)&__stack_end__,
      stack_phys | PTB_RW | PTB_CACHE | PTB_BUFF | PTB_EXT);

  // Map debug UART - we assume no more than a page is needed
  DBGSTR("Mapping debug UART\n");
  map_pages((virtaddr)&__dbg_serial_virt__, 
      (virtaddr)(&__dbg_serial_virt__ + PAGE_SIZE),
      (physaddr)&__dbg_serial_phys__ | PTB_RW | PTB_EXT);

  // Map boot data page
  DBGSTR("Mapping boot data\n");
  map_pages((virtaddr)&__bootdata_virt__,
      (virtaddr)(&__bootdata_virt__ + PAGE_SIZE),
      (physaddr)bootdata | PTB_RW | PTB_CACHE | PTB_BUFF | PTB_EXT);

  // Map the initrd if there was one
  if (bootdata->initrd_size)
  {
    // The initrd address may not be a page multiple as u-boot has
    // its own header on the file, so we need to align it.
    physaddr map_start = PAGEALIGN_DOWN(bootdata->initrd_phys);
    uint32_t map_len = PAGEALIGN_UP(bootdata->initrd_phys +
        bootdata->initrd_size) - map_start;
    // We also need to calculate the offset and offset the virtual
    // address by the matching amount.
    uint32_t offset = bootdata->initrd_phys - map_start;
    bootdata->initrd_virt = (virtaddr)&__initrd_map_start__ + offset;

    DBGSTR("Map initrd\n");
    map_pages((virtaddr)&__initrd_map_start__,
        (virtaddr)(&__initrd_map_start__ + map_len),
        map_start | PTB_ROM | PTB_CACHE | PTB_BUFF | PTB_EXT);
  }

  // Self-map MMU enabling code
  // The page which contains the MMU enable function must be mapped
  // with phys==virt address, otherwise bad stuff happens. We do this
  // by stuffing in a 1MB section mapping for this address, which may
  // overwrite an actual page table mapping (it's saved and restored
  // later on).
  DBGSTR("Self-map MMU enabling code\n");
  mmu_enable_func mmu_enable_phys = &mmu_enable - bootdata->phys_to_virt;
  physaddr selfmap_addr = (physaddr)mmu_enable_phys;
  int selfmap_index = selfmap_addr >> SECTION_SHIFT;
  selfmap_addr = selfmap_index << SECTION_SHIFT;
  uint32_t *pgd = (uint32_t *)bootdata->page_directory;
  uint32_t old_pde = pgd[selfmap_index];
  pgd[selfmap_index] = selfmap_addr | PGD_ROM | PGD_SECTION;

  // Enable MMU. This doesn't return, it goes to boot_after_mmu.
  DBGSTR("Enable MMU\n");
  mmu_enable_phys(selfmap_index, old_pde, &boot_after_mmu);
}