Example #1
0
/*
 * init_mmu_lpae()
 *
 * Initialize the page tables for the LPAE (Large Physical Address
 * Extension) case.  Allocates the per-cpu L1 and L2 translation
 * tables, maps them into virtual space, block-maps the startup code
 * one-to-one so the transition to virtual addressing can keep
 * executing, and maps RAM into the 1-1 mapping area.
 *
 * Side effects: sets the globals L1_paddr, L1_vaddr, L2_paddr,
 * L2_size, startup_base and startup_size (the latter two are later
 * consumed by procnto to unmap the transitional 1-1 mapping).
 */
void
init_mmu_lpae(void)
{
	unsigned	base;
	unsigned	ncpu = lsp.syspage.p->num_cpu;
	unsigned	l1_size = ncpu * ARM_LPAE_L1_SIZE;

	// Get the CPU-specific PTE descriptors - Not implemented for A15 under LPAE...
	arm_pte_setup();

	// As paddr == vaddr in startup, we require the L1/L2 tables to be allocated from memory < 4G
	// However, if calloc_ram ever starts handing out high memory, things will promptly stop working.
	// It will not be a subtle failure.

	// LPAE L1 table is only 32 bytes per cpu.  (So we can only get 512 on a single page!)
	L1_paddr = calloc_ram(l1_size, __PAGESIZE);

	if (debug_flag) {
		kprintf("LPAE: %s:%d allocated 0x%x bytes at 0x%x for L1 page table\n", __func__, __LINE__, l1_size, L1_paddr);
	}
	
	// One page per cpu for kernel L2; three pages for user L2, shared between cpus
	// We allocate user and kernel l2 pages together so we can pass both to procnto with one param
	L2_size = (ncpu * ARM_LPAE_L2_SIZE) + ARM_LPAE_USER_L2_SIZE;
	L2_paddr = calloc_ram(L2_size, __PAGESIZE);

	if (debug_flag) {
		kprintf("LPAE: %s:%d allocated 0x%x bytes at 0x%x for L2 page tables\n", __func__, __LINE__, L2_size, L2_paddr);
	}
	
	// Init the L1/L2 tables and make the kernel L2/L3 tables accessible at the specified vaddrs
	// This function uses the above globals directly; doesn't need them as params.
	arm_lpae_table_init(ARM_LPAE_KERN_L2, ARM_LPAE_KERN_L3);
	
	// Map the L1 table into virtual space.
	// NOTE(review): assumes the ~0L first argument tells arm_map() to
	// choose the vaddr itself -- confirm against arm_map()'s contract.
	L1_vaddr = arm_map(~0L, L1_paddr, l1_size, ARM_MAP_NOEXEC | ARM_PTE_RW | armv_chip->pte_attr);

	// Block map startup code to allow transition to virtual addresses.
	// This 1-1 mapping is also used by kdebug to access the imagefs.
	// procnto uses syspage->un.arm.startup_base/startup_size to unmap it.
	// Under LPAE we use 2MB blocks instead of v6 1MB sections.
	// startup_base is rounded down to a block boundary, so the loop may
	// cover slightly more than [ram_paddr, ram_paddr + ram_size).
	startup_base = shdr->ram_paddr & ~ARM_LPAE_BMASK;
	startup_size = shdr->ram_size;
	for (base = startup_base; base < startup_base + startup_size; base += ARM_LPAE_BSIZE) {
		arm_lpae_bmap(base, base, ARM_PTE_RO);
	}

	/*
	 * Map RAM into the 1-1 mapping area
	 */
	lpae_map_1to1_ram();

	// At verbosity > 2, dump the resulting page tables for debugging
	if (debug_flag>2) {
		kprintf("%s dump table\n", __func__);
	    dump_ptbl();
		kprintf("%s dump table done\n", __func__);
	}
		
}
Example #2
0
/*
 * Initialize page tables for the non-lpae case.
 */
void
init_mmu_32(void)
{
	unsigned	base;
	unsigned	ncpu = lsp.syspage.p->num_cpu;
	unsigned	L1size = ncpu * ARM_L1_SIZE;

	L2_size = ncpu * __PAGESIZE;

	/*
	 * Get the CPU-specific PTE descriptors
	 */
	arm_pte_setup();

	/*
	 * Allocate the L1 table and the "page directory" used to map L2 tables
	 */
	L1_paddr = calloc_ram(L1size, ARM_L1_SIZE);
	L2_paddr = calloc_ram(L2_size, __PAGESIZE);

	/*
	 * Make these tables accessible at vaddr ARM_PTP_BASE
	 * This function uses the above globals directly; doesn't need them as params.
	 */
	arm_pdmap(ARM_PTP_BASE);

	/*
	 * Map the real L1 table
	 */
	L1_vaddr = arm_map(~0L, L1_paddr, L1size, ARM_MAP_NOEXEC | ARM_PTE_RW | armv_chip->pte_attr);
	L1_paddr |= armv_chip->ttb_attr;

	/*
	 * Section map startup code to allow transition to virtual addresses.
	 * This 1-1 mapping is also used by kdebug to access the imagefs.
	 * procnto uses syspage->un.arm.startup_base/startup_size to unmap it.
	 */
	startup_base = shdr->ram_paddr & ~ARM_SCMASK;
	startup_size = shdr->ram_size;
	for (base = startup_base; base < startup_base + startup_size; base += ARM_SCSIZE) {
		arm_scmap(base, base, ARM_PTE_RO);
	}

	/*
	 * Map RAM into the 1-1 mapping area
	 */
	map_1to1_ram();
}
Example #3
0
/*
 * load_elf32mmu()
 *
 * Walk the program header table of a 32-bit ELF image located at
 * physical address 'addr' and map (or copy) every PT_LOAD segment
 * into the virtual address space via elf_map().  Also tracks the
 * overall virtual span of the boot image in boot_vaddr_base /
 * boot_vaddr_end.
 *
 * addr: physical address of the start of the ELF image in memory
 * hdr:  pointer to the ELF file header
 * phdr: pointer to the first program header table entry
 */
static void
load_elf32mmu(paddr32_t addr, Elf32_Ehdr *hdr, Elf32_Phdr *phdr) {
	int			i;

	for(i = 0; i < hdr->e_phnum; ++i, ++phdr) {
		switch(phdr->p_type) {
		case PT_LOAD:
			{
				// Fold the sub-page offset of p_vaddr into the sizes so
				// the mapping can start on a page boundary.
				size_t memsz  = phdr->p_memsz + (phdr->p_vaddr & PAGEBITS);
				size_t filesz = phdr->p_filesz + (phdr->p_vaddr & PAGEBITS);
				uintptr_t vaddr  = phdr->p_vaddr & ~PAGEBITS;
				paddr32_t paddr  = phdr->p_paddr & ~PAGEBITS;
				paddr32_t daddr;

				// Track the lowest/highest virtual addresses loaded.
				// NOTE(review): vaddr == 0 is treated as "base not set
				// yet", so a segment legitimately at vaddr 0 would be
				// missed -- presumably no boot segment lives there.
				if((boot_vaddr_base == 0u) || (boot_vaddr_base > vaddr)){
					boot_vaddr_base = vaddr;
				}
				if(boot_vaddr_end < (vaddr + memsz)){
					boot_vaddr_end = vaddr + memsz;
				}
								
				// Read-only segment fully backed by file data: round both
				// sizes up to whole pages and map straight from the image.
				if((phdr->p_flags & PF_W) == 0 && memsz == filesz) {
					filesz = memsz = ROUNDPG(filesz);
				}
				if(phdr->p_paddr != 0) {
					// Segment has a fixed physical address inside the image
					// (XIP-style): map the file-backed portion in place.
					if (phdr->p_memsz == phdr->p_filesz) {
						// mkifs has padded out the data/bss section
						filesz = memsz = ROUNDPG(filesz);
					}
					elf_map(vaddr, paddr, filesz & ~PAGEBITS, phdr->p_flags);
					// Whatever remains (partial last page of file data plus
					// bss) must live in freshly allocated, zeroed RAM.
					memsz = ROUNDPG(memsz - (filesz & ~PAGEBITS));
					if(memsz) {
						daddr = calloc_ram(memsz, __PAGESIZE);
						// Copy only the sub-page tail of the file data;
						// calloc_ram already zeroed the rest (bss).
						copy_memory(daddr, (paddr + (filesz & ~PAGEBITS)), filesz & PAGEBITS);
						elf_map(vaddr + (filesz & ~PAGEBITS), daddr, memsz, phdr->p_flags);
					}
				} else {
#ifndef BOOTSTRAPS_RUN_ONE_TO_ONE
	/*
	 * We need to do different things depending on whether the bootstrap
	 * executables a run in a physical <-> virtual one to one mapping
	 * area (MIPS, SH, PPC) or if they use the normal virtual address
	 * mapping gear (X86, ARM). Basically, is startup or procnto
	 * enabling the MMU on the CPU.
	 */
	#error BOOTSTRAPS_RUN_ONE_TO_ONE must be defined
#endif
#if BOOTSTRAPS_RUN_ONE_TO_ONE
					// 1-1 platforms: the segment must land at the exact
					// physical address implied by its virtual address;
					// anything else is fatal.
					daddr = alloc_ram(vaddr - shdr->paddr_bias, ROUNDPG(memsz), __PAGESIZE);
					if(daddr != vaddr - shdr->paddr_bias) {
						crash("Error: can not allocate RAM for proc in XIP.\n");
					}
					memmove((void *)phdr->p_vaddr,
							MAKE_1TO1_PTR(addr + phdr->p_offset),
							phdr->p_filesz);
					// Zero the bss portion following the file data.
					memset((void *)(phdr->p_vaddr + phdr->p_filesz), 0,
								(phdr->p_memsz - phdr->p_filesz));
#else
					// MMU platforms: allocate zeroed RAM anywhere, copy the
					// file data in, and map it at the requested vaddr.
					// NOTE(review): the copy source is backed up by
					// (p_vaddr & PAGEBITS) bytes to stay page-aligned with
					// the destination; this assumes the bytes just before
					// p_offset in the image are readable -- confirm against
					// mkifs image layout.
					daddr = calloc_ram(ROUNDPG(memsz), __PAGESIZE);
					copy_memory(daddr, (addr + phdr->p_offset) - (phdr->p_vaddr & PAGEBITS), filesz);
					elf_map(vaddr, daddr, memsz, phdr->p_flags);
#endif
				}
			}
			break;
		}
	}
}