/**
 * \brief Map a special (device) physical region, growing down from the
 *        top of the virtual address space.
 *
 * Hands out virtual address space backwards from a static watermark that
 * starts at X86_32_VADDR_SPACE_SIZE, and maps the page-aligned physical
 * region [base, base + size) there with the page flags in 'bitmap'.
 *
 * \param base   Physical base address of the region (aligned down below).
 * \param size   Size of the region in bytes.
 * \param bitmap Page flag bitmap applied to every mapping.
 *
 * \return Virtual address corresponding to the caller's 'base' (its
 *         original sub-page offset is preserved), or 0 if the reserved
 *         device window is exhausted.
 */
lvaddr_t paging_x86_32_map_special(lpaddr_t base, size_t size, uint64_t bitmap)
{
    // Allocate backwards from a page below end of address space.
    // 'vbase' is a static watermark: it persists across calls and only
    // ever moves down, so regions are handed out highest-first.
    static lvaddr_t vbase = (lvaddr_t)X86_32_VADDR_SPACE_SIZE;
    lpaddr_t addr;
    lvaddr_t vaddr;

    paging_align(&vbase, &base, &size, X86_32_MEM_PAGE_SIZE);

    // Align physical base address down to a page boundary, remembering the
    // sub-page offset so the returned vaddr points at the original byte.
    lpaddr_t offset = base & (X86_32_MEM_PAGE_SIZE - 1);
    base -= offset;

    // Refuse the request if it would push the watermark below the bottom
    // of the device window at the top of the address space.
    // NOTE(review): presumably the window is X86_32_DEVICE_SPACE_LIMIT
    // bytes large -- confirm against the header defining it.
    if(vbase - size < X86_32_VADDR_SPACE_SIZE - X86_32_DEVICE_SPACE_LIMIT) {
        return 0;
    }

    // Map pages, tables and directories (reverse order): walk from the
    // region's last page down to its first page.
    for(vaddr = vbase - X86_32_MEM_PAGE_SIZE,
            addr = base + size - X86_32_MEM_PAGE_SIZE;
        vaddr >= vbase - size;
        vaddr -= X86_32_MEM_PAGE_SIZE, addr -= X86_32_MEM_PAGE_SIZE) {
#ifdef CONFIG_PAE
        // PAE: 2M superpages, entries located via PDPTE + page directory.
        union x86_32_pdpte_entry *pdpte_base = &pdpte[X86_32_PDPTE_BASE(vaddr)];
        union x86_32_ptable_entry *pdir_base =
            &mem_pdir[X86_32_PDPTE_BASE(mem_to_local_phys(vaddr))][X86_32_PDIR_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 2M device page: vaddr = 0x%x, addr = 0x%x, "
              "PDPTE_BASE = %u, PDIR_BASE = %u -- ", vaddr, addr,
              X86_32_PDPTE_BASE(vaddr), X86_32_PDIR_BASE(vaddr));
        mapit(pdpte_base, pdir_base, addr, bitmap);
#else
# ifdef CONFIG_PSE
        // PSE: 4M superpages mapped directly in the page directory.
        union x86_32_ptable_entry *pdir_base = &pdir[X86_32_PDIR_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4M device page: vaddr = 0x%x, addr = 0x%x, "
              "PDIR_BASE = %u -- ", vaddr, addr, X86_32_PDIR_BASE(vaddr));
        mapit(pdir_base, addr, bitmap);
# else
        // Plain 32-bit paging: 4K pages via page directory + page table.
        // mem_ptable only covers the top MEM_PTABLE_SIZE directory slots,
        // hence the (X86_32_PTABLE_SIZE - MEM_PTABLE_SIZE) rebase of the
        // directory index.
        union x86_32_pdir_entry *pdir_base = &pdir[X86_32_PDIR_BASE(vaddr)];
        union x86_32_ptable_entry *ptable_base =
            &mem_ptable[X86_32_PDIR_BASE(vaddr) - (X86_32_PTABLE_SIZE - MEM_PTABLE_SIZE)][X86_32_PTABLE_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4K device page: vaddr = 0x%"PRIxLVADDR", "
              "addr = 0x%"PRIxLPADDR", "
              "PDIR_BASE = %"PRIxLPADDR", PTABLE_BASE = %"PRIxLPADDR", pdir = %p, ptable = %p -- ",
              vaddr, addr, X86_32_PDIR_BASE(vaddr), X86_32_PTABLE_BASE(vaddr),
              pdir,
              mem_ptable[X86_32_PDIR_BASE(vaddr) - (X86_32_PTABLE_SIZE - MEM_PTABLE_SIZE)]);
        mapit(pdir_base, ptable_base, addr, bitmap);
# endif
#endif
    }

    // Move the watermark down past the freshly mapped region and return
    // the vaddr of the caller's original (possibly unaligned) base.
    vbase -= size;
    return vbase + offset;
}
/**
 * \brief Map init user-space memory.
 *
 * Maps the pages of one program segment of the init executable. Physical
 * frames are allocated on the fly (from the BSP or app allocator, depending
 * on which core this runs on) and, on the BSP, frame caps covering the
 * allocated range are placed into init's segcn.
 *
 * \param state  Opaque pointer to the current struct spawn_state.
 * \param gvbase Virtual base address of the program segment.
 * \param size   Size of the program segment in bytes.
 * \param flags  ELF access control flags of the program segment.
 * \param ret    Out-parameter: receives the (unaligned) base address.
 */
errval_t startup_alloc_init(void *state, genvaddr_t gvbase, size_t size,
                            uint32_t flags, void **ret)
{
    struct spawn_state *si = state;

    lvaddr_t vbase = (lvaddr_t)gvbase; /* XXX */
    lvaddr_t offset = BASE_PAGE_OFFSET(vbase);

    /* Page align the parameters */
    paging_align(&vbase, NULL, &size, BASE_PAGE_SIZE);

    /* Allocate one physical frame per page and map it in. */
    lpaddr_t first_frame = 0, frame = 0;
    for (lvaddr_t va = vbase; va < vbase + size; va += BASE_PAGE_SIZE) {
        frame = apic_is_bsp() ? bsp_alloc_phys(BASE_PAGE_SIZE)
                              : app_alloc_phys(BASE_PAGE_SIZE);
        if (first_frame == 0) {
            first_frame = frame;
        }
        errval_t err = startup_map_init(va, frame, BASE_PAGE_SIZE, flags);
        assert(err_is_ok(err));
    }

    if (apic_is_bsp()) {
        // Create frame caps for segcn
        frame += BASE_PAGE_SIZE;

        debug(SUBSYS_STARTUP,
              "Allocated physical memory [0x%"PRIxLPADDR", 0x%"PRIxLPADDR"]\n",
              first_frame, frame - first_frame);

        errval_t err = create_caps_to_cnode(first_frame, frame - first_frame,
                                            RegionType_RootTask, si, bootinfo);
        if (err_is_fail(err)) {
            return err;
        }
    }

    assert(ret != NULL);
    *ret = (void *)(vbase + offset);
    return SYS_ERR_OK;
}
/**
 * \brief Map a region of physical memory into physical memory address space.
 *
 * Maps the region of physical memory, based at base and sized size bytes
 * to the same-sized virtual memory region. All pages are flagged according to
 * bitmap. This function automatically fills the needed page directory entries
 * in the page hierarchy rooted at pml4. base and size will be made
 * page-aligned by this function.
 *
 * \param base   Physical base address of memory region
 * \param size   Size in bytes of memory region
 * \param bitmap Bitmap of flags for page tables/directories
 *
 * \return 0 on success, -1 on error (out of range)
 */
static int paging_map_mem(lpaddr_t base, size_t size, uint64_t bitmap)
{
    lvaddr_t vaddr, vbase = local_phys_to_mem(base);
    lpaddr_t addr;

    // Align given physical base address down to a large-page boundary.
    // (paging_align below then aligns vbase/base/size consistently.)
    if (base & X86_64_MEM_PAGE_MASK) {
        base -= base & X86_64_MEM_PAGE_MASK;
    }
    paging_align(&vbase, &base, &size, X86_64_MEM_PAGE_SIZE);

    // Is mapped region out of range?
    assert(base + size <= (lpaddr_t)K1OM_PADDR_SPACE_LIMIT);
    if (base + size > (lpaddr_t) K1OM_PADDR_SPACE_LIMIT) {
        return -1;
    }

    // Map pages, tables and directories (2M pages, one per iteration)
    for (vaddr = vbase, addr = base; vaddr < vbase + size;
         vaddr += X86_64_MEM_PAGE_SIZE, addr += X86_64_MEM_PAGE_SIZE) {
        union x86_64_pdir_entry *pml4_base = &pml4[X86_64_PML4_BASE(vaddr)];
        union x86_64_pdir_entry *pdpt_base =
            &mem_pdpt[X86_64_PML4_BASE(addr)][X86_64_PDPT_BASE(vaddr)];
        union x86_64_ptable_entry *pdir_base =
            &mem_pdir[X86_64_PML4_BASE(addr)][X86_64_PDPT_BASE(addr)][X86_64_PDIR_BASE(vaddr)];

        // BUGFIX: removed the stray 'x' that followed the PRIxLVADDR
        // conversion -- it printed e.g. "vaddr = 0xffff8000x".
        debug(SUBSYS_PAGING,
              "Mapping 2M page: vaddr = 0x%"PRIxLVADDR", addr = 0x%lx, "
              "PML4_BASE = %lu, PDPT_BASE = %lu, PDIR_BASE = %lu -- ",
              vaddr, addr, X86_64_PML4_BASE(vaddr), X86_64_PDPT_BASE(vaddr),
              X86_64_PDIR_BASE(vaddr));

        mapit(pml4_base, pdpt_base, pdir_base, addr, bitmap);
    }

    // XXX FIXME: the disabled CR3-reload (TLB flush) code that used to sit
    // here has been removed; if existing translations can change, the
    // caller must flush the TLB itself.

    return 0;
}
/**
 * \brief Map init user-space memory.
 *
 * This function maps pages of the init user-space module. It expects
 * the virtual base address 'vbase' of a program segment of the init
 * executable, its size 'size' and its ELF64 access control flags. It maps
 * pages to the sequential area of physical memory, given by 'base'. If you
 * want to allocate physical memory frames as you go, you better use
 * startup_alloc_init().
 *
 * \param vbase Virtual base address of program segment.
 * \param base  Physical base address of program segment.
 * \param size  Size of program segment in bytes.
 * \param flags ELF64 access control flags of program segment.
 */
errval_t startup_map_init(lvaddr_t vbase, lpaddr_t base, size_t size,
                          uint32_t flags)
{
    lvaddr_t vaddr;

    paging_align(&vbase, &base, &size, BASE_PAGE_SIZE);
    // Init's image must stay inside its reserved low address window.
    assert(vbase + size < X86_32_INIT_SPACE_LIMIT);

    // Map pages: one 4K page per iteration, physical address advancing in
    // lockstep with the virtual address.
    for(vaddr = vbase; vaddr < vbase + size;
        vaddr += BASE_PAGE_SIZE, base += BASE_PAGE_SIZE) {
#ifdef CONFIG_PAE
        // init_ptable is a flat array covering PDPTE x PDIR x PTABLE slots;
        // linearize the three-level page-table indices into it.
        union x86_32_ptable_entry *ptable_base = &init_ptable[
            + X86_32_PDPTE_BASE(vaddr) * X86_32_PTABLE_SIZE * X86_32_PTABLE_SIZE
            + X86_32_PDIR_BASE(vaddr) * X86_32_PTABLE_SIZE
            + X86_32_PTABLE_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4K page: vaddr = 0x%x, base = 0x%x, "
              "PDPTE_BASE = %u, PDIR_BASE = %u, "
              "PTABLE_BASE = %u -- ", vaddr, base, X86_32_PDPTE_BASE(vaddr),
              X86_32_PDIR_BASE(vaddr), X86_32_PTABLE_BASE(vaddr));
#else
        // Non-PAE: two-level linearization (PDIR x PTABLE) into init_ptable.
        union x86_32_ptable_entry *ptable_base = &init_ptable[
            X86_32_PDIR_BASE(vaddr) * X86_32_PTABLE_SIZE
            + X86_32_PTABLE_BASE(vaddr)];

        debug(SUBSYS_PAGING, "Mapping 4K page: vaddr = 0x%"PRIxLVADDR
              ", base = 0x%"PRIxLPADDR", "
              "PDIR_BASE = %"PRIuLPADDR", "
              "PTABLE_BASE = %"PRIuLPADDR" -- ", vaddr, base,
              X86_32_PDIR_BASE(vaddr), X86_32_PTABLE_BASE(vaddr));
#endif

        // Only fill the entry if nothing is mapped there yet -- an already
        // present mapping is deliberately left untouched.
        if(!X86_32_IS_PRESENT(ptable_base)) {
            debug(SUBSYS_PAGING, "mapped!\n");
            paging_x86_32_map(ptable_base, base,
                              INIT_PAGE_BITMAP | paging_elf_to_page_flags(flags));
        } else {
            debug(SUBSYS_PAGING, "already existing!\n");
        }
    }

    return SYS_ERR_OK;
}
/**
 * \brief Map a region of physical memory into physical memory address space.
 *
 * Maps the region of physical memory, based at base and sized size bytes
 * to the same-sized virtual memory region. All pages are flagged according to
 * bitmap. This function automatically fills the needed page directory entries
 * in the page hierarchy rooted at pml4. base and size will be made
 * page-aligned by this function.
 *
 * \param base   Base address of memory region
 * \param size   Size in bytes of memory region
 * \param bitmap Bitmap of flags for page tables/directories
 *
 * \return 0 on success, -1 on error (out of range)
 */
static int paging_x86_32_map_mem(lpaddr_t base, size_t size, uint64_t bitmap)
{
    lvaddr_t vaddr, vbase = local_phys_to_mem(base);
    lpaddr_t addr;

    paging_align(&vbase, &base, &size, X86_32_MEM_PAGE_SIZE);

    // Is mapped region out of range?
    assert(local_phys_to_gen_phys(base + size) <= X86_32_PADDR_SPACE_LIMIT);
    if(local_phys_to_gen_phys(base + size) > X86_32_PADDR_SPACE_LIMIT) {
        printk(LOG_ERR, "Mapped region [%"PRIxLPADDR",%"PRIxLPADDR"]"
                        "out of physical address range!",
               base, base + size);
        return -1;
    }

    assert(local_phys_to_gen_phys(vbase + size) <= X86_32_VADDR_SPACE_SIZE);

    // Map pages, tables and directories.
    // NOTE: the loop deliberately has no condition; the exit checks are in
    // the body because 'vbase + size' may wrap around to 0 when the region
    // reaches the top of the 32-bit address space, which would make a
    // normal 'vaddr < vbase + size' test always false.
    for(vaddr = vbase, addr = base;;
        vaddr += X86_32_MEM_PAGE_SIZE, addr += X86_32_MEM_PAGE_SIZE) {
#ifdef CONFIG_PAE
        // PAE: 2M superpages via PDPTE + page directory.
        union x86_32_pdpte_entry *pdpte_base = &pdpte[X86_32_PDPTE_BASE(vaddr)];
        union x86_32_ptable_entry *pdir_base =
            &mem_pdir[X86_32_PDPTE_BASE(addr)][X86_32_PDIR_BASE(vaddr)];
#else
        union x86_32_pdir_entry *pdir_base = &pdir[X86_32_PDIR_BASE(vaddr)];
# ifndef CONFIG_PSE
        // No PSE: 4K pages, so a page-table entry is needed as well.
        union x86_32_ptable_entry *ptable_base =
            &mem_ptable[X86_32_PDIR_BASE(addr)][X86_32_PTABLE_BASE(vaddr)];
# endif
#endif

        // Normal (non-wrapping) termination check.
        if(vbase + size != 0) {
            if(vaddr >= vbase + size) {
                break;
            }
        }

#ifdef CONFIG_PAE
        debug(SUBSYS_PAGING, "Mapping 2M page: vaddr = 0x%x, addr = 0x%x, "
              "PDPTE_BASE = %u, PDIR_BASE = %u -- ", vaddr, addr,
              X86_32_PDPTE_BASE(vaddr), X86_32_PDIR_BASE(vaddr));
        mapit(pdpte_base, pdir_base, addr, bitmap);
#else
# ifdef CONFIG_PSE
        debug(SUBSYS_PAGING, "Mapping 4M page: vaddr = 0x%x, addr = 0x%x, "
              "PDIR_BASE = %u -- ", vaddr, addr, X86_32_PDIR_BASE(vaddr));
        mapit(pdir_base, addr, bitmap);
# else
        debug(SUBSYS_PAGING, "Mapping 4K page: vaddr = 0x%"PRIxLVADDR", "
              "addr = 0x%"PRIxLVADDR", "
              "PDIR_BASE = %"PRIuLPADDR", PTABLE_BASE = %"PRIuLPADDR" -- ",
              vaddr, addr, X86_32_PDIR_BASE(vaddr), X86_32_PTABLE_BASE(vaddr));
        mapit(pdir_base, ptable_base, addr, bitmap);
# endif
#endif

        // Region ends exactly at the top of the address space.
        if(vbase + size == 0) {
            // Bail out if mapped last page of address space to prevent overflow
            // NOTE(review): 0xffe00000 is the base of the last 2M page;
            // presumably X86_32_MEM_PAGE_SIZE is 2M in every active config
            // of this path -- confirm, otherwise this sentinel is missed.
            if(vaddr == 0xffe00000) {
                break;
            }
        }
    }

    return 0;
}