/**
 * \brief Install (or re-install) a large-page mapping entry.
 *
 * Unconditionally writes the mapping; an already-present entry is
 * overwritten on purpose (only the debug message differs).
 *
 * \param pdir_base Page-directory entry to write.
 * \param addr      Physical address of the large page.
 * \param bitmap    Flags bitmap for the mapping.
 */
static inline void mapit(union x86_32_ptable_entry *pdir_base, lpaddr_t addr,
                         uint64_t bitmap)
{
    if (X86_32_IS_PRESENT(pdir_base)) {
        // Remap the page anyway; this is important for the memory latency
        // benchmark.
        debug(SUBSYS_PAGING, "already existing! remapping it\n");
    } else {
        debug(SUBSYS_PAGING, "mapped!\n");
    }

    paging_x86_32_map_large(pdir_base, addr, bitmap);
}
/// Map within a x86_32 pdir static errval_t x86_32_pdir(struct capability *dest, cslot_t slot, struct capability * src, uintptr_t flags, uintptr_t offset, uintptr_t pte_count, struct cte *mapping_cte) { //printf("x86_32_pdir\n"); if (slot >= X86_32_PTABLE_SIZE) { // Slot within page table return SYS_ERR_VNODE_SLOT_INVALID; } if (slot + pte_count > X86_32_PTABLE_SIZE) { // check that mapping fits page directory return SYS_ERR_VM_MAP_SIZE; } #ifndef CONFIG_PAE if(slot >= X86_32_PDIR_BASE(X86_32_MEMORY_OFFSET)) { // Kernel mapped here return SYS_ERR_VNODE_SLOT_RESERVED; } #endif // large page code if(src->type == ObjType_Frame || src->type == ObjType_DevFrame) { cslot_t last_slot = slot + pte_count; // check offset within frame if (offset + pte_count * X86_32_LARGE_PAGE_SIZE > get_size(src)) { return SYS_ERR_FRAME_OFFSET_INVALID; } /* Calculate page access protection flags */ // Get frame cap rights paging_x86_32_flags_t flags_large = paging_x86_32_cap_to_page_flags(src->rights); // Mask with provided access rights mask flags_large = paging_x86_32_mask_attrs(flags_large, X86_32_PTABLE_ACCESS(flags)); // Add additional arch-specific flags flags_large |= X86_32_PTABLE_FLAGS(flags); // Unconditionally mark the page present flags_large |= X86_32_PTABLE_PRESENT; // Convert destination base address genpaddr_t dest_gp = get_address(dest); lpaddr_t dest_lp = gen_phys_to_local_phys(dest_gp); lvaddr_t dest_lv = local_phys_to_mem(dest_lp); // Convert source base address genpaddr_t src_gp = get_address(src); lpaddr_t src_lp = gen_phys_to_local_phys(src_gp); // Set metadata create_mapping_cap(mapping_cte, src, dest_lp + slot * sizeof(union x86_32_ptable_entry), offset, pte_count); for (; slot < last_slot; slot++, offset += X86_32_LARGE_PAGE_SIZE) { union x86_32_ptable_entry *entry = (union x86_32_ptable_entry *)dest_lv + slot; /* FIXME: Flush TLB if the page is already present * in the meantime, since we don't do this, we just assert that * we never reuse a VA mapping */ if 
(X86_32_IS_PRESENT(entry)) { printf("Trying to map into an already present page NYI.\n"); return SYS_ERR_VNODE_SLOT_INUSE; } // Carry out the page mapping paging_x86_32_map_large(entry, src_lp + offset, flags_large); } return SYS_ERR_OK; } if (src->type != ObjType_VNode_x86_32_ptable) { // Right mapping return SYS_ERR_WRONG_MAPPING; } // Destination genpaddr_t dest_gp = dest->u.vnode_x86_32_pdir.base; lpaddr_t dest_lp = gen_phys_to_local_phys(dest_gp); lvaddr_t dest_lv = local_phys_to_mem(dest_lp); union x86_32_pdir_entry *entry = (union x86_32_pdir_entry *)dest_lv + slot; // Set metadata create_mapping_cap(mapping_cte, src, dest_lp + slot * sizeof(union x86_32_pdir_entry), pte_count); // Source // XXX: offset is ignored genpaddr_t src_gp = src->u.vnode_x86_32_pdir.base; lpaddr_t src_lp = gen_phys_to_local_phys(src_gp); paging_x86_32_map_table(entry, src_lp); return SYS_ERR_OK; }