/* Create Xen's mappings of memory.
 * Mapping_size must be either 2MB or 32MB.
 * Base and virt must be mapping_size aligned.
 * Size must be a multiple of mapping_size.
 * second must be a contiguous set of second level page tables
 * covering the region starting at virt_offset. */
static void __init create_mappings(lpae_t *second,
                                   unsigned long virt_offset,
                                   unsigned long base_mfn,
                                   unsigned long nr_mfns,
                                   unsigned int mapping_size)
{
    unsigned long i, count;
    const unsigned long granularity = mapping_size >> PAGE_SHIFT;
    lpae_t pte, *p;

    ASSERT((mapping_size == MB(2)) || (mapping_size == MB(32)));
    ASSERT(!((virt_offset >> PAGE_SHIFT) % granularity));
    ASSERT(!(base_mfn % granularity));
    ASSERT(!(nr_mfns % granularity));

    count = nr_mfns / LPAE_ENTRIES;
    p = second + second_linear_offset(virt_offset);
    pte = mfn_to_xen_entry(base_mfn, WRITEALLOC);
    if ( granularity == 16 * LPAE_ENTRIES )
        pte.pt.contig = 1;  /* These maps are in 16-entry contiguous chunks. */
    for ( i = 0; i < count; i++ )
    {
        write_pte(p + i, pte);
        pte.pt.base += 1 << LPAE_SHIFT;
    }
    flush_xen_data_tlb_local();
}
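For context, a sketch of a typical caller: mapping the xenheap in 32MB chunks so the contiguous bit gets set. The constants and bookkeeping variables here (XENHEAP_VIRT_START, xenheap_virt_end, xenheap_mfn_start/end) are assumptions modelled on the arm32 xenheap setup, not part of the function above:

/* Hedged sketch of a caller, assuming the arm32 xenheap layout. */
void __init setup_xenheap_mappings(unsigned long base_mfn,
                                   unsigned long nr_mfns)
{
    /* 32MB granularity makes create_mappings() set the contiguous hint. */
    create_mappings(xen_second, XENHEAP_VIRT_START, base_mfn, nr_mfns, MB(32));

    /* Record where the xenheap is, for translation routines. */
    xenheap_virt_end = XENHEAP_VIRT_START + nr_mfns * PAGE_SIZE;
    xenheap_mfn_start = base_mfn;
    xenheap_mfn_end = base_mfn + nr_mfns;
}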
/* Map a 4k page in a fixmap entry */
void set_fixmap(unsigned map, unsigned long mfn, unsigned attributes)
{
    lpae_t pte = mfn_to_xen_entry(mfn, attributes);
    pte.pt.table = 1; /* 4k mappings always have this bit set */
    pte.pt.xn = 1;
    write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
    flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
}
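The inverse operation zeroes the PTE and flushes the same VA range. A sketch of the matching teardown, assuming an all-zero lpae_t encodes an invalid entry (pt.valid == 0):

/* Remove a mapping from a fixmap entry (sketch of the matching teardown) */
void clear_fixmap(unsigned map)
{
    lpae_t pte = {0};  /* all-zero PTE is invalid, so the slot unmaps */
    write_pte(xen_fixmap + third_table_offset(FIXMAP_ADDR(map)), pte);
    flush_xen_data_tlb_range_va(FIXMAP_ADDR(map), PAGE_SIZE);
}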
/* Create Xen's mappings of memory.
 * Base and virt must be 32MB aligned and size a multiple of 32MB. */
static void __init create_mappings(unsigned long virt,
                                   unsigned long base_mfn,
                                   unsigned long nr_mfns)
{
    unsigned long i, count;
    lpae_t pte, *p;

    ASSERT(!((virt >> PAGE_SHIFT) % (16 * LPAE_ENTRIES)));
    ASSERT(!(base_mfn % (16 * LPAE_ENTRIES)));
    ASSERT(!(nr_mfns % (16 * LPAE_ENTRIES)));

    count = nr_mfns / LPAE_ENTRIES;
    p = xen_second + second_linear_offset(virt);
    pte = mfn_to_xen_entry(base_mfn);
    pte.pt.hint = 1; /* These maps are in 16-entry contiguous chunks. */
    for ( i = 0; i < count; i++ )
    {
        write_pte(p + i, pte);
        pte.pt.base += 1 << LPAE_SHIFT;
    }
    flush_xen_data_tlb();
}
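To make the sizing concrete: each second-level entry maps LPAE_ENTRIES (512) 4K pages, i.e. 2MB, so count = nr_mfns / LPAE_ENTRIES second-level PTEs are written, and the hint bit groups every 16 consecutive entries into one 32MB TLB-friendly chunk. A standalone arithmetic sketch (hypothetical values, not Xen code):

#include <assert.h>
#include <stdio.h>

#define PAGE_SIZE    4096UL
#define LPAE_ENTRIES 512UL   /* 4K pages mapped per second-level entry */

int main(void)
{
    unsigned long nr_mfns = 16 * LPAE_ENTRIES * 4;  /* 128MB worth of pages */
    unsigned long count   = nr_mfns / LPAE_ENTRIES; /* second-level PTEs */

    assert(nr_mfns * PAGE_SIZE == 128UL << 20); /* 32768 pages == 128MB */
    assert(count == 64);                        /* 64 entries x 2MB = 128MB */
    assert(count % 16 == 0);                    /* whole 32MB hint chunks */
    printf("%lu second-level entries in %lu contiguous chunks\n",
           count, count / 16);
    return 0;
}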
static inline lpae_t pte_of_xenaddr(vaddr_t va)
{
    paddr_t ma = va + phys_offset;
    unsigned long mfn = ma >> PAGE_SHIFT;
    return mfn_to_xen_entry(mfn, WRITEALLOC);
}
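pte_of_xenaddr() simply folds phys_offset into the VA before building the entry, so callers can express the table-linking idiom without repeating that arithmetic. A hypothetical usage sketch (link_xenmap_example is illustrative, not a real Xen function); it is equivalent to the open-coded phys_offset form that appears in setup_pagetables() below:

static void __init link_xenmap_example(void)
{
    lpae_t pte = pte_of_xenaddr((vaddr_t)xen_xenmap);

    pte.pt.table = 1;  /* points at a third-level table, not a superpage */
    write_pte(xen_second + second_linear_offset(XEN_VIRT_START), pte);
}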
/* Map a page of domheap memory */
void *map_domain_page(mfn_t mfn)
{
    unsigned long flags;
    lpae_t *map = this_cpu(xen_dommap);
    unsigned long slot_mfn = mfn_x(mfn) & ~LPAE_ENTRY_MASK;
    vaddr_t va;
    lpae_t pte;
    int i, slot;

    local_irq_save(flags);

    /* The map is laid out as an open-addressed hash table where each
     * entry is a 2MB superpage pte.  We use the available bits of each
     * PTE as a reference count; when the refcount is zero the slot can
     * be reused. */
    for ( slot = (slot_mfn >> LPAE_SHIFT) % DOMHEAP_ENTRIES, i = 0;
          i < DOMHEAP_ENTRIES;
          slot = (slot + 1) % DOMHEAP_ENTRIES, i++ )
    {
        if ( map[slot].pt.avail < 0xf &&
             map[slot].pt.base == slot_mfn &&
             map[slot].pt.valid )
        {
            /* This slot already points to the right place; reuse it */
            map[slot].pt.avail++;
            break;
        }
        else if ( map[slot].pt.avail == 0 )
        {
            /* Commandeer this 2MB slot */
            pte = mfn_to_xen_entry(slot_mfn, WRITEALLOC);
            pte.pt.avail = 1;
            write_pte(map + slot, pte);
            break;
        }
    }
    /* If the map fills up, the callers have misbehaved. */
    BUG_ON(i == DOMHEAP_ENTRIES);

#ifndef NDEBUG
    /* Searching the hash could get slow if the map starts filling up.
     * Cross that bridge when we come to it */
    {
        static int max_tries = 32;
        if ( i >= max_tries )
        {
            dprintk(XENLOG_WARNING, "Domheap map is filling: %i tries\n", i);
            max_tries *= 2;
        }
    }
#endif

    local_irq_restore(flags);

    va = (DOMHEAP_VIRT_START
          + (slot << SECOND_SHIFT)
          + ((mfn_x(mfn) & LPAE_ENTRY_MASK) << THIRD_SHIFT));

    /*
     * We may not have flushed this specific subpage at map time,
     * since we only flush the 4k page not the superpage
     */
    flush_xen_data_tlb_range_va_local(va, PAGE_SIZE);

    return (void *)va;
}
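The refcount held in pt.avail is dropped by the release path: recover the slot index from the VA, then decrement under the same IRQ masking. A sketch of the matching unmap, reconstructed under those assumptions:

/* Release a mapping taken with map_domain_page() (sketch) */
void unmap_domain_page(const void *va)
{
    unsigned long flags;
    lpae_t *map = this_cpu(xen_dommap);
    int slot = ((unsigned long) va - DOMHEAP_VIRT_START) >> SECOND_SHIFT;

    local_irq_save(flags);

    ASSERT(slot >= 0 && slot < DOMHEAP_ENTRIES);
    ASSERT(map[slot].pt.avail != 0);

    map[slot].pt.avail--;  /* slot becomes reusable once this hits zero */

    local_irq_restore(flags);
}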
/* Boot-time pagetable setup.
 * Changes here may need matching changes in head.S */
void __init setup_pagetables(unsigned long boot_phys_offset, paddr_t xen_paddr)
{
    unsigned long dest_va;
    lpae_t pte, *p;
    int i;

    /* Map the destination in the boot misc area. */
    dest_va = BOOT_MISC_VIRT_START;
    pte = mfn_to_xen_entry(xen_paddr >> PAGE_SHIFT);
    write_pte(xen_second + second_table_offset(dest_va), pte);
    flush_xen_data_tlb_range_va(dest_va, SECOND_SIZE);

    /* Calculate virt-to-phys offset for the new location */
    phys_offset = xen_paddr - (unsigned long) _start;

    /* Copy */
    memcpy((void *) dest_va, _start, _end - _start);

    /* Beware!  Any state we modify between now and the PT switch may be
     * discarded when we switch over to the copy. */

    /* Update the copy of xen_pgtable to use the new paddrs */
    p = (void *) xen_pgtable + dest_va - (unsigned long) _start;
#ifdef CONFIG_ARM_64
    p[0].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT;
    p = (void *) xen_first + dest_va - (unsigned long) _start;
#endif
    for ( i = 0; i < 4; i++ )
        p[i].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT;

    p = (void *) xen_second + dest_va - (unsigned long) _start;
    if ( boot_phys_offset != 0 )
    {
        /* Remove the old identity mapping of the boot paddr */
        vaddr_t va = (vaddr_t)_start + boot_phys_offset;
        p[second_linear_offset(va)].bits = 0;
    }
    for ( i = 0; i < 4 * LPAE_ENTRIES; i++ )
        if ( p[i].pt.valid )
            p[i].pt.base += (phys_offset - boot_phys_offset) >> PAGE_SHIFT;

    /* Change pagetables to the copy in the relocated Xen */
    boot_ttbr = (uintptr_t) xen_pgtable + phys_offset;
    flush_xen_dcache(boot_ttbr);
    flush_xen_dcache_va_range((void*)dest_va, _end - _start);
    flush_xen_text_tlb();

    WRITE_SYSREG64(boot_ttbr, TTBR0_EL2);
    dsb();                         /* Ensure visibility of HTTBR update */
    flush_xen_text_tlb();

    /* Undo the temporary map */
    pte.bits = 0;
    write_pte(xen_second + second_table_offset(dest_va), pte);
    flush_xen_text_tlb();

    /* Link in the fixmap pagetable */
    pte = mfn_to_xen_entry((((unsigned long) xen_fixmap) + phys_offset)
                           >> PAGE_SHIFT);
    pte.pt.table = 1;
    write_pte(xen_second + second_table_offset(FIXMAP_ADDR(0)), pte);
    /*
     * No flush required here. Individual flushes are done in
     * set_fixmap as entries are used.
     */

    /* Break up the Xen mapping into 4k pages and protect them separately. */
    for ( i = 0; i < LPAE_ENTRIES; i++ )
    {
        unsigned long mfn = paddr_to_pfn(xen_paddr) + i;
        unsigned long va = XEN_VIRT_START + (i << PAGE_SHIFT);
        if ( !is_kernel(va) )
            break;
        pte = mfn_to_xen_entry(mfn);
        pte.pt.table = 1; /* 4k mappings always have this bit set */
        if ( is_kernel_text(va) || is_kernel_inittext(va) )
        {
            pte.pt.xn = 0;
            pte.pt.ro = 1;
        }
        if ( is_kernel_rodata(va) )
            pte.pt.ro = 1;
        write_pte(xen_xenmap + i, pte);
        /* No flush required here as page table is not hooked in yet. */
    }

    pte = mfn_to_xen_entry((((unsigned long) xen_xenmap) + phys_offset)
                           >> PAGE_SHIFT);
    pte.pt.table = 1;
    write_pte(xen_second + second_linear_offset(XEN_VIRT_START), pte);
    /* TLBFLUSH and ISB would be needed here, but wait until we set WXN */

    /* From now on, no mapping may be both writable and executable. */
    WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
    /* Flush everything after setting WXN bit. */
    flush_xen_text_tlb();
}
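Secondary CPUs never run setup_pagetables(), so the same WXN enforcement has to be applied separately once each CPU is on the runtime pagetables. A sketch of that per-CPU hook; the name mmu_init_secondary_cpu matches contemporary Xen trees, but treat the exact signature as an assumption:

/* Sketch: per-CPU MMU setup on secondary CPUs, mirroring the WXN step above. */
void mmu_init_secondary_cpu(void)
{
    /* From now on, no mapping may be both writable and executable. */
    WRITE_SYSREG32(READ_SYSREG32(SCTLR_EL2) | SCTLR_WXN, SCTLR_EL2);
    flush_xen_text_tlb();
}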