/*
 * Pre-populate the page tables covering the CPU entry area on 32-bit.
 *
 * For each PMD-sized slice of [CPU_ENTRY_AREA_BASE,
 * CPU_ENTRY_AREA_BASE + CPU_ENTRY_AREA_MAP_SIZE) this calls
 * populate_extra_pte() once, which allocates the PTE page backing that
 * slice so later per-CPU mappings into the area cannot fail.
 * No-op on 64-bit (the #ifdef covers the whole body).
 */
static __init void setup_cpu_entry_area_ptes(void)
{
#ifdef CONFIG_X86_32
	unsigned long start, end;

	/* Compile-time: the PAGES-derived size must cover the map size. */
	BUILD_BUG_ON(CPU_ENTRY_AREA_PAGES * PAGE_SIZE < CPU_ENTRY_AREA_MAP_SIZE);
	/* The base must be PMD-aligned since we step in PMD_SIZE units. */
	BUG_ON(CPU_ENTRY_AREA_BASE & ~PMD_MASK);

	start = CPU_ENTRY_AREA_BASE;
	end = start + CPU_ENTRY_AREA_MAP_SIZE;

	/*
	 * Careful here: start + PMD_SIZE might wrap around.
	 * The second loop condition (start >= CPU_ENTRY_AREA_BASE)
	 * terminates the loop if the increment overflows past the top
	 * of the address space.
	 */
	for (; start < end && start >= CPU_ENTRY_AREA_BASE; start += PMD_SIZE)
		populate_extra_pte(start);
#endif
}
/*
 * Per-CPU setup callback: ensure a PTE exists for @addr by delegating
 * to populate_extra_pte(), which allocates any missing intermediate
 * page-table pages for that address.
 */
static void __init pcpup_populate_pte(unsigned long addr)
{
	populate_extra_pte(addr);
}
/*
 * Rebuild the linear p2m (pfn -> mfn) list at the virtual address @p2m.
 *
 * Sets up the shared "missing" and "identity" p2m pages plus two PTE
 * pages whose entries all point at them read-only, then walks the pfn
 * space in chunks and maps each chunk with the cheapest structure that
 * fits:
 *   - mixed-type chunks get their own writable p2m page (on 64-bit a
 *     fresh copy of the initial data, on 32-bit the initial page is
 *     mapped in place),
 *   - uniform missing/identity chunks of one page share the global
 *     read-only missing/identity page,
 *   - uniform chunks spanning whole PMDs share the pre-built
 *     missing/identity PTE pages at the PMD level.
 *
 * Runs at early init (__init); uses populate_extra_pte()/_pmd() to
 * allocate intermediate page tables as needed.
 */
static void __init xen_rebuild_p2m_list(unsigned long *p2m)
{
	unsigned int i, chunk;
	unsigned long pfn;
	unsigned long *mfns;
	pte_t *ptep;
	pmd_t *pmdp;
	int type;

	/* Shared pages all "missing" / all "identity" chunks map to. */
	p2m_missing = alloc_p2m_page();
	p2m_init(p2m_missing);
	p2m_identity = alloc_p2m_page();
	p2m_init(p2m_identity);

	/*
	 * PTE pages whose every entry points (read-only) at the shared
	 * missing/identity page; used to cover whole PMDs at once.
	 * paravirt_alloc_pte() tells the hypervisor/paravirt layer these
	 * pages are now page tables.
	 */
	p2m_missing_pte = alloc_p2m_page();
	paravirt_alloc_pte(&init_mm, __pa(p2m_missing_pte) >> PAGE_SHIFT);
	p2m_identity_pte = alloc_p2m_page();
	paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		set_pte(p2m_missing_pte + i,
			pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO));
		set_pte(p2m_identity_pte + i,
			pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO));
	}

	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) {
		/*
		 * Try to map missing/identity PMDs or p2m-pages if possible.
		 * We have to respect the structure of the mfn_list_list
		 * which will be built just afterwards.
		 * Chunk size to test is one p2m page if we are in the middle
		 * of a mfn_list_list mid page and the complete mid page area
		 * if we are at index 0 of the mid page. Please note that a
		 * mid page might cover more than one PMD, e.g. on 32 bit PAE
		 * kernels.
		 */
		chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ?
			P2M_PER_PAGE : P2M_PER_PAGE * P2M_MID_PER_PAGE;

		type = xen_p2m_elem_type(pfn);
		i = 0;
		/*
		 * Scan for the first entry differing from the chunk's
		 * leading type; i == chunk afterwards means the chunk is
		 * uniformly missing/identity. P2M_TYPE_PFN chunks always
		 * need real data, so no scan is done for them (i stays 0).
		 */
		if (type != P2M_TYPE_PFN)
			for (i = 1; i < chunk; i++)
				if (xen_p2m_elem_type(pfn + i) != type)
					break;
		if (i < chunk)
			/* Reset to minimal chunk size. */
			chunk = P2M_PER_PAGE;

		if (type == P2M_TYPE_PFN || i < chunk) {
			/* Use initial p2m page contents. */
#ifdef CONFIG_X86_64
			/* 64-bit: copy so the initial list can be freed. */
			mfns = alloc_p2m_page();
			copy_page(mfns, xen_p2m_addr + pfn);
#else
			/* 32-bit: map the initial p2m page in place. */
			mfns = xen_p2m_addr + pfn;
#endif
			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL));
			continue;
		}

		if (chunk == P2M_PER_PAGE) {
			/* Map complete missing or identity p2m-page. */
			mfns = (type == P2M_TYPE_MISSING) ?
				p2m_missing : p2m_identity;
			ptep = populate_extra_pte((unsigned long)(p2m + pfn));
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO));
			continue;
		}

		/* Complete missing or identity PMD(s) can be mapped. */
		ptep = (type == P2M_TYPE_MISSING) ?
			p2m_missing_pte : p2m_identity_pte;
		for (i = 0; i < PMDS_PER_MID_PAGE; i++) {
			pmdp = populate_extra_pmd(
				(unsigned long)(p2m + pfn) + i * PMD_SIZE);
			set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE));
		}
	}
}