unsigned long __init set_phys_range_identity(unsigned long pfn_s,
				      unsigned long pfn_e)
{
	unsigned long pfn;

	if (unlikely(pfn_s >= MAX_P2M_PFN || pfn_e >= MAX_P2M_PFN))
		return 0;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
		return pfn_e - pfn_s;

	if (pfn_s > pfn_e)
		return 0;

	for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1));
		pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
		pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
	{
		WARN_ON(!early_alloc_p2m(pfn));
	}

	early_alloc_p2m_middle(pfn_s, true);
	early_alloc_p2m_middle(pfn_e, true);

	for (pfn = pfn_s; pfn < pfn_e; pfn++)
		if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
			break;

	if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s),
		"Identity mapping failed. We are %ld short of 1-1 mappings!\n",
		(pfn_e - pfn_s) - (pfn - pfn_s)))
		printk(KERN_DEBUG "1-1 mapping on %lx->%lx\n", pfn_s, pfn);

	return pfn - pfn_s;
}
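/*
 * Illustrative sketch (not part of the original file): one way a boot-time
 * caller could use set_phys_range_identity() to mark a non-RAM PFN range as
 * 1:1. The helper name example_identity_map_hole() and the idea that the
 * range comes from the host memory map are assumptions for illustration;
 * only the return-value check mirrors what the function above guarantees.
 */
static void __init example_identity_map_hole(unsigned long start_pfn,
					     unsigned long end_pfn)
{
	unsigned long set;

	/* Returns how many PFNs were actually marked identity. */
	set = set_phys_range_identity(start_pfn, end_pfn);
	if (set != (end_pfn - start_pfn))
		printk(KERN_WARNING "only %lu of %lu PFNs set 1-1\n",
		       set, end_pfn - start_pfn);
}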
bool __init early_set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		if (!early_alloc_p2m(pfn))
			return false;

		if (!early_alloc_p2m_middle(pfn, false /* boundary crossover OK!*/))
			return false;

		if (!__set_phys_to_machine(pfn, mfn))
			return false;
	}

	return true;
}
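/*
 * Illustrative sketch (not part of the original file): installing a single
 * PFN->MFN translation early in boot, before the regular allocator-backed
 * p2m code is usable. example_early_remap_one() is a hypothetical caller;
 * the fallback behaviour (growing the tree via early_alloc_p2m() and
 * early_alloc_p2m_middle()) is what early_set_phys_to_machine() above does
 * internally when the first __set_phys_to_machine() attempt fails.
 */
static void __init example_early_remap_one(unsigned long pfn, unsigned long mfn)
{
	if (!early_set_phys_to_machine(pfn, mfn))
		panic("Could not map PFN %lx to MFN %lx early in boot\n",
		      pfn, mfn);
}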
/*
 * Skim over the P2M tree looking at pages that are either filled with
 * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and
 * replace the P2M leaf with a p2m_missing or p2m_identity.
 * Stick the old page in the new P2M tree location.
 */
bool __init early_can_reuse_p2m_middle(unsigned long set_pfn, unsigned long set_mfn)
{
	unsigned topidx;
	unsigned mididx;
	unsigned ident_pfns;
	unsigned inv_pfns;
	unsigned long *p2m;
	unsigned long *mid_mfn_p;
	unsigned idx;
	unsigned long pfn;

	/* We only look when this entails a P2M middle layer */
	if (p2m_index(set_pfn))
		return false;

	for (pfn = 0; pfn <= MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
		topidx = p2m_top_index(pfn);

		if (!p2m_top[topidx])
			continue;

		if (p2m_top[topidx] == p2m_mid_missing)
			continue;

		mididx = p2m_mid_index(pfn);
		p2m = p2m_top[topidx][mididx];
		if (!p2m)
			continue;

		if ((p2m == p2m_missing) || (p2m == p2m_identity))
			continue;

		if ((unsigned long)p2m == INVALID_P2M_ENTRY)
			continue;

		ident_pfns = 0;
		inv_pfns = 0;
		for (idx = 0; idx < P2M_PER_PAGE; idx++) {
			/* IDENTITY_PFNs are 1:1 */
			if (p2m[idx] == IDENTITY_FRAME(pfn + idx))
				ident_pfns++;
			else if (p2m[idx] == INVALID_P2M_ENTRY)
				inv_pfns++;
			else
				break;
		}
		if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE))
			goto found;
	}
	return false;
found:
	/* Found one, replace old with p2m_identity or p2m_missing */
	p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);
	/* And the other for save/restore.. */
	mid_mfn_p = p2m_top_mfn_p[topidx];
	/* NOTE: Even if it is a p2m_identity it should still point to
	 * a page filled with INVALID_P2M_ENTRY entries. */
	mid_mfn_p[mididx] = virt_to_mfn(p2m_missing);

	/* Reset where we want to stick the old page in. */
	topidx = p2m_top_index(set_pfn);
	mididx = p2m_mid_index(set_pfn);

	/* This shouldn't happen */
	if (WARN_ON(p2m_top[topidx] == p2m_mid_missing))
		early_alloc_p2m(set_pfn);

	if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing))
		return false;

	p2m_init(p2m);
	p2m_top[topidx][mididx] = p2m;
	mid_mfn_p = p2m_top_mfn_p[topidx];
	mid_mfn_p[mididx] = virt_to_mfn(p2m);

	return true;
}
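/*
 * Illustrative sketch (not part of the original file): the reuse-then-allocate
 * pattern early_can_reuse_p2m_middle() is meant for. example_install_leaf()
 * is a hypothetical caller; the fallback path simply mirrors the allocation
 * sequence used by early_set_phys_to_machine() above.
 */
static bool __init example_install_leaf(unsigned long set_pfn, unsigned long set_mfn)
{
	/* First try to scavenge an existing all-identity/all-invalid leaf. */
	if (early_can_reuse_p2m_middle(set_pfn, set_mfn))
		return __set_phys_to_machine(set_pfn, set_mfn);

	/* Nothing reusable: allocate the mid and leaf pages the usual way. */
	if (!early_alloc_p2m(set_pfn))
		return false;
	if (!early_alloc_p2m_middle(set_pfn, false))
		return false;

	return __set_phys_to_machine(set_pfn, set_mfn);
}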