static bool __init early_alloc_p2m(unsigned long pfn)
{
	unsigned topidx = p2m_top_index(pfn);
	unsigned long *mid_mfn_p;
	unsigned long **mid;

	mid = p2m_top[topidx];
	mid_mfn_p = p2m_top_mfn_p[topidx];
	if (mid == p2m_mid_missing) {
		mid = extend_brk(PAGE_SIZE, PAGE_SIZE);

		p2m_mid_init(mid);

		p2m_top[topidx] = mid;

		BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
	}
	/* And the save/restore P2M tables.. */
	if (mid_mfn_p == p2m_mid_missing_mfn) {
		mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
		p2m_mid_mfn_init(mid_mfn_p);

		p2m_top_mfn_p[topidx] = mid_mfn_p;
		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
		/* Note: we don't set mid_mfn_p[mididx] here,
		 * look in early_alloc_p2m_middle */
	}
	return true;
}
/*
 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
 *
 * This is called both at boot time, and after resuming from suspend:
 * - At boot time we're called rather early, and must use alloc_bootmem*()
 *   to allocate memory.
 *
 * - After resume we're called from within stop_machine, but the mfn
 *   tree should already be completely allocated.
 */
void __ref xen_build_mfn_list_list(void)
{
	unsigned long pfn, mfn;
	pte_t *ptep;
	unsigned int level, topidx, mididx;
	unsigned long *mid_mfn_p;

	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	/* Pre-initialize p2m_top_mfn to be completely missing */
	if (p2m_top_mfn == NULL) {
		p2m_mid_missing_mfn = alloc_p2m_page();
		p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);

		p2m_top_mfn_p = alloc_p2m_page();
		p2m_top_mfn_p_init(p2m_top_mfn_p);

		p2m_top_mfn = alloc_p2m_page();
		p2m_top_mfn_init(p2m_top_mfn);
	} else {
		/* Reinitialise, mfn's all change after migration */
		p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing);
	}

	for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN;
	     pfn += P2M_PER_PAGE) {
		topidx = p2m_top_index(pfn);
		mididx = p2m_mid_index(pfn);

		mid_mfn_p = p2m_top_mfn_p[topidx];
		ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn),
				      &level);
		BUG_ON(!ptep || level != PG_LEVEL_4K);
		mfn = pte_mfn(*ptep);
		ptep = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));

		/* Don't bother allocating any mfn mid levels if
		 * they're just missing, just update the stored mfn,
		 * since all could have changed over a migrate.
		 */
		if (ptep == p2m_missing_pte || ptep == p2m_identity_pte) {
			BUG_ON(mididx);
			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
			p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
			pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
			continue;
		}

		if (mid_mfn_p == p2m_mid_missing_mfn) {
			mid_mfn_p = alloc_p2m_page();
			p2m_mid_mfn_init(mid_mfn_p, p2m_missing);

			p2m_top_mfn_p[topidx] = mid_mfn_p;
		}

		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
		mid_mfn_p[mididx] = mfn;
	}
}
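This version allocates its pages through alloc_p2m_page() rather than calling extend_brk() directly. A minimal sketch of that helper and its counterpart free_p2m_page(), assuming the usual split between the boot allocator and the page allocator; the exact boot-time allocator differs between kernel versions:

static void * __ref alloc_p2m_page(void)
{
	/* Sketch: before the slab allocator is up, fall back to the boot allocator. */
	if (unlikely(!slab_is_available()))
		return memblock_alloc(PAGE_SIZE, PAGE_SIZE);

	return (void *)__get_free_page(GFP_KERNEL);
}

static void __ref free_p2m_page(void *p)
{
	if (unlikely(!slab_is_available())) {
		/* Boot-allocator pages are handed back by physical address. */
		memblock_free(__pa(p), PAGE_SIZE);
		return;
	}

	free_page((unsigned long)p);
}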
unsigned long __init set_phys_range_identity(unsigned long pfn_s,
					     unsigned long pfn_e)
{
	unsigned long pfn;

	if (unlikely(pfn_s >= MAX_P2M_PFN || pfn_e >= MAX_P2M_PFN))
		return 0;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap)))
		return pfn_e - pfn_s;

	if (pfn_s > pfn_e)
		return 0;

	for (pfn = (pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1));
	     pfn < ALIGN(pfn_e, (P2M_MID_PER_PAGE * P2M_PER_PAGE));
	     pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);
		unsigned long *mid_mfn_p;
		unsigned long **mid;

		mid = p2m_top[topidx];
		mid_mfn_p = p2m_top_mfn_p[topidx];
		if (mid == p2m_mid_missing) {
			mid = extend_brk(PAGE_SIZE, PAGE_SIZE);

			p2m_mid_init(mid);

			p2m_top[topidx] = mid;

			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
		}
		if (mid_mfn_p == p2m_mid_missing_mfn) {
			mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
			p2m_mid_mfn_init(mid_mfn_p);

			p2m_top_mfn_p[topidx] = mid_mfn_p;
			p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
		}
	}

	__early_alloc_p2m(pfn_s);
	__early_alloc_p2m(pfn_e);

	for (pfn = pfn_s; pfn < pfn_e; pfn++)
		if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
			break;

	if (!WARN((pfn - pfn_s) != (pfn_e - pfn_s),
		  "Identity mapping failed. We are %ld short of 1-1 mappings!\n",
		  (pfn_e - pfn_s) - (pfn - pfn_s)))
		printk(KERN_DEBUG "1-1 mapping on %lx->%lx\n", pfn_s, pfn);

	return pfn - pfn_s;
}
/*
 * Build the parallel p2m_top_mfn and p2m_mid_mfn structures
 *
 * This is called both at boot time, and after resuming from suspend:
 * - At boot time we're called very early, and must use extend_brk()
 *   to allocate memory.
 *
 * - After resume we're called from within stop_machine, but the mfn
 *   tree should already be completely allocated.
 */
void __ref xen_build_mfn_list_list(void)
{
	unsigned long pfn;

	/* Pre-initialize p2m_top_mfn to be completely missing */
	if (p2m_top_mfn == NULL) {
		p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
		p2m_mid_mfn_init(p2m_mid_missing_mfn);

		p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
		p2m_top_mfn_p_init(p2m_top_mfn_p);

		p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
		p2m_top_mfn_init(p2m_top_mfn);
	} else {
		/* Reinitialise, mfn's all change after migration */
		p2m_mid_mfn_init(p2m_mid_missing_mfn);
	}

	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);
		unsigned mididx = p2m_mid_index(pfn);
		unsigned long **mid;
		unsigned long *mid_mfn_p;

		mid = p2m_top[topidx];
		mid_mfn_p = p2m_top_mfn_p[topidx];

		/* Don't bother allocating any mfn mid levels if
		 * they're just missing, just update the stored mfn,
		 * since all could have changed over a migrate.
		 */
		if (mid == p2m_mid_missing) {
			BUG_ON(mididx);
			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
			p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
			pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
			continue;
		}

		if (mid_mfn_p == p2m_mid_missing_mfn) {
			/*
			 * XXX boot-time only!  We should never find
			 * missing parts of the mfn tree after
			 * runtime.  extend_brk() will BUG if we call
			 * it too late.
			 */
			mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
			p2m_mid_mfn_init(mid_mfn_p);

			p2m_top_mfn_p[topidx] = mid_mfn_p;
		}

		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
		mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
	}
}
unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	idx = p2m_index(pfn);
	return p2m_top[topidx][idx];
}
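For reference, the index helpers used by this two-level variant amount to a divide/modulo on the pfn. A sketch, assuming P2M_ENTRIES_PER_PAGE is PAGE_SIZE / sizeof(unsigned long) as in the original layout:

/* Sketch of the two-level index helpers; sizing assumption noted above. */
#define P2M_ENTRIES_PER_PAGE	(PAGE_SIZE / sizeof(unsigned long))

static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_DOMAIN_PAGES);
	return pfn / P2M_ENTRIES_PER_PAGE;	/* which leaf page */
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_ENTRIES_PER_PAGE;	/* slot within that page */
}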
/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned long pfn;

	xen_max_p2m_pfn = max_pfn;

	p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
	p2m_init(p2m_missing);

	p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
	p2m_mid_init(p2m_mid_missing);

	p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
	p2m_top_init(p2m_top);

	p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
	p2m_init(p2m_identity);

	/*
	 * The domain builder gives us a pre-constructed p2m array in
	 * mfn_list for all the pages initially given to us, so we just
	 * need to graft that into our tree structure.
	 */
	for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);
		unsigned mididx = p2m_mid_index(pfn);

		if (p2m_top[topidx] == p2m_mid_missing) {
			unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
			p2m_mid_init(mid);

			p2m_top[topidx] = mid;
		}

		/*
		 * As long as the mfn_list has enough entries to completely
		 * fill a p2m page, pointing into the array is ok. But if
		 * not the entries beyond the last pfn will be undefined.
		 */
		if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
			unsigned long p2midx;

			p2midx = max_pfn % P2M_PER_PAGE;
			for ( ; p2midx < P2M_PER_PAGE; p2midx++)
				mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
		}
		p2m_top[topidx][mididx] = &mfn_list[pfn];
	}

	m2p_override_init();
}
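A sketch of what the small init helpers used above (p2m_init(), p2m_mid_init(), p2m_top_init()) are expected to do, namely fill a freshly allocated page with the appropriate "missing" default; the authoritative definitions live elsewhere in p2m.c:

/* Sketch of the per-level init helpers, matching how they are used above. */
static void p2m_init(unsigned long *p2m)
{
	unsigned i;

	/* A leaf page starts out with every entry invalid. */
	for (i = 0; i < P2M_PER_PAGE; i++)
		p2m[i] = INVALID_P2M_ENTRY;
}

static void p2m_mid_init(unsigned long **mid)
{
	unsigned i;

	/* A mid page starts out with every slot pointing at the missing leaf. */
	for (i = 0; i < P2M_MID_PER_PAGE; i++)
		mid[i] = p2m_missing;
}

static void p2m_top_init(unsigned long ***top)
{
	unsigned i;

	/* The top page starts out with every slot pointing at the missing mid. */
	for (i = 0; i < P2M_TOP_PER_PAGE; i++)
		top[i] = p2m_mid_missing;
}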
/* Set up p2m_top to point to the domain-builder provided p2m pages */
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned pfn;

	for (pfn = 0; pfn < max_pfn; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top[topidx] = &mfn_list[pfn];
	}

	xen_build_mfn_list_list();
}
/* Build the parallel p2m_top_mfn structures */
static void __init xen_build_mfn_list_list(void)
{
	unsigned pfn, idx;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_ENTRIES_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);

		p2m_top_mfn[topidx] = virt_to_mfn(p2m_top[topidx]);
	}

	for (idx = 0; idx < ARRAY_SIZE(p2m_top_mfn_list); idx++) {
		unsigned topidx = idx * P2M_ENTRIES_PER_PAGE;

		p2m_top_mfn_list[idx] = virt_to_mfn(&p2m_top_mfn[topidx]);
	}
}
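The arrays referenced by the two routines above are statically sized in this early layout. A sketch of the backing declarations, assuming the original TOP_ENTRIES sizing; treat the initializers and attributes as illustrative rather than authoritative:

/* Sketch of the statically sized two-level arrays. */
#define TOP_ENTRIES		(MAX_DOMAIN_PAGES / P2M_ENTRIES_PER_PAGE)

/* Placeholder for holes in the address space */
static unsigned long p2m_missing[P2M_ENTRIES_PER_PAGE] __page_aligned_data =
		{ [0 ... P2M_ENTRIES_PER_PAGE - 1] = ~0UL };

/* Array of pointers to pages containing p2m entries */
static unsigned long *p2m_top[TOP_ENTRIES] __page_aligned_data =
		{ [0 ... TOP_ENTRIES - 1] = &p2m_missing[0] };

/* Arrays of p2m arrays expressed in mfns, used for save/restore */
static unsigned long p2m_top_mfn[TOP_ENTRIES] __page_aligned_bss;
static unsigned long p2m_top_mfn_list[TOP_ENTRIES / P2M_ENTRIES_PER_PAGE]
		__page_aligned_bss;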
unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, mididx, idx;

	if (unlikely(pfn >= MAX_P2M_PFN))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	mididx = p2m_mid_index(pfn);
	idx = p2m_index(pfn);

	if (p2m_top[topidx][mididx] == p2m_identity)
		return IDENTITY_FRAME(pfn);

	return p2m_top[topidx][mididx][idx];
}
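In the three-level tree the pfn is split into a top, mid and leaf index. A sketch of the helpers, assuming P2M_PER_PAGE = PAGE_SIZE / sizeof(unsigned long) and P2M_MID_PER_PAGE = P2M_TOP_PER_PAGE = PAGE_SIZE / sizeof(unsigned long *):

/* Sketch of the three-level index helpers under the assumptions above. */
static inline unsigned p2m_top_index(unsigned long pfn)
{
	BUG_ON(pfn >= MAX_P2M_PFN);
	return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE);
}

static inline unsigned p2m_mid_index(unsigned long pfn)
{
	return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE;
}

static inline unsigned p2m_index(unsigned long pfn)
{
	return pfn % P2M_PER_PAGE;
}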
void __ref xen_build_mfn_list_list(void)
{
	unsigned long pfn;

	if (p2m_top_mfn == NULL) {
		p2m_mid_missing_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
		p2m_mid_mfn_init(p2m_mid_missing_mfn);

		p2m_top_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
		p2m_top_mfn_p_init(p2m_top_mfn_p);

		p2m_top_mfn = extend_brk(PAGE_SIZE, PAGE_SIZE);
		p2m_top_mfn_init(p2m_top_mfn);
	} else {
		p2m_mid_mfn_init(p2m_mid_missing_mfn);
	}

	for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += P2M_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);
		unsigned mididx = p2m_mid_index(pfn);
		unsigned long **mid;
		unsigned long *mid_mfn_p;

		mid = p2m_top[topidx];
		mid_mfn_p = p2m_top_mfn_p[topidx];

		if (mid == p2m_mid_missing) {
			BUG_ON(mididx);
			BUG_ON(mid_mfn_p != p2m_mid_missing_mfn);
			p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn);
			pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;
			continue;
		}

		if (mid_mfn_p == p2m_mid_missing_mfn) {
			mid_mfn_p = extend_brk(PAGE_SIZE, PAGE_SIZE);
			p2m_mid_mfn_init(mid_mfn_p);

			p2m_top_mfn_p[topidx] = mid_mfn_p;
		}

		p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p);
		mid_mfn_p[mididx] = virt_to_mfn(mid[mididx]);
	}
}
void __init xen_build_dynamic_phys_to_machine(void)
{
	unsigned long *mfn_list = (unsigned long *)xen_start_info->mfn_list;
	unsigned long max_pfn = min(MAX_DOMAIN_PAGES, xen_start_info->nr_pages);
	unsigned long pfn;

	xen_max_p2m_pfn = max_pfn;

	p2m_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
	p2m_init(p2m_missing);

	p2m_mid_missing = extend_brk(PAGE_SIZE, PAGE_SIZE);
	p2m_mid_init(p2m_mid_missing);

	p2m_top = extend_brk(PAGE_SIZE, PAGE_SIZE);
	p2m_top_init(p2m_top);

	p2m_identity = extend_brk(PAGE_SIZE, PAGE_SIZE);
	p2m_init(p2m_identity);

	for (pfn = 0; pfn < max_pfn; pfn += P2M_PER_PAGE) {
		unsigned topidx = p2m_top_index(pfn);
		unsigned mididx = p2m_mid_index(pfn);

		if (p2m_top[topidx] == p2m_mid_missing) {
			unsigned long **mid = extend_brk(PAGE_SIZE, PAGE_SIZE);
			p2m_mid_init(mid);

			p2m_top[topidx] = mid;
		}

		if (unlikely(pfn + P2M_PER_PAGE > max_pfn)) {
			unsigned long p2midx;

			p2midx = max_pfn % P2M_PER_PAGE;
			for ( ; p2midx < P2M_PER_PAGE; p2midx++)
				mfn_list[pfn + p2midx] = INVALID_P2M_ENTRY;
		}
		p2m_top[topidx][mididx] = &mfn_list[pfn];
	}

	m2p_override_init();
}
/* install a new p2m_top page */
bool install_p2mtop_page(unsigned long pfn, unsigned long *p)
{
	unsigned topidx = p2m_top_index(pfn);
	unsigned long **pfnp, *mfnp;
	unsigned i;

	pfnp = &p2m_top[topidx];
	mfnp = &p2m_top_mfn[topidx];

	for (i = 0; i < P2M_ENTRIES_PER_PAGE; i++)
		p[i] = INVALID_P2M_ENTRY;

	if (cmpxchg(pfnp, p2m_missing, p) == p2m_missing) {
		*mfnp = virt_to_mfn(p);
		return true;
	}

	return false;
}
static bool __init early_alloc_p2m_middle(unsigned long pfn, bool check_boundary)
{
	unsigned topidx, mididx, idx;
	unsigned long *p2m;
	unsigned long *mid_mfn_p;

	topidx = p2m_top_index(pfn);
	mididx = p2m_mid_index(pfn);
	idx = p2m_index(pfn);

	/* Pfff.. No boundary cross-over, let's get out. */
	if (!idx && check_boundary)
		return false;

	WARN(p2m_top[topidx][mididx] == p2m_identity,
	     "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n",
	     topidx, mididx);

	/*
	 * Could be done by xen_build_dynamic_phys_to_machine..
	 */
	if (p2m_top[topidx][mididx] != p2m_missing)
		return false;

	/* Boundary cross-over for the edges: */
	p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);

	p2m_init(p2m);

	p2m_top[topidx][mididx] = p2m;

	/* For save/restore we need the MFN of the P2M saved */
	mid_mfn_p = p2m_top_mfn_p[topidx];
	WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
	     "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
	     topidx, mididx);
	mid_mfn_p[mididx] = virt_to_mfn(p2m);

	return true;
}
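A hypothetical illustration of how the two early allocators fit together when pre-populating an identity range at boot: the mid level is allocated per P2M_MID_PER_PAGE * P2M_PER_PAGE chunk, and leaf pages are only needed at the range edges where the mapping does not start or end on a page boundary. The helper name early_populate_identity_range() is made up for this sketch:

/* Sketch only: early_populate_identity_range() is a hypothetical caller. */
static void __init early_populate_identity_range(unsigned long pfn_s,
						 unsigned long pfn_e)
{
	unsigned long pfn;

	/* Make sure a mid level exists for every chunk the range touches. */
	for (pfn = pfn_s & ~(P2M_MID_PER_PAGE * P2M_PER_PAGE - 1);
	     pfn < ALIGN(pfn_e, P2M_MID_PER_PAGE * P2M_PER_PAGE);
	     pfn += P2M_MID_PER_PAGE * P2M_PER_PAGE)
		WARN_ON(!early_alloc_p2m(pfn));

	/* Leaf pages are only needed where the range edges cross a page. */
	early_alloc_p2m_middle(pfn_s, true);
	early_alloc_p2m_middle(pfn_e, true);

	for (pfn = pfn_s; pfn < pfn_e; pfn++)
		if (!__set_phys_to_machine(pfn, IDENTITY_FRAME(pfn)))
			break;
}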
/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, idx;

	if (unlikely(pfn >= MAX_DOMAIN_PAGES)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return true;
	}

	topidx = p2m_top_index(pfn);
	if (p2m_top[topidx] == p2m_missing) {
		if (mfn == INVALID_P2M_ENTRY)
			return true;
		return false;
	}

	idx = p2m_index(pfn);
	p2m_top[topidx][idx] = mfn;

	return true;
}
unsigned long get_phys_to_machine(unsigned long pfn)
{
	unsigned topidx, mididx, idx;

	if (unlikely(pfn >= MAX_P2M_PFN))
		return INVALID_P2M_ENTRY;

	topidx = p2m_top_index(pfn);
	mididx = p2m_mid_index(pfn);
	idx = p2m_index(pfn);

	/*
	 * The INVALID_P2M_ENTRY is filled in both p2m_*identity
	 * and in p2m_*missing, so returning the INVALID_P2M_ENTRY
	 * would be wrong.
	 */
	if (p2m_top[topidx][mididx] == p2m_identity)
		return IDENTITY_FRAME(pfn);

	return p2m_top[topidx][mididx][idx];
}
static bool __init __early_alloc_p2m(unsigned long pfn)
{
	unsigned topidx, mididx, idx;

	topidx = p2m_top_index(pfn);
	mididx = p2m_mid_index(pfn);
	idx = p2m_index(pfn);

	if (!idx)
		return false;

	WARN(p2m_top[topidx][mididx] == p2m_identity,
	     "P2M[%d][%d] == IDENTITY, should be MISSING (or alloced)!\n",
	     topidx, mididx);

	if (p2m_top[topidx][mididx] != p2m_missing)
		return false;

	if (idx) {
		unsigned long *p2m = extend_brk(PAGE_SIZE, PAGE_SIZE);
		unsigned long *mid_mfn_p;

		p2m_init(p2m);

		p2m_top[topidx][mididx] = p2m;

		mid_mfn_p = p2m_top_mfn_p[topidx];
		WARN(mid_mfn_p[mididx] != virt_to_mfn(p2m_missing),
		     "P2M_TOP_P[%d][%d] != MFN of p2m_missing!\n",
		     topidx, mididx);
		mid_mfn_p[mididx] = virt_to_mfn(p2m);
	}

	return idx != 0;
}
/* Try to install p2m mapping; fail if intermediate bits missing */
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, mididx, idx;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return true;
	}
	if (unlikely(pfn >= MAX_P2M_PFN)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return true;
	}

	topidx = p2m_top_index(pfn);
	mididx = p2m_mid_index(pfn);
	idx = p2m_index(pfn);

	/* For sparse holes where the p2m leaf has a real PFN along with
	 * PCI holes, stick in the PFN as the MFN value.
	 */
	if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) {
		if (p2m_top[topidx][mididx] == p2m_identity)
			return true;

		/* Swap over from MISSING to IDENTITY if needed. */
		if (p2m_top[topidx][mididx] == p2m_missing) {
			WARN_ON(cmpxchg(&p2m_top[topidx][mididx], p2m_missing,
				p2m_identity) != p2m_missing);
			return true;
		}
	}

	if (p2m_top[topidx][mididx] == p2m_missing)
		return mfn == INVALID_P2M_ENTRY;

	p2m_top[topidx][mididx][idx] = mfn;

	return true;
}
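The identity handling above relies on tagging MFN values with a high bit. A sketch of the frame-tagging macros it assumes, along the lines of asm/xen/page.h; the bit positions shown are the conventional ones and have not been checked against every kernel version:

/* Sketch: top bits of an entry distinguish foreign and 1:1 (identity) frames. */
#define FOREIGN_FRAME_BIT	(1UL << (BITS_PER_LONG - 1))
#define IDENTITY_FRAME_BIT	(1UL << (BITS_PER_LONG - 2))

#define FOREIGN_FRAME(m)	((m) | FOREIGN_FRAME_BIT)
#define IDENTITY_FRAME(m)	((m) | IDENTITY_FRAME_BIT)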
bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	unsigned topidx, mididx, idx;

	if (unlikely(xen_feature(XENFEAT_auto_translated_physmap))) {
		BUG_ON(pfn != mfn && mfn != INVALID_P2M_ENTRY);
		return true;
	}
	if (unlikely(pfn >= MAX_P2M_PFN)) {
		BUG_ON(mfn != INVALID_P2M_ENTRY);
		return true;
	}

	topidx = p2m_top_index(pfn);
	mididx = p2m_mid_index(pfn);
	idx = p2m_index(pfn);

	if (mfn != INVALID_P2M_ENTRY && (mfn & IDENTITY_FRAME_BIT)) {
		if (p2m_top[topidx][mididx] == p2m_identity)
			return true;

		if (p2m_top[topidx][mididx] == p2m_missing) {
			WARN_ON(cmpxchg(&p2m_top[topidx][mididx], p2m_missing,
				p2m_identity) != p2m_missing);
			return true;
		}
	}

	if (p2m_top[topidx][mididx] == p2m_missing)
		return mfn == INVALID_P2M_ENTRY;

	p2m_top[topidx][mididx][idx] = mfn;

	return true;
}
/*
 * Fully allocate the p2m structure for a given pfn.  We need to check
 * that both the top and mid levels are allocated, and make sure the
 * parallel mfn tree is kept in sync.  We may race with other cpus, so
 * the new pages are installed with cmpxchg; if we lose the race then
 * simply free the page we allocated and use the one that's there.
 */
static bool alloc_p2m(unsigned long pfn)
{
	unsigned topidx, mididx;
	unsigned long ***top_p, **mid;
	unsigned long *top_mfn_p, *mid_mfn;

	topidx = p2m_top_index(pfn);
	mididx = p2m_mid_index(pfn);

	top_p = &p2m_top[topidx];
	mid = *top_p;

	if (mid == p2m_mid_missing) {
		/* Mid level is missing, allocate a new one */
		mid = alloc_p2m_page();
		if (!mid)
			return false;

		p2m_mid_init(mid);

		if (cmpxchg(top_p, p2m_mid_missing, mid) != p2m_mid_missing)
			free_p2m_page(mid);
	}

	top_mfn_p = &p2m_top_mfn[topidx];
	mid_mfn = p2m_top_mfn_p[topidx];

	BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);

	if (mid_mfn == p2m_mid_missing_mfn) {
		/* Separately check the mid mfn level */
		unsigned long missing_mfn;
		unsigned long mid_mfn_mfn;

		mid_mfn = alloc_p2m_page();
		if (!mid_mfn)
			return false;

		p2m_mid_mfn_init(mid_mfn);

		missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
		mid_mfn_mfn = virt_to_mfn(mid_mfn);
		if (cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn) != missing_mfn)
			free_p2m_page(mid_mfn);
		else
			p2m_top_mfn_p[topidx] = mid_mfn;
	}

	if (p2m_top[topidx][mididx] == p2m_identity ||
	    p2m_top[topidx][mididx] == p2m_missing) {
		/* p2m leaf page is missing */
		unsigned long *p2m;
		unsigned long *p2m_orig = p2m_top[topidx][mididx];

		p2m = alloc_p2m_page();
		if (!p2m)
			return false;

		p2m_init(p2m);

		if (cmpxchg(&mid[mididx], p2m_orig, p2m) != p2m_orig)
			free_p2m_page(p2m);
		else
			mid_mfn[mididx] = virt_to_mfn(p2m);
	}

	return true;
}
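A sketch of how callers typically combine this with __set_phys_to_machine(): try the cheap update first, and only allocate the missing levels on failure. This mirrors the usual set_phys_to_machine() wrapper, though the exact form varies between versions:

/* Sketch of the usual wrapper around alloc_p2m() and __set_phys_to_machine(). */
bool set_phys_to_machine(unsigned long pfn, unsigned long mfn)
{
	if (unlikely(!__set_phys_to_machine(pfn, mfn))) {
		/* Intermediate levels were missing; allocate and retry. */
		if (!alloc_p2m(pfn))
			return false;

		return __set_phys_to_machine(pfn, mfn);
	}

	return true;
}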
static int p2m_dump_show(struct seq_file *m, void *v)
{
	static const char * const level_name[] = { "top", "middle",
						   "entry", "abnormal", "error"};
#define TYPE_IDENTITY 0
#define TYPE_MISSING 1
#define TYPE_PFN 2
#define TYPE_UNKNOWN 3
	static const char * const type_name[] = {
				[TYPE_IDENTITY] = "identity",
				[TYPE_MISSING] = "missing",
				[TYPE_PFN] = "pfn",
				[TYPE_UNKNOWN] = "abnormal"};
	unsigned long pfn, prev_pfn_type = 0, prev_pfn_level = 0;
	unsigned int uninitialized_var(prev_level);
	unsigned int uninitialized_var(prev_type);

	if (!p2m_top)
		return 0;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn++) {
		unsigned topidx = p2m_top_index(pfn);
		unsigned mididx = p2m_mid_index(pfn);
		unsigned idx = p2m_index(pfn);
		unsigned lvl, type;

		lvl = 4;
		type = TYPE_UNKNOWN;
		if (p2m_top[topidx] == p2m_mid_missing) {
			lvl = 0; type = TYPE_MISSING;
		} else if (p2m_top[topidx] == NULL) {
			lvl = 0; type = TYPE_UNKNOWN;
		} else if (p2m_top[topidx][mididx] == NULL) {
			lvl = 1; type = TYPE_UNKNOWN;
		} else if (p2m_top[topidx][mididx] == p2m_identity) {
			lvl = 1; type = TYPE_IDENTITY;
		} else if (p2m_top[topidx][mididx] == p2m_missing) {
			lvl = 1; type = TYPE_MISSING;
		} else if (p2m_top[topidx][mididx][idx] == 0) {
			lvl = 2; type = TYPE_UNKNOWN;
		} else if (p2m_top[topidx][mididx][idx] == IDENTITY_FRAME(pfn)) {
			lvl = 2; type = TYPE_IDENTITY;
		} else if (p2m_top[topidx][mididx][idx] == INVALID_P2M_ENTRY) {
			lvl = 2; type = TYPE_MISSING;
		} else if (p2m_top[topidx][mididx][idx] == pfn) {
			lvl = 2; type = TYPE_PFN;
		} else if (p2m_top[topidx][mididx][idx] != pfn) {
			lvl = 2; type = TYPE_PFN;
		}

		if (pfn == 0) {
			prev_level = lvl;
			prev_type = type;
		}
		if (pfn == MAX_DOMAIN_PAGES - 1) {
			lvl = 3;
			type = TYPE_UNKNOWN;
		}
		if (prev_type != type) {
			seq_printf(m, " [0x%lx->0x%lx] %s\n",
				   prev_pfn_type, pfn, type_name[prev_type]);
			prev_pfn_type = pfn;
			prev_type = type;
		}
		if (prev_level != lvl) {
			seq_printf(m, " [0x%lx->0x%lx] level %s\n",
				   prev_pfn_level, pfn, level_name[prev_level]);
			prev_pfn_level = pfn;
			prev_level = lvl;
		}
	}
	return 0;
#undef TYPE_IDENTITY
#undef TYPE_MISSING
#undef TYPE_PFN
#undef TYPE_UNKNOWN
}
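To expose this dump through debugfs, the show routine is wrapped in the usual seq_file single_open() plumbing and registered as a read-only file. A sketch of that registration, with the file name and parent directory treated as illustrative:

static int p2m_dump_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, p2m_dump_show, NULL);
}

static const struct file_operations p2m_dump_fops = {
	.open		= p2m_dump_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static int __init xen_p2m_debugfs(void)
{
	/* Sketch: "xen_p2m" under the debugfs root is an illustrative name/location. */
	debugfs_create_file("xen_p2m", 0400, NULL, NULL, &p2m_dump_fops);
	return 0;
}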
/*
 * Fully allocate the p2m structure for a given pfn.  We need to check
 * that both the top and mid levels are allocated, and make sure the
 * parallel mfn tree is kept in sync.  We may race with other cpus, so
 * the new pages are installed with cmpxchg; if we lose the race then
 * simply free the page we allocated and use the one that's there.
 */
static bool alloc_p2m(unsigned long pfn)
{
	unsigned topidx;
	unsigned long *top_mfn_p, *mid_mfn;
	pte_t *ptep, *pte_pg;
	unsigned int level;
	unsigned long flags;
	unsigned long addr = (unsigned long)(xen_p2m_addr + pfn);
	unsigned long p2m_pfn;

	ptep = lookup_address(addr, &level);
	BUG_ON(!ptep || level != PG_LEVEL_4K);
	pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1));

	if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) {
		/* PMD level is missing, allocate a new one */
		ptep = alloc_p2m_pmd(addr, pte_pg);
		if (!ptep)
			return false;
	}

	if (p2m_top_mfn && pfn < MAX_P2M_PFN) {
		topidx = p2m_top_index(pfn);
		top_mfn_p = &p2m_top_mfn[topidx];
		mid_mfn = ACCESS_ONCE(p2m_top_mfn_p[topidx]);

		BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p);

		if (mid_mfn == p2m_mid_missing_mfn) {
			/* Separately check the mid mfn level */
			unsigned long missing_mfn;
			unsigned long mid_mfn_mfn;
			unsigned long old_mfn;

			mid_mfn = alloc_p2m_page();
			if (!mid_mfn)
				return false;

			p2m_mid_mfn_init(mid_mfn, p2m_missing);

			missing_mfn = virt_to_mfn(p2m_mid_missing_mfn);
			mid_mfn_mfn = virt_to_mfn(mid_mfn);
			old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn);
			if (old_mfn != missing_mfn) {
				free_p2m_page(mid_mfn);
				mid_mfn = mfn_to_virt(old_mfn);
			} else {
				p2m_top_mfn_p[topidx] = mid_mfn;
			}
		}
	} else {
		mid_mfn = NULL;
	}

	p2m_pfn = pte_pfn(READ_ONCE(*ptep));
	if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) ||
	    p2m_pfn == PFN_DOWN(__pa(p2m_missing))) {
		/* p2m leaf page is missing */
		unsigned long *p2m;

		p2m = alloc_p2m_page();
		if (!p2m)
			return false;

		if (p2m_pfn == PFN_DOWN(__pa(p2m_missing)))
			p2m_init(p2m);
		else
			p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1));

		spin_lock_irqsave(&p2m_update_lock, flags);

		if (pte_pfn(*ptep) == p2m_pfn) {
			HYPERVISOR_shared_info->arch.p2m_generation++;
			wmb(); /* Tools are synchronizing via p2m_generation. */
			set_pte(ptep,
				pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL));
			wmb(); /* Tools are synchronizing via p2m_generation. */
			HYPERVISOR_shared_info->arch.p2m_generation++;
			if (mid_mfn)
				mid_mfn[p2m_mid_index(pfn)] = virt_to_mfn(p2m);
			p2m = NULL;
		}

		spin_unlock_irqrestore(&p2m_update_lock, flags);

		if (p2m)
			free_p2m_page(p2m);
	}

	/* Expanded the p2m? */
	if (pfn > xen_p2m_last_pfn) {
		xen_p2m_last_pfn = pfn;
		HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
	}

	return true;
}
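This linear-mapping variant dispenses with the p2m_top pointer tree and instead walks the page tables backing a virtual array at xen_p2m_addr. A sketch of the globals it leans on, matching how they are used above; treat the exact attributes and comments as illustrative:

/* Sketch of the globals this variant relies on; attributes are illustrative. */
unsigned long *xen_p2m_addr __read_mostly;	/* start of the virtual p2m array */
unsigned long xen_p2m_size __read_mostly;	/* size of that array, in entries */
unsigned long xen_max_p2m_pfn __read_mostly;	/* highest pfn covered at boot */
static unsigned long xen_p2m_last_pfn;		/* highest pfn populated so far */

/* Template pte pages whose presence marks a whole range as missing/identity. */
static pte_t *p2m_missing_pte;
static pte_t *p2m_identity_pte;

/* Serializes p2m page-table updates against concurrent allocators. */
static DEFINE_SPINLOCK(p2m_update_lock);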
/*
 * Skim over the P2M tree looking at pages that are either filled with
 * INVALID_P2M_ENTRY or with 1:1 PFNs. If found, re-use that page and
 * replace the P2M leaf with a p2m_missing or p2m_identity.
 * Stick the old page in the new P2M tree location.
 */
bool __init early_can_reuse_p2m_middle(unsigned long set_pfn,
				       unsigned long set_mfn)
{
	unsigned topidx;
	unsigned mididx;
	unsigned ident_pfns;
	unsigned inv_pfns;
	unsigned long *p2m;
	unsigned long *mid_mfn_p;
	unsigned idx;
	unsigned long pfn;

	/* We only look when this entails a P2M middle layer */
	if (p2m_index(set_pfn))
		return false;

	for (pfn = 0; pfn < MAX_DOMAIN_PAGES; pfn += P2M_PER_PAGE) {
		topidx = p2m_top_index(pfn);

		if (!p2m_top[topidx])
			continue;

		if (p2m_top[topidx] == p2m_mid_missing)
			continue;

		mididx = p2m_mid_index(pfn);
		p2m = p2m_top[topidx][mididx];
		if (!p2m)
			continue;

		if ((p2m == p2m_missing) || (p2m == p2m_identity))
			continue;

		if ((unsigned long)p2m == INVALID_P2M_ENTRY)
			continue;

		ident_pfns = 0;
		inv_pfns = 0;
		for (idx = 0; idx < P2M_PER_PAGE; idx++) {
			/* IDENTITY_PFNs are 1:1 */
			if (p2m[idx] == IDENTITY_FRAME(pfn + idx))
				ident_pfns++;
			else if (p2m[idx] == INVALID_P2M_ENTRY)
				inv_pfns++;
			else
				break;
		}
		if ((ident_pfns == P2M_PER_PAGE) || (inv_pfns == P2M_PER_PAGE))
			goto found;
	}
	return false;
found:
	/* Found one, replace old with p2m_identity or p2m_missing */
	p2m_top[topidx][mididx] = (ident_pfns ? p2m_identity : p2m_missing);
	/* And the other for save/restore.. */
	mid_mfn_p = p2m_top_mfn_p[topidx];
	/* NOTE: Even if it is a p2m_identity it should still point to
	 * a page filled with INVALID_P2M_ENTRY entries. */
	mid_mfn_p[mididx] = virt_to_mfn(p2m_missing);

	/* Reset where we want to stick the old page in. */
	topidx = p2m_top_index(set_pfn);
	mididx = p2m_mid_index(set_pfn);

	/* This shouldn't happen */
	if (WARN_ON(p2m_top[topidx] == p2m_mid_missing))
		early_alloc_p2m(set_pfn);

	if (WARN_ON(p2m_top[topidx][mididx] != p2m_missing))
		return false;

	p2m_init(p2m);
	p2m_top[topidx][mididx] = p2m;
	mid_mfn_p = p2m_top_mfn_p[topidx];
	mid_mfn_p[mididx] = virt_to_mfn(p2m);

	return true;
}