static void
p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn,
                unsigned int page_order)
{
    unsigned long i;
    mfn_t mfn_return;
    p2m_type_t t;
    p2m_access_t a;

    if ( !paging_mode_translate(p2m->domain) )
    {
        if ( need_iommu(p2m->domain) )
            for ( i = 0; i < (1 << page_order); i++ )
                iommu_unmap_page(p2m->domain, mfn + i);
        return;
    }

    ASSERT(gfn_locked_by_me(p2m, gfn));
    P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn);

    if ( mfn_valid(_mfn(mfn)) )
    {
        for ( i = 0; i < (1UL << page_order); i++ )
        {
            mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, 0, NULL);
            if ( !p2m_is_grant(t) && !p2m_is_shared(t) )
                set_gpfn_from_mfn(mfn + i, INVALID_M2P_ENTRY);
            ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
        }
    }
    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid,
                  p2m->default_access);
}
int clear_mmio_p2m_entry(struct domain *d, unsigned long gfn)
{
    int rc = 0;
    mfn_t mfn;
    p2m_access_t a;
    p2m_type_t t;
    struct p2m_domain *p2m = p2m_get_hostp2m(d);

    if ( !paging_mode_translate(d) )
        return 0;

    gfn_lock(p2m, gfn, 0);
    mfn = p2m->get_entry(p2m, gfn, &t, &a, 0, NULL);

    /* Do not use mfn_valid() here as it will usually fail for MMIO pages. */
    if ( (INVALID_MFN == mfn_x(mfn)) || (t != p2m_mmio_direct) )
    {
        gdprintk(XENLOG_ERR,
                 "clear_mmio_p2m_entry: gfn_to_mfn failed! gfn=%08lx\n", gfn);
        goto out;
    }
    rc = set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K,
                       p2m_invalid, p2m->default_access);

 out:
    gfn_unlock(p2m, gfn, 0);

    return rc;
}
static void pvpmu_finish(struct domain *d, xen_pmu_params_t *params)
{
    struct vcpu *v;
    struct vpmu_struct *vpmu;
    uint64_t mfn;
    void *xenpmu_data;

    if ( (params->vcpu >= d->max_vcpus) || (d->vcpu[params->vcpu] == NULL) )
        return;

    v = d->vcpu[params->vcpu];
    if ( v != current )
        vcpu_pause(v);

    vpmu = vcpu_vpmu(v);
    spin_lock(&vpmu->vpmu_lock);

    vpmu_arch_destroy(v);
    xenpmu_data = vpmu->xenpmu_data;
    vpmu->xenpmu_data = NULL;

    spin_unlock(&vpmu->vpmu_lock);

    if ( xenpmu_data )
    {
        mfn = domain_page_map_to_mfn(xenpmu_data);
        ASSERT(mfn_valid(_mfn(mfn)));
        unmap_domain_page_global(xenpmu_data);
        put_page_and_type(mfn_to_page(mfn));
    }

    if ( v != current )
        vcpu_unpause(v);
}
int set_p2m_entry(struct p2m_domain *p2m, unsigned long gfn, mfn_t mfn,
                  unsigned int page_order, p2m_type_t p2mt, p2m_access_t p2ma)
{
    struct domain *d = p2m->domain;
    unsigned long todo = 1ul << page_order;
    unsigned int order;
    int rc = 1;

    ASSERT(gfn_locked_by_me(p2m, gfn));

    while ( todo )
    {
        if ( hap_enabled(d) )
            order = ( (((gfn | mfn_x(mfn) | todo) &
                        ((1ul << PAGE_ORDER_1G) - 1)) == 0) &&
                      hvm_hap_has_1gb(d) && opt_hap_1gb ) ? PAGE_ORDER_1G :
                    ((((gfn | mfn_x(mfn) | todo) &
                       ((1ul << PAGE_ORDER_2M) - 1)) == 0) &&
                     hvm_hap_has_2mb(d) && opt_hap_2mb) ? PAGE_ORDER_2M :
                    PAGE_ORDER_4K;
        else
            order = 0;

        if ( !p2m->set_entry(p2m, gfn, mfn, order, p2mt, p2ma) )
            rc = 0;
        gfn += 1ul << order;
        if ( mfn_x(mfn) != INVALID_MFN )
            mfn = _mfn(mfn_x(mfn) + (1ul << order));
        todo -= 1ul << order;
    }

    return rc;
}
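/*
 * Aside (not part of the source): the nested conditional above picks the
 * largest mapping order for which gfn, mfn and the remaining count are all
 * aligned. A minimal standalone sketch of that arithmetic follows; the
 * can_use_1gb/can_use_2mb flags and the SK_* macros are hypothetical
 * stand-ins for the hvm_hap_has_*()/opt_hap_* checks and PAGE_ORDER_*.
 */
#include <stdio.h>

#define SK_PAGE_ORDER_4K  0
#define SK_PAGE_ORDER_2M  9
#define SK_PAGE_ORDER_1G 18

static unsigned int pick_order(unsigned long gfn, unsigned long mfn,
                               unsigned long todo,
                               int can_use_1gb, int can_use_2mb)
{
    /* An order is usable only if no low-order bit is set in any of the
     * three quantities, i.e. both frames are aligned and enough pages
     * remain to fill the extent. */
    unsigned long bits = gfn | mfn | todo;

    if ( can_use_1gb && !(bits & ((1UL << SK_PAGE_ORDER_1G) - 1)) )
        return SK_PAGE_ORDER_1G;
    if ( can_use_2mb && !(bits & ((1UL << SK_PAGE_ORDER_2M) - 1)) )
        return SK_PAGE_ORDER_2M;
    return SK_PAGE_ORDER_4K;
}

int main(void)
{
    printf("%u\n", pick_order(0x200, 0x400, 512, 1, 1)); /* 9: 2M mapping */
    printf("%u\n", pick_order(0x200, 0x401, 512, 1, 1)); /* 0: misaligned mfn */
    return 0;
}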
static int __init modify_identity_mmio(struct domain *d, unsigned long pfn,
                                       unsigned long nr_pages, const bool map)
{
    int rc;

    for ( ; ; )
    {
        rc = (map ? map_mmio_regions : unmap_mmio_regions)
             (d, _gfn(pfn), nr_pages, _mfn(pfn));
        if ( rc == 0 )
            break;
        if ( rc < 0 )
        {
            printk(XENLOG_WARNING
                   "Failed to identity %smap [%#lx,%#lx) for d%d: %d\n",
                   map ? "" : "un", pfn, pfn + nr_pages, d->domain_id, rc);
            break;
        }
        nr_pages -= rc;
        pfn += rc;
        process_pending_softirqs();
    }

    return rc;
}
/* Assign the low 1MB to Dom0. */
static void __init pvh_steal_low_ram(struct domain *d, unsigned long start,
                                     unsigned long nr_pages)
{
    unsigned long mfn;

    ASSERT(start + nr_pages <= PFN_DOWN(MB(1)));

    for ( mfn = start; mfn < start + nr_pages; mfn++ )
    {
        struct page_info *pg = mfn_to_page(mfn);
        int rc;

        rc = unshare_xen_page_with_guest(pg, dom_io);
        if ( rc )
        {
            printk("Unable to unshare Xen mfn %#lx: %d\n", mfn, rc);
            continue;
        }

        share_xen_page_with_guest(pg, d, XENSHARE_writable);
        rc = guest_physmap_add_entry(d, _gfn(mfn), _mfn(mfn), 0, p2m_ram_rw);
        if ( rc )
            printk("Unable to add mfn %#lx to p2m: %d\n", mfn, rc);
    }
}
mfn_t __get_gfn_type_access(struct p2m_domain *p2m, unsigned long gfn,
                            p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
                            unsigned int *page_order, bool_t locked)
{
    mfn_t mfn;

    /* Unshare makes no sense without populate. */
    if ( q & P2M_UNSHARE )
        q |= P2M_ALLOC;

    if ( !p2m || !paging_mode_translate(p2m->domain) )
    {
        /* Not necessarily true, but for non-translated guests, we claim
         * it's the most generic kind of memory */
        *t = p2m_ram_rw;
        return _mfn(gfn);
    }

    if ( locked )
        /* Grab the lock here, don't release until put_gfn */
        gfn_lock(p2m, gfn, 0);

    mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);

    if ( (q & P2M_UNSHARE) && p2m_is_shared(*t) )
    {
        ASSERT(!p2m_is_nestedp2m(p2m));
        /* Try to unshare. If we fail, communicate ENOMEM without
         * sleeping. */
        if ( mem_sharing_unshare_page(p2m->domain, gfn, 0) < 0 )
            (void)mem_sharing_notify_enomem(p2m->domain, gfn, 0);
        mfn = p2m->get_entry(p2m, gfn, t, a, q, page_order);
    }

    if ( unlikely(p2m_is_broken(*t)) )
    {
        /* Return invalid_mfn to avoid caller's access */
        mfn = _mfn(INVALID_MFN);
        if ( q & P2M_ALLOC )
            domain_crash(p2m->domain);
    }

    return mfn;
}
/**
 * p2m_mem_paging_evict - Mark a guest page as paged-out
 * @d: guest domain
 * @gfn: guest page to evict
 *
 * Returns 0 for success or negative errno values if eviction is not possible.
 *
 * p2m_mem_paging_evict() is called by the pager and will free a guest page and
 * release it back to Xen. If the following conditions are met the page can be
 * freed:
 * - the gfn is backed by a mfn
 * - the gfn was nominated
 * - the mfn has still exactly one user and has no special meaning
 *
 * After successful nomination some other process could have mapped the page.
 * In this case eviction cannot be done. If the gfn was populated before the
 * pager could evict it, eviction cannot be done either. In this case the gfn
 * is still backed by a mfn.
 */
int p2m_mem_paging_evict(struct domain *d, unsigned long gfn)
{
    struct page_info *page;
    p2m_type_t p2mt;
    p2m_access_t a;
    mfn_t mfn;
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    int ret = -EBUSY;

    gfn_lock(p2m, gfn, 0);

    /* Get mfn */
    mfn = p2m->get_entry(p2m, gfn, &p2mt, &a, 0, NULL);
    if ( unlikely(!mfn_valid(mfn)) )
        goto out;

    /* Allow only nominated pages */
    if ( p2mt != p2m_ram_paging_out )
        goto out;

    /* Get the page so it doesn't get modified under Xen's feet */
    page = mfn_to_page(mfn);
    if ( unlikely(!get_page(page, d)) )
        goto out;

    /* Check page count and type once more */
    if ( (page->count_info & (PGC_count_mask | PGC_allocated)) !=
         (2 | PGC_allocated) )
        goto out_put;

    if ( (page->u.inuse.type_info & PGT_count_mask) != 0 )
        goto out_put;

    /* Decrement guest domain's ref count of the page */
    if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
        put_page(page);

    /* Remove mapping from p2m table */
    set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_4K,
                  p2m_ram_paged, a);

    /* Clear content before returning the page to Xen */
    scrub_one_page(page);

    /* Track number of paged gfns */
    atomic_inc(&d->paged_pages);

    ret = 0;

 out_put:
    /* Put the page back so it gets freed */
    put_page(page);

 out:
    gfn_unlock(p2m, gfn, 0);
    return ret;
}
// Allocate a new p2m table for a domain.
//
// The structure of the p2m table is that of a pagetable for xen (i.e. it is
// controlled by CONFIG_PAGING_LEVELS).
//
// Returns 0 for success or -errno.
//
int p2m_alloc_table(struct p2m_domain *p2m)
{
    struct page_info *p2m_top;
    struct domain *d = p2m->domain;

    p2m_lock(p2m);

    if ( !p2m_is_nestedp2m(p2m) && !page_list_empty(&d->page_list) )
    {
        P2M_ERROR("dom %d already has memory allocated\n", d->domain_id);
        p2m_unlock(p2m);
        return -EINVAL;
    }

    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
    {
        P2M_ERROR("p2m already allocated for this domain\n");
        p2m_unlock(p2m);
        return -EINVAL;
    }

    P2M_PRINTK("allocating p2m table\n");

    p2m_top = p2m_alloc_ptp(p2m, PGT_l4_page_table);
    if ( p2m_top == NULL )
    {
        p2m_unlock(p2m);
        return -ENOMEM;
    }

    p2m->phys_table = pagetable_from_mfn(page_to_mfn(p2m_top));

    if ( hap_enabled(d) )
        iommu_share_p2m_table(d);

    P2M_PRINTK("populating p2m table\n");

    /* Initialise physmap tables for slot zero. Other code assumes this. */
    p2m->defer_nested_flush = 1;
    if ( !set_p2m_entry(p2m, 0, _mfn(INVALID_MFN), PAGE_ORDER_4K,
                        p2m_invalid, p2m->default_access) )
        goto error;
    p2m->defer_nested_flush = 0;

    P2M_PRINTK("p2m table initialised\n");
    p2m_unlock(p2m);
    return 0;

 error:
    P2M_PRINTK("failed to initialise p2m table for slot zero\n");
    p2m_unlock(p2m);
    return -ENOMEM;
}
/* Free intermediate tables from a p2m sub-tree */
static void p2m_free_entry(struct p2m_domain *p2m, l1_pgentry_t *p2m_entry,
                           int page_order)
{
    /* End if the entry is a leaf entry. */
    if ( page_order == PAGE_ORDER_4K ||
         !(l1e_get_flags(*p2m_entry) & _PAGE_PRESENT) ||
         (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
        return;

    if ( page_order > PAGE_ORDER_2M )
    {
        l1_pgentry_t *l3_table =
            map_domain_page(_mfn(l1e_get_pfn(*p2m_entry)));

        /* Recurse one level down; each level spans 9 bits of the gfn. */
        for ( int i = 0; i < L3_PAGETABLE_ENTRIES; i++ )
            p2m_free_entry(p2m, l3_table + i, page_order - 9);
        unmap_domain_page(l3_table);
    }

    p2m_free_ptp(p2m, mfn_to_page(_mfn(l1e_get_pfn(*p2m_entry))));
}
/* Populate an HVM memory range using the biggest possible order. */
static int __init pvh_populate_memory_range(struct domain *d,
                                            unsigned long start,
                                            unsigned long nr_pages)
{
    unsigned int order, i = 0;
    struct page_info *page;
    int rc;
#define MAP_MAX_ITER 64

    order = MAX_ORDER;
    while ( nr_pages != 0 )
    {
        unsigned int range_order = get_order_from_pages(nr_pages + 1);

        order = min(range_order ? range_order - 1 : 0, order);
        page = alloc_domheap_pages(d, order, dom0_memflags);
        if ( page == NULL )
        {
            if ( order == 0 && dom0_memflags )
            {
                /* Try again without any dom0_memflags. */
                dom0_memflags = 0;
                order = MAX_ORDER;
                continue;
            }
            if ( order == 0 )
            {
                printk("Unable to allocate memory with order 0!\n");
                return -ENOMEM;
            }
            order--;
            continue;
        }

        rc = guest_physmap_add_page(d, _gfn(start), _mfn(page_to_mfn(page)),
                                    order);
        if ( rc != 0 )
        {
            printk("Failed to populate memory: [%#lx,%lx): %d\n",
                   start, start + (1UL << order), rc);
            return -ENOMEM;
        }
        start += 1UL << order;
        nr_pages -= 1UL << order;
        if ( (++i % MAP_MAX_ITER) == 0 )
            process_pending_softirqs();
    }

    return 0;
#undef MAP_MAX_ITER
}
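/*
 * Aside (not part of the source): `get_order_from_pages(nr_pages + 1) - 1`
 * above yields the largest order whose extent still fits in the remaining
 * range, i.e. floor(log2(nr_pages)). A standalone sketch, with a local
 * order_from_pages() as a hypothetical stand-in for Xen's round-up helper:
 */
#include <stdio.h>

/* Smallest order such that (1UL << order) >= nr. */
static unsigned int order_from_pages(unsigned long nr)
{
    unsigned int order = 0;

    while ( (1UL << order) < nr )
        order++;
    return order;
}

int main(void)
{
    unsigned long nr_pages = 1000;
    unsigned int range_order = order_from_pages(nr_pages + 1); /* 10 */

    /* Largest extent that does not overshoot the range: 2^9 = 512 pages. */
    printf("%u\n", range_order ? range_order - 1 : 0); /* 9 */
    return 0;
}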
void flush_page_to_ram(unsigned long mfn)
{
    void *v = map_domain_page(_mfn(mfn));

    clean_and_invalidate_dcache_va_range(v, PAGE_SIZE);
    unmap_domain_page(v);

    /*
     * For some of the instruction cache (such as VIPT), the entire I-Cache
     * needs to be flushed to guarantee that all the aliases of a given
     * physical address will be removed from the cache.
     * Invalidating the I-Cache by VA highly depends on the behavior of the
     * I-Cache (See D4.9.2 in ARM DDI 0487A.k_iss10775). Instead of using flush
     * by VA on select platforms, we just flush the entire cache here.
     */
    invalidate_icache();
}
static void mca_init_bank(enum mca_source who, struct mc_info *mi, int bank)
{
    struct mcinfo_bank *mib;

    if (!mi)
        return;

    mib = x86_mcinfo_reserve(mi, sizeof(*mib), MC_TYPE_BANK);
    if (!mib)
    {
        mi->flags |= MCINFO_FLAGS_UNCOMPLETE;
        return;
    }

    mib->mc_status = mca_rdmsr(MSR_IA32_MCx_STATUS(bank));
    mib->mc_bank = bank;
    mib->mc_domid = DOMID_INVALID;

    if (mib->mc_status & MCi_STATUS_MISCV)
        mib->mc_misc = mca_rdmsr(MSR_IA32_MCx_MISC(bank));

    if (mib->mc_status & MCi_STATUS_ADDRV)
        mib->mc_addr = mca_rdmsr(MSR_IA32_MCx_ADDR(bank));

    if ((mib->mc_status & MCi_STATUS_MISCV) &&
        (mib->mc_status & MCi_STATUS_ADDRV) &&
        (mc_check_addr(mib->mc_status, mib->mc_misc, MC_ADDR_PHYSICAL)) &&
        (who == MCA_POLLER || who == MCA_CMCI_HANDLER) &&
        (mfn_valid(_mfn(paddr_to_pfn(mib->mc_addr)))))
    {
        struct domain *d;

        d = maddr_get_owner(mib->mc_addr);
        if (d)
            mib->mc_domid = d->domain_id;
    }

    if (who == MCA_CMCI_HANDLER)
    {
        mib->mc_ctrl2 = mca_rdmsr(MSR_IA32_MC0_CTL2 + bank);
        mib->mc_tsc = rdtsc();
    }
}
int p2m_set_altp2m_mem_access(struct domain *d, struct p2m_domain *hp2m,
                              struct p2m_domain *ap2m, p2m_access_t a,
                              gfn_t gfn)
{
    mfn_t mfn;
    p2m_type_t t;
    p2m_access_t old_a;
    unsigned int page_order;
    unsigned long gfn_l = gfn_x(gfn);
    int rc;

    mfn = ap2m->get_entry(ap2m, gfn_l, &t, &old_a, 0, NULL, NULL);

    /* Check host p2m if no valid entry in alternate */
    if ( !mfn_valid(mfn) )
    {
        mfn = __get_gfn_type_access(hp2m, gfn_l, &t, &old_a,
                                    P2M_ALLOC | P2M_UNSHARE, &page_order, 0);

        rc = -ESRCH;
        if ( !mfn_valid(mfn) || t != p2m_ram_rw )
            return rc;

        /* If this is a superpage, copy that first */
        if ( page_order != PAGE_ORDER_4K )
        {
            unsigned long mask = ~((1UL << page_order) - 1);
            unsigned long gfn2_l = gfn_l & mask;
            mfn_t mfn2 = _mfn(mfn_x(mfn) & mask);

            rc = ap2m->set_entry(ap2m, gfn2_l, mfn2, page_order, t, old_a, 1);
            if ( rc )
                return rc;
        }
    }

    return ap2m->set_entry(ap2m, gfn_l, mfn, PAGE_ORDER_4K, t, a,
                           (current->domain != d));
}
/* Read the current domain's p2m table (through the linear mapping). */
static mfn_t p2m_gfn_to_mfn_current(struct p2m_domain *p2m,
                                    unsigned long gfn, p2m_type_t *t,
                                    p2m_access_t *a, p2m_query_t q,
                                    unsigned int *page_order)
{
    mfn_t mfn = _mfn(INVALID_MFN);
    p2m_type_t p2mt = p2m_mmio_dm;
    paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
    /* XXX This is for compatibility with the old model, where anything not
     * XXX marked as RAM was considered to be emulated MMIO space.
     * XXX Once we start explicitly registering MMIO regions in the p2m
     * XXX we will return p2m_invalid for unmapped gfns */

    l1_pgentry_t l1e = l1e_empty(), *p2m_entry;
    l2_pgentry_t l2e = l2e_empty();
    int ret;
#if CONFIG_PAGING_LEVELS >= 4
    l3_pgentry_t l3e = l3e_empty();
#endif

    ASSERT(gfn < (RO_MPT_VIRT_END - RO_MPT_VIRT_START)
           / sizeof(l1_pgentry_t));

#if CONFIG_PAGING_LEVELS >= 4
    /*
     * Read & process L3
     */
    p2m_entry = (l1_pgentry_t *)
        &__linear_l2_table[l2_linear_offset(RO_MPT_VIRT_START)
                           + l3_linear_offset(addr)];
 pod_retry_l3:
    ret = __copy_from_user(&l3e, p2m_entry, sizeof(l3e));

    if ( ret != 0 || !(l3e_get_flags(l3e) & _PAGE_PRESENT) )
    {
        if ( (l3e_get_flags(l3e) & _PAGE_PSE) &&
             (p2m_flags_to_type(l3e_get_flags(l3e)) ==
              p2m_populate_on_demand) )
        {
            /* The read has succeeded, so we know that the mapping exists */
            if ( q & P2M_ALLOC )
            {
                if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
                    goto pod_retry_l3;
                p2mt = p2m_invalid;
                gdprintk(XENLOG_ERR, "%s: Allocate 1GB failed!\n", __func__);
                goto out;
            }
            else
            {
                p2mt = p2m_populate_on_demand;
                goto out;
            }
        }
        goto pod_retry_l2;
    }

    if ( l3e_get_flags(l3e) & _PAGE_PSE )
    {
        p2mt = p2m_flags_to_type(l3e_get_flags(l3e));
        ASSERT(l3e_get_pfn(l3e) != INVALID_MFN || !p2m_is_ram(p2mt));
        if ( p2m_is_valid(p2mt) )
            mfn = _mfn(l3e_get_pfn(l3e) +
                       l2_table_offset(addr) * L1_PAGETABLE_ENTRIES +
                       l1_table_offset(addr));
        else
            p2mt = p2m_mmio_dm;

        if ( page_order )
            *page_order = PAGE_ORDER_1G;
        goto out;
    }
#endif

    /*
     * Read & process L2
     */
    p2m_entry = &__linear_l1_table[l1_linear_offset(RO_MPT_VIRT_START)
                                   + l2_linear_offset(addr)];

 pod_retry_l2:
    ret = __copy_from_user(&l2e, p2m_entry, sizeof(l2e));
    if ( ret != 0 || !(l2e_get_flags(l2e) & _PAGE_PRESENT) )
    {
        if ( (l2e_get_flags(l2e) & _PAGE_PSE) &&
             (p2m_flags_to_type(l2e_get_flags(l2e)) ==
              p2m_populate_on_demand) )
        {
            /* The read has succeeded, so we know that the mapping
             * exists at this point. */
            if ( q & P2M_ALLOC )
            {
                if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_2M, q) )
                    goto pod_retry_l2;

                /* Allocate failed. */
                p2mt = p2m_invalid;
                printk("%s: Allocate failed!\n", __func__);
                goto out;
            }
            else
            {
                p2mt = p2m_populate_on_demand;
                goto out;
            }
        }

        goto pod_retry_l1;
    }

    if ( l2e_get_flags(l2e) & _PAGE_PSE )
    {
        p2mt = p2m_flags_to_type(l2e_get_flags(l2e));
        ASSERT(l2e_get_pfn(l2e) != INVALID_MFN || !p2m_is_ram(p2mt));

        if ( p2m_is_valid(p2mt) )
            mfn = _mfn(l2e_get_pfn(l2e) + l1_table_offset(addr));
        else
            p2mt = p2m_mmio_dm;

        if ( page_order )
            *page_order = PAGE_ORDER_2M;
        goto out;
    }

    /*
     * Read and process L1
     */

    /* Need to __copy_from_user because the p2m is sparse and this
     * part might not exist */
 pod_retry_l1:
    p2m_entry = &phys_to_machine_mapping[gfn];

    ret = __copy_from_user(&l1e, p2m_entry, sizeof(l1e));
    if ( ret == 0 )
    {
        unsigned long l1e_mfn = l1e_get_pfn(l1e);

        p2mt = p2m_flags_to_type(l1e_get_flags(l1e));
        ASSERT( mfn_valid(_mfn(l1e_mfn)) || !p2m_is_ram(p2mt) ||
                p2m_is_paging(p2mt) );

        if ( p2mt == p2m_populate_on_demand )
        {
            /* The read has succeeded, so we know that the mapping
             * exists at this point. */
            if ( q & P2M_ALLOC )
            {
                if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_4K, q) )
                    goto pod_retry_l1;

                /* Allocate failed. */
                p2mt = p2m_invalid;
                goto out;
            }
            else
            {
                p2mt = p2m_populate_on_demand;
                goto out;
            }
        }

        if ( p2m_is_valid(p2mt) || p2m_is_grant(p2mt) )
            mfn = _mfn(l1e_mfn);
        else
            /* XXX see above */
            p2mt = p2m_mmio_dm;
    }

    if ( page_order )
        *page_order = PAGE_ORDER_4K;

 out:
    *t = p2mt;
    return mfn;
}
/*
 * Handle possibly necessary P2M type re-calculation (U flag clear for a
 * present entry) for the entries in the page table hierarchy for the given
 * GFN. Propagate the re-calculation flag down to the next page table level
 * for entries not involved in the translation of the given GFN.
 */
static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
{
    void *table;
    unsigned long gfn_remainder = gfn;
    unsigned int level = 4;
    l1_pgentry_t *pent;
    int err = 0;

    table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
    while ( --level )
    {
        unsigned long remainder = gfn_remainder;

        pent = p2m_find_entry(table, &remainder, gfn,
                              level * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER);
        if ( !pent || !(l1e_get_flags(*pent) & _PAGE_PRESENT) )
            goto out;

        if ( l1e_get_flags(*pent) & _PAGE_PSE )
        {
            unsigned long mask = ~0UL << (level * PAGETABLE_ORDER);

            if ( !needs_recalc(l1, *pent) ||
                 !p2m_is_changeable(p2m_flags_to_type(l1e_get_flags(*pent))) ||
                 p2m_is_logdirty_range(p2m, gfn & mask, gfn | ~mask) >= 0 )
                break;
        }

        err = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                             level * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER,
                             pgt[level - 1], 0);
        if ( err )
            goto out;

        if ( needs_recalc(l1, *pent) )
        {
            l1_pgentry_t e = *pent, *ptab = table;
            unsigned int i;

            if ( !valid_recalc(l1, e) )
                P2M_DEBUG("bogus recalc state at d%d:%lx:%u\n",
                          p2m->domain->domain_id, gfn, level);
            remainder = gfn_remainder;
            for ( i = 0; i < (1 << PAGETABLE_ORDER); ++i )
            {
                l1_pgentry_t ent = ptab[i];

                if ( (l1e_get_flags(ent) & _PAGE_PRESENT) &&
                     !needs_recalc(l1, ent) )
                {
                    set_recalc(l1, ent);
                    p2m->write_p2m_entry(p2m, gfn - remainder, &ptab[i],
                                         ent, level);
                }
                remainder -= 1UL << ((level - 1) * PAGETABLE_ORDER);
            }
            smp_wmb();
            clear_recalc(l1, e);
            p2m->write_p2m_entry(p2m, gfn, pent, e, level + 1);
        }
        unmap_domain_page((void *)((unsigned long)pent & PAGE_MASK));
    }

    pent = p2m_find_entry(table, &gfn_remainder, gfn,
                          level * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER);
    if ( pent && (l1e_get_flags(*pent) & _PAGE_PRESENT) &&
         needs_recalc(l1, *pent) )
    {
        l1_pgentry_t e = *pent;

        if ( !valid_recalc(l1, e) )
            P2M_DEBUG("bogus recalc leaf at d%d:%lx:%u\n",
                      p2m->domain->domain_id, gfn, level);
        if ( p2m_is_changeable(p2m_flags_to_type(l1e_get_flags(e))) )
        {
            unsigned long mask = ~0UL << (level * PAGETABLE_ORDER);
            p2m_type_t p2mt = p2m_is_logdirty_range(p2m, gfn & mask,
                                                    gfn | ~mask)
                              ? p2m_ram_logdirty : p2m_ram_rw;
            unsigned long mfn = l1e_get_pfn(e);
            unsigned long flags = p2m_type_to_flags(p2mt, _mfn(mfn));

            if ( level )
            {
                if ( flags & _PAGE_PAT )
                {
                    BUILD_BUG_ON(_PAGE_PAT != _PAGE_PSE);
                    mfn |= _PAGE_PSE_PAT >> PAGE_SHIFT;
                }
                else
                    mfn &= ~(_PAGE_PSE_PAT >> PAGE_SHIFT);
                flags |= _PAGE_PSE;
            }
/* Returns: 0 for success, -errno for failure */
static int
p2m_next_level(struct p2m_domain *p2m, void **table,
               unsigned long *gfn_remainder, unsigned long gfn, u32 shift,
               u32 max, unsigned long type, bool_t unmap)
{
    l1_pgentry_t *l1_entry;
    l1_pgentry_t *p2m_entry;
    l1_pgentry_t new_entry;
    void *next;
    int i;

    if ( !(p2m_entry = p2m_find_entry(*table, gfn_remainder, gfn,
                                      shift, max)) )
        return -ENOENT;

    /* PoD/paging: Not present doesn't imply empty. */
    if ( !l1e_get_flags(*p2m_entry) )
    {
        struct page_info *pg;

        pg = p2m_alloc_ptp(p2m, type);
        if ( pg == NULL )
            return -ENOMEM;

        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
                                 P2M_BASE_FLAGS | _PAGE_RW);

        switch ( type ) {
        case PGT_l3_page_table:
            p2m_add_iommu_flags(&new_entry, 3,
                                IOMMUF_readable|IOMMUF_writable);
            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 4);
            break;
        case PGT_l2_page_table:
            p2m_add_iommu_flags(&new_entry, 2,
                                IOMMUF_readable|IOMMUF_writable);
            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3);
            break;
        case PGT_l1_page_table:
            p2m_add_iommu_flags(&new_entry, 1,
                                IOMMUF_readable|IOMMUF_writable);
            p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
            break;
        default:
            BUG();
            break;
        }
    }

    ASSERT(l1e_get_flags(*p2m_entry) & (_PAGE_PRESENT|_PAGE_PSE));

    /* split 1GB pages into 2MB pages */
    if ( type == PGT_l2_page_table &&
         (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
    {
        unsigned long flags, pfn;
        struct page_info *pg;

        pg = p2m_alloc_ptp(p2m, PGT_l2_page_table);
        if ( pg == NULL )
            return -ENOMEM;

        flags = l1e_get_flags(*p2m_entry);
        pfn = l1e_get_pfn(*p2m_entry);

        l1_entry = __map_domain_page(pg);
        for ( i = 0; i < L2_PAGETABLE_ENTRIES; i++ )
        {
            new_entry = l1e_from_pfn(pfn + (i * L1_PAGETABLE_ENTRIES), flags);
            p2m_add_iommu_flags(&new_entry, 1,
                                IOMMUF_readable|IOMMUF_writable);
            p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 2);
        }
        unmap_domain_page(l1_entry);
        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
                                 P2M_BASE_FLAGS | _PAGE_RW); /* disable PSE */
        p2m_add_iommu_flags(&new_entry, 2, IOMMUF_readable|IOMMUF_writable);
        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 3);
    }

    /* split single 2MB large page into 4KB page in P2M table */
    if ( type == PGT_l1_page_table &&
         (l1e_get_flags(*p2m_entry) & _PAGE_PSE) )
    {
        unsigned long flags, pfn;
        struct page_info *pg;

        pg = p2m_alloc_ptp(p2m, PGT_l1_page_table);
        if ( pg == NULL )
            return -ENOMEM;

        /* New splintered mappings inherit the flags of the old superpage,
         * with a little reorganisation for the _PAGE_PSE_PAT bit. */
        flags = l1e_get_flags(*p2m_entry);
        pfn = l1e_get_pfn(*p2m_entry);
        if ( pfn & 1 )           /* ==> _PAGE_PSE_PAT was set */
            pfn -= 1;            /* Clear it; _PAGE_PSE becomes _PAGE_PAT */
        else
            flags &= ~_PAGE_PSE; /* Clear _PAGE_PSE (== _PAGE_PAT) */

        l1_entry = __map_domain_page(pg);
        for ( i = 0; i < L1_PAGETABLE_ENTRIES; i++ )
        {
            new_entry = l1e_from_pfn(pfn + i, flags);
            p2m_add_iommu_flags(&new_entry, 0, 0);
            p2m->write_p2m_entry(p2m, gfn, l1_entry + i, new_entry, 1);
        }
        unmap_domain_page(l1_entry);

        new_entry = l1e_from_pfn(mfn_x(page_to_mfn(pg)),
                                 P2M_BASE_FLAGS | _PAGE_RW);
        p2m_add_iommu_flags(&new_entry, 1, IOMMUF_readable|IOMMUF_writable);
        p2m->write_p2m_entry(p2m, gfn, p2m_entry, new_entry, 2);
    }

    next = map_domain_page(_mfn(l1e_get_pfn(*p2m_entry)));
    if ( unmap )
        unmap_domain_page(*table);
    *table = next;

    return 0;
}
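/*
 * Aside (not part of the source): the pfn/flags reshuffle above relies on
 * the PAT bit changing position between entry formats: in a PSE (super)page
 * entry PAT is bit 12 (_PAGE_PSE_PAT), which aliases bit 0 of the stored
 * pfn, while in a 4K entry PAT is bit 7, the same bit as _PAGE_PSE. A
 * standalone sketch with the bit values hard-coded to the x86 layout; the
 * SK_* macros are local stand-ins for the real flag definitions.
 */
#include <stdio.h>

#define SK_PAGE_PSE     0x080UL  /* bit 7; doubles as _PAGE_PAT in 4K entries */
#define SK_PAGE_PSE_PAT 0x1000UL /* bit 12; aliases pfn bit 0 in PSE entries */

int main(void)
{
    /* A 2MB PSE entry whose PAT bit was set: the pfn's low bit reads 1. */
    unsigned long pfn = 0x200 | 1, flags = SK_PAGE_PSE;

    if ( pfn & 1 )             /* ==> _PAGE_PSE_PAT was set */
        pfn -= 1;              /* clear it; _PAGE_PSE now serves as _PAGE_PAT */
    else
        flags &= ~SK_PAGE_PSE; /* no PAT: clear _PAGE_PSE (== _PAGE_PAT) */

    printf("pfn=%#lx flags=%#lx\n", pfn, flags); /* pfn=0x200 flags=0x80 */
    return 0;
}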
void
mc_memerr_dhandler(struct mca_binfo *binfo,
                   enum mce_result *result,
                   struct cpu_user_regs *regs)
{
    struct mcinfo_bank *bank = binfo->mib;
    struct mcinfo_global *global = binfo->mig;
    struct domain *d;
    unsigned long mfn, gfn;
    uint32_t status;
    int vmce_vcpuid;

    if (!mc_check_addr(bank->mc_status, bank->mc_misc, MC_ADDR_PHYSICAL)) {
        dprintk(XENLOG_WARNING,
                "No physical address provided for memory error\n");
        return;
    }

    mfn = bank->mc_addr >> PAGE_SHIFT;
    if (offline_page(mfn, 1, &status)) {
        dprintk(XENLOG_WARNING,
                "Failed to offline page %lx for MCE error\n", mfn);
        return;
    }

    mci_action_add_pageoffline(binfo->bank, binfo->mi, mfn, status);

    /* This is a free page */
    if (status & PG_OFFLINE_OFFLINED)
        *result = MCER_RECOVERED;
    else if (status & PG_OFFLINE_AGAIN)
        *result = MCER_CONTINUE;
    else if (status & PG_OFFLINE_PENDING) {
        /* This page has an owner */
        if (status & PG_OFFLINE_OWNED) {
            bank->mc_domid = status >> PG_OFFLINE_OWNER_SHIFT;
            mce_printk(MCE_QUIET, "MCE: This error page is owned"
                       " by DOM %d\n", bank->mc_domid);
            /* XXX: Cannot handle shared pages yet
             * (this should identify all domains and gfn mapping to
             * the mfn in question) */
            BUG_ON( bank->mc_domid == DOMID_COW );
            if ( bank->mc_domid != DOMID_XEN ) {
                d = get_domain_by_id(bank->mc_domid);
                ASSERT(d);
                gfn = get_gpfn_from_mfn((bank->mc_addr) >> PAGE_SHIFT);

                if ( !is_vmce_ready(bank, d) ) {
                    printk("DOM%d not ready for vMCE\n", d->domain_id);
                    goto vmce_failed;
                }

                if ( unmmap_broken_page(d, _mfn(mfn), gfn) ) {
                    printk("Unmap broken memory %lx for DOM%d failed\n",
                           mfn, d->domain_id);
                    goto vmce_failed;
                }

                bank->mc_addr = gfn << PAGE_SHIFT |
                                (bank->mc_addr & (PAGE_SIZE - 1));
                if ( fill_vmsr_data(bank, d, global->mc_gstatus) == -1 ) {
                    mce_printk(MCE_QUIET, "Fill vMCE# data for DOM%d "
                               "failed\n", bank->mc_domid);
                    goto vmce_failed;
                }

                if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL )
                    vmce_vcpuid = VMCE_INJECT_BROADCAST;
                else
                    vmce_vcpuid = global->mc_vcpuid;

                /* We will inject the vMCE to the DomU */
                if ( inject_vmce(d, vmce_vcpuid) < 0 ) {
                    mce_printk(MCE_QUIET, "inject vMCE to DOM%d"
                               " failed\n", d->domain_id);
                    goto vmce_failed;
                }

                /* The impacted domain goes on with its recovery job if it
                 * has its own MCA handler. For Xen, the error has been
                 * contained and its own recovery job is finished. */
                *result = MCER_RECOVERED;
                put_domain(d);

                return;
vmce_failed:
                put_domain(d);
                domain_crash(d);
            }
        }
    }
}
int
guest_physmap_add_entry(struct domain *d, unsigned long gfn,
                        unsigned long mfn, unsigned int page_order,
                        p2m_type_t t)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    unsigned long i, ogfn;
    p2m_type_t ot;
    p2m_access_t a;
    mfn_t omfn;
    int pod_count = 0;
    int rc = 0;

    if ( !paging_mode_translate(d) )
    {
        if ( need_iommu(d) && t == p2m_ram_rw )
        {
            for ( i = 0; i < (1 << page_order); i++ )
            {
                rc = iommu_map_page(d, mfn + i, mfn + i,
                                    IOMMUF_readable|IOMMUF_writable);
                if ( rc != 0 )
                {
                    while ( i-- > 0 )
                        iommu_unmap_page(d, mfn + i);
                    return rc;
                }
            }
        }
        return 0;
    }

    p2m_lock(p2m);

    P2M_DEBUG("adding gfn=%#lx mfn=%#lx\n", gfn, mfn);

    /* First, remove m->p mappings for existing p->m mappings */
    for ( i = 0; i < (1UL << page_order); i++ )
    {
        omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
        if ( p2m_is_shared(ot) )
        {
            /* Do an unshare to cleanly take care of all corner cases. */
            int rc;
            rc = mem_sharing_unshare_page(p2m->domain, gfn + i, 0);
            if ( rc )
            {
                p2m_unlock(p2m);
                /* NOTE: Should a guest domain bring this upon itself,
                 * there is not a whole lot we can do. We are buried
                 * deep in locks from most code paths by now. So, fail
                 * the call and don't try to sleep on a wait queue
                 * while placing the mem event.
                 *
                 * However, all current (changeset 3432abcf9380) code
                 * paths avoid this unsavoury situation. For now.
                 *
                 * Foreign domains are okay to place an event as they
                 * won't go to sleep. */
                (void)mem_sharing_notify_enomem(p2m->domain, gfn + i, 0);
                return rc;
            }
            omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
            ASSERT(!p2m_is_shared(ot));
        }
        if ( p2m_is_grant(ot) )
        {
            /* Really shouldn't be unmapping grant maps this way */
            domain_crash(d);
            p2m_unlock(p2m);
            return -EINVAL;
        }
        else if ( p2m_is_ram(ot) && !p2m_is_paged(ot) )
        {
            ASSERT(mfn_valid(omfn));
            set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
        }
        else if ( ot == p2m_populate_on_demand )
        {
            /* Count how many PoD entries we'll be replacing if successful */
            pod_count++;
        }
        else if ( p2m_is_paging(ot) && (ot != p2m_ram_paging_out) )
        {
            /* We're plugging a hole in the physmap where a paged out
             * page was */
            atomic_dec(&d->paged_pages);
        }
    }

    /* Then, look for m->p mappings for this range and deal with them */
    for ( i = 0; i < (1UL << page_order); i++ )
    {
        if ( page_get_owner(mfn_to_page(_mfn(mfn + i))) == dom_cow )
        {
            /* This is no way to add a shared page to your physmap! */
            gdprintk(XENLOG_ERR, "Adding shared mfn %lx directly to dom %hu "
                     "physmap not allowed.\n", mfn + i, d->domain_id);
            p2m_unlock(p2m);
            return -EINVAL;
        }
        if ( page_get_owner(mfn_to_page(_mfn(mfn + i))) != d )
            continue;
        ogfn = mfn_to_gfn(d, _mfn(mfn + i));
        if ( (ogfn != INVALID_M2P_ENTRY) && (ogfn != gfn + i) )
        {
            /* This machine frame is already mapped at another physical
             * address */
            P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
                      mfn + i, ogfn, gfn + i);
            omfn = p2m->get_entry(p2m, ogfn, &ot, &a, 0, NULL);
            if ( p2m_is_ram(ot) && !p2m_is_paged(ot) )
            {
                ASSERT(mfn_valid(omfn));
                P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n",
                          ogfn , mfn_x(omfn));
                if ( mfn_x(omfn) == (mfn + i) )
                    p2m_remove_page(p2m, ogfn, mfn + i, 0);
            }
        }
    }

    /* Now, actually do the two-way mapping */
    if ( mfn_valid(_mfn(mfn)) )
    {
        if ( !set_p2m_entry(p2m, gfn, _mfn(mfn), page_order, t,
                            p2m->default_access) )
        {
            rc = -EINVAL;
            goto out; /* Failed to update p2m, bail without updating m2p. */
        }
        if ( !p2m_is_grant(t) )
        {
            for ( i = 0; i < (1UL << page_order); i++ )
                set_gpfn_from_mfn(mfn + i, gfn + i);
        }
    }
    else
    {
        gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
                 gfn, mfn);
        if ( !set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order,
                            p2m_invalid, p2m->default_access) )
            rc = -EINVAL;
        else
        {
            pod_lock(p2m);
            p2m->pod.entry_count -= pod_count;
            BUG_ON(p2m->pod.entry_count < 0);
            pod_unlock(p2m);
        }
    }

out:
    p2m_unlock(p2m);

    return rc;
}
static void populate_physmap(struct memop_args *a)
{
    struct page_info *page;
    unsigned int i, j;
    xen_pfn_t gpfn, mfn;
    struct domain *d = a->domain;

    if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents - 1) )
        return;

    if ( a->extent_order > (a->memflags & MEMF_populate_on_demand ? MAX_ORDER :
                            max_order(current->domain)) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( i != a->nr_done && hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
            goto out;

        if ( a->memflags & MEMF_populate_on_demand )
        {
            if ( guest_physmap_mark_populate_on_demand(d, gpfn,
                                                       a->extent_order) < 0 )
                goto out;
        }
        else
        {
            if ( is_domain_direct_mapped(d) )
            {
                mfn = gpfn;

                for ( j = 0; j < (1U << a->extent_order); j++, mfn++ )
                {
                    if ( !mfn_valid(mfn) )
                    {
                        gdprintk(XENLOG_INFO, "Invalid mfn %#"PRI_xen_pfn"\n",
                                 mfn);
                        goto out;
                    }

                    page = mfn_to_page(mfn);
                    if ( !get_page(page, d) )
                    {
                        gdprintk(XENLOG_INFO,
                                 "mfn %#"PRI_xen_pfn" doesn't belong to d%d\n",
                                 mfn, d->domain_id);
                        goto out;
                    }
                    put_page(page);
                }

                mfn = gpfn;
                page = mfn_to_page(mfn);
            }
            else
            {
                page = alloc_domheap_pages(d, a->extent_order, a->memflags);

                if ( unlikely(!page) )
                {
                    if ( !tmem_enabled() || a->extent_order )
                        gdprintk(XENLOG_INFO,
                                 "Could not allocate order=%u extent: id=%d memflags=%#x (%u of %u)\n",
                                 a->extent_order, d->domain_id, a->memflags,
                                 i, a->nr_extents);
                    goto out;
                }

                mfn = page_to_mfn(page);
            }

            guest_physmap_add_page(d, _gfn(gpfn), _mfn(mfn), a->extent_order);

            if ( !paging_mode_translate(d) )
            {
                for ( j = 0; j < (1U << a->extent_order); j++ )
                    set_gpfn_from_mfn(mfn + j, gpfn + j);

                /* Inform the domain of the new page's machine address. */
                if ( unlikely(__copy_to_guest_offset(a->extent_list, i,
                                                     &mfn, 1)) )
                    goto out;
            }
        }
    }

out:
    a->nr_done = i;
}
void dump_pt_walk(paddr_t ttbr, paddr_t addr,
                  unsigned int root_level,
                  unsigned int nr_root_tables)
{
    static const char *level_strs[4] = { "0TH", "1ST", "2ND", "3RD" };
    const unsigned long root_pfn = paddr_to_pfn(ttbr);
    const unsigned int offsets[4] = {
        zeroeth_table_offset(addr),
        first_table_offset(addr),
        second_table_offset(addr),
        third_table_offset(addr)
    };
    lpae_t pte, *mapping;
    unsigned int level, root_table;

#ifdef CONFIG_ARM_32
    BUG_ON(root_level < 1);
#endif
    BUG_ON(root_level > 3);

    if ( nr_root_tables > 1 )
    {
        /*
         * Concatenated root-level tables. The table number will be
         * the offset at the previous level. It is not possible to
         * concatenate a level-0 root.
         */
        BUG_ON(root_level == 0);
        root_table = offsets[root_level - 1];
        printk("Using concatenated root table %u\n", root_table);
        if ( root_table >= nr_root_tables )
        {
            printk("Invalid root table offset\n");
            return;
        }
    }
    else
        root_table = 0;

    mapping = map_domain_page(_mfn(root_pfn + root_table));

    for ( level = root_level; ; level++ )
    {
        if ( offsets[level] > LPAE_ENTRIES )
            break;

        pte = mapping[offsets[level]];

        printk("%s[0x%x] = 0x%"PRIpaddr"\n",
               level_strs[level], offsets[level], pte.bits);

        if ( level == 3 || !pte.walk.valid || !pte.walk.table )
            break;

        /* For next iteration */
        unmap_domain_page(mapping);
        mapping = map_domain_page(_mfn(pte.walk.base));
    }

    unmap_domain_page(mapping);
}
long p2m_pt_audit_p2m(struct p2m_domain *p2m)
{
    unsigned long entry_count = 0, pmbad = 0;
    unsigned long mfn, gfn, m2pfn;
    int test_linear;
    struct domain *d = p2m->domain;

    ASSERT(p2m_locked_by_me(p2m));
    ASSERT(pod_locked_by_me(p2m));

    test_linear = ( (d == current->domain)
                    && !pagetable_is_null(current->arch.monitor_table) );
    if ( test_linear )
        flush_tlb_local();

    /* Audit part one: walk the domain's p2m table, checking the entries. */
    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) != 0 )
    {
        l2_pgentry_t *l2e;
        l1_pgentry_t *l1e;
        int i1, i2;

#if CONFIG_PAGING_LEVELS == 4
        l4_pgentry_t *l4e;
        l3_pgentry_t *l3e;
        int i4, i3;
        l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
#else /* CONFIG_PAGING_LEVELS == 3 */
        l3_pgentry_t *l3e;
        int i3;
        l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
#endif

        gfn = 0;
#if CONFIG_PAGING_LEVELS >= 4
        for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
        {
            if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
            {
                gfn += 1 << (L4_PAGETABLE_SHIFT - PAGE_SHIFT);
                continue;
            }
            l3e = map_domain_page(mfn_x(_mfn(l4e_get_pfn(l4e[i4]))));
#endif
            for ( i3 = 0;
                  i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
                  i3++ )
            {
                if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
                {
                    gfn += 1 << (L3_PAGETABLE_SHIFT - PAGE_SHIFT);
                    continue;
                }

                /* check for 1GB super page */
                if ( l3e_get_flags(l3e[i3]) & _PAGE_PSE )
                {
                    mfn = l3e_get_pfn(l3e[i3]);
                    ASSERT(mfn_valid(_mfn(mfn)));
                    /* we have to cover 512x512 4K pages */
                    for ( i2 = 0;
                          i2 < (L2_PAGETABLE_ENTRIES * L1_PAGETABLE_ENTRIES);
                          i2++)
                    {
                        m2pfn = get_gpfn_from_mfn(mfn+i2);
                        if ( m2pfn != (gfn + i2) )
                        {
                            pmbad++;
                            P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
                                       " -> gfn %#lx\n", gfn+i2, mfn+i2,
                                       m2pfn);
                            BUG();
                        }
                        gfn += 1 << (L3_PAGETABLE_SHIFT - PAGE_SHIFT);
                        continue;
                    }
                }

                l2e = map_domain_page(mfn_x(_mfn(l3e_get_pfn(l3e[i3]))));
                for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
                {
                    if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
                    {
                        if ( (l2e_get_flags(l2e[i2]) & _PAGE_PSE)
                             && ( p2m_flags_to_type(l2e_get_flags(l2e[i2]))
                                  == p2m_populate_on_demand ) )
                            entry_count += SUPERPAGE_PAGES;
                        gfn += 1 << (L2_PAGETABLE_SHIFT - PAGE_SHIFT);
                        continue;
                    }

                    /* check for super page */
                    if ( l2e_get_flags(l2e[i2]) & _PAGE_PSE )
                    {
                        mfn = l2e_get_pfn(l2e[i2]);
                        ASSERT(mfn_valid(_mfn(mfn)));
                        for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++)
                        {
                            m2pfn = get_gpfn_from_mfn(mfn+i1);
                            /* Allow shared M2Ps */
                            if ( (m2pfn != (gfn + i1)) &&
                                 (m2pfn != SHARED_M2P_ENTRY) )
                            {
                                pmbad++;
                                P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
                                           " -> gfn %#lx\n", gfn+i1, mfn+i1,
                                           m2pfn);
                                BUG();
                            }
                        }
                        gfn += 1 << (L2_PAGETABLE_SHIFT - PAGE_SHIFT);
                        continue;
                    }

                    l1e = map_domain_page(mfn_x(_mfn(l2e_get_pfn(l2e[i2]))));

                    for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
                    {
                        p2m_type_t type;

                        type = p2m_flags_to_type(l1e_get_flags(l1e[i1]));
                        if ( !(l1e_get_flags(l1e[i1]) & _PAGE_PRESENT) )
                        {
                            if ( type == p2m_populate_on_demand )
                                entry_count++;
                            continue;
                        }
                        mfn = l1e_get_pfn(l1e[i1]);
                        ASSERT(mfn_valid(_mfn(mfn)));
                        m2pfn = get_gpfn_from_mfn(mfn);
                        if ( m2pfn != gfn &&
                             type != p2m_mmio_direct &&
                             !p2m_is_grant(type) &&
                             !p2m_is_shared(type) )
                        {
                            pmbad++;
                            printk("mismatch: gfn %#lx -> mfn %#lx"
                                   " -> gfn %#lx\n", gfn, mfn, m2pfn);
                            P2M_PRINTK("mismatch: gfn %#lx -> mfn %#lx"
                                       " -> gfn %#lx\n", gfn, mfn, m2pfn);
                            BUG();
                        }
                    }
                    unmap_domain_page(l1e);
                }
                unmap_domain_page(l2e);
            }
#if CONFIG_PAGING_LEVELS >= 4
            unmap_domain_page(l3e);
        }
#endif

#if CONFIG_PAGING_LEVELS == 4
        unmap_domain_page(l4e);
#else /* CONFIG_PAGING_LEVELS == 3 */
        unmap_domain_page(l3e);
#endif

    }

    if ( entry_count != p2m->pod.entry_count )
    {
        printk("%s: refcounted entry count %ld, audit count %lu!\n",
               __func__, p2m->pod.entry_count, entry_count);
        BUG();
    }

    return pmbad;
}
int guest_remove_page(struct domain *d, unsigned long gmfn)
{
    struct page_info *page;
#ifdef CONFIG_X86
    p2m_type_t p2mt;
#endif
    unsigned long mfn;

#ifdef CONFIG_X86
    mfn = mfn_x(get_gfn_query(d, gmfn, &p2mt));
    if ( unlikely(p2m_is_paging(p2mt)) )
    {
        guest_physmap_remove_page(d, gmfn, mfn, 0);
        put_gfn(d, gmfn);
        /* If the page hasn't yet been paged out, there is an
         * actual page that needs to be released. */
        if ( p2mt == p2m_ram_paging_out )
        {
            ASSERT(mfn_valid(mfn));
            page = mfn_to_page(mfn);
            if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
                put_page(page);
        }
        p2m_mem_paging_drop_page(d, gmfn, p2mt);
        return 1;
    }
    if ( p2mt == p2m_mmio_direct )
    {
        clear_mmio_p2m_entry(d, gmfn, _mfn(mfn));
        put_gfn(d, gmfn);
        return 1;
    }
#else
    mfn = gmfn_to_mfn(d, gmfn);
#endif
    if ( unlikely(!mfn_valid(mfn)) )
    {
        put_gfn(d, gmfn);
        gdprintk(XENLOG_INFO, "Domain %u page number %lx invalid\n",
                 d->domain_id, gmfn);
        return 0;
    }

#ifdef CONFIG_X86
    if ( p2m_is_shared(p2mt) )
    {
        /* Unshare the page, bail out on error. We unshare because
         * we might be the only one using this shared page, and we
         * need to trigger proper cleanup. Once done, this is
         * like any other page. */
        if ( mem_sharing_unshare_page(d, gmfn, 0) )
        {
            put_gfn(d, gmfn);
            (void)mem_sharing_notify_enomem(d, gmfn, 0);
            return 0;
        }
        /* Maybe the mfn changed */
        mfn = mfn_x(get_gfn_query_unlocked(d, gmfn, &p2mt));
        ASSERT(!p2m_is_shared(p2mt));
    }
#endif /* CONFIG_X86 */

    page = mfn_to_page(mfn);
    if ( unlikely(!get_page(page, d)) )
    {
        put_gfn(d, gmfn);
        gdprintk(XENLOG_INFO, "Bad page free for domain %u\n", d->domain_id);
        return 0;
    }

    if ( test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
        put_page_and_type(page);

    /*
     * With the lack of an IOMMU on some platforms, domains with DMA-capable
     * device must retrieve the same pfn when the hypercall populate_physmap
     * is called.
     *
     * For this purpose (and to match populate_physmap() behavior), the page
     * is kept allocated.
     */
    if ( !is_domain_direct_mapped(d) &&
         test_and_clear_bit(_PGC_allocated, &page->count_info) )
        put_page(page);

    guest_physmap_remove_page(d, gmfn, mfn, 0);

    put_page(page);
    put_gfn(d, gmfn);

    return 1;
}
static mfn_t
p2m_pt_get_entry(struct p2m_domain *p2m, unsigned long gfn,
                 p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
                 unsigned int *page_order)
{
    mfn_t mfn;
    paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
    l2_pgentry_t *l2e;
    l1_pgentry_t *l1e;
    unsigned long l1e_flags;
    p2m_type_t l1t;

    ASSERT(paging_mode_translate(p2m->domain));

    /* XXX This is for compatibility with the old model, where anything not
     * XXX marked as RAM was considered to be emulated MMIO space.
     * XXX Once we start explicitly registering MMIO regions in the p2m
     * XXX we will return p2m_invalid for unmapped gfns */
    *t = p2m_mmio_dm;
    /* Not implemented except with EPT */
    *a = p2m_access_rwx;

    if ( gfn > p2m->max_mapped_pfn )
        /* This pfn is higher than the highest the p2m map currently holds */
        return _mfn(INVALID_MFN);

    mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));

    {
        l4_pgentry_t *l4e = map_domain_page(mfn_x(mfn));
        l4e += l4_table_offset(addr);
        if ( (l4e_get_flags(*l4e) & _PAGE_PRESENT) == 0 )
        {
            unmap_domain_page(l4e);
            return _mfn(INVALID_MFN);
        }
        mfn = _mfn(l4e_get_pfn(*l4e));
        unmap_domain_page(l4e);
    }
    {
        l3_pgentry_t *l3e = map_domain_page(mfn_x(mfn));
        l3e += l3_table_offset(addr);
pod_retry_l3:
        if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
        {
            if ( p2m_flags_to_type(l3e_get_flags(*l3e)) ==
                 p2m_populate_on_demand )
            {
                if ( q & P2M_ALLOC )
                {
                    if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
                        goto pod_retry_l3;
                    gdprintk(XENLOG_ERR, "%s: Allocate 1GB failed!\n",
                             __func__);
                }
                else
                    *t = p2m_populate_on_demand;
            }
            unmap_domain_page(l3e);
            return _mfn(INVALID_MFN);
        }
        else if ( (l3e_get_flags(*l3e) & _PAGE_PSE) )
        {
            mfn = _mfn(l3e_get_pfn(*l3e) +
                       l2_table_offset(addr) * L1_PAGETABLE_ENTRIES +
                       l1_table_offset(addr));
            *t = p2m_flags_to_type(l3e_get_flags(*l3e));
            unmap_domain_page(l3e);

            ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));

            if ( page_order )
                *page_order = PAGE_ORDER_1G;
            return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN);
        }

        mfn = _mfn(l3e_get_pfn(*l3e));
        unmap_domain_page(l3e);
    }

    l2e = map_domain_page(mfn_x(mfn));
    l2e += l2_table_offset(addr);

pod_retry_l2:
    if ( (l2e_get_flags(*l2e) & _PAGE_PRESENT) == 0 )
    {
        /* PoD: Try to populate a 2-meg chunk */
        if ( p2m_flags_to_type(l2e_get_flags(*l2e)) == p2m_populate_on_demand )
        {
            if ( q & P2M_ALLOC )
            {
                if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_2M, q) )
                    goto pod_retry_l2;
            }
            else
                *t = p2m_populate_on_demand;
        }

        unmap_domain_page(l2e);
        return _mfn(INVALID_MFN);
    }
    else if ( (l2e_get_flags(*l2e) & _PAGE_PSE) )
    {
        mfn = _mfn(l2e_get_pfn(*l2e) + l1_table_offset(addr));
        *t = p2m_flags_to_type(l2e_get_flags(*l2e));
        unmap_domain_page(l2e);

        ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));

        if ( page_order )
            *page_order = PAGE_ORDER_2M;
        return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN);
    }

    mfn = _mfn(l2e_get_pfn(*l2e));
    unmap_domain_page(l2e);

    l1e = map_domain_page(mfn_x(mfn));
    l1e += l1_table_offset(addr);
pod_retry_l1:
    l1e_flags = l1e_get_flags(*l1e);
    l1t = p2m_flags_to_type(l1e_flags);
    if ( ((l1e_flags & _PAGE_PRESENT) == 0) && (!p2m_is_paging(l1t)) )
    {
        /* PoD: Try to populate */
        if ( l1t == p2m_populate_on_demand )
        {
            if ( q & P2M_ALLOC )
            {
                if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_4K, q) )
                    goto pod_retry_l1;
            }
            else
                *t = p2m_populate_on_demand;
        }

        unmap_domain_page(l1e);
        return _mfn(INVALID_MFN);
    }
    mfn = _mfn(l1e_get_pfn(*l1e));
    *t = l1t;
    unmap_domain_page(l1e);

    ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t) || p2m_is_paging(*t));

    if ( page_order )
        *page_order = PAGE_ORDER_4K;
    return (p2m_is_valid(*t) || p2m_is_grant(*t)) ? mfn : _mfn(INVALID_MFN);
}
unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
    struct vcpu *v, struct p2m_domain *p2m, unsigned long cr3,
    paddr_t ga, uint32_t *pfec, unsigned int *page_order)
{
    uint32_t missing;
    mfn_t top_mfn;
    void *top_map;
    p2m_type_t p2mt;
    walk_t gw;
    unsigned long top_gfn;
    struct page_info *top_page;

    /* Get the top-level table's MFN */
    top_gfn = cr3 >> PAGE_SHIFT;
    top_page = get_page_from_gfn_p2m(p2m->domain, p2m, top_gfn,
                                     &p2mt, NULL, P2M_ALLOC | P2M_UNSHARE);
    if ( p2m_is_paging(p2mt) )
    {
        ASSERT(p2m_is_hostp2m(p2m));
        pfec[0] = PFEC_page_paged;
        if ( top_page )
            put_page(top_page);
        p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);
        return gfn_x(INVALID_GFN);
    }
    if ( p2m_is_shared(p2mt) )
    {
        pfec[0] = PFEC_page_shared;
        if ( top_page )
            put_page(top_page);
        return gfn_x(INVALID_GFN);
    }
    if ( !top_page )
    {
        pfec[0] &= ~PFEC_page_present;
        goto out_tweak_pfec;
    }
    top_mfn = _mfn(page_to_mfn(top_page));

    /* Map the top-level table and call the tree-walker */
    ASSERT(mfn_valid(top_mfn));
    top_map = map_domain_page(top_mfn);
#if GUEST_PAGING_LEVELS == 3
    top_map += (cr3 & ~(PAGE_MASK | 31));
#endif
    missing = guest_walk_tables(v, p2m, ga, &gw, pfec[0], top_mfn, top_map);
    unmap_domain_page(top_map);
    put_page(top_page);

    /* Interpret the answer */
    if ( missing == 0 )
    {
        gfn_t gfn = guest_walk_to_gfn(&gw);
        struct page_info *page;
        page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), &p2mt,
                                     NULL, P2M_ALLOC | P2M_UNSHARE);
        if ( page )
            put_page(page);
        if ( p2m_is_paging(p2mt) )
        {
            ASSERT(p2m_is_hostp2m(p2m));
            pfec[0] = PFEC_page_paged;
            p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
            return gfn_x(INVALID_GFN);
        }
        if ( p2m_is_shared(p2mt) )
        {
            pfec[0] = PFEC_page_shared;
            return gfn_x(INVALID_GFN);
        }

        if ( page_order )
            *page_order = guest_walk_to_page_order(&gw);

        return gfn_x(gfn);
    }

    if ( missing & _PAGE_PRESENT )
        pfec[0] &= ~PFEC_page_present;

    if ( missing & _PAGE_INVALID_BITS )
        pfec[0] |= PFEC_reserved_bit;

    if ( missing & _PAGE_PKEY_BITS )
        pfec[0] |= PFEC_prot_key;

    if ( missing & _PAGE_PAGED )
        pfec[0] = PFEC_page_paged;

    if ( missing & _PAGE_SHARED )
        pfec[0] = PFEC_page_shared;

 out_tweak_pfec:
    /*
     * SDM Intel 64 Volume 3, Chapter Paging, PAGE-FAULT EXCEPTIONS:
     * The PFEC_insn_fetch flag is set only when NX or SMEP are enabled.
     */
    if ( !hvm_nx_enabled(v) && !hvm_smep_enabled(v) )
        pfec[0] &= ~PFEC_insn_fetch;

    return gfn_x(INVALID_GFN);
}
/*
 * Lookup the MFN corresponding to a domain's PFN.
 *
 * There are no processor functions to do a stage 2 only lookup therefore we
 * do a software walk.
 */
static paddr_t __p2m_lookup(struct domain *d, paddr_t paddr, p2m_type_t *t)
{
    struct p2m_domain *p2m = &d->arch.p2m;
    const unsigned int offsets[4] = {
        zeroeth_table_offset(paddr),
        first_table_offset(paddr),
        second_table_offset(paddr),
        third_table_offset(paddr)
    };
    const paddr_t masks[4] = {
        ZEROETH_MASK, FIRST_MASK, SECOND_MASK, THIRD_MASK
    };
    lpae_t pte, *map;
    paddr_t maddr = INVALID_PADDR;
    paddr_t mask = 0;
    p2m_type_t _t;
    unsigned int level, root_table;

    ASSERT(spin_is_locked(&p2m->lock));
    BUILD_BUG_ON(THIRD_MASK != PAGE_MASK);

    /* Allow t to be NULL */
    t = t ?: &_t;

    *t = p2m_invalid;

    if ( P2M_ROOT_PAGES > 1 )
    {
        /*
         * Concatenated root-level tables. The table number will be
         * the offset at the previous level. It is not possible to
         * concatenate a level-0 root.
         */
        ASSERT(P2M_ROOT_LEVEL > 0);
        root_table = offsets[P2M_ROOT_LEVEL - 1];
        if ( root_table >= P2M_ROOT_PAGES )
            goto err;
    }
    else
        root_table = 0;

    map = __map_domain_page(p2m->root + root_table);

    ASSERT(P2M_ROOT_LEVEL < 4);

    for ( level = P2M_ROOT_LEVEL ; level < 4 ; level++ )
    {
        mask = masks[level];

        pte = map[offsets[level]];

        if ( level == 3 && !p2m_table(pte) )
            /* Invalid, clobber the pte */
            pte.bits = 0;
        if ( level == 3 || !p2m_table(pte) )
            /* Done */
            break;

        ASSERT(level < 3);

        /* Map for next level */
        unmap_domain_page(map);
        map = map_domain_page(_mfn(pte.p2m.base));
    }

    unmap_domain_page(map);

    if ( p2m_valid(pte) )
    {
        ASSERT(mask);
        ASSERT(pte.p2m.type != p2m_invalid);
        maddr = (pte.bits & PADDR_MASK & mask) | (paddr & ~mask);
        *t = pte.p2m.type;
    }

err:
    return maddr;
}
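/*
 * Aside (not part of the source): the final address above is spliced from
 * the PTE's output base (high bits, selected by the level's mask) and the
 * untranslated low bits of the input address. A standalone sketch of that
 * splice; the SK_* mask widths are illustrative assumptions, matching a 4KB
 * level-3 entry and a 40-bit physical address space.
 */
#include <stdio.h>
#include <stdint.h>

#define SK_PADDR_MASK ((1ULL << 40) - 1) /* illustrative 40-bit PA space */
#define SK_THIRD_MASK (~0ULL << 12)      /* 4KB granule at level 3 */

int main(void)
{
    uint64_t pte_bits = 0x0040201000ULL | 0x3; /* output base | attr bits */
    uint64_t paddr    = 0x12345678ULL;         /* input (guest) address */
    uint64_t mask     = SK_THIRD_MASK;

    /* High bits come from the PTE, low bits pass through untranslated. */
    uint64_t maddr = (pte_bits & SK_PADDR_MASK & mask) | (paddr & ~mask);

    printf("maddr = %#llx\n", (unsigned long long)maddr); /* 0x40201678 */
    return 0;
}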
static mfn_t
p2m_gfn_to_mfn(struct p2m_domain *p2m, unsigned long gfn,
               p2m_type_t *t, p2m_access_t *a, p2m_query_t q,
               unsigned int *page_order)
{
    mfn_t mfn;
    paddr_t addr = ((paddr_t)gfn) << PAGE_SHIFT;
    l2_pgentry_t *l2e;
    l1_pgentry_t *l1e;
    unsigned long l1e_flags;
    p2m_type_t l1t;

    ASSERT(paging_mode_translate(p2m->domain));

    /* XXX This is for compatibility with the old model, where anything not
     * XXX marked as RAM was considered to be emulated MMIO space.
     * XXX Once we start explicitly registering MMIO regions in the p2m
     * XXX we will return p2m_invalid for unmapped gfns */
    *t = p2m_mmio_dm;
    /* Not implemented except with EPT */
    *a = p2m_access_rwx;

    if ( gfn > p2m->max_mapped_pfn )
        /* This pfn is higher than the highest the p2m map currently holds */
        return _mfn(INVALID_MFN);

    /* Use the fast path with the linear mapping if we can */
    if ( p2m == p2m_get_hostp2m(current->domain) )
        return p2m_gfn_to_mfn_current(p2m, gfn, t, a, q, page_order);

    mfn = pagetable_get_mfn(p2m_get_pagetable(p2m));

#if CONFIG_PAGING_LEVELS >= 4
    {
        l4_pgentry_t *l4e = map_domain_page(mfn_x(mfn));
        l4e += l4_table_offset(addr);
        if ( (l4e_get_flags(*l4e) & _PAGE_PRESENT) == 0 )
        {
            unmap_domain_page(l4e);
            return _mfn(INVALID_MFN);
        }
        mfn = _mfn(l4e_get_pfn(*l4e));
        unmap_domain_page(l4e);
    }
#endif
    {
        l3_pgentry_t *l3e = map_domain_page(mfn_x(mfn));
#if CONFIG_PAGING_LEVELS == 3
        /* On PAE hosts the p2m has eight l3 entries, not four (see
         * shadow_set_p2m_entry()) so we can't use l3_table_offset.
         * Instead, just count the number of l3es from zero. It's safe
         * to do this because we already checked that the gfn is within
         * the bounds of the p2m. */
        l3e += (addr >> L3_PAGETABLE_SHIFT);
#else
        l3e += l3_table_offset(addr);
#endif
pod_retry_l3:
        if ( (l3e_get_flags(*l3e) & _PAGE_PRESENT) == 0 )
        {
            if ( p2m_flags_to_type(l3e_get_flags(*l3e)) ==
                 p2m_populate_on_demand )
            {
                if ( q & P2M_ALLOC )
                {
                    if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_1G, q) )
                        goto pod_retry_l3;
                    gdprintk(XENLOG_ERR, "%s: Allocate 1GB failed!\n",
                             __func__);
                }
                else
                    *t = p2m_populate_on_demand;
            }
            unmap_domain_page(l3e);
            return _mfn(INVALID_MFN);
        }
        else if ( (l3e_get_flags(*l3e) & _PAGE_PSE) )
        {
            mfn = _mfn(l3e_get_pfn(*l3e) +
                       l2_table_offset(addr) * L1_PAGETABLE_ENTRIES +
                       l1_table_offset(addr));
            *t = p2m_flags_to_type(l3e_get_flags(*l3e));
            unmap_domain_page(l3e);

            ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));

            if ( page_order )
                *page_order = PAGE_ORDER_1G;
            return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN);
        }

        mfn = _mfn(l3e_get_pfn(*l3e));
        unmap_domain_page(l3e);
    }

    l2e = map_domain_page(mfn_x(mfn));
    l2e += l2_table_offset(addr);

pod_retry_l2:
    if ( (l2e_get_flags(*l2e) & _PAGE_PRESENT) == 0 )
    {
        /* PoD: Try to populate a 2-meg chunk */
        if ( p2m_flags_to_type(l2e_get_flags(*l2e)) == p2m_populate_on_demand )
        {
            if ( q & P2M_ALLOC )
            {
                if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_2M, q) )
                    goto pod_retry_l2;
            }
            else
                *t = p2m_populate_on_demand;
        }

        unmap_domain_page(l2e);
        return _mfn(INVALID_MFN);
    }
    else if ( (l2e_get_flags(*l2e) & _PAGE_PSE) )
    {
        mfn = _mfn(l2e_get_pfn(*l2e) + l1_table_offset(addr));
        *t = p2m_flags_to_type(l2e_get_flags(*l2e));
        unmap_domain_page(l2e);

        ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t));

        if ( page_order )
            *page_order = PAGE_ORDER_2M;
        return (p2m_is_valid(*t)) ? mfn : _mfn(INVALID_MFN);
    }

    mfn = _mfn(l2e_get_pfn(*l2e));
    unmap_domain_page(l2e);

    l1e = map_domain_page(mfn_x(mfn));
    l1e += l1_table_offset(addr);
pod_retry_l1:
    l1e_flags = l1e_get_flags(*l1e);
    l1t = p2m_flags_to_type(l1e_flags);
    if ( ((l1e_flags & _PAGE_PRESENT) == 0) && (!p2m_is_paging(l1t)) )
    {
        /* PoD: Try to populate */
        if ( l1t == p2m_populate_on_demand )
        {
            if ( q & P2M_ALLOC )
            {
                if ( !p2m_pod_demand_populate(p2m, gfn, PAGE_ORDER_4K, q) )
                    goto pod_retry_l1;
            }
            else
                *t = p2m_populate_on_demand;
        }

        unmap_domain_page(l1e);
        return _mfn(INVALID_MFN);
    }
    mfn = _mfn(l1e_get_pfn(*l1e));
    *t = l1t;
    unmap_domain_page(l1e);

    ASSERT(mfn_valid(mfn) || !p2m_is_ram(*t) || p2m_is_paging(*t));

    if ( page_order )
        *page_order = PAGE_ORDER_4K;
    return (p2m_is_valid(*t) || p2m_is_grant(*t)) ? mfn : _mfn(INVALID_MFN);
}
/* Walk the whole p2m table, changing any entries of the old type
 * to the new type. This is used in hardware-assisted paging to
 * quickly enable or disable log-dirty tracking */
static void p2m_change_type_global(struct p2m_domain *p2m,
                                   p2m_type_t ot, p2m_type_t nt)
{
    unsigned long mfn, gfn, flags;
    l1_pgentry_t l1e_content;
    l1_pgentry_t *l1e;
    l2_pgentry_t *l2e;
    mfn_t l1mfn, l2mfn, l3mfn;
    unsigned long i1, i2, i3;
    l3_pgentry_t *l3e;
#if CONFIG_PAGING_LEVELS == 4
    l4_pgentry_t *l4e;
    unsigned long i4;
#endif /* CONFIG_PAGING_LEVELS == 4 */

    BUG_ON(p2m_is_grant(ot) || p2m_is_grant(nt));
    BUG_ON(ot != nt && (ot == p2m_mmio_direct || nt == p2m_mmio_direct));

    if ( !paging_mode_translate(p2m->domain) )
        return;

    if ( pagetable_get_pfn(p2m_get_pagetable(p2m)) == 0 )
        return;

    ASSERT(p2m_locked_by_me(p2m));

#if CONFIG_PAGING_LEVELS == 4
    l4e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
#else /* CONFIG_PAGING_LEVELS == 3 */
    l3mfn = _mfn(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
    l3e = map_domain_page(mfn_x(pagetable_get_mfn(p2m_get_pagetable(p2m))));
#endif

#if CONFIG_PAGING_LEVELS >= 4
    for ( i4 = 0; i4 < L4_PAGETABLE_ENTRIES; i4++ )
    {
        if ( !(l4e_get_flags(l4e[i4]) & _PAGE_PRESENT) )
        {
            continue;
        }
        l3mfn = _mfn(l4e_get_pfn(l4e[i4]));
        l3e = map_domain_page(l4e_get_pfn(l4e[i4]));
#endif
        for ( i3 = 0;
              i3 < ((CONFIG_PAGING_LEVELS==4) ? L3_PAGETABLE_ENTRIES : 8);
              i3++ )
        {
            if ( !(l3e_get_flags(l3e[i3]) & _PAGE_PRESENT) )
            {
                continue;
            }
            if ( (l3e_get_flags(l3e[i3]) & _PAGE_PSE) )
            {
                flags = l3e_get_flags(l3e[i3]);
                if ( p2m_flags_to_type(flags) != ot )
                    continue;
                mfn = l3e_get_pfn(l3e[i3]);
                gfn = get_gpfn_from_mfn(mfn);
                flags = p2m_type_to_flags(nt, _mfn(mfn));
                l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE);
                p2m->write_p2m_entry(p2m, gfn,
                                     (l1_pgentry_t *)&l3e[i3],
                                     l3mfn, l1e_content, 3);
                continue;
            }

            l2mfn = _mfn(l3e_get_pfn(l3e[i3]));
            l2e = map_domain_page(l3e_get_pfn(l3e[i3]));
            for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; i2++ )
            {
                if ( !(l2e_get_flags(l2e[i2]) & _PAGE_PRESENT) )
                {
                    continue;
                }

                if ( (l2e_get_flags(l2e[i2]) & _PAGE_PSE) )
                {
                    flags = l2e_get_flags(l2e[i2]);
                    if ( p2m_flags_to_type(flags) != ot )
                        continue;
                    mfn = l2e_get_pfn(l2e[i2]);
                    /* Do not use get_gpfn_from_mfn because it may return
                       SHARED_M2P_ENTRY */
                    gfn = (i2 + (i3
#if CONFIG_PAGING_LEVELS >= 4
                                   + (i4 * L3_PAGETABLE_ENTRIES)
#endif
                                )
                           * L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES;
                    flags = p2m_type_to_flags(nt, _mfn(mfn));
                    l1e_content = l1e_from_pfn(mfn, flags | _PAGE_PSE);
                    p2m->write_p2m_entry(p2m, gfn,
                                         (l1_pgentry_t *)&l2e[i2],
                                         l2mfn, l1e_content, 2);
                    continue;
                }

                l1mfn = _mfn(l2e_get_pfn(l2e[i2]));
                l1e = map_domain_page(mfn_x(l1mfn));

                for ( i1 = 0; i1 < L1_PAGETABLE_ENTRIES; i1++, gfn++ )
                {
                    flags = l1e_get_flags(l1e[i1]);
                    if ( p2m_flags_to_type(flags) != ot )
                        continue;
                    mfn = l1e_get_pfn(l1e[i1]);
                    gfn = i1 + (i2 + (i3
#if CONFIG_PAGING_LEVELS >= 4
                                        + (i4 * L3_PAGETABLE_ENTRIES)
#endif
                                     )
                           * L2_PAGETABLE_ENTRIES) * L1_PAGETABLE_ENTRIES;
                    /* create a new l1e entry with the new type */
                    flags = p2m_type_to_flags(nt, _mfn(mfn));
                    l1e_content = p2m_l1e_from_pfn(mfn, flags);
                    p2m->write_p2m_entry(p2m, gfn, &l1e[i1],
                                         l1mfn, l1e_content, 1);
                }
                unmap_domain_page(l1e);
            }
            unmap_domain_page(l2e);
        }
#if CONFIG_PAGING_LEVELS >= 4
        unmap_domain_page(l3e);
    }
#endif

#if CONFIG_PAGING_LEVELS == 4
    unmap_domain_page(l4e);
#else /* CONFIG_PAGING_LEVELS == 3 */
    unmap_domain_page(l3e);
#endif
}
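/*
 * Aside (not part of the source): because the M2P may hold
 * SHARED_M2P_ENTRY, the walk above reconstructs the gfn purely from the
 * table indices, nesting a multiply-by-512 per level on x86-64. A tiny
 * standalone check of that index arithmetic; SK_ENTRIES is a local
 * stand-in for L1/L2/L3_PAGETABLE_ENTRIES.
 */
#include <stdio.h>

#define SK_ENTRIES 512UL /* L1/L2/L3_PAGETABLE_ENTRIES on x86-64 */

int main(void)
{
    unsigned long i4 = 0, i3 = 1, i2 = 2, i1 = 3;

    /* Same nesting as the walk: i1 + (i2 + (i3 + i4*512)*512)*512. */
    unsigned long gfn =
        i1 + (i2 + (i3 + i4 * SK_ENTRIES) * SK_ENTRIES) * SK_ENTRIES;

    printf("gfn = %#lx\n", gfn); /* 1*512*512 + 2*512 + 3 = 0x40403 */
    return 0;
}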
unsigned long hap_p2m_ga_to_gfn(GUEST_PAGING_LEVELS)(
    struct vcpu *v, struct p2m_domain *p2m, unsigned long cr3,
    paddr_t ga, uint32_t *pfec, unsigned int *page_order)
{
    uint32_t missing;
    mfn_t top_mfn;
    void *top_map;
    p2m_type_t p2mt;
    walk_t gw;
    unsigned long top_gfn;
    struct page_info *top_page;

    /* Get the top-level table's MFN */
    top_gfn = cr3 >> PAGE_SHIFT;
    top_page = get_page_from_gfn_p2m(p2m->domain, p2m, top_gfn,
                                     &p2mt, NULL, P2M_ALLOC | P2M_UNSHARE);
    if ( p2m_is_paging(p2mt) )
    {
        ASSERT(p2m_is_hostp2m(p2m));
        pfec[0] = PFEC_page_paged;
        if ( top_page )
            put_page(top_page);
        p2m_mem_paging_populate(p2m->domain, cr3 >> PAGE_SHIFT);
        return INVALID_GFN;
    }
    if ( p2m_is_shared(p2mt) )
    {
        pfec[0] = PFEC_page_shared;
        if ( top_page )
            put_page(top_page);
        return INVALID_GFN;
    }
    if ( !top_page )
    {
        pfec[0] &= ~PFEC_page_present;
        return INVALID_GFN;
    }
    top_mfn = _mfn(page_to_mfn(top_page));

    /* Map the top-level table and call the tree-walker */
    ASSERT(mfn_valid(mfn_x(top_mfn)));
    top_map = map_domain_page(mfn_x(top_mfn));
#if GUEST_PAGING_LEVELS == 3
    top_map += (cr3 & ~(PAGE_MASK | 31));
#endif
    missing = guest_walk_tables(v, p2m, ga, &gw, pfec[0], top_mfn, top_map);
    unmap_domain_page(top_map);
    put_page(top_page);

    /* Interpret the answer */
    if ( missing == 0 )
    {
        gfn_t gfn = guest_l1e_get_gfn(gw.l1e);
        struct page_info *page;
        page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), &p2mt,
                                     NULL, P2M_ALLOC | P2M_UNSHARE);
        if ( page )
            put_page(page);
        if ( p2m_is_paging(p2mt) )
        {
            ASSERT(p2m_is_hostp2m(p2m));
            pfec[0] = PFEC_page_paged;
            p2m_mem_paging_populate(p2m->domain, gfn_x(gfn));
            return INVALID_GFN;
        }
        if ( p2m_is_shared(p2mt) )
        {
            pfec[0] = PFEC_page_shared;
            return INVALID_GFN;
        }

        if ( page_order )
            *page_order = guest_walk_to_page_order(&gw);

        return gfn_x(gfn);
    }

    if ( missing & _PAGE_PRESENT )
        pfec[0] &= ~PFEC_page_present;

    if ( missing & _PAGE_INVALID_BITS )
        pfec[0] |= PFEC_reserved_bit;

    if ( missing & _PAGE_PAGED )
        pfec[0] = PFEC_page_paged;

    if ( missing & _PAGE_SHARED )
        pfec[0] = PFEC_page_shared;

    return INVALID_GFN;
}
/*
 * pgd3val: this is the value of init_mm.pgd[3] in a PV guest. It is optional.
 *          This is to assist debug of modules in the guest. The kernel address
 *          space seems to always be mapped, but modules are not necessarily
 *          mapped in any arbitrary guest cr3 that we pick if pgd3val is 0.
 *          Modules should always be addressable if we use cr3 from init_mm.
 *          Since pgd3val is already a pgd value, cr3->pgd[3], we just need to
 *          do 2 level lookups.
 *
 * NOTE: 4 level paging works for 32-bit PAE guests also because the cpu runs
 *       in IA32-e mode.
 * Returns: mfn for the given (pv guest) vaddr
 */
static mfn_t
dbg_pv_va2mfn(dbgva_t vaddr, struct domain *dp, uint64_t pgd3val)
{
    l4_pgentry_t l4e, *l4t;
    l3_pgentry_t l3e, *l3t;
    l2_pgentry_t l2e, *l2t;
    l1_pgentry_t l1e, *l1t;
    unsigned long cr3 = (pgd3val ? pgd3val : dp->vcpu[0]->arch.cr3);
    mfn_t mfn = _mfn(cr3 >> PAGE_SHIFT);

    DBGP2("vaddr:%lx domid:%d cr3:%lx pgd3:%lx\n", vaddr, dp->domain_id,
          cr3, pgd3val);

    if ( pgd3val == 0 )
    {
        l4t = map_domain_page(mfn);
        l4e = l4t[l4_table_offset(vaddr)];
        unmap_domain_page(l4t);
        mfn = _mfn(l4e_get_pfn(l4e));
        DBGP2("l4t:%p l4to:%lx l4e:%lx mfn:%#"PRI_mfn"\n", l4t,
              l4_table_offset(vaddr), l4e, mfn_x(mfn));
        if ( !(l4e_get_flags(l4e) & _PAGE_PRESENT) )
        {
            DBGP1("l4 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
            return INVALID_MFN;
        }

        l3t = map_domain_page(mfn);
        l3e = l3t[l3_table_offset(vaddr)];
        unmap_domain_page(l3t);
        mfn = _mfn(l3e_get_pfn(l3e));
        DBGP2("l3t:%p l3to:%lx l3e:%lx mfn:%#"PRI_mfn"\n", l3t,
              l3_table_offset(vaddr), l3e, mfn_x(mfn));
        if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) ||
             (l3e_get_flags(l3e) & _PAGE_PSE) )
        {
            DBGP1("l3 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
            return INVALID_MFN;
        }
    }

    l2t = map_domain_page(mfn);
    l2e = l2t[l2_table_offset(vaddr)];
    unmap_domain_page(l2t);
    mfn = _mfn(l2e_get_pfn(l2e));
    DBGP2("l2t:%p l2to:%lx l2e:%lx mfn:%#"PRI_mfn"\n", l2t,
          l2_table_offset(vaddr), l2e, mfn_x(mfn));
    if ( !(l2e_get_flags(l2e) & _PAGE_PRESENT) ||
         (l2e_get_flags(l2e) & _PAGE_PSE) )
    {
        DBGP1("l2 PAGE not present. vaddr:%lx cr3:%lx\n", vaddr, cr3);
        return INVALID_MFN;
    }

    l1t = map_domain_page(mfn);
    l1e = l1t[l1_table_offset(vaddr)];
    unmap_domain_page(l1t);
    mfn = _mfn(l1e_get_pfn(l1e));
    DBGP2("l1t:%p l1to:%lx l1e:%lx mfn:%#"PRI_mfn"\n", l1t,
          l1_table_offset(vaddr), l1e, mfn_x(mfn));

    return mfn_valid(mfn_x(mfn)) ? mfn : INVALID_MFN;
}