/*
 * Repoint the p2m entry for @gfn at the shared frame @mfn.
 *
 * The caller must already have converted the gfn to a shared type
 * (asserted below).  The M2P entry of the frame being replaced is only
 * invalidated once no shared type references remain on that page.
 * Returns the result of set_p2m_entry() (0 indicates failure, which is
 * logged via gdprintk).
 */
int set_shared_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    p2m_access_t cur_access;
    p2m_type_t cur_type;
    mfn_t prev_mfn;
    unsigned long type_info;
    int ret;

    if ( !paging_mode_translate(p2m->domain) )
        return 0;

    gfn_lock(p2m, gfn, 0);

    prev_mfn = p2m->get_entry(p2m, gfn, &cur_type, &cur_access, 0, NULL);

    /* At the moment we only allow p2m change if gfn has already been made
     * sharable first */
    ASSERT(p2m_is_shared(cur_type));
    ASSERT(mfn_valid(prev_mfn));

    /* Set the m2p entry to invalid only if there are no further type
     * refs to this page as shared */
    type_info = read_atomic(&(mfn_to_page(prev_mfn)->u.inuse.type_info));
    if ( (type_info & PGT_type_mask) != PGT_shared_page ||
         (type_info & PGT_count_mask) == 0 )
        set_gpfn_from_mfn(mfn_x(prev_mfn), INVALID_M2P_ENTRY);

    P2M_DEBUG("set shared %lx %lx\n", gfn, mfn_x(mfn));
    ret = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_ram_shared,
                        p2m->default_access);

    gfn_unlock(p2m, gfn, 0);

    if ( ret == 0 )
        gdprintk(XENLOG_ERR,
            "set_shared_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
            mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &cur_type)));

    return ret;
}
/*
 * Map machine frame @mfn at @gfn as direct MMIO in domain @d.
 *
 * A grant-mapped frame must never be clobbered this way: that path
 * crashes the domain.  A replaced RAM frame has its M2P entry
 * invalidated first.  Returns the result of set_p2m_entry() (0 indicates
 * failure, which is logged via gdprintk).
 */
int set_mmio_p2m_entry(struct domain *d, unsigned long gfn, mfn_t mfn)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    p2m_access_t cur_access;
    p2m_type_t cur_type;
    mfn_t prev_mfn;
    int ret;

    if ( !paging_mode_translate(d) )
        return 0;

    gfn_lock(p2m, gfn, 0);

    prev_mfn = p2m->get_entry(p2m, gfn, &cur_type, &cur_access, 0, NULL);
    if ( p2m_is_grant(cur_type) )
    {
        /* Clobbering a grant mapping is a fatal guest error. */
        p2m_unlock(p2m);
        domain_crash(d);
        return 0;
    }
    if ( p2m_is_ram(cur_type) )
    {
        /* Drop the M2P entry of the RAM frame being replaced. */
        ASSERT(mfn_valid(prev_mfn));
        set_gpfn_from_mfn(mfn_x(prev_mfn), INVALID_M2P_ENTRY);
    }

    P2M_DEBUG("set mmio %lx %lx\n", gfn, mfn_x(mfn));
    ret = set_p2m_entry(p2m, gfn, mfn, PAGE_ORDER_4K, p2m_mmio_direct,
                        p2m->default_access);

    gfn_unlock(p2m, gfn, 0);

    if ( ret == 0 )
        gdprintk(XENLOG_ERR,
            "set_mmio_p2m_entry: set_p2m_entry failed! mfn=%08lx\n",
            mfn_x(get_gfn_query_unlocked(p2m->domain, gfn, &cur_type)));

    return ret;
}
static void p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn, unsigned long mfn, unsigned int page_order) { unsigned long i; mfn_t mfn_return; p2m_type_t t; p2m_access_t a; if ( !paging_mode_translate(p2m->domain) ) { if ( need_iommu(p2m->domain) ) for ( i = 0; i < (1 << page_order); i++ ) iommu_unmap_page(p2m->domain, mfn + i); return; } ASSERT(gfn_locked_by_me(p2m, gfn)); P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn, mfn); if ( mfn_valid(_mfn(mfn)) ) { for ( i = 0; i < (1UL << page_order); i++ ) { mfn_return = p2m->get_entry(p2m, gfn + i, &t, &a, 0, NULL); if ( !p2m_is_grant(t) && !p2m_is_shared(t) ) set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY); ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) ); } } set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order, p2m_invalid, p2m->default_access); }
// Find the next level's P2M entry, checking for out-of-range gfn's... // Returns NULL on error. // static l1_pgentry_t * p2m_find_entry(void *table, unsigned long *gfn_remainder, unsigned long gfn, uint32_t shift, uint32_t max) { u32 index; index = *gfn_remainder >> shift; if ( index >= max ) { P2M_DEBUG("gfn=%#lx out of range " "(gfn_remainder=%#lx shift=%d index=%#x max=%#x)\n", gfn, *gfn_remainder, shift, index, max); return NULL; } *gfn_remainder &= (1 << shift) - 1; return (l1_pgentry_t *)table + index; }
/*
 * Handle possibly necessary P2M type re-calculation (U flag clear for a
 * present entry) for the entries in the page table hierarchy for the given
 * GFN. Propagate the re-calculation flag down to the next page table level
 * for entries not involved in the translation of the given GFN.
 */
static int do_recalc(struct p2m_domain *p2m, unsigned long gfn)
{
    void *table;
    unsigned long gfn_remainder = gfn;
    unsigned int level = 4;
    l1_pgentry_t *pent;
    int err = 0;

    /* Walk starts from the p2m's top level page table. */
    table = map_domain_page(pagetable_get_mfn(p2m_get_pagetable(p2m)));
    while ( --level )
    {
        unsigned long remainder = gfn_remainder;

        /* Probe with a copy of the remainder; the real one is only
         * consumed by p2m_next_level() below. */
        pent = p2m_find_entry(table, &remainder, gfn,
                              level * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER);
        if ( !pent || !(l1e_get_flags(*pent) & _PAGE_PRESENT) )
            goto out;

        if ( l1e_get_flags(*pent) & _PAGE_PSE )
        {
            unsigned long mask = ~0UL << (level * PAGETABLE_ORDER);

            /* Stop at this superpage when it needs no recalc, is of a
             * non-changeable type, or the whole covered range is uniform
             * wrt log-dirty (p2m_is_logdirty_range() >= 0). */
            if ( !needs_recalc(l1, *pent) ||
                 !p2m_is_changeable(p2m_flags_to_type(l1e_get_flags(*pent))) ||
                 p2m_is_logdirty_range(p2m, gfn & mask, gfn | ~mask) >= 0 )
                break;
        }

        err = p2m_next_level(p2m, &table, &gfn_remainder, gfn,
                             level * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER,
                             pgt[level - 1], 0);
        if ( err )
            goto out;

        if ( needs_recalc(l1, *pent) )
        {
            l1_pgentry_t e = *pent, *ptab = table;
            unsigned int i;

            if ( !valid_recalc(l1, e) )
                P2M_DEBUG("bogus recalc state at d%d:%lx:%u\n",
                          p2m->domain->domain_id, gfn, level);
            /* Push the recalc flag down onto every present next-level
             * entry before clearing it on this one. */
            remainder = gfn_remainder;
            for ( i = 0; i < (1 << PAGETABLE_ORDER); ++i )
            {
                l1_pgentry_t ent = ptab[i];

                if ( (l1e_get_flags(ent) & _PAGE_PRESENT) &&
                     !needs_recalc(l1, ent) )
                {
                    set_recalc(l1, ent);
                    /* gfn - remainder yields the first gfn covered by
                     * slot i (remainder shrinks by one slot's span per
                     * iteration). */
                    p2m->write_p2m_entry(p2m, gfn - remainder, &ptab[i],
                                         ent, level);
                }
                remainder -= 1UL << ((level - 1) * PAGETABLE_ORDER);
            }
            /* Ensure the propagated flags are visible before dropping
             * the flag at this level. */
            smp_wmb();
            clear_recalc(l1, e);
            p2m->write_p2m_entry(p2m, gfn, pent, e, level + 1);
        }
        unmap_domain_page((void *)((unsigned long)pent & PAGE_MASK));
    }

    /* Leaf handling: recalculate the type of the entry we stopped at. */
    pent = p2m_find_entry(table, &gfn_remainder, gfn,
                          level * PAGETABLE_ORDER, 1 << PAGETABLE_ORDER);
    if ( pent && (l1e_get_flags(*pent) & _PAGE_PRESENT) &&
         needs_recalc(l1, *pent) )
    {
        l1_pgentry_t e = *pent;

        if ( !valid_recalc(l1, e) )
            P2M_DEBUG("bogus recalc leaf at d%d:%lx:%u\n",
                      p2m->domain->domain_id, gfn, level);
        if ( p2m_is_changeable(p2m_flags_to_type(l1e_get_flags(e))) )
        {
            unsigned long mask = ~0UL << (level * PAGETABLE_ORDER);
            /* Pick logdirty vs plain RAM based on the range the leaf
             * covers. */
            p2m_type_t p2mt = p2m_is_logdirty_range(p2m, gfn & mask,
                                                    gfn | ~mask)
                              ? p2m_ram_logdirty : p2m_ram_rw;
            unsigned long mfn = l1e_get_pfn(e);
            unsigned long flags = p2m_type_to_flags(p2mt, _mfn(mfn));

            if ( level )
            {
                if ( flags & _PAGE_PAT )
                {
                    /* For superpages PAT moves to _PAGE_PSE_PAT; the
                     * build assert documents the bit aliasing relied
                     * upon here. */
                    BUILD_BUG_ON(_PAGE_PAT != _PAGE_PSE);
                    mfn |= _PAGE_PSE_PAT >> PAGE_SHIFT;
                }
                else
                    mfn &= ~(_PAGE_PSE_PAT >> PAGE_SHIFT);
                flags |= _PAGE_PSE;
            }
            /* NOTE(review): the remainder of do_recalc() (writing the
             * recalculated entry, the out: label, unmap and return) is
             * not present in this chunk - body appears truncated here. */
/*
 * Establish the two-way mapping (p2m: gfn -> mfn, and m2p: mfn -> gfn)
 * for a 2^page_order range with type @t in domain @d.
 *
 * Returns 0 on success; -EINVAL on grant collisions, shared-frame abuse
 * or p2m update failure; otherwise propagates the error from
 * mem_sharing_unshare_page() or iommu_map_page().  For a non-translated
 * domain only the IOMMU identity mapping is maintained.
 */
int
guest_physmap_add_entry(struct domain *d, unsigned long gfn,
                        unsigned long mfn, unsigned int page_order,
                        p2m_type_t t)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    unsigned long i, ogfn;
    p2m_type_t ot;
    p2m_access_t a;
    mfn_t omfn;
    int pod_count = 0;
    int rc = 0;

    if ( !paging_mode_translate(d) )
    {
        /* No p2m to maintain: just mirror writable RAM into the IOMMU,
         * rolling back partial work on failure. */
        if ( need_iommu(d) && t == p2m_ram_rw )
        {
            for ( i = 0; i < (1 << page_order); i++ )
            {
                rc = iommu_map_page(
                    d, mfn + i, mfn + i, IOMMUF_readable|IOMMUF_writable);
                if ( rc != 0 )
                {
                    while ( i-- > 0 )
                        iommu_unmap_page(d, mfn + i);
                    return rc;
                }
            }
        }
        return 0;
    }

    p2m_lock(p2m);

    P2M_DEBUG("adding gfn=%#lx mfn=%#lx\n", gfn, mfn);

    /* First, remove m->p mappings for existing p->m mappings */
    for ( i = 0; i < (1UL << page_order); i++ )
    {
        omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
        if ( p2m_is_shared(ot) )
        {
            /* Do an unshare to cleanly take care of all corner
             * cases. */
            /* NB: this inner rc deliberately shadows the outer one; on
             * failure the unshare error is what gets returned below. */
            int rc;
            rc = mem_sharing_unshare_page(p2m->domain, gfn + i, 0);
            if ( rc )
            {
                p2m_unlock(p2m);
                /* NOTE: Should a guest domain bring this upon itself,
                 * there is not a whole lot we can do. We are buried
                 * deep in locks from most code paths by now. So, fail
                 * the call and don't try to sleep on a wait queue
                 * while placing the mem event.
                 *
                 * However, all current (changeset 3432abcf9380) code
                 * paths avoid this unsavoury situation. For now.
                 *
                 * Foreign domains are okay to place an event as they
                 * won't go to sleep. */
                (void)mem_sharing_notify_enomem(p2m->domain, gfn + i, 0);
                return rc;
            }
            /* Re-read the entry now that the frame is private again. */
            omfn = p2m->get_entry(p2m, gfn + i, &ot, &a, 0, NULL);
            ASSERT(!p2m_is_shared(ot));
        }
        if ( p2m_is_grant(ot) )
        {
            /* Really shouldn't be unmapping grant maps this way */
            domain_crash(d);
            p2m_unlock(p2m);
            return -EINVAL;
        }
        else if ( p2m_is_ram(ot) && !p2m_is_paged(ot) )
        {
            /* Plain RAM being replaced: drop its m2p entry. */
            ASSERT(mfn_valid(omfn));
            set_gpfn_from_mfn(mfn_x(omfn), INVALID_M2P_ENTRY);
        }
        else if ( ot == p2m_populate_on_demand )
        {
            /* Count how many PoD entries we'll be replacing if successful */
            pod_count++;
        }
        else if ( p2m_is_paging(ot) && (ot != p2m_ram_paging_out) )
        {
            /* We're plugging a hole in the physmap where a paged out
             * page was */
            atomic_dec(&d->paged_pages);
        }
    }

    /* Then, look for m->p mappings for this range and deal with them */
    for ( i = 0; i < (1UL << page_order); i++ )
    {
        if ( page_get_owner(mfn_to_page(_mfn(mfn + i))) == dom_cow )
        {
            /* This is no way to add a shared page to your physmap! */
            gdprintk(XENLOG_ERR, "Adding shared mfn %lx directly to dom %hu "
                     "physmap not allowed.\n", mfn+i, d->domain_id);
            p2m_unlock(p2m);
            return -EINVAL;
        }
        if ( page_get_owner(mfn_to_page(_mfn(mfn + i))) != d )
            continue;
        ogfn = mfn_to_gfn(d, _mfn(mfn+i));
        if ( (ogfn != INVALID_M2P_ENTRY) && (ogfn != gfn + i) )
        {
            /* This machine frame is already mapped at another physical
             * address */
            P2M_DEBUG("aliased! mfn=%#lx, old gfn=%#lx, new gfn=%#lx\n",
                      mfn + i, ogfn, gfn + i);
            omfn = p2m->get_entry(p2m, ogfn, &ot, &a, 0, NULL);
            if ( p2m_is_ram(ot) && !p2m_is_paged(ot) )
            {
                ASSERT(mfn_valid(omfn));
                P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n",
                          ogfn , mfn_x(omfn));
                if ( mfn_x(omfn) == (mfn + i) )
                    /* Stale alias confirmed: tear it down (4k page). */
                    p2m_remove_page(p2m, ogfn, mfn + i, 0);
            }
        }
    }

    /* Now, actually do the two-way mapping */
    if ( mfn_valid(_mfn(mfn)) )
    {
        if ( !set_p2m_entry(p2m, gfn, _mfn(mfn), page_order, t,
                            p2m->default_access) )
        {
            rc = -EINVAL;
            goto out; /* Failed to update p2m, bail without updating m2p.
                       */
        }
        if ( !p2m_is_grant(t) )
        {
            /* Grant frames keep their original m2p entries; everything
             * else gets the forward mapping recorded. */
            for ( i = 0; i < (1UL << page_order); i++ )
                set_gpfn_from_mfn(mfn+i, gfn+i);
        }
    }
    else
    {
        gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
                 gfn, mfn);
        if ( !set_p2m_entry(p2m, gfn, _mfn(INVALID_MFN), page_order,
                            p2m_invalid, p2m->default_access) )
            rc = -EINVAL;
        else
        {
            /* Invalid entries replaced the counted PoD entries; keep the
             * PoD accounting consistent. */
            pod_lock(p2m);
            p2m->pod.entry_count -= pod_count;
            BUG_ON(p2m->pod.entry_count < 0);
            pod_unlock(p2m);
        }
    }

out:
    p2m_unlock(p2m);
    return rc;
}