static void decrease_reservation(struct memop_args *a)
{
    unsigned long i, j;
    xen_pfn_t gmfn;

    if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
            goto out;

        for ( j = 0; j < (1 << a->extent_order); j++ )
            if ( !guest_remove_page(a->domain, gmfn + j) )
                goto out;
    }

 out:
    a->nr_done = i;
}
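/*
 * Hedged, self-contained sketch (not Xen code) of the continuation pattern
 * used by decrease_reservation and its siblings above: work proceeds in a
 * loop, a periodic preemption check can cut it short, and the progress is
 * recorded in nr_done so a re-issued call resumes where it stopped.  All
 * names here (memop_state, preempt_check, process_extents) are illustrative
 * assumptions, not Xen identifiers.
 */
#include <stdbool.h>
#include <stdio.h>

struct memop_state {
    unsigned long nr_done;     /* extents already processed */
    unsigned long nr_extents;  /* total extents requested */
    bool preempted;            /* set when the loop yields early */
};

/* Stand-in for hypercall_preempt_check(): pretend we must yield every 4 items. */
static bool preempt_check(unsigned long i)
{
    return (i % 4) == 3;
}

static void process_extents(struct memop_state *s)
{
    unsigned long i;

    for ( i = s->nr_done; i < s->nr_extents; i++ )
    {
        if ( i != s->nr_done && preempt_check(i) )
        {
            s->preempted = true;   /* caller will re-issue the operation */
            break;
        }
        printf("processing extent %lu\n", i);
    }

    s->nr_done = i;  /* continuation restarts from here */
}

int main(void)
{
    struct memop_state s = { .nr_done = 0, .nr_extents = 10, .preempted = false };

    do {
        s.preempted = false;
        process_extents(&s);   /* each call models one (re-)issued hypercall */
    } while ( s.preempted );

    return 0;
}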
static int relinquish_memory(struct domain *d, struct page_list_head *list)
{
    struct page_info *page, *tmp;
    int ret = 0;

    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
    spin_lock_recursive(&d->page_alloc_lock);

    page_list_for_each_safe( page, tmp, list )
    {
        /* Grab a reference to the page so it won't disappear from under us. */
        if ( unlikely(!get_page(page, d)) )
            /* Couldn't get a reference -- someone is freeing this page. */
            BUG();

        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);

        put_page(page);

        if ( hypercall_preempt_check() )
        {
            ret = -ERESTART;
            goto out;
        }
    }

 out:
    spin_unlock_recursive(&d->page_alloc_lock);
    return ret;
}
static void decrease_reservation(struct memop_args *a)
{
    unsigned long i, j;
    xen_pfn_t gmfn;

    if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) ||
         a->extent_order > MAX_ORDER )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( i != a->nr_done && hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
            goto out;

        if ( tb_init_done )
        {
            struct {
                u64 gfn;
                int d:16,order:16;
            } t;

            t.gfn = gmfn;
            t.d = a->domain->domain_id;
            t.order = a->extent_order;

            __trace_var(TRC_MEM_DECREASE_RESERVATION, 0, sizeof(t), &t);
        }

        /* See if populate-on-demand wants to handle this */
        if ( is_hvm_domain(a->domain)
             && p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) )
            continue;

        /*
         * Since some ARM platforms lack an IOMMU, a domain with a DMA-capable
         * device must get back the same pfn when the populate_physmap
         * hypercall is later called, so leave direct-mapped domains alone.
         */
        if ( is_domain_direct_mapped(a->domain) )
            continue;

        for ( j = 0; j < (1 << a->extent_order); j++ )
            if ( !guest_remove_page(a->domain, gmfn + j) )
                goto out;
    }

 out:
    a->nr_done = i;
}
/*
 * Set access type for a region of gfns.
 * If gfn == INVALID_GFN, sets the default access type.
 */
long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
                        uint32_t start, uint32_t mask, xenmem_access_t access,
                        unsigned int altp2m_idx)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d), *ap2m = NULL;
    p2m_access_t a;
    unsigned long gfn_l;
    long rc = 0;

    /* altp2m view 0 is treated as the hostp2m */
    if ( altp2m_idx )
    {
        if ( altp2m_idx >= MAX_ALTP2M ||
             d->arch.altp2m_eptp[altp2m_idx] == mfn_x(INVALID_MFN) )
            return -EINVAL;

        ap2m = d->arch.altp2m_p2m[altp2m_idx];
    }

    if ( !xenmem_access_to_p2m_access(p2m, access, &a) )
        return -EINVAL;

    /* If request to set default access. */
    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        p2m->default_access = a;
        return 0;
    }

    p2m_lock(p2m);
    if ( ap2m )
        p2m_lock(ap2m);

    for ( gfn_l = gfn_x(gfn) + start; nr > start; ++gfn_l )
    {
        rc = set_mem_access(d, p2m, ap2m, a, _gfn(gfn_l));

        if ( rc )
            break;

        /* Check for continuation if it's not the last iteration. */
        if ( nr > ++start && !(start & mask) && hypercall_preempt_check() )
        {
            rc = start;
            break;
        }
    }

    if ( ap2m )
        p2m_unlock(ap2m);
    p2m_unlock(p2m);

    return rc;
}
static void populate_physmap(struct memop_args *a)
{
    struct page_info *page;
    unsigned long i, j;
    xen_pfn_t gpfn, mfn;
    struct domain *d = a->domain;

    if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    if ( (a->extent_order != 0) &&
         !multipage_allocation_permitted(current->domain) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
            goto out;

        page = alloc_domheap_pages(d, a->extent_order, a->memflags);
        if ( unlikely(page == NULL) )
        {
            gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
                     "id=%d memflags=%x (%ld of %d)\n",
                     a->extent_order, d->domain_id, a->memflags,
                     i, a->nr_extents);
            goto out;
        }

        mfn = page_to_mfn(page);
        guest_physmap_add_page(d, gpfn, mfn, a->extent_order);

        if ( !paging_mode_translate(d) )
        {
            for ( j = 0; j < (1 << a->extent_order); j++ )
                set_gpfn_from_mfn(mfn + j, gpfn + j);

            /* Inform the domain of the new page's machine address. */
            if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
                goto out;
        }
    }

 out:
    a->nr_done = i;
}
static void decrease_reservation(struct memop_args *a)
{
    unsigned long i, j;
    xen_pfn_t gmfn;

    if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    mcd_mem_dec_trap(a->domain, (a->nr_extents - a->nr_done));

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gmfn, a->extent_list, i, 1)) )
            goto out;

        if ( tb_init_done )
        {
            struct {
                u64 gfn;
                int d:16,order:16;
            } t;

            t.gfn = gmfn;
            t.d = a->domain->domain_id;
            t.order = a->extent_order;

            __trace_var(TRC_MEM_DECREASE_RESERVATION, 0, sizeof(t), &t);
        }

        /* See if populate-on-demand wants to handle this */
        if ( is_hvm_domain(a->domain)
             && p2m_pod_decrease_reservation(a->domain, gmfn, a->extent_order) )
            continue;

        for ( j = 0; j < (1 << a->extent_order); j++ )
            if ( !guest_remove_page(a->domain, gmfn + j) )
                goto out;
    }

 out:
    a->nr_done = i;

    mcd_mem_upt_trap(a->domain);
}
static void increase_reservation(struct memop_args *a)
{
    struct page_info *page;
    unsigned long i;
    xen_pfn_t mfn;
    struct domain *d = a->domain;

    if ( !guest_handle_is_null(a->extent_list) &&
         !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    if ( !multipage_allocation_permitted(current->domain, a->extent_order) )
        return;

    mcd_mem_inc_trap(a->domain, (a->nr_extents - a->nr_done));

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        page = alloc_domheap_pages(d, a->extent_order, a->memflags);
        if ( unlikely(page == NULL) )
        {
            gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
                     "id=%d memflags=%x (%ld of %d)\n",
                     a->extent_order, d->domain_id, a->memflags,
                     i, a->nr_extents);
            goto out;
        }

        /* Inform the domain of the new page's machine address. */
        if ( !guest_handle_is_null(a->extent_list) )
        {
            mfn = page_to_mfn(page);
            if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
                goto out;
        }
    }

 out:
    a->nr_done = i;

    mcd_mem_upt_trap(d);
}
static int xenmem_add_to_physmap_range(struct domain *d,
                                       struct xen_add_to_physmap_range *xatpr)
{
    int rc;

    /* Process entries in reverse order to allow continuations */
    while ( xatpr->size > 0 )
    {
        xen_ulong_t idx;
        xen_pfn_t gpfn;

        rc = copy_from_guest_offset(&idx, xatpr->idxs, xatpr->size-1, 1);
        if ( rc < 0 )
            goto out;

        rc = copy_from_guest_offset(&gpfn, xatpr->gpfns, xatpr->size-1, 1);
        if ( rc < 0 )
            goto out;

        rc = xenmem_add_to_physmap_one(d, xatpr->space,
                                       xatpr->foreign_domid,
                                       idx, gpfn);

        rc = copy_to_guest_offset(xatpr->errs, xatpr->size-1, &rc, 1);
        if ( rc < 0 )
            goto out;

        xatpr->size--;

        /* Check for continuation if it's not the last iteration */
        if ( xatpr->size > 0 && hypercall_preempt_check() )
        {
            rc = -EAGAIN;
            goto out;
        }
    }

    rc = 0;

 out:
    return rc;
}
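/*
 * Hedged, self-contained sketch (not Xen code) of the reverse-order
 * continuation idiom used by xenmem_add_to_physmap_range above: entries are
 * consumed from the tail, so the remaining "size" field is itself the resume
 * cursor and -EAGAIN simply means "call again with the updated structure".
 * All names below (range_op, need_preempt, process_range) are illustrative
 * assumptions.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct range_op {
    unsigned int size;   /* entries still to process (also the cursor) */
    const int *values;   /* backing array of per-entry inputs */
};

/* Stand-in for hypercall_preempt_check(): yield after every 2 entries. */
static bool need_preempt(unsigned int remaining)
{
    return (remaining % 2) == 0;
}

static int process_range(struct range_op *op)
{
    while ( op->size > 0 )
    {
        /* Work on the last remaining entry, mirroring the size-1 indexing. */
        printf("handling entry %u (value %d)\n",
               op->size - 1, op->values[op->size - 1]);

        op->size--;

        /* Not the last iteration and a preemption is pending: bail out. */
        if ( op->size > 0 && need_preempt(op->size) )
            return -EAGAIN;
    }

    return 0;
}

int main(void)
{
    static const int values[] = { 10, 20, 30, 40, 50 };
    struct range_op op = { .size = 5, .values = values };
    int rc;

    /* The caller re-issues the operation for as long as it asks to be. */
    while ( (rc = process_range(&op)) == -EAGAIN )
        printf("preempted with %u entries left, re-issuing\n", op.size);

    return rc;
}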
/* Set the pool of pages to the required number of pages.
 * Returns 0 for success, non-zero for failure. */
static unsigned int hap_set_allocation(struct domain *d, unsigned int pages,
                                       int *preempted)
{
    struct page_info *pg;

    ASSERT(hap_locked_by_me(d));

    while ( d->arch.paging.hap.total_pages != pages )
    {
        if ( d->arch.paging.hap.total_pages < pages )
        {
            /* Need to allocate more memory from domheap */
            pg = alloc_domheap_page(NULL);
            if ( pg == NULL )
            {
                HAP_PRINTK("failed to allocate hap pages.\n");
                return -ENOMEM;
            }
            d->arch.paging.hap.free_pages++;
            d->arch.paging.hap.total_pages++;
            list_add_tail(&pg->list, &d->arch.paging.hap.freelist);
        }
        else if ( d->arch.paging.hap.total_pages > pages )
        {
            /* Need to return memory to domheap */
            ASSERT(!list_empty(&d->arch.paging.hap.freelist));
            pg = list_entry(d->arch.paging.hap.freelist.next,
                            struct page_info, list);
            list_del(&pg->list);
            d->arch.paging.hap.free_pages--;
            d->arch.paging.hap.total_pages--;
            pg->count_info = 0;
            free_domheap_page(pg);
        }

        /* Check to see if we need to yield and try again */
        if ( preempted && hypercall_preempt_check() )
        {
            *preempted = 1;
            return 0;
        }
    }

    return 0;
}
static int relinquish_memory(struct domain *d, struct page_list_head *list)
{
    struct page_info *page, *tmp;
    int ret = 0;

    /* Use a recursive lock, as we may enter 'free_domheap_page'. */
    spin_lock_recursive(&d->page_alloc_lock);

    page_list_for_each_safe( page, tmp, list )
    {
        /* Grab a reference to the page so it won't disappear from under us. */
        if ( unlikely(!get_page(page, d)) )
            /*
             * Couldn't get a reference -- someone is freeing this page and
             * has already committed to doing so, so no more to do here.
             *
             * Note that the page must be left on the list, a list_del
             * here will clash with the list_del done by the other
             * party in the race and corrupt the list head.
             */
            continue;

        if ( test_and_clear_bit(_PGC_allocated, &page->count_info) )
            put_page(page);

        put_page(page);

        if ( hypercall_preempt_check() )
        {
            ret = -ERESTART;
            goto out;
        }
    }

 out:
    spin_unlock_recursive(&d->page_alloc_lock);
    return ret;
}
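/*
 * Hedged sketch (not Xen code) of the pattern behind relinquish_memory above:
 * the work list itself shrinks as items are released, so no explicit cursor
 * is needed -- returning -ERESTART after a preemption check and simply
 * re-invoking the function resumes roughly where it stopped.  All names
 * (node, relinquish_list, need_preempt) are illustrative assumptions.
 */
#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#ifndef ERESTART
#define ERESTART 85   /* not in POSIX; value matches Linux asm-generic/errno.h */
#endif

struct node {
    int id;
    struct node *next;
};

/* Stand-in for hypercall_preempt_check(): yield after every 3 releases. */
static bool need_preempt(unsigned int done)
{
    return (done % 3) == 0;
}

static int relinquish_list(struct node **head)
{
    unsigned int done = 0;

    while ( *head != NULL )
    {
        struct node *n = *head;

        /* Release the item and unlink it, so a retry never sees it again. */
        printf("releasing node %d\n", n->id);
        *head = n->next;
        free(n);

        if ( need_preempt(++done) && *head != NULL )
            return -ERESTART;   /* caller re-invokes with the shorter list */
    }

    return 0;
}

int main(void)
{
    struct node *head = NULL;

    /* Build a small list 7 -> 6 -> ... -> 0. */
    for ( int i = 0; i < 8; i++ )
    {
        struct node *n = malloc(sizeof(*n));

        if ( n == NULL )
            return 1;
        n->id = i;
        n->next = head;
        head = n;
    }

    while ( relinquish_list(&head) == -ERESTART )
        printf("preempted, re-invoking\n");

    return 0;
}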
static void populate_physmap(struct memop_args *a)
{
    struct page_info *page;
    unsigned int i, j;
    xen_pfn_t gpfn, mfn;
    struct domain *d = a->domain;

    if ( !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    if ( a->extent_order > (a->memflags & MEMF_populate_on_demand ? MAX_ORDER :
                            max_order(current->domain)) )
        return;

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( i != a->nr_done && hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        if ( unlikely(__copy_from_guest_offset(&gpfn, a->extent_list, i, 1)) )
            goto out;

        if ( a->memflags & MEMF_populate_on_demand )
        {
            if ( guest_physmap_mark_populate_on_demand(d, gpfn,
                                                       a->extent_order) < 0 )
                goto out;
        }
        else
        {
            if ( is_domain_direct_mapped(d) )
            {
                mfn = gpfn;

                for ( j = 0; j < (1U << a->extent_order); j++, mfn++ )
                {
                    if ( !mfn_valid(mfn) )
                    {
                        gdprintk(XENLOG_INFO, "Invalid mfn %#"PRI_xen_pfn"\n",
                                 mfn);
                        goto out;
                    }

                    page = mfn_to_page(mfn);
                    if ( !get_page(page, d) )
                    {
                        gdprintk(XENLOG_INFO,
                                 "mfn %#"PRI_xen_pfn" doesn't belong to d%d\n",
                                 mfn, d->domain_id);
                        goto out;
                    }
                    put_page(page);
                }

                mfn = gpfn;
                page = mfn_to_page(mfn);
            }
            else
            {
                page = alloc_domheap_pages(d, a->extent_order, a->memflags);

                if ( unlikely(!page) )
                {
                    if ( !opt_tmem || a->extent_order )
                        gdprintk(XENLOG_INFO,
                                 "Could not allocate order=%u extent: id=%d memflags=%#x (%u of %u)\n",
                                 a->extent_order, d->domain_id, a->memflags,
                                 i, a->nr_extents);
                    goto out;
                }

                mfn = page_to_mfn(page);
            }

            guest_physmap_add_page(d, gpfn, mfn, a->extent_order);

            if ( !paging_mode_translate(d) )
            {
                for ( j = 0; j < (1U << a->extent_order); j++ )
                    set_gpfn_from_mfn(mfn + j, gpfn + j);

                /* Inform the domain of the new page's machine address. */
                if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
                    goto out;
            }
        }
    }

 out:
    a->nr_done = i;
}
/*
 * Set access type for a region of gfns.
 * If gfn == INVALID_GFN, sets the default access type.
 */
long p2m_set_mem_access(struct domain *d, gfn_t gfn, uint32_t nr,
                        uint32_t start, uint32_t mask, xenmem_access_t access,
                        unsigned int altp2m_idx)
{
    struct p2m_domain *p2m = p2m_get_hostp2m(d);
    p2m_access_t a;
    unsigned int order;
    long rc = 0;

    static const p2m_access_t memaccess[] = {
#define ACCESS(ac) [XENMEM_access_##ac] = p2m_access_##ac
        ACCESS(n),
        ACCESS(r),
        ACCESS(w),
        ACCESS(rw),
        ACCESS(x),
        ACCESS(rx),
        ACCESS(wx),
        ACCESS(rwx),
        ACCESS(rx2rw),
        ACCESS(n2rwx),
#undef ACCESS
    };

    switch ( access )
    {
    case 0 ... ARRAY_SIZE(memaccess) - 1:
        a = memaccess[access];
        break;
    case XENMEM_access_default:
        a = p2m->default_access;
        break;
    default:
        return -EINVAL;
    }

    /*
     * Flip mem_access_enabled to true when a permission is set, so as to
     * prevent allocating or inserting super-pages.
     */
    p2m->mem_access_enabled = true;

    /* If request to set default access. */
    if ( gfn_eq(gfn, INVALID_GFN) )
    {
        p2m->default_access = a;
        return 0;
    }

    p2m_write_lock(p2m);

    for ( gfn = gfn_add(gfn, start); nr > start;
          gfn = gfn_next_boundary(gfn, order) )
    {
        p2m_type_t t;
        mfn_t mfn = p2m_get_entry(p2m, gfn, &t, NULL, &order);

        if ( !mfn_eq(mfn, INVALID_MFN) )
        {
            order = 0;
            rc = p2m_set_entry(p2m, gfn, 1, mfn, t, a);
            if ( rc )
                break;
        }

        start += gfn_x(gfn_next_boundary(gfn, order)) - gfn_x(gfn);

        /* Check for continuation if it is not the last iteration */
        if ( nr > start && !(start & mask) && hypercall_preempt_check() )
        {
            rc = start;
            break;
        }
    }

    p2m_write_unlock(p2m);

    return rc;
}
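/*
 * Hedged, self-contained sketch (not Xen code) of the continuation convention
 * used by both p2m_set_mem_access variants above: a positive return value is
 * the progress made so far ("start"), the caller feeds it back into the next
 * invocation, and the mask rate-limits how often the preemption check runs.
 * Names below (apply_range, need_preempt) are illustrative assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in for hypercall_preempt_check(): always ask to yield when polled. */
static bool need_preempt(void)
{
    return true;
}

/*
 * Apply an operation to entries [start, nr).  Returns 0 when done, or the
 * new start value (> 0) if the work was interrupted and must be resumed.
 */
static long apply_range(unsigned int nr, unsigned int start, unsigned int mask)
{
    while ( nr > start )
    {
        printf("applying to entry %u\n", start);

        /* Check for continuation if it is not the last iteration. */
        if ( nr > ++start && !(start & mask) && need_preempt() )
            return start;
    }

    return 0;
}

int main(void)
{
    const unsigned int nr = 10, mask = 0x3;  /* poll every 4 entries */
    long rc = apply_range(nr, 0, mask);

    while ( rc > 0 )
    {
        printf("preempted at %ld, resuming\n", rc);
        rc = apply_range(nr, (unsigned int)rc, mask);
    }

    return (int)rc;
}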