static void increase_reservation(struct memop_args *a)
{
    struct page_info *page;
    unsigned long i;
    xen_pfn_t mfn;
    struct domain *d = a->domain;

    if ( !guest_handle_is_null(a->extent_list) &&
         !guest_handle_subrange_okay(a->extent_list, a->nr_done,
                                     a->nr_extents-1) )
        return;

    if ( !multipage_allocation_permitted(current->domain, a->extent_order) )
        return;

    mcd_mem_inc_trap(a->domain, (a->nr_extents - a->nr_done));

    for ( i = a->nr_done; i < a->nr_extents; i++ )
    {
        if ( hypercall_preempt_check() )
        {
            a->preempted = 1;
            goto out;
        }

        page = alloc_domheap_pages(d, a->extent_order, a->memflags);
        if ( unlikely(page == NULL) )
        {
            gdprintk(XENLOG_INFO, "Could not allocate order=%d extent: "
                     "id=%d memflags=%x (%ld of %d)\n",
                     a->extent_order, d->domain_id, a->memflags,
                     i, a->nr_extents);
            goto out;
        }

        /* Inform the domain of the new page's machine address. */
        if ( !guest_handle_is_null(a->extent_list) )
        {
            mfn = page_to_mfn(page);
            if ( unlikely(__copy_to_guest_offset(a->extent_list, i, &mfn, 1)) )
                goto out;
        }
    }

 out:
    a->nr_done = i;
    mcd_mem_upt_trap(d);
}
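increase_reservation() is the hypervisor-side handler behind the XENMEM_increase_reservation memory_op: the extent_list, nr_extents and extent_order it consumes come straight from the guest's hypercall arguments. For context, here is a guest-side caller sketch, not taken from the source above; it assumes a Linux guest with the Xen hypercall headers, and guest_increase_reservation(), frame_list and nr_pages are illustrative names, mirroring the usual balloon-driver pattern.

#include <xen/interface/memory.h>   /* struct xen_memory_reservation, XENMEM_* */
#include <asm/xen/hypercall.h>      /* HYPERVISOR_memory_op() */

/* Ask Xen for nr_pages extra order-0 extents; the MFNs of the new pages
 * are written back into frame_list (the extent_list seen above). */
static long guest_increase_reservation(xen_pfn_t *frame_list,
                                       unsigned long nr_pages)
{
    struct xen_memory_reservation reservation = {
        .nr_extents   = nr_pages,
        .extent_order = 0,          /* one page per extent */
        .domid        = DOMID_SELF,
    };

    set_xen_guest_handle(reservation.extent_start, frame_list);

    /* Returns the number of extents actually allocated (a->nr_done above). */
    return HYPERVISOR_memory_op(XENMEM_increase_reservation, &reservation);
}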
/* Update per-VCPU guest runstate shared memory area (if registered). */
static void update_runstate_area(struct vcpu *v)
{
    if ( guest_handle_is_null(runstate_guest(v)) )
        return;

    __copy_to_guest(runstate_guest(v), &v->runstate, 1);
}
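update_runstate_area() is a no-op until the guest has registered a runstate buffer for the vCPU, which is what makes runstate_guest(v) non-null. For context, a minimal registration sketch follows; it is an assumption that the guest does this via VCPUOP_register_runstate_memory_area in the usual Linux-guest style, and register_runstate() and xen_runstate are hypothetical names.

#include <linux/percpu.h>
#include <xen/interface/vcpu.h>     /* VCPUOP_register_runstate_memory_area */
#include <asm/xen/hypercall.h>      /* HYPERVISOR_vcpu_op() */

static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate);

/* Point Xen at this vCPU's runstate buffer; from then on the hypervisor
 * copies v->runstate into it whenever update_runstate_area() runs. */
static void register_runstate(unsigned int cpu)
{
    struct vcpu_register_runstate_memory_area area;

    area.addr.v = &per_cpu(xen_runstate, cpu);

    /* Assumes vCPU numbers match CPU numbers in this guest. */
    if (HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area, cpu, &area))
        BUG();
}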
/* Update per-VCPU guest runstate shared memory area (if registered). */
static void update_runstate_area(struct vcpu *v)
{
    void __user *guest_handle = NULL;

    if ( guest_handle_is_null(runstate_guest(v)) )
        return;

    if ( VM_ASSIST(v->domain, runstate_update_flag) )
    {
        /* Guest handle of the last byte of state_entry_time, which holds
         * the XEN_RUNSTATE_UPDATE bit. */
        guest_handle = &v->runstate_guest.p->state_entry_time + 1;
        guest_handle--;
        /* Mark the guest-visible copy as in-flux before the bulk copy. */
        v->runstate.state_entry_time |= XEN_RUNSTATE_UPDATE;
        __raw_copy_to_guest(guest_handle,
                            (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
        smp_wmb();
    }

    __copy_to_guest(runstate_guest(v), &v->runstate, 1);

    if ( guest_handle )
    {
        /* Copy complete: clear XEN_RUNSTATE_UPDATE in the guest's copy. */
        v->runstate.state_entry_time &= ~XEN_RUNSTATE_UPDATE;
        smp_wmb();
        __raw_copy_to_guest(guest_handle,
                            (void *)(&v->runstate.state_entry_time + 1) - 1, 1);
    }
}
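In this variant, when VM_ASSIST(d, runstate_update_flag) is enabled the hypervisor sets XEN_RUNSTATE_UPDATE in the guest-visible state_entry_time before copying the full runstate and clears it afterwards, so the guest can tell when it has read a torn snapshot. A guest-side reader might retry along the lines below; this is a sketch assuming Linux-kernel-style READ_ONCE/smp_rmb and a 64-bit guest where the 64-bit load is atomic, and runstate_snapshot() is a hypothetical helper.

#include <linux/compiler.h>         /* READ_ONCE() */
#include <asm/barrier.h>            /* smp_rmb() */
#include <xen/interface/vcpu.h>     /* struct vcpu_runstate_info, XEN_RUNSTATE_UPDATE */

/* Take a consistent copy of the shared runstate area: retry while the
 * hypervisor is mid-update (XEN_RUNSTATE_UPDATE set) or the entry time
 * changed underneath us. */
static void runstate_snapshot(const struct vcpu_runstate_info *shared,
                              struct vcpu_runstate_info *snap)
{
    uint64_t entry_time;

    do {
        entry_time = READ_ONCE(shared->state_entry_time);
        smp_rmb();                  /* pairs with the hypervisor's smp_wmb() */
        *snap = *shared;
        smp_rmb();
    } while (READ_ONCE(shared->state_entry_time) != entry_time ||
             (entry_time & XEN_RUNSTATE_UPDATE));
}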