/*
 * Swap @mfn for a machine frame guaranteed to sit below the 4GB boundary.
 *
 * The original frame is first handed back to the hypervisor; a replacement
 * constrained to 32 address bits is then requested.  Returns the new MFN on
 * success, or 0 on failure.  (If the increase step fails, the original frame
 * has already been released and is not restored.)
 */
unsigned long xc_make_page_below_4G( int xc_handle, uint32_t domid, unsigned long mfn)
{
    xen_pfn_t victim = mfn;   /* frame we give up */
    xen_pfn_t replacement;    /* frame we get back, below 4GB */
    int rc;

    rc = xc_domain_memory_decrease_reservation(
        xc_handle, domid, 1, 0, &victim);
    if ( rc != 0 )
    {
        DPRINTF("xc_make_page_below_4G decrease failed. mfn=%lx\n",mfn);
        return 0;
    }

    rc = xc_domain_memory_increase_reservation(
        xc_handle, domid, 1, 0, XENMEMF_address_bits(32), &replacement);
    if ( rc != 0 )
    {
        DPRINTF("xc_make_page_below_4G increase failed. mfn=%lx\n",mfn);
        return 0;
    }

    return replacement;
}
/*
 * balloon_replace_pages()
 * Try to replace nextents blocks of 2^order pages.  addr_bits specifies
 * how many bits of address the pages must be within (i.e. 16 would mean
 * that the pages cannot have an address > 64k).  The constraints are on
 * what the hypervisor gives us -- we are free to give any pages in
 * exchange.  The array pp is the pages we are giving away.  The caller
 * provides storage space for mfns, which hold the new physical pages.
 *
 * Returns the number of constrained extents actually obtained (possibly
 * fewer than nextents, including 0).  Panics if the pages given away
 * cannot be replaced at all, since the caller's pages must stay backed.
 */
long
balloon_replace_pages(uint_t nextents, page_t **pp, uint_t addr_bits,
    uint_t order, mfn_t *mfns)
{
    xen_memory_reservation_t memres;
    long fallback_cnt;
    long cnt;
    uint_t i, j, page_cnt, extlen;
    long e;
    int locked;

    /*
     * we shouldn't be allocating constrained pages on a guest. It doesn't
     * make any sense. They won't be constrained after a migration.
     */
    ASSERT(DOMAIN_IS_INITDOMAIN(xen_info));

    extlen = 1 << order;          /* pages per extent */
    page_cnt = nextents * extlen; /* total pages involved */

    /* Give back the current pages to the hypervisor */
    for (i = 0; i < page_cnt; i++) {
        cnt = balloon_free_pages(1, NULL, NULL, &pp[i]->p_pagenum);
        if (cnt != 1) {
            cmn_err(CE_PANIC, "balloon: unable to give a page back "
                "to the hypervisor.\n");
        }
    }

    /*
     * try to allocate the new pages using addr_bits and order. If we can't
     * get all of the pages, try to get the remaining pages with no
     * constraints and, if that was successful, return the number of
     * constrained pages we did allocate.
     */
    bzero(&memres, sizeof (memres));
    /*LINTED: constant in conditional context*/
    set_xen_guest_handle(memres.extent_start, mfns);
    memres.domid = DOMID_SELF;
    memres.nr_extents = nextents;
    memres.mem_flags = XENMEMF_address_bits(addr_bits);
    memres.extent_order = order;
    cnt = HYPERVISOR_memory_op(XENMEM_increase_reservation, &memres);

    /*
     * assign the new MFNs to the current PFNs
     *
     * NOTE(review): cnt may be negative on hypercall error and is only
     * clamped to 0 further down.  The uint_t-vs-long comparison keeps
     * this loop from executing in that case, but the lock size argument
     * below would be negative -- confirm balloon_lock_contig_pfnlist
     * tolerates that.
     */
    locked = balloon_lock_contig_pfnlist(cnt * extlen);
    for (i = 0; i < cnt; i++) {
        for (j = 0; j < extlen; j++) {
            reassign_pfn(pp[i * extlen + j]->p_pagenum,
                mfns[i] + j);
        }
    }
    if (locked)
        unlock_contig_pfnlist();

    if (cnt != nextents) {
        if (cnt < 0) {
            cnt = 0;
        }

        /*
         * We couldn't get enough memory to satisfy our requirements.
         * The above loop will assign the parts of the request that
         * were successful (this part may be 0). We need to fill
         * in the rest. The bzero below clears out extent_order and
         * address_bits, so we'll take anything from the hypervisor
         * to replace the pages we gave away.
         */
        fallback_cnt = page_cnt - cnt * extlen;
        bzero(&memres, sizeof (memres));
        /*LINTED: constant in conditional context*/
        set_xen_guest_handle(memres.extent_start, mfns);
        memres.domid = DOMID_SELF;
        memres.nr_extents = fallback_cnt;
        e = HYPERVISOR_memory_op(XENMEM_increase_reservation, &memres);
        if (e != fallback_cnt) {
            /*
             * The pages were already released above; with no
             * replacements available the system cannot continue.
             */
            cmn_err(CE_PANIC, "balloon: unable to recover from "
                "failed increase_reservation.\n");
        }
        locked = balloon_lock_contig_pfnlist(fallback_cnt);
        for (i = 0; i < fallback_cnt; i++) {
            uint_t offset = page_cnt - fallback_cnt;
            /*
             * We already used pp[0...(cnt * extlen)] before,
             * so start at the next entry in the pp array.
             * Note the fallback MFNs reuse mfns[] from index 0;
             * the constrained MFNs there were already consumed
             * by reassign_pfn above.
             */
            reassign_pfn(pp[i + offset]->p_pagenum, mfns[i]);
        }
        if (locked)
            unlock_contig_pfnlist();
    }

    /*
     * balloon_free_pages increments our counter. Decrement it here.
     */
    atomic_add_long((ulong_t *)&bln_stats.bln_hv_pages, -(long)page_cnt);

    /*
     * return the number of extents we were able to replace. If we got
     * this far, we know all the pp's are valid.
     */
    return (cnt);
}