struct vm_struct *xen_alloc_vm_area(unsigned long size) { int order; unsigned long virt; unsigned long nr_pages; struct vm_struct *area; order = get_order(size); virt = __get_free_pages(GFP_KERNEL, order); if (virt == 0) goto err0; nr_pages = 1 << order; scrub_pages(virt, nr_pages); area = kmalloc(sizeof(*area), GFP_KERNEL); if (area == NULL) goto err1; area->flags = VM_IOREMAP; area->addr = (void *)virt; area->size = size; area->pages = NULL; area->nr_pages = nr_pages; area->phys_addr = 0; /* xenbus_map_ring_valloc uses this field! */ return area; err1: free_pages(virt, order); err0: return NULL; }
/*
 * Slab constructor for Xen skbuff caches.  Determines the buffer's page
 * order by locating @cachep in the skbuff_order_cachep[] table, tries to
 * make multi-page buffers machine-contiguous, and scrubs the memory.
 */
static void skbuff_ctor(void *buf, kmem_cache_t *cachep, unsigned long unused)
{
	int ord;

	/* Find which order this cache corresponds to.  NOTE(review):
	 * assumes @cachep is always present in the table — an unknown
	 * cache would run off the end; confirm against the cache setup. */
	for (ord = 0; skbuff_order_cachep[ord] != cachep; ord++)
		;

	/* Do our best to allocate contiguous memory but fall back to IOMMU. */
	if (ord > 0)
		(void)xen_create_contiguous_region(
			(unsigned long)buf, ord, 0);

	scrub_pages(buf, 1 << ord);
}
/*
 * apply_to_page_range() callback: hand one page's machine frame back to
 * Xen.  Clears the PTE, invalidates the P2M entry for the virtual
 * address, then issues a single-extent XENMEM_decrease_reservation.
 * BUGs if the hypervisor does not accept exactly one extent.
 */
static int dealloc_pte_fn(
	pte_t *pte, struct page *pmd_page, unsigned long addr, void *data)
{
	unsigned long mfn = pte_mfn(*pte);
	int ret;
	struct xen_memory_reservation reservation = {
		.nr_extents   = 1,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	set_xen_guest_handle(reservation.extent_start, &mfn);
	/* Remove the kernel mapping and the phys-to-machine translation
	 * before releasing the frame to the hypervisor. */
	set_pte_at(&init_mm, addr, pte, __pte_ma(0));
	set_phys_to_machine(__pa(addr) >> PAGE_SHIFT, INVALID_P2M_ENTRY);
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != 1);
	return 0;
}
#endif

/*
 * Allocate @nr_pages pages whose backing machine frames have been
 * returned to Xen: each page keeps its struct page and pseudo-physical
 * address, but is no longer backed by real memory (useful for mapping
 * foreign/grant frames into).  Returns a kmalloc'd vector of the pages,
 * or NULL on failure.  On partial failure every page already processed
 * is handed to the balloon rather than freed, since its frame is gone.
 */
struct page **alloc_empty_pages_and_pagevec(int nr_pages)
{
	unsigned long vaddr, flags;
	struct page *page, **pagevec;
	int i, ret;

	pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL);
	if (pagevec == NULL)
		return NULL;

	for (i = 0; i < nr_pages; i++) {
		page = pagevec[i] = alloc_page(GFP_KERNEL);
		if (page == NULL)
			goto err;

		vaddr = (unsigned long)page_address(page);
		scrub_pages(vaddr, 1);

		/* The balloon lock serializes against the balloon worker
		 * while we give the frame back and adjust the counters. */
		balloon_lock(flags);

		if (xen_feature(XENFEAT_auto_translated_physmap)) {
			/* Auto-translated guest: release the GMFN directly;
			 * no PTE/P2M bookkeeping is needed. */
			unsigned long gmfn = page_to_pfn(page);
			struct xen_memory_reservation reservation = {
				.nr_extents   = 1,
				.extent_order = 0,
				.domid        = DOMID_SELF
			};
			set_xen_guest_handle(reservation.extent_start, &gmfn);
			ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
						   &reservation);
			if (ret == 1)
				ret = 0; /* success */
		} else {
#ifdef CONFIG_XEN
			/* PV guest: tear down mapping + P2M via the
			 * per-PTE callback above. */
			ret = apply_to_page_range(&init_mm, vaddr, PAGE_SIZE,
						  dealloc_pte_fn, NULL);
#else
			/* Cannot handle non-auto translate mode. */
			ret = 1;
#endif
		}

		if (ret != 0) {
			balloon_unlock(flags);
			__free_page(page);
			goto err;
		}

		/* One less page of real memory in the domain. */
		totalram_pages = --current_pages;

		balloon_unlock(flags);
	}

 out:
	/* Kick the balloon worker so it can reconcile toward the target. */
	schedule_work(&balloon_worker);
#ifdef CONFIG_XEN
	/* Flush stale translations of the PTEs we cleared. */
	flush_tlb_all();
#endif
	return pagevec;

 err:
	/* Pages processed so far have no machine frame: park them in the
	 * balloon instead of freeing them back to the page allocator. */
	balloon_lock(flags);
	while (--i >= 0)
		balloon_append(pagevec[i]);
	balloon_unlock(flags);
	kfree(pagevec);
	pagevec = NULL;
	goto out;
}
static int increase_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; long rc; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); balloon_lock(flags); page = balloon_first_page(); for (i = 0; i < nr_pages; i++) { BUG_ON(page == NULL); frame_list[i] = page_to_pfn(page);; page = balloon_next_page(page); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; rc = HYPERVISOR_memory_op( XENMEM_populate_physmap, &reservation); if (rc < nr_pages) { int ret; /* We hit the Xen hard limit: reprobe. */ set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = rc; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != rc); hard_limit = current_pages + rc - driver_pages; goto out; } for (i = 0; i < nr_pages; i++) { page = balloon_retrieve(); BUG_ON(page == NULL); pfn = page_to_pfn(page); BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) && phys_to_machine_mapping_valid(pfn)); /* Update P->M and M->P tables. */ set_phys_to_machine(pfn, frame_list[i]); #ifdef CONFIG_XEN xen_machphys_update(frame_list[i], pfn); /* Link back into the page tables if not highmem. */ if (pfn < max_low_pfn) { int ret; ret = HYPERVISOR_update_va_mapping( (unsigned long)__va(pfn << PAGE_SHIFT), pfn_pte_ma(frame_list[i], PAGE_KERNEL), 0); BUG_ON(ret); } #endif /* Relinquish the page back to the allocator. 
*/ ClearPageReserved(page); set_page_count(page, 1); __free_page(page); } current_pages += nr_pages; totalram_pages = current_pages; out: balloon_unlock(flags); return 0; } static int decrease_reservation(unsigned long nr_pages) { unsigned long pfn, i, flags; struct page *page; void *v; int need_sleep = 0; int ret; struct xen_memory_reservation reservation = { .address_bits = 0, .extent_order = 0, .domid = DOMID_SELF }; if (nr_pages > ARRAY_SIZE(frame_list)) nr_pages = ARRAY_SIZE(frame_list); for (i = 0; i < nr_pages; i++) { if ((page = alloc_page(GFP_BALLOON)) == NULL) { nr_pages = i; need_sleep = 1; break; } pfn = page_to_pfn(page); frame_list[i] = pfn_to_mfn(pfn); if (!PageHighMem(page)) { v = phys_to_virt(pfn << PAGE_SHIFT); scrub_pages(v, 1); #ifdef CONFIG_XEN ret = HYPERVISOR_update_va_mapping( (unsigned long)v, __pte_ma(0), 0); BUG_ON(ret); #endif } #ifdef CONFIG_XEN_SCRUB_PAGES else { v = kmap(page); scrub_pages(v, 1); kunmap(page); } #endif } #ifdef CONFIG_XEN /* Ensure that ballooned highmem pages don't have kmaps. */ kmap_flush_unused(); flush_tlb_all(); #endif balloon_lock(flags); /* No more mappings: invalidate P2M and add to balloon. */ for (i = 0; i < nr_pages; i++) { pfn = mfn_to_pfn(frame_list[i]); set_phys_to_machine(pfn, INVALID_P2M_ENTRY); balloon_append(pfn_to_page(pfn)); } set_xen_guest_handle(reservation.extent_start, frame_list); reservation.nr_extents = nr_pages; ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); BUG_ON(ret != nr_pages); current_pages -= nr_pages; totalram_pages = current_pages; balloon_unlock(flags); return need_sleep; } /* * We avoid multiple worker processes conflicting via the balloon mutex. * We may of course race updates of the target counts (which are protected * by the balloon lock), or with changes to the Xen hard limit, but we will * recover from these in time. 
*/ static void balloon_process(void *unused) { int need_sleep = 0; long credit; down(&balloon_mutex); do { credit = current_target() - current_pages; if (credit > 0) need_sleep = (increase_reservation(credit) != 0); if (credit < 0) need_sleep = (decrease_reservation(-credit) != 0); #ifndef CONFIG_PREEMPT if (need_resched()) schedule(); #endif } while ((credit != 0) && !need_sleep); /* Schedule more work if there is some still to be done. */ if (current_target() != current_pages) mod_timer(&balloon_timer, jiffies + HZ); up(&balloon_mutex); }