/*
 * Associate a virtual page frame with a given machine page frame (MFN)
 * and protection flags for that frame.
 */
static void set_pte_pfn_ma(unsigned long vaddr, unsigned long pfn,
			   pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <pfn,flags> stored as-is, to permit clearing entries */
	set_pte(pte, pfn_pte_ma(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}
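
A minimal usage sketch: as the inline comment notes, the <pfn,flags> pair is
installed verbatim, so the same helper can both establish and clear a mapping.
The fixmap slot FIX_SHARED_INFO and the xen_start_info->shared_info machine
address are assumptions here, not part of the code above.

/* Illustrative only: FIX_SHARED_INFO and xen_start_info are assumed names. */
static void __init example_map_shared_info(void)
{
	unsigned long va = fix_to_virt(FIX_SHARED_INFO);

	/* Install the hypervisor-provided machine frame at the fixmap slot. */
	set_pte_pfn_ma(va, xen_start_info->shared_info >> PAGE_SHIFT,
		       PAGE_KERNEL);

	/* Passing an empty pgprot stores a zero entry, clearing the slot. */
	set_pte_pfn_ma(va, 0, __pgprot(0));
}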
Example #2
/*
 * Per-PTE callback (pte_fn_t style): install the next machine frame from
 * the array passed via 'data' into the PTE for 'addr'.
 */
static int map_pte_fn(pte_t *pte, struct page *pmd_page,
		      unsigned long addr, void *data)
{
	unsigned long **frames = (unsigned long **)data;

	set_pte_at(&init_mm, addr, pte, pfn_pte_ma((*frames)[0], PAGE_KERNEL));
	(*frames)++;
	return 0;
}
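
map_pte_fn() has the pte_fn_t callback shape, so a caller would typically hand
it to apply_to_page_range() together with a cursor into an array of machine
frame numbers. A sketch of such a caller, assuming the VM area was obtained
elsewhere (e.g. via alloc_vm_area()) and that mfns holds nr_frames machine
frame numbers:

/* Illustrative caller; 'area' and 'mfns' are assumed to be set up elsewhere. */
static int example_map_frames(struct vm_struct *area,
			      unsigned long *mfns, unsigned int nr_frames)
{
	unsigned long *frames = mfns;

	/* map_pte_fn() consumes one entry of 'frames' per PTE it fills in. */
	return apply_to_page_range(&init_mm, (unsigned long)area->addr,
				   nr_frames * PAGE_SIZE, map_pte_fn,
				   &frames);
}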
Example #3
static int increase_reservation(unsigned long nr_pages)
{
	unsigned long  pfn, i, flags;
	struct page   *page;
	long           rc;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	balloon_lock(flags);

	page = balloon_first_page();
	for (i = 0; i < nr_pages; i++) {
		BUG_ON(page == NULL);
		frame_list[i] = page_to_pfn(page);
		page = balloon_next_page(page);
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents   = nr_pages;
	rc = HYPERVISOR_memory_op(
		XENMEM_populate_physmap, &reservation);
	if (rc < nr_pages) {
		int ret;
		/* We hit the Xen hard limit: reprobe. */
		set_xen_guest_handle(reservation.extent_start, frame_list);
		reservation.nr_extents   = rc;
		ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation,
				&reservation);
		BUG_ON(ret != rc);
		hard_limit = current_pages + rc - driver_pages;
		goto out;
	}

	for (i = 0; i < nr_pages; i++) {
		page = balloon_retrieve();
		BUG_ON(page == NULL);

		pfn = page_to_pfn(page);
		BUG_ON(!xen_feature(XENFEAT_auto_translated_physmap) &&
		       phys_to_machine_mapping_valid(pfn));

		/* Update P->M and M->P tables. */
		set_phys_to_machine(pfn, frame_list[i]);

#ifdef CONFIG_XEN
		xen_machphys_update(frame_list[i], pfn);

		/* Link back into the page tables if not highmem. */
		if (pfn < max_low_pfn) {
			int ret;
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)__va(pfn << PAGE_SHIFT),
				pfn_pte_ma(frame_list[i], PAGE_KERNEL),
				0);
			BUG_ON(ret);
		}
#endif
		/* Relinquish the page back to the allocator. */
		ClearPageReserved(page);
		set_page_count(page, 1);
		__free_page(page);
	}

	current_pages += nr_pages;
	totalram_pages = current_pages;

 out:
	balloon_unlock(flags);

	return 0;
}

static int decrease_reservation(unsigned long nr_pages)
{
	unsigned long  pfn, i, flags;
	struct page   *page;
	void          *v;
	int            need_sleep = 0;
	int ret;
	struct xen_memory_reservation reservation = {
		.address_bits = 0,
		.extent_order = 0,
		.domid        = DOMID_SELF
	};

	if (nr_pages > ARRAY_SIZE(frame_list))
		nr_pages = ARRAY_SIZE(frame_list);

	for (i = 0; i < nr_pages; i++) {
		if ((page = alloc_page(GFP_BALLOON)) == NULL) {
			nr_pages = i;
			need_sleep = 1;
			break;
		}

		pfn = page_to_pfn(page);
		frame_list[i] = pfn_to_mfn(pfn);

		if (!PageHighMem(page)) {
			v = phys_to_virt(pfn << PAGE_SHIFT);
			scrub_pages(v, 1);
#ifdef CONFIG_XEN
			ret = HYPERVISOR_update_va_mapping(
				(unsigned long)v, __pte_ma(0), 0);
			BUG_ON(ret);
#endif
		}
#ifdef CONFIG_XEN_SCRUB_PAGES
		else {
			v = kmap(page);
			scrub_pages(v, 1);
			kunmap(page);
		}
#endif
	}

#ifdef CONFIG_XEN
	/* Ensure that ballooned highmem pages don't have kmaps. */
	kmap_flush_unused();
	flush_tlb_all();
#endif

	balloon_lock(flags);

	/* No more mappings: invalidate P2M and add to balloon. */
	for (i = 0; i < nr_pages; i++) {
		pfn = mfn_to_pfn(frame_list[i]);
		set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
		balloon_append(pfn_to_page(pfn));
	}

	set_xen_guest_handle(reservation.extent_start, frame_list);
	reservation.nr_extents   = nr_pages;
	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
	BUG_ON(ret != nr_pages);

	current_pages -= nr_pages;
	totalram_pages = current_pages;

	balloon_unlock(flags);

	return need_sleep;
}

/*
 * We avoid multiple worker processes conflicting via the balloon mutex.
 * We may of course race updates of the target counts (which are protected
 * by the balloon lock), or with changes to the Xen hard limit, but we will
 * recover from these in time.
 */
static void balloon_process(void *unused)
{
	int need_sleep = 0;
	long credit;

	down(&balloon_mutex);

	do {
		credit = current_target() - current_pages;
		if (credit > 0)
			need_sleep = (increase_reservation(credit) != 0);
		if (credit < 0)
			need_sleep = (decrease_reservation(-credit) != 0);

#ifndef CONFIG_PREEMPT
		if (need_resched())
			schedule();
#endif
	} while ((credit != 0) && !need_sleep);

	/* Schedule more work if there is some still to be done. */
	if (current_target() != current_pages)
		mod_timer(&balloon_timer, jiffies + HZ);

	up(&balloon_mutex);
}
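
balloon_process() drives its credit computation off current_target(), which is
not part of this excerpt. A plausible sketch of such a helper, assuming the
driver tracks a user-requested target_pages and counts of pages parked on its
lists in balloon_low/balloon_high (assumed names, as is the exact clamping
policy):

/* Not from the code above; a sketch under the assumptions stated in the text. */
static unsigned long current_target(void)
{
	/* Never aim above the hard limit learned in increase_reservation(). */
	unsigned long target = min(target_pages, hard_limit);

	/* Nor above what the balloon could possibly hand back right now. */
	if (target > (current_pages + balloon_low + balloon_high))
		target = current_pages + balloon_low + balloon_high;

	return target;
}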
Example #4
/* Set up the grant operations for this fragment.  If it's a flipping
   interface, we also set up the unmap request from here. */
static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
			  int i, struct netrx_pending_operations *npo,
			  struct page *page, unsigned long size,
			  unsigned long offset)
{
	mmu_update_t *mmu;
	gnttab_transfer_t *gop;
	gnttab_copy_t *copy_gop;
	multicall_entry_t *mcl;
	netif_rx_request_t *req;
	unsigned long old_mfn, new_mfn;

	old_mfn = virt_to_mfn(page_address(page));

	req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
	if (netif->copying_receiver) {
		/* The fragment needs to be copied rather than
		   flipped. */
		meta->copy = 1;
		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		if (PageForeign(page)) {
			struct pending_tx_info *src_pend =
				&pending_tx_info[page->index];
			copy_gop->source.domid = src_pend->netif->domid;
			copy_gop->source.u.ref = src_pend->req.gref;
			copy_gop->flags |= GNTCOPY_source_gref;
		} else {
			copy_gop->source.domid = DOMID_SELF;
			copy_gop->source.u.gmfn = old_mfn;
		}
		copy_gop->source.offset = offset;
		copy_gop->dest.domid = netif->domid;
		copy_gop->dest.offset = 0;
		copy_gop->dest.u.ref = req->gref;
		copy_gop->len = size;
	} else {
		meta->copy = 0;
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			new_mfn = alloc_mfn();

			/*
			 * Set the new P2M table entry before
			 * reassigning the old data page. Heed the
			 * comment in pgtable-2level.h:pte_page(). :-)
			 */
			set_phys_to_machine(page_to_pfn(page), new_mfn);

			mcl = npo->mcl + npo->mcl_prod++;
			MULTI_update_va_mapping(mcl,
					     (unsigned long)page_address(page),
					     pfn_pte_ma(new_mfn, PAGE_KERNEL),
					     0);

			mmu = npo->mmu + npo->mmu_prod++;
			mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
				MMU_MACHPHYS_UPDATE;
			mmu->val = page_to_pfn(page);
		}

		gop = npo->trans + npo->trans_prod++;
		gop->mfn = old_mfn;
		gop->domid = netif->domid;
		gop->ref = req->gref;
	}
	return req->id;
}
Example #5
void __init adjust_boot_vcpu_info(void)
{
	unsigned long lpfn, rpfn, lmfn, rmfn;
	pte_t *lpte, *rpte;
	unsigned int level;
	mmu_update_t mmu[2];

	/*
	 * setup_vcpu_info() cannot be used more than once for a given (v)CPU,
	 * hence we must swap the underlying MFNs of the two pages holding old
	 * and new vcpu_info of the boot CPU.
	 *
	 * Do *not* use __get_cpu_var() or percpu_{write,...}() here, as the per-
	 * CPU segment didn't get reloaded yet. Using percpu_read(), as in
	 * arch_use_lazy_mmu_mode(), though undesirable, is safe except for the
	 * accesses to variables that were updated in setup_percpu_areas().
	 */
	lpte = lookup_address((unsigned long)&per_cpu_var(vcpu_info)
			      + (__per_cpu_load - __per_cpu_start),
			      &level);
	rpte = lookup_address((unsigned long)&per_cpu(vcpu_info, 0), &level);
	BUG_ON(!lpte || !(pte_flags(*lpte) & _PAGE_PRESENT));
	BUG_ON(!rpte || !(pte_flags(*rpte) & _PAGE_PRESENT));
	lmfn = __pte_mfn(*lpte);
	rmfn = __pte_mfn(*rpte);

	if (lmfn == rmfn)
		return;

	lpfn = mfn_to_local_pfn(lmfn);
	rpfn = mfn_to_local_pfn(rmfn);

	printk(KERN_INFO
	       "Swapping MFNs for PFN %lx and %lx (MFN %lx and %lx)\n",
	       lpfn, rpfn, lmfn, rmfn);

	xen_l1_entry_update(lpte, pfn_pte_ma(rmfn, pte_pgprot(*lpte)));
	xen_l1_entry_update(rpte, pfn_pte_ma(lmfn, pte_pgprot(*rpte)));
#ifdef CONFIG_X86_64
	if (HYPERVISOR_update_va_mapping((unsigned long)__va(lpfn<<PAGE_SHIFT),
					 pfn_pte_ma(rmfn, PAGE_KERNEL_RO), 0))
		BUG();
#endif
	if (HYPERVISOR_update_va_mapping((unsigned long)__va(rpfn<<PAGE_SHIFT),
					 pfn_pte_ma(lmfn, PAGE_KERNEL),
					 UVMF_TLB_FLUSH))
		BUG();

	set_phys_to_machine(lpfn, rmfn);
	set_phys_to_machine(rpfn, lmfn);

	mmu[0].ptr = ((uint64_t)lmfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
	mmu[0].val = rpfn;
	mmu[1].ptr = ((uint64_t)rmfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
	mmu[1].val = lpfn;
	if (HYPERVISOR_mmu_update(mmu, 2, NULL, DOMID_SELF))
		BUG();

	/*
	 * Copy over all contents of the page just replaced, except for the
	 * vcpu_info itself, as it may have got updated after having been
	 * copied from __per_cpu_load[].
	 */
	memcpy(__va(rpfn << PAGE_SHIFT),
	       __va(lpfn << PAGE_SHIFT),
	       (unsigned long)&per_cpu_var(vcpu_info) & (PAGE_SIZE - 1));
	/* Reuse 'level' as the in-page offset of the byte just past vcpu_info. */
	level = (unsigned long)(&per_cpu_var(vcpu_info) + 1) & (PAGE_SIZE - 1);
	if (level)
		memcpy(__va(rpfn << PAGE_SHIFT) + level,
		       __va(lpfn << PAGE_SHIFT) + level,
		       PAGE_SIZE - level);
}