Example 1
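xen_set_pte_at() from the Linux Xen paravirt MMU code, in a variant instrumented with ADD_STATS counters: under lazy MMU mode the PTE update is queued as an update_va_mapping multicall; otherwise a direct hypercall is tried before falling back to xen_set_pte().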
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
	ADD_STATS(set_pte_at, 1);
//	ADD_STATS(set_pte_at_pinned, xen_page_pinned(ptep));
	ADD_STATS(set_pte_at_current, mm == current->mm);
	ADD_STATS(set_pte_at_kernel, mm == &init_mm);

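	/*
	 * update_va_mapping works on virtual addresses in the currently
	 * loaded pagetable, so it is only usable when mm is the active
	 * address space or the globally shared kernel one.
	 */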
	if (mm == current->mm || mm == &init_mm) {
		if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			ADD_STATS(set_pte_at_batched, 1);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			goto out;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				goto out;
	}
	xen_set_pte(ptep, pteval);

out:	return;
}
Example 2
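pin_page() marks a page as pinned via PG_pinned and, for lowmem pages, queues a multicall that remaps the page read-only; the return value tells the caller whether a kmap flush is needed.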
static int pin_page(struct page *page, unsigned flags)
{
	unsigned pgfl = test_and_set_bit(PG_pinned, &page->flags);
	int flush;

	if (pgfl)
		flush = 0;		/* already pinned */
	else if (PageHighMem(page))
		/* kmaps need flushing if we found an unpinned
		   highpage */
		flush = 1;
	else {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);

		flush = 0;

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL_RO),
					flags);
	}

	return flush;
}
Example 3
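unpin_page() is the converse: it clears PG_pinned and, for previously pinned lowmem pages, queues a multicall restoring the writable kernel mapping. Unpinning never requires a flush.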
static int unpin_page(struct page *page, unsigned flags)
{
	unsigned pgfl = test_and_clear_bit(PG_pinned, &page->flags);

	if (pgfl && !PageHighMem(page)) {
		void *pt = lowmem_page_address(page);
		unsigned long pfn = page_to_pfn(page);
		struct multicall_space mcs = __xen_mc_entry(0);

		MULTI_update_va_mapping(mcs.mc, (unsigned long)pt,
					pfn_pte(pfn, PAGE_KERNEL),
					flags);
	}

	return 0;		/* never need to flush on unpin */
}
Example 4
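An earlier variant of Example 1's xen_set_pte_at(), without the stats counters and using xen_get_lazy_mode() rather than paravirt_get_lazy_mode().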
void xen_set_pte_at(struct mm_struct *mm, unsigned long addr,
		    pte_t *ptep, pte_t pteval)
{
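	/* As in Example 1: update_va_mapping only applies to the active
	   or kernel address space; otherwise write the PTE directly. */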
	if (mm == current->mm || mm == &init_mm) {
		if (xen_get_lazy_mode() == PARAVIRT_LAZY_MMU) {
			struct multicall_space mcs;
			mcs = xen_mc_entry(0);

			MULTI_update_va_mapping(mcs.mc, addr, pteval, 0);
			xen_mc_issue(PARAVIRT_LAZY_MMU);
			return;
		} else
			if (HYPERVISOR_update_va_mapping(addr, pteval, 0) == 0)
				return;
	}
	xen_set_pte(ptep, pteval);
}
Example 5
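xennet_free_rx_buffer() from the NetBSD xennet frontend driver reclaims pages still granted to the backend; each recovered machine frame is reinserted into the phys-to-machine map and remapped with a two-entry multicall (update_va_mapping plus mmu_update).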
static void
xennet_free_rx_buffer(struct xennet_xenbus_softc *sc)
{
	paddr_t ma, pa;
	vaddr_t va;
	RING_IDX i;
	mmu_update_t mmu[1];
	multicall_entry_t mcl[2];

	int s = splbio();
	
	DPRINTF(("%s: xennet_free_rx_buffer\n", device_xname(sc->sc_dev)));
	/* get back memory from RX ring */
	for (i = 0; i < NET_RX_RING_SIZE; i++) {
		struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];

		/*
		 * if the buffer is in transit in the network stack, wait for
		 * the network stack to free it.
		 */
		while ((volatile grant_ref_t)rxreq->rxreq_gntref ==
		    GRANT_STACK_REF)
			tsleep(xennet_xenbus_detach, PRIBIO, "xnet_free", hz/2);

		if (rxreq->rxreq_gntref != GRANT_INVALID_REF) {
			/*
			 * this req is still granted. Get back the page or
			 * allocate a new one, and remap it.
			 */
			SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq,
			    rxreq_next);
			sc->sc_free_rxreql++;
			ma = xengnt_revoke_transfer(rxreq->rxreq_gntref);
			rxreq->rxreq_gntref = GRANT_INVALID_REF;
			if (ma == 0) {
				u_long pfn;
				struct xen_memory_reservation xenres;
				/*
				 * transfer not complete, we lost the page.
				 * Get one from hypervisor
				 */
				xenres.extent_start = &pfn;
				xenres.nr_extents = 1;
				xenres.extent_order = 0;
				xenres.address_bits = 31;
				xenres.domid = DOMID_SELF;
				if (HYPERVISOR_memory_op(
				    XENMEM_increase_reservation, &xenres) < 0) {
					panic("xennet_free_rx_buffer: "
					    "can't get memory back");
				}
				ma = pfn;
				KASSERT(ma != 0);
			}
			pa = rxreq->rxreq_pa;
			va = rxreq->rxreq_va;
			/* remap the page */
			mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
			mmu[0].val = ((pa - XPMAP_OFFSET) >> PAGE_SHIFT);
			MULTI_update_va_mapping(&mcl[0], va, 
			    (ma << PAGE_SHIFT) | PG_V | PG_KW,
			    UVMF_TLB_FLUSH|UVMF_ALL);
			xpmap_phys_to_machine_mapping[
			    (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = ma;
			mcl[1].op = __HYPERVISOR_mmu_update;
			mcl[1].args[0] = (unsigned long)mmu;
			mcl[1].args[1] = 1;
			mcl[1].args[2] = 0;
			mcl[1].args[3] = DOMID_SELF;
			HYPERVISOR_multicall(mcl, 2);
		}

	}
	splx(s);
}
Example 6
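xennet_alloc_rx_buffer(), also from the NetBSD frontend, grants RX pages to the backend: each page is unmapped via MULTI_update_va_mapping, removed from the phys-to-machine map, and its machine frame handed back through a XENMEM_decrease_reservation appended to the same multicall batch.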
static void
xennet_alloc_rx_buffer(struct xennet_xenbus_softc *sc)
{
	RING_IDX req_prod = sc->sc_rx_ring.req_prod_pvt;
	RING_IDX i;
	struct xennet_rxreq *req;
	struct xen_memory_reservation reservation;
	int s1, s2;
	paddr_t pfn;

	s1 = splnet();
	for (i = 0; sc->sc_free_rxreql != 0; i++) {
		req  = SLIST_FIRST(&sc->sc_rxreq_head);
		KASSERT(req != NULL);
		KASSERT(req == &sc->sc_rxreqs[req->rxreq_id]);
		RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->id =
		    req->rxreq_id;
		if (xengnt_grant_transfer(sc->sc_xbusd->xbusd_otherend_id,
		    &req->rxreq_gntref) != 0) {
			break;
		}
		RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->gref =
		    req->rxreq_gntref;

		SLIST_REMOVE_HEAD(&sc->sc_rxreq_head, rxreq_next);
		sc->sc_free_rxreql--;

		/* unmap the page */
		MULTI_update_va_mapping(&rx_mcl[i], req->rxreq_va, 0, 0);
		/*
		 * Remove this page from pseudo phys map before
		 * passing back to Xen.
		 */
		pfn = (req->rxreq_pa - XPMAP_OFFSET) >> PAGE_SHIFT;
		xennet_pages[i] = xpmap_phys_to_machine_mapping[pfn];
		xpmap_phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
	}
	if (i == 0) {
		splx(s1);
		return;
	}
	/* also make sure to flush all TLB entries */
	rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
	/*
	 * We may have allocated buffers which have entries
	 * outstanding in the page update queue -- make sure we flush
	 * those first!
	 */
	s2 = splvm();
	xpq_flush_queue();
	splx(s2);
	/* now decrease reservation */
	reservation.extent_start = xennet_pages;
	reservation.nr_extents = i;
	reservation.extent_order = 0;
	reservation.address_bits = 0;
	reservation.domid = DOMID_SELF;
	rx_mcl[i].op = __HYPERVISOR_memory_op;
	rx_mcl[i].args[0] = XENMEM_decrease_reservation;
	rx_mcl[i].args[1] = (unsigned long)&reservation;
	HYPERVISOR_multicall(rx_mcl, i+1);
	if (__predict_false(rx_mcl[i].result != i)) {
		panic("xennet_alloc_rx_buffer: XENMEM_decrease_reservation");
	}
	sc->sc_rx_ring.req_prod_pvt = req_prod + i;
	RING_PUSH_REQUESTS(&sc->sc_rx_ring);

	splx(s1);
	return;
}
Example 7
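netbk_gop_frag() from the Xen netback driver sets up the grant operations for one RX fragment: a grant copy for copying receivers, or, for page-flipping receivers, a grant transfer plus batched update_va_mapping and mmu_update fixups of the P2M/M2P maps.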
/* Set up the grant operations for this fragment.  If it's a flipping
   interface, we also set up the unmap request from here. */
static u16 netbk_gop_frag(netif_t *netif, struct netbk_rx_meta *meta,
			  int i, struct netrx_pending_operations *npo,
			  struct page *page, unsigned long size,
			  unsigned long offset)
{
	mmu_update_t *mmu;
	gnttab_transfer_t *gop;
	gnttab_copy_t *copy_gop;
	multicall_entry_t *mcl;
	netif_rx_request_t *req;
	unsigned long old_mfn, new_mfn;

	old_mfn = virt_to_mfn(page_address(page));

	req = RING_GET_REQUEST(&netif->rx, netif->rx.req_cons + i);
	if (netif->copying_receiver) {
		/* The fragment needs to be copied rather than
		   flipped. */
		meta->copy = 1;
		copy_gop = npo->copy + npo->copy_prod++;
		copy_gop->flags = GNTCOPY_dest_gref;
		if (PageForeign(page)) {
			struct pending_tx_info *src_pend =
				&pending_tx_info[page->index];
			copy_gop->source.domid = src_pend->netif->domid;
			copy_gop->source.u.ref = src_pend->req.gref;
			copy_gop->flags |= GNTCOPY_source_gref;
		} else {
			copy_gop->source.domid = DOMID_SELF;
			copy_gop->source.u.gmfn = old_mfn;
		}
		copy_gop->source.offset = offset;
		copy_gop->dest.domid = netif->domid;
		copy_gop->dest.offset = 0;
		copy_gop->dest.u.ref = req->gref;
		copy_gop->len = size;
	} else {
		meta->copy = 0;
		if (!xen_feature(XENFEAT_auto_translated_physmap)) {
			new_mfn = alloc_mfn();

			/*
			 * Set the new P2M table entry before
			 * reassigning the old data page. Heed the
			 * comment in pgtable-2level.h:pte_page(). :-)
			 */
			set_phys_to_machine(page_to_pfn(page), new_mfn);

			mcl = npo->mcl + npo->mcl_prod++;
			MULTI_update_va_mapping(mcl,
					     (unsigned long)page_address(page),
					     pfn_pte_ma(new_mfn, PAGE_KERNEL),
					     0);

			mmu = npo->mmu + npo->mmu_prod++;
			mmu->ptr = ((maddr_t)new_mfn << PAGE_SHIFT) |
				MMU_MACHPHYS_UPDATE;
			mmu->val = page_to_pfn(page);
		}

		gop = npo->trans + npo->trans_prod++;
		gop->mfn = old_mfn;
		gop->domid = netif->domid;
		gop->ref = req->gref;
	}
	return req->id;
}
Example 8
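m2p_remove_override() from the Linux x86 Xen p2m code drops an M2P override: it batches a GNTTABOP_unmap_and_replace with an update_va_mapping of the balloon scratch page, then restores the original p2m entry for the frontend's pfn.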
static int m2p_remove_override(struct page *page,
			       struct gnttab_map_grant_ref *kmap_op,
			       unsigned long mfn)
{
	unsigned long flags;
	unsigned long pfn;
	unsigned long uninitialized_var(address);
	unsigned level;
	pte_t *ptep = NULL;

	pfn = page_to_pfn(page);

	if (!PageHighMem(page)) {
		address = (unsigned long)__va(pfn << PAGE_SHIFT);
		ptep = lookup_address(address, &level);

		if (WARN(ptep == NULL || level != PG_LEVEL_4K,
			 "m2p_remove_override: pfn %lx not mapped", pfn))
			return -EINVAL;
	}

	spin_lock_irqsave(&m2p_override_lock, flags);
	list_del(&page->lru);
	spin_unlock_irqrestore(&m2p_override_lock, flags);

	if (kmap_op != NULL) {
		if (!PageHighMem(page)) {
			struct multicall_space mcs;
			struct gnttab_unmap_and_replace *unmap_op;
			struct page *scratch_page = get_balloon_scratch_page();
			unsigned long scratch_page_address = (unsigned long)
				__va(page_to_pfn(scratch_page) << PAGE_SHIFT);

			/*
			 * It might be that we queued all the m2p grant table
			 * hypercalls in a multicall, then m2p_remove_override
			 * gets called before the multicall has actually been
			 * issued. In this case the handle is going to be -1
			 * because it hasn't been modified yet.
			 */
			if (kmap_op->handle == -1)
				xen_mc_flush();
			/*
			 * Now if kmap_op->handle is negative it means that the
			 * hypercall actually returned an error.
			 */
			if (kmap_op->handle == GNTST_general_error) {
				pr_warn("m2p_remove_override: pfn %lx mfn %lx, failed to modify kernel mappings",
					pfn, mfn);
				put_balloon_scratch_page();
				return -1;
			}

			xen_mc_batch();

			mcs = __xen_mc_entry(
				sizeof(struct gnttab_unmap_and_replace));
			unmap_op = mcs.args;
			unmap_op->host_addr = kmap_op->host_addr;
			unmap_op->new_addr = scratch_page_address;
			unmap_op->handle = kmap_op->handle;

			MULTI_grant_table_op(mcs.mc,
				GNTTABOP_unmap_and_replace, unmap_op, 1);

			mcs = __xen_mc_entry(0);
			MULTI_update_va_mapping(mcs.mc, scratch_page_address,
					pfn_pte(page_to_pfn(scratch_page),
					PAGE_KERNEL_RO), 0);

			xen_mc_issue(PARAVIRT_LAZY_MMU);

			kmap_op->host_addr = 0;
			put_balloon_scratch_page();
		}
	}

	/* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
	 * somewhere in this domain, even before being added to the
	 * m2p_override (see comment above in m2p_add_override).
	 * If there are no other entries in the m2p_override corresponding
	 * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
	 * the original pfn (the one shared by the frontend): the backend
	 * cannot do any IO on this page anymore because it has been
	 * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
	 * the original pfn causes mfn_to_pfn(mfn) to return the frontend
	 * pfn again. */
	mfn &= ~FOREIGN_FRAME_BIT;
	pfn = mfn_to_pfn_no_overrides(mfn);
	if (__pfn_to_mfn(pfn) == FOREIGN_FRAME(mfn) &&
			m2p_find_override(mfn) == NULL)
		set_phys_to_machine(pfn, mfn);

	return 0;
}
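The eight examples all instantiate the same pattern: reserve a slot in the multicall buffer, stage an update_va_mapping operation in it, and issue the batch either immediately or lazily. Below is a minimal sketch of that pattern, assuming the Linux-side helpers seen in Examples 1, 2 and 8 (xen_mc_batch, __xen_mc_entry, MULTI_update_va_mapping, xen_mc_issue); remap_kernel_page_batched is a hypothetical name, and as kernel-internal code it only builds inside the Xen MMU layer.

#include <asm/xen/hypercall.h>		/* MULTI_update_va_mapping() */
#include <asm/xen/page.h>		/* pfn_pte() */
#include "multicalls.h"			/* xen_mc_batch() and friends */

/*
 * Hypothetical helper: remap the kernel page at 'va' to frame 'pfn'
 * with protection 'prot', batched so it can coalesce with any other
 * MMU operations already queued (the pattern of Examples 1-4 and 8).
 */
static void remap_kernel_page_batched(unsigned long va, unsigned long pfn,
				      pgprot_t prot)
{
	struct multicall_space mcs;

	xen_mc_batch();			/* start a multicall batch */

	mcs = __xen_mc_entry(0);	/* reserve one multicall slot */
	MULTI_update_va_mapping(mcs.mc, va, pfn_pte(pfn, prot), 0);

	/* Flush now, unless an open lazy-MMU section will flush later. */
	xen_mc_issue(PARAVIRT_LAZY_MMU);
}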