Example 1
static inline void
xpq_increment_idx(void)
{

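	/*
	 * Advance this CPU's queue index; once the per-CPU queue is full,
	 * push the whole batch of updates to the hypervisor.
	 */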
	if (__predict_false(++xpq_idx_array[curcpu()->ci_cpuid] == XPQUEUE_SIZE))
		xpq_flush_queue();
}
Example 2
void
xpq_flush_cache(void)
{
	int s = splvm();

	xpq_flush_queue();

	XENPRINTK2(("xpq_flush_cache\n"));
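	/* Write back and invalidate the CPU caches. */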
	asm("wbinvd":::"memory");
	splx(s); /* XXX: removeme */
}
Example 3
void
xpq_queue_invlpg(vaddr_t va)
{
	struct mmuext_op op;
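
	/* Flush pending queued updates before issuing the local invalidation. */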
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_invlpg %#" PRIxVADDR "\n", va));
	op.cmd = MMUEXT_INVLPG_LOCAL;
	op.arg1.linear_addr = (va & ~PAGE_MASK);
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_invlpg");
}
Example 4
void
xpq_queue_tlb_flush(void)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_tlb_flush\n"));
	op.cmd = MMUEXT_TLB_FLUSH_LOCAL;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_tlb_flush");
}
Example 5
void
xpq_queue_unpin_table(paddr_t pa)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_unpin_table: %#" PRIxPADDR "\n", pa));
	op.arg1.mfn = pa >> PAGE_SHIFT;
	op.cmd = MMUEXT_UNPIN_TABLE;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_unpin_table");
}
Example 6
void
xpq_queue_pt_switch(paddr_t pa)
{
	struct mmuext_op op;
	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pt_switch: 0x%" PRIx64 " 0x%" PRIx64 "\n",
	    (int64_t)pa, (int64_t)pa));
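	/* Switch to a new page-table base (the equivalent of loading %cr3). */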
	op.cmd = MMUEXT_NEW_BASEPTR;
	op.arg1.mfn = pa >> PAGE_SHIFT;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pt_switch");
}
Example 7
int
xpq_update_foreign(paddr_t ptr, pt_entry_t val, int dom)
{
	mmu_update_t op;
	int ok;

	xpq_flush_queue();

	op.ptr = ptr;
	op.val = val;
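	/*
	 * The hypercall returns a negative value on failure; 'ok' receives
	 * the number of updates successfully applied.
	 */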
	if (HYPERVISOR_mmu_update(&op, 1, &ok, dom) < 0)
		return EFAULT;
	return (0);
}
Example 8
void
xpq_queue_set_ldt(vaddr_t va, uint32_t entries)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_set_ldt\n"));
	KASSERT(va == (va & ~PAGE_MASK));
	op.cmd = MMUEXT_SET_LDT;
	op.arg1.linear_addr = va;
	op.arg2.nr_ents = entries;
	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_set_ldt");
}
Example 9
void
xpq_queue_pte_update(paddr_t ptr, pt_entry_t val)
{

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	KASSERT((ptr & 3) == 0);
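	/*
	 * The low two bits of the machine address encode the update type,
	 * hence the alignment check; queue the entry in this CPU's batch,
	 * to be flushed to the hypervisor later.
	 */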
	xpq_queue[xpq_idx].ptr = (paddr_t)ptr | MMU_NORMAL_PT_UPDATE;
	xpq_queue[xpq_idx].val = val;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}
Example 10
/* This is a synchronous call. */
void
xen_bcast_tlbflush(void)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_ALL;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_tlbflush");
	}

	return;
}
Example 11
void
xpq_queue_pin_table(paddr_t pa, int lvl)
{
	struct mmuext_op op;

	xpq_flush_queue();

	XENPRINTK2(("xpq_queue_pin_l%d_table: %#" PRIxPADDR "\n",
	    lvl + 1, pa));

	op.arg1.mfn = pa >> PAGE_SHIFT;
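	/* 'lvl' is expected to be one of the MMUEXT_PIN_Lx_TABLE commands. */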
	op.cmd = lvl;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0)
		panic("xpq_queue_pin_table");
}
Example 12
void
xen_bcast_invlpg(vaddr_t va)
{
	mmuext_op_t op;

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_INVLPG_ALL;
	op.arg1.linear_addr = va;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_bcast_invlpg");
	}

	return;
}
Example 13
void
xpq_queue_machphys_update(paddr_t ma, paddr_t pa)
{

	mmu_update_t *xpq_queue = xpq_queue_array[curcpu()->ci_cpuid];
	int xpq_idx = xpq_idx_array[curcpu()->ci_cpuid];

	XENPRINTK2(("xpq_queue_machphys_update ma=0x%" PRIx64 " pa=0x%" PRIx64
	    "\n", (int64_t)ma, (int64_t)pa));

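	/* Queue an update of the machine-to-physical (M2P) translation table. */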
	xpq_queue[xpq_idx].ptr = ma | MMU_MACHPHYS_UPDATE;
	xpq_queue[xpq_idx].val = pa >> PAGE_SHIFT;
	xpq_increment_idx();
#ifdef XENDEBUG_SYNC
	xpq_flush_queue();
#endif
}
Example 14
/* This is a synchronous call. */
void
xen_vcpu_bcast_invlpg(vaddr_t sva, vaddr_t eva)
{
	KASSERT(eva > sva);

	/* Flush pending page updates */
	xpq_flush_queue();

	/* Align to nearest page boundary */
	sva &= ~PAGE_MASK;
	eva &= ~PAGE_MASK;

	for ( ; sva <= eva; sva += PAGE_SIZE) {
		xen_bcast_invlpg(sva);
	}

	return;
}
Example 15
/* This is a synchronous call. */
void
xen_mcast_tlbflush(kcpuset_t *kc)
{
	xcpumask_t xcpumask;
	mmuext_op_t op;

	kcpuset_export_u32(kc, &xcpumask.xcpum_km[0], sizeof(xcpumask));

	/* Flush pending page updates */
	xpq_flush_queue();

	op.cmd = MMUEXT_TLB_FLUSH_MULTI;
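	/* Target only the vCPUs named in the exported cpuset. */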
	op.arg2.vcpumask = &xcpumask.xcpum_xm;

	if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF) < 0) {
		panic("xen_mcast_tlbflush");
	}

	return;
}
Example 16
static void
xennet_alloc_rx_buffer(struct xennet_xenbus_softc *sc)
{
	RING_IDX req_prod = sc->sc_rx_ring.req_prod_pvt;
	RING_IDX i;
	struct xennet_rxreq *req;
	struct xen_memory_reservation reservation;
	int s1, s2;
	paddr_t pfn;

	s1 = splnet();
	for (i = 0; sc->sc_free_rxreql != 0; i++) {
		req  = SLIST_FIRST(&sc->sc_rxreq_head);
		KASSERT(req != NULL);
		KASSERT(req == &sc->sc_rxreqs[req->rxreq_id]);
		RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->id =
		    req->rxreq_id;
		if (xengnt_grant_transfer(sc->sc_xbusd->xbusd_otherend_id,
		    &req->rxreq_gntref) != 0) {
			break;
		}
		RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->gref =
		    req->rxreq_gntref;

		SLIST_REMOVE_HEAD(&sc->sc_rxreq_head, rxreq_next);
		sc->sc_free_rxreql--;

		/* unmap the page */
		MULTI_update_va_mapping(&rx_mcl[i], req->rxreq_va, 0, 0);
		/*
		 * Remove this page from pseudo phys map before
		 * passing back to Xen.
		 */
		pfn = (req->rxreq_pa - XPMAP_OFFSET) >> PAGE_SHIFT;
		xennet_pages[i] = xpmap_phys_to_machine_mapping[pfn];
		xpmap_phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
	}
	if (i == 0) {
		splx(s1);
		return;
	}
	/* also make sure to flush all TLB entries */
	rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
	/*
	 * We may have allocated buffers which have entries
	 * outstanding in the page update queue -- make sure we flush
	 * those first!
	 */
	s2 = splvm();
	xpq_flush_queue();
	splx(s2);
	/* now decrease reservation */
	reservation.extent_start = xennet_pages;
	reservation.nr_extents = i;
	reservation.extent_order = 0;
	reservation.address_bits = 0;
	reservation.domid = DOMID_SELF;
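	/* Append the decrease_reservation hypercall as the final multicall entry. */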
	rx_mcl[i].op = __HYPERVISOR_memory_op;
	rx_mcl[i].args[0] = XENMEM_decrease_reservation;
	rx_mcl[i].args[1] = (unsigned long)&reservation;
	HYPERVISOR_multicall(rx_mcl, i+1);
	if (__predict_false(rx_mcl[i].result != i)) {
		panic("xennet_alloc_rx_buffer: XENMEM_decrease_reservation");
	}
	sc->sc_rx_ring.req_prod_pvt = req_prod + i;
	RING_PUSH_REQUESTS(&sc->sc_rx_ring);

	splx(s1);
	return;
}