static void pirq_unmask_and_notify(unsigned int evtchn, unsigned int irq)
{
    struct physdev_eoi eoi = { .irq = evtchn_get_xen_pirq(irq) };

    if (pirq_eoi_does_unmask) {
        if (test_bit(eoi.irq, pirq_needs_eoi))
            VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
        else
            unmask_evtchn(evtchn);
    } else if (test_bit(irq - PIRQ_BASE, pirq_needs_eoi)) {
        if (smp_processor_id() != cpu_from_evtchn(evtchn)) {
            struct evtchn_unmask unmask = { .port = evtchn };
            struct multicall_entry mcl[2];

            /* Batch the unmask and the EOI into a single multicall. */
            mcl[0].op = __HYPERVISOR_event_channel_op;
            mcl[0].args[0] = EVTCHNOP_unmask;
            mcl[0].args[1] = (unsigned long)&unmask;
            mcl[1].op = __HYPERVISOR_physdev_op;
            mcl[1].args[0] = PHYSDEVOP_eoi;
            mcl[1].args[1] = (unsigned long)&eoi;

            if (HYPERVISOR_multicall(mcl, 2))
                BUG();
        } else {
            unmask_evtchn(evtchn);
            VOID(HYPERVISOR_physdev_op(PHYSDEVOP_eoi, &eoi));
        }
    } else
        unmask_evtchn(evtchn);
}
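/*
 * Aside (sketch, not from the surrounding source): multicall errors are
 * reported on two levels. HYPERVISOR_multicall() returns nonzero only when
 * the batch itself cannot be issued; each individual call's return value
 * lands in its multicall_entry's result field. A hypothetical helper that
 * checks both levels:
 */
static int multicall_checked(struct multicall_entry *mcl, unsigned int nr)
{
    unsigned int i;

    if (HYPERVISOR_multicall(mcl, nr))
        return -1;                      /* batch could not be issued */

    for (i = 0; i < nr; i++)
        if ((long)mcl[i].result < 0)
            return -1;                  /* entry i's hypercall failed */

    return 0;
}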
void xen_mc_flush(void)
{
    struct mc_buffer *b = &__get_cpu_var(mc_buffer);
    int ret = 0;
    unsigned long flags;

    BUG_ON(preemptible());

    /* Disable interrupts in case someone comes in and queues
       something in the middle */
    local_irq_save(flags);

    if (b->mcidx) {
        int i;

        if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
            BUG();
        for (i = 0; i < b->mcidx; i++)
            if (b->entries[i].result < 0)
                ret++;
        b->mcidx = 0;
        b->argidx = 0;
    } else
        BUG_ON(b->argidx != 0);

    local_irq_restore(flags);

    BUG_ON(ret);
}
void xen_mc_flush(void)
{
    struct mc_buffer *b = &__get_cpu_var(mc_buffer);
    int ret = 0;
    unsigned long flags;
    int i;

    BUG_ON(preemptible());

    /* Disable interrupts in case someone comes in and queues
       something in the middle */
    local_irq_save(flags);

    mc_add_stats(b);

    if (b->mcidx) {
#if MC_DEBUG
        memcpy(b->debug, b->entries,
               b->mcidx * sizeof(struct multicall_entry));
#endif

        if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
            BUG();
        for (i = 0; i < b->mcidx; i++)
            if (b->entries[i].result < 0)
                ret++;

#if MC_DEBUG
        if (ret) {
            printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
                   ret, smp_processor_id());
            dump_stack();
            for (i = 0; i < b->mcidx; i++) {
                printk(KERN_DEBUG " call %2d/%d: op=%lu arg=[%lx] result=%ld\t%pF\n",
                       i + 1, b->mcidx,
                       b->debug[i].op,
                       b->debug[i].args[0],
                       b->entries[i].result,
                       b->caller[i]);
            }
        }
#endif

        b->mcidx = 0;
        b->argidx = 0;
    } else
        BUG_ON(b->argidx != 0);

    /* Run deferred callbacks now that the batch has been issued. */
    for (i = 0; i < b->cbidx; i++) {
        struct callback *cb = &b->callbacks[i];

        (*cb->fn)(cb->data);
    }
    b->cbidx = 0;

    local_irq_restore(flags);

    WARN_ON(ret);
}
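/*
 * Aside (hedged usage sketch): the b->callbacks loop above is what makes
 * deferred cleanup possible. In the Linux tree, callers queue work with
 * xen_mc_callback(fn, data), and it runs only after the pending batch has
 * been flushed. free_old_page() and old_page are made up for illustration:
 */
static void free_old_page(void *data)
{
    free_page((unsigned long)data);
}

/* ... in code whose queued multicall entries still reference the page: */
xen_mc_callback(free_old_page, (void *)old_page);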
int do_xen_hypercall(int xc_handle, privcmd_hypercall_t *hypercall)
{
    multicall_entry_t call;
    int i, ret;

    call.op = hypercall->op;
    for (i = 0; i < sizeof(hypercall->arg) / sizeof(*hypercall->arg); i++)
        call.args[i] = hypercall->arg[i];

    ret = HYPERVISOR_multicall(&call, 1);
    if (ret < 0) {
        errno = -ret;
        return -1;
    }
    if ((long)call.result < 0) {
        errno = -(long)call.result;
        return -1;
    }

    return call.result;
}
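/*
 * Aside (hypothetical caller, assuming the privcmd_hypercall_t layout used
 * above): query the running Xen version through the single-entry multicall
 * path. XENVER_version ignores its argument pointer, so arg[1] stays 0.
 */
privcmd_hypercall_t hc = {
    .op  = __HYPERVISOR_xen_version,
    .arg = { XENVER_version, 0 },
};
int ver = do_xen_hypercall(xc_handle, &hc);
if (ver < 0)
    perror("do_xen_hypercall");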
static void
xennet_free_rx_buffer(struct xennet_xenbus_softc *sc)
{
    paddr_t ma, pa;
    vaddr_t va;
    RING_IDX i;
    mmu_update_t mmu[1];
    multicall_entry_t mcl[2];

    int s = splbio();

    DPRINTF(("%s: xennet_free_rx_buffer\n", device_xname(sc->sc_dev)));
    /* get back memory from RX ring */
    for (i = 0; i < NET_RX_RING_SIZE; i++) {
        struct xennet_rxreq *rxreq = &sc->sc_rxreqs[i];

        /*
         * if the buffer is in transit in the network stack, wait for
         * the network stack to free it.
         */
        while ((volatile grant_ref_t)rxreq->rxreq_gntref ==
            GRANT_STACK_REF)
            tsleep(xennet_xenbus_detach, PRIBIO, "xnet_free", hz/2);

        if (rxreq->rxreq_gntref != GRANT_INVALID_REF) {
            /*
             * this req is still granted. Get back the page or
             * allocate a new one, and remap it.
             */
            SLIST_INSERT_HEAD(&sc->sc_rxreq_head, rxreq,
                rxreq_next);
            sc->sc_free_rxreql++;
            ma = xengnt_revoke_transfer(rxreq->rxreq_gntref);
            rxreq->rxreq_gntref = GRANT_INVALID_REF;
            if (ma == 0) {
                u_long pfn;
                struct xen_memory_reservation xenres;
                /*
                 * transfer not complete, we lost the page.
                 * Get one from hypervisor
                 */
                xenres.extent_start = &pfn;
                xenres.nr_extents = 1;
                xenres.extent_order = 0;
                xenres.address_bits = 31;
                xenres.domid = DOMID_SELF;
                if (HYPERVISOR_memory_op(
                    XENMEM_increase_reservation, &xenres) < 0) {
                    panic("xennet_free_rx_buffer: "
                        "can't get memory back");
                }
                ma = pfn;
                KASSERT(ma != 0);
            }
            pa = rxreq->rxreq_pa;
            va = rxreq->rxreq_va;
            /* remap the page */
            mmu[0].ptr = (ma << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE;
            mmu[0].val = ((pa - XPMAP_OFFSET) >> PAGE_SHIFT);
            MULTI_update_va_mapping(&mcl[0], va,
                (ma << PAGE_SHIFT) | PG_V | PG_KW,
                UVMF_TLB_FLUSH|UVMF_ALL);
            xpmap_phys_to_machine_mapping[
                (pa - XPMAP_OFFSET) >> PAGE_SHIFT] = ma;

            mcl[1].op = __HYPERVISOR_mmu_update;
            mcl[1].args[0] = (unsigned long)mmu;
            mcl[1].args[1] = 1;
            mcl[1].args[2] = 0;
            mcl[1].args[3] = DOMID_SELF;
            HYPERVISOR_multicall(mcl, 2);
        }
    }
    splx(s);
}
static void
xennet_alloc_rx_buffer(struct xennet_xenbus_softc *sc)
{
    RING_IDX req_prod = sc->sc_rx_ring.req_prod_pvt;
    RING_IDX i;
    struct xennet_rxreq *req;
    struct xen_memory_reservation reservation;
    int s1, s2;
    paddr_t pfn;

    s1 = splnet();
    for (i = 0; sc->sc_free_rxreql != 0; i++) {
        req = SLIST_FIRST(&sc->sc_rxreq_head);
        KASSERT(req != NULL);
        KASSERT(req == &sc->sc_rxreqs[req->rxreq_id]);
        RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->id =
            req->rxreq_id;
        if (xengnt_grant_transfer(sc->sc_xbusd->xbusd_otherend_id,
            &req->rxreq_gntref) != 0) {
            break;
        }
        RING_GET_REQUEST(&sc->sc_rx_ring, req_prod + i)->gref =
            req->rxreq_gntref;

        SLIST_REMOVE_HEAD(&sc->sc_rxreq_head, rxreq_next);
        sc->sc_free_rxreql--;

        /* unmap the page */
        MULTI_update_va_mapping(&rx_mcl[i], req->rxreq_va, 0, 0);
        /*
         * Remove this page from pseudo phys map before
         * passing back to Xen.
         */
        pfn = (req->rxreq_pa - XPMAP_OFFSET) >> PAGE_SHIFT;
        xennet_pages[i] = xpmap_phys_to_machine_mapping[pfn];
        xpmap_phys_to_machine_mapping[pfn] = INVALID_P2M_ENTRY;
    }
    if (i == 0) {
        splx(s1);
        return;
    }
    /* also make sure to flush all TLB entries */
    rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] = UVMF_TLB_FLUSH|UVMF_ALL;
    /*
     * We may have allocated buffers which have entries
     * outstanding in the page update queue -- make sure we flush
     * those first!
     */
    s2 = splvm();
    xpq_flush_queue();
    splx(s2);
    /* now decrease reservation */
    reservation.extent_start = xennet_pages;
    reservation.nr_extents = i;
    reservation.extent_order = 0;
    reservation.address_bits = 0;
    reservation.domid = DOMID_SELF;
    rx_mcl[i].op = __HYPERVISOR_memory_op;
    rx_mcl[i].args[0] = XENMEM_decrease_reservation;
    rx_mcl[i].args[1] = (unsigned long)&reservation;
    HYPERVISOR_multicall(rx_mcl, i+1);
    if (__predict_false(rx_mcl[i].result != i)) {
        panic("xennet_alloc_rx_buffer: "
            "XENMEM_decrease_reservation");
    }
    sc->sc_rx_ring.req_prod_pvt = req_prod + i;
    RING_PUSH_REQUESTS(&sc->sc_rx_ring);

    splx(s1);
    return;
}
void xen_mc_flush(void)
{
    struct mc_buffer *b = &__get_cpu_var(mc_buffer);
    struct multicall_entry *mc;
    int ret = 0;
    unsigned long flags;
    int i;

    BUG_ON(preemptible());

    /* Disable interrupts in case someone comes in and queues
       something in the middle */
    local_irq_save(flags);

    trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx);

    switch (b->mcidx) {
    case 0:
        /* no-op */
        BUG_ON(b->argidx != 0);
        break;

    case 1:
        /* Singleton multicall - bypass multicall machinery
           and just do the call directly. */
        mc = &b->entries[0];

        mc->result = privcmd_call(mc->op,
                                  mc->args[0], mc->args[1], mc->args[2],
                                  mc->args[3], mc->args[4]);
        ret = mc->result < 0;
        break;

    default:
#if MC_DEBUG
        memcpy(b->debug, b->entries,
               b->mcidx * sizeof(struct multicall_entry));
#endif

        if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0)
            BUG();
        for (i = 0; i < b->mcidx; i++)
            if (b->entries[i].result < 0)
                ret++;

#if MC_DEBUG
        if (ret) {
            printk(KERN_ERR "%d multicall(s) failed: cpu %d\n",
                   ret, smp_processor_id());
            dump_stack();
            for (i = 0; i < b->mcidx; i++) {
                printk(KERN_DEBUG " call %2d/%d: op=%lu arg=[%lx] result=%ld\t%pF\n",
                       i + 1, b->mcidx,
                       b->debug[i].op,
                       b->debug[i].args[0],
                       b->entries[i].result,
                       b->caller[i]);
            }
        }
#endif
    }

    b->mcidx = 0;
    b->argidx = 0;

    for (i = 0; i < b->cbidx; i++) {
        struct callback *cb = &b->callbacks[i];

        (*cb->fn)(cb->data);
    }
    b->cbidx = 0;

    local_irq_restore(flags);

    WARN_ON(ret);
}
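/*
 * Aside (sketch of the caller side, using the multicalls.h helpers from the
 * same Linux tree): reserve a buffer entry with xen_mc_entry(), fill it, and
 * let xen_mc_issue() decide whether to flush immediately or stay batched in
 * lazy MMU mode. queue_mmu_update() is made up for illustration.
 */
static void queue_mmu_update(pte_t *ptep, pte_t pteval)
{
    struct mmu_update *u;
    struct multicall_space mcs = xen_mc_entry(sizeof(*u));

    u = mcs.args;
    u->ptr = virt_to_machine(ptep).maddr;
    u->val = pte_val_ma(pteval);
    MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF);

    /* Calls xen_mc_flush() unless we are inside lazy MMU mode. */
    xen_mc_issue(PARAVIRT_LAZY_MMU);
}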