/*
 * Disconnect the supplicant (non-temple) side of a V2V channel.
 *
 * Unmaps the grant mappings of the peer's ring/control pages, closes both
 * event channels, advertises the "disconnected" state via xenstore, and
 * finally tears down the channel object itself.
 *
 * Returns STATUS_SUCCESS, or the failure status of
 * v2v_change_local_state() — in which case the channel object is NOT
 * destroyed (the caller may retry).
 */
static NTSTATUS v2v_disconnect_supplicant(const struct v2v_channel *_channel)
{
    NTSTATUS status;
    struct v2v_channel *channel = (struct v2v_channel *)_channel;

    /* Unmap the peer's shared pages. Note the cross pairing: the local
       cons ring is the peer's prod ring, hence prod_detail — this matches
       the pairing used in v2v_destroy_channel(). */
    v2v_xenops_grant_unmap((void *)channel->cons_sring, channel->u.supplicant.prod_detail);
    v2v_xenops_grant_unmap(channel->prod_sring, channel->u.supplicant.cons_detail);
    v2v_xenops_grant_unmap(channel->control, channel->u.supplicant.control_detail);
    /* Clear the mapping details and ring pointers so that the later
       v2v_destroy_channel() call does not unmap them a second time. */
    channel->u.supplicant.prod_detail = NULL;
    channel->u.supplicant.cons_detail = NULL;
    channel->u.supplicant.control_detail = NULL;
    channel->prod_sring = NULL;
    channel->cons_sring = NULL;
    channel->control = NULL;

    /* Close both event channels, nulling the ports so destroy skips them. */
    if (!is_null_EVTCHN_PORT(channel->receive_evtchn_port)) {
        EvtchnClose(channel->receive_evtchn_port);
        channel->receive_evtchn_port = null_EVTCHN_PORT();
    }
    if (!is_null_EVTCHN_PORT(channel->send_evtchn_port)) {
        EvtchnClose(channel->send_evtchn_port);
        channel->send_evtchn_port = null_EVTCHN_PORT();
    }

    /* Tell the peer we are gone. On failure the (already unmapped)
       channel object is deliberately left alive for the caller. */
    status = v2v_change_local_state(channel, XBT_NIL, v2v_state_disconnected);
    if (!NT_SUCCESS(status))
        return status;

    /* FALSE: we are not the temple end, so there are no local grants to free. */
    v2v_destroy_channel(channel, FALSE);

    return STATUS_SUCCESS;
}
/*
 * Free a V2V channel object and (optionally) the resources behind it.
 *
 * For a supplicant channel the peer's pages are unmapped; for a temple
 * channel, when free_temple is TRUE, the local grant references are
 * revoked and the locally allocated ring/control pages are freed.
 * free_temple == FALSE is used when the temple-side resources were
 * already released (or must not be touched because grants are still
 * outstanding) — see v2v_disconnect_temple().
 *
 * Always closes any remaining event channels, releases the debug
 * callback, and frees the channel structure itself.
 */
static void v2v_destroy_channel(const struct v2v_channel *_chan, BOOLEAN free_temple)
{
    struct v2v_channel *chan = (struct v2v_channel *)_chan;
    unsigned x;

    if (chan->remote_state_watch)
        xenbus_unregister_watch(chan->remote_state_watch);
    if (chan->local_prefix)
        ExFreePoolWithTag(chan->local_prefix, V2V_TAG);
    if (chan->remote_prefix)
        ExFreePoolWithTag(chan->remote_prefix, V2V_TAG);

    if (!chan->is_temple) {
        /* Supplicant: unmap the peer's pages. Cross pairing of
           cons_sring/prod_detail matches v2v_disconnect_supplicant().
           NOTE(review): unmap helpers are presumably NULL-tolerant, since
           disconnect clears these fields first — confirm. */
        v2v_xenops_grant_unmap((void *)chan->cons_sring, chan->u.supplicant.prod_detail);
        v2v_xenops_grant_unmap(chan->prod_sring, chan->u.supplicant.cons_detail);
        v2v_xenops_grant_unmap(chan->control, chan->u.supplicant.control_detail);
    }
    else if (free_temple) { /* and is temple */
        /* Temple: revoke every still-live grant ref through the cache,
           then release the cache itself. */
        if (chan->u.temple.grant_cache) {
            for (x = 0; x < chan->nr_prod_ring_pages; x++) {
                if (!is_null_GRANT_REF(chan->u.temple.prod_grefs[x]))
                    GnttabEndForeignAccessCache(chan->u.temple.prod_grefs[x],
                                                chan->u.temple.grant_cache);
            }
            for (x = 0; x < chan->nr_cons_ring_pages; x++) {
                if (!is_null_GRANT_REF(chan->u.temple.cons_grefs[x]))
                    GnttabEndForeignAccessCache(chan->u.temple.cons_grefs[x],
                                                chan->u.temple.grant_cache);
            }
            if (!is_null_GRANT_REF(chan->u.temple.control_gref))
                GnttabEndForeignAccessCache(chan->u.temple.control_gref,
                                            chan->u.temple.grant_cache);
            GnttabFreeCache(chan->u.temple.grant_cache);
        }
        /* Only safe to free the pages once foreign access has ended. */
        if (chan->prod_sring)
            ExFreePoolWithTag(chan->prod_sring, V2V_TAG);
        if (chan->cons_sring)
            ExFreePoolWithTag((void*)chan->cons_sring, V2V_TAG);
        if (chan->control)
            ExFreePoolWithTag(chan->control, V2V_TAG);
    }

    if (!is_null_EVTCHN_PORT(chan->receive_evtchn_port))
        EvtchnClose(chan->receive_evtchn_port);
    if (!is_null_EVTCHN_PORT(chan->send_evtchn_port))
        EvtchnClose(chan->send_evtchn_port);

    EvtchnReleaseDebugCallback(chan->debug_callback);

    ExFreePoolWithTag(chan, V2V_TAG);
}
/*
 * Tear down a XEN_LOWER instance: unregister its suspend handler, stop
 * and close its event channel, revoke the shared-ring grant, and free
 * the structure. Safe to call with NULL.
 */
VOID XenLowerFree(PXEN_LOWER XenLower)
{
    if (XenLower == NULL)
        return;

    /* Detach from suspend notification first so nothing fires mid-teardown. */
    if (XenLower->LateSuspendHandler)
        EvtchnUnregisterSuspendHandler(XenLower->LateSuspendHandler);

    /* Quiesce, then close, the event channel if one was ever bound. */
    if (!is_null_EVTCHN_PORT(XenLower->EvtchnPort)) {
        EvtchnPortStop(XenLower->EvtchnPort);
        EvtchnClose(XenLower->EvtchnPort);
    }

    /* Revoke the peer's access to the shared ring; result deliberately ignored. */
    if (!is_null_GRANT_REF(XenLower->SringGrantRef))
        (VOID)GnttabEndForeignAccess(XenLower->SringGrantRef);

    ExFreePool(XenLower);
}
/*
 * Release the scsifilt's event-channel port, if any, and always leave
 * the field holding the null port afterwards.
 */
static void close_evtchn(struct scsifilt *sf)
{
    if (is_null_EVTCHN_PORT(sf->evtchn_port)) {
        /* Nothing to close; normalize the field anyway. */
        sf->evtchn_port = null_EVTCHN_PORT();
        return;
    }
    EvtchnClose(sf->evtchn_port);
    sf->evtchn_port = null_EVTCHN_PORT();
}
/*
 * Stop and close the XEN_LOWER event channel, resetting the stored port
 * to null. No-op if no port is currently bound.
 */
VOID XenLowerDisconnectEvtChnDPC(PXEN_LOWER XenLower)
{
    if (is_null_EVTCHN_PORT(XenLower->EvtchnPort))
        return;

    /* Stop delivery before closing so no DPC runs on a dead port. */
    EvtchnPortStop(XenLower->EvtchnPort);
    EvtchnClose(XenLower->EvtchnPort);
    XenLower->EvtchnPort = null_EVTCHN_PORT();
}
/*
 * Close the V4V VIRQ event channel under the virq spin lock.
 *
 * Finding the port already null is not an error — this happens when
 * both a PnP stop and a remove arrive — in which case nothing is
 * logged and nothing is changed.
 */
static VOID V4vUninitializeEventChannel(PDEVICE_OBJECT fdo)
{
    XENV4V_EXTENSION *pde = V4vGetDeviceExtension(fdo);
    KLOCK_QUEUE_HANDLE lqh;
    BOOLEAN closed = FALSE;

    KeAcquireInStackQueuedSpinLock(&pde->virqLock, &lqh);
    if (!is_null_EVTCHN_PORT(pde->virqPort)) {
        EvtchnClose(pde->virqPort);
        pde->virqPort = null_EVTCHN_PORT();
        closed = TRUE;
    }
    KeReleaseInStackQueuedSpinLock(&lqh);

    /* Trace outside the lock, and only when we actually closed a port. */
    if (closed)
        TraceNotice(("V4V VIRQ disconnected.\n"));
}
/*
 * Disconnect the temple (ring-owning) side of a V2V channel.
 *
 * Protocol: advertise "disconnecting", wait (in a xenbus transaction
 * retry loop) for the peer to leave the connected state, then atomically
 * record "disconnected". After that, revoke the grant references and
 * free the ring/control pages — but only those whose grants the
 * hypervisor actually released; anything still granted to the peer is
 * intentionally leaked rather than handed back to the pool while the
 * peer can still write to it.
 *
 * Returns STATUS_SUCCESS once the channel object has been destroyed,
 * or an error status (channel left alive) if the state handshake fails.
 */
static NTSTATUS v2v_disconnect_temple(const struct v2v_channel *_channel)
{
    NTSTATUS status = STATUS_SUCCESS;
    xenbus_transaction_t xbt = {0};
    struct v2v_channel *channel = (struct v2v_channel *)_channel;
    enum v2v_endpoint_state remote_state;
    BOOLEAN failed, any_failed = FALSE;
    unsigned x;

    status = v2v_change_local_state(channel, XBT_NIL, v2v_state_disconnecting);
    if (!NT_SUCCESS(status))
        return status;

    channel->u.temple.accepted = FALSE;

    /* Get the other end to disconnect */
    for (;;) {
        xenbus_transaction_start(&xbt);
        status = v2v_get_remote_state_internal(xbt, channel, &remote_state);
        switch (remote_state) {
        case v2v_state_unknown:
            /* Peer's xenstore area gone entirely: treat as disconnected. */
            if (status == STATUS_OBJECT_NAME_NOT_FOUND)
                break;
            xenbus_transaction_end(xbt, 1);
            return status;

        /* The first two shouldn't really happen, but sometimes can if
           we've managed to screw (e.g. if two processes try to use the
           same endpoint).  Try to recover. */
        case v2v_state_unready:
        case v2v_state_listening:
        case v2v_state_disconnecting:
        case v2v_state_disconnected:
        case v2v_state_crashed:
            break;

        case v2v_state_connected:
            /* Peer still attached: abort the transaction, block until the
               control event signals a state change, then re-examine. */
            xenbus_transaction_end(xbt, 1);
            KeWaitForSingleObject(&channel->control_event, Executive,
                                  KernelMode, FALSE, NULL);
            continue;
        }
        status = v2v_change_local_state(channel, xbt, v2v_state_disconnected);
        if (!NT_SUCCESS(status)) {
            xenbus_transaction_end(xbt, 1);
            return status;
        }
        status = xenbus_transaction_end(xbt, 0);
        if (NT_SUCCESS(status))
            break; /* drop out of loop and do rest */
        if (status == STATUS_RETRY)
            continue; /* try again */
        return status; /* else return the error */
    }

    XM_ASSERT(channel->u.temple.grant_cache != NULL);

    /* Revoke the producer-ring grants; free the ring only if every page's
       grant was successfully withdrawn. */
    failed = FALSE;
    for (x = 0; x < channel->nr_prod_ring_pages; x++) {
        if (!is_null_GRANT_REF(channel->u.temple.prod_grefs[x])) {
            status = GnttabEndForeignAccessCache(channel->u.temple.prod_grefs[x],
                                                 channel->u.temple.grant_cache);
            if (NT_SUCCESS(status))
                channel->u.temple.prod_grefs[x] = null_GRANT_REF();
            else
                failed = any_failed = TRUE;
        }
    }
    if (!failed) {
        ExFreePoolWithTag(channel->prod_sring, V2V_TAG);
        channel->prod_sring = NULL;
    }

    /* Same for the consumer ring. */
    failed = FALSE;
    for (x = 0; x < channel->nr_cons_ring_pages; x++) {
        if (!is_null_GRANT_REF(channel->u.temple.cons_grefs[x])) {
            status = GnttabEndForeignAccessCache(channel->u.temple.cons_grefs[x],
                                                 channel->u.temple.grant_cache);
            if (NT_SUCCESS(status))
                channel->u.temple.cons_grefs[x] = null_GRANT_REF();
            else
                failed = any_failed = TRUE;
        }
    }
    if (!failed) {
        ExFreePoolWithTag((void *)channel->cons_sring, V2V_TAG);
        channel->cons_sring = NULL;
    }

    /* And the single control page. */
    if (!is_null_GRANT_REF(channel->u.temple.control_gref)) {
        status = GnttabEndForeignAccessCache(channel->u.temple.control_gref,
                                             channel->u.temple.grant_cache);
        if (NT_SUCCESS(status)) {
            channel->u.temple.control_gref = null_GRANT_REF();
            ExFreePoolWithTag(channel->control, V2V_TAG);
            channel->control = NULL;
        }
        else
            any_failed = TRUE;
    }

    /* The cache can only be released once no ref from it is outstanding. */
    if (!any_failed)
        GnttabFreeCache(channel->u.temple.grant_cache);

    if (!is_null_EVTCHN_PORT(channel->receive_evtchn_port)) {
        EvtchnClose(channel->receive_evtchn_port);
        channel->receive_evtchn_port = null_EVTCHN_PORT();
    }
    if (!is_null_EVTCHN_PORT(channel->send_evtchn_port)) {
        EvtchnClose(channel->send_evtchn_port);
        channel->send_evtchn_port = null_EVTCHN_PORT();
    }

    /* We either freed the rings here or they could not be freed. Prevent
       v2v_destroy_channel() from trying to free grants/rings with
       outstanding grant refs */
    v2v_destroy_channel(channel, FALSE);

    return STATUS_SUCCESS;
}
/*
 * Create the temple (ring-owning) end of a V2V channel and advertise it
 * as listening.
 *
 * Allocates the producer/consumer rings (PAGE_SIZE << order each, up to
 * MAX_RING_PAGE_ORDER) and a one-page control area, grants them to the
 * peer domain, allocates unbound event channels for both directions,
 * publishes ring orders / control gref / event-channel ports under the
 * local xenbus prefix inside a transaction, and moves the local state to
 * "listening". The whole sequence retries from scratch when the xenbus
 * transaction ends with STATUS_RETRY.
 *
 * Parameters:
 *   xenbus_prefix        - xenstore prefix identifying this endpoint.
 *   channel              - out: the new channel on success, NULL otherwise.
 *   prod/cons_ring_page_order - log2 page counts for the two rings.
 *   async_values         - optional DPC/context pairs; when given, both
 *                          receive_dpc and send_dpc must be non-NULL.
 *
 * Returns STATUS_SUCCESS or an error status; on error all partially
 * created resources are torn down via v2v_destroy_channel(..., TRUE).
 */
NTSTATUS
v2v_listen(const char *xenbus_prefix, struct v2v_channel **channel,
           unsigned prod_ring_page_order, unsigned cons_ring_page_order,
           struct v2v_async *async_values)
{
    NTSTATUS status = STATUS_SUCCESS;
    unsigned prod_ring_size = PAGE_SIZE << prod_ring_page_order;
    unsigned cons_ring_size = PAGE_SIZE << cons_ring_page_order;
    struct v2v_channel *chan;
    xenbus_transaction_t xbt = {0};
    BOOLEAN xbt_pending = FALSE;
    PHYSICAL_ADDRESS pa;
    unsigned x;
    unsigned xen_receive_port, xen_send_port;
    uint32_t xen_gref;

    XM_ASSERT(channel != NULL);
    XM_ASSERT(xenbus_prefix != NULL);

    if (prod_ring_page_order > MAX_RING_PAGE_ORDER ||
        cons_ring_page_order > MAX_RING_PAGE_ORDER)
        return STATUS_INVALID_PARAMETER;

    /* Async mode requires both DPCs. */
    if (async_values && (!async_values->receive_dpc || !async_values->send_dpc))
        return STATUS_INVALID_PARAMETER;

    *channel = NULL;

    if (!xenbus_await_initialisation())
        return STATUS_NO_SUCH_DEVICE;

    chan = v2v_make_channel(xenbus_prefix, async_values);
    if (!chan)
        return STATUS_NO_MEMORY;

    chan->is_temple = TRUE;

    /* We own the ring and control pages; the peer will map them. */
    chan->prod_sring = ExAllocatePoolWithTag(NonPagedPool, prod_ring_size, V2V_TAG);
    chan->cons_sring = ExAllocatePoolWithTag(NonPagedPool, cons_ring_size, V2V_TAG);
    chan->control = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, V2V_TAG);
    if (!chan->prod_sring || !chan->cons_sring || !chan->control)
        goto err_nomem;

    RtlZeroMemory(chan->prod_sring, prod_ring_size);
    RtlZeroMemory((void *)chan->cons_sring, cons_ring_size);
    RtlZeroMemory(chan->control, PAGE_SIZE);

    chan->nr_prod_ring_pages = 1 << prod_ring_page_order;
    chan->nr_cons_ring_pages = 1 << cons_ring_page_order;

    /* pre-allocate the grant refs we are going to need below in a
       grant cache */
    chan->u.temple.grant_cache =
        GnttabAllocCache(chan->nr_prod_ring_pages + chan->nr_cons_ring_pages + 1);
    if (!chan->u.temple.grant_cache)
        goto err_nomem;

    v2v_nc2_attach_rings_temple(&chan->nc2_rings,
                                chan->cons_sring,
                                cons_ring_size,
                                chan->prod_sring,
                                prod_ring_size,
                                chan->control);

    /* Transaction loop: everything from grant creation to the state
       change must commit atomically; STATUS_RETRY unwinds and repeats. */
    for (;;) {
        xenbus_transaction_start(&xbt);
        xbt_pending = TRUE;

        status = v2v_connect_channel_xenbus(chan, xbt);
        if (!NT_SUCCESS(status))
            goto err;

        /* Grant the producer ring read-only to the peer and publish
           each page's gref. */
        for (x = 0; x < 1u << prod_ring_page_order; x++) {
            pa = MmGetPhysicalAddress((void *)((ULONG_PTR)chan->prod_sring + x * PAGE_SIZE));
            chan->u.temple.prod_grefs[x] =
                GnttabGrantForeignAccessCache(chan->peer_domid,
                                              PHYS_TO_PFN(pa),
                                              GRANT_MODE_RO,
                                              chan->u.temple.grant_cache);
            XM_ASSERT(!is_null_GRANT_REF(chan->u.temple.prod_grefs[x]));

            status = v2v_write_grantref(chan, xbt, x, TRUE);
            if (!NT_SUCCESS(status))
                goto err;
        }

        /* Consumer ring is granted read-write: the peer produces into it. */
        for (x = 0; x < 1u << cons_ring_page_order; x++) {
            pa = MmGetPhysicalAddress((void *)((ULONG_PTR)chan->cons_sring + x * PAGE_SIZE));
            chan->u.temple.cons_grefs[x] =
                GnttabGrantForeignAccessCache(chan->peer_domid,
                                              PHYS_TO_PFN(pa),
                                              GRANT_MODE_RW,
                                              chan->u.temple.grant_cache);
            XM_ASSERT(!is_null_GRANT_REF(chan->u.temple.cons_grefs[x]));

            status = v2v_write_grantref(chan, xbt, x, FALSE);
            if (!NT_SUCCESS(status))
                goto err;
        }

        pa = MmGetPhysicalAddress((void *)((ULONG_PTR)chan->control));
        chan->u.temple.control_gref =
            GnttabGrantForeignAccessCache(chan->peer_domid,
                                          PHYS_TO_PFN(pa),
                                          GRANT_MODE_RW,
                                          chan->u.temple.grant_cache);
        XM_ASSERT(!is_null_GRANT_REF(chan->u.temple.control_gref));

        /* Unbound event channels; sync mode signals internal events,
           async mode dispatches the caller's DPCs. */
        chan->receive_evtchn_port =
            EvtchnAllocUnboundDpc(chan->peer_domid,
                                  (chan->is_sync ? v2v_dpc : async_values->receive_dpc),
                                  (chan->is_sync ?
                                   &chan->s.sync.receive_event : async_values->receive_ctx));
        if (is_null_EVTCHN_PORT(chan->receive_evtchn_port)) {
            status = STATUS_INSUFFICIENT_RESOURCES;
            goto err;
        }
        xen_receive_port = xen_EVTCHN_PORT(chan->receive_evtchn_port);

        chan->send_evtchn_port =
            EvtchnAllocUnboundDpc(chan->peer_domid,
                                  (chan->is_sync ? v2v_dpc : async_values->send_dpc),
                                  (chan->is_sync ?
                                   &chan->s.sync.send_event : async_values->send_ctx));
        if (is_null_EVTCHN_PORT(chan->send_evtchn_port)) {
            status = STATUS_INSUFFICIENT_RESOURCES;
            goto err;
        }
        xen_send_port = xen_EVTCHN_PORT(chan->send_evtchn_port);

        xen_gref = xen_GRANT_REF(chan->u.temple.control_gref);

        /* Publish the connection parameters; note our send port is the
           peer's prod-evtchn and vice versa. */
        status = v2v_xenstore_scatter(xbt, chan->local_prefix,
                                      "prod-order", xenstore_scatter_type_int,
                                          prod_ring_page_order,
                                      "cons-order", xenstore_scatter_type_int,
                                          cons_ring_page_order,
                                      "control-gref", xenstore_scatter_type_grant_ref,
                                          xen_gref,
                                      "prod-evtchn",xenstore_scatter_type_evtchn_port,
                                          xen_send_port,
                                      "cons-evtchn",xenstore_scatter_type_evtchn_port,
                                          xen_receive_port,
                                      NULL);
        if (!NT_SUCCESS(status))
            goto err;

        status = v2v_change_local_state(chan, xbt, v2v_state_listening);
        if (!NT_SUCCESS(status))
            goto err;

        status = xenbus_transaction_end(xbt, 0);
        xbt_pending = FALSE;
        if (NT_SUCCESS(status))
            break;
        if (status != STATUS_RETRY)
            goto err;

        /* cleanup for retry: revoke every grant, close both event
           channels, and drop the watch/remote prefix so the next pass
           can re-create them from scratch. */
        for (x = 0; x < 1u << prod_ring_page_order; x++) {
            GnttabEndForeignAccessCache(chan->u.temple.prod_grefs[x],
                                        chan->u.temple.grant_cache);
        }
        RtlZeroMemory(chan->u.temple.prod_grefs, sizeof(chan->u.temple.prod_grefs));

        for (x = 0; x < 1u << cons_ring_page_order; x++) {
            GnttabEndForeignAccessCache(chan->u.temple.cons_grefs[x],
                                        chan->u.temple.grant_cache);
        }
        RtlZeroMemory(chan->u.temple.cons_grefs, sizeof(chan->u.temple.cons_grefs));

        GnttabEndForeignAccessCache(chan->u.temple.control_gref,
                                    chan->u.temple.grant_cache);
        chan->u.temple.control_gref = null_GRANT_REF();

        EvtchnClose(chan->receive_evtchn_port);
        chan->receive_evtchn_port = null_EVTCHN_PORT();
        EvtchnClose(chan->send_evtchn_port);
        chan->send_evtchn_port = null_EVTCHN_PORT();

        xenbus_unregister_watch(chan->remote_state_watch);
        chan->remote_state_watch = NULL;
        ExFreePoolWithTag(chan->remote_prefix, V2V_TAG);
        chan->remote_prefix = NULL;
    }

    *channel = chan;

    return STATUS_SUCCESS;

err_nomem:
    status = STATUS_NO_MEMORY;
err:
    if (xbt_pending)
        xenbus_transaction_end(xbt, 1);
    /* since the channel has never been connected here, it is safe to
       free any temple resources that may have been allocated in this
       routine */
    v2v_destroy_channel(chan, TRUE);
    return status;
}