static NTSTATUS v2v_disconnect_supplicant(const struct v2v_channel *_channel) { NTSTATUS status; struct v2v_channel *channel = (struct v2v_channel *)_channel; v2v_xenops_grant_unmap((void *)channel->cons_sring, channel->u.supplicant.prod_detail); v2v_xenops_grant_unmap(channel->prod_sring, channel->u.supplicant.cons_detail); v2v_xenops_grant_unmap(channel->control, channel->u.supplicant.control_detail); channel->u.supplicant.prod_detail = NULL; channel->u.supplicant.cons_detail = NULL; channel->u.supplicant.control_detail = NULL; channel->prod_sring = NULL; channel->cons_sring = NULL; channel->control = NULL; if (!is_null_EVTCHN_PORT(channel->receive_evtchn_port)) { EvtchnClose(channel->receive_evtchn_port); channel->receive_evtchn_port = null_EVTCHN_PORT(); } if (!is_null_EVTCHN_PORT(channel->send_evtchn_port)) { EvtchnClose(channel->send_evtchn_port); channel->send_evtchn_port = null_EVTCHN_PORT(); } status = v2v_change_local_state(channel, XBT_NIL, v2v_state_disconnected); if (!NT_SUCCESS(status)) return status; v2v_destroy_channel(channel, FALSE); return STATUS_SUCCESS; }
/* Close the filter's event channel port, if one is open, and always leave
 * the field holding the null port so later calls are harmless no-ops. */
static void
close_evtchn(struct scsifilt *sf)
{
    if (!is_null_EVTCHN_PORT(sf->evtchn_port))
        EvtchnClose(sf->evtchn_port);

    sf->evtchn_port = null_EVTCHN_PORT();
}
/* Tear down the lower layer's event channel: quiesce delivery first
 * (EvtchnPortStop) so no DPC fires mid-close, then close the port and
 * record it as null.  Safe to call when the port is already null. */
VOID
XenLowerDisconnectEvtChnDPC(
    PXEN_LOWER XenLower)
{
    if (is_null_EVTCHN_PORT(XenLower->EvtchnPort))
        return;

    EvtchnPortStop(XenLower->EvtchnPort);
    EvtchnClose(XenLower->EvtchnPort);
    XenLower->EvtchnPort = null_EVTCHN_PORT();
}
/* Close the VIRQ event channel port under the virq lock.  A null port is
 * not an error (e.g. a stop followed by a remove PnP call); in that case
 * nothing is logged. */
static VOID
V4vUninitializeEventChannel(PDEVICE_OBJECT fdo)
{
    XENV4V_EXTENSION *pde = V4vGetDeviceExtension(fdo);
    KLOCK_QUEUE_HANDLE lqh;
    BOOLEAN closed = FALSE;

    KeAcquireInStackQueuedSpinLock(&pde->virqLock, &lqh);
    if (!is_null_EVTCHN_PORT(pde->virqPort)) {
        EvtchnClose(pde->virqPort);
        pde->virqPort = null_EVTCHN_PORT();
        closed = TRUE;
    }
    KeReleaseInStackQueuedSpinLock(&lqh);

    /* Trace outside the spinlock, and only when we actually disconnected. */
    if (closed)
        TraceNotice(("V4V VIRQ disconnected.\n"));
}
/* Disconnect the temple (ring-owning) end of a v2v channel.  Moves the
 * local state to "disconnecting", waits for the peer to leave the
 * connected state, then revokes the grant references and frees the rings.
 * Pages whose grants cannot be revoked (the peer still has them mapped)
 * are deliberately leaked rather than freed, and in that case the grant
 * cache is leaked as well. */
static NTSTATUS
v2v_disconnect_temple(const struct v2v_channel *_channel)
{
    NTSTATUS status = STATUS_SUCCESS;
    xenbus_transaction_t xbt = {0};
    struct v2v_channel *channel = (struct v2v_channel *)_channel;
    enum v2v_endpoint_state remote_state;
    BOOLEAN failed, any_failed = FALSE;
    unsigned x;

    status = v2v_change_local_state(channel, XBT_NIL, v2v_state_disconnecting);
    if (!NT_SUCCESS(status))
        return status;

    channel->u.temple.accepted = FALSE;

    /* Get the other end to disconnect */
    for (;;) {
        xenbus_transaction_start(&xbt);

        status = v2v_get_remote_state_internal(xbt, channel, &remote_state);
        switch (remote_state) {
        case v2v_state_unknown:
            /* NAME_NOT_FOUND means the peer's area is simply gone, which
             * counts as disconnected; any other error aborts. */
            if (status == STATUS_OBJECT_NAME_NOT_FOUND)
                break;
            xenbus_transaction_end(xbt, 1);
            return status;

        /* The first two shouldn't really happen, but sometimes can if
           we've managed to screw (e.g. if two processes try to use the
           same endpoint).  Try to recover. */
        case v2v_state_unready:
        case v2v_state_listening:
        case v2v_state_disconnecting:
        case v2v_state_disconnected:
        case v2v_state_crashed:
            break;

        case v2v_state_connected:
            /* Peer is still attached: abort this transaction and block on
             * the control event until the remote state changes, then
             * re-evaluate from the top. */
            xenbus_transaction_end(xbt, 1);
            KeWaitForSingleObject(&channel->control_event, Executive,
                                  KernelMode, FALSE, NULL);
            continue;
        }

        status = v2v_change_local_state(channel, xbt, v2v_state_disconnected);
        if (!NT_SUCCESS(status)) {
            xenbus_transaction_end(xbt, 1);
            return status;
        }

        status = xenbus_transaction_end(xbt, 0);
        if (NT_SUCCESS(status))
            break;                       /* drop out of loop and do rest */
        if (status == STATUS_RETRY)
            continue;                    /* try again */
        return status;                   /* else return the error */
    }

    XM_ASSERT(channel->u.temple.grant_cache != NULL);

    /* Revoke the producer-ring grants; free the ring only if every page
     * was successfully revoked (otherwise the peer may still write it). */
    failed = FALSE;
    for (x = 0; x < channel->nr_prod_ring_pages; x++) {
        if (!is_null_GRANT_REF(channel->u.temple.prod_grefs[x])) {
            status = GnttabEndForeignAccessCache(channel->u.temple.prod_grefs[x],
                                                 channel->u.temple.grant_cache);
            if (NT_SUCCESS(status))
                channel->u.temple.prod_grefs[x] = null_GRANT_REF();
            else
                failed = any_failed = TRUE;
        }
    }
    if (!failed) {
        ExFreePoolWithTag(channel->prod_sring, V2V_TAG);
        channel->prod_sring = NULL;
    }

    /* Same treatment for the consumer ring. */
    failed = FALSE;
    for (x = 0; x < channel->nr_cons_ring_pages; x++) {
        if (!is_null_GRANT_REF(channel->u.temple.cons_grefs[x])) {
            status = GnttabEndForeignAccessCache(channel->u.temple.cons_grefs[x],
                                                 channel->u.temple.grant_cache);
            if (NT_SUCCESS(status))
                channel->u.temple.cons_grefs[x] = null_GRANT_REF();
            else
                failed = any_failed = TRUE;
        }
    }
    if (!failed) {
        ExFreePoolWithTag((void *)channel->cons_sring, V2V_TAG);
        channel->cons_sring = NULL;
    }

    /* And the single control page. */
    if (!is_null_GRANT_REF(channel->u.temple.control_gref)) {
        status = GnttabEndForeignAccessCache(channel->u.temple.control_gref,
                                             channel->u.temple.grant_cache);
        if (NT_SUCCESS(status)) {
            channel->u.temple.control_gref = null_GRANT_REF();
            ExFreePoolWithTag(channel->control, V2V_TAG);
            channel->control = NULL;
        }
        else
            any_failed = TRUE;
    }

    /* The cache can only be destroyed once no grant in it is outstanding. */
    if (!any_failed)
        GnttabFreeCache(channel->u.temple.grant_cache);

    if (!is_null_EVTCHN_PORT(channel->receive_evtchn_port)) {
        EvtchnClose(channel->receive_evtchn_port);
        channel->receive_evtchn_port = null_EVTCHN_PORT();
    }
    if (!is_null_EVTCHN_PORT(channel->send_evtchn_port)) {
        EvtchnClose(channel->send_evtchn_port);
        channel->send_evtchn_port = null_EVTCHN_PORT();
    }

    /* We either freed the rings here or they could not be freed.  Prevent
       v2v_destroy_channel() from trying to free grants/rings with
       outstanding grant refs */
    v2v_destroy_channel(channel, FALSE);

    return STATUS_SUCCESS;
}
/* Create the temple end of a v2v channel and move it to the "listening"
 * state so a supplicant can connect.  Allocates the producer/consumer
 * rings and a control page, grants them to the peer domain, allocates two
 * unbound event channel ports, and publishes everything through xenstore
 * inside a transaction, retrying the whole sequence on STATUS_RETRY.
 *
 * xenbus_prefix        - xenstore location of this endpoint
 * channel              - out: the new channel on success (NULL on entry)
 * prod/cons_..._order  - ring sizes as page orders, <= MAX_RING_PAGE_ORDER
 * async_values         - optional DPC callbacks; if given, both receive_dpc
 *                        and send_dpc must be non-NULL
 */
NTSTATUS
v2v_listen(const char *xenbus_prefix, struct v2v_channel **channel,
           unsigned prod_ring_page_order, unsigned cons_ring_page_order,
           struct v2v_async *async_values)
{
    NTSTATUS status = STATUS_SUCCESS;
    unsigned prod_ring_size = PAGE_SIZE << prod_ring_page_order;
    unsigned cons_ring_size = PAGE_SIZE << cons_ring_page_order;
    struct v2v_channel *chan;
    xenbus_transaction_t xbt = {0};
    BOOLEAN xbt_pending = FALSE;
    PHYSICAL_ADDRESS pa;
    unsigned x;
    unsigned xen_receive_port, xen_send_port;
    uint32_t xen_gref;

    XM_ASSERT(channel != NULL);
    XM_ASSERT(xenbus_prefix != NULL);

    if (prod_ring_page_order > MAX_RING_PAGE_ORDER ||
        cons_ring_page_order > MAX_RING_PAGE_ORDER)
        return STATUS_INVALID_PARAMETER;

    /* Async mode requires both callbacks. */
    if (async_values && (!async_values->receive_dpc || !async_values->send_dpc))
        return STATUS_INVALID_PARAMETER;

    *channel = NULL;

    if (!xenbus_await_initialisation())
        return STATUS_NO_SUCH_DEVICE;

    chan = v2v_make_channel(xenbus_prefix, async_values);
    if (!chan)
        return STATUS_NO_MEMORY;

    chan->is_temple = TRUE;

    /* The temple owns the ring memory; the supplicant maps our grants. */
    chan->prod_sring = ExAllocatePoolWithTag(NonPagedPool, prod_ring_size, V2V_TAG);
    chan->cons_sring = ExAllocatePoolWithTag(NonPagedPool, cons_ring_size, V2V_TAG);
    chan->control = ExAllocatePoolWithTag(NonPagedPool, PAGE_SIZE, V2V_TAG);
    if (!chan->prod_sring || !chan->cons_sring || !chan->control)
        goto err_nomem;

    RtlZeroMemory(chan->prod_sring, prod_ring_size);
    RtlZeroMemory((void *)chan->cons_sring, cons_ring_size);
    RtlZeroMemory(chan->control, PAGE_SIZE);

    chan->nr_prod_ring_pages = 1 << prod_ring_page_order;
    chan->nr_cons_ring_pages = 1 << cons_ring_page_order;

    /* pre-allocate the grant refs we are going to need below in a
       grant cache (one per ring page plus one for the control page) */
    chan->u.temple.grant_cache =
        GnttabAllocCache(chan->nr_prod_ring_pages + chan->nr_cons_ring_pages + 1);
    if (!chan->u.temple.grant_cache)
        goto err_nomem;

    v2v_nc2_attach_rings_temple(&chan->nc2_rings, chan->cons_sring,
                                cons_ring_size, chan->prod_sring,
                                prod_ring_size, chan->control);

    /* Everything below runs inside a xenbus transaction; on STATUS_RETRY
     * all grants/ports/xenstore state are rolled back and we go again. */
    for (;;) {
        xenbus_transaction_start(&xbt);
        xbt_pending = TRUE;

        status = v2v_connect_channel_xenbus(chan, xbt);
        if (!NT_SUCCESS(status))
            goto err;

        /* Grant the producer ring read-only: only we write to it. */
        for (x = 0; x < 1u << prod_ring_page_order; x++) {
            pa = MmGetPhysicalAddress((void *)((ULONG_PTR)chan->prod_sring + x * PAGE_SIZE));
            chan->u.temple.prod_grefs[x] =
                GnttabGrantForeignAccessCache(chan->peer_domid,
                                              PHYS_TO_PFN(pa),
                                              GRANT_MODE_RO,
                                              chan->u.temple.grant_cache);
            XM_ASSERT(!is_null_GRANT_REF(chan->u.temple.prod_grefs[x]));

            status = v2v_write_grantref(chan, xbt, x, TRUE);
            if (!NT_SUCCESS(status))
                goto err;
        }

        /* The consumer ring is written by the peer, so grant it RW. */
        for (x = 0; x < 1u << cons_ring_page_order; x++) {
            pa = MmGetPhysicalAddress((void *)((ULONG_PTR)chan->cons_sring + x * PAGE_SIZE));
            chan->u.temple.cons_grefs[x] =
                GnttabGrantForeignAccessCache(chan->peer_domid,
                                              PHYS_TO_PFN(pa),
                                              GRANT_MODE_RW,
                                              chan->u.temple.grant_cache);
            XM_ASSERT(!is_null_GRANT_REF(chan->u.temple.cons_grefs[x]));

            status = v2v_write_grantref(chan, xbt, x, FALSE);
            if (!NT_SUCCESS(status))
                goto err;
        }

        pa = MmGetPhysicalAddress((void *)((ULONG_PTR)chan->control));
        chan->u.temple.control_gref =
            GnttabGrantForeignAccessCache(chan->peer_domid,
                                          PHYS_TO_PFN(pa),
                                          GRANT_MODE_RW,
                                          chan->u.temple.grant_cache);
        XM_ASSERT(!is_null_GRANT_REF(chan->u.temple.control_gref));

        /* Sync channels signal internal events via v2v_dpc; async ones use
         * the caller-supplied DPCs/contexts. */
        chan->receive_evtchn_port =
            EvtchnAllocUnboundDpc(chan->peer_domid,
                                  (chan->is_sync ? v2v_dpc : async_values->receive_dpc),
                                  (chan->is_sync ?
                                   &chan->s.sync.receive_event : async_values->receive_ctx));
        if (is_null_EVTCHN_PORT(chan->receive_evtchn_port)) {
            status = STATUS_INSUFFICIENT_RESOURCES;
            goto err;
        }
        xen_receive_port = xen_EVTCHN_PORT(chan->receive_evtchn_port);

        chan->send_evtchn_port =
            EvtchnAllocUnboundDpc(chan->peer_domid,
                                  (chan->is_sync ? v2v_dpc : async_values->send_dpc),
                                  (chan->is_sync ?
                                   &chan->s.sync.send_event : async_values->send_ctx));
        if (is_null_EVTCHN_PORT(chan->send_evtchn_port)) {
            status = STATUS_INSUFFICIENT_RESOURCES;
            goto err;
        }
        xen_send_port = xen_EVTCHN_PORT(chan->send_evtchn_port);

        /* Publish the connection parameters for the supplicant.  Note the
         * evtchn keys are named from the peer's perspective: our send port
         * is their prod-evtchn and vice versa. */
        xen_gref = xen_GRANT_REF(chan->u.temple.control_gref);
        status = v2v_xenstore_scatter(xbt, chan->local_prefix,
                                      "prod-order", xenstore_scatter_type_int,
                                      prod_ring_page_order,
                                      "cons-order", xenstore_scatter_type_int,
                                      cons_ring_page_order,
                                      "control-gref", xenstore_scatter_type_grant_ref,
                                      xen_gref,
                                      "prod-evtchn",xenstore_scatter_type_evtchn_port,
                                      xen_send_port,
                                      "cons-evtchn",xenstore_scatter_type_evtchn_port,
                                      xen_receive_port,
                                      NULL);
        if (!NT_SUCCESS(status))
            goto err;

        status = v2v_change_local_state(chan, xbt, v2v_state_listening);
        if (!NT_SUCCESS(status))
            goto err;

        status = xenbus_transaction_end(xbt, 0);
        xbt_pending = FALSE;
        if (NT_SUCCESS(status))
            break;
        if (status != STATUS_RETRY)
            goto err;

        /* cleanup for retry: revoke every grant, close both ports and drop
           the per-attempt xenbus state before going round again */
        for (x = 0; x < 1u << prod_ring_page_order; x++) {
            GnttabEndForeignAccessCache(chan->u.temple.prod_grefs[x],
                                        chan->u.temple.grant_cache);
        }
        RtlZeroMemory(chan->u.temple.prod_grefs,
                      sizeof(chan->u.temple.prod_grefs));

        for (x = 0; x < 1u << cons_ring_page_order; x++) {
            GnttabEndForeignAccessCache(chan->u.temple.cons_grefs[x],
                                        chan->u.temple.grant_cache);
        }
        RtlZeroMemory(chan->u.temple.cons_grefs,
                      sizeof(chan->u.temple.cons_grefs));

        GnttabEndForeignAccessCache(chan->u.temple.control_gref,
                                    chan->u.temple.grant_cache);
        chan->u.temple.control_gref = null_GRANT_REF();

        EvtchnClose(chan->receive_evtchn_port);
        chan->receive_evtchn_port = null_EVTCHN_PORT();
        EvtchnClose(chan->send_evtchn_port);
        chan->send_evtchn_port = null_EVTCHN_PORT();

        xenbus_unregister_watch(chan->remote_state_watch);
        chan->remote_state_watch = NULL;

        ExFreePoolWithTag(chan->remote_prefix, V2V_TAG);
        chan->remote_prefix = NULL;
    }

    *channel = chan;

    return STATUS_SUCCESS;

err_nomem:
    status = STATUS_NO_MEMORY;
err:
    if (xbt_pending)
        xenbus_transaction_end(xbt, 1);
    /* since the channel has never been connected here, it is safe to free
       any temple resources that may have been allocated in this routine */
    v2v_destroy_channel(chan, TRUE);
    return status;
}
/* PnP AddDevice for the (singleton) V4V control device.  Creates the FDO
 * with a secure SDDL descriptor, creates its symbolic link, initializes
 * the device extension, attaches to the PDO's stack and reports the
 * frontend as connected through xenstore.
 *
 * Fixes over the previous revision:
 *  - the attach-failure TraceError used to print 'status' before it was
 *    assigned (always logged STATUS_SUCCESS);
 *  - pde->frontendPath leaked when a failure occurred after ownership of
 *    szFpath had been transferred to the extension;
 *  - the destination lookaside list was never deleted on a late failure;
 *  - g_deviceCreated was never reset on failure, permanently blocking any
 *    later AddDevice attempt.
 */
static NTSTATUS
V4vAddDevice(PDRIVER_OBJECT driverObject, PDEVICE_OBJECT pdo)
{
    NTSTATUS          status = STATUS_SUCCESS;
    UNICODE_STRING    deviceName;
    PDEVICE_OBJECT    fdo = NULL;
    PXENV4V_EXTENSION pde = NULL;
    LONG              val;
    BOOLEAN           symlink = FALSE;
    BOOLEAN           lookaside = FALSE;  /* lookaside list needs deletion */
    LARGE_INTEGER     seed;
    WCHAR            *szSddl = NULL;
    UNICODE_STRING    sddlString;
    CHAR             *szFpath = NULL;

    TraceVerbose(("====> '%s'.\n", __FUNCTION__));

    /* We only allow one instance of this device type. */
    val = InterlockedCompareExchange(&g_deviceCreated, 1, 0);
    if (val != 0) {
        TraceWarning(("cannot instantiate more than one v4v device node.\n"));
        return STATUS_UNSUCCESSFUL;
    }

    do {
        /* Create our device */
        RtlInitUnicodeString(&deviceName, V4V_DEVICE_NAME);
        szSddl = g_win5Sddl;
        RtlInitUnicodeString(&sddlString, szSddl);

        status = IoCreateDeviceSecure(driverObject,
                                      sizeof(XENV4V_EXTENSION),
                                      &deviceName,
                                      FILE_DEVICE_UNKNOWN,
                                      FILE_DEVICE_SECURE_OPEN,
                                      FALSE,
                                      &sddlString,
                                      (LPCGUID)&GUID_SD_XENV4V_CONTROL_OBJECT,
                                      &fdo);
        if (!NT_SUCCESS(status)) {
            TraceError(("failed to create device object - error: 0x%x\n", status));
            fdo = NULL;
            break;
        }

        pde = (PXENV4V_EXTENSION)fdo->DeviceExtension;
        RtlZeroMemory(pde, sizeof(XENV4V_EXTENSION));
        RtlStringCchCopyW(pde->symbolicLinkText, XENV4V_SYM_NAME_LEN,
                          V4V_SYMBOLIC_NAME);
        RtlInitUnicodeString(&pde->symbolicLink, pde->symbolicLinkText);

        /* Create our symbolic link */
        status = IoCreateSymbolicLink(&pde->symbolicLink, &deviceName);
        if (!NT_SUCCESS(status)) {
            TraceError(("failed to create symbolic - error: 0x%x\n", status));
            break;
        }
        symlink = TRUE;

        /* Get our xenstore path */
        szFpath = xenbus_find_frontend(pdo);
        if (szFpath == NULL) {
            status = STATUS_NO_SUCH_DEVICE;
            TraceError(("failed to locate XenStore front end path\n"));
            break;
        }

        /* Setup the extension */
        pde->magic = XENV4V_MAGIC;
        pde->pdo = pdo;
        pde->fdo = fdo;
        IoInitializeRemoveLock(&pde->removeLock, 'v4vx', 0, 0);
        pde->frontendPath = szFpath;     /* extension now owns the string */
        szFpath = NULL;
        pde->state = XENV4V_DEV_STOPPED; /* wait for start */
        pde->lastPoState = PowerSystemWorking;
        pde->virqPort = null_EVTCHN_PORT();
        KeInitializeDpc(&pde->virqDpc, V4vVirqNotifyDpc, fdo);
        KeInitializeSpinLock(&pde->virqLock);
        KeInitializeSpinLock(&pde->dpcLock);
        KeInitializeTimerEx(&pde->timer, NotificationTimer);
        KeInitializeDpc(&pde->timerDpc, V4vConnectTimerDpc, fdo);
        KeInitializeSpinLock(&pde->timerLock);
        pde->timerCounter = 0;
        InitializeListHead(&pde->contextList);
        KeInitializeSpinLock(&pde->contextLock);
        pde->contextCount = 0;
        InitializeListHead(&pde->ringList);
        KeInitializeSpinLock(&pde->ringLock);
        InitializeListHead(&pde->pendingIrpQueue);
        pde->pendingIrpCount = 0;
        KeInitializeSpinLock(&pde->queueLock);
        IoCsqInitializeEx(&pde->csqObject,
                          V4vCsqInsertIrpEx,
                          V4vCsqRemoveIrp,
                          V4vCsqPeekNextIrp,
                          V4vCsqAcquireLock,
                          V4vCsqReleaseLock,
                          V4vCsqCompleteCanceledIrp);
        InitializeListHead(&pde->destList);
        pde->destCount = 0;
        ExInitializeNPagedLookasideList(&pde->destLookasideList, NULL, NULL, 0,
                                        sizeof(XENV4V_DESTINATION),
                                        XENV4V_TAG, 0);
        lookaside = TRUE;
        KeQueryTickCount(&seed);
        pde->seed = seed.u.LowPart;

        /* Now attach us to the stack */
        pde->ldo = IoAttachDeviceToDeviceStack(fdo, pdo);
        if (pde->ldo == NULL) {
            /* Set status before tracing so the log carries the real code. */
            status = STATUS_NO_SUCH_DEVICE;
            TraceError(("failed to attach device to stack - error: 0x%x\n", status));
            break;
        }

        /* Use direct IO and let the IO manager directly map user buffers;
           clear the init flag */
        fdo->Flags |= DO_DIRECT_IO;
        fdo->Flags &= ~DO_DEVICE_INITIALIZING;

        /* Made it here, go to connected state to be consistent */
        xenbus_change_state(XBT_NIL, pde->frontendPath, "state",
                            XENBUS_STATE_CONNECTED);
    } while (FALSE);

    if (!NT_SUCCESS(status)) {
        if (fdo != NULL) {
            if ((pde != NULL)&&(pde->ldo != NULL)) {
                IoDetachDevice(pde->ldo);
            }
            if (lookaside) {
                /* initialized above; must be torn down before the FDO goes */
                ExDeleteNPagedLookasideList(&pde->destLookasideList);
            }
            if ((pde != NULL)&&(pde->frontendPath != NULL)) {
                /* ownership was transferred to the extension; free it here */
                XmFreeMemory(pde->frontendPath);
                pde->frontendPath = NULL;
            }
            if (szFpath != NULL) {
                XmFreeMemory(szFpath);
            }
            if (symlink) {
                IoDeleteSymbolicLink(&pde->symbolicLink);
            }
            IoDeleteDevice(fdo);
        }
        /* We won the CAS above but failed to create the device; allow a
           subsequent AddDevice attempt to try again. */
        InterlockedExchange(&g_deviceCreated, 0);
    }

    TraceVerbose(("<==== '%s'.\n", __FUNCTION__));

    return status;
}