/* Debug check that Array[0..Count-1] still satisfies the (max-)heap
   property.  Any parent that is not strictly greater than one of its
   children is reported via TraceNotice, tagged with the caller's line
   number so the offending call site can be identified. */
static VOID
__BalloonCheckHeap(
    IN  ULONG               Line,
    IN  const PFN_NUMBER    *Array,
    IN  ULONG               Count
    )
{
    ULONG   Parent;

    /* Nodes at index >= Count / 2 are leaves and have no children. */
    for (Parent = 0; Parent < Count / 2; Parent++) {
        ULONG   Child;

        /* Visit the left (2p + 1) and right (2p + 2) children in turn. */
        for (Child = Parent * 2 + 1; Child <= Parent * 2 + 2; Child++) {
            if (Child >= Count)
                break;

            if (Array[Parent] <= Array[Child])
                TraceNotice(("PFN[%d] (%p) <= PFN[%d] (%p) (at line %d)\n",
                             Parent, Array[Parent],
                             Child, Array[Child],
                             Line));
        }
    }
}
/* Shut the frontend down cleanly by walking the xenbus state machine:
 * wait for the backend to leave INITIALISING, then drive the frontend
 * through CLOSING and finally CLOSED, waiting at each step for the
 * backend to follow.  A backend that disappears entirely (null state)
 * terminates each wait loop as well. */
static VOID
close_frontend(struct scsifilt *sf, SUSPEND_TOKEN token)
{
    XENBUS_STATE frontend_state;
    XENBUS_STATE backend_state;
    NTSTATUS status;

    TraceNotice(("target %d: closing frontend...\n", sf->target_id));

    // Get initial frontend state (only used for trace output below)
    status = xenbus_read_state(XBT_NIL, sf->frontend_path, "state",
                               &frontend_state);
    if (!NT_SUCCESS(status))
        frontend_state = null_XENBUS_STATE();

    // Wait for the backend to stabilise
    backend_state = null_XENBUS_STATE();
    do {
        backend_state = XenbusWaitForBackendStateChange(sf->backend_path,
                                                        backend_state,
                                                        NULL,
                                                        token);
    } while (same_XENBUS_STATE(backend_state, XENBUS_STATE_INITIALISING));

    TraceVerbose(("%s: target %d: backend state = %s, frontend state = %s\n",
                  __FUNCTION__, sf->target_id,
                  XenbusStateName(backend_state),
                  XenbusStateName(frontend_state)));

    // Step 1: request CLOSING; re-assert it on every backend state change
    // until the backend reaches CLOSING or CLOSED (or goes away).
    frontend_state = XENBUS_STATE_CLOSING;
    while (!same_XENBUS_STATE(backend_state, XENBUS_STATE_CLOSING) &&
           !same_XENBUS_STATE(backend_state, XENBUS_STATE_CLOSED) &&
           !is_null_XENBUS_STATE(backend_state)) {
        xenbus_change_state(XBT_NIL, sf->frontend_path, "state",
                            frontend_state);
        backend_state = XenbusWaitForBackendStateChange(sf->backend_path,
                                                        backend_state,
                                                        NULL,
                                                        token);
    }

    TraceVerbose(("%s: target %d: backend state = %s, frontend state = %s\n",
                  __FUNCTION__, sf->target_id,
                  XenbusStateName(backend_state),
                  XenbusStateName(frontend_state)));

    // Step 2: request CLOSED and wait until the backend is CLOSED too.
    frontend_state = XENBUS_STATE_CLOSED;
    while (!same_XENBUS_STATE(backend_state, XENBUS_STATE_CLOSED) &&
           !is_null_XENBUS_STATE(backend_state)) {
        xenbus_change_state(XBT_NIL, sf->frontend_path, "state",
                            frontend_state);
        backend_state = XenbusWaitForBackendStateChange(sf->backend_path,
                                                        backend_state,
                                                        NULL,
                                                        token);
    }

    TraceVerbose(("%s: target %d: backend state = %s, frontend state = %s\n",
                  __FUNCTION__, sf->target_id,
                  XenbusStateName(backend_state),
                  XenbusStateName(frontend_state)));

    TraceNotice(("target %d: backend closed\n", sf->target_id));
}
/* Bind the V4V VIRQ for this device, guarding pde->virqPort with the
 * virq spin lock.  Binding when a port is already bound is treated as
 * success (with a warning); a failed bind returns
 * STATUS_INSUFFICIENT_RESOURCES. */
static NTSTATUS
V4vInitializeEventChannel(PDEVICE_OBJECT fdo)
{
    XENV4V_EXTENSION  *pde = V4vGetDeviceExtension(fdo);
    KLOCK_QUEUE_HANDLE lqh;
    BOOLEAN            alreadyBound;
    BOOLEAN            bindFailed = FALSE;

    KeAcquireInStackQueuedSpinLock(&pde->virqLock, &lqh);
    alreadyBound = !is_null_EVTCHN_PORT(pde->virqPort);
    if (!alreadyBound) {
        pde->virqPort = EvtchnBindVirq(VIRQ_V4V, V4vVirqNotifyIsr, fdo);
        bindFailed = is_null_EVTCHN_PORT(pde->virqPort);
    }
    KeReleaseInStackQueuedSpinLock(&lqh);

    /* Trace outside the lock, as the original did. */
    if (alreadyBound) {
        TraceWarning(("V4V VIRQ already bound?\n"));
        return STATUS_SUCCESS;
    }

    if (bindFailed) {
        TraceError(("failed to bind V4V VIRQ\n"));
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    TraceNotice(("V4V VIRQ connected.\n"));
    return STATUS_SUCCESS;
}
/* Record the physical/virtual location of the IO hole handed to us by
 * 'module' and reset the allocation bitmap.  In normal (non-austere)
 * mode the hole may only be initialized once; later attempts are
 * rejected with a warning naming the current owner. */
VOID
__XenevtchnInitIoHole(const char *module, PHYSICAL_ADDRESS base,
                      PVOID base_va, ULONG nbytes)
{
    if (!AustereMode && io_hole_initialized) {
        TraceWarning(("IO hole already initialized by %s\n", io_hole_owner));
        return;
    }

    io_hole_start = base;
    io_hole_va_start = base_va;

    /* BUG FIX: this used MAX(), which (a) claimed at least
     * IO_HOLE_MAX_PAGES pages even when the caller provided a smaller
     * hole, handing out addresses beyond the mapped region, and
     * (b) set SizeOfBitMap beyond the static io_hole_bitmap buffer
     * whenever nbytes / PAGE_SIZE exceeded IO_HOLE_MAX_PAGES.  Clamp
     * to the bitmap's capacity instead. */
    io_hole_nr_pages = MIN(IO_HOLE_MAX_PAGES, (nbytes / PAGE_SIZE));

    /* For some reason, RtlInitializeBitmap() isn't allowed to be called
       above APC level, although all the other bitmap functions work at
       any irql.  Duplicate the entire thing here. */
    io_hole_in_use.SizeOfBitMap = io_hole_nr_pages;
    io_hole_in_use.Buffer = io_hole_bitmap;

    /* Remember who owns the hole; guarantee NUL termination. */
    strncpy(io_hole_owner, module, sizeof(io_hole_owner));
    io_hole_owner[sizeof(io_hole_owner) - 1] = '\0';

    io_hole_initialized = TRUE;

    TraceNotice(("%s: IO hole: [%016llx,%016llx) mapped at %p\n",
                 io_hole_owner,
                 io_hole_start.QuadPart,
                 io_hole_start.QuadPart + (io_hole_nr_pages * PAGE_SIZE),
                 io_hole_va_start));
}
/* Resume a suspended target.  Takes a reference on the target so it
 * cannot vanish, atomically samples and clears the Suspended flag under
 * the target table lock, then either resumes the target (if it was
 * suspended) or waits for it (if another thread is already resuming). */
static VOID
XenvbdTargetResume(ULONG target_id, SUSPEND_TOKEN token)
{
    PXHBD_TARGET_INFO   target;
    BOOLEAN             doResume;
    KIRQL               irql;

    TraceNotice(("target %d: %s\n", target_id, __FUNCTION__));

    irql = acquire_irqsafe_lock(XenvbdTargetInfoLock);
    target = XenvbdTargetInfo[target_id];
    XM_ASSERT(target != NULL);
    target->References++;
    doResume = target->Suspended;
    if (doResume) {
        target->Suspended = FALSE;
        target->Resuming = TRUE;
    }
    release_irqsafe_lock(XenvbdTargetInfoLock, irql);

    /* Both calls happen outside the lock, as in the original. */
    if (doResume)
        ResumeTarget(target, token);
    else
        WaitTarget(target);

    /* Drop the reference taken above. */
    irql = acquire_irqsafe_lock(XenvbdTargetInfoLock);
    XM_ASSERT(target->References != 0);
    target->References--;
    release_irqsafe_lock(XenvbdTargetInfoLock, irql);
}
/* Mark a target as not started.  A reference is held across the state
 * change so the target stays valid while its StateLock is taken. */
static VOID
XenvbdTargetStop(ULONG target_id)
{
    PXHBD_TARGET_INFO   target;
    KIRQL               irql;

    TraceNotice(("target %d: %s\n", target_id, __FUNCTION__));

    /* Pin the target with a reference. */
    irql = acquire_irqsafe_lock(XenvbdTargetInfoLock);
    target = XenvbdTargetInfo[target_id];
    XM_ASSERT(target != NULL);
    target->References++;
    release_irqsafe_lock(XenvbdTargetInfoLock, irql);

    /* Fast mutexes may only be acquired below DISPATCH_LEVEL. */
    XM_ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
    ExAcquireFastMutex(&target->StateLock);
    target->Started = FALSE;
    ExReleaseFastMutex(&target->StateLock);

    /* Drop the reference taken above. */
    irql = acquire_irqsafe_lock(XenvbdTargetInfoLock);
    XM_ASSERT(target->References != 0);
    target->References--;
    release_irqsafe_lock(XenvbdTargetInfoLock, irql);
}
/* Tear down the IO hole: mark every page free, drop the initialized
 * flag, and forget the previous owner.  The hole must be re-initialized
 * via __XenevtchnInitIoHole() before it can be used again. */
VOID
__XenevtchnShutdownIoHole(const char *module)
{
    memset(io_hole_bitmap, 0, sizeof(io_hole_bitmap));
    io_hole_initialized = FALSE;
    memset(io_hole_owner, 0, sizeof(io_hole_owner));

    /* Note: the trace names the caller ('module'), not the old owner. */
    TraceNotice(("IO hole cleared by %s\n", module));
}
/* Work item callback: wait for xenbus to finish initializing, then bind
 * the V4V VIRQ.  Run from a work item because, when resuming from
 * hibernation on multi-vCPU guests, the PV drivers can initialize in
 * parallel and xenbus may not yet be ready when this driver resumes
 * (see V4vDispatchPower).  'ctx' is the work item itself, which is
 * freed here once it has run. */
static VOID NTAPI
V4vDehibernateWorkItem(PDEVICE_OBJECT fdo, PVOID ctx)
{
    PIO_WORKITEM wi = (PIO_WORKITEM)ctx;

    if (xenbus_await_initialisation()) {
        /* Best effort; V4vInitializeEventChannel logs its own failures. */
        (VOID)V4vInitializeEventChannel(fdo);
        /* Typo fix: "dehibrination" -> "dehibernation". */
        TraceNotice(("dehibernation work item initialized VIRQ.\n"));
    } else {
        TraceError(("wait for XENBUS initialization failed, cannot connect VIRQ.\n"));
    }

    IoFreeWorkItem(wi);
}
/* Queue a work item that will bind the V4V VIRQ once xenbus has
 * finished initializing; used on resume from hibernation where binding
 * inline can race with xenbus initialization.  Best effort: if the
 * work item cannot be allocated, the VIRQ simply stays unbound. */
static VOID
V4vStartDehibernateWorkItem(PDEVICE_OBJECT fdo)
{
    PIO_WORKITEM wi;

    /* Typo fix: "dehibrination" -> "dehibernation". */
    TraceNotice(("starting dehibernation work item.\n"));

    wi = IoAllocateWorkItem(fdo);
    if (wi == NULL) {
        TraceError(("failed to allocate dehibernate work item - out of memory.\n"));
        return;
    }

    /* Pass the work item as its own context so the callback
       (V4vDehibernateWorkItem) can free it when done. */
    IoQueueWorkItem(wi, V4vDehibernateWorkItem, DelayedWorkQueue, wi);
}
static void v2v_debug_dump(struct v2v_channel *channel) { TraceNotice(("Xen-v2vk instance %p\n", channel)); TraceNotice(("Local prefix %s\n", (channel->local_prefix ? channel->local_prefix : "[unknown]"))); TraceNotice(("Remote prefix %s\n", (channel->remote_prefix ? channel->remote_prefix : "[unknown]"))); TraceNotice(("Peer domain %d\n", unwrap_DOMAIN_ID(channel->peer_domid))); TraceNotice(("Listener %s\n", (channel->is_temple ? "yes" : "no"))); TraceNotice(("Synchronous %s\n", (channel->is_sync ? "yes" : "no"))); }
/* Start a target: prepare its backend for reconnection and mark it
 * Started.  A reference is held on the target for the duration; on
 * failure the target is left not-started and the error is traced. */
static VOID
XenvbdTargetStart(ULONG target_id, char *backend_path, SUSPEND_TOKEN token)
{
    PXHBD_TARGET_INFO   target;
    NTSTATUS            status;
    KIRQL               irql;

    TraceNotice(("target %d: %s\n", target_id, __FUNCTION__));

    /* Pin the target with a reference. */
    irql = acquire_irqsafe_lock(XenvbdTargetInfoLock);
    target = XenvbdTargetInfo[target_id];
    XM_ASSERT(target != NULL);
    target->References++;
    release_irqsafe_lock(XenvbdTargetInfoLock, irql);

    /* Fast mutexes may only be acquired below DISPATCH_LEVEL. */
    XM_ASSERT(KeGetCurrentIrql() < DISPATCH_LEVEL);
    ExAcquireFastMutex(&target->StateLock);
    XM_ASSERT(!target->Started);

    status = PrepareBackendForReconnect(target, backend_path, token);
    if (NT_SUCCESS(status))
        target->Started = TRUE;
    else
        TraceError(("%s: fail1 (0x%08x)\n", __FUNCTION__, status));

    ExReleaseFastMutex(&target->StateLock);

    /* Drop the reference taken above (shared success/failure path). */
    irql = acquire_irqsafe_lock(XenvbdTargetInfoLock);
    XM_ASSERT(target->References != 0);
    target->References--;
    release_irqsafe_lock(XenvbdTargetInfoLock, irql);
}
/* Update the cached 'info' word for a target under the table lock. */
static void
XenvbdSetTargetInfo(ULONG target_id, ULONG info)
{
    PXHBD_TARGET_INFO   target;
    KIRQL               irql;

    TraceNotice(("target %d: %s\n", target_id, __FUNCTION__));

    irql = acquire_irqsafe_lock(XenvbdTargetInfoLock);
    target = XenvbdTargetInfo[target_id];
    XM_ASSERT(target != NULL);
    target->info = info;
    release_irqsafe_lock(XenvbdTargetInfoLock, irql);
}
/* Detach the scsifilt instance from a target (undo of
 * XenvbdSwitchToFilter), performed under the target table lock. */
static void
XenvbdSwitchFromFilter(ULONG target_id)
{
    PXHBD_TARGET_INFO   target;
    KIRQL               irql;

    TraceNotice(("target %d: %s\n", target_id, __FUNCTION__));

    irql = acquire_irqsafe_lock(XenvbdTargetInfoLock);
    target = XenvbdTargetInfo[target_id];
    XM_ASSERT(target != NULL);
    target->FilterTarget.scsifilt = NULL;
    release_irqsafe_lock(XenvbdTargetInfoLock, irql);
}
/* Close the V4V VIRQ event channel if one is bound.  An unbound port is
 * not an error (e.g. a stop followed by a remove PnP call), so nothing
 * is logged in that case. */
static VOID
V4vUninitializeEventChannel(PDEVICE_OBJECT fdo)
{
    XENV4V_EXTENSION  *pde = V4vGetDeviceExtension(fdo);
    KLOCK_QUEUE_HANDLE lqh;
    BOOLEAN            wasBound = FALSE;

    KeAcquireInStackQueuedSpinLock(&pde->virqLock, &lqh);
    if (!is_null_EVTCHN_PORT(pde->virqPort)) {
        EvtchnClose(pde->virqPort);
        pde->virqPort = null_EVTCHN_PORT();
        wasBound = TRUE;
    }
    KeReleaseInStackQueuedSpinLock(&lqh);

    /* Trace outside the lock, as the original did. */
    if (wasBound)
        TraceNotice(("V4V VIRQ disconnected.\n"));
}
/* Park a redirected SRB on the target's completion queue; the polling
 * timer completes it later.  Queued under the target table lock. */
static VOID
XenvbdCompleteRedirectedSrb(ULONG target_id, PSCSI_REQUEST_BLOCK srb)
{
    PXHBD_TARGET_INFO   target;
    PFILTER_TARGET      ft;
    KIRQL               irql;

    TraceNotice(("target %d: %s\n", target_id, __FUNCTION__));

    irql = acquire_irqsafe_lock(XenvbdTargetInfoLock);
    target = XenvbdTargetInfo[target_id];
    XM_ASSERT(target != NULL);

    ft = &target->FilterTarget;
    QueueSrbRaw(srb, &ft->pending_redirect_complete);
    /* There must be at least one redirected SRB outstanding. */
    XM_ASSERT(ft->outstanding_redirected_srbs != 0);
    release_irqsafe_lock(XenvbdTargetInfoLock, irql);
}
static ULONG BalloonGetTopPage( VOID ) { PHYSICAL_MEMORY_RANGE *Range; PHYSICAL_ADDRESS TopAddress; ULONG Index; Range = MmGetPhysicalMemoryRanges(); TopAddress.QuadPart = 0ull; for (Index = 0; Range[Index].BaseAddress.QuadPart != 0 || Range[Index].NumberOfBytes.QuadPart != 0; Index++) { PHYSICAL_ADDRESS EndAddress; CHAR Key[32]; EndAddress.QuadPart = Range[Index].BaseAddress.QuadPart + Range[Index].NumberOfBytes.QuadPart; TraceInfo(("PHYSICAL MEMORY: RANGE[%u] %08x.%08x - %08x.%08x\n", Index, Range[Index].BaseAddress.HighPart, Range[Index].BaseAddress.LowPart, EndAddress.HighPart, EndAddress.LowPart)); (VOID) Xmsnprintf(Key, sizeof (Key), "data/physical-memory/range%u", Index); (VOID) xenbus_printf(XBT_NIL, Key, "base", "%08x.%08x", Range[Index].BaseAddress.HighPart, Range[Index].BaseAddress.LowPart); (VOID) xenbus_printf(XBT_NIL, Key, "end", "%08x.%08x", EndAddress.HighPart, EndAddress.LowPart); if (EndAddress.QuadPart > TopAddress.QuadPart) TopAddress.QuadPart = EndAddress.QuadPart; } TraceNotice(("PHYSICAL MEMORY: TOP = %08x.%08x\n", TopAddress.HighPart, TopAddress.LowPart)); return (ULONG)(TopAddress.QuadPart >> PAGE_SHIFT); }
/* Attach a scsifilt instance and its SRB redirection hook to a target,
 * performed under the target table lock. */
static void
XenvbdSwitchToFilter(ULONG target_id, struct scsifilt *sf,
                     void (*redirect_srb)(struct scsifilt *sf,
                                          PSCSI_REQUEST_BLOCK srb))
{
    PXHBD_TARGET_INFO   target;
    KIRQL               irql;

    TraceNotice(("target %d: %s\n", target_id, __FUNCTION__));

    irql = acquire_irqsafe_lock(XenvbdTargetInfoLock);
    target = XenvbdTargetInfo[target_id];
    XM_ASSERT(target != NULL);
    target->FilterTarget.scsifilt = sf;
    target->FilterTarget.redirect_srb = redirect_srb;
    release_irqsafe_lock(XenvbdTargetInfoLock, irql);
}
/* We've bound the scsifilt instance to a xenvbd instance, and we've
   disconnected xenvbd from the shared ring. Connect scsifilt. */
NTSTATUS
connect_scsifilt_with_token(struct scsifilt *sf, SUSPEND_TOKEN token)
{
    XENBUS_STATE state;
    blkif_sring_t *ring_shared;
    NTSTATUS status;
    KIRQL irql;

    /* Discard any backend path / shared ring left over from a previous
       connection before building the new one. */
    if (sf->backend_path != NULL) {
        TraceVerbose(("Releasing old backend path (%p)\n", sf->backend_path));
        XmFreeMemory(sf->backend_path);
        sf->backend_path = NULL;
    }
    if (sf->ring_shared != NULL) {
        TraceVerbose(("Releasing old shared ring (%p)\n", sf->ring_shared));
        XmFreeMemory(sf->ring_shared);
        sf->ring_shared = NULL;
        sf->ring.sring = NULL;
    }

    find_backend_handle(sf);

    status = STATUS_UNSUCCESSFUL;
    sf->backend_path = get_backend_path(sf, token);
    if (sf->backend_path == NULL)
        goto fail1;

    /* Resume the xenvbd target, restarting it first if it was stopped. */
    sf->target_resume(sf->target_id, token);
    if (sf->stopped) {
        sf->target_start(sf->target_id, sf->backend_path, token);
        sf->stopped = FALSE;
    }

    /* The backend must be waiting for us (INITWAIT) before we hand it
       the ring details. */
    state = XenbusWaitForBackendStateChange(sf->backend_path,
                                            null_XENBUS_STATE(),
                                            NULL,
                                            token);
    if (!same_XENBUS_STATE(state, XENBUS_STATE_INITWAIT))
        goto fail2;

    probe_backend_capabilities(sf);

    status = STATUS_NO_MEMORY;
    ring_shared = XmAllocateZeroedMemory(PAGE_SIZE << sf->ring_order);
    if (ring_shared == NULL)
        goto fail3;

    /* Publish the freshly initialized ring under the ring lock. */
    KeAcquireSpinLock(&sf->ring_lock, &irql);
    sf->ring_shared = ring_shared;
    SHARED_RING_INIT(sf->ring_shared);
    FRONT_RING_INIT(&sf->ring, sf->ring_shared, PAGE_SIZE << sf->ring_order);
    KeReleaseSpinLock(&sf->ring_lock, irql);

    grant_ring(sf);

    status = open_evtchn(sf);
    if (!NT_SUCCESS(status))
        goto fail4;

    /* Advertise the ring grants and event channel to the backend in a
       single xenstore transaction, retrying on STATUS_RETRY. */
    do {
        xenbus_transaction_t xbt;

        xenbus_transaction_start(&xbt);
        xenbus_write_evtchn_port(xbt, sf->frontend_path, "event-channel",
                                 sf->evtchn_port);
        if (sf->single_page) {
            XM_ASSERT3U(sf->ring_order, ==, 0);
            TraceNotice(("%s: using single page handshake\n",
                         sf->frontend_path));
            /* single page handshake */
            xenbus_write_grant_ref(xbt, sf->frontend_path, "ring-ref",
                                   sf->ring_gref[0]);
        } else {
            int i;

            TraceNotice(("%s: using multi-page handshake\n",
                         sf->frontend_path));
            xenbus_printf(xbt, sf->frontend_path, "ring-page-order", "%u",
                          sf->ring_order);
            /* One "ring-refN" key per granted ring page. */
            for (i = 0; i < (1 << sf->ring_order); i++) {
                char buffer[10];

                Xmsnprintf(buffer, sizeof(buffer), "ring-ref%1u", i);
                xenbus_write_grant_ref(xbt, sf->frontend_path, buffer,
                                       sf->ring_gref[i]);
            }
        }
        xenbus_printf(xbt, sf->frontend_path, "protocol", "x86_32-abi");
        xenbus_write_feature_flag(xbt, sf->frontend_path,
                                  "feature-surprise-remove", TRUE);
        xenbus_write_feature_flag(xbt, sf->frontend_path,
                                  "feature-online-resize", TRUE);
        xenbus_change_state(xbt, sf->frontend_path, "state",
                            XENBUS_STATE_INITIALISED);
        status = xenbus_transaction_end(xbt, 0);
    } while (status == STATUS_RETRY);
/* Dispatch handler for IRP_MJ_POWER.  On system SET-power IRPs the V4V
 * VIRQ is disconnected when leaving S0 and re-connected when returning
 * to S0 (via a work item when coming out of hibernation, since xenbus
 * may not be ready yet).  The IRP is then forwarded down the stack;
 * PoStartNextPowerIrp is always called (required on XP/2k3). */
static NTSTATUS NTAPI
V4vDispatchPower(PDEVICE_OBJECT fdo, PIRP irp)
{
    NTSTATUS status;
    PXENV4V_EXTENSION pde = V4vGetDeviceExtension(fdo);
    PIO_STACK_LOCATION isl = IoGetCurrentIrpStackLocation(irp);

    TraceVerbose(("====> '%s'.\n", __FUNCTION__));

    switch (isl->MinorFunction) {
    case IRP_MN_SET_POWER:
        if (isl->Parameters.Power.Type == SystemPowerState) {
            TraceNotice(("SET system power: %d %d\n",
                         isl->Parameters.Power.State.SystemState,
                         isl->Parameters.Power.ShutdownType));

            // If we are transitioning from the working (S0) power state to a lower state,
            // disconnect the VIRQ. If we are resuming to the working power state, re-connect.
            if (isl->Parameters.Power.State.SystemState == PowerSystemWorking) {
                // When resuming from hibernation w/ multi-vCPUs, the pv drivers
                // may be initialized in parallel causing problems with xenbus being
                // initialized before we try to bind our VIRQ. Kick the job off to a
                // work item and wait for initialization there.
                if (pde->lastPoState == PowerSystemHibernate) {
                    V4vStartDehibernateWorkItem(fdo);
                } else {
                    (VOID)V4vInitializeEventChannel(fdo);
                }
            } else if (isl->Parameters.Power.State.SystemState >= PowerSystemSleeping1) {
                V4vUninitializeEventChannel(fdo);
            }

            // If the last state was S4, flush all connections
            if (pde->lastPoState == PowerSystemHibernate) {
                V4vDisconnectAllStreams(pde);
            }

            // Reset the last state to what we just saw
            pde->lastPoState = isl->Parameters.Power.State.SystemState;
        } else if (isl->Parameters.Power.Type == DevicePowerState) {
            // FIX: Power.State is a union; log the DeviceState member for
            // device power IRPs (the original logged SystemState, which
            // aliases the same storage but is the semantically wrong field).
            TraceNotice(("SET device power: %d %d\n",
                         isl->Parameters.Power.State.DeviceState,
                         isl->Parameters.Power.ShutdownType));
        }
        break;
    case IRP_MN_QUERY_POWER:
        if (isl->Parameters.Power.Type == SystemPowerState) {
            TraceNotice(("QUERY system power: %d %d\n",
                         isl->Parameters.Power.State.SystemState,
                         isl->Parameters.Power.ShutdownType));
        } else if (isl->Parameters.Power.Type == DevicePowerState) {
            // FIX: same union-member correction as above.
            TraceNotice(("QUERY device power: %d %d\n",
                         isl->Parameters.Power.State.DeviceState,
                         isl->Parameters.Power.ShutdownType));
        }
        break;
    default:
        // Other power minor functions are simply passed down.
        break;
    }

    status = IoAcquireRemoveLock(&pde->removeLock, irp);
    if (!NT_SUCCESS(status)) {
        TraceError(("failed to acquire IO lock - error: 0x%x\n", status));
        PoStartNextPowerIrp(irp); // for xp and 2k3
        return V4vSimpleCompleteIrp(irp, status);
    }

    PoStartNextPowerIrp(irp); // for xp and 2k3
    IoSkipCurrentIrpStackLocation(irp);
    status = PoCallDriver(pde->ldo, irp);
    IoReleaseRemoveLock(&pde->removeLock, irp);

    TraceVerbose(("<==== '%s'.\n", __FUNCTION__));

    return status;
}
/* NDIS miniport initialize handler for the PV network adapter: waits
 * for xenbus, locates the adapter's xenstore frontend, allocates the
 * ADAPTER structure and retries AdapterInitialize (up to 30 times, one
 * second apart) until the backend is ready.  On failure all partially
 * constructed state is torn down before returning. */
NDIS_STATUS
MiniportInitialize (
    IN  NDIS_HANDLE                     MiniportAdapterHandle,
    IN  NDIS_HANDLE                     MiniportDriverContext,
    IN  PNDIS_MINIPORT_INIT_PARAMETERS  MiniportInitParameters
    )
{
    PADAPTER adapter = NULL;
    NDIS_STATUS ndisStatus;
    PCHAR path;
    PDEVICE_OBJECT pdo;
    PCHAR xenbusPath = NULL;
    int i;

    UNREFERENCED_PARAMETER(MiniportDriverContext);
    UNREFERENCED_PARAMETER(MiniportInitParameters);

    TraceVerbose(("====> '%s'.\n", __FUNCTION__));

    //
    // Wait for xenbus to come up. SMP guests sometimes try and
    // initialise xennet and xenvbd in parallel when they come back
    // from hibernation, and that causes problems.
    //
    if (!xenbus_await_initialisation()) {
        ndisStatus = NDIS_STATUS_DEVICE_FAILED;
        goto exit;
    }

    //
    // 8021P support is disabled by default.
    // It can be turned on by specifying the appropriate PV boot option.
    //
    if (XenPVFeatureEnabled(DEBUG_NIC_8021_P)) {
        XennetMacOptions |= NDIS_MAC_OPTION_8021P_PRIORITY;
    }

    // Advertise the driver version in xenstore (best effort).
    xenbus_write(XBT_NIL, "drivers/xenwnet", XENNET_VERSION);

    NdisMGetDeviceProperty(MiniportAdapterHandle, &pdo, NULL, NULL, NULL,
                           NULL);

    xenbusPath = xenbus_find_frontend(pdo);
    if (!xenbusPath) {
        ndisStatus = NDIS_STATUS_ADAPTER_NOT_FOUND;
        goto exit;
    }
    TraceNotice(("Found '%s' frontend.\n", xenbusPath));

    adapter = XmAllocateZeroedMemory(sizeof(ADAPTER));
    if (adapter == NULL) {
        ndisStatus = NDIS_STATUS_RESOURCES;
        goto exit;
    }

    // Ownership of the frontend path string transfers here.
    // NOTE(review): presumably AdapterInitialize stores 'path' into the
    // adapter (it is freed below via adapter->BackendPath on failure) -
    // confirm against AdapterInitialize.
    path = xenbusPath;
    xenbusPath = NULL;

    // Retry initialization for up to ~30s while the backend comes up.
    i = 0;
    do {
        ndisStatus = AdapterInitialize(adapter, MiniportAdapterHandle, path);
        if (ndisStatus != NDIS_STATUS_SUCCESS) {
            TraceWarning (("Waiting for backend...\n"));
            NdisMSleep (1000000); // 1 sec
        }
    } while ((ndisStatus != NDIS_STATUS_SUCCESS) && (++i < 30));

    if (ndisStatus != NDIS_STATUS_SUCCESS) {
        goto exit;
    }

exit:
    // Cleanup runs only on failure; the success path falls through with
    // ndisStatus == NDIS_STATUS_SUCCESS and releases nothing.
    if (ndisStatus != NDIS_STATUS_SUCCESS) {
        if (adapter) {
            XmFreeMemory(adapter->BackendPath);
            adapter->BackendPath = NULL;
            AdapterDelete(&adapter);
        }
        if (xenbusPath) {
            XmFreeMemory(xenbusPath);
        }
    }
    TraceVerbose(("<==== '%s'.\n", __FUNCTION__));
    return ndisStatus;
}
/* Drive the frontend/backend xenbus handshake to connect the device:
 * set the frontend INITIALISING, wait for the backend to reach
 * INITWAIT, publish ring-ref/event-channel and go CONNECTED, then wait
 * for the backend to reach CONNECTED as well.
 *
 * Requires the event channel (XenLower->EvtchnPort) and the shared-ring
 * grant (XenLower->SringGrantRef) to have been set up already.
 * Returns STATUS_SUCCESS on a full connection, STATUS_UNSUCCESSFUL if
 * the backend closes or disappears at any step. */
static NTSTATUS
XenLowerConnectBackendInternal(
    PXEN_LOWER XenLower,
    SUSPEND_TOKEN Token)
{
    NTSTATUS status = STATUS_SUCCESS;
    XENBUS_STATE state;
    xenbus_transaction_t xbt;
    PCHAR fepath;

    if (is_null_EVTCHN_PORT(XenLower->EvtchnPort)) {
        TraceError((__FUNCTION__
            ": no event channel port, this routine must be called after event channel initialization\n"));
        return STATUS_UNSUCCESSFUL;
    }

    //---------------------------Backend Wait Ready-------------------------------//
    //
    // Wait for backend to get ready for initialization.
    //

    status = xenbus_change_state(XBT_NIL, XenLower->FrontendPath, "state",
                                 XENBUS_STATE_INITIALISING);
    if (!NT_SUCCESS(status)) {
        TraceWarning((__FUNCTION__
            ": Failed to change front end state to XENBUS_STATE_INITIALISING(%d) status: 0x%x\n",
            XENBUS_STATE_INITIALISING, status));
        // Go on, best effort, chin up
    }

    TraceInfo((__FUNCTION__
        ": Front end state set to XENBUS_STATE_INITIALISING(%d)\n",
        XENBUS_STATE_INITIALISING));

    // Poll backend state changes until it reaches INITWAIT, bailing out
    // if it starts closing or vanishes first.
    state = null_XENBUS_STATE();
    for ( ; ; ) {
        // Turns out suspend tokens are not even used.
        state = XenbusWaitForBackendStateChange(XenLower->BackendPath, state,
                                                NULL, Token);

        if (same_XENBUS_STATE(state, XENBUS_STATE_INITWAIT)) {
            break;
        }

        if (same_XENBUS_STATE(state, XENBUS_STATE_CLOSING) ||
            is_null_XENBUS_STATE(state)) {
            TraceError((__FUNCTION__
                ": backend '%s' went away before we could connect to it?\n",
                XenLower->BackendPath));
            status = STATUS_UNSUCCESSFUL;
            break;
        }
    }

    if (status != STATUS_SUCCESS) {
        return status;
    }

    TraceInfo((__FUNCTION__
        ": Back end state went to XENBUS_STATE_INITWAIT(%d)\n",
        XENBUS_STATE_INITWAIT));

    //----------------------------Backend Connect---------------------------------//
    //
    // Communicate configuration to backend.
    //
    fepath = XenLower->FrontendPath;

    // Publish ring-ref and event-channel in one transaction, retrying on
    // STATUS_RETRY (transaction conflict).
    do {
        xenbus_transaction_start(&xbt);
        xenbus_write_grant_ref(xbt, fepath, "ring-ref",
                               XenLower->SringGrantRef);
        xenbus_write_evtchn_port(xbt, fepath, "event-channel",
                                 XenLower->EvtchnPort);
        xenbus_change_state(xbt, fepath, "state", XENBUS_STATE_CONNECTED);
        status = xenbus_transaction_end(xbt, 0);
    } while (status == STATUS_RETRY);

    if (status != STATUS_SUCCESS) {
        TraceError((__FUNCTION__
            ": failed to configure xenstore frontend values.\n"));
        return STATUS_UNSUCCESSFUL;
    }

    TraceInfo((__FUNCTION__
        ": Front end state set to XENBUS_STATE_CONNECTED(%d)\n",
        XENBUS_STATE_CONNECTED));

    //
    // Wait for backend to accept configuration and complete initialization.
    //
    state = null_XENBUS_STATE();
    for ( ; ; ) {
        state = XenbusWaitForBackendStateChange(XenLower->BackendPath, state,
                                                NULL, Token);

        if (is_null_XENBUS_STATE(state) ||
            same_XENBUS_STATE(state, XENBUS_STATE_CLOSING) ||
            same_XENBUS_STATE(state, XENBUS_STATE_CLOSED)) {
            TraceError((__FUNCTION__
                ": Failed to connected '%s' <-> '%s' backend state: %d\n",
                XenLower->FrontendPath,
                XenLower->BackendPath,
                state));
            status = STATUS_UNSUCCESSFUL;
            break;
        }

        if (same_XENBUS_STATE(state, XENBUS_STATE_CONNECTED)) {
            TraceNotice((__FUNCTION__
                ": Connected '%s' <-> '%s' \n",
                XenLower->FrontendPath,
                XenLower->BackendPath));
            TraceInfo((__FUNCTION__
                ": Back end final state went to XENBUS_STATE_CONNECTED(%d)\n",
                XENBUS_STATE_CONNECTED));
            break;
        }
    }

    return status;
}
/* Allocate up to Requested pages for the balloon and record their PFNs,
 * sorted, in Balloon.PfnArray.  The actual count (possibly less than
 * Requested, possibly 0) is returned via *pAllocated.
 *
 * Returns TRUE if the allocation was "slow" (the measured allocation
 * rate fell below MIN_PAGES_PER_S), which the caller can use for
 * back-off; FALSE otherwise. */
static BOOLEAN
BalloonAllocatePfnArray(
    IN  ULONG   Requested,
    OUT PULONG  pAllocated
    )
{
    LARGE_INTEGER   Start;
    LARGE_INTEGER   End;
    ULONGLONG       TimeDelta;
    BOOLEAN         Slow;
    MDL             *Mdl;
    ULONG           Allocated;
    PFN_NUMBER      *Array;

    XM_ASSERT(Requested <= BALLOON_PFN_ARRAY_SIZE);

    KeQuerySystemTime(&Start);

    Allocated = 0;
    Mdl = BalloonAllocatePagesForMdl(Requested);
    if (Mdl == NULL) {
        Balloon.AllocateFail++;
        goto done;
    }

    XM_ASSERT(Mdl->ByteOffset == 0);
    XM_ASSERT((Mdl->ByteCount & (PAGE_SIZE - 1)) == 0);
    XM_ASSERT(Mdl->MdlFlags & MDL_PAGES_LOCKED);

    /* The allocator may return fewer pages than asked for. */
    Allocated = Mdl->ByteCount >> PAGE_SHIFT;
    if (Allocated < Requested) {
        TraceNotice(("%s: partial allocation (%d < %d)\n", __FUNCTION__,
                     Allocated, Requested));
        Balloon.PartialAllocate++;
    }

    Array = MmGetMdlPfnArray(Mdl);
    BalloonSortPfnArray(Array, Allocated);
    RtlCopyMemory(Balloon.PfnArray, Array, Allocated * sizeof (PFN_NUMBER));

    /* Only the MDL descriptor is freed; the pages themselves remain
       allocated.  NOTE(review): presumably the caller hands the PFNs in
       Balloon.PfnArray back to Xen - confirm against the caller. */
    ExFreePool(Mdl);

done:
    TraceVerbose(("%s: %d page(s)\n", __FUNCTION__, Allocated));

    KeQuerySystemTime(&End);
    /* System time is in 100ns units; convert the elapsed time to ms. */
    TimeDelta = (End.QuadPart - Start.QuadPart) / 10000ull;

    Slow = FALSE;
    if (TimeDelta != 0) {
        ULONGLONG   Rate;

        /* FIX: widen before multiplying - 'Allocated * 1000' was a
           32-bit multiply that could overflow before the cast. */
        Rate = ((ULONGLONG)Allocated * 1000) / TimeDelta;  // pages/s
        if (Rate < MIN_PAGES_PER_S) {
            /* FIX: TimeDelta is ULONGLONG; the original '%d' specifier
               mismatched the 64-bit vararg (garbage output on x64). */
            TraceWarning(("%s: ran for more than %llums\n", __FUNCTION__,
                          TimeDelta));
            Slow = TRUE;
        }
    }

    *pAllocated = Allocated;
    return Slow;
}