/* DPC routine targeted at CPU 0 during a high-IRQL rendezvous.
 * Waits for every other CPU (running XenPci_HighSyncCallFunctionN) to arrive,
 * raises to sync_level with interrupts disabled, runs function0, then releases
 * the spinning CPUs and signals highsync_complete_event.
 * The spin loops poll plain fields; KeMemoryBarrier() in each loop forces a
 * re-read so stores made by the other CPUs become visible.
 */
static VOID
XenPci_HighSyncCallFunction0(
  PRKDPC Dpc,
  PVOID Context,
  PVOID SystemArgument1,
  PVOID SystemArgument2)
{
  highsync_info_t *highsync_info = Context;
  ULONG ActiveProcessorCount;
  KIRQL old_irql;

  UNREFERENCED_PARAMETER(Dpc);
  UNREFERENCED_PARAMETER(SystemArgument1);
  UNREFERENCED_PARAMETER(SystemArgument2);

  FUNCTION_ENTER();
#if (NTDDI_VERSION >= NTDDI_WINXP)
  ActiveProcessorCount = (ULONG)KeNumberProcessors;
#else
  /* pre-XP: KeNumberProcessors is a pointer to the count */
  ActiveProcessorCount = (ULONG)*KeNumberProcessors;
#endif
  InterlockedIncrement(&highsync_info->nr_procs_at_dispatch_level);
  if (highsync_info->sync_level > DISPATCH_LEVEL)
  {
    /* wait for every CPU to reach DISPATCH_LEVEL before raising further */
    while (highsync_info->nr_procs_at_dispatch_level < (LONG)ActiveProcessorCount)
    {
      KeStallExecutionProcessor(1);
      KeMemoryBarrier();
    }
  }
  _disable(); //__asm cli;
  KeRaiseIrql(highsync_info->sync_level, &old_irql);
  /* wait until every OTHER CPU is spinning at sync_level (hence count - 1) */
  while (highsync_info->nr_spinning_at_sync_level < (LONG)ActiveProcessorCount - 1)
  {
    KeStallExecutionProcessor(1);
    KeMemoryBarrier();
  }
  /* all CPUs quiesced: run the privileged callback */
  highsync_info->function0(highsync_info->context);
  KeLowerIrql(old_irql);
  _enable(); //__asm sti;
  /* release the other CPUs from their do_spin loops */
  highsync_info->do_spin = FALSE;
  KeMemoryBarrier();
  /* wait for all the other processors to complete spinning, just in case it matters */
  while (highsync_info->nr_spinning_at_sync_level)
  {
    KeStallExecutionProcessor(1);
    KeMemoryBarrier();
  }
  InterlockedDecrement(&highsync_info->nr_procs_at_dispatch_level);
  /* wait until nr_procs_at_dispatch_level drops to 0 indicating that nothing else requires highsync_info */
  while (highsync_info->nr_procs_at_dispatch_level)
  {
    KeStallExecutionProcessor(1);
    KeMemoryBarrier();
  }
  /* wake the thread blocked in XenPci_HighSync */
  KeSetEvent(&highsync_info->highsync_complete_event, IO_NO_INCREMENT, FALSE);
  FUNCTION_EXIT();
}
/* DPC routine targeted at every CPU other than 0 during the rendezvous.
 * Announces arrival, raises to sync_level with interrupts disabled, spins on
 * do_spin until CPU 0's callback completes, then runs functionN and retires.
 */
static VOID
XenPci_HighSyncCallFunctionN(
  PRKDPC Dpc,
  PVOID Context,
  PVOID SystemArgument1,
  PVOID SystemArgument2)
{
  highsync_info_t *highsync_info = Context;
  ULONG ActiveProcessorCount;
  KIRQL old_irql;

  UNREFERENCED_PARAMETER(Dpc);
  UNREFERENCED_PARAMETER(SystemArgument1);
  UNREFERENCED_PARAMETER(SystemArgument2);

  FUNCTION_ENTER();
  FUNCTION_MSG("(CPU = %d)\n", KeGetCurrentProcessorNumber());
  KdPrint((__DRIVER_NAME "     CPU %d spinning...\n", KeGetCurrentProcessorNumber()));
  InterlockedIncrement(&highsync_info->nr_procs_at_dispatch_level);
  if (highsync_info->sync_level > DISPATCH_LEVEL)
  {
#if (NTDDI_VERSION >= NTDDI_WINXP)
    ActiveProcessorCount = (ULONG)KeNumberProcessors;
#else
    /* pre-XP: KeNumberProcessors is a pointer to the count */
    ActiveProcessorCount = (ULONG)*KeNumberProcessors;
#endif
    /* wait for every CPU to reach DISPATCH_LEVEL before raising further */
    while (highsync_info->nr_procs_at_dispatch_level < (LONG)ActiveProcessorCount)
    {
      KeStallExecutionProcessor(1);
      KeMemoryBarrier();
    }
  }
  _disable(); //__asm cli;
  KeRaiseIrql(highsync_info->sync_level, &old_irql);
  /* tell CPU 0 we have arrived at sync_level, then spin until it is done */
  InterlockedIncrement(&highsync_info->nr_spinning_at_sync_level);
  while(highsync_info->do_spin)
  {
    KeStallExecutionProcessor(1);
    KeMemoryBarrier();
  }
  highsync_info->functionN(highsync_info->context);
  KeLowerIrql(old_irql);
  _enable(); //__asm sti;
  /* retire: CPU 0 waits for both counters to drain before freeing state */
  InterlockedDecrement(&highsync_info->nr_spinning_at_sync_level);
  InterlockedDecrement(&highsync_info->nr_procs_at_dispatch_level);
  FUNCTION_EXIT();
  return;
}
/* Runs function0 on CPU 0 and functionN on every other CPU simultaneously,
 * with all CPUs rendezvoused at highsync_info->sync_level (HIGH_LEVEL) and
 * interrupts disabled. Blocks until the rendezvous completes and all DPCs
 * have retired, then frees the rendezvous state.
 *
 * function0 - callback run on CPU 0 while all other CPUs spin
 * functionN - callback run on every other CPU after function0 completes
 * context   - opaque pointer passed to both callbacks
 *
 * Fix: the ExAllocatePoolWithTag() result was previously used unchecked;
 * a failed allocation would have caused a NULL dereference in RtlZeroMemory.
 * Also removed the contradictory UNREFERENCED_PARAMETER(context) — the
 * parameter is stored into highsync_info and therefore referenced.
 */
VOID
XenPci_HighSync(PXENPCI_HIGHSYNC_FUNCTION function0, PXENPCI_HIGHSYNC_FUNCTION functionN, PVOID context)
{
  ULONG ActiveProcessorCount;
  ULONG i;
  highsync_info_t *highsync_info;
  KIRQL old_irql;

  FUNCTION_ENTER();

  highsync_info = ExAllocatePoolWithTag(NonPagedPool, sizeof(highsync_info_t), XENPCI_POOL_TAG);
  if (highsync_info == NULL)
  {
    /* nothing we can do here except bail out without running the callbacks */
    KdPrint((__DRIVER_NAME "     failed to allocate highsync_info\n"));
    FUNCTION_EXIT();
    return;
  }
  RtlZeroMemory(highsync_info, sizeof(highsync_info_t));
  KeInitializeEvent(&highsync_info->highsync_complete_event, SynchronizationEvent, FALSE);
  highsync_info->function0 = function0;
  highsync_info->functionN = functionN;
  highsync_info->context = context;
  highsync_info->sync_level = HIGH_LEVEL;

#if (NTDDI_VERSION >= NTDDI_WINXP)
  ActiveProcessorCount = (ULONG)KeNumberProcessors;
#else
  /* pre-XP: KeNumberProcessors is a pointer to the count */
  ActiveProcessorCount = (ULONG)*KeNumberProcessors;
#endif

  /* Go to HIGH_LEVEL to prevent any races with Dpc's on the current processor */
  KeRaiseIrql(highsync_info->sync_level, &old_irql);
  highsync_info->do_spin = TRUE;
  for (i = 0; i < ActiveProcessorCount; i++)
  {
    /* CPU 0 runs the privileged callback; all others spin then run functionN */
    if (i == 0)
      KeInitializeDpc(&highsync_info->dpcs[i], XenPci_HighSyncCallFunction0, highsync_info);
    else
      KeInitializeDpc(&highsync_info->dpcs[i], XenPci_HighSyncCallFunctionN, highsync_info);
    KeSetTargetProcessorDpc(&highsync_info->dpcs[i], (CCHAR)i);
    KeSetImportanceDpc(&highsync_info->dpcs[i], HighImportance);
    KdPrint((__DRIVER_NAME "     queuing Dpc for CPU %d\n", i));
    KeInsertQueueDpc(&highsync_info->dpcs[i], NULL, NULL);
  }
  KdPrint((__DRIVER_NAME "     All Dpc's queued\n"));
  KeMemoryBarrier();
  KeLowerIrql(old_irql);
  KdPrint((__DRIVER_NAME "     Waiting for highsync_complete_event\n"));
  KeWaitForSingleObject(&highsync_info->highsync_complete_event, Executive, KernelMode, FALSE, NULL);
#if (NTDDI_VERSION >= NTDDI_WINXP)
  /* make sure no DPC still references highsync_info before freeing it */
  KeFlushQueuedDpcs();
#else
  {
    /* just wait 1 second until all DPC's finish - not ideal but it's only for W2K */
    LARGE_INTEGER interval;
    interval.QuadPart = -1 * 1000 * 1000 * 10; /* 1 second */
    KeDelayExecutionThread(KernelMode, FALSE, &interval);
  }
#endif
  ExFreePoolWithTag(highsync_info, XENPCI_POOL_TAG);
  FUNCTION_EXIT();
}
// // FilterRestart Function // http://msdn.microsoft.com/en-us/library/ff549962(v=VS.85).aspx // _Use_decl_annotations_ NDIS_STATUS SxNdisPause( NDIS_HANDLE FilterModuleContext, PNDIS_FILTER_PAUSE_PARAMETERS PauseParameters ) { PSX_SWITCH_OBJECT switchObject = (PSX_SWITCH_OBJECT)(FilterModuleContext); UNREFERENCED_PARAMETER(PauseParameters); DEBUGP(DL_TRACE, ("===>NDISLWF SxPause: SxInstance %p\n", FilterModuleContext)); SxExtPauseSwitch(switchObject, switchObject->ExtensionContext); // // Set the flag that the filter is going to pause. // NT_ASSERT(switchObject->DataFlowState == SxSwitchRunning); switchObject->DataFlowState = SxSwitchPaused; KeMemoryBarrier(); while(switchObject->PendingInjectedNblCount > 0) { NdisMSleep(1000); } DEBUGP(DL_TRACE, ("<===SxPause: status %x\n", NDIS_STATUS_SUCCESS)); return NDIS_STATUS_SUCCESS; }
/* Resizes the dynamic array's backing buffer to hold Length pointers,
 * copying over the still-valid prefix of the old contents.
 * A zero Length is clamped to 1 so the array always owns a buffer.
 * NOTE(review): the buffer swap uses InterlockedExchangePointer but the old
 * buffer is freed immediately afterwards and the length fields are updated
 * non-atomically — presumably callers serialize access externally; confirm.
 */
NTSTATUS DymArrayReserve(PUTILS_DYM_ARRAY Array, SIZE_T Length)
{
   PVOID tmpBuffer = NULL;
   SIZE_T minLength = 0;
   NTSTATUS status = STATUS_UNSUCCESSFUL;
   DEBUG_ENTER_FUNCTION("Array=0x%p; Length=%u", Array, Length);

   if (Length == 0)
      Length = 1;

   tmpBuffer = HeapMemoryAlloc(Array->PoolType, Length*sizeof(PVOID));
   if (tmpBuffer != NULL) {
      /* keep whichever is smaller: the valid contents or the new capacity */
      minLength = min(Array->ValidLength, Length);
      memcpy(tmpBuffer, Array->Data, minLength * sizeof(PVOID));
      /* swap in the new buffer; tmpBuffer now holds the OLD buffer */
      tmpBuffer = InterlockedExchangePointer((PVOID *)&Array->Data, tmpBuffer);
      KeMemoryBarrier();
      HeapMemoryFree(tmpBuffer);
      Array->AllocatedLength = Length;
      Array->ValidLength = minLength;
      status = STATUS_SUCCESS;
   } else status = STATUS_INSUFFICIENT_RESOURCES;

   DEBUG_EXIT_FUNCTION("0x%x", status);
   return status;
}
/*
 * --------------------------------------------------------------------------
 * Implements filter driver's FilterNetPnPEvent function.
 *
 * Handles two events of interest before forwarding the notification down
 * the stack: switch activation (complete our deferred activation) and
 * pre-detach (mark the data path paused).
 * --------------------------------------------------------------------------
 */
NDIS_STATUS
OvsExtNetPnPEvent(NDIS_HANDLE filterModuleContext,
                  PNET_PNP_EVENT_NOTIFICATION netPnPEvent)
{
    NDIS_STATUS status = NDIS_STATUS_SUCCESS;
    POVS_SWITCH_CONTEXT switchContext = (POVS_SWITCH_CONTEXT)filterModuleContext;

    OVS_LOG_TRACE("Enter: filterModuleContext: %p, NetEvent: %d",
                  filterModuleContext, (netPnPEvent->NetPnPEvent).NetEvent);

    switch (netPnPEvent->NetPnPEvent.NetEvent) {
    case NetEventSwitchActivate:
        /*
         * Asynchronous notification that the switch finished activating;
         * complete our own activation now.
         */
        ASSERT(switchContext->isActivated == FALSE);
        if (switchContext->isActivated == FALSE) {
            status = OvsActivateSwitch(switchContext);
            OVS_LOG_TRACE("OvsExtNetPnPEvent: activated switch: %p "
                          "status: %s", switchContext,
                          status ? "TRUE" : "FALSE");
        }
        break;
    case NetEventFilterPreDetach:
        /* Publish the paused state before the detach proceeds. */
        switchContext->dataFlowState = OvsSwitchPaused;
        KeMemoryBarrier();
        break;
    default:
        break;
    }

    /* Always forward the notification to the lower layers. */
    status = NdisFNetPnPEvent(switchContext->NdisFilterHandle, netPnPEvent);

    OVS_LOG_TRACE("Exit: OvsExtNetPnPEvent");
    return status;
}
/* Dedicated worker-thread routine that processes system power IRPs handed
 * off by PdoDispatchPower. Sleeps on the thread event until an IRP is
 * posted in Pdo->SystemPowerIrp, consumes it, dispatches by minor function,
 * and releases the remove lock taken at dispatch time.
 * Returns STATUS_SUCCESS when the thread is alerted to exit.
 */
static NTSTATUS
PdoSystemPower(
    IN  PXENFILT_THREAD Self,
    IN  PVOID           Context
    )
{
    PXENFILT_PDO        Pdo = Context;
    PKEVENT             Event;

    Event = ThreadGetEvent(Self);

    for (;;) {
        PIRP                Irp;
        PIO_STACK_LOCATION  StackLocation;
        UCHAR               MinorFunction;

        /* only block when no IRP is pending, so a posted IRP is not missed */
        if (Pdo->SystemPowerIrp == NULL) {
            (VOID) KeWaitForSingleObject(Event,
                                         Executive,
                                         KernelMode,
                                         FALSE,
                                         NULL);
            KeClearEvent(Event);
        }

        if (ThreadIsAlerted(Self))
            break;

        Irp = Pdo->SystemPowerIrp;
        if (Irp == NULL)
            continue;

        /* take ownership of the IRP; barrier orders the clear against the
           subsequent processing (dispatcher asserts the slot is NULL) */
        Pdo->SystemPowerIrp = NULL;
        KeMemoryBarrier();

        StackLocation = IoGetCurrentIrpStackLocation(Irp);
        MinorFunction = StackLocation->MinorFunction;

        switch (StackLocation->MinorFunction) {
        case IRP_MN_SET_POWER:
            (VOID) __PdoSetSystemPower(Pdo, Irp);
            break;

        case IRP_MN_QUERY_POWER:
            (VOID) __PdoQuerySystemPower(Pdo, Irp);
            break;

        default:
            /* dispatcher only posts SET/QUERY power IRPs here */
            ASSERT(FALSE);
            break;
        }

        /* pairs with IoAcquireRemoveLock in the dispatch routine */
        IoReleaseRemoveLock(&Pdo->Dx->RemoveLock, Irp);
    }

    return STATUS_SUCCESS;
}
/* Grants the domain Domain access to the page at Pfn via a cached grant-table
 * descriptor. The shared grant entry is populated in two steps: all fields
 * are written first, then GTF_permit_access is set, with memory barriers in
 * between so the remote domain can never observe a permitted-but-incomplete
 * entry. Returns STATUS_INSUFFICIENT_RESOURCES if the cache is exhausted.
 */
static NTSTATUS
GnttabPermitForeignAccess(
    IN  PXENBUS_GNTTAB_CONTEXT      Context,
    IN  PXENBUS_GNTTAB_CACHE        Cache,
    IN  BOOLEAN                     Locked,
    IN  USHORT                      Domain,
    IN  PFN_NUMBER                  Pfn,
    IN  BOOLEAN                     ReadOnly,
    OUT PXENBUS_GNTTAB_DESCRIPTOR   *Descriptor
    )
{
    grant_entry_v1_t                *Entry;
    NTSTATUS                        status;

    *Descriptor = CACHE(Get,
                        Context->CacheInterface,
                        Cache->Cache,
                        Locked);

    status = STATUS_INSUFFICIENT_RESOURCES;
    if (*Descriptor == NULL)
        goto fail1;

    /* fill in the local copy of the grant entry first */
    (*Descriptor)->Entry.flags = (ReadOnly) ? GTF_readonly : 0;
    (*Descriptor)->Entry.domid = Domain;

    (*Descriptor)->Entry.frame = (uint32_t)Pfn;
    ASSERT3U((*Descriptor)->Entry.frame, ==, Pfn);  /* no truncation */

    Entry = &Context->Entry[(*Descriptor)->Reference];

    /* publish frame/domid before permitting access, so the other domain
       cannot see a half-initialized entry */
    *Entry = (*Descriptor)->Entry;
    KeMemoryBarrier();

    Entry->flags |= GTF_permit_access;
    KeMemoryBarrier();

    return STATUS_SUCCESS;

fail1:
    Error("fail1 (%08x)\n", status);

    return status;
}
_Use_decl_annotations_
VOID
TXSendCompleteWorkItem(
    PVOID  FunctionContext,
    NDIS_HANDLE  WorkItem)
/*++

Routine Description:

    This work item handler is used to do send completions in the case
    when we are trying to avoid a DPC watchdog timeout

    The Running/Queued flags are updated around KeMemoryBarrier() calls so
    that TXNblRelease (or whichever path queues this work item) observes
    them in a consistent order.

Arguments:

    FunctionContext  - The Adapter object for which send-completions are to be done

--*/
{
    PMP_ADAPTER Adapter = MP_ADAPTER_FROM_CONTEXT(FunctionContext);
    KIRQL OldIrql;

    UNREFERENCED_PARAMETER(WorkItem);

    ASSERT(Adapter != NULL);
    _Analysis_assume_(Adapter != NULL);

    DEBUGP(MP_TRACE, "[%p] ---> TXSendCompleteWorkItem.\n", Adapter);

    /* mark Running before clearing Queued so there is no window where the
       work item appears neither queued nor running */
    Adapter->SendCompleteWorkItemRunning = TRUE;
    KeMemoryBarrier();

    Adapter->SendCompleteWorkItemQueued = FALSE;
    KeMemoryBarrier();

    /* TXSendComplete expects to be called at DISPATCH_LEVEL */
    NDIS_RAISE_IRQL_TO_DISPATCH(&OldIrql);
    TXSendComplete(Adapter);
    NDIS_LOWER_IRQL(OldIrql,DISPATCH_LEVEL);

    KeMemoryBarrier();
    Adapter->SendCompleteWorkItemRunning = FALSE;

    DEBUGP(MP_TRACE, "[%p] <--- TXSendCompleteWorkItem.\n", Adapter);
}
/* Interface-facing variant: grants Domain access to the page at Pfn via a
 * cached grant-table entry. The shared table slot is written fields-first,
 * then GTF_permit_access is set, with barriers so the remote domain never
 * observes a permitted-but-incomplete entry. Returns
 * STATUS_INSUFFICIENT_RESOURCES if the cache is exhausted.
 */
static NTSTATUS
GnttabPermitForeignAccess(
    IN  PINTERFACE              Interface,
    IN  PXENBUS_GNTTAB_CACHE    Cache,
    IN  BOOLEAN                 Locked,
    IN  USHORT                  Domain,
    IN  PFN_NUMBER              Pfn,
    IN  BOOLEAN                 ReadOnly,
    OUT PXENBUS_GNTTAB_ENTRY    *Entry
    )
{
    PXENBUS_GNTTAB_CONTEXT      Context = Interface->Context;
    NTSTATUS                    status;

    *Entry = XENBUS_CACHE(Get,
                          &Context->CacheInterface,
                          Cache->Cache,
                          Locked);

    status = STATUS_INSUFFICIENT_RESOURCES;
    if (*Entry == NULL)
        goto fail1;

    /* fill in the local copy of the grant entry first */
    (*Entry)->Entry.flags = (ReadOnly) ? GTF_readonly : 0;
    (*Entry)->Entry.domid = Domain;

    (*Entry)->Entry.frame = (uint32_t)Pfn;
    ASSERT3U((*Entry)->Entry.frame, ==, Pfn);   /* no truncation */

    /* publish frame/domid to the shared table before permitting access */
    Context->Table[(*Entry)->Reference] = (*Entry)->Entry;
    KeMemoryBarrier();

    Context->Table[(*Entry)->Reference].flags |= GTF_permit_access;
    KeMemoryBarrier();

    return STATUS_SUCCESS;

fail1:
    Error("fail1 (%08x)\n", status);

    return status;
}
/* Powers the NIC down out of D0: publishes shutting_down, tears down the
 * TX and RX paths, marks the device disconnected, then asks the PV bus to
 * shut the backend device down. The statement order matters — each flag is
 * published (with a barrier) before the teardown that depends on it.
 */
NDIS_STATUS
XenNet_D0Exit(struct xennet_info *xi)
{
  FUNCTION_ENTER();
  KdPrint((__DRIVER_NAME "     IRQL = %d\n", KeGetCurrentIrql()));

  xi->shutting_down = TRUE;
  KeMemoryBarrier(); /* make sure everyone sees that we are now shutting down */

  XenNet_TxShutdown(xi);
  XenNet_RxShutdown(xi);

  xi->connected = FALSE;
  KeMemoryBarrier(); /* make sure everyone sees that we are now disconnected */

  xi->vectors.XenPci_XenShutdownDevice(xi->vectors.context);

  FUNCTION_EXIT();

  return STATUS_SUCCESS;
}
/* DPC that tracks the PDO's suspend/resume state machine.
 * SUSPENDING: once the RX ring is fully returned, acknowledge by setting the
 *   FDO state and notifying the PDO event channel.
 * RESUMING: queue a work item (once only) to perform the resume.
 * Any other state is simply mirrored back and acknowledged.
 *
 * Fix: the IoAllocateWorkItem() result was previously used unchecked; a
 * failed allocation would have been stored and queued as NULL.
 */
static VOID
XenNet_SuspendResume(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2)
{
  struct xennet_info *xi = context;
  KIRQL old_irql;
  PIO_WORKITEM resume_work_item;

  UNREFERENCED_PARAMETER(dpc);
  UNREFERENCED_PARAMETER(arg1);
  UNREFERENCED_PARAMETER(arg2);

  FUNCTION_ENTER();

  switch (xi->device_state->suspend_resume_state_pdo)
  {
  case SR_STATE_SUSPENDING:
    KdPrint((__DRIVER_NAME "     New state SUSPENDING\n"));
    KeAcquireSpinLock(&xi->rx_lock, &old_irql);
    /* only acknowledge once every RX buffer has been returned */
    if (xi->rx_id_free == NET_RX_RING_SIZE)
    {
      xi->device_state->suspend_resume_state_fdo = SR_STATE_SUSPENDING;
      KdPrint((__DRIVER_NAME "     Notifying event channel %d\n", xi->device_state->pdo_event_channel));
      xi->vectors.EvtChn_Notify(xi->vectors.context, xi->device_state->pdo_event_channel);
    }
    KeReleaseSpinLock(&xi->rx_lock, old_irql);
    break;
  case SR_STATE_RESUMING:
    KdPrint((__DRIVER_NAME "     New state SR_STATE_RESUMING\n"));
    /* do it like this so we don't race and double-free the work item */
    resume_work_item = IoAllocateWorkItem(xi->fdo);
    if (resume_work_item == NULL)
    {
      /* out of resources - the next DPC will retry while the state is still RESUMING */
      KdPrint((__DRIVER_NAME "     IoAllocateWorkItem failed\n"));
      break;
    }
    KeAcquireSpinLock(&xi->resume_lock, &old_irql);
    if (xi->resume_work_item || xi->device_state->suspend_resume_state_fdo == SR_STATE_RESUMING)
    {
      /* resume already in progress - discard the duplicate work item */
      KeReleaseSpinLock(&xi->resume_lock, old_irql);
      IoFreeWorkItem(resume_work_item);
      return;
    }
    xi->resume_work_item = resume_work_item;
    KeReleaseSpinLock(&xi->resume_lock, old_irql);
    IoQueueWorkItem(xi->resume_work_item, XenNet_ResumeWorkItem, DelayedWorkQueue, xi);
    break;
  default:
    KdPrint((__DRIVER_NAME "     New state %d\n", xi->device_state->suspend_resume_state_fdo));
    /* mirror the PDO state and acknowledge */
    xi->device_state->suspend_resume_state_fdo = xi->device_state->suspend_resume_state_pdo;
    KdPrint((__DRIVER_NAME "     Notifying event channel %d\n", xi->device_state->pdo_event_channel));
    xi->vectors.EvtChn_Notify(xi->vectors.context, xi->device_state->pdo_event_channel);
    break;
  }
  KeMemoryBarrier();

  FUNCTION_EXIT();
}
NDIS_STATUS
TXNblReference(
    _In_  PMP_ADAPTER       Adapter,
    _In_  PNET_BUFFER_LIST  NetBufferList)
/*++

Routine Description:

    Adds a reference on a NBL that is being transmitted.
    The NBL won't be returned to the protocol until the last reference is released.

    The adapter-wide count is incremented BEFORE the ready check (and undone
    on failure) so a concurrent teardown that flips the ready state cannot
    miss an in-flight reference.

    Runs at IRQL <= DISPATCH_LEVEL.

Arguments:

    Adapter        Pointer to our adapter
    NetBufferList  The NBL to reference

Return Value:

    NDIS_STATUS_SUCCESS if reference was acquired succesfully.
    NDIS_STATUS_ADAPTER_NOT_READY if the adapter state is such that we should not acquire new references to resources

--*/
{
    NdisInterlockedIncrement(&Adapter->nBusySend);

    //
    // Make sure the increment happens before ready state check
    //
    KeMemoryBarrier();

    //
    // If the adapter is not ready, undo the reference and fail the call
    //
    if(!MP_IS_READY(Adapter))
    {
        InterlockedDecrement(&Adapter->nBusySend);
        DEBUGP(MP_LOUD, "[%p] Could not acquire transmit reference, the adapter is not ready.\n", Adapter);
        return NDIS_STATUS_ADAPTER_NOT_READY;
    }

    // per-NBL reference; released as the miniport completes the NBL
    NdisInterlockedIncrement(&SEND_REF_FROM_NBL(NetBufferList));

    return NDIS_STATUS_SUCCESS;
}
/*
 * --------------------------------------------------------------------------
 * Implements filter driver's FilterPause function
 *
 * Marks the data path as paused and sleep-polls until all outstanding OID
 * requests have drained, then reports success to NDIS.
 * --------------------------------------------------------------------------
 */
NDIS_STATUS
OvsExtPause(NDIS_HANDLE filterModuleContext,
            PNDIS_FILTER_PAUSE_PARAMETERS pauseParameters)
{
    POVS_SWITCH_CONTEXT switchContext;

    UNREFERENCED_PARAMETER(pauseParameters);

    switchContext = (POVS_SWITCH_CONTEXT)filterModuleContext;
    OVS_LOG_TRACE("Enter: filterModuleContext %p", filterModuleContext);

    /* Publish the paused state before polling the pending-OID counter. */
    switchContext->dataFlowState = OvsSwitchPaused;
    KeMemoryBarrier();

    /* Wait (1ms at a time) for in-flight OID requests to complete. */
    while (switchContext->pendingOidCount > 0) {
        NdisMSleep(1000);
    }

    OVS_LOG_TRACE("Exit: OvsExtPause Successfully");
    return NDIS_STATUS_SUCCESS;
}
/*
 * --------------------------------------------------------------------------
 * Implements filter driver's FilterDetach function.
 *
 * Requires the data path to already be paused. Marks the control path
 * detached, drains outstanding OID requests, then tears down the switch
 * state so a subsequent attach starts clean.
 * --------------------------------------------------------------------------
 */
_Use_decl_annotations_
VOID
OvsExtDetach(NDIS_HANDLE filterModuleContext)
{
    POVS_SWITCH_CONTEXT switchContext;

    switchContext = (POVS_SWITCH_CONTEXT)filterModuleContext;
    OVS_LOG_TRACE("Enter: filterModuleContext %p", filterModuleContext);

    ASSERT(switchContext->dataFlowState == OvsSwitchPaused);

    /* Publish the detached state before polling the pending-OID counter. */
    switchContext->controlFlowState = OvsSwitchDetached;
    KeMemoryBarrier();

    /* Wait (1ms at a time) for in-flight OID requests to complete. */
    while (switchContext->pendingOidCount > 0) {
        NdisMSleep(1000);
    }

    OvsDeleteSwitch(switchContext);
    OvsCleanupIpHelper();

    /* This completes the cleanup, and a new attach can be handled now. */
    OVS_LOG_TRACE("Exit: OvsDetach Successfully");
}
/* Event-channel callback: snapshots the PDO suspend/resume state and queues
 * the appropriate DPCs. A suspend-state change queues the suspend DPC; normal
 * traffic (connected, not inactive, not resuming) queues the rx/tx DPC.
 * Always returns TRUE.
 */
static BOOLEAN
XenNet_HandleEvent(PVOID context)
{
  struct xennet_info *xi = context;
  ULONG pdo_state;

  /* snapshot once, then order the read against the fdo-state comparison */
  pdo_state = xi->device_state->suspend_resume_state_pdo;
  KeMemoryBarrier();

  if (!xi->shutting_down && pdo_state != xi->device_state->suspend_resume_state_fdo)
  {
    KeInsertQueueDpc(&xi->suspend_dpc, NULL, NULL);
  }
  if (xi->connected && !xi->inactive && pdo_state != SR_STATE_RESUMING)
  {
    KeInsertQueueDpc(&xi->rxtx_dpc, NULL, NULL);
  }
  return TRUE;
}
// // FilterDetach Function // http://msdn.microsoft.com/en-us/library/ff549918(v=VS.85).aspx // _Use_decl_annotations_ VOID SxNdisDetach( NDIS_HANDLE FilterModuleContext ) { PSX_SWITCH_OBJECT switchObject = (PSX_SWITCH_OBJECT)FilterModuleContext; DEBUGP(DL_TRACE, ("===>SxDetach: SxInstance %p\n", FilterModuleContext)); // // The extension must be in paused state. // NT_ASSERT(switchObject->DataFlowState == SxSwitchPaused); switchObject->ControlFlowState = SxSwitchDetached; KeMemoryBarrier(); while(switchObject->PendingOidCount > 0) { NdisMSleep(1000); } SxExtDeleteSwitch(switchObject, switchObject->ExtensionContext); NdisAcquireSpinLock(&SxExtensionListLock); RemoveEntryList(&switchObject->Link); NdisReleaseSpinLock(&SxExtensionListLock); ExFreePool(switchObject); // // Alway return success. // DEBUGP(DL_TRACE, ("<===SxDetach Successfully\n")); return; }
/* Arms, ignores, or shuts down the LKL tick timer depending on delta.
 * LKL_TIMER_INIT is a no-op; LKL_TIMER_SHUTDOWN performs the one-shot
 * teardown handshake with the timer thread; any other value arms the
 * one-shot kernel timer.
 * NOTE(review): the /100 conversion suggests delta is presumably in
 * nanoseconds (KeSetTimer takes 100ns units) — confirm against callers.
 */
static void set_timer(unsigned long delta)
{
	if (delta == LKL_TIMER_INIT)
		return;

	if (delta == LKL_TIMER_SHUTDOWN) {
		/* should not deliver timer shutdown twice */
		if(timer_done) {
			/* deliberate hang so the double-shutdown is debuggable */
			DbgPrint("*** LKL_TIMER_SHUTDOWN called when timer_done ***");
			while(1)
				;
		}

		/* deque the timer so it won't be put in signaled state */
		KeCancelTimer(&timer);

		/* timers run on DPCs. This returns after all active
		 * DPCs have executed, which means the timer is
		 * certainly not running nor being schduled after this
		 * point. */
		KeFlushQueuedDpcs();

		/* signal the timer interrupt we're done */
		timer_done = 1;

		/* the memory barrier is needed because it may be
		 * possible for the compiler/cpu to call
		 * KeReleaseSemaphore before assigning
		 * timer_done. That would make the timer_thread wake
		 * from the wait-for-multiple-objs without noticing
		 * out signalling */
		KeMemoryBarrier();
		KeReleaseSemaphore(&timer_killer_sem, 0, 1, 0);
		return;
	}

	/* negative = relative due time, in 100ns units */
	KeSetTimer(&timer, RtlConvertLongToLargeInteger((unsigned long)(-(delta/100))), NULL);
}
/* Write memory barrier: KeMemoryBarrier() emits the hardware fence and
 * _WriteBarrier() additionally stops the compiler reordering stores
 * across this point. */
static FORCEINLINE VOID
xen_wmb()
{
  KeMemoryBarrier();
  _WriteBarrier();
}
//
// Submits a VC4 control list for execution and waits for it to finish.
//
// bBinningControlList - selects control-list executor thread 0 (binning)
//                       or thread 1 (rendering)
// startAddress        - control list start address (CTnCA)
// endAddress          - control list end address (CTnEA); writing this
//                       register kicks off execution
//
// Each MMIO register write is followed by KeMemoryBarrier() so the CS,
// CA and EA writes reach the hardware in that exact order.
//
void
RosKmdRapAdapter::SubmitControlList(
    bool bBinningControlList,
    UINT startAddress,
    UINT endAddress)
{
    //
    // Setting End Address register kicks off execution of the Control List
    // Current Address register starts with CL start address and reaches
    // CL end address upon completion
    //

    V3D_REG_CT0CS regCTnCS = { 0 };

    regCTnCS.CTRUN = 1;

    if (bBinningControlList)
    {
        m_pVC4RegFile->V3D_CT0CS = regCTnCS.Value;
        KeMemoryBarrier();

        m_pVC4RegFile->V3D_CT0CA = startAddress;
        KeMemoryBarrier();

        m_pVC4RegFile->V3D_CT0EA = endAddress;
        KeMemoryBarrier();
    }
    else
    {
        m_pVC4RegFile->V3D_CT1CS = regCTnCS.Value;
        KeMemoryBarrier();

        m_pVC4RegFile->V3D_CT1CA = startAddress;
        KeMemoryBarrier();

        m_pVC4RegFile->V3D_CT1EA = endAddress;
        KeMemoryBarrier();
    }

    //
    // Completion of DMA buffer is acknowledged with interrupt and
    // subsequent DPC signals m_hwDmaBufCompletionEvent
    //
    // TODO[indyz]: Enable interrupt and handle TDR
    //

    NTSTATUS status;

#if 1
    //
    // Set time out to 64 millisecond
    //

    LARGE_INTEGER timeOut;
    timeOut.QuadPart = -64 * 1000 * 1000 / 10;

    UINT i;
    // Poll up to 32 times (64ms each) for the CTRUN bit to clear
    for (i = 0; i < 32; i++)
    {
        status = KeWaitForSingleObject(
            &m_hwDmaBufCompletionEvent,
            Executive,
            KernelMode,
            FALSE,
            &timeOut);

        NT_ASSERT(status == STATUS_TIMEOUT);

        // Check Control List Executor Thread 0 or 1 Control and Status

        if (bBinningControlList)
        {
            regCTnCS.Value = m_pVC4RegFile->V3D_CT0CS;
        }
        else
        {
            regCTnCS.Value = m_pVC4RegFile->V3D_CT1CS;
        }

        if (regCTnCS.CTRUN == 0)
        {
            break;
        }
    }

    // Check for TDR condition
    NT_ASSERT(i < 32);
#else
    status = KeWaitForSingleObject(
        &m_hwDmaBufCompletionEvent,
        Executive,
        KernelMode,
        FALSE,
        NULL);

    NT_ASSERT(status == STATUS_SUCCESS);
#endif
}
/* Full memory barrier: KeMemoryBarrier() emits the hardware fence and
 * _ReadWriteBarrier() additionally stops the compiler reordering any
 * memory access across this point. */
static FORCEINLINE VOID
xen_mb()
{
  KeMemoryBarrier();
  _ReadWriteBarrier();
}
/* Write memory barrier: hardware fence plus compiler store-reorder fence.
 * NOTE(review): the double-underscore name is in the implementation-reserved
 * namespace; renaming would require updating callers elsewhere. */
static FORCEINLINE VOID
__WriteMemoryBarrier()
{
  KeMemoryBarrier();
  _WriteBarrier();
}
/* Applies an NDIS RSS parameter change request to the NIC's pending RSS
 * state (pNicCtx->RssParamsReq) under the context lock, then either queues
 * the set-RSS work item (if none is queued/running) or applies the
 * parameters inline, depending on XMP_SET_RSS_IN_WORKER.
 * Runs as an NDIS synchronize callback; SynchronizeContext is the NIC ctx.
 * Always returns TRUE.
 */
BOOLEAN
xmpReqSetRss(
  __in  NDIS_HANDLE  SynchronizeContext
  )
{
  USHORT                         i;
  UCHAR                         *pSecretKey;
  UCHAR                         *pTable;
  BOOLEAN                        bTable, bKey, bCpu, bHashInfo;
  BOOLEAN                        bQueue = FALSE;
  KIRQL                          OldIrql;
  xmpNicCtx_t                   *pNicCtx = SynchronizeContext;
  NDIS_RECEIVE_SCALE_PARAMETERS *pParams = &pNicCtx->ndisRssSet.params;
  static int                     count = 0;   /* debug-only invocation counter */

  XMPTRACE(XMP_DBG_WRN, ("==> xmpReqSetRss Count = %d Flags = 0x%8.8lx\n",
                         ++count, pParams->Flags));
  XF_GET_SLOCK(&pNicCtx->lock);

  /* key and indirection table are appended to the params structure at
     the offsets NDIS supplies */
  pSecretKey = ((UCHAR *) pParams) + pParams->HashSecretKeyOffset;
  pTable     = ((UCHAR *) pParams) + pParams->IndirectionTableOffset;

  /* decide which pieces of state this request actually changes */
  bTable = bKey = bCpu = bHashInfo = FALSE;
  if ( XMP_NIC_RSS_IS_ENABLED(pNicCtx) &&
       !(bit(pParams->Flags, NDIS_RSS_PARAM_FLAG_DISABLE_RSS)) )
  {
    if ( !(bit(pParams->Flags, NDIS_RSS_PARAM_FLAG_HASH_KEY_UNCHANGED)) )
      bKey = TRUE;
    if ( !(bit(pParams->Flags, NDIS_RSS_PARAM_FLAG_ITABLE_UNCHANGED)) )
      bTable = TRUE;
    if ( !(bit(pParams->Flags, NDIS_RSS_PARAM_FLAG_BASE_CPU_UNCHANGED )) )
      bCpu = TRUE;
  }
  else
  {
    /* RSS disabled (or being disabled): refresh everything and clear the
       hash function */
    bTable = bKey = bCpu = bHashInfo = TRUE;
    pNicCtx->RssParamsReq.HashFunction = 0;
  }

  /* Enable RTH_CFG, RTS_ENHANCED and RMAC_STRIP_FCS to 0 */
  if ( bHashInfo )
  {
    pNicCtx->RssParamsReq.HashType =
      NDIS_RSS_HASH_TYPE_FROM_HASH_INFO(pParams->HashInformation);
    pNicCtx->RssParamsReq.HashFunction =
      NDIS_RSS_HASH_FUNC_FROM_HASH_INFO(pParams->HashInformation);
  }

  if ( NDIS_RSS_HASH_FUNC_FROM_HASH_INFO(pParams->HashInformation) != 0 )
  {
    if( bCpu )
      pNicCtx->RssParamsReq.BaseCpuNum = pParams->BaseCpuNumber;

    if ( bTable )
    {
      /* clamp the indirection table to our maximum */
      pNicCtx->RssParamsReq.TableSz =
        pParams->IndirectionTableSize > XMP_NIC_RSS_MAX_TABLE_SZ ?
        XMP_NIC_RSS_MAX_TABLE_SZ : pParams->IndirectionTableSize;
      {
        /* HashBitsSz = log2(TableSz): number of hash LSBs used to index */
        ULONG  TableSz = pNicCtx->RssParamsReq.TableSz;
        USHORT NumLsbs = 0;

        while (TableSz != 1)
        {
          TableSz = TableSz>>1;
          NumLsbs++;
        }
        pNicCtx->RssParamsReq.HashBitsSz = NumLsbs;
      }
      NdisMoveMemory(pNicCtx->RssParamsReq.Table, pTable,
                     pNicCtx->RssParamsReq.TableSz);
    }

    if ( bKey )
    {
      pNicCtx->RssParamsReq.SecretKeySz = pParams->HashSecretKeySize;
      NdisMoveMemory(pNicCtx->RssParamsReq.SecretKey, pSecretKey,
                     pNicCtx->RssParamsReq.SecretKeySz);
    }
  }

  pNicCtx->RssParamsReq.Flags = pParams->Flags;

  /* publish the request before flipping the queued/running flags */
  KeMemoryBarrier();
  if ( !pNicCtx->wiQueued )
  {
    pNicCtx->wiQueued = TRUE;
    if ( !pNicCtx->wiRunning )
      bQueue = TRUE;
  }
  XF_FREE_SLOCK(&pNicCtx->lock);

#ifdef XMP_SET_RSS_IN_WORKER
  if ( bQueue )
    NdisQueueIoWorkItem(pNicCtx->hSetRssWI,
                        (NDIS_IO_WORKITEM_ROUTINE)xmpNicSetRssParameters,
                        pNicCtx);
#else
  xmpNicSetRssParameters(pNicCtx, NULL);
#endif
  XMPTRACE(XMP_DBG_WRN, ("<== xmpReqSetRss\n"));
  return TRUE;
}
/*
 * --------------------------------------------------------------------------
 * IOCTL function handler for the device.
 *
 * Serializes netlink operations per open instance (via the inUse flag),
 * validates the input/output buffers for the requested ioctl, resolves the
 * netlink family from the message type, validates the command and invokes
 * its handler, then completes the IRP.
 * --------------------------------------------------------------------------
 */
NTSTATUS
OvsDeviceControl(PDEVICE_OBJECT deviceObject,
                 PIRP irp)
{
    PIO_STACK_LOCATION irpSp;
    NTSTATUS status = STATUS_SUCCESS;
    PFILE_OBJECT fileObject;
    PVOID inputBuffer = NULL;
    PVOID outputBuffer = NULL;
    UINT32 inputBufferLen, outputBufferLen;
    UINT32 code, replyLen = 0;
    POVS_OPEN_INSTANCE instance;
    UINT32 devOp;
    OVS_MESSAGE ovsMsgReadOp;      /* stack message used for read ops */
    POVS_MESSAGE ovsMsg;
    NETLINK_FAMILY *nlFamilyOps;

#ifdef DBG
    POVS_DEVICE_EXTENSION ovsExt =
        (POVS_DEVICE_EXTENSION)NdisGetDeviceReservedExtension(deviceObject);
    ASSERT(deviceObject == gOvsDeviceObject);
    ASSERT(ovsExt);
    ASSERT(ovsExt->numberOpenInstance > 0);
#else
    UNREFERENCED_PARAMETER(deviceObject);
#endif

    irpSp = IoGetCurrentIrpStackLocation(irp);

    ASSERT(irpSp->MajorFunction == IRP_MJ_DEVICE_CONTROL);
    ASSERT(irpSp->FileObject != NULL);

    fileObject = irpSp->FileObject;
    instance = (POVS_OPEN_INSTANCE)fileObject->FsContext;
    code = irpSp->Parameters.DeviceIoControl.IoControlCode;
    inputBufferLen = irpSp->Parameters.DeviceIoControl.InputBufferLength;
    outputBufferLen = irpSp->Parameters.DeviceIoControl.OutputBufferLength;
    inputBuffer = irp->AssociatedIrp.SystemBuffer;

    /* Concurrent netlink operations are not supported. */
    if (InterlockedCompareExchange((LONG volatile *)&instance->inUse, 1, 0)) {
        status = STATUS_RESOURCE_IN_USE;
        goto done;
    }

    /*
     * Validate the input/output buffer arguments depending on the type of the
     * operation.
     */
    switch (code) {
    case OVS_IOCTL_TRANSACT:
        /* Input buffer is mandatory, output buffer is optional. */
        if (outputBufferLen != 0) {
            status = MapIrpOutputBuffer(irp, outputBufferLen,
                                        sizeof *ovsMsg, &outputBuffer);
            if (status != STATUS_SUCCESS) {
                goto done;
            }
            ASSERT(outputBuffer);
        }

        if (inputBufferLen < sizeof (*ovsMsg)) {
            status = STATUS_NDIS_INVALID_LENGTH;
            goto done;
        }

        ovsMsg = inputBuffer;
        devOp = OVS_TRANSACTION_DEV_OP;
        break;

    case OVS_IOCTL_READ:
        /* Output buffer is mandatory. */
        if (outputBufferLen != 0) {
            status = MapIrpOutputBuffer(irp, outputBufferLen,
                                        sizeof *ovsMsg, &outputBuffer);
            if (status != STATUS_SUCCESS) {
                goto done;
            }
            ASSERT(outputBuffer);
        } else {
            status = STATUS_NDIS_INVALID_LENGTH;
            goto done;
        }

        /*
         * Operate in the mode that read ioctl is similar to ReadFile(). This
         * might change as the userspace code gets implemented.
         */
        inputBuffer = NULL;
        inputBufferLen = 0;

        /* Create an NL message for consumption. */
        ovsMsg = &ovsMsgReadOp;
        devOp = OVS_READ_DEV_OP;

        /*
         * For implementing read (ioctl or otherwise), we need to store some
         * state in the instance to indicate the previous command. The state can
         * setup 'ovsMsgReadOp' appropriately.
         *
         * XXX: Support for that will be added as the userspace code evolves.
         */
        status = STATUS_NOT_IMPLEMENTED;
        goto done;

        break;

    case OVS_IOCTL_WRITE:
        /* Input buffer is mandatory. */
        if (inputBufferLen < sizeof (*ovsMsg)) {
            status = STATUS_NDIS_INVALID_LENGTH;
            goto done;
        }

        ovsMsg = inputBuffer;
        devOp = OVS_WRITE_DEV_OP;
        break;

    default:
        status = STATUS_INVALID_DEVICE_REQUEST;
        goto done;
    }

    ASSERT(ovsMsg);

    /* Resolve the netlink family that owns this message type. */
    switch (ovsMsg->nlMsg.nlmsgType) {
    case OVS_WIN_NL_CTRL_FAMILY_ID:
        nlFamilyOps = &nlControlFamilyOps;
        break;
    case OVS_WIN_NL_PACKET_FAMILY_ID:
    case OVS_WIN_NL_DATAPATH_FAMILY_ID:
    case OVS_WIN_NL_FLOW_FAMILY_ID:
    case OVS_WIN_NL_VPORT_FAMILY_ID:
        status = STATUS_NOT_IMPLEMENTED;
        goto done;

    default:
        status = STATUS_INVALID_PARAMETER;
        goto done;
    }

    /*
     * For read operation, the netlink command has already been validated
     * previously.
     */
    if (devOp != OVS_READ_DEV_OP) {
        status = ValidateNetlinkCmd(devOp, ovsMsg, nlFamilyOps);
        if (status != STATUS_SUCCESS) {
            goto done;
        }
    }

    status = InvokeNetlinkCmdHandler(irp, fileObject, devOp,
                                     ovsMsg, nlFamilyOps,
                                     inputBuffer, inputBufferLen,
                                     outputBuffer, outputBufferLen,
                                     &replyLen);

done:
    /* make all writes visible before releasing the instance for reuse */
    KeMemoryBarrier();
    instance->inUse = 0;
    return OvsCompleteIrpRequest(irp, (ULONG_PTR)replyLen, status);
}
NTSTATUS
MiCcPrepareReadInfo (
    IN PMI_READ_INFO MiReadInfo
    )

/*++

Routine Description:

    This routine constructs MDLs that describe the pages in the argument
    read-list. The caller will then issue the I/O on return.

Arguments:

    MiReadInfo - Supplies a pointer to the read-list.

Return Value:

    Various NTSTATUS codes.

    NOTE(review): on the failure returns after allocations succeed, the
    allocated InPageSupport/ApiMdl/IoMdl are left stashed in MiReadInfo —
    presumably the caller releases them via MiReadInfo; confirm.

Environment:

    Kernel mode, PASSIVE_LEVEL.

--*/

{
    UINT64 PteOffset;
    NTSTATUS Status;
    PMMPTE ProtoPte;
    PMMPTE LastProto;
    PMMPTE *ProtoPteArray;
    PCONTROL_AREA ControlArea;
    PSUBSECTION Subsection;
    PMMINPAGE_SUPPORT InPageSupport;
    PMDL Mdl;
    PMDL IoMdl;
    PMDL ApiMdl;
    ULONG i;
    PFN_NUMBER NumberOfPages;

    ASSERT (KeGetCurrentIrql() == PASSIVE_LEVEL);

    NumberOfPages = ADDRESS_AND_SIZE_TO_SPAN_PAGES (MiReadInfo->FileOffset.LowPart, MiReadInfo->LengthInBytes);

    //
    // Translate the section object into the relevant control area.
    //

    ControlArea = (PCONTROL_AREA)MiReadInfo->FileObject->SectionObjectPointer->DataSectionObject;

    //
    // If the section is backed by a ROM, then there's no need to prefetch
    // anything as it would waste RAM.
    //

    if (ControlArea->u.Flags.Rom == 1) {
        return STATUS_NOT_SUPPORTED;
    }

    //
    // Initialize the internal Mi readlist.
    //

    MiReadInfo->ControlArea = ControlArea;

    //
    // Allocate and initialize an inpage support block for this run.
    //

    InPageSupport = MiGetInPageSupportBlock (MM_NOIRQL, &Status);

    if (InPageSupport == NULL) {
        ASSERT (!NT_SUCCESS (Status));
        return Status;
    }

    MiReadInfo->InPageSupport = InPageSupport;

    //
    // Allocate and initialize an MDL to return to our caller.  The actual
    // frame numbers are filled in when all the pages are reference counted.
    //

    ApiMdl = MmCreateMdl (NULL, NULL, NumberOfPages << PAGE_SHIFT);

    if (ApiMdl == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    ApiMdl->MdlFlags |= MDL_PAGES_LOCKED;

    MiReadInfo->ApiMdl = ApiMdl;

    //
    // Allocate and initialize an MDL to use for the actual transfer (if any).
    //

    IoMdl = MmCreateMdl (NULL, NULL, NumberOfPages << PAGE_SHIFT);

    if (IoMdl == NULL) {
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    MiReadInfo->IoMdl = IoMdl;
    Mdl = IoMdl;

    //
    // Make sure the section is really prefetchable - physical and
    // pagefile-backed sections are not.
    //

    if ((ControlArea->u.Flags.PhysicalMemory) ||
        (ControlArea->u.Flags.Image == 1) ||
        (ControlArea->FilePointer == NULL)) {

        return STATUS_INVALID_PARAMETER_1;
    }

    //
    // Start the read at the proper file offset.
    //

    InPageSupport->ReadOffset = MiReadInfo->FileOffset;
    ASSERT (BYTE_OFFSET (InPageSupport->ReadOffset.LowPart) == 0);
    InPageSupport->FilePointer = MiReadInfo->FileObject;

    //
    // Stash a pointer to the start of the prototype PTE array (the values
    // in the array are not contiguous as they may cross subsections)
    // in the inpage block so we can walk it quickly later when the pages
    // are put into transition.
    //

    ProtoPteArray = (PMMPTE *)(Mdl + 1);

    InPageSupport->BasePte = (PMMPTE) ProtoPteArray;

    //
    // Data (but not image) reads use the whole page and the filesystems
    // zero fill any remainder beyond valid data length so we don't
    // bother to handle this here.  It is important to specify the
    // entire page where possible so the filesystem won't post this
    // which will hurt perf.  LWFIX: must use CcZero to make this true.
    //

    ASSERT (((ULONG_PTR)Mdl & (sizeof(QUAD) - 1)) == 0);
    InPageSupport->u1.e1.PrefetchMdlHighBits = ((ULONG_PTR)Mdl >> 3);

    //
    // Initialize the prototype PTE pointers.
    //

    ASSERT (ControlArea->u.Flags.GlobalOnlyPerSession == 0);

    if (ControlArea->u.Flags.Rom == 0) {
        Subsection = (PSUBSECTION)(ControlArea + 1);
    }
    else {
        Subsection = (PSUBSECTION)((PLARGE_CONTROL_AREA)ControlArea + 1);
    }

#if DBG
    if (MiCcDebug & MI_CC_FORCE_PREFETCH) {
        MiRemoveUserPages ();
    }
#endif

    //
    // Calculate the first prototype PTE address.
    //

    PteOffset = (UINT64)(MiReadInfo->FileOffset.QuadPart >> PAGE_SHIFT);

    //
    // Make sure the PTEs are not in the extended part of the segment.
    //

    while (TRUE) {

        //
        // A memory barrier is needed to read the subsection chains
        // in order to ensure the writes to the actual individual
        // subsection data structure fields are visible in correct
        // order.  This avoids the need to acquire any stronger
        // synchronization (ie: PFN lock), thus yielding better
        // performance and pageability.
        //

        KeMemoryBarrier ();

        if (PteOffset < (UINT64) Subsection->PtesInSubsection) {
            break;
        }

        PteOffset -= Subsection->PtesInSubsection;

        Subsection = Subsection->NextSubsection;
    }

    Status = MiAddViewsForSectionWithPfn ((PMSUBSECTION) Subsection,
                                          Subsection->PtesInSubsection);

    if (!NT_SUCCESS (Status)) {
        return Status;
    }

    MiReadInfo->FirstReferencedSubsection = Subsection;
    MiReadInfo->LastReferencedSubsection = Subsection;

    ProtoPte = &Subsection->SubsectionBase[PteOffset];
    LastProto = &Subsection->SubsectionBase[Subsection->PtesInSubsection];

    for (i = 0; i < NumberOfPages; i += 1) {

        //
        // Calculate which PTE maps the given logical block offset.
        //
        // Always look forwards (as an optimization) in the subsection chain.
        //
        // A quick check is made first to avoid recalculations and loops where
        // possible.
        //

        if (ProtoPte >= LastProto) {

            //
            // Handle extended subsections.  Increment the view count for
            // every subsection spanned by this request, creating prototype
            // PTEs if needed.
            //

            ASSERT (i != 0);

            Subsection = Subsection->NextSubsection;

            Status = MiAddViewsForSectionWithPfn ((PMSUBSECTION) Subsection,
                                                  Subsection->PtesInSubsection);

            if (!NT_SUCCESS (Status)) {
                return Status;
            }

            MiReadInfo->LastReferencedSubsection = Subsection;

            ProtoPte = Subsection->SubsectionBase;

            LastProto = &Subsection->SubsectionBase[Subsection->PtesInSubsection];
        }

        *ProtoPteArray = ProtoPte;
        ProtoPteArray += 1;

        ProtoPte += 1;
    }

    return STATUS_SUCCESS;
}
/*
 * Dispatch routine for IRP_MJ_POWER on a filter PDO.
 *
 * IRP_MN_QUERY_POWER / IRP_MN_SET_POWER for device or system power states
 * are pended and handed to the dedicated power worker threads; everything
 * else is forwarded down the stack with __PdoDispatchPower as the
 * completion routine (which releases the remove lock).
 */
static DECLSPEC_NOINLINE NTSTATUS
PdoDispatchPower(
    IN  PXENFILT_PDO    Pdo,
    IN  PIRP            Irp
    )
{
    PIO_STACK_LOCATION  Stack;
    UCHAR               Minor;
    POWER_STATE_TYPE    Type;
    NTSTATUS            status;

    // Guard against the device being removed while the IRP is in flight.
    status = IoAcquireRemoveLock(&Pdo->Dx->RemoveLock, Irp);
    if (!NT_SUCCESS(status))
        goto fail1;

    Stack = IoGetCurrentIrpStackLocation(Irp);
    Minor = Stack->MinorFunction;

    if (Minor != IRP_MN_QUERY_POWER && Minor != IRP_MN_SET_POWER) {
        // Not a power-state request: forward untouched.
        IoCopyCurrentIrpStackLocationToNext(Irp);
        IoSetCompletionRoutine(Irp,
                               __PdoDispatchPower,
                               Pdo,
                               TRUE,
                               TRUE,
                               TRUE);

        status = IoCallDriver(Pdo->LowerDeviceObject, Irp);
        goto done;
    }

    Type = Stack->Parameters.Power.Type;

    Trace("====> (%02x:%s)\n",
          Minor,
          PowerMinorFunctionName(Minor));

    if (Type == DevicePowerState) {
        IoMarkIrpPending(Irp);

        ASSERT3P(Pdo->DevicePowerIrp, ==, NULL);
        // Publish the IRP before waking the thread; the barrier orders the
        // store against the wake so the thread cannot miss it.
        Pdo->DevicePowerIrp = Irp;
        KeMemoryBarrier();

        ThreadWake(Pdo->DevicePowerThread);

        status = STATUS_PENDING;
    } else if (Type == SystemPowerState) {
        IoMarkIrpPending(Irp);

        ASSERT3P(Pdo->SystemPowerIrp, ==, NULL);
        // Same publish-then-wake protocol as the device power path.
        Pdo->SystemPowerIrp = Irp;
        KeMemoryBarrier();

        ThreadWake(Pdo->SystemPowerThread);

        status = STATUS_PENDING;
    } else {
        // Unrecognized power type: pass straight through.
        IoCopyCurrentIrpStackLocationToNext(Irp);
        IoSetCompletionRoutine(Irp,
                               __PdoDispatchPower,
                               Pdo,
                               TRUE,
                               TRUE,
                               TRUE);

        status = IoCallDriver(Pdo->LowerDeviceObject, Irp);
    }

    Trace("<==== (%02x:%s) (%08x)\n",
          Minor,
          PowerMinorFunctionName(Minor),
          status);

done:
    return status;

fail1:
    Error("fail1 (%08x)\n", status);

    Irp->IoStatus.Status = status;
    IoCompleteRequest(Irp, IO_NO_INCREMENT);

    return status;
}
NDIS_STATUS XenNet_D0Entry(struct xennet_info *xi) { NDIS_STATUS status; PUCHAR ptr; CHAR buf[128]; FUNCTION_ENTER(); xi->shutting_down = FALSE; ptr = xi->config_page; ADD_XEN_INIT_REQ(&ptr, XEN_INIT_TYPE_RING, "tx-ring-ref", NULL, NULL); ADD_XEN_INIT_REQ(&ptr, XEN_INIT_TYPE_RING, "rx-ring-ref", NULL, NULL); #pragma warning(suppress:4054) ADD_XEN_INIT_REQ(&ptr, XEN_INIT_TYPE_EVENT_CHANNEL, "event-channel", (PVOID)XenNet_HandleEvent, xi); ADD_XEN_INIT_REQ(&ptr, XEN_INIT_TYPE_READ_STRING_BACK, "mac", NULL, NULL); ADD_XEN_INIT_REQ(&ptr, XEN_INIT_TYPE_READ_STRING_BACK, "feature-sg", NULL, NULL); ADD_XEN_INIT_REQ(&ptr, XEN_INIT_TYPE_READ_STRING_BACK, "feature-gso-tcpv4", NULL, NULL); ADD_XEN_INIT_REQ(&ptr, XEN_INIT_TYPE_WRITE_STRING, "request-rx-copy", "1", NULL); ADD_XEN_INIT_REQ(&ptr, XEN_INIT_TYPE_WRITE_STRING, "feature-rx-notify", "1", NULL); RtlStringCbPrintfA(buf, ARRAY_SIZE(buf), "%d", !xi->config_csum); ADD_XEN_INIT_REQ(&ptr, XEN_INIT_TYPE_WRITE_STRING, "feature-no-csum-offload", buf, NULL); RtlStringCbPrintfA(buf, ARRAY_SIZE(buf), "%d", (int)xi->config_sg); ADD_XEN_INIT_REQ(&ptr, XEN_INIT_TYPE_WRITE_STRING, "feature-sg", buf, NULL); RtlStringCbPrintfA(buf, ARRAY_SIZE(buf), "%d", !!xi->config_gso); ADD_XEN_INIT_REQ(&ptr, XEN_INIT_TYPE_WRITE_STRING, "feature-gso-tcpv4", buf, NULL); ADD_XEN_INIT_REQ(&ptr, XEN_INIT_TYPE_XB_STATE_MAP_PRE_CONNECT, NULL, NULL, NULL); __ADD_XEN_INIT_UCHAR(&ptr, 0); /* no pre-connect required */ ADD_XEN_INIT_REQ(&ptr, XEN_INIT_TYPE_XB_STATE_MAP_POST_CONNECT, NULL, NULL, NULL); __ADD_XEN_INIT_UCHAR(&ptr, XenbusStateConnected); __ADD_XEN_INIT_UCHAR(&ptr, XenbusStateConnected); __ADD_XEN_INIT_UCHAR(&ptr, 20); __ADD_XEN_INIT_UCHAR(&ptr, 0); ADD_XEN_INIT_REQ(&ptr, XEN_INIT_TYPE_XB_STATE_MAP_SHUTDOWN, NULL, NULL, NULL); __ADD_XEN_INIT_UCHAR(&ptr, XenbusStateClosing); __ADD_XEN_INIT_UCHAR(&ptr, XenbusStateClosing); __ADD_XEN_INIT_UCHAR(&ptr, 50); __ADD_XEN_INIT_UCHAR(&ptr, XenbusStateClosed); __ADD_XEN_INIT_UCHAR(&ptr, XenbusStateClosed); 
__ADD_XEN_INIT_UCHAR(&ptr, 50); __ADD_XEN_INIT_UCHAR(&ptr, XenbusStateInitialising); __ADD_XEN_INIT_UCHAR(&ptr, XenbusStateInitWait); __ADD_XEN_INIT_UCHAR(&ptr, 50); __ADD_XEN_INIT_UCHAR(&ptr, 0); #ifdef DEBUG_407_1241 ADD_XEN_INIT_REQ(&ptr, XEN_INIT_TYPE_COLOR_INIT, NULL, NULL, NULL); #endif ADD_XEN_INIT_REQ(&ptr, XEN_INIT_TYPE_END, NULL, NULL, NULL); status = xi->vectors.XenPci_XenConfigDevice(xi->vectors.context); if (!NT_SUCCESS(status)) { KdPrint(("Failed to complete device configuration (%08x)\n", status)); return status; } status = XenNet_ConnectBackend(xi); if (!NT_SUCCESS(status)) { KdPrint(("Failed to complete device configuration (%08x)\n", status)); return status; } if (!xi->config_sg) { /* without SG, GSO can be a maximum of PAGE_SIZE */ xi->config_gso = min(xi->config_gso, PAGE_SIZE); } XenNet_TxInit(xi); XenNet_RxInit(xi); xi->connected = TRUE; KeMemoryBarrier(); // packets could be received anytime after we set Frontent to Connected FUNCTION_EXIT(); return status; }
/*
 * --------------------------------------------------------------------------
 * Implements filter driver's FilterAttach function.
 *
 * This function allocates the switch context, and initializes its necessary
 * members.
 * --------------------------------------------------------------------------
 */
NDIS_STATUS
OvsExtAttach(NDIS_HANDLE ndisFilterHandle,
             NDIS_HANDLE filterDriverContext,
             PNDIS_FILTER_ATTACH_PARAMETERS attachParameters)
{
    NDIS_STATUS status = NDIS_STATUS_FAILURE;
    NDIS_FILTER_ATTRIBUTES ovsExtAttributes;
    POVS_SWITCH_CONTEXT switchContext = NULL;

    UNREFERENCED_PARAMETER(filterDriverContext);
    OVS_LOG_TRACE("Enter: ndisFilterHandle %p", ndisFilterHandle);

    ASSERT(filterDriverContext == (NDIS_HANDLE)gOvsExtDriverObject);
    /* Only Ethernet miniports are supported. */
    if (attachParameters->MiniportMediaType != NdisMedium802_3) {
        status = NDIS_STATUS_INVALID_PARAMETER;
        goto cleanup;
    }
    if (gOvsExtDriverHandle == NULL) {
        OVS_LOG_TRACE("Exit: OVSEXT driver is not loaded.");
        ASSERT(FALSE);
        goto cleanup;
    }
    /*
     * Only a single datapath instance is supported; use gOvsInAttach under
     * the control lock to exclude a concurrent attach attempt.
     * (Log-string fix: the concatenated literals below were missing the
     * joining spaces, producing "...is""supported" / "...attach""instance".)
     */
    NdisAcquireSpinLock(gOvsCtrlLock);
    if (gOvsSwitchContext) {
        NdisReleaseSpinLock(gOvsCtrlLock);
        OVS_LOG_TRACE("Exit: Failed to create OVS Switch, only one datapath is "
                      "supported, %p.", gOvsSwitchContext);
        goto cleanup;
    }
    if (gOvsInAttach) {
        NdisReleaseSpinLock(gOvsCtrlLock);
        /* Just fail the request. */
        OVS_LOG_TRACE("Exit: Failed to create OVS Switch, since another attach "
                      "instance is in attach process.");
        goto cleanup;
    }
    gOvsInAttach = TRUE;
    NdisReleaseSpinLock(gOvsCtrlLock);

    status = OvsInitIpHelper(ndisFilterHandle);
    if (status != STATUS_SUCCESS) {
        OVS_LOG_ERROR("Exit: Failed to initialize IP helper.");
        goto cleanup;
    }

    status = OvsCreateSwitch(ndisFilterHandle, &switchContext);
    if (status != NDIS_STATUS_SUCCESS) {
        OvsCleanupIpHelper();
        goto cleanup;
    }
    ASSERT(switchContext);

    /*
     * Register the switch context with NDIS so NDIS can pass it back to the
     * Filterxxx callback functions as the 'FilterModuleContext' parameter.
     */
    RtlZeroMemory(&ovsExtAttributes, sizeof(NDIS_FILTER_ATTRIBUTES));
    ovsExtAttributes.Header.Revision = NDIS_FILTER_ATTRIBUTES_REVISION_1;
    ovsExtAttributes.Header.Size = sizeof(NDIS_FILTER_ATTRIBUTES);
    ovsExtAttributes.Header.Type = NDIS_OBJECT_TYPE_FILTER_ATTRIBUTES;
    ovsExtAttributes.Flags = 0;

    NDIS_DECLARE_FILTER_MODULE_CONTEXT(OVS_SWITCH_CONTEXT);
    status = NdisFSetAttributes(ndisFilterHandle, switchContext, &ovsExtAttributes);
    if (status != NDIS_STATUS_SUCCESS) {
        OVS_LOG_ERROR("Failed to set attributes.");
        OvsCleanupIpHelper();
        goto cleanup;
    }

    /* Setup the state machine. */
    switchContext->controlFlowState = OvsSwitchAttached;
    switchContext->dataFlowState = OvsSwitchPaused;

    /* Publish the fully-initialized context; the barrier orders the
     * initialization stores before the global pointer becomes visible. */
    gOvsSwitchContext = switchContext;
    KeMemoryBarrier();

cleanup:
    gOvsInAttach = FALSE;
    if (status != NDIS_STATUS_SUCCESS) {
        if (switchContext != NULL) {
            OvsDeleteSwitch(switchContext);
        }
    }
    OVS_LOG_TRACE("Exit: status %x", status);

    return status;
}
/*
 * DPC handler for xenusb backend events.
 *
 * Drains two shared rings from the backend:
 *   1. urb_ring  - URB completions. Each response matches a partial_pvurb
 *      previously posted; when a pvurb's reference count drops to zero it
 *      is queued on a local list and its WDFREQUEST is completed after the
 *      ring lock is released.
 *   2. conn_ring - root-hub port connection/speed changes; port status and
 *      change bits are updated and the hub interrupt endpoint is poked if
 *      anything changed.
 *
 * Runs at DISPATCH_LEVEL (DPC); ring locks are taken with the
 * *AtDpcLevel variants.
 */
static VOID
XenUsb_HandleEventDpc(PKDPC dpc, PVOID context, PVOID arg1, PVOID arg2) {
  NTSTATUS status;
  PXENUSB_DEVICE_DATA xudd = context;
  RING_IDX prod, cons;
  usbif_urb_response_t *urb_rsp;
  usbif_conn_response_t *conn_rsp;
  usbif_conn_request_t *conn_req;
  int more_to_do;
  /* singly-linked list of pvurbs whose last partial completed */
  pvurb_t *pvurb, *complete_head = NULL, *complete_tail = NULL;
  partial_pvurb_t *partial_pvurb;
  BOOLEAN port_changed = FALSE;

  UNREFERENCED_PARAMETER(dpc);
  UNREFERENCED_PARAMETER(arg1);
  UNREFERENCED_PARAMETER(arg2);

  FUNCTION_ENTER();

  /* --- URB ring --- */
  more_to_do = TRUE;
  KeAcquireSpinLockAtDpcLevel(&xudd->urb_ring_lock);
  while (more_to_do) {
    /* Snapshot producer index, then barrier before reading responses
     * (standard Xen shared-ring consumer protocol). */
    prod = xudd->urb_ring.sring->rsp_prod;
    KeMemoryBarrier();
    for (cons = xudd->urb_ring.rsp_cons; cons != prod; cons++) {
      urb_rsp = RING_GET_RESPONSE(&xudd->urb_ring, cons);
//      FUNCTION_MSG("urb_rsp->id = %d\n", urb_rsp->id);
      /* Response id indexes the partial_pvurb we posted for this slot. */
      partial_pvurb = xudd->partial_pvurbs[urb_rsp->id];
      RemoveEntryList(&partial_pvurb->entry);
      partial_pvurb->rsp = *urb_rsp;
//      FUNCTION_MSG("shadow = %p\n", shadow);
//      FUNCTION_MSG("shadow->rsp = %p\n", shadow->rsp);
      if (usbif_pipeunlink(partial_pvurb->req.pipe)) {
        /* This response is for an unlink (cancel) we issued, not for the
         * original transfer; nothing to accumulate. */
        FUNCTION_MSG("is a cancel request for request %p\n", partial_pvurb->pvurb->request);
        FUNCTION_MSG("urb_ring rsp status = %d\n", urb_rsp->status); // status should be 115 == EINPROGRESS
      } else {
        /* Accumulate this partial transfer into the parent pvurb; keep the
         * first non-zero status. */
        partial_pvurb->pvurb->total_length += urb_rsp->actual_length;
        if (!partial_pvurb->pvurb->rsp.status)
          partial_pvurb->pvurb->rsp.status = urb_rsp->status;
        partial_pvurb->pvurb->rsp.error_count += urb_rsp->error_count;; /* NOTE(review): stray ';;' - harmless empty statement */
        if (partial_pvurb->mdl) {
          /* Revoke the grant references backing this transfer's buffer. */
          int i;
          for (i = 0; i < partial_pvurb->req.nr_buffer_segs; i++) {
            XnEndAccess(xudd->handle, partial_pvurb->req.seg[i].gref, FALSE, (ULONG)'XUSB');
          }
        }
        FUNCTION_MSG("urb_ring rsp id = %d\n", partial_pvurb->rsp.id);
        FUNCTION_MSG("urb_ring rsp start_frame = %d\n", partial_pvurb->rsp.start_frame);
        FUNCTION_MSG("urb_ring rsp status = %d\n", partial_pvurb->rsp.status);
        FUNCTION_MSG("urb_ring rsp actual_length = %d\n", partial_pvurb->rsp.actual_length);
        FUNCTION_MSG("urb_ring rsp error_count = %d\n", partial_pvurb->rsp.error_count);
      }
      if (partial_pvurb->other_partial_pvurb) {
        if (!partial_pvurb->other_partial_pvurb->on_ring) {
          /* cancel hasn't been put on the ring yet - remove it */
          RemoveEntryList(&partial_pvurb->other_partial_pvurb->entry);
          ASSERT(usbif_pipeunlink(partial_pvurb->other_partial_pvurb->req.pipe));
          partial_pvurb->pvurb->ref--;
          ExFreePoolWithTag(partial_pvurb->other_partial_pvurb, XENUSB_POOL_TAG);
        }
      }
      /* Drop this partial's reference on the parent pvurb. */
      partial_pvurb->pvurb->ref--;
      switch (partial_pvurb->rsp.status) {
      case EINPROGRESS: /* unlink request */
      case ECONNRESET:  /* cancelled request */
        ASSERT(partial_pvurb->pvurb->status == STATUS_CANCELLED);
        break;
      default:
        break;
      }
      put_id_on_freelist(xudd->req_id_ss, partial_pvurb->rsp.id);
      partial_pvurb->pvurb->next = NULL;
      if (!partial_pvurb->pvurb->ref) {
        /* Last outstanding partial: append pvurb to the local FIFO for
         * completion outside the ring lock. */
        if (complete_tail) {
          complete_tail->next = partial_pvurb->pvurb;
        } else {
          complete_head = partial_pvurb->pvurb;
        }
        complete_tail = partial_pvurb->pvurb;
      }
    }
    xudd->urb_ring.rsp_cons = cons;
    if (cons != xudd->urb_ring.req_prod_pvt) {
      RING_FINAL_CHECK_FOR_RESPONSES(&xudd->urb_ring, more_to_do);
    } else {
      /* Ring fully drained; re-arm the event pointer. */
      xudd->urb_ring.sring->rsp_event = cons + 1;
      more_to_do = FALSE;
    }
  }
  /* Push any requests that were waiting for free ring slots/ids. */
  PutRequestsOnRing(xudd);
  KeReleaseSpinLockFromDpcLevel(&xudd->urb_ring_lock);

  /* Complete finished pvurbs now that the ring lock is dropped. */
  pvurb = complete_head;
  while (pvurb != NULL) {
    complete_head = pvurb->next;
    status = WdfRequestUnmarkCancelable(pvurb->request);
    if (status == STATUS_CANCELLED) {
      /* NOTE(review): completion still proceeds below in this case -
       * presumably the EvtRequestCancel routine does not complete the
       * request itself; verify to rule out a double completion. */
      FUNCTION_MSG("Cancel was called\n");
    }
    WdfRequestCompleteWithInformation(pvurb->request, pvurb->status, pvurb->total_length); /* the WDFREQUEST is always successfull here even if the pvurb->rsp has an error */
    pvurb = complete_head;
  }

  /* --- connection (port change) ring --- */
  more_to_do = TRUE;
  KeAcquireSpinLockAtDpcLevel(&xudd->conn_ring_lock);
  while (more_to_do) {
    prod = xudd->conn_ring.sring->rsp_prod;
    KeMemoryBarrier();
    for (cons = xudd->conn_ring.rsp_cons; cons != prod; cons++) {
      USHORT old_port_status;
      conn_rsp = RING_GET_RESPONSE(&xudd->conn_ring, cons);
      FUNCTION_MSG("conn_rsp->portnum = %d\n", conn_rsp->portnum);
      FUNCTION_MSG("conn_rsp->speed = %d\n", conn_rsp->speed);
      /* portnum is 1-based; ports[] is 0-based. Recompute the speed and
       * connection bits from scratch for this port. */
      old_port_status = xudd->ports[conn_rsp->portnum - 1].port_status;
      xudd->ports[conn_rsp->portnum - 1].port_type = conn_rsp->speed;
      xudd->ports[conn_rsp->portnum - 1].port_status &= ~((1 << PORT_LOW_SPEED) | (1 << PORT_HIGH_SPEED) | (1 << PORT_CONNECTION));
      switch (conn_rsp->speed) {
      case USB_PORT_TYPE_NOT_CONNECTED:
        xudd->ports[conn_rsp->portnum - 1].port_status &= ~(1 << PORT_ENABLE);
        break;
      case USB_PORT_TYPE_LOW_SPEED:
        xudd->ports[conn_rsp->portnum - 1].port_status |= (1 << PORT_LOW_SPEED) | (1 << PORT_CONNECTION);
        break;
      case USB_PORT_TYPE_FULL_SPEED:
        /* full speed has neither the low- nor high-speed bit set */
        xudd->ports[conn_rsp->portnum - 1].port_status |= (1 << PORT_CONNECTION);
        break;
      case USB_PORT_TYPE_HIGH_SPEED:
        xudd->ports[conn_rsp->portnum - 1].port_status |= (1 << PORT_HIGH_SPEED) | (1 << PORT_CONNECTION);
        break;
      }
      /* Latch change bits for enable/connection transitions. */
      xudd->ports[conn_rsp->portnum - 1].port_change |= (xudd->ports[conn_rsp->portnum - 1].port_status ^ old_port_status) & ((1 << PORT_ENABLE) | (1 << PORT_CONNECTION));
      if (xudd->ports[conn_rsp->portnum - 1].port_change)
        port_changed = TRUE;
      /* Recycle the id back to the backend as a new request slot. */
      conn_req = RING_GET_REQUEST(&xudd->conn_ring, xudd->conn_ring.req_prod_pvt);
      conn_req->id = conn_rsp->id;
      xudd->conn_ring.req_prod_pvt++;
    }
    xudd->conn_ring.rsp_cons = cons;
    if (cons != xudd->conn_ring.req_prod_pvt) {
      RING_FINAL_CHECK_FOR_RESPONSES(&xudd->conn_ring, more_to_do);
    } else {
      xudd->conn_ring.sring->rsp_event = cons + 1;
      more_to_do = FALSE;
    }
  }
  KeReleaseSpinLockFromDpcLevel(&xudd->conn_ring_lock);

  if (port_changed) {
    /* Notify the root hub's interrupt endpoint of the port change. */
    PXENUSB_PDO_DEVICE_DATA xupdd = GetXupdd(xudd->root_hub_device);
    XenUsbHub_ProcessHubInterruptEvent(xupdd->usb_device->configs[0]->interfaces[0]->endpoints[0]);
  }

  FUNCTION_EXIT();
  return;
}
/*
 * ScsiPort HwScsiInterrupt handler for xenscsi.
 *
 * Acks the event channel (except in dump mode) and drains the vscsiif
 * response ring: for each response it fills in SRB status and sense data,
 * revokes the grant references for the request's segments, applies the
 * REPORT_LUNS workaround, and completes the SRB back to ScsiPort.
 *
 * Returns TRUE if the event was the last outstanding interrupt (as
 * reported by EvtChn_AckEvent), FALSE otherwise.
 *
 * Fix: Srb->SenseInfoBuffer was memset (and, in the autosense-disabled
 * path, memcpy'd from) without a NULL check, even though the code below
 * already guards other accesses with SenseInfoBuffer != NULL; both
 * accesses are now guarded.
 */
static BOOLEAN
XenScsi_HwScsiInterrupt(PVOID DeviceExtension)
{
  PXENSCSI_DEVICE_DATA xsdd = DeviceExtension;
  PSCSI_REQUEST_BLOCK Srb;
  RING_IDX i, rp;
  int j;
  vscsiif_response_t *rep;
  int more_to_do = TRUE;
  vscsiif_shadow_t *shadow;
  BOOLEAN last_interrupt = FALSE;

  XenScsi_CheckNewDevice(DeviceExtension);

  /* Not our interrupt (shared line) if the ack reports no pending event. */
  if (!dump_mode && !xsdd->vectors.EvtChn_AckEvent(xsdd->vectors.context, xsdd->event_channel, &last_interrupt)) {
    return FALSE;
  }

  while (more_to_do) {
    /* Snapshot producer, then barrier before reading responses
     * (standard Xen shared-ring consumer protocol). */
    rp = xsdd->ring.sring->rsp_prod;
    KeMemoryBarrier();
    for (i = xsdd->ring.rsp_cons; i != rp; i++) {
      rep = RING_GET_RESPONSE(&xsdd->ring, i);
      shadow = &xsdd->shadows[rep->rqid];
      Srb = shadow->Srb;
      Srb->ScsiStatus = (UCHAR)rep->rslt;
      /* Fix: only touch the sense buffer if the SRB actually has one. */
      if (Srb->SenseInfoBuffer != NULL) {
        memset(Srb->SenseInfoBuffer, 0, Srb->SenseInfoBufferLength);
      }
      if (rep->sense_len > 0 && Srb->SenseInfoBuffer != NULL) {
        memcpy(Srb->SenseInfoBuffer, rep->sense_buffer, min(Srb->SenseInfoBufferLength, rep->sense_len));
      }
      switch(rep->rslt) {
      case 0:
        Srb->SrbStatus = SRB_STATUS_SUCCESS;
        if (Srb->Cdb[0] == 0x03) { /* REQUEST SENSE */
          KdPrint((__DRIVER_NAME " REQUEST_SENSE DataTransferLength = %d, residual = %d\n", Srb->DataTransferLength, rep->residual_len));
        }
        break;
      case 0x00010000: /* Device does not exist */
        KdPrint((__DRIVER_NAME " Xen Operation error - cdb[0] = %02x, result = 0x%08x, sense_len = %d, residual = %d\n", (ULONG)Srb->Cdb[0], rep->rslt, rep->sense_len, rep->residual_len));
        Srb->SrbStatus = SRB_STATUS_NO_DEVICE;
        break;
      default:
        KdPrint((__DRIVER_NAME " Xen Operation error - cdb[0] = %02x, result = 0x%08x, sense_len = %d, residual = %d\n", (ULONG)Srb->Cdb[0], rep->rslt, rep->sense_len, rep->residual_len));
        Srb->SrbStatus = SRB_STATUS_ERROR;
        if (rep->sense_len > 0 && !(Srb->SrbFlags & SRB_FLAGS_DISABLE_AUTOSENSE) && Srb->SenseInfoBuffer != NULL) {
          /* Sense data was already copied into the SRB above. */
          KdPrint((__DRIVER_NAME " Doing autosense\n"));
          Srb->SrbStatus |= SRB_STATUS_AUTOSENSE_VALID;
        } else if (Srb->SrbFlags & SRB_FLAGS_DISABLE_AUTOSENSE) {
          /* Stash the sense data in per-LU storage for a later
           * REQUEST SENSE. */
          PXENSCSI_LU_DATA lud = ScsiPortGetLogicalUnit(DeviceExtension, Srb->PathId, Srb->TargetId, Srb->Lun);
          KdPrint((__DRIVER_NAME " Autosense disabled\n"));
          if (lud != NULL && Srb->SenseInfoBuffer != NULL) { /* fix: guard NULL sense buffer */
            KdPrint((__DRIVER_NAME " Saving sense data\n"));
            lud->sense_len = rep->sense_len;
            memcpy(lud->sense_buffer, Srb->SenseInfoBuffer, lud->sense_len);
          }
        }
      }
      /* work around a bug in scsiback that gives an incorrect result to
         REPORT_LUNS - fail it if the output is only 8 bytes */
      if (Srb->Cdb[0] == 0xa0 && Srb->SrbStatus == SRB_STATUS_SUCCESS
          && Srb->DataTransferLength - rep->residual_len == 8) {
        /* SRB_STATUS_ERROR appears to be sufficient here - no need to
           worry about sense data or anything */
        KdPrint((__DRIVER_NAME " Worked around bad REPORT_LUNS emulation for %d:%d:%d\n", Srb->PathId, Srb->TargetId, Srb->Lun));
        Srb->SrbStatus = SRB_STATUS_ERROR;
      }
      /* Revoke and recycle the grant references for this request. */
      for (j = 0; j < shadow->req.nr_segments; j++) {
        xsdd->vectors.GntTbl_EndAccess(xsdd->vectors.context, shadow->req.seg[j].gref, TRUE, (ULONG)'SCSI');
        put_grant_on_freelist(xsdd, shadow->req.seg[j].gref);
        shadow->req.seg[j].gref = 0;
      }
      if (Srb->SrbStatus == SRB_STATUS_SUCCESS && rep->residual_len) {
        /* Report the short transfer as a data overrun/underrun. */
        Srb->DataTransferLength -= rep->residual_len;
        Srb->SrbStatus = SRB_STATUS_DATA_OVERRUN;
      }
      put_shadow_on_freelist(xsdd, shadow);
      ScsiPortNotification(RequestComplete, xsdd, Srb);
      if (!xsdd->scsiport_paused)
        ScsiPortNotification(NextRequest, DeviceExtension);
    }
    xsdd->ring.rsp_cons = i;
    if (i != xsdd->ring.req_prod_pvt) {
      RING_FINAL_CHECK_FOR_RESPONSES(&xsdd->ring, more_to_do);
    } else {
      /* Ring fully drained; re-arm the event pointer. */
      xsdd->ring.sring->rsp_event = i + 1;
      more_to_do = FALSE;
    }
  }

  return last_interrupt;
}