BOOLEAN
ndisAddGlobalDb(
    IN NDIS_INTERFACE_TYPE BusType,
    IN ULONG BusId,
    IN ULONG BusNumber,
    IN ULONG SlotNumber
    )
/*++
    Allocates a new bus/slot record and pushes it onto the head of the
    global ndisGlobalDb singly-linked list under ndisGlobalDbLock.
    Returns TRUE on success, FALSE if the pool allocation failed.
--*/
{
    PBUS_SLOT_DB pNewEntry;
    KIRQL SavedIrql;
    BOOLEAN Inserted = FALSE;

    pNewEntry = ALLOC_FROM_POOL(sizeof(BUS_SLOT_DB), NDIS_TAG_DEFAULT);
    if (pNewEntry != NULL)
    {
        // Fill in the identifying tuple for this bus/slot.
        pNewEntry->BusType = BusType;
        pNewEntry->BusId = BusId;
        pNewEntry->BusNumber = BusNumber;
        pNewEntry->SlotNumber = SlotNumber;

        // Link at the head of the global list.
        ACQUIRE_SPIN_LOCK(&ndisGlobalDbLock, &SavedIrql);
        pNewEntry->Next = ndisGlobalDb;
        ndisGlobalDb = pNewEntry;
        RELEASE_SPIN_LOCK(&ndisGlobalDbLock, SavedIrql);

        Inserted = TRUE;
    }

    return Inserted;
}
// Scavenge the large objects on this workspace's todo list, moving
// each one onto the generation's scavenged_large_objects list.
static void
scavenge_large (gen_workspace *ws)
{
    bdescr *bd;
    StgPtr p;

    gct->evac_gen_no = ws->gen->no;

    bd = ws->todo_large_objects;

    for (; bd != NULL; bd = ws->todo_large_objects) {

        // take this object *off* the large objects list and put it on
        // the scavenged large objects list.  This is so that we can
        // treat new_large_objects as a stack and push new objects on
        // the front when evacuating.
        ws->todo_large_objects = bd->link;

        // gen->sync serializes access to the generation's
        // scavenged_large_objects list, which is shared with other
        // GC threads.
        ACQUIRE_SPIN_LOCK(&ws->gen->sync);
        dbl_link_onto(bd, &ws->gen->scavenged_large_objects);
        ws->gen->n_scavenged_large_blocks += bd->blocks;
        RELEASE_SPIN_LOCK(&ws->gen->sync);

        p = bd->start;
        if (scavenge_one(p)) {
            // The object still points into a younger generation:
            // record it on the mutable list (gen 0 has none).
            if (ws->gen->no > 0) {
                recordMutableGen_GC((StgClosure *)p, ws->gen->no);
            }
        }

        // stats
        gct->scanned += closure_sizeW((StgClosure*)p);
    }
}
VOID
DraidListnerDelAddress(
    PTDI_ADDRESS_LPX Addr
    )
/*++
    Marks the listen context matching the given LPX node address for
    destruction.  The context is not freed here because its wait event
    may still be in use; the listener is woken via NetChangedEvent to
    perform the actual teardown.
--*/
{
    PDRAID_GLOBALS globals;
    PLIST_ENTRY entry;
    PDRAID_LISTEN_CONTEXT listenContext;
    KIRQL irql;

    if (!g_DraidGlobals) {
        KDPrintM(DBG_LURN_INFO, ("DRAID is not running\n"));
        return;
    }

    globals = g_DraidGlobals;

    // Walk the listen-context list under its lock; flag the first
    // entry whose node address matches and wake the listener.
    ACQUIRE_SPIN_LOCK(&globals->ListenContextSpinlock, &irql);
    for (entry = globals->ListenContextList.Flink;
         entry != &globals->ListenContextList;
         entry = entry->Flink) {
        listenContext = CONTAINING_RECORD(entry, DRAID_LISTEN_CONTEXT, Link);
        if (RtlCompareMemory(listenContext->Addr.Node, Addr->Node, 6) == 6) {
            KDPrintM(DBG_LURN_INFO, ("Found matching address\n"));
            listenContext->Destroy = TRUE;
            KeSetEvent(&globals->NetChangedEvent, IO_NO_INCREMENT, FALSE);
            break;
        }
    }
    RELEASE_SPIN_LOCK(&globals->ListenContextSpinlock, irql);
}
// // Used when entering power down mode. // VOID DraidFlushAll( VOID ) { KIRQL oldIrql; PLIST_ENTRY listEntry; PDRAID_CLIENT_INFO Client; PDRAID_GLOBALS DraidGlobals = g_DraidGlobals; if (!g_DraidGlobals) return; KDPrintM(DBG_LURN_INFO, ("DRAID flush all\n")); // // Flush request to all client // ACQUIRE_SPIN_LOCK(&DraidGlobals->ClientListSpinlock, &oldIrql); for (listEntry = DraidGlobals->ClientList.Flink; listEntry != &DraidGlobals->ClientList; listEntry = listEntry->Flink) { Client = CONTAINING_RECORD (listEntry, DRAID_CLIENT_INFO, AllClientList); DraidClientFlush(Client, NULL, NULL); } RELEASE_SPIN_LOCK(&DraidGlobals->ClientListSpinlock, oldIrql); // Send flush request to arbiter. (Not needed if multi-write is not used.) }
BOOLEAN
ndisSearchGlobalDb(
    IN NDIS_INTERFACE_TYPE BusType,
    IN ULONG BusId,
    IN ULONG BusNumber,
    IN ULONG SlotNumber
    )
/*++
    Scans the global bus/slot database for an entry matching the
    (BusType, BusId, BusNumber, SlotNumber) tuple.  Returns TRUE if a
    matching entry exists, FALSE otherwise.
--*/
{
    PBUS_SLOT_DB pEntry;
    KIRQL SavedIrql;
    BOOLEAN Found = FALSE;

    ACQUIRE_SPIN_LOCK(&ndisGlobalDbLock, &SavedIrql);
    for (pEntry = ndisGlobalDb; pEntry != NULL; pEntry = pEntry->Next)
    {
        if ((pEntry->BusType == BusType) &&
            (pEntry->BusId == BusId) &&
            (pEntry->BusNumber == BusNumber) &&
            (pEntry->SlotNumber == SlotNumber))
        {
            Found = TRUE;
            break;
        }
    }
    RELEASE_SPIN_LOCK(&ndisGlobalDbLock, SavedIrql);

    return Found;
}
// Return a chain of blocks to the block allocator, serialized by the
// shared GC allocation lock.
void
freeChain_sync(bdescr *bd)
{
    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    freeChain(bd);
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
}
VOID
SrvCloseCachedDirectoryEntries (
    IN PCONNECTION Connection
    )
/*++

Routine Description:

    This routine closes all the cached directory entries on the connection

Arguments:

    Connection - Pointer to the connection structure having the cache

++*/
{
    PCACHED_DIRECTORY cachedDir;
    KIRQL savedIrql;

    ACQUIRE_SPIN_LOCK( &Connection->SpinLock, &savedIrql );

    // Pop entries from the head of the cached-directory list until the
    // cache is empty, freeing each one as it is unlinked.
    while ( Connection->CachedDirectoryCount > 0 ) {

        cachedDir = CONTAINING_RECORD( Connection->CachedDirectoryList.Flink,
                                       CACHED_DIRECTORY,
                                       ListEntry );

        RemoveEntryList( &cachedDir->ListEntry );
        Connection->CachedDirectoryCount--;
        DEALLOCATE_NONPAGED_POOL( cachedDir );
    }

    RELEASE_SPIN_LOCK( &Connection->SpinLock, savedIrql );
}
VOID ArcReferencePackage(VOID) { ACQUIRE_SPIN_LOCK(&ArcReferenceLock); ArcReferenceCount++; if (ArcReferenceCount == 1) { KeResetEvent( &ArcPagedInEvent ); RELEASE_SPIN_LOCK(&ArcReferenceLock); // // Page in all the functions // ArcImageHandle = MmLockPagableCodeSection(ArcCreateFilter); // // Signal to everyone to go // KeSetEvent( &ArcPagedInEvent, 0L, FALSE ); } else { RELEASE_SPIN_LOCK(&ArcReferenceLock); // // Wait for everything to be paged in // KeWaitForSingleObject( &ArcPagedInEvent, Executive, KernelMode, TRUE, NULL ); } }
PDRAID_LISTEN_CONTEXT
DraidCreateListenContext(
    PDRAID_GLOBALS DraidGlobals,
    PLPX_ADDRESS Addr
    )
/*++
    Creates a listen context for the given LPX address and links it
    onto DraidGlobals->ListenContextList.  Returns the new context, or
    NULL if the address is already being listened on or the allocation
    failed.

    Fix: the original released ListenContextSpinlock between the
    duplicate-address check and the insertion, so two concurrent
    callers could both pass the check and insert duplicate contexts.
    The context is now allocated and initialized up front, and the
    check and the insert are performed under a single hold of the
    lock, closing the race.
--*/
{
    KIRQL oldIrql;
    BOOLEAN alreadyExist;
    PLIST_ENTRY listEntry;
    PDRAID_LISTEN_CONTEXT scanContext;
    PDRAID_LISTEN_CONTEXT listenContext;

    //
    // Allocate and initialize the listen context before taking the
    // lock (NonPagedPool allocation is legal at elevated IRQL).
    //
    listenContext = ExAllocatePoolWithTag(NonPagedPool,
        sizeof(DRAID_LISTEN_CONTEXT), DRAID_LISTEN_CONTEXT_POOL_TAG);
    if (!listenContext) {
        KDPrintM(DBG_LURN_INFO, ("Failed to alloc listen context\n"));
        return NULL;
    }
    RtlZeroMemory(listenContext, sizeof(DRAID_LISTEN_CONTEXT));
    KeInitializeEvent(&listenContext->TdiListenContext.CompletionEvent,
        NotificationEvent, FALSE);
    InitializeListHead(&listenContext->Link);
    RtlCopyMemory(listenContext->Addr.Node, Addr->Node, 6);
    listenContext->Addr.Port = HTONS(DRIX_ARBITER_PORT_NUM_BASE);

    //
    // Check for a live context with the same address and, if none is
    // found, insert the new one -- all under one lock hold so no
    // duplicate can slip in between the check and the insert.
    //
    alreadyExist = FALSE;
    ACQUIRE_SPIN_LOCK(&DraidGlobals->ListenContextSpinlock, &oldIrql);
    for (listEntry = DraidGlobals->ListenContextList.Flink;
         listEntry != &DraidGlobals->ListenContextList;
         listEntry = listEntry->Flink) {
        scanContext = CONTAINING_RECORD(listEntry, DRAID_LISTEN_CONTEXT, Link);
        if (!scanContext->Destroy &&
            RtlCompareMemory(scanContext->Addr.Node, Addr->Node, 6) == 6) {
            KDPrintM(DBG_LURN_INFO, ("New LPX address already exist.Ignoring.\n"));
            alreadyExist = TRUE;
            break;
        }
    }
    if (!alreadyExist) {
        // The spin lock is already held, so a plain insert is correct
        // here (the original used ExInterlockedInsertTailList with
        // this same lock).
        InsertTailList(&DraidGlobals->ListenContextList, &listenContext->Link);
    }
    RELEASE_SPIN_LOCK(&DraidGlobals->ListenContextSpinlock, oldIrql);

    if (alreadyExist) {
        // Duplicate address: discard the context we prepared.
        ExFreePoolWithTag(listenContext, DRAID_LISTEN_CONTEXT_POOL_TAG);
        return NULL;
    }

    return listenContext;
}
// Allocate a group of n blocks on the given NUMA node, serialized by
// the shared GC allocation lock.
bdescr *
allocGroupOnNode_sync(uint32_t node, uint32_t n)
{
    bdescr *result;

    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    result = allocGroupOnNode(node, n);
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);

    return result;
}
// Allocate a single block, serialized by the shared GC allocation
// lock.
bdescr *
allocBlock_sync(void)
{
    bdescr *result;

    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    result = allocBlock();
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);

    return result;
}
// Allocate a group of n blocks, serialized by the shared GC
// allocation lock.
static bdescr *
allocGroup_sync(nat n)
{
    bdescr *result;

    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    result = allocGroup(n);
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);

    return result;
}
// Allocate a group of n blocks on the current GC thread's NUMA node,
// serialized by the shared GC allocation lock.
bdescr *
allocGroup_sync(uint32_t n)
{
    uint32_t node = capNoToNumaNode(gct->thread_index);
    bdescr *result;

    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    result = allocGroupOnNode(node, n);
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);

    return result;
}
NTSTATUS
DraidUnregisterClient(
    PDRAID_CLIENT_INFO Client
    )
/*++
    Removes a client from the global DRAID client list under the
    client-list spin lock.  Always returns STATUS_SUCCESS.
--*/
{
    KIRQL irql;

    ASSERT(g_DraidGlobals);

    ACQUIRE_SPIN_LOCK(&g_DraidGlobals->ClientListSpinlock, &irql);
    RemoveEntryList(&Client->AllClientList);
    RELEASE_SPIN_LOCK(&g_DraidGlobals->ClientListSpinlock, irql);

    return STATUS_SUCCESS;
}
NTSTATUS
DraidUnregisterArbiter(
    PDRAID_ARBITER_INFO Arbiter
    )
/*++
    Removes an arbiter from the global DRAID arbiter list under the
    arbiter-list spin lock.  Always returns STATUS_SUCCESS.
--*/
{
    KIRQL irql;

    ASSERT(g_DraidGlobals);

    ACQUIRE_SPIN_LOCK(&g_DraidGlobals->ArbiterListSpinlock, &irql);
    RemoveEntryList(&Arbiter->AllArbiterList);
    RELEASE_SPIN_LOCK(&g_DraidGlobals->ArbiterListSpinlock, irql);

    return STATUS_SUCCESS;
}
VOID
LsuDecrementTdiClientDevice()
/*++
    Decrements the TDI client device count under the PnP spin lock.
--*/
{
    KIRQL irql;

    ACQUIRE_SPIN_LOCK(&TDICLIENT_CONTEXT.TdiPnPSpinLock, &irql);

    TDICLIENT_CONTEXT.ClientDeviceCount--;
    // The device count must never go negative.
    ASSERT(TDICLIENT_CONTEXT.ClientDeviceCount >= 0);

    RELEASE_SPIN_LOCK(&TDICLIENT_CONTEXT.TdiPnPSpinLock, irql);
}
VOID
LsuIncrementTdiClientInProgress()
/*++
    Increments the count of in-progress TDI client operations under
    the PnP spin lock.
--*/
{
    KIRQL irql;

    ACQUIRE_SPIN_LOCK(&TdiPnPSpinLock, &irql);

    // Sanity-check the counter before bumping it.
    ASSERT(ClientInProgress >= 0);
    ClientInProgress++;

    RELEASE_SPIN_LOCK(&TdiPnPSpinLock, irql);
}
VOID
LsuIncrementTdiClientInProgress()
/*++
    Increments the count of in-progress TDI client I/O operations in
    the TDI client context, under the PnP spin lock.
--*/
{
    KIRQL irql;

    ACQUIRE_SPIN_LOCK(&TDICLIENT_CONTEXT.TdiPnPSpinLock, &irql);

    // Sanity-check the counter before bumping it.
    ASSERT(TDICLIENT_CONTEXT.ClientInProgressIOCount >= 0);
    TDICLIENT_CONTEXT.ClientInProgressIOCount++;

    RELEASE_SPIN_LOCK(&TDICLIENT_CONTEXT.TdiPnPSpinLock, irql);
}
VOID
LsuDecrementTdiClientInProgress()
/*++
    Decrements the in-progress TDI client I/O count in the TDI client
    context and records the time of this operation, all under the PnP
    spin lock.
--*/
{
    KIRQL irql;

    ACQUIRE_SPIN_LOCK(&TDICLIENT_CONTEXT.TdiPnPSpinLock, &irql);

    TDICLIENT_CONTEXT.ClientInProgressIOCount--;
    // The count must never go negative.
    ASSERT(TDICLIENT_CONTEXT.ClientInProgressIOCount >= 0);

    // Remember when the most recent operation completed.
    LsuCurrentTime(&TDICLIENT_CONTEXT.LastOperationTime);

    RELEASE_SPIN_LOCK(&TDICLIENT_CONTEXT.TdiPnPSpinLock, irql);
}
VOID
LsuDecrementTdiClientInProgress()
/*++
    Decrements the count of in-progress TDI client operations and
    records the time of this operation, all under the PnP spin lock.
--*/
{
    KIRQL irql;

    ACQUIRE_SPIN_LOCK(&TdiPnPSpinLock, &irql);

    ClientInProgress--;
    // The count must never go negative.
    ASSERT(ClientInProgress >= 0);

    // Remember when the most recent operation completed.
    LsuCurrentTime(&LastOperationTime);

    RELEASE_SPIN_LOCK(&TdiPnPSpinLock, irql);
}
VOID ArcDereferencePackage(VOID) { ACQUIRE_SPIN_LOCK(&ArcReferenceLock); ArcReferenceCount--; if (ArcReferenceCount == 0) { RELEASE_SPIN_LOCK(&ArcReferenceLock); // // Page out all the functions // MmUnlockPagableImageSection(ArcImageHandle); } else { RELEASE_SPIN_LOCK(&ArcReferenceLock); } }
VOID
RdrBackPackFailure (
    IN PBACK_PACK pBP
    )
/*++

Routine Description:

    This routine is called each time a request fails.

Arguments:

    pBP - supplies back pack data for this request.

Return Value:

    None.

--*/
{
    LARGE_INTEGER now;
    KIRQL savedIrql;

    DISCARDABLE_CODE(RdrFileDiscardableSection);

    KeQuerySystemTime(&now);

    ACQUIRE_SPIN_LOCK(&BackPackSpinLock, &savedIrql);

    // Grow the back-off increment until the configured ceiling is
    // reached.
    if (pBP->CurrentIncrement < pBP->MaximumDelay) {
        pBP->CurrentIncrement++;
    }

    // NextTime = CurrentTime + (Interval * CurrentIncrement )
    pBP->NextTime.QuadPart = now.QuadPart +
        (pBP->Increment.QuadPart * pBP->CurrentIncrement);

    RELEASE_SPIN_LOCK(&BackPackSpinLock, savedIrql);
}
BOOLEAN
PacketQueueEmpty(
    PLIST_ENTRY PacketQueue,
    PKSPIN_LOCK QSpinLock
    )
/*++
    Returns TRUE if the packet queue is empty (its head points back at
    itself).  When a spin lock is supplied, the head's Flink is
    sampled under that lock; the comparison itself happens after the
    lock is dropped, so the result is only a point-in-time snapshot.
--*/
{
    PLIST_ENTRY firstEntry;
    KIRQL savedIrql;

    if (QSpinLock == NULL) {
        firstEntry = PacketQueue->Flink;
    } else {
        ACQUIRE_SPIN_LOCK(QSpinLock, &savedIrql);
        firstEntry = PacketQueue->Flink;
        RELEASE_SPIN_LOCK(QSpinLock, savedIrql);
    }

    return (firstEntry == PacketQueue);
}
// Allocate n blocks under the shared GC allocation lock and set up
// each descriptor as a single-block group belonging to (gen_no, stp)
// with the given flags, chained together via the link fields.  The
// head and tail of the chain are returned through *hd and *tl.
static void
allocBlocks_sync(nat n, bdescr **hd, bdescr **tl,
                 nat gen_no, step *stp,
                 StgWord32 flags)
{
    bdescr *bd;
    nat i;
    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    bd = allocGroup(n);
    for (i = 0; i < n; i++) {
        bd[i].blocks = 1;
        bd[i].gen_no = gen_no;
        bd[i].step = stp;
        bd[i].flags = flags;
        // NOTE(review): on the last iteration this leaves
        // bd[n-1].link pointing one past the group (&bd[n]);
        // presumably the caller terminates the chain via *tl —
        // confirm against the call sites.
        bd[i].link = &bd[i+1];
        // Both the scan pointer and the free pointer start at the
        // beginning of the block.
        bd[i].u.scan = bd[i].free = bd[i].start;
    }
    *hd = bd;
    *tl = &bd[n-1];
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
}
// Returns the next active connection in the endpoint's connection
// table after *Index, taking a reference on it and advancing *Index;
// returns NULL when no further active connection exists.  All
// endpoint locks are held while the table is scanned.
PCONNECTION
WalkConnectionTable (
    IN PENDPOINT Endpoint,
    IN OUT PCSHORT Index
    )
{
    CSHORT i;
    PTABLE_HEADER tableHeader;
    PCONNECTION connection;
    KIRQL oldIrql;

    // Acquire every endpoint lock in ascending index order (a fixed
    // order avoids deadlock): lock 0 raises IRQL, the remainder are
    // taken at DPC level.
    ACQUIRE_SPIN_LOCK( &ENDPOINT_SPIN_LOCK(0), &oldIrql );
    for ( i = 1; i < ENDPOINT_LOCK_COUNT ; i++ ) {
        ACQUIRE_DPC_SPIN_LOCK( &ENDPOINT_SPIN_LOCK(i) );
    }

    tableHeader = &Endpoint->ConnectionTable;

    // Scan forward from the caller's cursor for the next slot holding
    // an active connection.
    for ( i = *Index + 1; i < tableHeader->TableSize; i++ ) {

        connection = (PCONNECTION)tableHeader->Table[i].Owner;

        if ( (connection != NULL) &&
             (GET_BLOCK_STATE(connection) == BlockStateActive) ) {

            *Index = i;

            // Reference the connection while the locks are still held
            // so it cannot go away before the caller uses it.
            SrvReferenceConnectionLocked( connection );

            goto exit;
        }
    }

    connection = NULL;

exit:

    // Release in reverse order of acquisition.
    for ( i = ENDPOINT_LOCK_COUNT-1 ; i > 0 ; i-- ) {
        RELEASE_DPC_SPIN_LOCK( &ENDPOINT_SPIN_LOCK(i) );
    }
    RELEASE_SPIN_LOCK( &ENDPOINT_SPIN_LOCK(0), oldIrql );

    return connection;

} // WalkConnectionTable
// Appends one event record to the circular LLC trace buffer when
// tracing is enabled, copying at most TRACE_DATA_LENGTH bytes of
// caller data, and marks the following slot as end-of-data.
VOID
LlcTraceWrite(
    IN UINT Event,
    IN UCHAR AdapterNumber,
    IN UINT DataBufferSize,
    IN PVOID pDataBuffer
    )
{
    //if ((AdapterNumber & 0x7f) != 0)
    //    return;

    if (TraceEnabled) {

        ACQUIRE_SPIN_LOCK( &TraceLock );

        // Wrap to the base of the buffer when the next record would
        // run past the top.
        // NOTE(review): the pointers are compared through ULONG
        // casts, which truncates on 64-bit — presumably this code is
        // 32-bit only; confirm before porting.
        if ((ULONG)(&pTraceBufferHead[1]) >= (ULONG)pTraceBufferTop) {
            pTraceBufferHead = (PLLC_TRACE_HEADER)pTraceBufferBase;
        }

        pTraceBufferHead->Event = (USHORT)Event;
        pTraceBufferHead->AdapterNumber = AdapterNumber;
        pTraceBufferHead->TimerTick = AbsoluteTime;

        // Clamp the copied payload to the fixed record size.
        pTraceBufferHead->DataLength = (UCHAR)
#ifdef min
            min( TRACE_DATA_LENGTH, DataBufferSize );
#else
            __min( TRACE_DATA_LENGTH, DataBufferSize );
#endif

        memcpy( pTraceBufferHead->Buffer, pDataBuffer, pTraceBufferHead->DataLength );

        // Advance the head and stamp the new slot as the end of the
        // trace stream.
        pTraceBufferHead++;
        pTraceBufferHead->Event = LLC_TRACE_END_OF_DATA;

        RELEASE_SPIN_LOCK( &TraceLock );
    }
}
// Allocate a chunk of up to n blocks on the current GC thread's NUMA
// node and carve it into a NULL-terminated chain of single-block
// groups.  Returns the number of blocks actually obtained (which may
// differ from the request) and the head of the chain via *hd.
static uint32_t
allocBlocks_sync(uint32_t n, bdescr **hd)
{
    bdescr *bd;
    uint32_t i;
    uint32_t node = capNoToNumaNode(gct->thread_index);
    ACQUIRE_SPIN_LOCK(&gc_alloc_block_sync);
    bd = allocLargeChunkOnNode(node,1,n);
    // NB. allocLargeChunk, rather than allocGroup(n), to allocate in a
    // fragmentation-friendly way.
    n = bd->blocks;   // the chunk may hold fewer blocks than requested
    for (i = 0; i < n; i++) {
        bd[i].blocks = 1;
        bd[i].link = &bd[i+1];
        bd[i].free = bd[i].start;
    }
    bd[n-1].link = NULL;
    // We have to hold the lock until we've finished fiddling with the metadata,
    // otherwise the block allocator can get confused.
    RELEASE_SPIN_LOCK(&gc_alloc_block_sync);
    *hd = bd;
    return n;
}
BOOLEAN
ndisDeleteGlobalDb(
    IN NDIS_INTERFACE_TYPE BusType,
    IN ULONG BusId,
    IN ULONG BusNumber,
    IN ULONG SlotNumber
    )
/*++
    Unlinks and frees the global bus/slot database entry matching the
    (BusType, BusId, BusNumber, SlotNumber) tuple.  Returns TRUE if an
    entry was found and deleted, FALSE otherwise.
--*/
{
    PBUS_SLOT_DB pMatch, *ppPrevNext;
    KIRQL SavedIrql;

    // Walk with a pointer-to-link so head and interior entries are
    // unlinked uniformly.
    ACQUIRE_SPIN_LOCK(&ndisGlobalDbLock, &SavedIrql);
    for (ppPrevNext = &ndisGlobalDb;
         (pMatch = *ppPrevNext) != NULL;
         ppPrevNext = &pMatch->Next)
    {
        if ((pMatch->BusType == BusType) &&
            (pMatch->BusId == BusId) &&
            (pMatch->BusNumber == BusNumber) &&
            (pMatch->SlotNumber == SlotNumber))
        {
            *ppPrevNext = pMatch->Next;
            break;
        }
    }
    RELEASE_SPIN_LOCK(&ndisGlobalDbLock, SavedIrql);

    // Free the unlinked entry outside the lock.
    if (pMatch != NULL)
    {
        FREE_POOL(pMatch);
    }

    return (pMatch != NULL);
}
// Evacuate a large object: unlink its block from the source
// generation's large_objects list and attach it to the destination
// generation, either directly onto scavenged_large_objects (pinned
// byte arrays, which contain no pointers) or onto the workspace's
// todo_large_objects list for later scavenging.
STATIC_INLINE void
evacuate_large(StgPtr p)
{
    bdescr *bd;
    generation *gen, *new_gen;
    nat gen_no, new_gen_no;
    gen_workspace *ws;

    bd = Bdescr(p);
    gen = bd->gen;
    gen_no = bd->gen_no;

    // The source generation's sync lock protects its large_objects
    // list and this block's flags while we move the object.
    ACQUIRE_SPIN_LOCK(&gen->sync);

    // already evacuated?
    if (bd->flags & BF_EVACUATED) {
        /* Don't forget to set the gct->failed_to_evac flag if we didn't get
         * the desired destination (see comments in evacuate()).
         */
        if (gen_no < gct->evac_gen_no) {
            gct->failed_to_evac = rtsTrue;
            TICK_GC_FAILED_PROMOTION();
        }
        RELEASE_SPIN_LOCK(&gen->sync);
        return;
    }

    // remove from large_object list
    if (bd->u.back) {
        bd->u.back->link = bd->link;
    } else { // first object in the list
        gen->large_objects = bd->link;
    }
    if (bd->link) {
        bd->link->u.back = bd->u.back;
    }

    /* link it on to the evacuated large object list of the destination gen
     */
    new_gen_no = bd->dest_no;

    // Promote to at least evac_gen_no when eagerly promoting;
    // otherwise record the failed promotion.
    if (new_gen_no < gct->evac_gen_no) {
        if (gct->eager_promotion) {
            new_gen_no = gct->evac_gen_no;
        } else {
            gct->failed_to_evac = rtsTrue;
        }
    }

    ws = &gct->gens[new_gen_no];
    new_gen = &generations[new_gen_no];

    bd->flags |= BF_EVACUATED;
    initBdescr(bd, new_gen, new_gen->to);

    // If this is a block of pinned objects, we don't have to scan
    // these objects, because they aren't allowed to contain any
    // pointers.  For these blocks, we skip the scavenge stage and put
    // them straight on the scavenged_large_objects list.
    if (bd->flags & BF_PINNED) {
        ASSERT(get_itbl((StgClosure *)p)->type == ARR_WORDS);

        // Only take the destination's lock when it differs from the
        // source lock we already hold.
        if (new_gen != gen) { ACQUIRE_SPIN_LOCK(&new_gen->sync); }
        dbl_link_onto(bd, &new_gen->scavenged_large_objects);
        new_gen->n_scavenged_large_blocks += bd->blocks;
        if (new_gen != gen) { RELEASE_SPIN_LOCK(&new_gen->sync); }
    } else {
        bd->link = ws->todo_large_objects;
        ws->todo_large_objects = bd;
    }

    RELEASE_SPIN_LOCK(&gen->sync);
}
// Completion handler for a CCB: folds the CCB's status flags into the
// adapter status word (under the adapter spin lock), logs every status
// transition, copies IOCTL results back into the SRB, translates the
// CCB status to an SRB status, and then completes the request either
// by shipping a completion IRP to the ScsiPort FDO or by queueing the
// CCB onto the timer-completion list.  Returns
// STATUS_MORE_PROCESSING_REQUIRED (STATUS_SUCCESS only for the
// no-SRB assertion path).
NTSTATUS
NdscAdapterCompletion(
    IN PCCB Ccb,
    IN PMINIPORT_DEVICE_EXTENSION HwDeviceExtension
    )
{
    KIRQL oldIrql;
    static LONG SrbSeq;
    LONG srbSeqIncremented;
    PSCSI_REQUEST_BLOCK srb;
    PCCB abortCcb;
    NTSTATUS return_status;
    UINT32 AdapterStatus, AdapterStatusBefore;
    UINT32 NeedToUpdatePdoInfoInLSBus;
    BOOLEAN busResetOccured;

    KDPrint(4,("RequestExecuting = %d\n", HwDeviceExtension->RequestExecuting));

    srb = Ccb->Srb;
    if(!srb) {
        // A CCB without an SRB is unexpected; assert and bail out.
        KDPrint(2,("Ccb:%p CcbStatus %d. No srb assigned.\n", Ccb, Ccb->CcbStatus));
        ASSERT(srb);
        return STATUS_SUCCESS;
    }

    //
    // NDASSCSI completion routine will do post operation to complete CCBs.
    //
    return_status = STATUS_MORE_PROCESSING_REQUIRED;

    //
    // Set SRB completion sequence for debugging
    //
    srbSeqIncremented = InterlockedIncrement(&SrbSeq);

#if 0
    if(KeGetCurrentIrql() == PASSIVE_LEVEL) {
        if((srbSeqIncremented%100) == 0) {
            LARGE_INTEGER interval;
            KDPrint(2,("Interval for debugging.\n"));
            interval.QuadPart = - 11 * 10000000; // 10 seconds
            KeDelayExecutionThread(KernelMode, FALSE, &interval);
        }
    }
#endif

    //
    // Update Adapter status flag
    //
    NeedToUpdatePdoInfoInLSBus = FALSE;

    ACQUIRE_SPIN_LOCK(&HwDeviceExtension->LanscsiAdapterSpinLock, &oldIrql);

    // Save the bus-reset flag
    if(ADAPTER_ISSTATUSFLAG(HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_BUSRESET_PENDING)) {
        busResetOccured = TRUE;
    } else {
        busResetOccured = FALSE;
    }

    // Save the current flag
    AdapterStatusBefore = HwDeviceExtension->AdapterStatus;

    // Check reconnecting process.
    if (LsCcbIsStatusFlagOn(Ccb, CCBSTATUS_FLAG_RECONNECTING)) {
        ADAPTER_SETSTATUSFLAG(HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_RECONNECT_PENDING);
    } else {
        ADAPTER_RESETSTATUSFLAG(HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_RECONNECT_PENDING);
    }

    // The RAID status nibble must be absent without the valid bit and
    // exactly one known value with it.
    if (!LsCcbIsStatusFlagOn(Ccb, CCBSTATUS_FLAG_RAID_FLAG_VALID)) {
        NDAS_ASSERT( Ccb->NdasrStatusFlag8 == 0 );
    } else {
        NDAS_ASSERT( Ccb->NdasrStatusFlag8 == CCBSTATUS_FLAG_RAID_DEGRADED >> 8 ||
                     Ccb->NdasrStatusFlag8 == CCBSTATUS_FLAG_RAID_RECOVERING >> 8 ||
                     Ccb->NdasrStatusFlag8 == CCBSTATUS_FLAG_RAID_FAILURE >> 8 ||
                     Ccb->NdasrStatusFlag8 == CCBSTATUS_FLAG_RAID_NORMAL >> 8 );
    }

    // Update adapter status only when CCBSTATUS_FLAG_RAID_FLAG_VALID is on.
    // In other case, Ccb has no chance to get flag information from RAID.
    if (LsCcbIsStatusFlagOn(Ccb, CCBSTATUS_FLAG_RAID_FLAG_VALID)) {

        // Check to see if the associate member is in error.
        if (LsCcbIsStatusFlagOn(Ccb, CCBSTATUS_FLAG_RAID_DEGRADED)) {
            if (!ADAPTER_ISSTATUSFLAG(HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_MEMBER_FAULT)) {
                KDPrint(2, ("NDASSCSI_ADAPTER_STATUSFLAG_MEMBER_FAULT is Set\n") );
            }
            ADAPTER_SETSTATUSFLAG( HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_MEMBER_FAULT );
        } else {
            ADAPTER_RESETSTATUSFLAG( HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_MEMBER_FAULT );
        }

        // Check recovering process.
        if (LsCcbIsStatusFlagOn(Ccb, CCBSTATUS_FLAG_RAID_RECOVERING)) {
            if (!ADAPTER_ISSTATUSFLAG(HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_RECOVERING)) {
                KDPrint(2, ("NDASSCSI_ADAPTER_STATUSFLAG_RECOVERING is Set\n") );
            }
            ADAPTER_SETSTATUSFLAG( HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_RECOVERING );
        } else {
            ADAPTER_RESETSTATUSFLAG( HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_RECOVERING );
        }

        // Check RAID failure
        if (LsCcbIsStatusFlagOn(Ccb, CCBSTATUS_FLAG_RAID_FAILURE)) {
            if (!ADAPTER_ISSTATUSFLAG(HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_RAID_FAILURE)) {
                KDPrint(2, ("NDASSCSI_ADAPTER_STATUSFLAG_RAID_FAILURE is Set\n") );
            }
            ADAPTER_SETSTATUSFLAG( HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_RAID_FAILURE );
        } else {
            ADAPTER_RESETSTATUSFLAG( HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_RAID_FAILURE );
        }

        // Set RAID normal status
        if (LsCcbIsStatusFlagOn(Ccb, CCBSTATUS_FLAG_RAID_NORMAL)) {
            if (!ADAPTER_ISSTATUSFLAG(HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_RAID_NORMAL)) {
                KDPrint(2, ("NDASSCSI_ADAPTER_STATUSFLAG_RAID_NORMAL is Set\n") );
            }
            ADAPTER_SETSTATUSFLAG( HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_RAID_NORMAL );
        } else {
            ADAPTER_RESETSTATUSFLAG( HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_RAID_NORMAL );
        }
    }

    // power-recycle occurred.
    if(LsCcbIsStatusFlagOn(Ccb, CCBSTATUS_FLAG_POWERRECYLE_OCCUR)) {
        ADAPTER_SETSTATUSFLAG(HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_POWERRECYCLED);
    } else {
        ADAPTER_RESETSTATUSFLAG(HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_POWERRECYCLED);
    }
    if (ADAPTER_ISSTATUSFLAG(HwDeviceExtension, NDASSCSI_ADAPTER_STATUSFLAG_POWERRECYCLED)) {
        //NDAS_ASSERT( FALSE );
    }

    AdapterStatus = HwDeviceExtension->AdapterStatus;

    RELEASE_SPIN_LOCK(&HwDeviceExtension->LanscsiAdapterSpinLock, oldIrql);

    // Log each status-bit transition and remember that the PDO info
    // on the NDAS bus needs refreshing.
    if(AdapterStatus != AdapterStatusBefore) {

        if( !(AdapterStatusBefore & NDASSCSI_ADAPTER_STATUSFLAG_MEMBER_FAULT) &&
            (AdapterStatus & NDASSCSI_ADAPTER_STATUSFLAG_MEMBER_FAULT) ) {
            // NDASSCSI_ADAPTER_STATUSFLAG_MEMBER_FAULT on
            SCSI_PORT_LOG_ERROR_MODULE_COMPLETION(NDASSCSI_IO_MEMBER_FAULT, EVTLOG_MEMBER_IN_ERROR);
            KDPrint(2,("Ccb:%p CcbStatus %d. Set member fault.\n", Ccb, Ccb->CcbStatus));
        }
        if( (AdapterStatusBefore & NDASSCSI_ADAPTER_STATUSFLAG_MEMBER_FAULT) &&
            !(AdapterStatus & NDASSCSI_ADAPTER_STATUSFLAG_MEMBER_FAULT) ) {
            // NDASSCSI_ADAPTER_STATUSFLAG_MEMBER_FAULT off
            SCSI_PORT_LOG_ERROR_MODULE_COMPLETION(NDASSCSI_IO_MEMBER_FAULT_RECOVERED, EVTLOG_MEMBER_RECOVERED);
            KDPrint(2,("Ccb:%p CcbStatus %d. Reset member fault.\n", Ccb, Ccb->CcbStatus));
        }
        if( !(AdapterStatusBefore & NDASSCSI_ADAPTER_STATUSFLAG_RECONNECT_PENDING) &&
            (AdapterStatus & NDASSCSI_ADAPTER_STATUSFLAG_RECONNECT_PENDING) ) {
            // NDASSCSI_ADAPTER_STATUSFLAG_RECONNECT_PENDING on
            SCSI_PORT_LOG_ERROR_MODULE_COMPLETION(NDASSCSI_IO_RECONNECT_START, EVTLOG_START_RECONNECTION);
            KDPrint(2,("Ccb:%p CcbStatus %d. Start reconnecting\n", Ccb, Ccb->CcbStatus));
        }
        if( (AdapterStatusBefore & NDASSCSI_ADAPTER_STATUSFLAG_RECONNECT_PENDING) &&
            !(AdapterStatus & NDASSCSI_ADAPTER_STATUSFLAG_RECONNECT_PENDING) ) {
            // NDASSCSI_ADAPTER_STATUSFLAG_RECONNECT_PENDING off
            SCSI_PORT_LOG_ERROR_MODULE_COMPLETION(NDASSCSI_IO_RECONNECTED, EVTLOG_END_RECONNECTION);
            KDPrint(2,("Ccb:%p CcbStatus %d. Finish reconnecting\n", Ccb, Ccb->CcbStatus));
        }
        if( !(AdapterStatusBefore & NDASSCSI_ADAPTER_STATUSFLAG_RECOVERING) &&
            (AdapterStatus & NDASSCSI_ADAPTER_STATUSFLAG_RECOVERING) ) {
            // NDASSCSI_ADAPTER_STATUSFLAG_RECOVERING on
            SCSI_PORT_LOG_ERROR_MODULE_COMPLETION(NDASSCSI_IO_RECOVERY_START, EVTLOG_START_RECOVERING);
            KDPrint(2,("Ccb:%p CcbStatus %d. Started recovering\n", Ccb, Ccb->CcbStatus));
        }
        if( (AdapterStatusBefore & NDASSCSI_ADAPTER_STATUSFLAG_RECOVERING) &&
            !(AdapterStatus & NDASSCSI_ADAPTER_STATUSFLAG_RECOVERING) &&
            !(AdapterStatus & (NDASSCSI_ADAPTER_STATUSFLAG_RAID_FAILURE|NDASSCSI_ADAPTER_STATUSFLAG_MEMBER_FAULT)) &&
            (AdapterStatus & NDASSCSI_ADAPTER_STATUSFLAG_RAID_NORMAL)) {
            // NDASSCSI_ADAPTER_STATUSFLAG_RECOVERING off
            SCSI_PORT_LOG_ERROR_MODULE_COMPLETION(NDASSCSI_IO_RECOVERED, EVTLOG_END_RECOVERING);
            KDPrint(2,("Ccb:%p CcbStatus %d. Ended recovering\n", Ccb, Ccb->CcbStatus));
        }
        if ( (AdapterStatus & NDASSCSI_ADAPTER_STATUSFLAG_RAID_FAILURE) &&
             !(AdapterStatusBefore & NDASSCSI_ADAPTER_STATUSFLAG_RAID_FAILURE)) {
            SCSI_PORT_LOG_ERROR_MODULE_COMPLETION(NDASSCSI_IO_RAID_FAILURE, EVTLOG_RAID_FAILURE);
            KDPrint(2,("Ccb:%p CcbStatus %d. RAID failure\n", Ccb, Ccb->CcbStatus));
        }
        if( !(AdapterStatusBefore & NDASSCSI_ADAPTER_STATUSFLAG_POWERRECYCLED) &&
            (AdapterStatus & NDASSCSI_ADAPTER_STATUSFLAG_POWERRECYCLED) ) {
            // NDASSCSI_ADAPTER_STATUSFLAG_POWERRECYCLED on
            SCSI_PORT_LOG_ERROR_MODULE_COMPLETION(NDASSCSI_IO_DISK_POWERRECYCLE, EVTLOG_DISK_POWERRECYCLED);
            KDPrint(2,("Ccb:%p CcbStatus %d. Started recovering\n", Ccb, Ccb->CcbStatus));
        }

        NeedToUpdatePdoInfoInLSBus = TRUE;
    }

    //
    // If CCB_OPCODE_UPDATE is successful, update adapter status in LanscsiBus
    //
    if(Ccb->OperationCode == CCB_OPCODE_UPDATE) {
        if(Ccb->CcbStatus == CCB_STATUS_SUCCESS) {
            SCSI_PORT_LOG_ERROR_MODULE_COMPLETION(NDASSCSI_IO_UPGRADE_SUCC, EVTLOG_SUCCEED_UPGRADE);
        } else {
            SCSI_PORT_LOG_ERROR_MODULE_COMPLETION(NDASSCSI_IO_UPGRADE_FAIL, EVTLOG_FAIL_UPGRADE);
        }
        NeedToUpdatePdoInfoInLSBus = TRUE;
    }

    //
    // Copy IO control results to the SRB buffer.
    //
    // If device lock CCB is successful, copy the result to the SRB.
    //
    if(Ccb->OperationCode == CCB_OPCODE_DEVLOCK) {
        if(Ccb->CcbStatus == CCB_STATUS_SUCCESS) {
            PSRB_IO_CONTROL srbIoctlHeader;
            PUCHAR lockIoctlBuffer;
            PNDSCIOCTL_DEVICELOCK ioCtlAcReDeviceLock;
            PLURN_DEVLOCK_CONTROL lurnAcReDeviceLock;

            //
            // Get the Ioctl buffer.
            //
            srbIoctlHeader = (PSRB_IO_CONTROL)srb->DataBuffer;
            srbIoctlHeader->ReturnCode = SRB_STATUS_SUCCESS;
            lockIoctlBuffer = (PUCHAR)(srbIoctlHeader + 1);
            ioCtlAcReDeviceLock = (PNDSCIOCTL_DEVICELOCK)lockIoctlBuffer;
            lurnAcReDeviceLock = (PLURN_DEVLOCK_CONTROL)Ccb->DataBuffer;

            // Copy the result
            RtlCopyMemory( ioCtlAcReDeviceLock->LockData,
                           lurnAcReDeviceLock->LockData,
                           NDSCLOCK_LOCKDATA_LENGTH);
        }
    } else if(Ccb->OperationCode == CCB_OPCODE_QUERY) {
        if(Ccb->CcbStatus == CCB_STATUS_SUCCESS) {
            PSRB_IO_CONTROL srbIoctlHeader;
            NTSTATUS copyStatus;

            // Copy the query output into the SRB's IOCTL buffer and
            // translate the copy status into an SRB return code.
            srbIoctlHeader = (PSRB_IO_CONTROL)srb->DataBuffer;
            copyStatus = NdscCopyQueryOutputToSrb( HwDeviceExtension,
                                                   Ccb->DataBufferLength,
                                                   Ccb->DataBuffer,
                                                   srbIoctlHeader->Length,
                                                   (PUCHAR)(srbIoctlHeader + 1) );
            if(copyStatus == STATUS_BUFFER_TOO_SMALL) {
                srbIoctlHeader->ReturnCode = SRB_STATUS_DATA_OVERRUN;
            }else if(NT_SUCCESS(copyStatus)) {
                srbIoctlHeader->ReturnCode = SRB_STATUS_SUCCESS;
            } else {
                srbIoctlHeader->ReturnCode = SRB_STATUS_ERROR;
            }
        }
    }

    KDPrint(4,("CcbStatus %d\n", Ccb->CcbStatus));

    //
    // Translate CcbStatus to SrbStatus
    //
    CcbStatusToSrbStatus(Ccb, srb);

    //
    // Perform stop process when we get stop status.
    //
    if(Ccb->CcbStatus == CCB_STATUS_STOP) {
        //
        // Stop in the timer routine.
        //
        KDPrint(2, ("Stop status. Stop in the timer routine.\n"));
    } else {
        //
        // Update PDO information on the NDAS bus.
        //
        if(NeedToUpdatePdoInfoInLSBus) {
            KDPrint(2, ("<<<<<<<<<<<<<<<< %08lx -> %08lx ADAPTER STATUS CHANGED"
                        " >>>>>>>>>>>>>>>>\n", AdapterStatusBefore, AdapterStatus));
            UpdatePdoInfoInLSBus(HwDeviceExtension, HwDeviceExtension->AdapterStatus);
        }
    }

    //
    // Process Abort CCB.
    //
    abortCcb = Ccb->AbortCcb;
    if(abortCcb != NULL) {
        KDPrint(2,("abortSrb\n"));
        ASSERT(FALSE);

        // Queue both the completing CCB and the aborted one onto the
        // timer-completion list.
        srb->SrbStatus = SRB_STATUS_SUCCESS;
        LsCcbSetStatusFlag(Ccb, CCBSTATUS_FLAG_TIMER_COMPLETE);
        InitializeListHead(&Ccb->ListEntry);
        ExInterlockedInsertTailList( &HwDeviceExtension->CcbTimerCompletionList,
                                     &Ccb->ListEntry,
                                     &HwDeviceExtension->CcbTimerCompletionListSpinLock );

        ((PSCSI_REQUEST_BLOCK)abortCcb->Srb)->SrbStatus = SRB_STATUS_ABORTED;
        LsCcbSetStatusFlag(abortCcb, CCBSTATUS_FLAG_TIMER_COMPLETE);
        InitializeListHead(&abortCcb->ListEntry);
        ExInterlockedInsertTailList( &HwDeviceExtension->CcbTimerCompletionList,
                                     &abortCcb->ListEntry,
                                     &HwDeviceExtension->CcbTimerCompletionListSpinLock );
    } else {
        BOOLEAN criticalSrb;

        //
        // We should not use completion IRP method with disable-disconnect flag.
        // A SRB with DISABLE_DISCONNECT flag causes the SCSI port queue locked.
        //
        criticalSrb = (srb->SrbFlags & SRB_FLAGS_DISABLE_DISCONNECT) != 0 ||
                      (srb->SrbFlags & SRB_FLAGS_BYPASS_FROZEN_QUEUE) != 0 ||
                      (srb->SrbFlags & SRB_FLAGS_BYPASS_LOCKED_QUEUE) != 0;

#if DBG
        if(criticalSrb) {
            KDPrint(2, ("Critical Srb:%p\n", srb));
        }
#if 0
        NdscPrintSrb("Comp:", srb);
#endif
#endif

        //
        // Make Complete IRP and Send it.
        //
        // In case of HostStatus == CCB_STATUS_SUCCESS_TIMER, CCB will go to the timer to complete.
        //
        if( (Ccb->CcbStatus == CCB_STATUS_SUCCESS || Ccb->CcbStatus == CCB_STATUS_DATA_OVERRUN) &&
            !LsCcbIsStatusFlagOn(Ccb, CCBSTATUS_FLAG_TIMER_COMPLETE) &&
            !LsCcbIsStatusFlagOn(Ccb, CCBSTATUS_FLAG_BUSCHANGE) &&
            !busResetOccured &&
            !criticalSrb ) {

            PDEVICE_OBJECT pDeviceObject = HwDeviceExtension->ScsiportFdoObject;
            PIRP pCompletionIrp = NULL;
            PIO_STACK_LOCATION pIoStack;
            NTSTATUS ntStatus;
            PSCSI_REQUEST_BLOCK completionSrb = NULL;
            PCOMPLETION_DATA completionData = NULL;

            // Allocate the completion SRB, IRP, and context; any
            // allocation failure jumps to Out with partial cleanup.
            completionSrb = ExAllocatePoolWithTag(NonPagedPool, sizeof(SCSI_REQUEST_BLOCK), NDSC_PTAG_SRB);
            if(completionSrb == NULL)
                goto Out;
            RtlZeroMemory( completionSrb, sizeof(SCSI_REQUEST_BLOCK) );

            // Build New IRP.
            pCompletionIrp = IoAllocateIrp((CCHAR)(pDeviceObject->StackSize + 1), FALSE);
            if(pCompletionIrp == NULL) {
                ExFreePoolWithTag(completionSrb, NDSC_PTAG_SRB);
                goto Out;
            }
            completionData = ExAllocatePoolWithTag(NonPagedPool, sizeof(COMPLETION_DATA), NDSC_PTAG_CMPDATA);
            if(completionData == NULL) {
                ExFreePoolWithTag(completionSrb, NDSC_PTAG_SRB);
                IoFreeIrp(pCompletionIrp);
                pCompletionIrp = NULL;
                goto Out;
            }

            pCompletionIrp->MdlAddress = NULL;

            // Set IRP stack location.
            pIoStack = IoGetNextIrpStackLocation(pCompletionIrp);
            pIoStack->DeviceObject = pDeviceObject;
            pIoStack->MajorFunction = IRP_MJ_SCSI;
            pIoStack->Parameters.DeviceIoControl.InputBufferLength = 0;
            pIoStack->Parameters.DeviceIoControl.OutputBufferLength = 0;
            pIoStack->Parameters.Scsi.Srb = completionSrb;

            // Set SRB.
            completionSrb->Length = sizeof(SCSI_REQUEST_BLOCK);
            completionSrb->Function = SRB_FUNCTION_EXECUTE_SCSI;
            completionSrb->PathId = srb->PathId;
            completionSrb->TargetId = srb->TargetId;
            completionSrb->Lun = srb->Lun;
            completionSrb->QueueAction = SRB_SIMPLE_TAG_REQUEST;
            completionSrb->DataBuffer = Ccb;
            completionSrb->SrbFlags |= SRB_FLAGS_BYPASS_FROZEN_QUEUE | SRB_FLAGS_NO_QUEUE_FREEZE;
            completionSrb->OriginalRequest = pCompletionIrp;
            completionSrb->CdbLength = MAXIMUM_CDB_SIZE;
            completionSrb->Cdb[0] = SCSIOP_COMPLETE;
            completionSrb->Cdb[1] = (UCHAR)srbSeqIncremented;
            completionSrb->TimeOutValue = 20;
            completionSrb->SrbStatus = SRB_STATUS_SUCCESS;

            //
            // Set completion data for the completion IRP.
            //
            completionData->HwDeviceExtension = HwDeviceExtension;
            completionData->CompletionSrb = completionSrb;
            completionData->ShippedCcb = Ccb;
            completionData->ShippedCcbAllocatedFromPool = LsCcbIsFlagOn(Ccb, CCB_FLAG_ALLOCATED);

Out:
            KDPrint(5,("Before Completion\n"));
            IoSetCompletionRoutine( pCompletionIrp,
                                    CompletionIrpCompletionRoutine,
                                    completionData,
                                    TRUE,
                                    TRUE,
                                    TRUE);
            ASSERT(HwDeviceExtension->RequestExecuting != 0);

#if 0
            {
                LARGE_INTEGER interval;
                ULONG SrbTimeout;
                static DebugCount = 0;

                DebugCount ++;
                SrbTimeout = ((PSCSI_REQUEST_BLOCK)(Ccb->Srb))->TimeOutValue;
                if( SrbTimeout>9 && (DebugCount%1000) == 0 ) {
                    KDPrint(2,("Experiment!!!!!!! Delay completion. SrbTimeout:%d\n", SrbTimeout));
                    interval.QuadPart = - (INT64)SrbTimeout * 11 * 1000000;
                    KeDelayExecutionThread(KernelMode, FALSE, &interval);
                }
            }
#endif

            //
            // call Scsiport FDO.
            //
            ntStatus = IoCallDriver(pDeviceObject, pCompletionIrp);
            ASSERT(NT_SUCCESS(ntStatus));
            if(ntStatus!= STATUS_SUCCESS && ntStatus!= STATUS_PENDING) {
                // The completion IRP could not be delivered; fall
                // back to the timer-completion list.
                KDPrint(2,("ntStatus = 0x%x\n", ntStatus));
                SCSI_PORT_LOG_ERROR_MODULE_COMPLETION(NDASSCSI_IO_COMPIRP_FAIL, EVTLOG_FAIL_COMPLIRP);
                KDPrint(2,("IoCallDriver() error. CCB(%p) and SRB(%p) is going to the timer."
                           " CcbStatus:%x CcbFlag:%x\n", Ccb, Ccb->Srb, Ccb->CcbStatus, Ccb->Flags));
                InitializeListHead(&Ccb->ListEntry);
                LsCcbSetStatusFlag(Ccb, CCBSTATUS_FLAG_TIMER_COMPLETE);
                ExInterlockedInsertTailList( &HwDeviceExtension->CcbTimerCompletionList,
                                             &Ccb->ListEntry,
                                             &HwDeviceExtension->CcbTimerCompletionListSpinLock );
            }
        } else {
            // Non-success, bus-change, bus-reset, or critical SRB:
            // complete via the timer routine instead of an IRP.
            KDPrint(2,("CCB(%p) and SRB(%p) is going to the timer."
                       " CcbStatus:%x CcbFlag:%x\n", Ccb, Ccb->Srb, Ccb->CcbStatus, Ccb->Flags));
            InitializeListHead(&Ccb->ListEntry);
            LsCcbSetStatusFlag(Ccb, CCBSTATUS_FLAG_TIMER_COMPLETE);
            ExInterlockedInsertTailList( &HwDeviceExtension->CcbTimerCompletionList,
                                         &Ccb->ListEntry,
                                         &HwDeviceExtension->CcbTimerCompletionListSpinLock );
        }
    }

    return return_status;
}