void mempool_node_put(mempool p, void *data)
{
    struct node *node;
    node = DATA_TO_NODE(data);

#ifdef MEMPOOLASSERT
    if(node->magic != NODE_MAGIC) {
        ShowError("Mempool [%s] node_put failed, given address (%p) has invalid magic.\n", p->name, data);
        return; // lost
    }

    {
        struct pool_segment *node_seg = node->segment;
        if(node_seg->pool != p) {
            // Note: pass the origin pool's name, not the pool pointer, to match the %s specifier.
            ShowError("Mempool [%s] node_put failed, given node (data address %p) doesn't belong to this pool. ( Node origin is [%s] )\n", p->name, data, node_seg->pool->name);
            return;
        }
    }

    // reset used flag.
    node->used = false;
#endif

    //
    EnterSpinLock(&p->nodeLock);
    node->next = p->free_list;
    p->free_list = node;
    LeaveSpinLock(&p->nodeLock);

    InterlockedIncrement64(&p->num_nodes_free);
}//end: mempool_node_put()
/*
NicIndicateRecvPackets

Indicates a received packet (MAC frame) to NDIS (and from there to the protocol stack).

Parameters:
    pAdapter     : pointer to the adapter object created by the miniport driver.
    ppNBSPackets : pointer to an NDIS packet to be indicated up.

Return:

Note:

History:
    Created by yichen, 1/Apr/2009

IRQL: PASSIVE_LEVEL
*/
VOID NicIndicateRecvPackets(IN PMP_ADAPTER pAdapter, IN PPNDIS_PACKETS_OR_NBL ppNBSPackets)
{
    NDIS_SET_PACKET_HEADER_SIZE(*ppNBSPackets, ETH_HEADER_SIZE);
    NDIS_SET_PACKET_STATUS(*ppNBSPackets, NDIS_STATUS_SUCCESS);
    NdisMIndicateReceivePacket(pAdapter->AdapterHandle, ppNBSPackets, 1); // only 1 packet indicated
    InterlockedIncrement64(&pAdapter->ullGoodReceives);
}
static void *mempool_async_allocator(void *x)
{
    mempool p;

    while(1) {
        if(l_async_terminate > 0)
            break;

        EnterSpinLock(&l_mempoolListLock);

        for(p = l_mempoolList; p != NULL; p = p->next) {
            if(p->num_nodes_free < p->elem_realloc_thresh) {
                // add new segment.
                segment_allocate_add(p, p->elem_realloc_step);
                // increase stats counter
                InterlockedIncrement64(&p->num_realloc_events);
            }
        }

        LeaveSpinLock(&l_mempoolListLock);

        ramutex_lock(l_async_lock);
        racond_wait(l_async_cond, l_async_lock, -1);
        ramutex_unlock(l_async_lock);
    }

    return NULL;
}//end: mempool_async_allocator()
void mempool_node_put(mempool p, void *data)
{
    struct node *node;
    node = DATA_TO_NODE(data);

#ifdef MEMPOOLASSERT
    if(node->magic != NODE_MAGIC) {
        ShowError(read_message("Source.common.mempool_node_put"), p->name, data);
        return; // lost
    }

    {
        struct pool_segment *node_seg = node->segment;
        if(node_seg->pool != p) {
            // Note: pass the origin pool's name so the message's string specifier gets a string.
            ShowError(read_message("Source.common.mempool_node_put2"), p->name, data, node_seg->pool->name);
            return;
        }
    }

    // reset used flag.
    node->used = false;
#endif

    //
    EnterSpinLock(&p->nodeLock);
    node->next = p->free_list;
    p->free_list = node;
    LeaveSpinLock(&p->nodeLock);

    InterlockedIncrement64(&p->num_nodes_free);
}//end: mempool_node_put()
VOID NicCompletePacket(IN PMP_ADAPTER Adapter, IN PNDIS_PACKET_OR_NBL pNBSPacket)
{
    NET_BUFFER_LIST_STATUS(pNBSPacket) = NDIS_STATUS_SUCCESS;
    NdisMSendNetBufferListsComplete(Adapter->AdapterHandle, pNBSPacket, 0);
    InterlockedIncrement64(&Adapter->ullGoodTransmits);
}
VOID NicCompletePacket(IN PMP_ADAPTER Adapter, IN PNDIS_PACKET_OR_NBL pNBSPacket)
{
    NDIS_SET_PACKET_STATUS(pNBSPacket, NDIS_STATUS_SUCCESS);
    NdisMSendComplete(Adapter->AdapterHandle, pNBSPacket, NDIS_STATUS_SUCCESS);
    InterlockedIncrement64(&Adapter->ullGoodTransmits);
}
VOID NicDropPacket(IN PMP_ADAPTER Adapter, IN PNDIS_PACKET_OR_NBL pNBSPacket)
{
    NET_BUFFER_LIST_STATUS(pNBSPacket) = NDIS_STATUS_FAILURE;
    NdisMSendNetBufferListsComplete(Adapter->AdapterHandle, pNBSPacket, 0);
    InterlockedIncrement64(&Adapter->ullTransmitFail);
}
VOID NicDropPacket(IN PMP_ADAPTER Adapter, IN PNDIS_PACKET_OR_NBL pNBSPacket)
{
    NDIS_SET_PACKET_STATUS(pNBSPacket, NDIS_STATUS_FAILURE);
    NdisMSendComplete(Adapter->AdapterHandle, pNBSPacket, NDIS_STATUS_FAILURE);
    InterlockedIncrement64(&Adapter->ullTransmitFail);
}
EXTERN_C void* SysCallCallback( __inout ULONG_PTR* reg )
{
    // Use the increment's return value: re-reading m_counter afterwards would race
    // with other CPUs, and a 64-bit value needs %llx rather than %x.
    LONG64 counter = InterlockedIncrement64(&m_counter);

    if (0 == (counter % (0x100000 / 4)))
        DbgPrint("syscalls are really painful ... : %llx\n", counter);

    ULONG core_id = KeGetCurrentProcessorNumber();
    if (core_id > MAX_PROCID)
        core_id = 0; // incorrect ... TODO ...

    return CSysCall::GetSysCall((BYTE)core_id);
}
int64_t Atomics::IncrementAndGet64(int64_t* ptr)
{
#ifdef _WIN64
    return InterlockedIncrement64(reinterpret_cast<LONG64*>(ptr));
#else
    // No native 64-bit interlocked increment on 32-bit targets: retry a CAS loop instead.
    while (true)
    {
        int64_t expVal = *ptr;
        int64_t newVal = expVal + 1;

        if (CompareAndSet64(ptr, expVal, newVal))
            return newVal;
    }
#endif
}
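The 32-bit branch above leans on a CompareAndSet64 helper that is not shown in this excerpt. A minimal sketch of what such a helper could look like on Windows, assuming it simply wraps InterlockedCompareExchange64 (the real project may implement it differently):

#include <windows.h>
#include <stdint.h>

// Hypothetical helper: atomically replace *ptr with newVal if it still holds expVal.
// InterlockedCompareExchange64 returns the value *ptr held before the call, so
// comparing that against expVal tells us whether our update won the race.
static bool CompareAndSet64(int64_t* ptr, int64_t expVal, int64_t newVal)
{
    return InterlockedCompareExchange64(reinterpret_cast<LONG64*>(ptr),
                                        newVal, expVal) == expVal;
}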
LoaderAllocator::LoaderAllocator()
{
    LIMITED_METHOD_CONTRACT;

    // initialize all members up front to NULL so that short-circuit failure won't cause invalid values
    m_InitialReservedMemForLoaderHeaps = NULL;
    m_pLowFrequencyHeap = NULL;
    m_pHighFrequencyHeap = NULL;
    m_pStubHeap = NULL;
    m_pPrecodeHeap = NULL;
    m_pExecutableHeap = NULL;
#ifdef FEATURE_READYTORUN
    m_pDynamicHelpersHeap = NULL;
#endif
    m_pFuncPtrStubs = NULL;
    m_hLoaderAllocatorObjectHandle = NULL;
    m_pStringLiteralMap = NULL;

    m_cReferences = (UINT32)-1;

    m_pDomainAssemblyToDelete = NULL;

#ifdef FAT_DISPATCH_TOKENS
    // DispatchTokenFat pointer table for token overflow scenarios. Lazily allocated.
    m_pFatTokenSetLock = NULL;
    m_pFatTokenSet = NULL;
#endif

#ifndef CROSSGEN_COMPILE
    m_pVirtualCallStubManager = NULL;
#endif

    m_fGCPressure = false;
    m_fTerminated = false;
    m_fUnloaded = false;
    m_fMarked = false;
    m_pLoaderAllocatorDestroyNext = NULL;
    m_pDomain = NULL;
    m_pCodeHeapInitialAlloc = NULL;
    m_pVSDHeapInitialAlloc = NULL;
    m_pLastUsedCodeHeap = NULL;
    m_pLastUsedDynamicCodeHeap = NULL;
    m_pJumpStubCache = NULL;

    m_nLoaderAllocator = InterlockedIncrement64((LONGLONG *)&LoaderAllocator::cLoaderAllocatorsCreated);
}
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
/// \brief
/// \return CAsyncTcpPeer*
////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
CAsyncTcpPeer* CAsyncPeerListener::PopPeer()
{
    SafeGuard();

    CAsyncTcpPeer* peer = nullptr;
    if (!m_Queue.try_pop(peer))
    {
        peer = CreatePeer();
        m_PeerArray.push_back(peer);
        peer->SetListener(this);
        InterlockedIncrement64(&m_AllocCount);
    }
    return peer;
}
_Use_decl_annotations_
NTSTATUS ProcessObserverAddRule(
    POBSERVER_PROCESS_CREATION_RULE Rule,
    POBSERVER_RULE_HANDLE RuleHandle
)
{
    static LONG64 RuleCounter = 0;
    PPROCESS_RULE_LIST_ENTRY pEntry;
    ULONG Length = FIELD_OFFSET(PROCESS_RULE_LIST_ENTRY, Rule.ParentProcessName) +
        ((Rule->ParentProcessNameLength + 1) * sizeof(WCHAR));

    pEntry = PROCESS_OBSERVER_ALLOCATE(Length, NonPagedPool);

    if (pEntry == NULL)
    {
        DEBUG_LOG("ProcessObserverAddRule: Out of memory");
        return STATUS_NO_MEMORY;
    }

    RtlCopyMemory(&pEntry->Rule, Rule, sizeof(OBSERVER_PROCESS_CREATION_RULE));
    RtlCopyMemory(pEntry->Rule.ParentProcessName,
        Rule->ParentProcessName,
        Rule->ParentProcessNameLength * sizeof(WCHAR));
    pEntry->Rule.ParentProcessName[Rule->ParentProcessNameLength] = L'\0';

    RuleHandle->RuleHandle = pEntry->RuleHandle.RuleHandle = InterlockedIncrement64(&RuleCounter);
    RuleHandle->RuleType = pEntry->RuleHandle.RuleType = RULE_TYPE_CREATE_PROCESS;

    InsertResourceListHead(&ProcessRuleList, &pEntry->ListEntry);

    return STATUS_SUCCESS;
}
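Note how the static RuleCounter doubles as a handle generator: every call to InterlockedIncrement64 returns a distinct value, so two concurrent ProcessObserverAddRule calls can never hand out the same rule handle. The same pattern in isolation (a minimal sketch; the names are hypothetical):

#include <windows.h>

// Hypothetical sketch: allocate process-wide unique, monotonically increasing IDs.
// The increment and the read of the new value form one atomic operation, so no
// lock is required; the first ID handed out is 1.
static LONG64 g_nextRuleId = 0;

LONG64 AllocateRuleId(void)
{
    return InterlockedIncrement64(&g_nextRuleId);
}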
FD3D12BoundShaderState::FD3D12BoundShaderState(
    FVertexDeclarationRHIParamRef InVertexDeclarationRHI,
    FVertexShaderRHIParamRef InVertexShaderRHI,
    FPixelShaderRHIParamRef InPixelShaderRHI,
    FHullShaderRHIParamRef InHullShaderRHI,
    FDomainShaderRHIParamRef InDomainShaderRHI,
    FGeometryShaderRHIParamRef InGeometryShaderRHI
    ) :
    CacheLink(InVertexDeclarationRHI, InVertexShaderRHI, InPixelShaderRHI, InHullShaderRHI, InDomainShaderRHI, InGeometryShaderRHI, this),
    UniqueID(InterlockedIncrement64(reinterpret_cast<volatile int64*>(&BoundShaderStateID)))
{
    INC_DWORD_STAT(STAT_D3D12NumBoundShaderState);

    // Warning: Input layout desc contains padding which must be zero-initialized to prevent PSO cache misses
    FMemory::Memzero(&InputLayout, sizeof(InputLayout));

    FD3D12VertexDeclaration* InVertexDeclaration = FD3D12DynamicRHI::ResourceCast(InVertexDeclarationRHI);
    FD3D12VertexShader* InVertexShader = FD3D12DynamicRHI::ResourceCast(InVertexShaderRHI);
    FD3D12PixelShader* InPixelShader = FD3D12DynamicRHI::ResourceCast(InPixelShaderRHI);
    FD3D12HullShader* InHullShader = FD3D12DynamicRHI::ResourceCast(InHullShaderRHI);
    FD3D12DomainShader* InDomainShader = FD3D12DynamicRHI::ResourceCast(InDomainShaderRHI);
    FD3D12GeometryShader* InGeometryShader = FD3D12DynamicRHI::ResourceCast(InGeometryShaderRHI);

    // Create an input layout for this combination of vertex declaration and vertex shader.
    InputLayout.NumElements = (InVertexDeclaration ? InVertexDeclaration->VertexElements.Num() : 0);
    InputLayout.pInputElementDescs = (InVertexDeclaration ? InVertexDeclaration->VertexElements.GetData() : nullptr);

    bShaderNeedsGlobalConstantBuffer[SF_Vertex] = InVertexShader->bShaderNeedsGlobalConstantBuffer;
    bShaderNeedsGlobalConstantBuffer[SF_Hull] = InHullShader ? InHullShader->bShaderNeedsGlobalConstantBuffer : false;
    bShaderNeedsGlobalConstantBuffer[SF_Domain] = InDomainShader ? InDomainShader->bShaderNeedsGlobalConstantBuffer : false;
    bShaderNeedsGlobalConstantBuffer[SF_Pixel] = InPixelShader ? InPixelShader->bShaderNeedsGlobalConstantBuffer : false;
    bShaderNeedsGlobalConstantBuffer[SF_Geometry] = InGeometryShader ? InGeometryShader->bShaderNeedsGlobalConstantBuffer : false;

    static_assert(ARRAY_COUNT(bShaderNeedsGlobalConstantBuffer) == SF_NumFrequencies, "EShaderFrequency size should match with array count of bShaderNeedsGlobalConstantBuffer.");

#if D3D12_SUPPORTS_PARALLEL_RHI_EXECUTE
    CacheLink.AddToCache();
#endif
}
posix_errno_t efile_rename(const efile_path_t *old_path, const efile_path_t *new_path)
{
    BOOL old_is_directory, new_is_directory;
    DWORD move_flags, last_error;

    ASSERT_PATH_FORMAT(old_path);
    ASSERT_PATH_FORMAT(new_path);

    move_flags = MOVEFILE_COPY_ALLOWED | MOVEFILE_WRITE_THROUGH;

    if(MoveFileExW((WCHAR*)old_path->data, (WCHAR*)new_path->data, move_flags)) {
        return 0;
    }

    last_error = GetLastError();

    old_is_directory = has_file_attributes(old_path, FILE_ATTRIBUTE_DIRECTORY);
    new_is_directory = has_file_attributes(new_path, FILE_ATTRIBUTE_DIRECTORY);

    switch(last_error) {
    case ERROR_SHARING_VIOLATION:
    case ERROR_ACCESS_DENIED:
        if(old_is_directory) {
            BOOL moved_into_itself;

            moved_into_itself = (old_path->size <= new_path->size) &&
                !_wcsnicmp((WCHAR*)old_path->data, (WCHAR*)new_path->data,
                    PATH_LENGTH(old_path));

            if(moved_into_itself) {
                return EINVAL;
            } else if(is_path_root(old_path)) {
                return EINVAL;
            }

            /* Renaming a directory across volumes needs to be rewritten as
             * EXDEV so that the caller can respond by simulating it with
             * copy/delete operations.
             *
             * Files are handled through MOVEFILE_COPY_ALLOWED. */
            if(!has_same_mount_point(old_path, new_path)) {
                return EXDEV;
            }
        }
        break;
    case ERROR_PATH_NOT_FOUND:
    case ERROR_FILE_NOT_FOUND:
        return ENOENT;
    case ERROR_ALREADY_EXISTS:
    case ERROR_FILE_EXISTS:
        if(old_is_directory && !new_is_directory) {
            return ENOTDIR;
        } else if(!old_is_directory && new_is_directory) {
            return EISDIR;
        } else if(old_is_directory && new_is_directory) {
            /* This will fail if the destination isn't empty. */
            if(RemoveDirectoryW((WCHAR*)new_path->data)) {
                return efile_rename(old_path, new_path);
            }

            return EEXIST;
        } else if(!old_is_directory && !new_is_directory) {
            /* This is pretty iffy; the public documentation says that the
             * operation may EACCES on some systems when either file is open,
             * which gives us room to use MOVEFILE_REPLACE_EXISTING and be done
             * with it, but the old implementation simulated Unix semantics and
             * there's a lot of code that relies on that.
             *
             * The simulation renames the destination to a scratch name to get
             * around the fact that it's impossible to open (and by extension
             * rename) a file that's been deleted while open. It has a few
             * drawbacks though;
             *
             * 1) It's not atomic as there's a small window where there's no
             *    file at all on the destination path.
             * 2) It will confuse applications that subscribe to folder
             *    changes.
             * 3) It will fail if we lack general permission to write in the
             *    same folder. */

            WCHAR *swap_path = enif_alloc(new_path->size + sizeof(WCHAR) * 64);

            if(swap_path == NULL) {
                return ENOMEM;
            } else {
                static LONGLONG unique_counter = 0;
                WCHAR *swap_path_end;

                /* We swap in the same folder as the destination to be
                 * reasonably sure that it's on the same volume. Note that
                 * we're avoiding GetTempFileNameW as it will fail on long
                 * paths. */
                sys_memcpy(swap_path, (WCHAR*)new_path->data, new_path->size);
                swap_path_end = swap_path + PATH_LENGTH(new_path);

                while(!IS_SLASH(*swap_path_end)) {
                    ASSERT(swap_path_end > swap_path);
                    swap_path_end--;
                }

                StringCchPrintfW(&swap_path_end[1], 64, L"erl-%lx-%llx.tmp",
                    GetCurrentProcessId(), unique_counter);
                InterlockedIncrement64(&unique_counter);
            }

            if(MoveFileExW((WCHAR*)new_path->data, swap_path,
                    MOVEFILE_REPLACE_EXISTING)) {
                if(MoveFileExW((WCHAR*)old_path->data, (WCHAR*)new_path->data,
                        move_flags)) {
                    last_error = ERROR_SUCCESS;
                    DeleteFileW(swap_path);
                } else {
                    last_error = GetLastError();
                    MoveFileW(swap_path, (WCHAR*)new_path->data);
                }
            } else {
                last_error = GetLastError();
                DeleteFileW(swap_path);
            }

            enif_free(swap_path);

            return windows_to_posix_errno(last_error);
        }

        return EEXIST;
    }

    return windows_to_posix_errno(last_error);
}
int64 atomic_increment(int64 volatile *value)
{
    return InterlockedIncrement64(value);
}
WindowsAtomics::Int64 WindowsAtomics::AtomicIncrement(AtomicInt64 &Val)
{
    return InterlockedIncrement64(&Val);
}
static void segment_allocate_add(mempool p, uint64 count)
{
    // Required Memory:
    //  sz( segment )
    //  count * sz( real_node_size )
    //
    //  where real node size is:
    //      ALIGN_TO_16( sz( node ) ) + p->elem_size
    //  so the node's usable address is nodebase + ALIGN_TO_16(sz(node))
    //
    size_t total_sz;
    struct pool_segment *seg = NULL;
    struct node *nodeList = NULL;
    struct node *node = NULL;
    char *ptr = NULL;
    uint64 i;

    total_sz = ALIGN_TO_16(sizeof(struct pool_segment))
             + ((size_t)count * (sizeof(struct node) + (size_t)p->elem_size));

#ifdef MEMPOOL_DEBUG
    ShowDebug(read_message("Source.common.mempool_debug"), p->name, count, (float)total_sz/1024.f/1024.f);
#endif

    // Allocate! (Spin forever until we've got the memory.)
    i = 0;
    while(1) {
        ptr = (char *)aMalloc(total_sz);
        if(ptr != NULL) break;

        i++; // increase failcount.
        if(!(i & 7)) {
            ShowWarning(read_message("Source.common.mempool_debug2"), (float)total_sz/1024.f/1024.f, i);
#ifdef WIN32
            Sleep(1000);
#else
            sleep(1);
#endif
        } else {
            rathread_yield(); // allow/force a voluntary context switch.
        }
    }//endwhile: allocation spinloop.

    // Clear Memory.
    memset(ptr, 0x00, total_sz);

    // Initialize segment struct.
    seg = (struct pool_segment *)ptr;
    ptr += ALIGN_TO_16(sizeof(struct pool_segment));

    seg->pool = p;
    seg->num_nodes_total = count;
    seg->num_bytes = total_sz;

    // Initialize nodes!
    nodeList = NULL;
    for(i = 0; i < count; i++) {
        node = (struct node *)ptr;
        ptr += sizeof(struct node);
        ptr += p->elem_size;

        node->segment = seg;
#ifdef MEMPOOLASSERT
        node->used = false;
        node->magic = NODE_MAGIC;
#endif
        if(p->onalloc != NULL)
            p->onalloc(NODE_TO_DATA(node));

        node->next = nodeList;
        nodeList = node;
    }

    // Link in Segment.
    EnterSpinLock(&p->segmentLock);
    seg->next = p->segments;
    p->segments = seg;
    LeaveSpinLock(&p->segmentLock);

    // Link in Nodes.
    EnterSpinLock(&p->nodeLock);
    nodeList->next = p->free_list;
    p->free_list = nodeList;
    LeaveSpinLock(&p->nodeLock);

    // Increase Stats:
    InterlockedExchangeAdd64(&p->num_nodes_total, count);
    InterlockedExchangeAdd64(&p->num_nodes_free, count);
    InterlockedIncrement64(&p->num_segments);
    InterlockedExchangeAdd64(&p->num_bytes_total, total_sz);
}//end: segment_allocate_add()
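The stats block above mixes InterlockedExchangeAdd64 and InterlockedIncrement64, which differ in what they return: the former returns the value held before the addition, the latter the value after the increment. A minimal sketch of that difference (both are documented Win32 interlocked operations):

#include <windows.h>

// InterlockedExchangeAdd64 returns the value *before* the addition, while
// InterlockedIncrement64 returns the value *after* the increment.
static volatile LONG64 counter = 0;

void demo(void)
{
    LONG64 before = InterlockedExchangeAdd64(&counter, 4); // before == 0, counter == 4
    LONG64 after  = InterlockedIncrement64(&counter);      // after == 5, counter == 5
}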
inline ::LONGLONG increment(volatile ::LONGLONG& x)
{
    return InterlockedIncrement64(&x);
}
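One caveat from the documented contract applies to all of these thin wrappers: the 64-bit addend must be aligned on a 64-bit boundary, otherwise the call can behave unpredictably on multiprocessor systems. A minimal usage sketch of the wrapper above with the alignment made explicit (the counter name is hypothetical; statically allocated LONGLONGs are normally aligned anyway):

#include <windows.h>

// Hypothetical usage: a shared event counter bumped from many threads.
alignas(8) static volatile ::LONGLONG g_eventCount = 0;

void OnEvent()
{
    // Returns the new value atomically; no lock is needed for the counter itself.
    ::LONGLONG seen = increment(g_eventCount);
    (void)seen;
}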
/*
NicIndicateRecvPackets

Indicates a received packet (MAC frame) to NDIS (and from there to the protocol stack).

Parameters:
    pAdapter     : pointer to the adapter object created by the miniport driver.
    ppNBSPackets : pointer to the NDIS net buffer lists to be indicated up.

Return:

Note:

History:
    Created by yichen, 1/Apr/2009

IRQL: PASSIVE_LEVEL
*/
VOID NicIndicateRecvPackets(IN PMP_ADAPTER pAdapter, IN PPNDIS_PACKETS_OR_NBL ppNBSPackets)
{
    NdisMIndicateReceiveNetBufferLists(pAdapter->AdapterHandle, ppNBSPackets,
        pAdapter->PortNumber, 1, 0);
    InterlockedIncrement64(&pAdapter->ullGoodReceives);
}
NTSTATUS KrnlHlprClassifyDataAcquireLocalCopy(_Inout_ CLASSIFY_DATA* pClassifyData,
                                              _In_ const FWPS_INCOMING_VALUES* pClassifyValues,
                                              _In_ const FWPS_INCOMING_METADATA_VALUES* pMetadata,
                                              _In_opt_ VOID* pPacket,
                                              _In_opt_ const VOID* pClassifyContext,
                                              _In_ const FWPS_FILTER* pFilter,
                                              _In_ const UINT64 flowContext,
                                              _In_ FWPS_CLASSIFY_OUT* pClassifyOut)
{
#if DBG
   DbgPrintEx(DPFLTR_IHVNETWORK_ID,
              DPFLTR_INFO_LEVEL,
              " ---> KrnlHlprClassifyDataAcquireLocalCopy()\n");
#endif /// DBG

   NT_ASSERT(pClassifyData);
   NT_ASSERT(pClassifyValues);
   NT_ASSERT(pMetadata);
   NT_ASSERT(pFilter);
   NT_ASSERT(pClassifyOut);

   NTSTATUS status = STATUS_SUCCESS;

   pClassifyData->pClassifyValues = KrnlHlprFwpsIncomingValuesCreateLocalCopy(pClassifyValues);
   HLPR_BAIL_ON_NULL_POINTER_WITH_STATUS(pClassifyData->pClassifyValues, status);

   pClassifyData->pMetadataValues = KrnlHlprFwpsIncomingMetadataValuesCreateLocalCopy(pMetadata);
   HLPR_BAIL_ON_NULL_POINTER_WITH_STATUS(pClassifyData->pMetadataValues, status);

   if(pPacket)
   {
      if(pClassifyValues->layerId == FWPS_LAYER_STREAM_V4 ||
         pClassifyValues->layerId == FWPS_LAYER_STREAM_V4_DISCARD ||
         pClassifyValues->layerId == FWPS_LAYER_STREAM_V6 ||
         pClassifyValues->layerId == FWPS_LAYER_STREAM_V6_DISCARD)
      {
         pClassifyData->pPacket = KrnlHlprFwpsStreamCalloutIOPacketCreateLocalCopy((FWPS_STREAM_CALLOUT_IO_PACKET*)pPacket);
         HLPR_BAIL_ON_NULL_POINTER_WITH_STATUS(pClassifyData->pPacket, status);
      }
#if(NTDDI_VERSION >= NTDDI_WIN7)
      /// LayerData at the FWPM_LAYER_ALE_{BIND/CONNECT}_REDIRECT_V{4/6} is obtained via KrnlHlprRedirectDataCreate()
      else if(pClassifyValues->layerId == FWPS_LAYER_ALE_CONNECT_REDIRECT_V4 ||
              pClassifyValues->layerId == FWPS_LAYER_ALE_CONNECT_REDIRECT_V6 ||
              pClassifyValues->layerId == FWPS_LAYER_ALE_BIND_REDIRECT_V4 ||
              pClassifyValues->layerId == FWPS_LAYER_ALE_BIND_REDIRECT_V6)
         pClassifyData->pPacket = 0;
#endif /// (NTDDI_VERSION >= NTDDI_WIN7)
      else
      {
         if(NET_BUFFER_LIST_NEXT_NBL((NET_BUFFER_LIST*)pPacket))
         {
            pClassifyData->chainedNBL = TRUE;
            pClassifyData->numChainedNBLs = 1;
         }

         if(pClassifyData->chainedNBL &&
            (
             /// The IPPACKET and IPFORWARD Layers allow for Fragment Grouping if the option is enabled
             pClassifyValues->layerId == FWPS_LAYER_INBOUND_IPPACKET_V4 ||
             pClassifyValues->layerId == FWPS_LAYER_INBOUND_IPPACKET_V6 ||
             pClassifyValues->layerId == FWPS_LAYER_IPFORWARD_V4 ||
             pClassifyValues->layerId == FWPS_LAYER_IPFORWARD_V6
#if(NTDDI_VERSION >= NTDDI_WIN8)
             /// The NDIS layers allow for batched NBLs provided the callout was registered with FWP_CALLOUT_FLAG_ALLOW_L2_BATCH_CLASSIFY set
             || pClassifyValues->layerId == FWPS_LAYER_INBOUND_MAC_FRAME_ETHERNET ||
             pClassifyValues->layerId == FWPS_LAYER_OUTBOUND_MAC_FRAME_ETHERNET ||
             pClassifyValues->layerId == FWPS_LAYER_INBOUND_MAC_FRAME_NATIVE ||
             pClassifyValues->layerId == FWPS_LAYER_OUTBOUND_MAC_FRAME_NATIVE ||
             pClassifyValues->layerId == FWPS_LAYER_INGRESS_VSWITCH_ETHERNET ||
             pClassifyValues->layerId == FWPS_LAYER_EGRESS_VSWITCH_ETHERNET
#endif /// (NTDDI_VERSION >= NTDDI_WIN8)
            ))
         {
            for(NET_BUFFER_LIST* pCurrentNBL = (NET_BUFFER_LIST*)pPacket;
                pCurrentNBL;
                pClassifyData->numChainedNBLs++)
            {
               NET_BUFFER_LIST* pNextNBL = NET_BUFFER_LIST_NEXT_NBL(pCurrentNBL);

               FwpsReferenceNetBufferList(pCurrentNBL, TRUE);

               pCurrentNBL = pNextNBL;

#if DBG
               InterlockedIncrement64((LONG64*)&(g_OutstandingNBLReferences));
#endif /// DBG
            }

            pClassifyData->pPacket = pPacket;
         }
         else
         {
            /// Otherwise we expect to receive a single NBL
            NT_ASSERT(NET_BUFFER_LIST_NEXT_NBL((NET_BUFFER_LIST*)pPacket) == 0);

            FwpsReferenceNetBufferList((NET_BUFFER_LIST*)pPacket, TRUE);

            pClassifyData->pPacket = pPacket;

#if DBG
            InterlockedIncrement64((LONG64*)&(g_OutstandingNBLReferences));
#endif /// DBG
         }
      }
   }

#if(NTDDI_VERSION >= NTDDI_WIN7)
   if(pClassifyContext)
   {
      /// ClassifyHandle for these layers is obtained in REDIRECT_DATA
      if(pClassifyValues->layerId != FWPS_LAYER_ALE_CONNECT_REDIRECT_V4 &&
         pClassifyValues->layerId != FWPS_LAYER_ALE_CONNECT_REDIRECT_V6 &&
         pClassifyValues->layerId != FWPS_LAYER_ALE_BIND_REDIRECT_V4 &&
         pClassifyValues->layerId != FWPS_LAYER_ALE_BIND_REDIRECT_V6)
      {
         status = FwpsAcquireClassifyHandle((VOID*)pClassifyContext,
                                            0,
                                            &(pClassifyData->classifyContextHandle));
         HLPR_BAIL_ON_FAILURE(status);
      }
   }
#else
   UNREFERENCED_PARAMETER(pClassifyContext);
#endif /// (NTDDI_VERSION >= NTDDI_WIN7)

   if(pFilter)
   {
      pClassifyData->pFilter = KrnlHlprFwpsFilterCreateLocalCopy(pFilter);
      HLPR_BAIL_ON_NULL_POINTER_WITH_STATUS(pClassifyData->pFilter, status);
   }

   pClassifyData->flowContext = flowContext;

   if(pClassifyOut)
   {
      pClassifyData->pClassifyOut = KrnlHlprFwpsClassifyOutCreateLocalCopy(pClassifyOut);
      HLPR_BAIL_ON_NULL_POINTER_WITH_STATUS(pClassifyData->pClassifyOut, status);
   }

   HLPR_BAIL_LABEL:

   if(status != STATUS_SUCCESS)
      KrnlHlprClassifyDataReleaseLocalCopy(pClassifyData);

#if DBG
   DbgPrintEx(DPFLTR_IHVNETWORK_ID,
              DPFLTR_INFO_LEVEL,
              " <--- KrnlHlprClassifyDataAcquireLocalCopy() [status: %#x]\n",
              status);
#endif /// DBG

   return status;
}
/// @brief
bool FileInfoCache::get_flie_info(_In_ const wchar_t* path,
                                  _In_ uint64_t create_time,
                                  _In_ uint64_t write_time,
                                  _In_ uint64_t size,
                                  _Out_ std::string& md5_str,
                                  _Out_ std::string& sha2_str)
{
    _ASSERTE(NULL != path);
    if (NULL == path) return false;

    bool ret = false;

    //
    // Paths in the file hash table are stored lower-cased so that lookups are
    // case-insensitive. Convert the path to lower case before using it as a
    // condition in the cache query.
    //
    std::wstring pathl(path);
    to_lower_string<std::wstring>(pathl);

    // run query
    try
    {
        //
        // Clear any data still bound to _select_cache_stmt.
        //
        _select_cache_stmt->reset();

        //
        // Read the file hash (md5, sha2) cache entry.
        //
        _select_cache_stmt->bind(1, WcsToMbsUTF8Ex(pathl.c_str()).c_str());
        _select_cache_stmt->bind(2, static_cast<long long>(create_time));
        _select_cache_stmt->bind(3, static_cast<long long>(write_time));
        _select_cache_stmt->bind(4, static_cast<long long>(size));
        CppSQLite3Query rs = _select_cache_stmt->execQuery();

        //
        // If there is no matching row, leave the function.
        //
        if (true == rs.eof()) return ret;

        md5_str = rs.getStringField(0, "");
        sha2_str = rs.getStringField(1, "");
        int32_t hit_count = rs.getIntField(2);

        //
        // After reading the hash cache entry, increment its hit count and
        // write the updated value back to the table.
        //
        _update_cache_stmt->reset();
        _update_cache_stmt->bind(1, ++hit_count);
        _update_cache_stmt->bind(2, WcsToMbsUTF8Ex(pathl.c_str()).c_str());
        _update_cache_stmt->bind(3, static_cast<long long>(create_time));
        _update_cache_stmt->bind(4, static_cast<long long>(write_time));
        _update_cache_stmt->bind(5, static_cast<long long>(size));

        //
        // When the file's record exists in the table, the query affects
        // exactly one row, so a positive row count means a cache hit.
        //
        if (0 < _update_cache_stmt->execDML())
        {
            InterlockedIncrement64(&_hit_count);
            ret = true;
        }
    }
    catch (CppSQLite3Exception& e)
    {
        log_err
            "sqlite exception. FileInfoCache::get_file_info, ecode = %d, emsg = %s",
            e.errorCode(),
            e.errorMessage()
        log_end;
    }

    return ret;
}
// Note: the cast assumes a 64-bit target, where intptr_t is 64 bits wide.
intptr_t atom_inc(volatile intptr_t *dest)
{
    return InterlockedIncrement64((LONGLONG *)dest);
}
static ssize_t ofi_nd_ep_readv(struct fid_ep *pep, const struct iovec *iov,
                               void **desc, size_t count, fi_addr_t src_addr,
                               uint64_t addr, uint64_t key, void *context)
{
    struct fi_rma_iov rma_iov = {
        .addr = addr,
        .len = iov[0].iov_len,
        .key = key
    };

    struct fi_msg_rma msg = {
        .msg_iov = iov,
        .desc = desc,
        .iov_count = count,
        .addr = src_addr,
        .rma_iov = &rma_iov,
        .rma_iov_count = 1,
        .context = context,
        .data = 0
    };

    assert(pep->fid.fclass == FI_CLASS_EP);

    if (pep->fid.fclass != FI_CLASS_EP)
        return -FI_EINVAL;

    struct nd_ep *ep = container_of(pep, struct nd_ep, fid);

    return ofi_nd_ep_readmsg(pep, &msg, ep->info->rx_attr->op_flags);
}

static ssize_t ofi_nd_ep_readmsg(struct fid_ep *pep, const struct fi_msg_rma *msg,
                                 uint64_t flags)
{
    assert(pep->fid.fclass == FI_CLASS_EP);
    assert(msg);

    if (pep->fid.fclass != FI_CLASS_EP)
        return -FI_EINVAL;

    size_t msg_len = 0, rma_len = 0, i;
    HRESULT hr;

    struct nd_ep *ep = container_of(pep, struct nd_ep, fid);

    if (!ep->qp)
        return -FI_EOPBADSTATE;

    for (i = 0; i < msg->iov_count; i++) {
        if (msg->msg_iov[i].iov_len && !msg->msg_iov[i].iov_base)
            return -FI_EINVAL;
        msg_len += msg->msg_iov[i].iov_len;
    }

    for (i = 0; i < msg->rma_iov_count; i++) {
        if (msg->rma_iov[i].len && !msg->rma_iov[i].addr)
            return -FI_EINVAL;
        rma_len += msg->rma_iov[i].len;
    }

    /* Check the following: */
    if ((msg_len != rma_len) ||             /* - msg and rma len are correlated */
        /* - iov counts are less or equal than supported */
        (msg->iov_count > ND_MSG_IOV_LIMIT ||
         msg->rma_iov_count > ND_MSG_IOV_LIMIT) ||
        /* - transmitted length is less or equal than max possible */
        (msg_len > ep->domain->info->ep_attr->max_msg_size))
        return -FI_EINVAL;

    struct nd_cq_entry *main_entry = ofi_nd_buf_alloc_nd_cq_entry();
    if (!main_entry)
        return -FI_ENOMEM;
    memset(main_entry, 0, sizeof(*main_entry));
    main_entry->data = msg->data;
    main_entry->flags = flags;
    main_entry->domain = ep->domain;
    main_entry->context = msg->context;
    main_entry->seq = InterlockedAdd64(&ep->domain->msg_cnt, 1);

    /* since the read operation can't be canceled, set NULL into
     * the 1st byte of internal data of context */
    if (msg->context)
        ND_FI_CONTEXT(msg->context) = 0;

    struct fi_rma_iov rma_iovecs[ND_MSG_INTERNAL_IOV_LIMIT];
    size_t rma_count = 0;
    size_t from_split_map[ND_MSG_INTERNAL_IOV_LIMIT];
    size_t to_split_map[ND_MSG_INTERNAL_IOV_LIMIT];
    uint64_t remote_addr[ND_MSG_INTERNAL_IOV_LIMIT];

    ofi_nd_split_msg_iov_2_rma_iov(msg->rma_iov, msg->rma_iov_count,
                                   msg->msg_iov, msg->iov_count,
                                   rma_iovecs, &rma_count,
                                   from_split_map, to_split_map, remote_addr);

    assert(rma_count <= ND_MSG_INTERNAL_IOV_LIMIT);

    main_entry->wait_completion.comp_count = 0;
    main_entry->wait_completion.total_count = rma_count;
    InitializeCriticalSection(&main_entry->wait_completion.comp_lock);

    struct nd_cq_entry *entries[ND_MSG_IOV_LIMIT];
    for (i = 0; i < rma_count; i++) {
        entries[i] = ofi_nd_buf_alloc_nd_cq_entry();
        if (!entries[i])
            goto fn_fail;
        memset(entries[i], 0, sizeof(*entries[i]));
        entries[i]->data = msg->data;
        entries[i]->flags = flags;
        entries[i]->domain = ep->domain;
        entries[i]->context = msg->context;
        entries[i]->seq = main_entry->seq;
        entries[i]->aux_entry = main_entry;

        hr = ep->domain->adapter->lpVtbl->CreateMemoryRegion(
            ep->domain->adapter, &IID_IND2MemoryRegion,
            ep->domain->adapter_file, (void**)&entries[i]->mr[0]);
        if (FAILED(hr))
            goto fn_fail;
        entries[i]->mr_count = 1;

        hr = ofi_nd_util_register_mr(
            entries[i]->mr[0], (const void *)remote_addr[i],
            rma_iovecs[i].len,
            ND_MR_FLAG_ALLOW_LOCAL_WRITE |
            ND_MR_FLAG_ALLOW_REMOTE_READ |
            ND_MR_FLAG_ALLOW_REMOTE_WRITE);
        if (FAILED(hr))
            goto fn_fail;

        ND2_SGE sge = {
            .Buffer = (void *)remote_addr[i],
            .BufferLength = (ULONG)rma_iovecs[i].len,
            .MemoryRegionToken = (UINT32)(uintptr_t)msg->desc[to_split_map[i]]
        };

        hr = ep->qp->lpVtbl->Read(ep->qp, entries[i], &sge, 1,
                                  (UINT64)rma_iovecs[i].addr,
                                  (UINT32)rma_iovecs[i].key, 0);
        if (FAILED(hr))
            goto fn_fail;
    }

    return FI_SUCCESS;

fn_fail:
    while (i-- > 0)
        ofi_nd_free_cq_entry(entries[i]);

    ND_LOG_WARN(FI_LOG_EP_DATA, ofi_nd_strerror((DWORD)hr, NULL));
    return H2F(hr);
}

static ssize_t ofi_nd_ep_write(struct fid_ep *ep, const void *buf, size_t len,
                               void *desc, fi_addr_t dest_addr, uint64_t addr,
                               uint64_t key, void *context)
{
    struct iovec iov = {
        .iov_base = (void*)buf,
        .iov_len = len
    };

    return ofi_nd_ep_writev(ep, &iov, &desc, 1, dest_addr, addr, key, context);
}

static ssize_t ofi_nd_ep_writev(struct fid_ep *pep, const struct iovec *iov,
                                void **desc, size_t count, fi_addr_t dest_addr,
                                uint64_t addr, uint64_t key, void *context)
{
    struct fi_rma_iov rma_iov = {
        .addr = addr,
        .len = iov[0].iov_len,
        .key = key
    };

    struct fi_msg_rma msg = {
        .msg_iov = iov,
        .desc = desc,
        .iov_count = count,
        .addr = dest_addr,
        .rma_iov = &rma_iov,
        .rma_iov_count = 1,
        .context = context,
        .data = 0
    };

    assert(pep->fid.fclass == FI_CLASS_EP);

    if (pep->fid.fclass != FI_CLASS_EP)
        return -FI_EINVAL;

    struct nd_ep *ep = container_of(pep, struct nd_ep, fid);

    return ofi_nd_ep_writemsg(pep, &msg, ep->info->tx_attr->op_flags);
}

static ssize_t ofi_nd_ep_writemsg(struct fid_ep *pep, const struct fi_msg_rma *msg,
                                  uint64_t flags)
{
    assert(pep->fid.fclass == FI_CLASS_EP);
    assert(msg);

    if (pep->fid.fclass != FI_CLASS_EP)
        return -FI_EINVAL;

    size_t msg_len = 0, rma_len = 0, i;
    HRESULT hr;

    struct nd_cq_entry *entries[ND_MSG_IOV_LIMIT];

    struct nd_ep *ep = container_of(pep, struct nd_ep, fid);

    if (!ep->qp)
        return -FI_EOPBADSTATE;

    for (i = 0; i < msg->iov_count; i++) {
        if (msg->msg_iov[i].iov_len && !msg->msg_iov[i].iov_base)
            return -FI_EINVAL;
        msg_len += msg->msg_iov[i].iov_len;
    }

    if ((msg_len > ep->domain->info->ep_attr->max_msg_size) &&
        (flags & FI_INJECT))
        return -FI_EINVAL;

    for (i = 0; i < msg->rma_iov_count; i++) {
        if (msg->rma_iov[i].len && !msg->rma_iov[i].addr)
            return -FI_EINVAL;
        rma_len += msg->rma_iov[i].len;
    }

    /* Check the following: */
    if ((msg_len != rma_len) ||             /* - msg and rma len are correlated */
        /* - iov counts are less or equal than supported */
        ((msg->iov_count > ND_MSG_IOV_LIMIT ||
          msg->rma_iov_count > ND_MSG_IOV_LIMIT)) ||
        /* - transmitted length is less or equal than max possible */
        (msg_len > ep->domain->info->ep_attr->max_msg_size) ||
        /* - if INJECT, data should be inlined */
        ((flags & FI_INJECT) &&
         (msg_len > ep->domain->info->tx_attr->inject_size)))
        return -FI_EINVAL;

    struct nd_cq_entry *main_entry = ofi_nd_buf_alloc_nd_cq_entry();
    if (!main_entry)
        return -FI_ENOMEM;
    memset(main_entry, 0, sizeof(*main_entry));
    main_entry->data = msg->data;
    main_entry->flags = flags;
    main_entry->domain = ep->domain;
    main_entry->context = msg->context;
    main_entry->seq = InterlockedAdd64(&ep->domain->msg_cnt, 1);

    /* since the write operation can't be canceled, set NULL into
     * the 1st byte of internal data of context */
    if (msg->context)
        ND_FI_CONTEXT(msg->context) = 0;

    /* TODO */
    if (msg_len > (size_t)gl_data.inline_thr) {
        struct fi_rma_iov rma_iovecs[ND_MSG_INTERNAL_IOV_LIMIT];
        size_t rma_count = 0;
        size_t from_split_map[ND_MSG_INTERNAL_IOV_LIMIT];
        size_t to_split_map[ND_MSG_INTERNAL_IOV_LIMIT];
        uint64_t remote_addr[ND_MSG_INTERNAL_IOV_LIMIT];

        ofi_nd_split_msg_iov_2_rma_iov(msg->rma_iov, msg->rma_iov_count,
                                       msg->msg_iov, msg->iov_count,
                                       rma_iovecs, &rma_count,
                                       from_split_map, to_split_map,
                                       remote_addr);

        assert(rma_count <= ND_MSG_INTERNAL_IOV_LIMIT);

        main_entry->wait_completion.comp_count = 0;
        main_entry->wait_completion.total_count = rma_count;
        InitializeCriticalSection(&main_entry->wait_completion.comp_lock);

        for (i = 0; i < rma_count; i++) {
            entries[i] = ofi_nd_buf_alloc_nd_cq_entry();
            if (!entries[i])
                goto fn_fail;
            memset(entries[i], 0, sizeof(*entries[i]));
            entries[i]->data = msg->data;
            entries[i]->flags = flags;
            entries[i]->domain = ep->domain;
            entries[i]->context = msg->context;
            entries[i]->seq = main_entry->seq;
            entries[i]->aux_entry = main_entry;

            ND2_SGE sge = {
                .Buffer = (void *)remote_addr[i],
                .BufferLength = (ULONG)rma_iovecs[i].len,
                .MemoryRegionToken = (UINT32)(uintptr_t)msg->desc[to_split_map[i]]
            };

            hr = ep->qp->lpVtbl->Write(ep->qp, entries[i], &sge, 1,
                                       (UINT64)rma_iovecs[i].addr,
                                       (UINT32)rma_iovecs[i].key, 0);
            if (FAILED(hr))
                goto fn_fail;
        }

        return FI_SUCCESS;
    } else {
        if (msg_len) {
            main_entry->inline_buf = __ofi_nd_buf_alloc_nd_inlinebuf(&ep->domain->inlinebuf);
            if (!main_entry->inline_buf)
                return -FI_ENOMEM;

            char *buf = (char*)main_entry->inline_buf->buffer;
            for (i = 0; i < msg->iov_count; i++) {
                memcpy(buf, msg->msg_iov[i].iov_base,
                       msg->msg_iov[i].iov_len);
                buf += msg->msg_iov[i].iov_len;
            }
        }

        for (i = 0; i < msg->rma_iov_count; i++) {
            char *buf = (char *)main_entry->inline_buf->buffer;

            entries[i] = ofi_nd_buf_alloc_nd_cq_entry();
            if (!entries[i])
                goto fn_fail;
            memset(entries[i], 0, sizeof(*entries[i]));
            entries[i]->data = msg->data;
            entries[i]->flags = flags;
            entries[i]->domain = ep->domain;
            entries[i]->context = msg->context;
            entries[i]->seq = main_entry->seq;
            entries[i]->aux_entry = main_entry;

            ND2_SGE sge = {
                .Buffer = (void *)(buf + msg->rma_iov[i].len),
                .BufferLength = (ULONG)msg->rma_iov[i].len,
                .MemoryRegionToken = main_entry->inline_buf->token
            };

            hr = ep->qp->lpVtbl->Write(ep->qp, entries[i], &sge, 1,
                                       (UINT64)msg->rma_iov[i].addr,
                                       (UINT32)msg->rma_iov[i].key, 0);
            if (FAILED(hr))
                goto fn_fail;
        }

        return FI_SUCCESS;
    }

fn_fail:
    while (i-- > 0)
        ofi_nd_free_cq_entry(entries[i]);

    ND_LOG_WARN(FI_LOG_EP_DATA, ofi_nd_strerror((DWORD)hr, NULL));
    return H2F(hr);
}

static ssize_t ofi_nd_ep_inject(struct fid_ep *pep, const void *buf, size_t len,
                                fi_addr_t dest_addr, uint64_t addr, uint64_t key)
{
    struct iovec iov = {
        .iov_base = (void*)buf,
        .iov_len = len
    };

    struct fi_rma_iov rma_iov = {
        .addr = addr,
        .len = len,
        .key = key
    };

    struct fi_msg_rma msg = {
        .msg_iov = &iov,
        .desc = 0,
        .iov_count = 1,
        .addr = dest_addr,
        .rma_iov = &rma_iov,
        .rma_iov_count = 1,
        .context = 0,
        .data = 0
    };

    return ofi_nd_ep_writemsg(pep, &msg, FI_INJECT);
}

static ssize_t ofi_nd_ep_writedata(struct fid_ep *pep, const void *buf, size_t len,
                                   void *desc, uint64_t data, fi_addr_t dest_addr,
                                   uint64_t addr, uint64_t key, void *context)
{
    struct iovec iov = {
        .iov_base = (void*)buf,
        .iov_len = len
    };

    struct fi_rma_iov rma_iov = {
        .addr = addr,
        .len = len,
        .key = key
    };

    struct fi_msg_rma msg = {
        .msg_iov = &iov,
        .desc = &desc,
        .iov_count = 1,
        .addr = dest_addr,
        .rma_iov = &rma_iov,
        .rma_iov_count = 1,
        .context = context,
        .data = data
    };

    assert(pep->fid.fclass == FI_CLASS_EP);

    if (pep->fid.fclass != FI_CLASS_EP)
        return -FI_EINVAL;

    struct nd_ep *ep = container_of(pep, struct nd_ep, fid);

    return ofi_nd_ep_writemsg(pep, &msg, ep->info->tx_attr->op_flags | FI_REMOTE_CQ_DATA);
}

static ssize_t ofi_nd_ep_writeinjectdata(struct fid_ep *ep, const void *buf,
                                         size_t len, uint64_t data,
                                         fi_addr_t dest_addr, uint64_t addr,
                                         uint64_t key)
{
    struct iovec iov = {
        .iov_base = (void*)buf,
        .iov_len = len
    };

    struct fi_rma_iov rma_iov = {
        .addr = addr,
        .len = len,
        .key = key
    };

    struct fi_msg_rma msg = {
        .msg_iov = &iov,
        .desc = 0,
        .iov_count = 1,
        .addr = dest_addr,
        .rma_iov = &rma_iov,
        .rma_iov_count = 1,
        .context = 0,
        .data = data
    };

    return ofi_nd_ep_writemsg(ep, &msg, FI_INJECT | FI_REMOTE_CQ_DATA);
}

void ofi_nd_read_event(ND2_RESULT *result)
{
    assert(result);
    assert(result->RequestType == Nd2RequestTypeRead);

    nd_cq_entry *entry = (nd_cq_entry*)result->RequestContext;
    assert(entry);

    ND_LOG_EVENT_INFO(entry);

    /* Check whether the operation is complex, i.e. the read operation
     * may consist of several read subtasks */
    if (entry->aux_entry) {
        EnterCriticalSection(&entry->aux_entry->wait_completion.comp_lock);
        entry->aux_entry->wait_completion.comp_count++;
        ND_LOG_DEBUG(FI_LOG_EP_DATA, "READ Event comp_count = %d, total_count = %d\n",
                     entry->aux_entry->wait_completion.comp_count,
                     entry->aux_entry->wait_completion.total_count);

        if (entry->aux_entry->wait_completion.comp_count <
            entry->aux_entry->wait_completion.total_count) {
            /* Still waiting for the remaining completion events of the read operation */
            LeaveCriticalSection(&entry->aux_entry->wait_completion.comp_lock);
            entry->aux_entry = NULL;
            ofi_nd_free_cq_entry(entry);
            return;
        }
        LeaveCriticalSection(&entry->aux_entry->wait_completion.comp_lock);
    }

    /* TODO: Handle the erroneous case "result->Status != S_OK" */
    ofi_nd_dispatch_cq_event(entry->state == LARGE_MSG_RECV_REQ ?
                             LARGE_MSG_REQ : NORMAL_EVENT, entry, result);
}

void ofi_nd_write_event(ND2_RESULT *result)
{
    assert(result);
    assert(result->RequestType == Nd2RequestTypeWrite);

    nd_cq_entry *entry = (nd_cq_entry*)result->RequestContext;
    assert(entry);

    struct nd_ep *ep = (struct nd_ep*)result->QueuePairContext;
    assert(ep);
    assert(ep->fid.fid.fclass == FI_CLASS_EP);

    ND_LOG_EVENT_INFO(entry);

    /* Check whether the operation is complex, i.e. the write operation
     * may consist of several write subtasks */
    if (entry->aux_entry) {
        EnterCriticalSection(&entry->aux_entry->wait_completion.comp_lock);
        entry->aux_entry->wait_completion.comp_count++;

        if (entry->aux_entry->wait_completion.comp_count <
            entry->aux_entry->wait_completion.total_count) {
            /* Still waiting for the remaining completion events of the write operation */
            LeaveCriticalSection(&entry->aux_entry->wait_completion.comp_lock);
            entry->aux_entry = NULL;
            ofi_nd_free_cq_entry(entry);
            return;
        }
        LeaveCriticalSection(&entry->aux_entry->wait_completion.comp_lock);
    }

    if (!entry->context) {
        /* This means that this write was an internal event,
         * just release it */
        ofi_nd_free_cq_entry(entry);
        return;
    }

    if (entry->flags & FI_REMOTE_CQ_DATA) {
        if (ofi_nd_ep_injectdata(&ep->fid, 0, 0, entry->data,
                                 FI_ADDR_UNSPEC) != FI_SUCCESS)
            ND_LOG_WARN(FI_LOG_CQ, "failed to write-inject");
    }

    if (ep->cntr_write) {
        if (result->Status != S_OK)
            InterlockedIncrement64(&ep->cntr_write->err);
        InterlockedIncrement64(&ep->cntr_write->counter);
        WakeByAddressAll((void*)&ep->cntr_write->counter);
    }

    int notify = ofi_nd_util_completion_blackmagic(
        ep->info->tx_attr->op_flags, ep->send_flags, entry->flags) ||
        result->Status != S_OK;

    if (notify) {
        PostQueuedCompletionStatus(
            entry->result.Status == S_OK ?
            ep->cq_send->iocp : ep->cq_send->err,
            0, 0, &entry->base.ov);
        InterlockedIncrement(&ep->cq_send->count);
        WakeByAddressAll((void*)&ep->cq_send->count);
    } else {
        /* if notification is not requested - just free the entry */
        ofi_nd_free_cq_entry(entry);
    }
}

void ofi_nd_split_msg_iov_2_rma_iov(const struct fi_rma_iov *rma_iovecs,
                                    const size_t rma_count,
                                    const struct iovec *msg_iovecs,
                                    const size_t msg_count,
                                    struct fi_rma_iov res_iovecs[ND_MSG_INTERNAL_IOV_LIMIT],
                                    size_t *res_count,
                                    size_t from_split_map[ND_MSG_INTERNAL_IOV_LIMIT],
                                    size_t to_split_map[ND_MSG_INTERNAL_IOV_LIMIT],
                                    uint64_t remote_addr[ND_MSG_INTERNAL_IOV_LIMIT])
{
    size_t i;

    struct iovec from_rma_iovecs[ND_MSG_IOV_LIMIT];
    size_t from_rma_count = rma_count;

    struct iovec res_msg_iovecs[ND_MSG_IOV_LIMIT];
    size_t res_msg_count = 0;

    /* Convert RMA iovecs to MSG iovecs to be able to reuse
     * them in @ofi_nd_repack_iovecs */
    for (i = 0; i < rma_count; i++) {
        from_rma_iovecs[i].iov_base = (void *)rma_iovecs[i].addr;
        from_rma_iovecs[i].iov_len = rma_iovecs[i].len;
    }

    ofi_nd_repack_iovecs(from_rma_iovecs, from_rma_count,
                         msg_iovecs, msg_count,
                         res_msg_iovecs, &res_msg_count,
                         from_split_map, to_split_map, remote_addr);

    /* Extract the MSG iovecs into RMA iovecs and return them */
    for (i = 0; i < res_msg_count; i++) {
        res_iovecs[i].addr = remote_addr[i];
        res_iovecs[i].len = res_msg_iovecs[i].iov_len;
        res_iovecs[i].key = rma_iovecs[from_split_map[i]].key;

        remote_addr[i] = (uint64_t)res_msg_iovecs[i].iov_base;
    }

    *res_count = res_msg_count;
}
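The counter updates in ofi_nd_write_event are paired with WakeByAddressAll; the waiting side of that pattern does not appear in this excerpt, but it would typically block in WaitOnAddress. A minimal sketch of such a waiter, assuming a plain 64-bit completion counter (names hypothetical; link against Synchronization.lib):

#include <windows.h>

// Hypothetical waiter: block until *counter changes from last_seen.
// WaitOnAddress returns when the value at the address differs from the compare
// buffer or a WakeByAddress* call fires; re-check in a loop to guard against
// spurious wakeups.
static void wait_for_counter_change(volatile LONG64 *counter, LONG64 last_seen)
{
    LONG64 observed = *counter;
    while (observed == last_seen) {
        WaitOnAddress((volatile VOID *)counter, &observed, sizeof(observed), INFINITE);
        observed = *counter;
    }
}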
template<typename T>
static T inc(volatile T* scalar)
{
    return (T)InterlockedIncrement64((volatile LONGLONG*)scalar);
}
_ALWAYS_INLINE_ uint64_t _atomic_increment_impl(register uint64_t *pw)
{
    return InterlockedIncrement64((LONGLONG volatile *)pw);
}
/*
PhyDot11BRx

Demodulates the RX sample stream from the radio and saves the byte stream
into a ULCB (receive control block).

Parameters:
    pPhy   : Pointer to PHY
    uRadio : Radio number from which we get the sample stream
    pRCB   : free ULCB to accommodate the byte stream

Return:
    BB11B_OK_FRAME if a good frame is decoded, otherwise an error number.

History:
    12/May/2009 Created by yichen
    23/Nov/2009 Code refactoring by senxiang

IRQL:
*/
HRESULT PhyDot11BRx(IN PPHY pPhy, IN UINT uRadio, IN PULCB pRCB)
{
    HRESULT hRes = E_FAIL;
    PSORA_RADIO pRadio = pPhy->pRadios[uRadio];
    ULONG fContinue;

    if(pPhy->BBContextFor11B.fCanWork)
    {
        pPhy->BBContextFor11B.RxContext.b_shiftRight =
            __GetDagcShiftRightBits(pPhy->BBContextFor11B.SpdContext.b_evalEnergy);
        BB11BPrepareRx(&pPhy->BBContextFor11B.RxContext,
            pRCB->pVirtualAddress,
            pRCB->BufSize); // PHY context is registered when SoraRadioInitialize is called
        do
        {
            fContinue = 0;
            hRes = BB11BRx(&(pPhy->BBContextFor11B.RxContext), SORA_GET_RX_STREAM(pRadio));
            //DbgPrint("[RX] BB11BRx:%08X\n", hRes);
            switch (hRes)
            {
            case BB11B_OK_FRAME:
                // Front-end AGC feedback logic.
                // Note: only do AGC after receiving a good frame, not after a bad frame,
                // since the frame is possibly not for the local MAC address.
                // TODO: do AGC only after receiving a good frame for the local MAC address.
                if (pPhy->BBContextFor11B.SpdContext.b_gainLevelNext !=
                    pPhy->BBContextFor11B.SpdContext.b_gainLevel)
                {
                    unsigned int newGainLevel = pPhy->BBContextFor11B.SpdContext.b_gainLevelNext;
                    __ApplyPresetGain(pPhy, newGainLevel);
                }
                BB11B_RX_TO_PD(&(pPhy->BBContextFor11B.SpdContext));
                pRCB->PacketLength = (USHORT)(pPhy->BBContextFor11B.RxContext.BB11bCommon.b_length);
                pRCB->CRC32 = *((PULONG)(pRCB->pVirtualAddress + pRCB->PacketLength - 4));
                break;
            case BB11B_E_SFD:
            case BB11B_E_ENERGY:
            case BB11B_E_DATA:
                BB11B_RX_TO_PD(&(pPhy->BBContextFor11B.SpdContext));
                if(hRes == BB11B_E_DATA)
                    InterlockedIncrement64(&pPhy->ullReceiveCRC32Error);
                DbgPrint("[RX][Warning] BB11BRx fail:%08X\n", hRes);
                break;
            case E_FETCH_SIGNAL_HW_TIMEOUT:
                BB11B_RX_TO_PD(&(pPhy->BBContextFor11B.SpdContext));
                DbgPrint("[RX][Error] BB11BRx fail:%08X\n", hRes);
                break;
            default:
                BB11B_RX_TO_RX(&(pPhy->BBContextFor11B.RxContext));
                DbgPrint("[RX][Warning] BB11BRx Continuous Error : %08X\n", hRes);
                fContinue = 1;
            }
            if (!fContinue)
                break;
        } while(pPhy->BBContextFor11B.fCanWork);
    }

    return hRes;
}
NTSTATUS AIMWrFltrWrite(IN PDEVICE_OBJECT DeviceObject, IN PIRP Irp)
{
    PDEVICE_EXTENSION device_extension = (PDEVICE_EXTENSION)DeviceObject->DeviceExtension;

    PIO_STACK_LOCATION io_stack = IoGetCurrentIrpStackLocation(Irp);

    NTSTATUS status;

    if (!device_extension->Statistics.IsProtected)
    {
        return AIMWrFltrSendToNextDriver(DeviceObject, Irp);
    }

    if (!device_extension->Statistics.Initialized)
    {
        status = AIMWrFltrInitializeDiffDevice(device_extension);

        if (!NT_SUCCESS(status))
        {
            status = STATUS_MEDIA_WRITE_PROTECTED;
            Irp->IoStatus.Information = 0;
            Irp->IoStatus.Status = status;
            IoCompleteRequest(Irp, IO_NO_INCREMENT);
            return status;
        }
    }

    InterlockedIncrement64(&device_extension->Statistics.WriteRequests);

    if (io_stack->Parameters.Write.Length == 0)
    {
        // Turn a zero-byte write request into a read request to take
        // advantage of just bound checks etc by target device driver
        IoGetNextIrpStackLocation(Irp)->MajorFunction = IRP_MJ_READ;

        return AIMWrFltrSendToNextDriver(DeviceObject, Irp);
    }

    InterlockedExchangeAdd64(&device_extension->Statistics.WrittenBytes,
        io_stack->Parameters.Write.Length);

    LONGLONG highest_byte =
        io_stack->Parameters.Write.ByteOffset.QuadPart +
        io_stack->Parameters.Write.Length;

    if ((io_stack->Parameters.Write.ByteOffset.QuadPart >=
        device_extension->Statistics.DiffDeviceVbr.Fields.Head.Size.QuadPart) ||
        (highest_byte <= 0) ||
        (highest_byte > device_extension->Statistics.DiffDeviceVbr.Fields.Head.Size.QuadPart))
    {
        Irp->IoStatus.Status = STATUS_END_OF_MEDIA;
        IoCompleteRequest(Irp, IO_NO_INCREMENT);
        KdBreakPoint();
        return STATUS_END_OF_MEDIA;
    }

    if (io_stack->Parameters.Write.Length >
        device_extension->Statistics.LargestWriteSize)
    {
        device_extension->Statistics.LargestWriteSize =
            io_stack->Parameters.Write.Length;

        KdPrint(("AIMWrFltrWrite: Largest write size is now %u KB\n",
            device_extension->Statistics.LargestWriteSize >> 10));
    }
int64_t Inc64(volatile int64_t *i)
{
    return (int64_t)InterlockedIncrement64((volatile LONG64 *)i);
}
/// @brief
bool FileInfoCache::insert_file_info(_In_ const wchar_t* path,
                                     _In_ uint64_t create_time,
                                     _In_ uint64_t write_time,
                                     _In_ uint64_t size,
                                     _In_ const char* md5_str,
                                     _In_ const char* sha2_str)
{
    _ASSERTE(NULL != path);
    _ASSERTE(NULL != md5_str);
    _ASSERTE(NULL != sha2_str);
    if (NULL == path) return false;
    if (NULL == md5_str) return false;
    if (NULL == sha2_str) return false;

    // run query
    bool ret = false;

    //
    // Store the path lower-cased so that file hash table lookups are
    // case-insensitive.
    //
    std::wstring pathl(path);
    to_lower_string<std::wstring>(pathl);

    try
    {
        //
        // Clear any data still bound to _insert_cache_stmt.
        //
        _insert_cache_stmt->reset();

        //
        // Store the file info (path, create_time, write_time, md5, sha2) in the cache.
        //
        _insert_cache_stmt->bind(1, WcsToMbsUTF8Ex(pathl.c_str()).c_str());
        _insert_cache_stmt->bind(2, static_cast<long long>(create_time));
        _insert_cache_stmt->bind(3, static_cast<long long>(write_time));
        _insert_cache_stmt->bind(4, static_cast<long long>(size));
        _insert_cache_stmt->bind(5, md5_str);
        _insert_cache_stmt->bind(6, sha2_str);

        //
        // A successful insert affects exactly one row, so a positive row
        // count means the record was stored.
        //
        if (0 < _insert_cache_stmt->execDML())
        {
            InterlockedIncrement64(&_size);
            ret = true;
        }
    }
    catch (CppSQLite3Exception& e)
    {
        log_err
            "sqlite exception. FileInfoCache::insert_file_info, ecode = %d, emsg = %s",
            e.errorCode(),
            e.errorMessage()
        log_end;
    }

    return ret;
}