VOID
FASTCALL
KiEnterV86Mode(IN ULONG_PTR StackFrameUnaligned)
{
    PKTHREAD Thread;
    PKV8086_STACK_FRAME StackFrame = (PKV8086_STACK_FRAME)(ROUND_UP(StackFrameUnaligned - 4, 16) + 4);
    PKTRAP_FRAME TrapFrame = &StackFrame->TrapFrame;
    PKV86_FRAME V86Frame = &StackFrame->V86Frame;
    PFX_SAVE_AREA NpxFrame = &StackFrame->NpxArea;
    ASSERT((ULONG_PTR)NpxFrame % 16 == 0);

    /* Build fake user-mode trap frame */
    TrapFrame->SegCs = KGDT_R0_CODE | RPL_MASK;
    TrapFrame->SegEs = TrapFrame->SegDs = TrapFrame->SegFs = TrapFrame->SegGs = 0;
    TrapFrame->ErrCode = 0;

    /* Get the current thread's initial stack */
    Thread = KeGetCurrentThread();
    V86Frame->ThreadStack = KiGetThreadNpxArea(Thread);

    /* Save TEB addresses */
    V86Frame->ThreadTeb = Thread->Teb;
    V86Frame->PcrTeb = KeGetPcr()->NtTib.Self;

    /* Save return EIP */
    TrapFrame->Eip = (ULONG_PTR)Ki386BiosCallReturnAddress;

    /* Save our stack (after the frames) */
    TrapFrame->Esi = StackFrameUnaligned;
    TrapFrame->Edi = (ULONG_PTR)_AddressOfReturnAddress() + 4;

    /* Sanitize EFlags and enable interrupts */
    TrapFrame->EFlags = __readeflags() & 0x60DD7;
    TrapFrame->EFlags |= EFLAGS_INTERRUPT_MASK;

    /* Fill out the rest of the frame */
    TrapFrame->HardwareSegSs = KGDT_R3_DATA | RPL_MASK;
    TrapFrame->HardwareEsp = 0x11FFE;
    TrapFrame->ExceptionList = EXCEPTION_CHAIN_END;
    TrapFrame->Dr7 = 0;

    /* Set some debug fields if trap debugging is enabled */
    KiFillTrapFrameDebug(TrapFrame);

    /* Disable interrupts */
    _disable();

    /* Copy the thread's NPX frame */
    RtlCopyMemory(NpxFrame, V86Frame->ThreadStack, sizeof(FX_SAVE_AREA));

    /* Clear exception list */
    KeGetPcr()->NtTib.ExceptionList = EXCEPTION_CHAIN_END;

    /* Set new ESP0 */
    KeGetPcr()->TSS->Esp0 = (ULONG_PTR)&TrapFrame->V86Es;

    /* Set new initial stack */
    Thread->InitialStack = V86Frame;

    /* Set VDM TEB */
    Thread->Teb = (PTEB)TRAMPOLINE_TEB;
    KiSetTebBase(KeGetPcr(), (PVOID)TRAMPOLINE_TEB);

    /* Enable interrupts */
    _enable();

    /* Start VDM execution */
    NtVdmControl(VdmStartExecution, NULL);

    /* Exit to V86 mode */
    KiEoiHelper(TrapFrame);
}
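// Aside (not part of the ReactOS code above): on 32-bit x86,
// (ULONG_PTR)_AddressOfReturnAddress() + 4 is the stack pointer the caller
// gets back after `ret` -- the slot just past the return address, which for
// __cdecl is also where the first stack argument lives. A minimal user-mode
// sketch of that arithmetic (demo_callee is a hypothetical name; MSVC, x86):
#include <stdio.h>
#include <intrin.h>

__declspec(noinline) static void demo_callee(int first_arg)
{
    void* post_return_esp = (char*)_AddressOfReturnAddress() + 4;

    /* Both lines should print the same address. */
    printf("post-return ESP: %p\n", post_return_esp);
    printf("&first_arg:      %p\n", (void*)&first_arg);
}

int main(void)
{
    demo_callee(42);
    return 0;
}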
__declspec(noreturn) void __cdecl
__report_gsfailure (ULONG_PTR StackCookie)
{
  volatile UINT_PTR cookie[2] __MINGW_ATTRIB_UNUSED;
#if defined(_WIN64) && !defined(__aarch64__)
  ULONG64 controlPC, imgBase, establisherFrame;
  PRUNTIME_FUNCTION fctEntry;
  PVOID hndData;

  RtlCaptureContext (&GS_ContextRecord);
  controlPC = GS_ContextRecord.Rip;
  fctEntry = RtlLookupFunctionEntry (controlPC, &imgBase, NULL);
  if (fctEntry != NULL)
    {
      RtlVirtualUnwind (UNW_FLAG_NHANDLER, imgBase, controlPC, fctEntry,
                        &GS_ContextRecord, &hndData, &establisherFrame, NULL);
    }
  else
#endif /* _WIN64 */
    {
#if defined(__x86_64__) || defined(_AMD64_)
      GS_ContextRecord.Rip = (ULONGLONG) _ReturnAddress();
      GS_ContextRecord.Rsp = (ULONGLONG) _AddressOfReturnAddress() + 8;
#elif defined(__i386__) || defined(_X86_)
      GS_ContextRecord.Eip = (DWORD) _ReturnAddress();
      GS_ContextRecord.Esp = (DWORD) _AddressOfReturnAddress() + 4;
#elif defined(__arm__) || defined(_ARM_)
      GS_ContextRecord.Pc = (DWORD) _ReturnAddress();
      GS_ContextRecord.Sp = (DWORD) _AddressOfReturnAddress() + 4;
#endif /* _WIN64 */
    }

#if defined(__x86_64__) || defined(_AMD64_)
  GS_ExceptionRecord.ExceptionAddress = (PVOID) GS_ContextRecord.Rip;
  GS_ContextRecord.Rcx = StackCookie;
#elif defined(__i386__) || defined(_X86_)
  GS_ExceptionRecord.ExceptionAddress = (PVOID) GS_ContextRecord.Eip;
  GS_ContextRecord.Ecx = StackCookie;
#elif defined(__arm__) || defined(_ARM_)
  GS_ExceptionRecord.ExceptionAddress = (PVOID) GS_ContextRecord.Pc;
  UNUSED_PARAM(StackCookie);
#endif /* _WIN64 */

  GS_ExceptionRecord.ExceptionCode = STATUS_STACK_BUFFER_OVERRUN;
  GS_ExceptionRecord.ExceptionFlags = EXCEPTION_NONCONTINUABLE;
  cookie[0] = __security_cookie;
  cookie[1] = __security_cookie_complement;

  SetUnhandledExceptionFilter (NULL);
  UnhandledExceptionFilter ((EXCEPTION_POINTERS *) &GS_ExceptionPointers);
  TerminateProcess (GetCurrentProcess (), STATUS_STACK_BUFFER_OVERRUN);
  abort();
}
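/* For context: the GS_* globals referenced above are defined elsewhere in
   mingw-w64's gs_support.c. Their shape is roughly the following
   (paraphrased from memory, not a verbatim copy): */
static EXCEPTION_RECORD GS_ExceptionRecord;
static CONTEXT GS_ContextRecord;
static const EXCEPTION_POINTERS GS_ExceptionPointers = {
  &GS_ExceptionRecord, &GS_ContextRecord
};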
// The following code gets exception pointers using a workaround found in CRT code.
void GetExceptionPointers(DWORD dwExceptionCode,
                          EXCEPTION_POINTERS* pExceptionPointers)
{
    // The following code was taken from VC++ 8.0 CRT (invarg.c: line 104)
    CONTEXT ContextRecord;
    memset(&ContextRecord, 0, sizeof(CONTEXT));

#ifdef _X86_
    __asm {
        mov dword ptr [ContextRecord.Eax], eax
        mov dword ptr [ContextRecord.Ecx], ecx
        mov dword ptr [ContextRecord.Edx], edx
        mov dword ptr [ContextRecord.Ebx], ebx
        mov dword ptr [ContextRecord.Esi], esi
        mov dword ptr [ContextRecord.Edi], edi
        mov word ptr [ContextRecord.SegSs], ss
        mov word ptr [ContextRecord.SegCs], cs
        mov word ptr [ContextRecord.SegDs], ds
        mov word ptr [ContextRecord.SegEs], es
        mov word ptr [ContextRecord.SegFs], fs
        mov word ptr [ContextRecord.SegGs], gs
        pushfd
        pop [ContextRecord.EFlags]
    }

    ContextRecord.ContextFlags = CONTEXT_CONTROL;
#pragma warning(push)
#pragma warning(disable:4311)
    ContextRecord.Eip = (ULONG)_ReturnAddress();
    ContextRecord.Esp = (ULONG)_AddressOfReturnAddress();
#pragma warning(pop)
    ContextRecord.Ebp = *((ULONG *)_AddressOfReturnAddress() - 1);
#elif defined (_IA64_) || defined (_AMD64_)
    /* Need to fill up the Context in IA64 and AMD64. */
    RtlCaptureContext(&ContextRecord);
#else  /* defined (_IA64_) || defined (_AMD64_) */
    ZeroMemory(&ContextRecord, sizeof(ContextRecord));
#endif  /* defined (_IA64_) || defined (_AMD64_) */

    memcpy(pExceptionPointers->ContextRecord, &ContextRecord, sizeof(CONTEXT));
    ZeroMemory(pExceptionPointers->ExceptionRecord, sizeof(EXCEPTION_RECORD));
    pExceptionPointers->ExceptionRecord->ExceptionCode = dwExceptionCode;
    pExceptionPointers->ExceptionRecord->ExceptionAddress = _ReturnAddress();
}
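// A possible use of the helper above: synthesizing EXCEPTION_POINTERS for a
// MiniDumpWriteDump call outside any __except block. Sketch only; the file
// name and E_FAIL code are arbitrary choices, and the caller must point the
// record/context members at its own storage first. Link against dbghelp.lib.
#include <windows.h>
#include <dbghelp.h>

static void WriteDumpHere(void)
{
    EXCEPTION_RECORD record;
    CONTEXT context;
    EXCEPTION_POINTERS pointers;
    pointers.ExceptionRecord = &record;   // caller-owned storage, as
    pointers.ContextRecord = &context;    // GetExceptionPointers expects
    GetExceptionPointers(E_FAIL, &pointers);

    HANDLE hFile = CreateFileW(L"crash.dmp", GENERIC_WRITE, 0, NULL,
                               CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL);
    if (hFile == INVALID_HANDLE_VALUE)
        return;

    MINIDUMP_EXCEPTION_INFORMATION mei;
    mei.ThreadId = GetCurrentThreadId();
    mei.ExceptionPointers = &pointers;
    mei.ClientPointers = FALSE;
    MiniDumpWriteDump(GetCurrentProcess(), GetCurrentProcessId(), hFile,
                      MiniDumpNormal, &mei, NULL, NULL);
    CloseHandle(hFile);
}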
static void get_exception_pointers(EXCEPTION_POINTERS* ep)
{
    // The following code was taken from VC++ 8.0 CRT (invarg.c: line 104)
    CONTEXT context = {};
    ZeroMemory(&context, sizeof(context));

#ifdef _X86_
    __asm {
        mov dword ptr [context.Eax], eax
        mov dword ptr [context.Ecx], ecx
        mov dword ptr [context.Edx], edx
        mov dword ptr [context.Ebx], ebx
        mov dword ptr [context.Esi], esi
        mov dword ptr [context.Edi], edi
        mov word ptr [context.SegSs], ss
        mov word ptr [context.SegCs], cs
        mov word ptr [context.SegDs], ds
        mov word ptr [context.SegEs], es
        mov word ptr [context.SegFs], fs
        mov word ptr [context.SegGs], gs
        pushfd
        pop [context.EFlags]
    }

    context.ContextFlags = CONTEXT_CONTROL;
#pragma warning(push)
#pragma warning(disable:4311)
    context.Eip = (ULONG)_ReturnAddress();
    context.Esp = (ULONG)_AddressOfReturnAddress();
#pragma warning(pop)
    context.Ebp = *((ULONG *)_AddressOfReturnAddress() - 1);
#elif defined (_IA64_) || defined (_AMD64_)
    /* Need to fill up the Context in IA64 and AMD64. */
    RtlCaptureContext(&context);
#else  /* defined (_IA64_) || defined (_AMD64_) */
#endif  /* defined (_IA64_) || defined (_AMD64_) */

    CopyMemory(ep->ContextRecord, &context, sizeof(CONTEXT));
    ZeroMemory(ep->ExceptionRecord, sizeof(EXCEPTION_RECORD));
    //ep->ExceptionRecord->ExceptionCode = dwExceptionCode;
    ep->ExceptionRecord->ExceptionAddress = _ReturnAddress();
}
MOZ_NEVER_INLINE
static bool GetStackAfterCurrentFrame(uint8_t** aOutTop, uint8_t** aOutBottom)
{
    // "Top" of the free space on the stack is directly after the memory
    // holding our return address.
    uint8_t* top = (uint8_t*)_AddressOfReturnAddress();

    // Look down the stack until we find the guard page...
    MEMORY_BASIC_INFORMATION memInfo = {0};
    uint8_t* bottom = top;
    while (1) {
        if (!VirtualQuery(bottom, &memInfo, sizeof(memInfo))) {
            return false;
        }
        if ((memInfo.Protect & PAGE_GUARD) == PAGE_GUARD) {
            bottom = (uint8_t*)memInfo.BaseAddress + memInfo.RegionSize;
#ifdef DEBUG
            if (!VirtualQuery(bottom, &memInfo, sizeof(memInfo))) {
                return false;
            }
            assert(!(memInfo.Protect & PAGE_GUARD));  // Should have found boundary.
#endif
            break;
        } else if (memInfo.State != MEM_COMMIT ||
                   (memInfo.AllocationProtect & PAGE_READWRITE) != PAGE_READWRITE) {
            return false;
        }
        bottom = (uint8_t*)memInfo.BaseAddress - 1;
    }
    *aOutTop = top;
    *aOutBottom = bottom;
    return true;
}
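// A hypothetical caller for the Mozilla helper above: measure how much
// committed stack remains below the current frame. Sketch only; it assumes
// GetStackAfterCurrentFrame is visible in this translation unit.
#include <cstdio>

static void ReportFreeStack()
{
    uint8_t* top = nullptr;
    uint8_t* bottom = nullptr;
    if (GetStackAfterCurrentFrame(&top, &bottom)) {
        // The stack grows downward, so the free region is [bottom, top).
        std::printf("~%zu bytes of stack free below this frame\n",
                    (size_t)(top - bottom));
    }
}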
VOID FASTCALL CBattle::CopyMagicAndCraftData(PMONSTER_STATUS MSData)
{
    PUSHORT MagicList;
    ULONG_PTR MaxMagicNumber;
    PCRAFT_AI_INFO Magic;

    if (!IsCustomChar(MSData->CharID))
        return;

    MaxMagicNumber = countof(MSData->MagicAiInfo);
    Magic = MSData->MagicAiInfo;
    MagicList = GetSaveData()->GetChrMagicList() + MSData->CharID * MaxMagicNumber;

    /* Skip the entries already in use; MaxMagicNumber ends up as the number
       of free slots left in the AI info array */
    for (ULONG_PTR Count = countof(MSData->MagicAiInfo); Count; --Count)
    {
        if (Magic->CraftIndex == 0)
            break;

        --MaxMagicNumber;
        ++Magic;
    }

    /* Fill the remaining slots from the saved magic list */
    for (; MaxMagicNumber; ++Magic, ++MagicList, --MaxMagicNumber)
    {
        Magic->CraftIndex = *MagicList;
        Magic->AriaActionIndex = 6;
        Magic->ActionIndex = 7;
        Magic->Condition = 0;
    }

    /* Hijack the return address so the hook resumes at a fixed address
       inside the game binary instead of at the original call site */
    *(PVOID *)_AddressOfReturnAddress() = (PVOID)0x9A37F4;
}
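// Aside on the trick used above: _AddressOfReturnAddress() yields the
// *location* of the return address, so writing through it changes where the
// function returns to. A minimal, self-contained illustration of the read
// side (show_return_slot is a hypothetical name; MSVC):
#include <stdio.h>
#include <intrin.h>

__declspec(noinline) static void show_return_slot(void)
{
    /* The slot's contents equal what _ReturnAddress() reports. */
    void** slot = (void**)_AddressOfReturnAddress();
    printf("return address slot %p holds %p (== %p)\n",
           (void*)slot, *slot, _ReturnAddress());
    /* Writing a new code address through `slot`, as the hook above does,
       would redirect execution on `ret`. */
}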
void cbGetExceptionPointers()
{
    CONTEXT context;
    memset( &context, 0, sizeof(CONTEXT) );

#ifdef _X86_
    __asm {
        mov dword ptr [context.Eax], eax
        mov dword ptr [context.Ecx], ecx
        mov dword ptr [context.Edx], edx
        mov dword ptr [context.Ebx], ebx
        mov dword ptr [context.Esi], esi
        mov dword ptr [context.Edi], edi
        mov word ptr [context.SegSs], ss
        mov word ptr [context.SegCs], cs
        mov word ptr [context.SegDs], ds
        mov word ptr [context.SegEs], es
        mov word ptr [context.SegFs], fs
        mov word ptr [context.SegGs], gs
        pushfd
        pop [context.EFlags]
    }

    context.ContextFlags = CONTEXT_CONTROL;
    context.Eip = (ULONG)_ReturnAddress();
    context.Esp = (ULONG)_AddressOfReturnAddress();
    context.Ebp = *((ULONG *)_AddressOfReturnAddress() - 1);
#elif defined (_IA64_) || defined (_AMD64_)
    RtlCaptureContext( &context );
#else
#error
#endif

    memcpy( &cbTmpContext, &context, sizeof(CONTEXT) );
    ZeroMemory( &cbTmpExceptionRecord, sizeof(EXCEPTION_RECORD) );
    cbTmpExceptionRecord.ExceptionCode = 0;
    cbTmpExceptionRecord.ExceptionAddress = _ReturnAddress();

    cbReportData->exceptionPtrs = &cbTmpExceptionPtrs;
    cbTmpExceptionPtrs.ExceptionRecord = &cbTmpExceptionRecord;
    cbTmpExceptionPtrs.ContextRecord = &cbTmpContext;
}
HANDLE
NTAPI
QqCreateWaitQQProtectThread(
    PSECURITY_ATTRIBUTES ThreadAttributes,
    ULONG_PTR StackSize,
    PTHREAD_START_ROUTINE StartAddress,
    PVOID Parameter,
    ULONG CreationFlags,
    PULONG ThreadId
)
{
    NTSTATUS Status;
    PVOID Ebp, CallCreateQQProtectExchangeWindow;
    PROCESS_BASIC_INFORMATION BasicInfo;

    LOOP_ONCE
    {
        if (PtrAnd(Parameter, 0xFFF00000) != 0)
            continue;

        Status = NtQueryInformationProcess((HANDLE)Parameter, ProcessBasicInformation,
                                           &BasicInfo, sizeof(BasicInfo), nullptr);
        FAIL_BREAK(Status);

        if (BasicInfo.UniqueProcessId != CurrentPid())
            break;

        AllocStack(16);

        /* Walk one frame up: the saved EBP sits one slot below our return
           address, and the caller's return address one slot above that EBP */
        Ebp = *((PVOID *)_AddressOfReturnAddress() - 1);
        CallCreateQQProtectExchangeWindow = *((PVOID *)Ebp + 1);
        if (*(PBYTE)CallCreateQQProtectExchangeWindow != CALL)
            break;

        NtClose((HANDLE)Parameter);

        /* The instruction at the caller's return address is a CALL; advance
           the saved return address past it so that call is skipped on return */
        *(PULONG_PTR)((PVOID *)Ebp + 1) += GetOpCodeSize(CallCreateQQProtectExchangeWindow);
        return nullptr;
    }

    return HummerCreateThread(ThreadAttributes, StackSize, StartAddress, Parameter, CreationFlags, ThreadId);
}
static HANDLE CreateThreadWrapper(_In_opt_ LPSECURITY_ATTRIBUTES lpThreadAttributes,
                                  _In_ SIZE_T dwStackSize,
                                  _In_ LPTHREAD_START_ROUTINE lpStartAddress,
                                  _In_opt_ __drv_aliasesMem LPVOID lpParameter,
                                  _In_ DWORD dwCreationFlags,
                                  _Out_opt_ LPDWORD lpThreadId)
{
    //MessageBoxA(NULL, "CreateThreadWrapper called", "CreateThreadWrapper called", 0);

    // find the name parameter by frobbling the parent stack
    char* parentStackPtr = reinterpret_cast<char*>(_AddressOfReturnAddress());
    char* threadName = *reinterpret_cast<char**>(parentStackPtr
        + 0x50 /* offset from base pointer to argument */
        + 0x60 /* offset from function stack frame stack to base pointer */
        + 8 /* return address offset */);

    // create metadata for passing to the thread
    struct WrapThreadMeta
    {
        char* threadName;
        LPTHREAD_START_ROUTINE origRoutine;
        void* originalData;
    };

    WrapThreadMeta* parameter = new WrapThreadMeta{ threadName, lpStartAddress, lpParameter };

    // create a thread with 'our' callback
    HANDLE hThread = CreateThread(lpThreadAttributes, dwStackSize, [](void* arguments)
    {
        // get and free metadata
        WrapThreadMeta* metaPtr = reinterpret_cast<WrapThreadMeta*>(arguments);
        WrapThreadMeta meta = *metaPtr;
        delete metaPtr;

        // set thread name, if any
        if (meta.threadName)
        {
            SetThreadName(-1, meta.threadName);
        }

        // invoke original thread start
        return meta.origRoutine(meta.originalData);
    }, parameter, dwCreationFlags, lpThreadId);

    return hThread;
}
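// SetThreadName above is project code; a common implementation is the
// classic MSDN recipe, which raises the magic exception 0x406D1388 so an
// attached debugger picks the name up. Sketch reproduced from that recipe
// (not this project's actual definition):
#include <windows.h>

const DWORD MS_VC_EXCEPTION = 0x406D1388;

#pragma pack(push, 8)
typedef struct tagTHREADNAME_INFO
{
    DWORD dwType;     // must be 0x1000
    LPCSTR szName;    // pointer to name (in user address space)
    DWORD dwThreadID; // thread ID (-1 = calling thread)
    DWORD dwFlags;    // reserved for future use, must be zero
} THREADNAME_INFO;
#pragma pack(pop)

static void SetThreadName(DWORD dwThreadID, const char* threadName)
{
    THREADNAME_INFO info;
    info.dwType = 0x1000;
    info.szName = threadName;
    info.dwThreadID = dwThreadID;
    info.dwFlags = 0;

    __try
    {
        RaiseException(MS_VC_EXCEPTION, 0,
                       sizeof(info) / sizeof(ULONG_PTR), (ULONG_PTR*)&info);
    }
    __except (EXCEPTION_EXECUTE_HANDLER)
    {
        // swallowed: the exception exists only to signal the debugger
    }
}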
intptr_t CustomMemFree(void* allocator, void* pointer)
{
    intptr_t retval = g_origMemFree(allocator, pointer);

    /*if (pointer != nullptr && *g_unsafePointerLoc)
    {
        size_t allocSize = 0;

        if (g_unsafeStack.size() == 0)
        {
            {
                std::unique_lock<std::mutex> lock(g_allocMutex);

                auto it = g_allocData.find(pointer);

                if (it != g_allocData.end())
                {
                    allocSize = it->second;
                    g_allocData.erase(it);
                }
            }

            if (**(void***)g_unsafePointerLoc >= pointer && **(void***)g_unsafePointerLoc < ((char*)pointer + allocSize))
            {
                std::vector<uintptr_t> stackList(96);

                uintptr_t* stack = (uintptr_t*)_AddressOfReturnAddress();

                for (int i = 0; i < stackList.size(); i++)
                {
                    stackList[i] = stack[i];
                }

                g_unsafeStack = stackList;
            }
        }
    }*/

    if (/*!g_didLevelFree && */pointer != nullptr)
    {
        //std::unique_lock<std::mutex> lock(g_allocMutex);
        EnterCriticalSection(&g_allocCS);

        uintptr_t ptr = (uintptr_t)pointer;

        auto it = g_allocData.find(ptr);

        if (it != g_allocData.end())
        {
            size_t allocSize = it->second;

            static char* location = hook::pattern("4C 8D 0D ? ? ? ? 48 89 01 4C 89 81 80 00 00").count(1).get(0).get<char>(3);
            static char** g_collectionRoot = (char**)(location + *(int32_t*)location + 4);

            for (int i = 0; i < 0x950; i++)
            {
                if (g_collectionRoot[i])
                {
                    void* baad = *(void**)(g_collectionRoot[i] + 32);

                    if (baad >= pointer && baad < ((char*)pointer + allocSize))
                    {
                        atArray<char>* array = (atArray<char>*)(g_collectionRoot[i] + 128);
                        trace("freed collection %s (%p-%p)\n", &array->Get(0), pointer, allocSize + (char*)pointer);

                        uintptr_t* stack = (uintptr_t*)_AddressOfReturnAddress();
                        stack += (32 / 8);

                        for (int i = 0; i < 16; i++)
                        {
                            trace("stack: %p\n", stack[i]);
                        }
                    }
                }
            }

            /*if (g_inLevelFree)
            {
                if (allocSize != -1)
                {
                    int stackIdx = g_stackIdx++;

                    std::vector<uintptr_t> stackList(96);

                    uintptr_t* stack = (uintptr_t*)_AddressOfReturnAddress();

                    for (int i = 0; i < stackList.size(); i++)
                    {
                        stackList[i] = stack[i];
                    }

                    g_stacks[stackIdx] = stackList;

                    trace("level free: %p-%p - stack idx: %d\n", pointer, (char*)pointer + allocSize, stackIdx);
                }
            }*/

            g_allocData.erase(it);
        }

        LeaveCriticalSection(&g_allocCS);
    }

    return retval;
}
_CRTIMP void __cdecl _invoke_watson(
    const wchar_t *pszExpression,
    const wchar_t *pszFunction,
    const wchar_t *pszFile,
    unsigned int nLine,
    uintptr_t pReserved
    )
{
    /* Fake an exception to call reportfault. */
    EXCEPTION_RECORD ExceptionRecord;
    CONTEXT ContextRecord;
    EXCEPTION_POINTERS ExceptionPointers;
    BOOL wasDebuggerPresent = FALSE;
    DWORD ret = 0;

    (pszExpression);
    (pszFunction);
    (pszFile);
    (nLine);
    (pReserved);

#ifdef _X86_
    __asm {
        mov dword ptr [ContextRecord.Eax], eax
        mov dword ptr [ContextRecord.Ecx], ecx
        mov dword ptr [ContextRecord.Edx], edx
        mov dword ptr [ContextRecord.Ebx], ebx
        mov dword ptr [ContextRecord.Esi], esi
        mov dword ptr [ContextRecord.Edi], edi
        mov word ptr [ContextRecord.SegSs], ss
        mov word ptr [ContextRecord.SegCs], cs
        mov word ptr [ContextRecord.SegDs], ds
        mov word ptr [ContextRecord.SegEs], es
        mov word ptr [ContextRecord.SegFs], fs
        mov word ptr [ContextRecord.SegGs], gs
        pushfd
        pop [ContextRecord.EFlags]
    }

    ContextRecord.ContextFlags = CONTEXT_CONTROL;
#pragma warning(push)
#pragma warning(disable:4311)
    ContextRecord.Eip = (ULONG)_ReturnAddress();
    ContextRecord.Esp = (ULONG)_AddressOfReturnAddress();
#pragma warning(pop)
    ContextRecord.Ebp = *((ULONG *)_AddressOfReturnAddress() - 1);
#elif defined (_IA64_) || defined (_AMD64_)
    /* Need to fill up the Context in IA64 and AMD64. */
    RtlCaptureContext(&ContextRecord);
#else  /* defined (_IA64_) || defined (_AMD64_) */
    ZeroMemory(&ContextRecord, sizeof(ContextRecord));
#endif  /* defined (_IA64_) || defined (_AMD64_) */

    ZeroMemory(&ExceptionRecord, sizeof(ExceptionRecord));

    ExceptionRecord.ExceptionCode = STATUS_INVALID_PARAMETER;
    ExceptionRecord.ExceptionAddress = _ReturnAddress();

    ExceptionPointers.ExceptionRecord = &ExceptionRecord;
    ExceptionPointers.ContextRecord = &ContextRecord;

    wasDebuggerPresent = IsDebuggerPresent();

    /* Make sure any filter already in place is deleted. */
    SetUnhandledExceptionFilter(NULL);

    ret = UnhandledExceptionFilter(&ExceptionPointers);

    // if no handler found and no debugger previously attached
    // the execution must stop into the debugger hook.
    if (ret == EXCEPTION_CONTINUE_SEARCH && !wasDebuggerPresent)
    {
        _CRT_DEBUGGER_HOOK(_CRT_DEBUGGER_INVALIDPARAMETER);
    }

    TerminateProcess(GetCurrentProcess(), STATUS_INVALID_PARAMETER);
}
__declspec(noreturn) void __cdecl __report_gsfailure(GSFAILURE_PARAMETER)
{
    if (IsProcessorFeaturePresent(PF_FASTFAIL_AVAILABLE))
    {
        __fastfail(FAST_FAIL_STACK_COOKIE_CHECK_FAILURE);
    }

    volatile UINT_PTR cookie[2];

    // Set up a fake exception, and report it via UnhandledExceptionFilter.
    // We can't raise a true exception because the stack (and therefore
    // exception handling) can't be trusted after a buffer overrun.  The
    // exception should appear as if it originated after the call to
    // __security_check_cookie, so it is attributed to the function where the
    // buffer overrun was detected.

#if defined _M_IX86
    // On x86, we reserve some extra stack which won't be used.  That is to
    // preserve as much of the call frame as possible when the function with
    // the buffer overrun entered __security_check_cookie with a JMP instead
    // of a CALL, after the calling frame has been released in the epilogue
    // of that function.
    ULONG volatile dw[(sizeof(CONTEXT) + sizeof(EXCEPTION_RECORD)) / sizeof(ULONG)];

    // Save the state in the context record immediately.  Hopefully, since
    // opts are disabled, this will happen without modifying ECX, which has
    // the local cookie which failed the check.
    __asm
    {
        mov dword ptr [GS_ContextRecord.Eax  ], eax
        mov dword ptr [GS_ContextRecord.Ecx  ], ecx
        mov dword ptr [GS_ContextRecord.Edx  ], edx
        mov dword ptr [GS_ContextRecord.Ebx  ], ebx
        mov dword ptr [GS_ContextRecord.Esi  ], esi
        mov dword ptr [GS_ContextRecord.Edi  ], edi
        mov word  ptr [GS_ContextRecord.SegSs], ss
        mov word  ptr [GS_ContextRecord.SegCs], cs
        mov word  ptr [GS_ContextRecord.SegDs], ds
        mov word  ptr [GS_ContextRecord.SegEs], es
        mov word  ptr [GS_ContextRecord.SegFs], fs
        mov word  ptr [GS_ContextRecord.SegGs], gs
        pushfd
        pop [GS_ContextRecord.EFlags]

        // Set the context EBP/EIP/ESP to the values which would be found
        // in the caller to __security_check_cookie.
        mov eax, [ebp]
        mov dword ptr [GS_ContextRecord.Ebp], eax
        mov eax, [ebp+4]
        mov dword ptr [GS_ContextRecord.Eip], eax
        lea eax, [ebp+8]
        mov dword ptr [GS_ContextRecord.Esp], eax

        // Make sure the dummy stack space looks referenced.
        mov eax, dword ptr dw
    }

    GS_ContextRecord.ContextFlags       = CONTEXT_CONTROL;
    GS_ExceptionRecord.ExceptionAddress = (PVOID)(ULONG_PTR)GS_ContextRecord.Eip;

#else // ^^^ _M_IX86 ^^^ // vvv _M_X64 vvv //

    capture_previous_context(&GS_ContextRecord);
    GS_ContextRecord.Rip                = (ULONGLONG)_ReturnAddress();
    GS_ContextRecord.Rsp                = (ULONGLONG)_AddressOfReturnAddress() + 8;
    GS_ExceptionRecord.ExceptionAddress = (PVOID)GS_ContextRecord.Rip;
    GS_ContextRecord.Rcx                = stack_cookie;

#endif // _M_X64

    GS_ExceptionRecord.ExceptionCode           = STATUS_SECURITY_CHECK_FAILURE;
    GS_ExceptionRecord.ExceptionFlags          = EXCEPTION_NONCONTINUABLE;
    GS_ExceptionRecord.NumberParameters        = 1;
    GS_ExceptionRecord.ExceptionInformation[0] = FAST_FAIL_STACK_COOKIE_CHECK_FAILURE;

    // Save the global cookie and cookie complement locally - using an array
    // to defeat any potential stack-packing.
    cookie[0] = __security_cookie;
    cookie[1] = __security_cookie_complement;

    // Raise the security failure by passing it to the unhandled exception
    // filter and then terminate the process.
    __raise_securityfailure((EXCEPTION_POINTERS*)&GS_ExceptionPointers);
}
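// Orientation, not CRT source: __report_gsfailure is normally reached from
// the compiler-emitted /GS cookie check. A simplified model of that path
// follows; my_security_check_cookie is a hypothetical stand-in (the real
// __security_check_cookie is hand-written asm, and on x64 the cookie is
// XORed with the frame address before comparison).
extern UINT_PTR __security_cookie;

__declspec(noreturn) void __cdecl __report_gsfailure(UINT_PTR stack_cookie);

void __fastcall my_security_check_cookie(UINT_PTR cookie)
{
    // Fast path: cookie intact, return straight into the caller's epilogue.
    if (cookie == __security_cookie)
        return;

    // Mismatch: the frame was corrupted by a buffer overrun; never returns.
    __report_gsfailure(cookie);
}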
xgc_void GetStackTrace( StackFrameSequence& FrameSequence )
{
    UINT_PTR* fp = (UINT_PTR*) _AddressOfReturnAddress();
    /* The saved frame pointer sits one slot below the return address */
    UINT_PTR* framePointer = (UINT_PTR*) fp - 1;

    FrameSequence.Count = 0;

#if defined(_M_IX86)
    while ( FrameSequence.Count < MAX_FRAME_SAVE )
    {
        if (*framePointer < (UINT_PTR)framePointer)
        {
            if (*framePointer == NULL)
            {
                // Looks like we reached the end of the stack.
                break;
            }
            else
            {
                // Invalid frame pointer. Frame pointer addresses should always
                // increase as we move up the stack.
                break;
            }
        }

        if (*framePointer & (sizeof(UINT_PTR*) - 1))
        {
            // Invalid frame pointer. Frame pointer addresses should always
            // be aligned to the size of a pointer. This probably means that
            // we've encountered a frame that was created by a module built with
            // frame pointer omission (FPO) optimization turned on.
            break;
        }

        if (IsBadReadPtr((UINT*)*framePointer, sizeof(UINT_PTR*)))
        {
            // Bogus frame pointer. Again, this probably means that we've
            // encountered a frame built with FPO optimization.
            break;
        }

        //push_back(*(framePointer + 1));
        FrameSequence.Frame[ FrameSequence.Count ] = *(framePointer + 1);
        FrameSequence.Count += 1;

        framePointer = (UINT_PTR*)*framePointer;
    }
#elif defined(_M_X64)
    xgc_uint32 maxframes = XGC_MIN( 62, MAX_FRAME_SAVE + 10 );

    static USHORT( WINAPI *s_pfnCaptureStackBackTrace )(
        ULONG FramesToSkip,
        ULONG FramesToCapture,
        PVOID* BackTrace,
        PULONG BackTraceHash ) = 0;

    if( s_pfnCaptureStackBackTrace == 0 )
    {
        const HMODULE hNtDll = GetModuleHandleW( L"ntdll.dll" );
        reinterpret_cast<void*&>( s_pfnCaptureStackBackTrace ) =
            ::GetProcAddress( hNtDll, "RtlCaptureStackBackTrace" );

        if( s_pfnCaptureStackBackTrace == 0 )
            return;
    }

    UINT_PTR* myFrames = XGC_NEW UINT_PTR[maxframes];
    ZeroMemory( myFrames, sizeof( UINT_PTR ) * maxframes );

    s_pfnCaptureStackBackTrace( 0, maxframes, (PVOID*) myFrames, NULL );

    /* Find our own return address in the capture so the copy starts at the
       caller's frame */
    xgc_uint32 startIndex = 0;
    xgc_uint32 count = 0;
    while( count < maxframes )
    {
        if( myFrames[count] == 0 )
            break;

        if( myFrames[count] == *( framePointer + 1 ) )
            startIndex = count;

        count++;
    }

    FrameSequence.Count = 0;
    while( FrameSequence.Count < maxframes &&
           FrameSequence.Count < XGC_COUNTOF( FrameSequence.Frame ) )
    {
        /* Test the shifted index; the original tested myFrames at the
           unshifted index, which could truncate or overrun the copy */
        if( myFrames[startIndex + FrameSequence.Count] == 0 )
            break;

        //push_back(myFrames[count]);
        FrameSequence.Frame[FrameSequence.Count] = (void*)myFrames[startIndex + FrameSequence.Count];
        FrameSequence.Count += 1;
    }

    delete[] myFrames;
#endif
}
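// On current SDKs the GetProcAddress dance above is unnecessary:
// CaptureStackBackTrace is exposed directly through <winnt.h> (via
// windows.h). A minimal equivalent capture; the 62-frame cap is the
// documented limit on pre-Vista systems:
#include <windows.h>
#include <stdio.h>

static void PrintBacktrace(void)
{
    void* frames[62];
    USHORT n = CaptureStackBackTrace(0, 62, frames, NULL);
    for (USHORT i = 0; i < n; i++)
        printf("frame %u: %p\n", i, frames[i]);
}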
void* CustomMemAlloc(void* allocator, intptr_t size, intptr_t align, int subAlloc)
{
    void* ptr = g_origMemAlloc(allocator, size, align, subAlloc);

    /*if (*g_unsafePointerLoc >= ptr && *g_unsafePointerLoc < ((char*)ptr + size))
    {
#ifdef _DEBUG
        __debugbreak();
#endif
        assert(!"Tried to allocate over unsafe pointer!");
    }

    if (*g_unsafePointerLoc)
    {
        void*** unsafePtrLoc = (void***)g_unsafePointerLoc;

        if (**unsafePtrLoc >= ptr && **unsafePtrLoc < ((char*)ptr + size))
        {
#ifdef _DEBUG
            __debugbreak();
#endif
            assert(!"Tried to allocate over unsafe pointer!");
        }
    }*/

    //memset(ptr, 0, size);

    if (subAlloc == 0)
    {
        uintptr_t ptr_ = (uintptr_t)ptr;

        //std::unique_lock<std::mutex> lock(g_allocMutex);
        EnterCriticalSection(&g_allocCS);
        g_allocData[ptr_] = size;

        /*auto first = g_freeThings.lower_bound(ptr_);
        auto second = g_freeThings.upper_bound(ptr_ + size);

        for (auto it = first; first != second; first++)
        {
            if (ptr_ >= it->first && ptr_ < (it->first + it->second.first))
            {
                if (size == it->second.first)
                {
                    trace("allocate over stacky!\n");

                    auto stacky = it->second.second;

                    for (auto& entry : stacky)
                    {
                        trace("%p\n", entry);
                    }

                    trace("noooooooooo!\n");
                }
            }
        }*/

        //g_allocData[ptr_] = size;

        // step 32 bytes (4 qwords) up the stack past the return-address area
        // before dumping raw qwords as a crude backtrace
        uintptr_t* stack = (uintptr_t*)_AddressOfReturnAddress();
        stack += (32 / 8);

        std::array<uintptr_t, 16> stacky;
        memcpy(stacky.data(), stack, 16 * 8);

        g_allocStuff[{ ptr_, size }] = stacky;

        LeaveCriticalSection(&g_allocCS);
    }

    return ptr;
}