void CodeGenNumberThreadAllocator::FlushAllocations() { AutoCriticalSection autocs(&cs); pendingFlushNumberBlock.MoveTo(&pendingIntegrationNumberBlock); pendingFlushChunkBlock.MoveTo(&pendingIntegrationChunkBlock); }
void GlobalSecurityPolicy::DisableSetProcessValidCallTargets() { // One-way transition from allowing SetProcessValidCallTargets to disabling // the API. if (!s_ro_disableSetProcessValidCallTargets) { AutoCriticalSection autocs(&s_policyCS); DWORD oldProtect; BOOL res = VirtualProtect((LPVOID)&s_ro_disableSetProcessValidCallTargets, sizeof(s_ro_disableSetProcessValidCallTargets), PAGE_READWRITE, &oldProtect); if ((res == FALSE) || (oldProtect != PAGE_READONLY)) { RaiseFailFastException(nullptr, nullptr, FAIL_FAST_GENERATE_EXCEPTION_ADDRESS); } s_ro_disableSetProcessValidCallTargets = true; res = VirtualProtect((LPVOID)&s_ro_disableSetProcessValidCallTargets, sizeof(s_ro_disableSetProcessValidCallTargets), PAGE_READONLY, &oldProtect); if ((res == FALSE) || (oldProtect != PAGE_READWRITE)) { RaiseFailFastException(nullptr, nullptr, FAIL_FAST_GENERATE_EXCEPTION_ADDRESS); } } }
// Print a string directly to stdout under the output lock, temporarily
// applying the configured console color on Windows.
void Output::DirectPrint(char16 const * string)
{
    AutoCriticalSection autocs(&s_critsect);

    // xplat-todo: support console color
#ifdef _WIN32
    WORD savedAttributes = 0;
    BOOL colorChanged = FALSE;
    HANDLE consoleHandle = NULL;
    if (Output::s_hasColor)
    {
        _CONSOLE_SCREEN_BUFFER_INFO info;
        consoleHandle = GetStdHandle(STD_OUTPUT_HANDLE);
        if (consoleHandle && GetConsoleScreenBufferInfo(consoleHandle, &info))
        {
            savedAttributes = info.wAttributes;
            colorChanged = SetConsoleTextAttribute(consoleHandle, Output::s_color);
        }
    }
#endif // _WIN32

    fwprintf(stdout, _u("%s"), string);

    // xplat-todo: support console color
#ifdef _WIN32
    if (colorChanged)
    {
        // Restore the attributes that were in effect before we printed.
        SetConsoleTextAttribute(consoleHandle, savedAttributes);
    }
#endif // _WIN32
}
// Turn on every flag registered as experimental.
// X-macro expansion: for each FLAG_REGOVR_EXP entry in ConfigFlagsList.h,
// the #define below emits one SetAsBoolean(...) call setting that flag true.
void ConfigFlagsTable::EnableExperimentalFlag()
{
    AutoCriticalSection autocs(&csExperimentalFlags);
#define FLAG_REGOVR_EXP(type, name, description, defaultValue, parentName, hasCallback) this->SetAsBoolean(Js::Flag::name##Flag, true);
#include "ConfigFlagsList.h"
#undef FLAG_REGOVR_EXP
}
bool LeakReport::EnsureLeakReportFile() { AutoCriticalSection autocs(&s_cs); if (openReportFileFailed) { return false; } if (file != nullptr) { return true; } char16 const * filename = Js::Configuration::Global.flags.LeakReport; char16 const * openMode = _u("w+"); char16 defaultFilename[_MAX_PATH]; if (filename == nullptr) { // xplat-todo: Implement swprintf_s in the PAL #ifdef _MSC_VER swprintf_s(defaultFilename, _u("jsleakreport-%u.txt"), ::GetCurrentProcessId()); #else _snwprintf(defaultFilename, _countof(defaultFilename), _u("jsleakreport-%u.txt"), ::GetCurrentProcessId()); #endif filename = defaultFilename; openMode = _u("a+"); // append mode } if (_wfopen_s(&file, filename, openMode) != 0) { openReportFileFailed = true; return false; } Print(_u("================================================================================\n")); Print(_u("Chakra Leak Report - PID: %d\n"), ::GetCurrentProcessId()); struct tm local_time; uint64 time_ms = (uint64) PlatformAgnostic::DateTime::HiResTimer::GetSystemTime(); // utc #ifdef _MSC_VER __time64_t time_sec = time_ms / 1000; // get rid of the milliseconds _localtime64_s(&local_time, &time_sec); #else time_t time_sec = time_ms / 1000; // get rid of the milliseconds localtime_r(&time_sec, &local_time); #endif Print(_u("%04d-%02d-%02d %02d:%02d:%02d.%03d\n"), local_time.tm_year + 1900, local_time.tm_mon + 1, local_time.tm_mday, local_time.tm_hour, local_time.tm_min, local_time.tm_sec, time_ms % 1000); return true; }
// Return the start of the pre-reserved region, reserving it on first use.
// The unlocked read is a fast path; the actual reservation happens under
// the lock in EnsurePreReservedRegionInternal().
LPVOID PreReservedVirtualAllocWrapper::EnsurePreReservedRegion()
{
    LPVOID regionStart = preReservedStartAddress;
    if (regionStart != nullptr)
    {
        return regionStart;
    }

    AutoCriticalSection autocs(&this->cs);
    return EnsurePreReservedRegionInternal();
}
// printf-style append to the leak-report file. Silently drops output if
// the report file cannot be opened.
void LeakReport::Print(char16 const * msg, ...)
{
    AutoCriticalSection autocs(&s_cs);
    if (!EnsureLeakReportFile())
    {
        return;
    }

    va_list args;
    va_start(args, msg);
    vfwprintf(file, msg, args);
    va_end(args);
}
// Move all pending page segments and heap blocks produced on the JIT thread
// into the recycler, so the numbers/chunks allocated there become ordinary
// recycler-managed objects.
void CodeGenNumberThreadAllocator::Integrate()
{
    AutoCriticalSection autocs(&cs);

    // First hand the page segments to the recycler's page allocators; the
    // block integration below assumes the pages already belong to them.
    PageAllocator * leafPageAllocator = this->recycler->GetRecyclerLeafPageAllocator();
    leafPageAllocator->IntegrateSegments(pendingIntegrationNumberSegment, pendingIntegrationNumberSegmentCount, pendingIntegrationNumberSegmentPageCount);
    PageAllocator * recyclerPageAllocator = this->recycler->GetRecyclerPageAllocator();
    recyclerPageAllocator->IntegrateSegments(pendingIntegrationChunkSegment, pendingIntegrationChunkSegmentCount, pendingIntegrationChunkSegmentPageCount);
    pendingIntegrationNumberSegmentCount = 0;
    pendingIntegrationChunkSegmentCount = 0;
    pendingIntegrationNumberSegmentPageCount = 0;
    pendingIntegrationChunkSegmentPageCount = 0;

#ifdef TRACK_ALLOC
    // Save and clear any in-flight tracking info so the TRACK_ALLOC_INFO
    // calls below don't mix with the caller's pending allocation data;
    // restored at the end of this function.
    TrackAllocData oldAllocData = recycler->nextAllocData;
    recycler->nextAllocData.Clear();
#endif
    // Integrate the JavascriptNumber blocks (leaf allocations).
    while (!pendingIntegrationNumberBlock.Empty())
    {
        TRACK_ALLOC_INFO(recycler, Js::JavascriptNumber, Recycler, 0, (size_t)-1);
        BlockRecord& record = pendingIntegrationNumberBlock.Head();
        if (!recycler->IntegrateBlock<LeafBit>(record.blockAddress, record.segment, GetNumberAllocSize(), sizeof(Js::JavascriptNumber)))
        {
            Js::Throw::OutOfMemory();
        }
        pendingIntegrationNumberBlock.RemoveHead(&NoThrowHeapAllocator::Instance);
    }
    // Integrate the CodeGenNumberChunk blocks (may contain pointers => NoBit).
    while (!pendingIntegrationChunkBlock.Empty())
    {
        // REVIEW: the above number block integration can be moved into this loop
        TRACK_ALLOC_INFO(recycler, CodeGenNumberChunk, Recycler, 0, (size_t)-1);
        BlockRecord& record = pendingIntegrationChunkBlock.Head();
        if (!recycler->IntegrateBlock<NoBit>(record.blockAddress, record.segment, GetChunkAllocSize(), sizeof(CodeGenNumberChunk)))
        {
            Js::Throw::OutOfMemory();
        }
        pendingIntegrationChunkBlock.RemoveHead(&NoThrowHeapAllocator::Instance);
    }
#ifdef TRACK_ALLOC
    Assert(recycler->nextAllocData.IsEmpty());
    recycler->nextAllocData = oldAllocData;
#endif
}
// printf-style trace output, emitted only in verbose trace builds.
// Returns the number of characters written (0 when suppressed).
size_t __cdecl Output::VerboseNote(const wchar_t * format, ...)
{
#ifdef ENABLE_TRACE
    if (Js::Configuration::Global.flags.Verbose)
    {
        AutoCriticalSection autocs(&s_critsect);
        va_list args;
        va_start(args, format);
        size_t charsWritten = vfwprintf(stdout, format, args);
        va_end(args);
        return charsWritten;
    }
#endif
    return 0;
}
// Print and remove every URL record logged by the given thread id, then fix
// up the list tail pointer.
void LeakReport::DumpUrl(DWORD tid)
{
    AutoCriticalSection autocs(&s_cs);
    if (!EnsureLeakReportFile())
    {
        return;
    }

    // Walk the singly linked list with a pointer-to-link (pprev) so matching
    // nodes can be unlinked in place; prev tracks the last *kept* node.
    UrlRecord * prev = nullptr;
    UrlRecord ** pprev = &LeakReport::urlRecordHead;
    UrlRecord * curr = *pprev;
    while (curr != nullptr)
    {
        if (curr->tid == tid)
        {
            char16 timeStr[26] = _u("00:00");
            // xplat-todo: Need to implement _wasctime_s in the PAL
#if _MSC_VER
            struct tm local_time;
            _localtime64_s(&local_time, &curr->time);
            _wasctime_s(timeStr, &local_time);
#endif
            // Strip the trailing newline that asctime-style formatting adds.
            timeStr[wcslen(timeStr) - 1] = 0;
            Print(_u("%s - (%p, %p) %s\n"), timeStr, curr->scriptEngine, curr->globalObject, curr->url);

            // Unlink and free the record (url string was heap-allocated in LogUrl).
            *pprev = curr->next;
            NoCheckHeapDeleteArray(wcslen(curr->url) + 1, curr->url);
            NoCheckHeapDelete(curr);
        }
        else
        {
            pprev = &curr->next;
            prev = curr;
        }
        curr = *pprev;
    }

    // Repair the tail: list emptied entirely, or prev is now the last node.
    if (prev == nullptr)
    {
        LeakReport::urlRecordTail = nullptr;
    }
    else if (prev->next == nullptr)
    {
        LeakReport::urlRecordTail = prev;
    }
}
// Decommit (and optionally "release") pages inside the pre-reserved region.
// The reservation itself is never returned to the OS; MEM_RELEASE only marks
// the covered segments free again in the tracking bit vector.
BOOL PreReservedVirtualAllocWrapper::Free(LPVOID lpAddress, size_t dwSize, DWORD dwFreeType)
{
    {
        AutoCriticalSection autocs(&this->cs);
        if (dwSize == 0)
        {
            Assert(false);
            return FALSE;
        }
        if (preReservedStartAddress == nullptr)
        {
            // Nothing was ever reserved; freeing here is a caller bug.
            Assert(false);
            return FALSE;
        }
        Assert(dwSize % AutoSystemInfo::PageSize == 0);
#pragma warning(suppress: 6250)
#pragma warning(suppress: 28160) // Calling VirtualFreeEx without the MEM_RELEASE flag frees memory but not address descriptors (VADs)
        // Always decommit only — releasing would punch a hole in the region.
        BOOL success = VirtualFree(lpAddress, dwSize, MEM_DECOMMIT);
        size_t requestedNumOfSegments = dwSize / AutoSystemInfo::Data.GetAllocationGranularityPageSize();
        Assert(requestedNumOfSegments <= MAXUINT32);

        if (success)
        {
            PreReservedHeapTrace(_u("MEM_DECOMMIT: Address: 0x%p of size: 0x%x bytes\n"), lpAddress, dwSize);
        }

        if (success && (dwFreeType & MEM_RELEASE) != 0)
        {
            // "Release" == return the allocation-granularity segments to the
            // free pool by setting their bits in freeSegments.
            Assert((uintptr_t) lpAddress >= (uintptr_t) preReservedStartAddress);
            AssertMsg(((uintptr_t)lpAddress & (AutoSystemInfo::Data.GetAllocationGranularityPageCount() - 1)) == 0, "Not aligned with Allocation Granularity?");
            AssertMsg(dwSize % AutoSystemInfo::Data.GetAllocationGranularityPageSize() == 0, "Release size should match the allocation granularity size");
            Assert(requestedNumOfSegments != 0);

            BVIndex freeSegmentsBVIndex = (BVIndex) (((uintptr_t) lpAddress - (uintptr_t) preReservedStartAddress) / AutoSystemInfo::Data.GetAllocationGranularityPageSize());
            AssertMsg(freeSegmentsBVIndex < PreReservedAllocationSegmentCount, "Invalid Index ?");
            freeSegments.SetRange(freeSegmentsBVIndex, static_cast<uint>(requestedNumOfSegments));
            PreReservedHeapTrace(_u("MEM_RELEASE: Address: 0x%p of size: 0x%x * 0x%x bytes\n"), lpAddress, requestedNumOfSegments, AutoSystemInfo::Data.GetAllocationGranularityPageSize());
        }

        return success;
    }
}
// Set the console foreground color while preserving the non-foreground
// attribute bits. Returns the previous attributes, or 0 if the console
// state could not be queried.
WORD Output::SetConsoleForeground(WORD color)
{
    AutoCriticalSection autocs(&s_critsect);

    _CONSOLE_SCREEN_BUFFER_INFO info;
    HANDLE hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
    if (!hConsole || !GetConsoleScreenBufferInfo(hConsole, &info))
    {
        return 0;
    }

    // Flush buffered output before the color change takes effect.
    Output::Flush();
    // Low 4 bits are the foreground; keep everything else from the console.
    Output::s_color = color | (info.wAttributes & ~15);
    Output::s_hasColor = Output::s_color != info.wAttributes;
    return info.wAttributes;
}
// Bump-allocate one JavascriptNumber slot from the current number block,
// grabbing a fresh block when the current one is exhausted.
Js::JavascriptNumber * CodeGenNumberThreadAllocator::AllocNumber()
{
    AutoCriticalSection autocs(&cs);
    size_t allocSize = GetNumberAllocSize();
    if (nextNumber + allocSize > currentNumberBlockEnd)
    {
        AllocNewNumberBlock();
    }
    Js::JavascriptNumber * allocated = (Js::JavascriptNumber *)nextNumber;
#ifdef RECYCLER_MEMORY_VERIFY
    recycler->FillCheckPad(allocated, sizeof(Js::JavascriptNumber), allocSize);
#endif
    nextNumber += allocSize;
    return allocated;
}
// Bump-allocate one zero-initialized CodeGenNumberChunk from the current
// chunk block, grabbing a fresh block when the current one is exhausted.
CodeGenNumberChunk * CodeGenNumberThreadAllocator::AllocChunk()
{
    AutoCriticalSection autocs(&cs);
    size_t allocSize = GetChunkAllocSize();
    if (nextChunk + allocSize > currentChunkBlockEnd)
    {
        AllocNewChunkBlock();
    }
    CodeGenNumberChunk * allocated = (CodeGenNumberChunk *)nextChunk;
#ifdef RECYCLER_MEMORY_VERIFY
    recycler->FillCheckPad(nextChunk, sizeof(CodeGenNumberChunk), allocSize);
#endif
    memset(allocated, 0, sizeof(CodeGenNumberChunk));
    nextChunk += allocSize;
    return allocated;
}
// Return this thread's MemoryProfiler, creating it and registering it on
// the global profiler list (under s_cs) on first use.
MemoryProfiler * MemoryProfiler::EnsureMemoryProfiler()
{
    MemoryProfiler * profiler = MemoryProfiler::Instance;
    if (profiler != nullptr)
    {
        return profiler;
    }

    profiler = NoCheckHeapNew(MemoryProfiler);
    {
        // Push onto the global list; only the list manipulation needs the lock.
        AutoCriticalSection autocs(&s_cs);
        profiler->next = MemoryProfiler::profilers.Detach();
        MemoryProfiler::profilers = profiler;
    }
    MemoryProfiler::Instance = profiler;
    return profiler;
}
void MemoryLogger::Write(const char16* msg) { #ifdef EXCEPTION_CHECK // In most cases this will be called at runtime when we have exception check enabled. AutoNestedHandledExceptionType autoNestedHandledExceptionType(ExceptionType_DisableCheck); #endif AutoCriticalSection autocs(&m_criticalSection); // TODO: with circular buffer now we can use much granular lock. // Create a copy of the message. size_t len = wcslen(msg); char16* buf = AnewArray(m_alloc, char16, len + 1); js_wmemcpy_s(buf, len + 1, msg, len + 1); // Copy with the NULL-terminator. // m_current is the next position to write to. if (m_log[m_current]) { Adelete(m_alloc, m_log[m_current]); } m_log[m_current] = buf; m_current = (m_current + 1) % m_capacity; }
// Record a URL (deep-copied) with timestamp and thread id, appending the
// new record to the global list under s_cs. Returns the new record.
LeakReport::UrlRecord * LeakReport::LogUrl(char16 const * url, void * globalObject)
{
    UrlRecord * record = NoCheckHeapNewStruct(UrlRecord);

    // Deep-copy the url; length counts the NULL terminator.
    size_t length = wcslen(url) + 1;
    char16* urlCopy = NoCheckHeapNewArray(char16, length);
    js_memcpy_s(urlCopy, (length - 1) * sizeof(char16), url, (length - 1) * sizeof(char16));
    urlCopy[length - 1] = _u('\0');
    record->url = urlCopy;

#if _MSC_VER
    record->time = _time64(NULL);
#else
    record->time = time(NULL);
#endif
    record->tid = ::GetCurrentThreadId();
    record->next = nullptr;
    record->scriptEngine = nullptr;
    record->globalObject = globalObject;

    // Append at the tail of the singly linked record list.
    AutoCriticalSection autocs(&s_cs);
    if (LeakReport::urlRecordHead == nullptr)
    {
        Assert(LeakReport::urlRecordTail == nullptr);
        LeakReport::urlRecordHead = record;
    }
    else
    {
        LeakReport::urlRecordTail->next = record;
    }
    LeakReport::urlRecordTail = record;

    return record;
}
void Output::DirectPrint(wchar_t const * string) { AutoCriticalSection autocs(&s_critsect); WORD oldValue = 0; BOOL restoreColor = FALSE; HANDLE hConsole = NULL; if (Output::s_hasColor) { _CONSOLE_SCREEN_BUFFER_INFO info; hConsole = GetStdHandle(STD_OUTPUT_HANDLE); if (hConsole && GetConsoleScreenBufferInfo(hConsole, &info)) { oldValue = info.wAttributes; restoreColor = SetConsoleTextAttribute(hConsole, Output::s_color); } } fwprintf(stdout, L"%s", string); if (restoreColor) { SetConsoleTextAttribute(hConsole, oldValue); } }
// RAII guard that opts the current thread out of the process's dynamic-code
// prohibition (JIT clamp) for its lifetime, when enable is true and the
// process policy allows thread opt-out. Sets `enabled` so the destructor
// knows whether anything must be undone.
AutoEnableDynamicCodeGen::AutoEnableDynamicCodeGen(bool enable) : enabled(false)
{
    if (enable == false)
    {
        return;
    }

    //
    // Snap the dynamic code generation policy for this process so that we
    // don't need to resolve APIs and query it each time. We expect the policy
    // to have been established upfront.
    //
    if (processPolicyObtained == false)
    {
        // Double-checked: re-test under the lock before resolving the APIs.
        AutoCriticalSection autocs(&processPolicyCS);

        if (processPolicyObtained == false)
        {
            PGET_PROCESS_MITIGATION_POLICY_PROC GetProcessMitigationPolicyProc = nullptr;

            // Resolve the mitigation-policy / thread-information APIs
            // dynamically; they are absent on older OS versions.
            HMODULE module = GetModuleHandleW(_u("api-ms-win-core-processthreads-l1-1-3.dll"));

            if (module != nullptr)
            {
                GetProcessMitigationPolicyProc = (PGET_PROCESS_MITIGATION_POLICY_PROC) GetProcAddress(module, "GetProcessMitigationPolicy");
                SetThreadInformationProc = (PSET_THREAD_INFORMATION_PROC) GetProcAddress(module, "SetThreadInformation");
                GetThreadInformationProc = (PGET_THREAD_INFORMATION_PROC) GetProcAddress(module, "GetThreadInformation");
            }

            // If the query is unavailable or fails, assume dynamic code is
            // not prohibited.
            if ((GetProcessMitigationPolicyProc == nullptr) ||
                (!GetProcessMitigationPolicyProc(GetCurrentProcess(), ProcessDynamicCodePolicy, (PPROCESS_MITIGATION_DYNAMIC_CODE_POLICY) &processPolicy, sizeof(processPolicy))))
            {
                processPolicy.ProhibitDynamicCode = 0;
            }

            processPolicyObtained = true;
        }
    }

    //
    // The process is not prohibiting dynamic code or does not allow threads
    // to opt out. In either case, return to the caller.
    //
    // N.B. It is OK that this policy is mutable at runtime. If a process
    // really does not allow thread opt-out, then the call below will fail
    // benignly.
    //
    if ((processPolicy.ProhibitDynamicCode == 0) || (processPolicy.AllowThreadOptOut == 0))
    {
        return;
    }

    if (SetThreadInformationProc == nullptr || GetThreadInformationProc == nullptr)
    {
        return;
    }

    //
    // If dynamic code is already allowed for this thread, then don't attempt to allow it again.
    //
    DWORD threadPolicy;

    if ((GetThreadInformationProc(GetCurrentThread(), ThreadDynamicCodePolicy, &threadPolicy, sizeof(DWORD))) &&
        (threadPolicy == THREAD_DYNAMIC_CODE_ALLOW))
    {
        return;
    }

    // Opt this thread out; the destructor is expected to revert this since
    // we record enabled = true only after a successful transition.
    threadPolicy = THREAD_DYNAMIC_CODE_ALLOW;
    BOOL result = SetThreadInformationProc(GetCurrentThread(), ThreadDynamicCodePolicy, &threadPolicy, sizeof(DWORD));
    Assert(result);

    enabled = true;
}
// Commit (or hand back uncommitted) pages inside the pre-reserved region.
// When lpAddress is null, a free run of allocation-granularity segments is
// located via the freeSegments bit vector; otherwise lpAddress must already
// lie within the region. Returns the committed address or nullptr when the
// region is exhausted.
LPVOID PreReservedVirtualAllocWrapper::Alloc(LPVOID lpAddress, size_t dwSize, DWORD allocationType, DWORD protectFlags, bool isCustomHeapAllocation)
{
    Assert(this);
    AssertMsg(isCustomHeapAllocation, "PreReservation used for allocations other than CustomHeap?");
    AssertMsg(AutoSystemInfo::Data.IsCFGEnabled() || PHASE_FORCE1(Js::PreReservedHeapAllocPhase), "PreReservation without CFG ?");
    Assert(dwSize != 0);

    {
        AutoCriticalSection autocs(&this->cs);
        //Return nullptr, if no space to Reserve
        if (EnsurePreReservedRegionInternal() == nullptr)
        {
            PreReservedHeapTrace(_u("No space to pre-reserve memory with %d pages. Returning NULL\n"), PreReservedAllocationSegmentCount * AutoSystemInfo::Data.GetAllocationGranularityPageCount());
            return nullptr;
        }

        char * addressToReserve = nullptr;

        uint freeSegmentsBVIndex = BVInvalidIndex;
        size_t requestedNumOfSegments = dwSize / (AutoSystemInfo::Data.GetAllocationGranularityPageSize());
        Assert(requestedNumOfSegments <= MAXUINT32);

        if (lpAddress == nullptr)
        {
            // Caller didn't pin an address: search the bit vector for a run
            // of requestedNumOfSegments consecutive free segments.
            Assert(requestedNumOfSegments != 0);
            AssertMsg(dwSize % AutoSystemInfo::Data.GetAllocationGranularityPageSize() == 0, "dwSize should be aligned with Allocation Granularity");

            do
            {
                freeSegmentsBVIndex = freeSegments.GetNextBit(freeSegmentsBVIndex + 1);
                //Return nullptr, if we don't have free/decommit pages to allocate
                if ((freeSegments.Length() - freeSegmentsBVIndex < requestedNumOfSegments) ||
                    freeSegmentsBVIndex == BVInvalidIndex)
                {
                    PreReservedHeapTrace(_u("No more space to commit in PreReserved Memory region.\n"));
                    return nullptr;
                }
            } while (!freeSegments.TestRange(freeSegmentsBVIndex, static_cast<uint>(requestedNumOfSegments)));

            uint offset = freeSegmentsBVIndex * AutoSystemInfo::Data.GetAllocationGranularityPageSize();
            addressToReserve = (char*) preReservedStartAddress + offset;

            //Check if the region is not already in MEM_COMMIT state.
            MEMORY_BASIC_INFORMATION memBasicInfo;
            size_t bytes = VirtualQuery(addressToReserve, &memBasicInfo, sizeof(memBasicInfo));
            if (bytes == 0
                || memBasicInfo.RegionSize < requestedNumOfSegments * AutoSystemInfo::Data.GetAllocationGranularityPageSize()
                || memBasicInfo.State == MEM_COMMIT)
            {
                // Bit vector said "free" but the OS disagrees: corrupted
                // bookkeeping is a fatal condition.
                CustomHeap_BadPageState_fatal_error((ULONG_PTR)this);
                return nullptr;
            }
        }
        else
        {
            //Check If the lpAddress is within the range of the preReserved Memory Region
            Assert(((char*) lpAddress) >= (char*) preReservedStartAddress || ((char*) lpAddress + dwSize) < GetPreReservedEndAddress());

            addressToReserve = (char*) lpAddress;
            freeSegmentsBVIndex = (uint) ((addressToReserve - (char*) preReservedStartAddress) / AutoSystemInfo::Data.GetAllocationGranularityPageSize());
#if DBG
            // In debug builds verify the targeted segments are marked in-use.
            uint numOfSegments = (uint)ceil((double)dwSize / (double)AutoSystemInfo::Data.GetAllocationGranularityPageSize());
            Assert(numOfSegments != 0);
            Assert(freeSegmentsBVIndex + numOfSegments - 1 < freeSegments.Length());
            Assert(!freeSegments.TestRange(freeSegmentsBVIndex, numOfSegments));
#endif
        }

        AssertMsg(freeSegmentsBVIndex < PreReservedAllocationSegmentCount, "Invalid BitVector index calculation?");
        AssertMsg(dwSize % AutoSystemInfo::PageSize == 0, "COMMIT is managed at AutoSystemInfo::PageSize granularity");

        char * allocatedAddress = nullptr;

        if ((allocationType & MEM_COMMIT) != 0)
        {
#if defined(ENABLE_JIT_CLAMP)
            // Temporarily allow dynamic code generation for this commit.
            AutoEnableDynamicCodeGen enableCodeGen;
#endif

#if defined(_CONTROL_FLOW_GUARD)
            if (AutoSystemInfo::Data.IsCFGEnabled())
            {
                DWORD oldProtect = 0;
                DWORD allocProtectFlags = 0;

                if (AutoSystemInfo::Data.IsCFGEnabled())
                {
                    // Commit with CFG call targets marked invalid, then
                    // immediately drop to the caller's protection.
                    allocProtectFlags = PAGE_EXECUTE_RW_TARGETS_INVALID;
                }
                else
                {
                    allocProtectFlags = PAGE_EXECUTE_READWRITE;
                }

                allocatedAddress = (char *)VirtualAlloc(addressToReserve, dwSize, MEM_COMMIT, allocProtectFlags);
                AssertMsg(allocatedAddress != nullptr, "If no space to allocate, then how did we fetch this address from the tracking bit vector?");
                VirtualProtect(allocatedAddress, dwSize, protectFlags, &oldProtect);
                AssertMsg(oldProtect == (PAGE_EXECUTE_READWRITE), "CFG Bitmap gets allocated and bits will be set to invalid only upon passing these flags.");
            }
            else
#endif
            {
                allocatedAddress = (char *)VirtualAlloc(addressToReserve, dwSize, MEM_COMMIT, protectFlags);
            }
        }
        else
        {
            // Just return the uncommitted address if we didn't ask to commit it.
            allocatedAddress = addressToReserve;
        }

        // Keep track of the committed pages within the preReserved Memory Region
        if (lpAddress == nullptr && allocatedAddress != nullptr)
        {
            Assert(allocatedAddress == addressToReserve);
            Assert(requestedNumOfSegments != 0);
            freeSegments.ClearRange(freeSegmentsBVIndex, static_cast<uint>(requestedNumOfSegments));
        }

        PreReservedHeapTrace(_u("MEM_COMMIT: StartAddress: 0x%p of size: 0x%x * 0x%x bytes \n"), allocatedAddress, requestedNumOfSegments, AutoSystemInfo::Data.GetAllocationGranularityPageSize());
        return allocatedAddress;
    }
}
// Containment check: delegate to the allocation heap while holding the
// manager's critical section (real or no-op depending on the SyncObject
// policy — presumably fake for single-threaded managers; confirm at the
// AutoRealOrFakeCriticalSection definition).
bool EmitBufferManager<SyncObject>::IsInHeap(__in void* address)
{
    AutoRealOrFakeCriticalSection<SyncObject> autocs(&this->criticalSection);
    return this->allocationHeap.IsInHeap(address);
}
// True when the address belongs either to the pre-reserved heap region or
// to this heap's page allocator. Checked under the heap lock.
bool Heap::IsInRange(__in void* address)
{
    AutoCriticalSection autocs(&this->cs);
    if (this->preReservedHeapPageAllocator.GetVirtualAllocator()->IsInRange(address))
    {
        return true;
    }
    return this->pageAllocator.IsAddressFromAllocator(address);
}