// Grows the LdElem or StElem polymorphic inline cache of this PropertyString
// to the next cache size, migrating the existing entries into the new cache.
//
// isLdElem: true to grow the LdElem cache, false for the StElem cache.
// Returns the newly allocated, larger cache (also installed on this object).
PolymorphicInlineCache * PropertyString::CreateBiggerPolymorphicInlineCache(bool isLdElem)
{
    PolymorphicInlineCache * oldCache = isLdElem ? GetLdElemInlineCache() : GetStElemInlineCache();
    Assert(oldCache && oldCache->CanAllocateBigger());

    uint16 oldSize = oldCache->GetSize();
    uint16 newSize = PolymorphicInlineCache::GetNextSize(oldSize);
    Assert(newSize > oldSize);

    PolymorphicInlineCache * biggerCache = ScriptContextPolymorphicInlineCache::New(newSize, GetLibrary());

    // Carry the existing cache entries over so no inline cache state is lost.
    oldCache->CopyTo(this->propertyRecord->GetPropertyId(), GetScriptContext(), biggerCache);

    if (isLdElem)
    {
        this->ldElemInlineCache = biggerCache;
    }
    else
    {
        this->stElemInlineCache = biggerCache;
    }

#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
    if (PHASE_VERBOSE_TRACE1(Js::PolymorphicInlineCachePhase) || PHASE_TRACE1(PropertyStringCachePhase))
    {
        Output::Print(_u("PropertyString '%s' : Bigger PIC, oldSize = %d, newSize = %d\n"), GetString(), oldSize, newSize);
    }
#endif
    return biggerCache;
}
bool DynamicObject::DeoptimizeObjectHeaderInlining() { if(!IsObjectHeaderInlinedTypeHandler()) { return false; } if (PHASE_TRACE1(Js::ObjectHeaderInliningPhase)) { Output::Print(_u("ObjectHeaderInlining: De-optimizing the object.\n")); Output::Flush(); } PathTypeHandlerBase *const oldTypeHandler = PathTypeHandlerBase::FromTypeHandler(GetTypeHandler()); SimplePathTypeHandler *const newTypeHandler = oldTypeHandler->DeoptimizeObjectHeaderInlining(GetLibrary()); const PropertyIndex newInlineSlotCapacity = newTypeHandler->GetInlineSlotCapacity(); DynamicTypeHandler::AdjustSlots( this, newInlineSlotCapacity, newTypeHandler->GetSlotCapacity() - newInlineSlotCapacity); DynamicType *const newType = DuplicateType(); newType->typeHandler = newTypeHandler; newType->ShareType(); type = newType; return true; }
bool TestEtwEventSink::Load() { wchar_t const * dllname = Js::Configuration::Global.flags.TestEtwDll; if(!dllname) { return false; } HMODULE hModule = ::LoadLibraryW(dllname); if (hModule == nullptr) { Output::Print(L"ERROR: Unable to load ETW event sink %s\n", dllname); Js::Throw::FatalInternalError(); } CreateEventSink procAddress = (CreateEventSink)::GetProcAddress(hModule, CreateEventSinkProcName); if (procAddress == nullptr) { Output::Print(L"ERROR: Unable to get function %S from dll %s\n", CreateEventSinkProcName, dllname); Js::Throw::FatalInternalError(); } // CONSIDER: pass null and skip rundown testing (if a command line switch is present). Instance = procAddress(&EtwTrace::PerformRundown, PHASE_TRACE1(Js::EtwPhase)); if (Instance == nullptr) { Output::Print(L"ERROR: Failed to create ETW event sink from dll %s\n", dllname); Js::Throw::FatalInternalError(); } return true; }
// Traces an asm.js temporary-register allocation ("+") or deallocation ("-")
// for register slot 'loc' when the AsmjsTmpRegisterAllocation phase trace is on.
void RegisterSpace::PrintTmpRegisterAllocation(RegSlot loc, bool deallocation)
{
    if (!PHASE_TRACE1(Js::AsmjsTmpRegisterAllocationPhase))
    {
        return;
    }
    char16 typeName[16];
    GetTypeDebugName(mType, typeName, 16, true);
    Output::Print(_u("%s%s %d\n"), deallocation ? _u("-") : _u("+"), typeName, loc);
}
// targetAddr: target address // targetStartAddr: target start address, some fied might reference to middle of another data chunk, like outParamOffsets // startAddress: current data start address // addrToFixup: address that currently pointing to dataAddr, which need to be updated void NativeCodeData::AddFixupEntry(void* targetAddr, void* targetStartAddr, void* addrToFixup, void* startAddress, DataChunk * chunkList) { Assert(addrToFixup >= startAddress); Assert(((__int64)addrToFixup) % sizeof(void*) == 0); if (targetAddr == nullptr) { return; } Assert(targetStartAddr); unsigned int inDataOffset = (unsigned int)((char*)targetAddr - (char*)targetStartAddr); DataChunk* targetChunk = NativeCodeData::GetDataChunk(targetStartAddr); Assert(targetChunk->len >= inDataOffset); #if DBG bool foundTargetChunk = false; while (chunkList) { foundTargetChunk |= (chunkList == targetChunk); chunkList = chunkList->next; } AssertMsg(foundTargetChunk, "current pointer is not allocated with NativeCodeData allocator?"); // change to valid check instead of assertion? #endif DataChunk* chunk = NativeCodeData::GetDataChunk(startAddress); NativeDataFixupEntry* entry = (NativeDataFixupEntry*)midl_user_allocate(sizeof(NativeDataFixupEntry)); if (!entry) { Js::Throw::OutOfMemory(); } __analysis_assume(entry); entry->addrOffset = (unsigned int)((__int64)addrToFixup - (__int64)startAddress); Assert(entry->addrOffset <= chunk->len - sizeof(void*)); entry->targetTotalOffset = targetChunk->offset + inDataOffset; entry->next = chunk->fixupList; chunk->fixupList = entry; #if DBG if (PHASE_TRACE1(Js::NativeCodeDataPhase)) { Output::Print(L"NativeCodeData Add Fixup: %p(%p+%d, chunk:%p) --> %p(chunk:%p) %S\n", addrToFixup, startAddress, entry->addrOffset, (void*)chunk, targetAddr, (void*)targetChunk, chunk->dataType); } #endif }
void NativeCodeData::AddFixupEntryForPointerArray(void* startAddress, DataChunk * chunkList) { DataChunk* chunk = NativeCodeData::GetDataChunk(startAddress); Assert(chunk->len % sizeof(void*) == 0); for (unsigned int i = 0; i < chunk->len / sizeof(void*); i++) { size_t offset = i * sizeof(void*); void* targetAddr = *(void**)((char*)startAddress + offset); if (targetAddr == nullptr) { continue; } DataChunk* targetChunk = NativeCodeData::GetDataChunk(targetAddr); #if DBG bool foundTargetChunk = false; DataChunk* chunk1 = chunkList; while (chunk1 && !foundTargetChunk) { foundTargetChunk = (chunk1 == targetChunk); chunk1 = chunk1->next; } AssertMsg(foundTargetChunk, "current pointer is not allocated with NativeCodeData allocator?"); // change to valid check instead of assertion? #endif NativeDataFixupEntry* entry = (NativeDataFixupEntry*)midl_user_allocate(sizeof(NativeDataFixupEntry)); if (!entry) { Js::Throw::OutOfMemory(); } __analysis_assume(entry); entry->addrOffset = (unsigned int)offset; entry->targetTotalOffset = targetChunk->offset; entry->next = chunk->fixupList; chunk->fixupList = entry; #if DBG if (PHASE_TRACE1(Js::NativeCodeDataPhase)) { Output::Print(L"NativeCodeData Add Fixup: %p[%d](+%d, chunk:%p) --> %p(chunk:%p) %S\n", startAddress, i, entry->addrOffset, (void*)chunk, targetAddr, (void*)targetChunk, chunk->dataType); } #endif } }
// Registers (or deregisters) callTargetAddress as a valid indirect-call
// target with Control Flow Guard for the target process's page containing it.
// No-op unless CFG is compiled in (_CONTROL_FLOW_GUARD) and enabled at runtime.
//
// callTargetAddress: address of the (JIT-emitted) call target; expected to be
//                    16-byte aligned (with the thumb bit set on ARM).
// isSetValid:        true to mark the target valid, false to revoke it.
void ThreadContextInfo::SetValidCallTargetForCFG(PVOID callTargetAddress, bool isSetValid)
{
#ifdef _CONTROL_FLOW_GUARD
    if (IsCFGEnabled())
    {
#ifdef _M_ARM
        AssertMsg(((uintptr_t)callTargetAddress & 0x1) != 0, "on ARM we expect the thumb bit to be set on anything we use as a call target");
        AssertMsg(IS_16BYTE_ALIGNED((uintptr_t)callTargetAddress & ~0x1), "callTargetAddress is not 16-byte page aligned?");
#else
        AssertMsg(IS_16BYTE_ALIGNED(callTargetAddress), "callTargetAddress is not 16-byte page aligned?");
#endif

        // If SetProcessValidCallTargets is not allowed by global policy (e.g.
        // OOP JIT is in use in the client), then generate a fast fail
        // exception as state has been corrupted and attempt is being made to
        // illegally call SetProcessValidCallTargets.
        if (!GlobalSecurityPolicy::IsSetProcessValidCallTargetsAllowed())
        {
            RaiseFailFastException(nullptr, nullptr, FAIL_FAST_GENERATE_EXCEPTION_ADDRESS);
        }

        // Split the target address into its page base and in-page offset,
        // which is the granularity SetProcessCallTargets works at.
        PVOID startAddressOfPage = (PVOID)(PAGE_START_ADDR(callTargetAddress));
        size_t codeOffset = OFFSET_ADDR_WITHIN_PAGE(callTargetAddress);

        CFG_CALL_TARGET_INFO callTargetInfo[1];
        callTargetInfo[0].Offset = codeOffset;
        callTargetInfo[0].Flags = (isSetValid ? CFG_CALL_TARGET_VALID : 0);

        AssertMsg((size_t)callTargetAddress - (size_t)startAddressOfPage <= AutoSystemInfo::PageSize - 1, "Only last bits corresponding to PageSize should be masked");
        AssertMsg((size_t)startAddressOfPage + (size_t)codeOffset == (size_t)callTargetAddress, "Wrong masking of address?");

        BOOL isCallTargetRegistrationSucceed = GetWinCoreMemoryLibrary()->SetProcessCallTargets(GetProcessHandle(), startAddressOfPage, AutoSystemInfo::PageSize, 1, callTargetInfo);
        if (!isCallTargetRegistrationSucceed)
        {
            DWORD gle = GetLastError();
            if (gle == ERROR_COMMITMENT_LIMIT)
            {
                //Throw OOM, if there is not enough virtual memory for paging (required for CFG BitMap)
                Js::Throw::OutOfMemory();
            }
            else if (gle == ERROR_ACCESS_DENIED)
            {
                // When this error is set, the target process may be exiting and thus cannot proceed with
                // JIT output. Throw this exception to safely abort this call.
                throw Js::OperationAbortedException();
            }
            else
            {
                Js::Throw::InternalError();
            }
        }

#if DBG
        // In-proc JIT only: verify the registration by exercising the CFG
        // check against the address we just marked valid.
        if (isSetValid && !JITManager::GetJITManager()->IsOOPJITEnabled())
        {
            _guard_check_icall((uintptr_t)callTargetAddress);
        }

        if (PHASE_TRACE1(Js::CFGPhase))
        {
            if (!isSetValid)
            {
                Output::Print(_u("DEREGISTER:"));
            }
            Output::Print(_u("CFGRegistration: StartAddr: 0x%p , Offset: 0x%x, TargetAddr: 0x%x \n"), (char*)startAddressOfPage, callTargetInfo[0].Offset, ((size_t)startAddressOfPage + (size_t)callTargetInfo[0].Offset));
            Output::Flush();
        }
#endif
    }
#endif // _CONTROL_FLOW_GUARD
}
// Attempts to store propertyValue for propertyId on 'object' using this
// TypePropertyCache. Returns false on a cache miss (caller falls back to the
// slower path). On a hit, writes the value directly into the object's inline
// or aux slot and returns true.
//
// object:            the object receiving the store
// propertyId:        property being set
// propertyValue:     value to store (marshaled if contexts differ)
// requestContext:    script context of the requesting code
// operationInfo:     optional out-info; filled only on the cross-context path
// propertyValueInfo: carries the caller's inline cache to populate on a
//                    same-context hit
bool TypePropertyCache::TrySetProperty(
    RecyclableObject *const object,
    const PropertyId propertyId,
    Var propertyValue,
    ScriptContext *const requestContext,
    PropertyCacheOperationInfo *const operationInfo,
    PropertyValueInfo *const propertyValueInfo)
{
    Assert(propertyValueInfo);
    Assert(propertyValueInfo->GetInlineCache() || propertyValueInfo->GetPolymorphicInlineCache());

    PropertyIndex propertyIndex;
    bool isInlineSlot;
    if(!TryGetIndexForStore(propertyId, &propertyIndex, &isInlineSlot))
    {
        // Cache miss for a store of this property.
#if DBG_DUMP
        if(PHASE_TRACE1(TypePropertyCachePhase))
        {
            CacheOperators::TraceCache(
                static_cast<InlineCache *>(nullptr),
                L"TypePropertyCache set miss",
                propertyId,
                requestContext,
                object);
        }
#endif
        return false;
    }

#if DBG_DUMP
    if(PHASE_TRACE1(TypePropertyCachePhase))
    {
        CacheOperators::TraceCache(
            static_cast<InlineCache *>(nullptr),
            L"TypePropertyCache set hit",
            propertyId,
            requestContext,
            object);
    }
#endif

    // The cached slot index must agree with the type handler's view of the
    // property, and the property must be directly storable (not fixed).
    Assert(!object->IsFixedProperty(propertyId));
    Assert(
        (
            DynamicObject
                ::FromVar(object)
                ->GetDynamicType()
                ->GetTypeHandler()
                ->InlineOrAuxSlotIndexToPropertyIndex(propertyIndex, isInlineSlot)
        ) ==
        object->GetPropertyIndex(propertyId));
    Assert(object->CanStorePropertyValueDirectly(propertyId, false));

    ScriptContext *const objectScriptContext = object->GetScriptContext();
    if(objectScriptContext != requestContext)
    {
        // Cross-context store: marshal the value into the object's context
        // before writing it into the slot.
        propertyValue = CrossSite::MarshalVar(objectScriptContext, propertyValue);
    }

    if(isInlineSlot)
    {
        DynamicObject::FromVar(object)->SetInlineSlot(SetSlotArguments(propertyId, propertyIndex, propertyValue));
    }
    else
    {
        DynamicObject::FromVar(object)->SetAuxSlot(SetSlotArguments(propertyId, propertyIndex, propertyValue));
    }

    if(objectScriptContext == requestContext)
    {
        // Same-context hit: also populate the caller's inline cache so future
        // stores skip this cache.
        CacheOperators::Cache<false, false, false>(
            false,
            DynamicObject::FromVar(object),
            false,
            object->GetType(),
            nullptr,
            propertyId,
            propertyIndex,
            isInlineSlot,
            false,
            0,
            propertyValueInfo,
            requestContext);
        return true;
    }

    // Cross-context hit: don't populate the inline cache; just report how the
    // store was performed.
    if(operationInfo)
    {
        operationInfo->cacheType = CacheType_TypeProperty;
        operationInfo->slotType = isInlineSlot ? SlotType_Inline : SlotType_Aux;
    }
    return true;
}
// Attempts to load propertyId from propertyObject using this
// TypePropertyCache. Returns false on a cache miss. On a hit, reads the value
// from either the object's own slot or the cached prototype's slot, marshals
// it cross-context when needed, and returns true with *propertyValue set.
//
// checkMissing:      whether the cache may report cached "missing" properties
// propertyObject:    object the load was requested on
// propertyValue:     out — receives the loaded value
// requestContext:    script context of the requesting code
// operationInfo:     optional out-info; filled only on cross-context paths
// propertyValueInfo: carries the caller's inline cache to populate on a
//                    same-context hit
bool TypePropertyCache::TryGetProperty(
    const bool checkMissing,
    RecyclableObject *const propertyObject,
    const PropertyId propertyId,
    Var *const propertyValue,
    ScriptContext *const requestContext,
    PropertyCacheOperationInfo *const operationInfo,
    PropertyValueInfo *const propertyValueInfo)
{
    Assert(propertyValueInfo);
    Assert(propertyValueInfo->GetInlineCache() || propertyValueInfo->GetPolymorphicInlineCache());

    PropertyIndex propertyIndex;
    DynamicObject *prototypeObjectWithProperty;
    bool isInlineSlot, isMissing;
    if(!TryGetIndexForLoad(
            checkMissing,
            propertyId,
            &propertyIndex,
            &isInlineSlot,
            &isMissing,
            &prototypeObjectWithProperty))
    {
        // Cache miss for a load of this property.
#if DBG_DUMP
        if(PHASE_TRACE1(TypePropertyCachePhase))
        {
            CacheOperators::TraceCache(
                static_cast<InlineCache *>(nullptr),
                L"TypePropertyCache get miss",
                propertyId,
                requestContext,
                propertyObject);
        }
#endif
        return false;
    }

    if(!prototypeObjectWithProperty)
    {
        // Hit on the object's own property (not found via a prototype).
#if DBG_DUMP
        if(PHASE_TRACE1(TypePropertyCachePhase))
        {
            CacheOperators::TraceCache(
                static_cast<InlineCache *>(nullptr),
                L"TypePropertyCache get hit",
                propertyId,
                requestContext,
                propertyObject);
        }
#endif

#if DBG
        const PropertyIndex typeHandlerPropertyIndex =
            DynamicObject
                ::FromVar(propertyObject)
                ->GetDynamicType()
                ->GetTypeHandler()
                ->InlineOrAuxSlotIndexToPropertyIndex(propertyIndex, isInlineSlot);
        Assert(typeHandlerPropertyIndex == propertyObject->GetPropertyIndex(propertyId));
#endif

        *propertyValue =
            isInlineSlot
                ? DynamicObject::FromVar(propertyObject)->GetInlineSlot(propertyIndex)
                : DynamicObject::FromVar(propertyObject)->GetAuxSlot(propertyIndex);
        if(propertyObject->GetScriptContext() == requestContext)
        {
            Assert(*propertyValue == JavascriptOperators::GetProperty(propertyObject, propertyId, requestContext));

            // Same-context hit: also populate the caller's inline cache.
            CacheOperators::Cache<false, true, false>(
                false,
                DynamicObject::FromVar(propertyObject),
                false,
                propertyObject->GetType(),
                nullptr,
                propertyId,
                propertyIndex,
                isInlineSlot,
                false,
                0,
                propertyValueInfo,
                requestContext);
            return true;
        }

        *propertyValue = CrossSite::MarshalVar(requestContext, *propertyValue);
        // Cannot use GetProperty and compare results since they may not compare equal when they're marshaled
        if(operationInfo)
        {
            operationInfo->cacheType = CacheType_TypeProperty;
            operationInfo->slotType = isInlineSlot ? SlotType_Inline : SlotType_Aux;
        }
        return true;
    }

    // Hit through a prototype object that holds the property.
#if DBG_DUMP
    if(PHASE_TRACE1(TypePropertyCachePhase))
    {
        CacheOperators::TraceCache(
            static_cast<InlineCache *>(nullptr),
            L"TypePropertyCache get hit prototype",
            propertyId,
            requestContext,
            propertyObject);
    }
#endif

#if DBG
    const PropertyIndex typeHandlerPropertyIndex =
        prototypeObjectWithProperty
            ->GetDynamicType()
            ->GetTypeHandler()
            ->InlineOrAuxSlotIndexToPropertyIndex(propertyIndex, isInlineSlot);
    Assert(typeHandlerPropertyIndex == prototypeObjectWithProperty->GetPropertyIndex(propertyId));
#endif

    *propertyValue =
        isInlineSlot
            ? prototypeObjectWithProperty->GetInlineSlot(propertyIndex)
            : prototypeObjectWithProperty->GetAuxSlot(propertyIndex);
    if(prototypeObjectWithProperty->GetScriptContext() == requestContext)
    {
        Assert(*propertyValue == JavascriptOperators::GetProperty(propertyObject, propertyId, requestContext));

        if(propertyObject->GetScriptContext() != requestContext)
        {
            // The prototype is in the request context but the instance object
            // is not — return the value without populating the inline cache.
            return true;
        }

        CacheOperators::Cache<false, true, false>(
            true,
            prototypeObjectWithProperty,
            false,
            propertyObject->GetType(),
            nullptr,
            propertyId,
            propertyIndex,
            isInlineSlot,
            isMissing,
            0,
            propertyValueInfo,
            requestContext);
        return true;
    }

    *propertyValue = CrossSite::MarshalVar(requestContext, *propertyValue);
    // Cannot use GetProperty and compare results since they may not compare equal when they're marshaled
    if(operationInfo)
    {
        operationInfo->cacheType = CacheType_TypeProperty;
        operationInfo->slotType = isInlineSlot ? SlotType_Inline : SlotType_Aux;
    }
    return true;
}
// RPC entry point for an out-of-process JIT compilation request. Decodes the
// server-side script context, validates it is still alive, runs the back-end
// code generator on the supplied work item, and fills jitData for marshaling
// back to the client. Timing is traced under the BackEnd phase flags.
//
// binding:                  RPC binding handle (unused directly here)
// scriptContextInfoAddress: encoded pointer to the ServerScriptContext
// workItemData:             [in] serialized code-gen work item
// jitData:                  [out] JIT results; zeroed before use
HRESULT ServerRemoteCodeGen(
    /* [in] */ handle_t binding,
    /* [in] */ intptr_t scriptContextInfoAddress,
    /* [in] */ __RPC__in CodeGenWorkItemIDL *workItemData,
    /* [out] */ __RPC__out JITOutputIDL *jitData)
{
    AUTO_NESTED_HANDLED_EXCEPTION_TYPE(static_cast<ExceptionType>(ExceptionType_OutOfMemory | ExceptionType_StackOverflow));
    LARGE_INTEGER start_time = { 0 };
    if (PHASE_TRACE1(Js::BackEndPhase))
    {
        QueryPerformanceCounter(&start_time);
    }
    memset(jitData, 0, sizeof(JITOutputIDL));

    ServerScriptContext * scriptContextInfo = (ServerScriptContext*)DecodePointer((void*)scriptContextInfoAddress);

    if (scriptContextInfo == nullptr)
    {
        // Client passed a bogus (or stale) encoded pointer.
        Assert(false);
        return RPC_S_INVALID_ARG;
    }

    if (!ServerContextManager::IsScriptContextAlive(scriptContextInfo))
    {
        // The script context was already closed on the server side.
        Assert(false);
        return E_ACCESSDENIED;
    }

    // Keeps the context alive for the duration of this call.
    AutoReleaseContext<ServerScriptContext> autoScriptContext(scriptContextInfo);

    return ServerCallWrapper(scriptContextInfo, [&]() ->HRESULT
    {
        scriptContextInfo->UpdateGlobalObjectThisAddr(workItemData->globalThisAddr);
        ServerThreadContext * threadContextInfo = scriptContextInfo->GetThreadContext();

        NoRecoverMemoryJitArenaAllocator jitArena(L"JITArena", threadContextInfo->GetPageAllocator(), Js::Throw::OutOfMemory);
        JITTimeWorkItem * jitWorkItem = Anew(&jitArena, JITTimeWorkItem, workItemData);

        if (PHASE_VERBOSE_TRACE_RAW(Js::BackEndPhase, jitWorkItem->GetJITTimeInfo()->GetSourceContextId(), jitWorkItem->GetJITTimeInfo()->GetLocalFunctionId()))
        {
            // Report how long the request spent in marshaling, measured from
            // the client-recorded start time in the work item.
            LARGE_INTEGER freq;
            LARGE_INTEGER end_time;
            QueryPerformanceCounter(&end_time);
            QueryPerformanceFrequency(&freq);
            Output::Print(
                L"BackendMarshalIn - function: %s time:%8.6f mSec\r\n",
                jitWorkItem->GetJITFunctionBody()->GetDisplayName(),
                (((double)((end_time.QuadPart - workItemData->startTime)* (double)1000.0 / (double)freq.QuadPart))) / (1));
            Output::Flush();
        }

        auto profiler = scriptContextInfo->GetCodeGenProfiler();
#ifdef PROFILE_EXEC
        // Lazily initialize the code-gen profiler on first use.
        if (profiler && !profiler->IsInitialized())
        {
            profiler->Initialize(threadContextInfo->GetPageAllocator(), nullptr);
        }
#endif
        if (jitWorkItem->GetWorkItemData()->xProcNumberPageSegment)
        {
            // Copy the cross-process number page segment into RPC-allocated
            // output memory so it can be returned to the client.
            jitData->numberPageSegments = (XProcNumberPageSegment*)midl_user_allocate(sizeof(XProcNumberPageSegment));
            if (!jitData->numberPageSegments)
            {
                return E_OUTOFMEMORY;
            }
            __analysis_assume(jitData->numberPageSegments);
            memcpy_s(jitData->numberPageSegments, sizeof(XProcNumberPageSegment), jitWorkItem->GetWorkItemData()->xProcNumberPageSegment, sizeof(XProcNumberPageSegment));
        }

        // Run the actual back-end compilation.
        Func::Codegen(
            &jitArena, jitWorkItem, threadContextInfo, scriptContextInfo, jitData, nullptr, nullptr,
            jitWorkItem->GetPolymorphicInlineCacheInfo(), threadContextInfo->GetCodeGenAllocators(),
#if !FLOATVAR
            nullptr, // number allocator
#endif
            profiler, true);

#ifdef PROFILE_EXEC
        if (profiler && profiler->IsInitialized())
        {
            profiler->ProfilePrint(Js::Configuration::Global.flags.Profile.GetFirstPhase());
        }
#endif

        if (PHASE_VERBOSE_TRACE_RAW(Js::BackEndPhase, jitWorkItem->GetJITTimeInfo()->GetSourceContextId(), jitWorkItem->GetJITTimeInfo()->GetLocalFunctionId()))
        {
            LARGE_INTEGER freq;
            LARGE_INTEGER end_time;
            QueryPerformanceCounter(&end_time);
            QueryPerformanceFrequency(&freq);
            Output::Print(
                L"EndBackEndInner - function: %s time:%8.6f mSec\r\n",
                jitWorkItem->GetJITFunctionBody()->GetDisplayName(),
                (((double)((end_time.QuadPart - start_time.QuadPart)* (double)1000.0 / (double)freq.QuadPart))) / (1));
            Output::Flush();
        }

        LARGE_INTEGER out_time = { 0 };
        if (PHASE_TRACE1(Js::BackEndPhase))
        {
            // Record when the server finished, so the client can measure the
            // marshal-out leg of the round trip.
            QueryPerformanceCounter(&out_time);
            jitData->startTime = out_time.QuadPart;
        }

        return S_OK;
    });
}
// Re-shapes an object's slot storage for a new (inline, aux) capacity pair.
// Two cases:
//  1. Inline capacity unchanged: only the aux slot array grows; old aux slots
//     are copied into a new, larger array.
//  2. De-optimizing object-header inlining: the inline capacity shrinks by
//     exactly the header-inlinable slot count; the displaced header slots move
//     into the new aux array and the remaining inline slots shift in place.
//
// object:                the object whose slots are adjusted
// newInlineSlotCapacity: inline slot capacity of the new layout
// newAuxSlotCapacity:    aux slot capacity of the new layout (must exceed the
//                        old aux capacity in case 1)
void DynamicTypeHandler::AdjustSlots(
    DynamicObject *const object,
    const PropertyIndex newInlineSlotCapacity,
    const int newAuxSlotCapacity)
{
    Assert(object);

    // Allocate new aux slot array
    Recycler *const recycler = object->GetRecycler();
    TRACK_ALLOC_INFO(recycler, Var, Recycler, 0, newAuxSlotCapacity);
    Var *const newAuxSlots = reinterpret_cast<Var *>(recycler->AllocZero(newAuxSlotCapacity * sizeof(Var)));

    DynamicTypeHandler *const oldTypeHandler = object->GetTypeHandler();
    const PropertyIndex oldInlineSlotCapacity = oldTypeHandler->GetInlineSlotCapacity();
    if(oldInlineSlotCapacity == newInlineSlotCapacity)
    {
        // Case 1: growing only the aux slot array.
        const int oldAuxSlotCapacity = oldTypeHandler->GetSlotCapacity() - oldInlineSlotCapacity;
        Assert(oldAuxSlotCapacity < newAuxSlotCapacity);
        if(oldAuxSlotCapacity > 0)
        {
            // Copy aux slots to the new array
            Var *const oldAuxSlots = object->auxSlots;
            Assert(oldAuxSlots);
            int i = 0;
            do
            {
                newAuxSlots[i] = oldAuxSlots[i];
            } while(++i < oldAuxSlotCapacity);
#ifdef EXPLICIT_FREE_SLOTS
            recycler->ExplicitFreeNonLeaf(oldAuxSlots, oldAuxSlotCapacity * sizeof(Var));
#endif
        }
        object->auxSlots = newAuxSlots;
        return;
    }

    // An object header-inlined type handler is transitioning into one that is not. Some inline slots need to move, and
    // there are no old aux slots that need to be copied.
    Assert(oldTypeHandler->IsObjectHeaderInlinedTypeHandler());
    Assert(oldInlineSlotCapacity > newInlineSlotCapacity);
    Assert(oldInlineSlotCapacity - newInlineSlotCapacity == DynamicTypeHandler::GetObjectHeaderInlinableSlotCapacity());
    Assert(newAuxSlotCapacity >= DynamicTypeHandler::GetObjectHeaderInlinableSlotCapacity());

    // Move the last few inline slots into the aux slots
    if(PHASE_TRACE1(Js::ObjectHeaderInliningPhase))
    {
        Output::Print(_u("ObjectHeaderInlining: Moving inlined properties to aux slots.\n"));
        Output::Flush();
    }
    Var *const oldInlineSlots =
        reinterpret_cast<Var *>(
            reinterpret_cast<uintptr_t>(object) + DynamicTypeHandler::GetOffsetOfObjectHeaderInlineSlots());
    // The code below hard-codes moving exactly two slots; keep in sync with
    // GetObjectHeaderInlinableSlotCapacity().
    Assert(DynamicTypeHandler::GetObjectHeaderInlinableSlotCapacity() == 2);
    newAuxSlots[0] = oldInlineSlots[oldInlineSlotCapacity - 2];
    newAuxSlots[1] = oldInlineSlots[oldInlineSlotCapacity - 1];

    if(newInlineSlotCapacity > 0)
    {
        // Move the remaining inline slots such that none are object header-inlined. Copy backwards, as the two buffers may
        // overlap, with the new inline slot array starting beyond the start of the old inline slot array.
        if(PHASE_TRACE1(Js::ObjectHeaderInliningPhase))
        {
            Output::Print(_u("ObjectHeaderInlining: Moving inlined properties out of the object header.\n"));
            Output::Flush();
        }
        Var *const newInlineSlots = reinterpret_cast<Var *>(object + 1);
        PropertyIndex i = newInlineSlotCapacity;
        do
        {
            --i;
            newInlineSlots[i] = oldInlineSlots[i];
        } while(i > 0);
    }

    object->auxSlots = newAuxSlots;
    object->objectArray = nullptr;
}
//
// Loads the profile from the WININET cache
//
// Reads a previously saved startup-function bit vector for 'url' from
// profileDataCache. Returns true only when a profile matching the current
// engine version was fully read; any version mismatch or short read returns
// false. Takes (and keeps) a reference on profileDataCache regardless of the
// outcome.
//
bool SourceDynamicProfileManager::LoadFromProfileCache(IActiveScriptDataCache* profileDataCache, LPCWSTR url)
{
#ifdef ENABLE_WININET_PROFILE_DATA_CACHE
    AssertMsg(CONFIG_FLAG(WininetProfileCache), "Profile caching should be enabled for us to get here");
    Assert(profileDataCache);
    AssertMsg(!IsProfileLoadedFromWinInet(), "Duplicate profile cache loading?");

    // Keep a copy of this and addref it
    profileDataCache->AddRef();
    this->profileDataCache = profileDataCache;

    IStream* readStream;
    HRESULT hr = profileDataCache->GetReadDataStream(&readStream);
    if(SUCCEEDED(hr))
    {
        Assert(readStream != nullptr);
        // stream reader owns the stream and will close it on destruction
        SimpleStreamReader streamReader(readStream);
        DWORD jscriptMajorVersion;
        DWORD jscriptMinorVersion;
        if(FAILED(AutoSystemInfo::GetJscriptFileVersion(&jscriptMajorVersion, &jscriptMinorVersion)))
        {
            return false;
        }

        // Reject profiles written by a different engine version.
        DWORD majorVersion;
        if(!streamReader.Read(&majorVersion) || majorVersion != jscriptMajorVersion)
        {
            return false;
        }

        DWORD minorVersion;
        if(!streamReader.Read(&minorVersion) || minorVersion != jscriptMinorVersion)
        {
            return false;
        }

        // The MAX_FUNCTION_COUNT cap guards against corrupt cache entries.
        uint numberOfFunctions;
        if(!streamReader.Read(&numberOfFunctions) || numberOfFunctions > MAX_FUNCTION_COUNT)
        {
            return false;
        }
        BVFixed* functions = BVFixed::New(numberOfFunctions, this->recycler);
        if(!streamReader.ReadArray(functions->GetData(), functions->WordCount()))
        {
            return false;
        }
        this->cachedStartupFunctions = functions;

        OUTPUT_TRACE(Js::DynamicProfilePhase, L"Profile load succeeded. Function count: %d %s\n", numberOfFunctions, url);
#if DBG_DUMP
        if(PHASE_TRACE1(Js::DynamicProfilePhase) && Js::Configuration::Global.flags.Verbose)
        {
            OUTPUT_VERBOSE_TRACE(Js::DynamicProfilePhase, L"Profile loaded:\n");
            functions->Dump();
        }
#endif
        return true;
    }
    else if (hr == HRESULT_FROM_WIN32(ERROR_WRITE_PROTECT))
    {
        // Resource cannot be cached; remember so we don't try to save later.
        this->isNonCachableScript = true;
        OUTPUT_VERBOSE_TRACE(Js::DynamicProfilePhase, L"Profile load failed. Non-cacheable resource. %s\n", url);
    }
    else
    {
        OUTPUT_TRACE(Js::DynamicProfilePhase, L"Profile load failed. No read stream. %s\n", url);
    }
#endif
    return false;
}
// // Saves the profile to the WININET cache // uint SourceDynamicProfileManager::SaveToProfileCache() { AssertMsg(CONFIG_FLAG(WininetProfileCache), "Profile caching should be enabled for us to get here"); Assert(startupFunctions); uint bytesWritten = 0; #ifdef ENABLE_WININET_PROFILE_DATA_CACHE //TODO: Add some diffing logic to not write unless necessary IStream* writeStream; HRESULT hr = profileDataCache->GetWriteDataStream(&writeStream); if(FAILED(hr)) { return 0; } Assert(writeStream != nullptr); // stream writer owns the stream and will close it on destruction SimpleStreamWriter streamWriter(writeStream); DWORD jscriptMajorVersion; DWORD jscriptMinorVersion; if(FAILED(AutoSystemInfo::GetJscriptFileVersion(&jscriptMajorVersion, &jscriptMinorVersion))) { return 0; } if(!streamWriter.Write(jscriptMajorVersion)) { return 0; } if(!streamWriter.Write(jscriptMinorVersion)) { return 0; } if(!streamWriter.Write(startupFunctions->Length())) { return 0; } if(streamWriter.WriteArray(startupFunctions->GetData(), startupFunctions->WordCount())) { STATSTG stats; if(SUCCEEDED(writeStream->Stat(&stats, STATFLAG_NONAME))) { bytesWritten = stats.cbSize.LowPart; Assert(stats.cbSize.LowPart > 0); AssertMsg(stats.cbSize.HighPart == 0, "We should not be writing such long data that the high part is non-zero"); } hr = profileDataCache->SaveWriteDataStream(writeStream); if(FAILED(hr)) { return 0; } #if DBG_DUMP if(PHASE_TRACE1(Js::DynamicProfilePhase) && Js::Configuration::Global.flags.Verbose) { OUTPUT_VERBOSE_TRACE(Js::DynamicProfilePhase, L"Saved profile:\n"); startupFunctions->Dump(); } #endif } #endif return bytesWritten; }