//
// Helps determine whether a function should be speculatively jitted.
// This function is called from only one place, in a time-critical area, so
// be careful with it (moving it around once caused a roughly 5% perf
// regression in a test).
//
bool CodeGenWorkItem::ShouldSpeculativelyJit(uint byteCodeSizeGenerated) const
{
    if(!functionBody->DoFullJit())
    {
        return false;
    }

    byteCodeSizeGenerated += this->GetByteCodeCount();
    if(CONFIG_FLAG(ProfileBasedSpeculativeJit))
    {
        Assert(!CONFIG_ISENABLED(Js::NoDynamicProfileInMemoryCacheFlag));

        // JIT this now if we are under the speculation cap.
        return
            byteCodeSizeGenerated < (uint)CONFIG_FLAG(SpeculationCap) ||
            (
                byteCodeSizeGenerated < (uint)CONFIG_FLAG(ProfileBasedSpeculationCap) &&
                this->ShouldSpeculativelyJitBasedOnProfile()
            );
    }
    else
    {
        return byteCodeSizeGenerated < (uint)CONFIG_FLAG(SpeculationCap);
    }
}
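
For illustration, the profile-based branch of the cap logic above reduces to a small standalone predicate. A minimal sketch; the cap values are placeholders standing in for CONFIG_FLAG(SpeculationCap) and CONFIG_FLAG(ProfileBasedSpeculationCap), not ChakraCore's actual defaults:

#include <cstdint>

// Minimal sketch of the speculation-cap decision above. The cap values are
// illustrative placeholders, not the engine's real defaults.
static bool ShouldSpeculativelyJitSketch(uint32_t byteCodeSizeGenerated,
                                         bool profileSaysJit)
{
    const uint32_t speculationCap = 1000;              // stand-in for CONFIG_FLAG(SpeculationCap)
    const uint32_t profileBasedSpeculationCap = 10000; // stand-in for CONFIG_FLAG(ProfileBasedSpeculationCap)

    // Under the basic cap: always JIT. Between the two caps: JIT only if the
    // profile data supports it. At or above the profile-based cap: never.
    return byteCodeSizeGenerated < speculationCap ||
           (byteCodeSizeGenerated < profileBasedSpeculationCap && profileSaysJit);
}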
    BYTE* WebAssemblySharedArrayBuffer::AllocBuffer(uint32 length, uint32 maxLength)
    {
#if ENABLE_FAST_ARRAYBUFFER
        if (CONFIG_FLAG(WasmFastArray))
        {
            return (BYTE*)WasmVirtualAllocator(length);
        }
#endif
#ifdef _WIN32
        if (CONFIG_FLAG(WasmSharedArrayVirtualBuffer))
        {
            return (BYTE*)AllocWrapper(length, maxLength);
        }
#endif
        AssertOrFailFast(maxLength >= length);
        uint32 additionalSize = maxLength - length;
        if (additionalSize > 0)
        {
            // SharedArrayBuffer::Init already requested external memory for `length`; request the balance here
            if (!this->GetRecycler()->RequestExternalMemoryAllocation(additionalSize))
            {
                // Failed to request more memory
                return nullptr;
            }
        }
        // Allocate the full size of the buffer if we can't do VirtualAlloc
        return HeapNewNoThrowArray(BYTE, maxLength);
    }
void
SwitchIRBuilder::BuildCaseBrInstr(uint32 targetOffset)
{
    Assert(m_isAsmJs || m_profiledSwitchInstr);

    int start = 0;
    int end = m_caseNodes->Count() - 1;

    if (m_caseNodes->Count() <= CONFIG_FLAG(MaxLinearIntCaseCount))
    {
        BuildLinearTraverseInstr(start, end, targetOffset);
        ResetCaseNodes();
        return;
    }

    RefineCaseNodes();

    BuildOptimizedIntegerCaseInstrs(targetOffset);

    ResetCaseNodes(); // clear the list for the next set of integers - or for a new switch statement

    // Optimization is definitely performed when the number of cases is greater than the threshold
    if (end - start > CONFIG_FLAG(MaxLinearIntCaseCount) - 1) // -1 for handling zero index as the base
    {
        BuildBailOnNotInteger();
    }
}
 void PropertyRecordUsageCache::RegisterCacheMiss()
 {
     this->hitRate -= (int)CONFIG_FLAG(PropertyCacheMissPenalty);
     if (this->hitRate < (int)CONFIG_FLAG(PropertyCacheMissReset))
     {
         this->hitRate = 0;
     }
 }
 void PropertyString::LogCacheMiss()
 {
     this->hitRate -= (int)CONFIG_FLAG(StringCacheMissPenalty);
     if (this->hitRate < (int)CONFIG_FLAG(StringCacheMissReset))
     {
         this->hitRate = 0;
     }
 }
bool
AutoSystemInfo::VirtualSseAvailable(const int sseLevel) const
{
    #ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        return CONFIG_FLAG(Sse) < 0 || CONFIG_FLAG(Sse) >= sseLevel;
    #else
        return true;
    #endif
}
void InliningThreshold::SetAggressiveHeuristics()
{
    int limit = CONFIG_FLAG(AggressiveInlineThreshold);

    inlineThreshold = limit;
    constructorInlineThreshold = limit;
    outsideLoopInlineThreshold = limit;
    leafInlineThreshold = limit;
    loopInlineThreshold = limit;
    polymorphicInlineThreshold = limit;
    maxNumberOfInlineesWithLoop = CONFIG_FLAG(MaxNumberOfInlineesWithLoop);

    inlineCountMax = CONFIG_FLAG(AggressiveInlineCountMax);
}
void
SwitchIRBuilder::BuildBinaryTraverseInstr(int start, int end, uint32 defaultLeafBranch)
{
    int mid;

    if (start > end)
    {
        return;
    }

    if (end - start <= CONFIG_FLAG(MaxLinearIntCaseCount) - 1) // -1 for handling zero index as the base
    {
        // For a small number of cases (at most MaxLinearIntCaseCount), do a linear search
        BuildLinearTraverseInstr(start, end, defaultLeafBranch);
        return;
    }

    mid = start + ((end - start + 1) / 2);
    CaseNode* midNode = m_caseNodes->Item(mid);
    CaseNode* startNode = m_caseNodes->Item(start);

    // if the value we are switching on is greater than or equal to the mid
    // case's lower bound, branch right into the upper half of the binary search
    IR::BranchInstr* caseInstr = startNode->GetCaseInstr();
    IR::BranchInstr* branchInstr = IR::BranchInstr::New(m_geOp, nullptr, caseInstr->GetSrc1(), midNode->GetLowerBound(), m_func);
    branchInstr->m_isSwitchBr = true;
    m_adapter->AddBranchInstr(branchInstr, startNode->GetOffset(), midNode->GetOffset(), true);

    BuildBinaryTraverseInstr(start, mid - 1, defaultLeafBranch);
    BuildBinaryTraverseInstr(mid, end, defaultLeafBranch);
}
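
The divide-and-conquer shape above is easier to follow in a plain recursive sketch over a sorted array of case values. This is a hypothetical illustration; kMaxLinear stands in for CONFIG_FLAG(MaxLinearIntCaseCount):

#include <vector>

// Sketch of the binary dispatch above: small ranges fall back to a linear
// scan, larger ranges split at the midpoint and go right when the value is
// >= the midpoint's case value, mirroring the m_geOp branch emitted above.
static const int kMaxLinear = 3; // stand-in for CONFIG_FLAG(MaxLinearIntCaseCount)

static int FindCase(const std::vector<int>& cases, int start, int end, int value)
{
    if (start > end)
    {
        return -1; // no match: the default target
    }
    if (end - start <= kMaxLinear - 1)
    {
        for (int i = start; i <= end; ++i)
        {
            if (cases[i] == value) return i;
        }
        return -1;
    }
    int mid = start + ((end - start + 1) / 2);
    return value >= cases[mid]
        ? FindCase(cases, mid, end, value)
        : FindCase(cases, start, mid - 1, value);
}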
void InterpreterThunkEmitter::NewOOPJITThunkBlock()
{
    InterpreterThunkInfoIDL thunkInfo;
    HRESULT hr = JITManager::GetJITManager()->NewInterpreterThunkBlock(
        this->scriptContext->GetRemoteScriptAddr(),
        this->isAsmInterpreterThunk,
        &thunkInfo
    );
    JITManager::HandleServerCallResult(hr);

    BYTE* buffer = (BYTE*)thunkInfo.thunkBlockAddr;

    if (!CONFIG_FLAG(OOPCFGRegistration))
    {
        this->scriptContext->GetThreadContext()->SetValidCallTargetForCFG(buffer);
    }

    // Update object state only at the end when everything has succeeded - and no exceptions can be thrown.
    auto block = this->thunkBlocks.PrependNode(allocator, buffer);
#if PDATA_ENABLED
    void* pdataTable;
    PDataManager::RegisterPdata((PRUNTIME_FUNCTION)thunkInfo.pdataTableStart, (ULONG_PTR)buffer, (ULONG_PTR)thunkInfo.epilogEndAddr, &pdataTable);
    block->SetPdata(pdataTable);
#else
    Unused(block);
#endif

    this->thunkCount = thunkInfo.thunkCount;
    this->thunkBuffer = (BYTE*)thunkInfo.thunkBlockAddr;
}
    bool WebAssemblySharedArrayBuffer::IsValidVirtualBufferLength(uint length) const
    {
#if ENABLE_FAST_ARRAYBUFFER
        if (CONFIG_FLAG(WasmFastArray))
        {
            return true;
        }
#endif
#ifdef _WIN32
        if (CONFIG_FLAG(WasmSharedArrayVirtualBuffer))
        {
            return true;
        }
#endif
        return false;
    }
size_t
Output::VTrace(const wchar_t* shortPrefixFormat, const wchar_t* prefix, const wchar_t *form, va_list argptr)
{
    size_t retValue = 0;

    if (CONFIG_FLAG(RichTraceFormat))
    {
        InterlockedIncrement(&s_traceEntryId);
        retValue += Output::Print(L"[%d ~%d %s] ", s_traceEntryId, ::GetCurrentThreadId(), prefix);
    }
    else
    {
        retValue += Output::Print(shortPrefixFormat, prefix);
    }
    retValue += Output::VPrint(form, argptr);

    // Print stack trace.
    if (s_stackTraceHelper)
    {
        const ULONG c_framesToSkip = 2; // Skip 2 frames -- Output::VTrace and Output::Trace.
        const ULONG c_frameCount = 10;  // TODO: make it configurable.
        const wchar_t callStackPrefix[] = L"call stack:";
        if (s_inMemoryLogger)
        {
            // Trace just the addresses of functions; avoid symbol info as it takes too much memory.
            // One line for the whole stack trace, for easier parsing on the jd side.
            const size_t c_msgCharCount = _countof(callStackPrefix) + (1 + sizeof(void*) * 2) * c_frameCount; // 2 hexadecimal digits per byte + 1 for space.
            wchar_t callStackMsg[c_msgCharCount];
            void* frames[c_frameCount];
            size_t start = 0;
            size_t temp;

            temp = _snwprintf_s(callStackMsg, _countof(callStackMsg), _TRUNCATE, L"%s", callStackPrefix);
            Assert(temp != -1);
            start += temp;

            ULONG framesObtained = s_stackTraceHelper->GetStackTrace(c_framesToSkip, c_frameCount, frames);
            Assert(framesObtained <= c_frameCount);
            for (ULONG i = 0; i < framesObtained && i < c_frameCount; ++i)
            {
                Assert(_countof(callStackMsg) >= start);
                temp = _snwprintf_s(callStackMsg + start, _countof(callStackMsg) - start, _TRUNCATE, L" %p", frames[i]);
                Assert(temp != -1);
                start += temp;
            }

            retValue += Output::Print(L"%s\n", callStackMsg);
        }
        else
        {
            // Trace with full symbol info.
            retValue += Output::Print(L"%s\n", callStackPrefix);
            retValue += s_stackTraceHelper->PrintStackTrace(c_framesToSkip, c_frameCount);
        }
    }

    return retValue;
}
 void DynamicType::PrepareForTypeSnapshotEnumeration()
 {
     if(!GetIsLocked() && CONFIG_FLAG(TypeSnapshotEnumeration))
     {
         // Lock the type and handler, enabling us to enumerate properties of the type snapshotted
         // at the beginning of enumeration, despite property changes made by script during enumeration.
         LockType(); // Note: this only works for type handlers that support locking.
     }
 }
 DynamicType * DynamicObjectPropertyEnumerator::GetTypeToEnumerate() const
 {
     return
         GetSnapShotSemantics() &&
         initialType->GetIsLocked() &&
         CONFIG_FLAG(TypeSnapshotEnumeration)
         ? PointerValue(initialType)
         : object->GetDynamicType();
 }
double
DateImplementation::DoubleToTvUtc(double tv)
{
    if (JavascriptNumber::IsNan(tv) || tv < ktvMin || tv > ktvMax)
    {
        return JavascriptNumber::NaN;
    }
    return CONFIG_FLAG(HighPrecisionDate)? tv : ConvertToInteger(tv);
}
void
JITOutput::FinalizeNativeCode()
{
    CustomHeap::Allocation * allocation = GetAllocation();
#if ENABLE_OOP_NATIVE_CODEGEN
    if (JITManager::GetJITManager()->IsJITServer())
    {
        m_func->GetOOPCodeGenAllocators()->emitBufferManager.CompletePreviousAllocation(m_oopAlloc);

#if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
        if (!m_func->IsLoopBody() && CONFIG_FLAG(UseJITTrampoline))
        {
            allocation->thunkAddress = m_func->GetOOPThreadContext()->GetJITThunkEmitter()->CreateThunk(m_outputData->codeAddress);
        }
#endif
    }
    else
#endif
    {
        m_func->GetInProcCodeGenAllocators()->emitBufferManager.CompletePreviousAllocation(m_inProcAlloc);
        m_func->GetInProcJITEntryPointInfo()->SetInProcJITNativeCodeData(m_func->GetNativeCodeDataAllocator()->Finalize());
        m_func->GetInProcJITEntryPointInfo()->GetJitTransferData()->SetRawData(m_func->GetTransferDataAllocator()->Finalize());
#if !FLOATVAR
        CodeGenNumberChunk * numberChunks = m_func->GetNumberAllocator()->Finalize();
        m_func->GetInProcJITEntryPointInfo()->SetNumberChunks(numberChunks);
#endif

#if defined(_CONTROL_FLOW_GUARD) && !defined(_M_ARM)
        if (!m_func->IsLoopBody() && CONFIG_FLAG(UseJITTrampoline))
        {
            allocation->thunkAddress = m_func->GetInProcThreadContext()->GetJITThunkEmitter()->CreateThunk(m_outputData->codeAddress);
        }
#endif
    }
    m_outputData->thunkAddress = allocation->thunkAddress;
    if (!allocation->thunkAddress && CONFIG_FLAG(OOPCFGRegistration))
    {
        PVOID callTarget = (PVOID)m_outputData->codeAddress;
#ifdef _M_ARM
        callTarget = (PVOID)((uintptr_t)callTarget | 0x1);
#endif
        m_func->GetThreadContextInfo()->SetValidCallTargetForCFG(callTarget);
    }
}
    void BackgroundJobProcessor::InitializeThreadCount()
    {
        if (CONFIG_FLAG(ForceMaxJitThreadCount))
        {
            this->maxThreadCount = CONFIG_FLAG(MaxJitThreadCount);
        }
        else if (AutoSystemInfo::Data.IsLowMemoryProcess())
        {
            // In a low-memory scenario, don't spin up multiple threads, regardless of how many cores we have.
            this->maxThreadCount = 1;
        }
        else
        {
            int processorCount = AutoSystemInfo::Data.GetNumberOfPhysicalProcessors();
            // There are already two threads in play, the UI (main) thread and a GC thread,
            // so subtract 2 from processorCount to account for them.
            this->maxThreadCount = max(1, min(processorCount - 2, CONFIG_FLAG(MaxJitThreadCount)));
        }
    }
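
Concretely, the clamp above never drops below one JIT thread. A minimal sketch of the same formula, with illustrative inputs in the comments:

#include <algorithm>

// Sketch of the thread-count clamp above. Two threads are already in play
// (the UI/main thread and a GC thread), hence processorCount - 2. With 8
// physical processors and a MaxJitThreadCount of 2 (an illustrative value),
// this yields max(1, min(8 - 2, 2)) == 2; on a dual-core machine it yields
// max(1, min(0, 2)) == 1, so there is always at least one JIT thread.
static int ComputeMaxJitThreads(int processorCount, int maxJitThreadCountFlag)
{
    return std::max(1, std::min(processorCount - 2, maxJitThreadCountFlag));
}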
template <typename TAlloc> inline
void
JITThunkEmitter<TAlloc>::FreeThunk(uintptr_t thunkAddress)
{
    AutoCriticalSection autoCs(&this->cs);
    BVIndex thunkIndex = GetThunkIndexFromAddress(thunkAddress);
    if (thunkIndex >= this->freeThunks.Length() || this->freeThunks.TestAndSet(thunkIndex))
    {
        Assert(UNREACHED);
        this->firstBitToCheck = 0;
        return;
    }

    if (thunkIndex < firstBitToCheck)
    {
        this->firstBitToCheck = thunkIndex;
    }

    if (CONFIG_FLAG(OOPCFGRegistration))
    {
#if ENABLE_OOP_NATIVE_CODEGEN
        if (JITManager::GetJITManager()->IsJITServer())
        {
            HANDLE fileHandle = nullptr;
            PVOID baseAddress = nullptr;
            bool found = this->codeAllocator->GetFileInfo((PVOID)thunkAddress, &fileHandle, &baseAddress);
            AssertOrFailFast(found);
            this->threadContext->SetValidCallTargetFile((PVOID)thunkAddress, fileHandle, baseAddress, false);
        }
        else
#endif
        {
            this->threadContext->SetValidCallTargetForCFG((PVOID)thunkAddress, false);
        }
    }

    uintptr_t pageStartAddress = GetThunkPageStart(thunkAddress);
    if (IsThunkPageEmpty(pageStartAddress))
    {
        this->codeAllocator->Free((PVOID)pageStartAddress, AutoSystemInfo::PageSize, MEM_DECOMMIT);
    }
    else
    {
        char * localAddress = (char *)this->codeAllocator->AllocLocal((PVOID)thunkAddress, ThunkSize);
        if (localAddress == nullptr)
        {
            return;
        }
        UnprotectPage(localAddress);
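        // 0xCC is the x86 "int 3" breakpoint opcode; filling the freed thunk with it makes a stale call trap on x86/x64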
        memset(localAddress, 0xCC, ThunkSize);
        ProtectPage(localAddress);
        this->codeAllocator->FreeLocal(localAddress);
    }
    FlushInstructionCache(this->processHandle, (PVOID)thunkAddress, ThunkSize);
}
    void WebAssemblySharedArrayBuffer::ValidateBuffer()
    {
#if DBG && _WIN32
        if (CONFIG_FLAG(WasmSharedArrayVirtualBuffer))
        {
            MEMORY_BASIC_INFORMATION info = { 0 };
            size_t size = 0;
            size_t allocationSize = 0;
            // Make sure the beginning of the buffer is committed memory to the expected size
            if (sharedContents->bufferLength > 0)
            {
                size = VirtualQuery((LPCVOID)sharedContents->buffer, &info, sizeof(info));
                Assert(size > 0);
                allocationSize = info.RegionSize + ((uintptr_t)info.BaseAddress - (uintptr_t)info.AllocationBase);
                Assert(allocationSize == sharedContents->bufferLength && info.State == MEM_COMMIT && info.Type == MEM_PRIVATE);
            }

            // Make sure the end of the buffer is reserved memory to the expected size
            size_t expectedAllocationSize = sharedContents->maxBufferLength;
#if ENABLE_FAST_ARRAYBUFFER
            if (CONFIG_FLAG(WasmFastArray))
            {
                expectedAllocationSize = MAX_WASM__ARRAYBUFFER_LENGTH;
            }
#endif
            // If the whole buffer has been committed, no need to verify this
            if (expectedAllocationSize > sharedContents->bufferLength)
            {
                size = VirtualQuery((LPCVOID)(sharedContents->buffer + sharedContents->bufferLength), &info, sizeof(info));
                Assert(size > 0);
                allocationSize = info.RegionSize + ((uintptr_t)info.BaseAddress - (uintptr_t)info.AllocationBase);
                Assert(allocationSize == expectedAllocationSize && info.State == MEM_RESERVE && info.Type == MEM_PRIVATE);
            }
        }
#endif
    }
void
EmitBufferManager<TAlloc, TPreReservedAlloc, SyncObject>::FreeAllocations(bool release)
{
#if PDATA_ENABLED && defined(_WIN32)
    DelayDeletingFunctionTable::Clear();
#endif

    AutoRealOrFakeCriticalSection<SyncObject> autoCs(&this->criticalSection);

#if DBG_DUMP
    if (!release && PHASE_STATS1(Js::EmitterPhase))
    {
        this->DumpAndResetStats(Js::Configuration::Global.flags.Filename);
    }
#endif

    TEmitBufferAllocation * allocation = this->allocations;
    while (allocation != nullptr)
    {
#ifdef ENABLE_DEBUG_CONFIG_OPTIONS
        if(CONFIG_FLAG(CheckEmitBufferPermissions))
        {
            CheckBufferPermissions(allocation);
        }
#endif
        if (release)
        {
            this->allocationHeap.Free(allocation->allocation);
        }
        else if ((scriptContext != nullptr) && allocation->recorded)
        {
            // In the case of the ThunkEmitter the script context is null, and we don't want to track that as code size.
            this->scriptContext->GetThreadContext()->SubCodeSize(allocation->bytesCommitted);
            allocation->recorded = false;
        }

        allocation = allocation->nextAllocation;
    }
    if (release)
    {
        this->allocations = nullptr;
    }
    else
    {
        this->allocationHeap.DecommitAll();
    }
}
void WritePerfHint(PerfHints hint, Js::FunctionBody * functionBody, uint byteCodeOffset /*= Js::Constants::NoByteCodeOffset*/)
{
    Assert(functionBody);
    Assert(((uint)hint) < _countof(s_perfHintContainer));

    PerfHintItem item = s_perfHintContainer[(uint)hint];

    int level = CONFIG_FLAG(PerfHintLevel);
    Assert(level <= (int)PerfHintLevels::VERBOSE);

    if ((int)item.level <= level)
    {
        ULONG lineNumber = functionBody->GetLineNumber();
        LONG columnNumber = functionBody->GetColumnNumber();
        if (byteCodeOffset != Js::Constants::NoByteCodeOffset)
        {
            functionBody->GetLineCharOffset(byteCodeOffset, &lineNumber, &columnNumber, false /*canAllocateLineCache*/);

            // returned values are 0-based. Adjusting.
            lineNumber++;
            columnNumber++;
        }

        // We will print the short name.
        TCHAR shortName[255];
        Js::FunctionBody::GetShortNameFromUrl(functionBody->GetSourceName(), shortName, 255);

        OUTPUT_TRACE(Js::PerfHintPhase, _u("%s : %s {\n      Function : %s [%s @ %u, %u]\n  Consequences : %s\n    Suggestion : %s\n}\n"),
            item.isNotOptimized ? _u("Not optimized") : _u("Optimized"),
            item.description,
            functionBody->GetExternalDisplayName(),
            shortName,
            lineNumber,
            columnNumber,
            item.consequences,
            item.suggestion);
        Output::Flush();
    }
}
    void Utf8SourceInfo::EnsureInitialized(int initialFunctionCount)
    {
        ThreadContext* threadContext = ThreadContext::GetContextForCurrentThread();
        Recycler* recycler = threadContext->GetRecycler();

        if (this->functionBodyDictionary == nullptr)
        {
            // This collection is allocated with a leaf allocation policy. The references to the function bodies
            // here do not keep the functions alive. However, the functions remove themselves at finalize,
            // so if a function actually is in this map, it means that it is alive.
            this->functionBodyDictionary = RecyclerNew(recycler, FunctionBodyDictionary, recycler,
                initialFunctionCount, threadContext->GetEtwRundownCriticalSection());
        }

        if (CONFIG_FLAG(DeferTopLevelTillFirstCall) && !m_deferredFunctionsInitialized)
        {
            Assert(this->m_deferredFunctionsDictionary == nullptr);
            this->m_deferredFunctionsDictionary = RecyclerNew(recycler, DeferredFunctionsDictionary, recycler,
                initialFunctionCount, threadContext->GetEtwRundownCriticalSection());
            m_deferredFunctionsInitialized = true;
        }
    }
    void JsBuiltInEngineInterfaceExtensionObject::EnsureJsBuiltInByteCode(ScriptContext * scriptContext)
    {
        if (jsBuiltInByteCode == nullptr)
        {
            SourceContextInfo* sourceContextInfo = RecyclerNewStructZ(scriptContext->GetRecycler(), SourceContextInfo);
            sourceContextInfo->dwHostSourceContext = Js::Constants::JsBuiltInSourceContext;
            sourceContextInfo->isHostDynamicDocument = true;
            sourceContextInfo->sourceContextId = Js::Constants::JsBuiltInSourceContextId;

            Assert(sourceContextInfo != nullptr);

            SRCINFO si;
            memset(&si, 0, sizeof(si));
            si.sourceContextInfo = sourceContextInfo;
            SRCINFO *hsi = scriptContext->AddHostSrcInfo(&si);
            uint32 flags = fscrJsBuiltIn | (CONFIG_FLAG(CreateFunctionProxy) && !scriptContext->IsProfiling() ? fscrAllowFunctionProxy : 0);

            HRESULT hr = Js::ByteCodeSerializer::DeserializeFromBuffer(scriptContext, flags, (LPCUTF8)nullptr, hsi, (byte*)Library_Bytecode_JsBuiltIn, nullptr, &jsBuiltInByteCode);

            IfFailAssertMsgAndThrowHr(hr, "Failed to deserialize JsBuiltIn.js bytecode - very probably the bytecode needs to be rebuilt.");
            this->SetHasBytecode();
        }
    }
 bool Configuration::EnableJitInDebugMode()
 {
     return CONFIG_FLAG(EnableJitInDiagMode);
 }
 bool PropertyString::ShouldUseCache() const
 {
     return this->hitRate > (int)CONFIG_FLAG(StringCacheMissThreshold);
 }
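
Taken together with the miss handlers earlier in this listing, these accessors form a simple decaying hit-rate counter: each miss subtracts a penalty, the counter is reset once it sinks below a floor, and the cache is consulted only while the counter stays above a threshold. A self-contained sketch of the decay side of the pattern (the hit path that raises the counter is not shown in this listing); the three constants are illustrative stand-ins for the StringCacheMiss* flag values:

// Sketch of the PropertyString hit-rate pattern above; constants are
// illustrative stand-ins for StringCacheMissPenalty, StringCacheMissReset,
// and StringCacheMissThreshold.
struct CacheHitRateSketch
{
    int hitRate = 0;

    static const int kMissPenalty = 10;  // stand-in for StringCacheMissPenalty
    static const int kMissReset = -20;   // stand-in for StringCacheMissReset
    static const int kUseThreshold = 0;  // stand-in for StringCacheMissThreshold

    void LogCacheMiss()
    {
        hitRate -= kMissPenalty;
        if (hitRate < kMissReset)
        {
            hitRate = 0; // reset instead of letting the counter sink forever
        }
    }

    bool ShouldUseCache() const
    {
        return hitRate > kUseThreshold;
    }
};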
void
SwitchIRBuilder::BuildMultiBrCaseInstrForStrings(uint32 targetOffset)
{
    Assert(m_caseNodes && m_caseNodes->Count() && m_profiledSwitchInstr && !m_isAsmJs);

    if (m_caseNodes->Count() < CONFIG_FLAG(MaxLinearStringCaseCount))
    {
        int start = 0;
        int end = m_caseNodes->Count() - 1;
        BuildLinearTraverseInstr(start, end, targetOffset);
        ResetCaseNodes();
        return;
    }

    IR::Opnd * srcOpnd = m_caseNodes->Item(0)->GetCaseInstr()->GetSrc1(); // Src1 is the same in all the case nodes
    IR::MultiBranchInstr * multiBranchInstr = IR::MultiBranchInstr::New(Js::OpCode::MultiBr, srcOpnd, m_func);

    uint32 lastCaseOffset = m_caseNodes->Item(m_caseNodes->Count() - 1)->GetOffset();
    uint caseCount = m_caseNodes->Count();

    bool generateDictionary = true;
    char16 minChar = USHORT_MAX;
    char16 maxChar = 0;

    // Either the jump table is within the limit (<= 128) or it is dense (<= 2 * caseCount)
    uint const maxJumpTableSize = max<uint>(CONFIG_FLAG(MaxSingleCharStrJumpTableSize), CONFIG_FLAG(MaxSingleCharStrJumpTableRatio) * caseCount);
    if (this->m_seenOnlySingleCharStrCaseNodes)
    {
        generateDictionary = false;
        for (uint i = 0; i < caseCount; i++)
        {
            Js::JavascriptString * str = m_caseNodes->Item(i)->GetSrc2StringConstLocal();
            Assert(str->GetLength() == 1);
            char16 currChar = str->GetString()[0];
            minChar = min(minChar, currChar);
            maxChar = max(maxChar, currChar);
            if ((uint)(maxChar - minChar) > maxJumpTableSize)
            {
                generateDictionary = true;
                break;
            }
        }
    }

    if (generateDictionary)
    {
        multiBranchInstr->CreateBranchTargetsAndSetDefaultTarget(caseCount, IR::MultiBranchInstr::StrDictionary, targetOffset);

        // Adding the normal cases to the instruction (except the default case, which is added later)
        for (uint i = 0; i < caseCount; i++)
        {
            Js::JavascriptString * str = m_caseNodes->Item(i)->GetSrc2StringConstLocal();
            uint32 caseTargetOffset = m_caseNodes->Item(i)->GetTargetOffset();
            multiBranchInstr->AddtoDictionary(caseTargetOffset, str, m_caseNodes->Item(i)->GetSrc2StringConst());
        }
    }
    else
    {
        // If starting at minChar would save fewer than 16 entries, just start from 0 so we don't have to subtract
        if (minChar < 16)
        {
            minChar = 0;
        }
        multiBranchInstr->m_baseCaseValue = minChar;
        multiBranchInstr->m_lastCaseValue = maxChar;
        uint jumpTableSize = maxChar - minChar + 1;
        multiBranchInstr->CreateBranchTargetsAndSetDefaultTarget(jumpTableSize, IR::MultiBranchInstr::SingleCharStrJumpTable, targetOffset);

        for (uint i = 0; i < jumpTableSize; i++)
        {
            // Initialize all the entries to the default target first.
            multiBranchInstr->AddtoJumpTable(targetOffset, i);
        }
        // Adding the normal cases to the instruction (except the default case, which is added later)
        for (uint i = 0; i < caseCount; i++)
        {
            Js::JavascriptString * str = m_caseNodes->Item(i)->GetSrc2StringConstLocal();
            Assert(str->GetLength() == 1);
            uint32 caseTargetOffset = m_caseNodes->Item(i)->GetTargetOffset();
            multiBranchInstr->AddtoJumpTable(caseTargetOffset, str->GetString()[0] - minChar);
        }
    }

    multiBranchInstr->m_isSwitchBr = true;

    m_adapter->CreateRelocRecord(multiBranchInstr, lastCaseOffset, targetOffset);
    m_adapter->AddInstr(multiBranchInstr, lastCaseOffset);
    BuildBailOnNotString();

    ResetCaseNodes();
}
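
To make the jump-table-versus-dictionary choice above concrete: with 20 single-character cases, a MaxSingleCharStrJumpTableSize of 128, and a MaxSingleCharStrJumpTableRatio of 2 (both illustrative values, not necessarily the defaults), the table limit is max(128, 2 * 20) = 128; cases spanning 'a'..'z' (range 25) fit in a jump table, while cases scattered across a wider character range fall back to the string dictionary. A minimal sketch of that check:

#include <algorithm>
#include <cstdint>

// Sketch of the density check above. The two constants are illustrative
// stand-ins for MaxSingleCharStrJumpTableSize and MaxSingleCharStrJumpTableRatio.
static bool ShouldUseJumpTable(char16_t minChar, char16_t maxChar, uint32_t caseCount)
{
    const uint32_t kMaxTableSize = 128; // stand-in flag value
    const uint32_t kTableRatio = 2;     // stand-in flag value

    // The table is acceptable if it is small in absolute terms or dense
    // relative to the number of cases.
    uint32_t maxJumpTableSize = std::max<uint32_t>(kMaxTableSize, kTableRatio * caseCount);
    return (uint32_t)(maxChar - minChar) <= maxJumpTableSize;
}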
// This only enables collection of the inlinee data, so we are much more aggressive here.
// The actual decision of whether something is inlined is made in CommitInlineIntoInliner.
bool InliningDecider::DeciderInlineIntoInliner(Js::FunctionBody * inlinee, Js::FunctionBody * inliner, bool isConstructorCall, bool isPolymorphicCall, uint16 constantArgInfo, uint recursiveInlineDepth, bool allowRecursiveInlining)
{

    if (!CanRecursivelyInline(inlinee, inliner, allowRecursiveInlining, recursiveInlineDepth))
    {
        return false;
    }

    if (inlinee->GetIsAsmjsMode() || inliner->GetIsAsmjsMode())
    {
        return false;
    }

    if (PHASE_FORCE(Js::InlinePhase, this->topFunc) ||
        PHASE_FORCE(Js::InlinePhase, inliner) ||
        PHASE_FORCE(Js::InlinePhase, inlinee))
    {
        return true;
    }

    if (PHASE_OFF(Js::InlinePhase, this->topFunc) ||
        PHASE_OFF(Js::InlinePhase, inliner) ||
        PHASE_OFF(Js::InlinePhase, inlinee))
    {
        return false;
    }

    if (PHASE_FORCE(Js::InlineTreePhase, this->topFunc) ||
        PHASE_FORCE(Js::InlineTreePhase, inliner))
    {
        return true;
    }

    if (PHASE_FORCE(Js::InlineAtEveryCallerPhase, inlinee))
    {
        return true;
    }

    uint inlineeByteCodeCount = inlinee->GetByteCodeWithoutLDACount();

    // Heuristics are applied in the following order (note: *order* is important)
    // 1. Leaf function: If the inlinee is a leaf (but not a constructor or a polymorphic call), the inline threshold is LeafInlineThreshold (60). It can also have at most 1 loop.
    // 2. Constant function argument: If the inlinee candidate has a constant argument and that argument is used for branching, the inline threshold is ConstantArgumentInlineThreshold (157).
    // 3. InlineThreshold: If an inlinee candidate exceeds InlineThreshold, just don't inline, no matter what.

    // The following are additional constraints for an inlinee that meets InlineThreshold (rule 3):
    // 4. Rules for inlinees with loops:
    //      4a. Only a single loop in the inlinee is permitted.
    //      4b. Should not have polymorphic field access.
    //      4c. Should not be a constructor.
    //      4d. Should meet LoopInlineThreshold (25).
    // 5. Rule for polymorphic inlinees:
    //      5a. Should meet PolymorphicInlineThreshold (32).
    // 6. Rules for constructors:
    //      6a. Always inline if the inlinee has polymorphic field access (as we have cloned runtime data).
    //      6b. If the inlinee is monomorphic, inline only small constructors; they are governed by ConstructorInlineThreshold (21).
    // 7. Rule for inlinees that are not interpreted enough (as we might not have all the profile data):
    //      7a. As of now this is still governed by InlineThreshold. We plan to experiment with this in the future.
    // 8. The rest should be inlined.

    uint16 mask = constantArgInfo & inlinee->m_argUsedForBranch;
    if (mask && inlineeByteCodeCount < (uint)CONFIG_FLAG(ConstantArgumentInlineThreshold))
    {
        return true;
    }

    int inlineThreshold = threshold.inlineThreshold;
    if (!isPolymorphicCall && !isConstructorCall && IsInlineeLeaf(inlinee) && (inlinee->GetLoopCount() <= 2))
    {
        // Inlinee is a leaf function
        if (inlinee->GetLoopCount() == 0 || GetNumberOfInlineesWithLoop() <= (uint)threshold.maxNumberOfInlineesWithLoop) // Don't inline too many inlinees with loops.
        {
            // A negative LeafInlineThreshold disables the threshold
            if (threshold.leafInlineThreshold >= 0)
            {
                inlineThreshold += threshold.leafInlineThreshold - threshold.inlineThreshold;
            }
        }
    }

#if ENABLE_DEBUG_CONFIG_OPTIONS
    char16 debugStringBuffer[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
    char16 debugStringBuffer2[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
    char16 debugStringBuffer3[MAX_FUNCTION_BODY_DEBUG_STRING_SIZE];
#endif

    if (inlinee->GetHasLoops())
    {
        if (threshold.loopInlineThreshold < 0 ||                                     // A negative LoopInlineThreshold disables inlining of functions with loops
            GetNumberOfInlineesWithLoop() > (uint)threshold.maxNumberOfInlineesWithLoop || // See if we are inlining too many inlinees with loops.
            (inlinee->GetLoopCount() > 2) ||                                         // Allow at most 2 loops.
            inlinee->GetHasNestedLoop() ||                                           // Nested loops are not good inlinee candidates.
            isConstructorCall ||                                                     // Don't inline a constructor that has loops.
            PHASE_OFF(Js::InlineFunctionsWithLoopsPhase, this->topFunc))
        {
            INLINE_TESTTRACE(_u("INLINING: Skip Inline: Has loops \tBytecode size: %d \tgetNumberOfInlineesWithLoop: %d\tloopCount: %d\thasNestedLoop: %B\tisConstructorCall:%B\tInlinee: %s (%s)\tCaller: %s (%s) \tRoot: %s (%s)\n"),
                inlinee->GetByteCodeCount(),
                GetNumberOfInlineesWithLoop(),
                inlinee->GetLoopCount(),
                inlinee->GetHasNestedLoop(),
                isConstructorCall,
                inlinee->GetDisplayName(), inlinee->GetDebugNumberSet(debugStringBuffer),
                inliner->GetDisplayName(), inliner->GetDebugNumberSet(debugStringBuffer2),
                topFunc->GetDisplayName(), topFunc->GetDebugNumberSet(debugStringBuffer3));
            // Don't inline function with loops
            return false;
        }
        else
        {
            inlineThreshold -= (threshold.inlineThreshold > threshold.loopInlineThreshold) ? threshold.inlineThreshold - threshold.loopInlineThreshold : 0;
        }
    }

    if (isPolymorphicCall)
    {
        if (threshold.polymorphicInlineThreshold < 0 ||                              // A negative PolymorphicInlineThreshold disables polymorphic inlining
            isConstructorCall)
        {
            INLINE_TESTTRACE(_u("INLINING: Skip Inline: Polymorphic call under PolymorphicInlineThreshold: %d \tBytecode size: %d\tInlinee: %s (%s)\tCaller: %s (%s) \tRoot: %s (%s)\n"),
                threshold.polymorphicInlineThreshold,
                inlinee->GetByteCodeCount(),
                inlinee->GetDisplayName(), inlinee->GetDebugNumberSet(debugStringBuffer),
                inliner->GetDisplayName(), inliner->GetDebugNumberSet(debugStringBuffer2),
                topFunc->GetDisplayName(), topFunc->GetDebugNumberSet(debugStringBuffer3));
            return false;
        }
        else
        {
            inlineThreshold -= (threshold.inlineThreshold > threshold.polymorphicInlineThreshold) ? threshold.inlineThreshold - threshold.polymorphicInlineThreshold : 0;
        }
    }

    if (isConstructorCall)
    {
#pragma prefast(suppress: 6285, "logical-or of constants is by design")
        if (PHASE_OFF(Js::InlineConstructorsPhase, this->topFunc) ||
            PHASE_OFF(Js::InlineConstructorsPhase, inliner) ||
            PHASE_OFF(Js::InlineConstructorsPhase, inlinee) ||
            !CONFIG_FLAG(CloneInlinedPolymorphicCaches))
        {
            return false;
        }

        if (PHASE_FORCE(Js::InlineConstructorsPhase, this->topFunc) ||
            PHASE_FORCE(Js::InlineConstructorsPhase, inliner) ||
            PHASE_FORCE(Js::InlineConstructorsPhase, inlinee))
        {
            return true;
        }

        if (inlinee->HasDynamicProfileInfo() && inlinee->GetAnyDynamicProfileInfo()->HasPolymorphicFldAccess())
        {
            // As of now this is not dependent on bytecodeInlinedThreshold.
            return true;
        }

        // A negative ConstructorInlineThreshold always disables constructor inlining
        if (threshold.constructorInlineThreshold < 0)
        {
            INLINE_TESTTRACE(_u("INLINING: Skip Inline: Constructor with no polymorphic field access \tBytecode size: %d\tInlinee: %s (%s)\tCaller: %s (%s) \tRoot: %s (%s)\n"),
                inlinee->GetByteCodeCount(),
                inlinee->GetDisplayName(), inlinee->GetDebugNumberSet(debugStringBuffer),
                inliner->GetDisplayName(), inliner->GetDebugNumberSet(debugStringBuffer2),
                topFunc->GetDisplayName(), topFunc->GetDebugNumberSet(debugStringBuffer3));
            // Don't inline a constructor that has no polymorphic field access, or when cloning polymorphic
            // inline caches is disabled
            return false;
        }
        else
        {
            inlineThreshold -= (threshold.inlineThreshold > threshold.constructorInlineThreshold) ? threshold.inlineThreshold - threshold.constructorInlineThreshold : 0;
        }
    }

    if (threshold.forLoopBody)
    {
        inlineThreshold /= CONFIG_FLAG(InlineInLoopBodyScaleDownFactor);
    }

    if (inlineThreshold > 0 && inlineeByteCodeCount <= (uint)inlineThreshold)
    {
        if (inlinee->GetLoopCount())
        {
            IncrementNumberOfInlineesWithLoop();
        }
        return true;
    }
    else
    {
        return false;
    }
}
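
One detail worth pulling out of the function above: each applicable category (loop, polymorphic call, constructor) lowers the running threshold by the gap between the base InlineThreshold and the category's threshold, i.e. t -= (base > c) ? base - c : 0. While the running threshold is still at its base value, that is simply min(base, c). A distilled sketch of that observation:

#include <algorithm>

// Distilled form of the per-category adjustment above, for the common case
// where the running threshold is still the base InlineThreshold: the
// category clamps it down to its own (smaller) threshold. Negative category
// thresholds disable the category outright before this point is reached.
static int EffectiveInlineThreshold(int baseThreshold, int categoryThreshold)
{
    return std::min(baseThreshold, categoryThreshold);
}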
bool AutoSystemInfo::IsLowMemoryProcess()
{
    ULONG64 commit = ULONG64(-1);
    this->GetAvailableCommit(&commit);
    return commit <= CONFIG_FLAG(LowMemoryCap);
}
void
SwitchIRBuilder::BuildOptimizedIntegerCaseInstrs(uint32 targetOffset)
{
    int startjmpTableIndex = 0;
    int endjmpTableIndex = 0;
    int startBinaryTravIndex = 0;
    int endBinaryTravIndex = 0;

    IR::MultiBranchInstr * multiBranchInstr = nullptr;

    /*
    *   Algorithm to find chunks of consecutive integers in a given (sorted) set of case arms:
    *   - Start and end indices are maintained for both the jump table and the binary tree.
    *   - The corresponding start and end indices indicate that the cases between them are suitable
    *     candidates for their respective category (binary tree / jump table).
    *   - All holes are filled with an offset corresponding to the default fallthrough instruction, and
    *     each block is filled with an offset corresponding to the start of the next block.
    *     A block here refers either to a jump table or to a binary tree.
    *   - Blocks of binary traversal / jump table are traversed in a linear fashion.
    **/
    for (int currentIndex = 0; currentIndex < m_caseNodes->Count() - 1; currentIndex++)
    {
        int nextIndex = currentIndex + 1;
        // Check whether there is a hole between consecutive case arms
        if (m_caseNodes->Item(currentIndex)->GetSrc2IntConst() + 1 != m_caseNodes->Item(nextIndex)->GetSrc2IntConst())
        {
            // Case node values are guaranteed to fit in 32 bits at this point (if they were larger, the switch opt would not kick in)
            Assert(nextIndex == endjmpTableIndex + 1);
            int64 speculatedEndJmpCaseValue = m_caseNodes->Item(nextIndex)->GetSrc2IntConst();
            int64 endJmpCaseValue = m_caseNodes->Item(endjmpTableIndex)->GetSrc2IntConst();
            int64 startJmpCaseValue = m_caseNodes->Item(startjmpTableIndex)->GetSrc2IntConst();

            int64 speculatedJmpTableSize = speculatedEndJmpCaseValue - startJmpCaseValue + 1;
            int64 jmpTableSize = endJmpCaseValue - startJmpCaseValue + 1;

            int numFilledEntries = nextIndex - startjmpTableIndex + 1;

            // Check whether the percentage of filled entries (unique targets from the case arms) in the jump table is within the threshold
            if (speculatedJmpTableSize != 0 && (numFilledEntries * 100 / speculatedJmpTableSize) < (100 - CONFIG_FLAG(SwitchOptHolesThreshold)))
            {
                if (jmpTableSize >= CONFIG_FLAG(MinSwitchJumpTableSize))
                {
                    uint32 fallThrOffset = m_caseNodes->Item(endjmpTableIndex)->GetOffset();
                    TryBuildBinaryTreeOrMultiBrForSwitchInts(multiBranchInstr, fallThrOffset, startjmpTableIndex, endjmpTableIndex, startBinaryTravIndex, targetOffset);

                    //Reset start/end indices of BinaryTrav to the next index.
                    startBinaryTravIndex = nextIndex;
                    endBinaryTravIndex = nextIndex;
                }

                //Reset start/end indices of the jump table to the next index.
                startjmpTableIndex = nextIndex;
                endjmpTableIndex = nextIndex;
            }
            else
            {
                endjmpTableIndex++;
            }
        }
        else
        {
            endjmpTableIndex++;
        }
    }

    int64 endJmpCaseValue = m_caseNodes->Item(endjmpTableIndex)->GetSrc2IntConst();
    int64 startJmpCaseValue = m_caseNodes->Item(startjmpTableIndex)->GetSrc2IntConst();
    int64 jmpTableSize = endJmpCaseValue - startJmpCaseValue + 1;

    if (jmpTableSize < CONFIG_FLAG(MinSwitchJumpTableSize))
    {
        endBinaryTravIndex = endjmpTableIndex;
        BuildBinaryTraverseInstr(startBinaryTravIndex, endBinaryTravIndex, targetOffset);
        if (multiBranchInstr)
        {
            FixUpMultiBrJumpTable(multiBranchInstr, multiBranchInstr->GetNextRealInstr()->GetByteCodeOffset());
            multiBranchInstr = nullptr;
        }
    }
    else
    {
        uint32 fallthrOffset = m_caseNodes->Item(endjmpTableIndex)->GetOffset();
        TryBuildBinaryTreeOrMultiBrForSwitchInts(multiBranchInstr, fallthrOffset, startjmpTableIndex, endjmpTableIndex, startBinaryTravIndex, targetOffset);
        FixUpMultiBrJumpTable(multiBranchInstr, targetOffset);
    }
}
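
To make the fill-rate check above concrete: for sorted case values {1, 2, 3, 4, 50}, when the scan reaches the gap after 4, speculatedJmpTableSize = 50 - 1 + 1 = 50 and numFilledEntries = 5, so the fill rate is 5 * 100 / 50 = 10%. With a SwitchOptHolesThreshold of 50 (an illustrative value), 10 < (100 - 50) holds, so the jump table is cut off at {1..4} and 50 starts a new block. A minimal sketch of the check:

#include <cstdint>

// Sketch of the sparseness test above: extending the jump table across the
// gap is rejected when the speculative table would be mostly holes.
static bool TooSparseToExtend(int64_t startCaseValue, int64_t speculatedEndCaseValue,
                              int numFilledEntries, int holesThresholdFlag)
{
    int64_t speculatedSize = speculatedEndCaseValue - startCaseValue + 1;
    return speculatedSize != 0 &&
           (numFilledEntries * 100 / speculatedSize) < (100 - holesThresholdFlag);
}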
void ConfigParser::ProcessConfiguration(HANDLE hmod)
{
#if defined(ENABLE_DEBUG_CONFIG_OPTIONS)
    bool hasOutput = false;
    char16 modulename[_MAX_PATH];

    GetModuleFileName((HMODULE)hmod, modulename, _MAX_PATH);

    // Win32-specific console creation code
    // xplat-todo: Consider making this mechanism available on other
    // platforms. Not a pressing need, since ChakraCore runs only in
    // consoles by default, so we don't need to allocate a second
    // console for this.
#if CONFIG_CONSOLE_AVAILABLE
    if (Js::Configuration::Global.flags.Console)
    {
        int fd;
        FILE *fp;

        // Failure usually means there is an existing console. We don't really care.
        AllocConsole();

        fd = _open_osfhandle((intptr_t)GetStdHandle(STD_OUTPUT_HANDLE), O_TEXT);
        fp = _wfdopen(fd, _u("w"));

        if (fp != nullptr)
        {
            *stdout = *fp;
            setvbuf(stdout, nullptr, _IONBF, 0);

            fd = _open_osfhandle((intptr_t)GetStdHandle(STD_ERROR_HANDLE), O_TEXT);
            fp = _wfdopen(fd, _u("w"));

            if (fp != nullptr)
            {
                *stderr = *fp;
                setvbuf(stderr, nullptr, _IONBF, 0);

                char16 buffer[_MAX_PATH + 70];

                if (ConfigParserAPI::FillConsoleTitle(buffer, _MAX_PATH + 20, modulename))
                {
                    SetConsoleTitle(buffer);
                }

                hasOutput = true;
            }
        }
    }
#endif

    if (Js::Configuration::Global.flags.IsEnabled(Js::OutputFileFlag)
        && Js::Configuration::Global.flags.OutputFile != nullptr)
    {
        SetOutputFile(Js::Configuration::Global.flags.OutputFile, Js::Configuration::Global.flags.OutputFileOpenMode);
        hasOutput = true;
    }

    if (Js::Configuration::Global.flags.DebugWindow)
    {
        Output::UseDebuggerWindow();
        hasOutput = true;
    }

#ifdef ENABLE_TRACE
    if (CONFIG_FLAG(InMemoryTrace))
    {
        Output::SetInMemoryLogger(
            Js::MemoryLogger::Create(::GetOutputAllocator1(),
            CONFIG_FLAG(InMemoryTraceBufferSize) * 3));   // With stack each trace is 3 entries (header, msg, stack).
        hasOutput = true;
    }

#ifdef STACK_BACK_TRACE
    if (CONFIG_FLAG(TraceWithStack))
    {
        Output::SetStackTraceHelper(Js::StackTraceHelper::Create(::GetOutputAllocator2()));
    }
#endif // STACK_BACK_TRACE
#endif // ENABLE_TRACE

    if (hasOutput)
    {
        ConfigParserAPI::DisplayInitialOutput(modulename);

        Output::Print(_u("\n"));

        Js::Configuration::Global.flags.VerboseDump();
        Output::Flush();
    }

    if (Js::Configuration::Global.flags.ForceSerialized)
    {
        // Can't generate or execute byte code under forced serialize
        Js::Configuration::Global.flags.GenerateByteCodeBufferReturnsCantGenerate = true;
        Js::Configuration::Global.flags.ExecuteByteCodeBufferReturnsInvalidByteCode = true;
    }

    ForcedMemoryConstraint::Apply();
#endif

#ifdef MEMSPECT_TRACKING
    bool all = false;
    if (Js::Configuration::Global.flags.Memspect.IsEnabled(Js::AllPhase))
    {
        all = true;
    }
    if (all || Js::Configuration::Global.flags.Memspect.IsEnabled(Js::RecyclerPhase))
    {
        RecyclerMemoryTracking::Activate();
    }
    if (all || Js::Configuration::Global.flags.Memspect.IsEnabled(Js::PageAllocatorPhase))
    {
        PageTracking::Activate();
    }
    if (all || Js::Configuration::Global.flags.Memspect.IsEnabled(Js::ArenaPhase))
    {
        ArenaMemoryTracking::Activate();
    }
#endif
}
void InliningThreshold::SetHeuristics()
{
    inlineThreshold = CONFIG_FLAG(InlineThreshold);
    // Inline less aggressively in large functions since the register pressure is likely high.
    // Small functions shouldn't be a problem.
    if (topFunc->GetByteCodeWithoutLDACount() > 800)
    {
        inlineThreshold -= CONFIG_FLAG(InlineThresholdAdjustCountInLargeFunction);
    }
    else if (topFunc->GetByteCodeWithoutLDACount() > 200)
    {
        inlineThreshold -= CONFIG_FLAG(InlineThresholdAdjustCountInMediumSizedFunction);
    }
    else if (topFunc->GetByteCodeWithoutLDACount() < 50)
    {
        inlineThreshold += CONFIG_FLAG(InlineThresholdAdjustCountInSmallFunction);
    }

    constructorInlineThreshold = CONFIG_FLAG(ConstructorInlineThreshold);
    outsideLoopInlineThreshold = CONFIG_FLAG(OutsideLoopInlineThreshold);
    leafInlineThreshold = CONFIG_FLAG(LeafInlineThreshold);
    loopInlineThreshold = CONFIG_FLAG(LoopInlineThreshold);
    polymorphicInlineThreshold = CONFIG_FLAG(PolymorphicInlineThreshold);
    maxNumberOfInlineesWithLoop = CONFIG_FLAG(MaxNumberOfInlineesWithLoop);
    constantArgumentInlineThreshold = CONFIG_FLAG(ConstantArgumentInlineThreshold);
    inlineCountMax = CONFIG_FLAG(InlineCountMax);
}
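
A minimal sketch of the size-based adjustment above, with illustrative constants standing in for the InlineThresholdAdjustCountIn* flags: large top-level functions inline less aggressively (register pressure is likely high), small ones inline more.

// Sketch of the piecewise threshold adjustment above; the adjustment
// constants are illustrative stand-ins for the corresponding flags.
static int AdjustInlineThreshold(int baseThreshold, unsigned topFuncByteCodeCount)
{
    const int kLargeAdjust = 20;  // stand-in for InlineThresholdAdjustCountInLargeFunction
    const int kMediumAdjust = 10; // stand-in for InlineThresholdAdjustCountInMediumSizedFunction
    const int kSmallAdjust = 10;  // stand-in for InlineThresholdAdjustCountInSmallFunction

    if (topFuncByteCodeCount > 800) return baseThreshold - kLargeAdjust;  // large function
    if (topFuncByteCodeCount > 200) return baseThreshold - kMediumAdjust; // medium function
    if (topFuncByteCodeCount < 50)  return baseThreshold + kSmallAdjust;  // small function
    return baseThreshold;
}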