// ToPrimitive fast path for ordinary dynamic objects. propertyId is a
// compile-time template argument restricted to valueOf/toString; the
// per-script-context inline cache avoids a full property lookup when the
// object still uses the built-in Object.prototype implementation.
// Returns true and sets *result when the value was produced here; returns the
// result of invoking the user-provided function otherwise.
BOOL DynamicObject::ToPrimitiveImpl(Var* result, ScriptContext * requestContext)
{
    CompileAssert(propertyId == PropertyIds::valueOf || propertyId == PropertyIds::toString);

    // Use per script context inline cache for valueOf and toString
    InlineCache * inlineCache = propertyId == PropertyIds::valueOf ? requestContext->GetValueOfInlineCache() : requestContext->GetToStringInlineCache();
    Var aValue = JavascriptOperators::PatchGetValueUsingSpecifiedInlineCache(inlineCache, this, this, propertyId, requestContext);

    // Fast path to the default valueOf/toString implementation
    if (propertyId == PropertyIds::valueOf)
    {
        if (aValue == requestContext->GetLibrary()->GetObjectValueOfFunction())
        {
            Assert(JavascriptConversion::IsCallable(aValue));
            // The default Object.prototype.valueOf will in turn just call ToObject().
            // The result is always an object if it is not undefined or null (which "this" is not)
            return false;
        }
    }
    else
    {
        if (aValue == requestContext->GetLibrary()->GetObjectToStringFunction())
        {
            Assert(JavascriptConversion::IsCallable(aValue));
            // These typeIds should never be here (they override ToPrimitive or they don't derive to DynamicObject::ToPrimitive)
            // Otherwise, they may cause implicit call in ToStringHelper
            Assert(this->GetTypeId() != TypeIds_HostDispatch && this->GetTypeId() != TypeIds_HostObject);
            *result = JavascriptObject::ToStringHelper(this, requestContext);
            return true;
        }
    }

    // Not the known default implementation: invoke whatever callable we found.
    return CallToPrimitiveFunction(aValue, propertyId, result, requestContext);
}
///---------------------------------------------------------------------------- /// /// GetMethodAddress /// /// returns the memory address of the helperMethod, /// which can the address of debugger wrapper that intercept the original helper. /// ///---------------------------------------------------------------------------- intptr_t GetMethodAddress(ThreadContextInfo * context, IR::HelperCallOpnd* opnd) { Assert(opnd); #if defined(_M_ARM32_OR_ARM64) #define LowererMDFinal LowererMD #else #define LowererMDFinal LowererMDArch #endif CompileAssert(_countof(helperMethodWrappers) == LowererMDFinal::MaxArgumentsToHelper + 1); if (opnd->IsDiagHelperCallOpnd()) { // Note: all arguments are already loaded for the original helper. Here we just return the address. IR::DiagHelperCallOpnd* diagOpnd = (IR::DiagHelperCallOpnd*)opnd; if (0 <= diagOpnd->m_argCount && diagOpnd->m_argCount <= LowererMDFinal::MaxArgumentsToHelper) { return SHIFT_ADDR(context, helperMethodWrappers[diagOpnd->m_argCount]); } else { AssertMsg(FALSE, "Unsupported arg count (need to implement)."); } } return GetMethodOriginalAddress(context, opnd->m_fnHelper); }
// Wabt callback: sets the property identified by the ChakraWabt property id
// on obj, translating the id through propertyMap into a runtime PropertyId.
// user_data carries the Context* that owns the target script context.
bool SetProperty(Js::Var obj, PropertyId id, Js::Var value, void* user_data)
{
    // propertyMap must have exactly one entry per ChakraWabt property id.
    CompileAssert((sizeof(propertyMap) / sizeof(PropertyId)) == ChakraWabt::PropertyIds::COUNT);
    Assert(id < ChakraWabt::PropertyIds::COUNT);

    Context* context = (Context*)user_data;
    // Normalize the BOOL-ish result of OP_SetProperty to a strict bool.
    return JavascriptOperators::OP_SetProperty(obj, propertyMap[id], value, context->scriptContext) != 0;
}
// Grows the wasm memory by deltaPages pages. Returns the previous page count
// on success, or -1 on failure (size overflow, exceeding the declared maximum,
// or OOM while growing) — matching the wasm memory.grow contract.
int32 WebAssemblyMemory::GrowInternal(uint32 deltaPages)
{
    // Compute the byte delta in 64 bits so deltaPages * PageSize cannot wrap.
    const uint64 deltaBytes = (uint64)deltaPages * WebAssembly::PageSize;
    if (deltaBytes > ArrayBuffer::MaxArrayBufferLength)
    {
        return -1;
    }
    const uint32 oldBytes = m_buffer->GetByteLength();
    const uint64 newBytesLong = deltaBytes + oldBytes;
    if (newBytesLong > ArrayBuffer::MaxArrayBufferLength)
    {
        return -1;
    }
    // The checks above guarantee newBytesLong fits into 32 bits.
    CompileAssert(ArrayBuffer::MaxArrayBufferLength <= UINT32_MAX);
    const uint32 newBytes = (uint32)newBytesLong;

    const uint32 oldPageCount = oldBytes / WebAssembly::PageSize;
    Assert(oldBytes % WebAssembly::PageSize == 0);

    const uint32 newPageCount = oldPageCount + deltaPages;
    if (newPageCount > m_maximum)
    {
        return -1;
    }

    WebAssemblyArrayBuffer * newBuffer = nullptr;
    JavascriptExceptionObject* caughtExceptionObject = nullptr;
    try
    {
        newBuffer = m_buffer->GrowMemory(newBytes);
        if (newBuffer == nullptr)
        {
            return -1;
        }
    }
    catch (const JavascriptException& err)
    {
        // The only exception expected here is the pending OOM error; swallow
        // it and report the failure to wasm via -1.
        caughtExceptionObject = err.GetAndClear();
        Assert(caughtExceptionObject && caughtExceptionObject == ThreadContext::GetContextForCurrentThread()->GetPendingOOMErrorObject());
        return -1;
    }

    m_buffer = newBuffer;

    // The returned page count always fits in int32.
    CompileAssert(ArrayBuffer::MaxArrayBufferLength / WebAssembly::PageSize <= INT32_MAX);
    return (int32)oldPageCount;
}
// Case-insensitive pattern compare: pattern position `index` stores all
// EquivClassSize (4) case-equivalent characters; match if inputChar equals
// any one of them.
static bool MatchPatternAt<CaseInsensitive::EquivClassSize, CaseInsensitive::EquivClassSize>(uint inputChar, char16 const * pat, CharCount index)
{
    CompileAssert(CaseInsensitive::EquivClassSize == 4);
    const CharCount base = index * CaseInsensitive::EquivClassSize;
    for (uint equiv = 0; equiv < CaseInsensitive::EquivClassSize; ++equiv)
    {
        if (inputChar == pat[base + equiv])
        {
            return true;
        }
    }
    return false;
}
// Returns a new string for a supplementary-plane code point (>= U+10000),
// encoded as a UTF-16 surrogate pair. Unlike BMP characters, these strings
// are not cached (see TODO below).
JavascriptString* CharStringCache::GetStringForCharSP(codepoint_t c)
{
    Assert(c >= 0x10000);
    // A surrogate pair (two char16s) must be exactly the size of a codepoint_t.
    CompileAssert(sizeof(char16) * 2 == sizeof(codepoint_t));

    char16 buffer[2];
    // Split the code point into its high/low surrogate halves.
    Js::NumberUtilities::CodePointAsSurrogatePair(c, buffer, buffer + 1);
    JavascriptString* str = JavascriptString::NewCopyBuffer(buffer, 2, JavascriptLibrary::FromCharStringCache(this)->GetScriptContext());
    // TODO: perhaps do some sort of cache for supplementary characters
    return str;
}
// Allocates a new small leaf heap block sized for the bucket's size category
// (leaf blocks hold objects the GC does not scan for pointers).
SmallLeafHeapBlockT<TBlockAttributes> * SmallLeafHeapBlockT<TBlockAttributes>::New(HeapBucketT<SmallLeafHeapBlockT<TBlockAttributes>> * bucket)
{
    // Object size and count are stored as ushort below; verify no truncation.
    CompileAssert(TBlockAttributes::MaxObjectSize <= USHRT_MAX);
    Assert(bucket->sizeCat <= TBlockAttributes::MaxObjectSize);
    Assert((TBlockAttributes::PageCount * AutoSystemInfo::PageSize) / bucket->sizeCat <= USHRT_MAX);

    ushort objectSize = (ushort)bucket->sizeCat;
    // How many objects of this size fit into the block's pages.
    ushort objectCount = (ushort)(TBlockAttributes::PageCount * AutoSystemInfo::PageSize) / objectSize;
    return NoMemProtectHeapNewNoThrowPlusPrefixZ(GetAllocPlusSize(objectCount), SmallLeafHeapBlockT<TBlockAttributes>, bucket, objectSize, objectCount);
}
// Maps a WAsmJs register type to the byte width of its backing storage.
// Throws an internal error for any unrecognized type.
uint32 GetTypeByteSize(Types type)
{
    // Since this needs to be done manually for each type, this assert will make sure to not forget to update this if a new type is added
    CompileAssert(WAsmJs::LIMIT == 5);
    switch (type)
    {
    case INT32:
        return sizeof(int32);
    case INT64:
        return sizeof(int64);
    case FLOAT32:
        return sizeof(float);
    case FLOAT64:
        return sizeof(double);
    case SIMD:
        return sizeof(AsmJsSIMDValue);
    default:
        Js::Throw::InternalError();
    }
}
// Writes a human-readable name (or a one-letter short name) for the register
// type into buf. Note that SIMD and the UNKNOWN fallback ignore shortName —
// presumably intentional since they have no short form; TODO confirm.
void RegisterSpace::GetTypeDebugName(Types type, char16* buf, uint bufsize, bool shortName)
{
    // Since this needs to be done manually for each type, this assert will make sure to not forget to update this if a new type is added
    CompileAssert(LIMIT == 5);
    switch (type)
    {
    case INT32: wcscpy_s(buf, bufsize , shortName ? _u("I"): _u("INT32")); break;
    case INT64: wcscpy_s(buf, bufsize , shortName ? _u("L"): _u("INT64")); break;
    case FLOAT32: wcscpy_s(buf, bufsize, shortName ? _u("F"): _u("FLOAT32")); break;
    case FLOAT64: wcscpy_s(buf, bufsize, shortName ? _u("D"): _u("FLOAT64")); break;
    case SIMD: wcscpy_s(buf, bufsize , _u("SIMD")); break;
    default: wcscpy_s(buf, bufsize , _u("UNKNOWN")); break;
    }
}
// Maps a section into a process. local==true maps a writable view into the
// current process; local==false maps an executable view into a remote process
// (for OOP JIT code pages). Returns nullptr on failure or when the
// local/remote expectation is violated.
PVOID MapView(HANDLE process, HANDLE sectionHandle, size_t size, size_t offset, bool local)
{
    PVOID address = nullptr;
    DWORD flags = 0;
    if (local)
    {
        // A local view must target the current process.
        if (process != GetCurrentProcess())
        {
            return nullptr;
        }
        flags = PAGE_READWRITE;
    }
    else
    {
        // A remote view must not target the current process.
        if (process == GetCurrentProcess())
        {
            return nullptr;
        }
        // Under CFG, mark all indirect-call targets in the view invalid by default.
        flags = AutoSystemInfo::Data.IsCFGEnabled() ? PAGE_EXECUTE_RO_TARGETS_INVALID : PAGE_EXECUTE_READ;
    }

#if USEFILEMAP2
    address = MapViewOfFile2(sectionHandle, process, offset, nullptr, size, NULL, flags);
    if (local && address != nullptr)
    {
        // Commit the freshly mapped local view so it is immediately writable.
        address = VirtualAlloc(address, size, MEM_COMMIT, flags);
    }
#else
    LARGE_INTEGER mapOffset = { 0 };
#if TARGET_32
    mapOffset.LowPart = offset;
#elif TARGET_64
    mapOffset.QuadPart = offset;
#else
    CompileAssert(UNREACHED);
#endif
    SIZE_T viewSize = size;
    int status = NtdllLibrary::Instance->MapViewOfSection(sectionHandle, process, &address, NULL, viewSize, &mapOffset, &viewSize, NtdllLibrary::ViewUnmap, NULL, flags);
    if (status != 0)
    {
        return nullptr;
    }
#endif
    return address;
}
// Slow-path shuffle for 4- and 2-lane SIMD types (laneCount is a template
// argument). Each lane Var is validated against [0, range) — validation may
// throw a script error — before delegating to SIMD128InnerShuffle.
// In the 2-lane case lane2/lane3 are passed through but unused.
Var SIMD128SlowShuffle(Var src1, Var src2, Var lane0, Var lane1, Var lane2, Var lane3, int range, ScriptContext* scriptContext)
{
    SIMDType *a = SIMDType::FromVar(src1);
    SIMDType *b = SIMDType::FromVar(src2);
    Assert(a);
    Assert(b);
    int32 lane0Value = 0;
    int32 lane1Value = 0;
    int32 lane2Value = 0;
    int32 lane3Value = 0;
    SIMDValue src1Value = a->GetValue();
    SIMDValue src2Value = b->GetValue();
    SIMDValue result;
    CompileAssert(laneCount == 4 || laneCount == 2);
    if (laneCount == 4)
    {
        // Validate all four lane selectors, then shuffle.
        lane0Value = SIMDCheckLaneIndex(scriptContext, lane0, range);
        lane1Value = SIMDCheckLaneIndex(scriptContext, lane1, range);
        lane2Value = SIMDCheckLaneIndex(scriptContext, lane2, range);
        lane3Value = SIMDCheckLaneIndex(scriptContext, lane3, range);
        Assert(lane0Value >= 0 && lane0Value < range);
        Assert(lane1Value >= 0 && lane1Value < range);
        Assert(lane2Value >= 0 && lane2Value < range);
        Assert(lane3Value >= 0 && lane3Value < range);
        result = SIMD128InnerShuffle<4>(src1Value, src2Value, lane0Value, lane1Value, lane2Value, lane3Value);
    }
    else
    {
        // 2-lane variant: only the first two selectors are meaningful.
        lane0Value = SIMDCheckLaneIndex(scriptContext, lane0, range);
        lane1Value = SIMDCheckLaneIndex(scriptContext, lane1, range);
        Assert(lane0Value >= 0 && lane0Value < range);
        Assert(lane1Value >= 0 && lane1Value < range);
        result = SIMD128InnerShuffle<2>(src1Value, src2Value, lane0Value, lane1Value, lane2Value, lane3Value);
    }
    return SIMDType::New(&result, scriptContext);
}
// Core shuffle for 4-lane (i32) and 2-lane (f64) SIMD values. A lane selector
// below laneCount picks from src1; otherwise it picks lane - laneCount from
// src2. In the 2-lane case lane2/lane3 are ignored.
SIMDValue SIMD128InnerShuffle(SIMDValue src1, SIMDValue src2, int32 lane0, int32 lane1, int32 lane2, int32 lane3)
{
    SIMDValue result;
    CompileAssert(laneCount == 4 || laneCount == 2);
    if (laneCount == 4)
    {
        const int32 selectors[] = { lane0, lane1, lane2, lane3 };
        const int32 destinations[] = { SIMD_X, SIMD_Y, SIMD_Z, SIMD_W };
        for (int i = 0; i < 4; ++i)
        {
            const int32 sel = selectors[i];
            result.i32[destinations[i]] = sel < 4 ? src1.i32[sel] : src2.i32[sel - 4];
        }
    }
    else
    {
        result.f64[SIMD_X] = lane0 < 2 ? src1.f64[lane0] : src2.f64[lane0 - 2];
        result.f64[SIMD_Y] = lane1 < 2 ? src1.f64[lane1] : src2.f64[lane1 - 2];
    }
    return result;
}
// Emits a medium/large-layout asm.js opcode: a one-byte layout prefix followed
// by the one-byte opcode. Returns the buffer offset of the prefix. Small
// layouts are encoded elsewhere — hence the CompileAssert.
__inline uint ByteCodeWriter::Data::EncodeT(OpCodeAsmJs op, ByteCodeWriter* writer, bool isPatching)
{
    Assert(op < Js::OpCodeAsmJs::ByteCodeLast);
    CompileAssert(layoutSize != SmallLayout);
    // Byte-sized opcodes use the plain prefixes; larger opcodes use the
    // extended prefixes. Each comes in a medium and a large variant.
    const byte exop = (byte)((op <= Js::OpCodeAsmJs::MaxByteSizedOpcodes) ?
        (layoutSize == LargeLayout ? Js::OpCodeAsmJs::LargeLayoutPrefix : Js::OpCodeAsmJs::MediumLayoutPrefix) :
        (layoutSize == LargeLayout ? Js::OpCodeAsmJs::ExtendedLargeLayoutPrefix : Js::OpCodeAsmJs::ExtendedMediumLayoutPrefix));
    uint offset = Write(&exop, sizeof(byte));
    byte byteop = (byte)op;
    Write(&byteop, sizeof(byte));
    if (!isPatching)
    {
        // Patching an existing instruction must not inflate the opcode count.
        writer->IncreaseByteCodeCount();
    }
    return offset;
}
// Debug tracing for wasm memory writes: prints the effective address and the
// value that was just stored, formatted per the view type's width/signedness.
void WebAssemblyMemory::TraceMemWrite(WebAssemblyMemory* mem, uint32 index, uint32 offset, Js::ArrayBufferView::ViewType viewType, uint32 bytecodeOffset, ScriptContext* context)
{
    // Must call after the write
    Assert(mem);
    Output::Print(_u("#%04x "), bytecodeOffset);
    // Widen to 64 bits so index + offset cannot wrap.
    uint64 bigIndex = (uint64)index + (uint64)offset;
    if (index >= mem->m_buffer->GetByteLength())
    {
        // NOTE(review): this only warns — the reads below still dereference
        // buffer + bigIndex. Presumably callers guarantee in-bounds access on
        // this trace path; confirm against the call sites.
        Output::Print(_u("WasmMemoryTrace:: Writing out of bounds. %llu >= %u\n"), bigIndex, mem->m_buffer->GetByteLength());
    }
    if (offset)
    {
        Output::Print(_u("WasmMemoryTrace:: buf[%u + %u (%llu)] = "), index, offset, bigIndex);
    }
    else
    {
        Output::Print(_u("WasmMemoryTrace:: buf[%u] = "), index);
    }
    BYTE* buffer = mem->m_buffer->GetBuffer();
    // Print the stored value using the width and signedness of the view type.
    switch (viewType)
    {
    case ArrayBufferView::ViewType::TYPE_INT8_TO_INT64:
    case ArrayBufferView::ViewType::TYPE_INT8:
        Output::Print(_u("%d\n"), *(int8*)(buffer + bigIndex));
        break;
    case ArrayBufferView::ViewType::TYPE_UINT8_TO_INT64:
    case ArrayBufferView::ViewType::TYPE_UINT8:
        Output::Print(_u("%u\n"), *(uint8*)(buffer + bigIndex));
        break;
    case ArrayBufferView::ViewType::TYPE_INT16_TO_INT64:
    case ArrayBufferView::ViewType::TYPE_INT16:
        Output::Print(_u("%d\n"), *(int16*)(buffer + bigIndex));
        break;
    case ArrayBufferView::ViewType::TYPE_UINT16_TO_INT64:
    case ArrayBufferView::ViewType::TYPE_UINT16:
        Output::Print(_u("%u\n"), *(uint16*)(buffer + bigIndex));
        break;
    case ArrayBufferView::ViewType::TYPE_INT32_TO_INT64:
    case ArrayBufferView::ViewType::TYPE_INT32:
        Output::Print(_u("%d\n"), *(int32*)(buffer + bigIndex));
        break;
    case ArrayBufferView::ViewType::TYPE_UINT32_TO_INT64:
    case ArrayBufferView::ViewType::TYPE_UINT32:
        Output::Print(_u("%u\n"), *(uint32*)(buffer + bigIndex));
        break;
    case ArrayBufferView::ViewType::TYPE_FLOAT32:
        Output::Print(_u("%.4f\n"), *(float*)(buffer + bigIndex));
        break;
    case ArrayBufferView::ViewType::TYPE_FLOAT64:
        Output::Print(_u("%.8f\n"), *(double*)(buffer + bigIndex));
        break;
    case ArrayBufferView::ViewType::TYPE_INT64:
        Output::Print(_u("%lld\n"), *(int64*)(buffer + bigIndex));
        break;
    default:
        // Keep this switch in sync with the full set of view types.
        CompileAssert(ArrayBufferView::ViewType::TYPE_COUNT == 15);
        Assert(UNREACHED);
    }
    return;
}
// Sets up the byte-code and statement readers for this JIT work item. When
// jitting in debug mode, also materializes the full statement map list so the
// JIT can produce debugger-accurate source mapping.
void JITTimeWorkItem::InitializeReader(
    Js::ByteCodeReader * reader,
    Js::StatementReader<Js::FunctionBody::ArenaStatementMapList> * statementReader,
    ArenaAllocator* alloc)
{
    // Loop bodies start reading at the loop header's offset; whole bodies at 0.
    uint startOffset = IsLoopBody() ? GetLoopHeader()->startOffset : 0;

    if (IsJitInDebugMode())
    {
        // TODO: OOP JIT, directly use the array rather than making a list
        m_fullStatementList = Js::FunctionBody::ArenaStatementMapList::New(alloc);
        // The IDL mirror must match the runtime StatementMap layout for the cast below.
        CompileAssert(sizeof(StatementMapIDL) == sizeof(Js::FunctionBody::StatementMap));

        StatementMapIDL * fullArr = m_jitBody.GetFullStatementMap();
        for (uint i = 0; i < m_jitBody.GetFullStatementMapCount(); ++i)
        {
            m_fullStatementList->Add((Js::FunctionBody::StatementMap*)&fullArr[i]);
        }
    }

#if DBG
    reader->Create(m_jitBody.GetByteCodeBuffer(), startOffset, m_jitBody.GetByteCodeLength());
    if (!JITManager::GetJITManager()->IsOOPJITEnabled())
    {
        // In-proc JIT: cross-check the marshalled statement maps against the
        // runtime's, field by field.
        Js::FunctionBody::StatementMapList * runtimeMap = ((Js::FunctionBody*)m_jitBody.GetAddr())->GetStatementMaps();
        Assert(!m_fullStatementList || ((int)m_jitBody.GetFullStatementMapCount() == runtimeMap->Count() && runtimeMap->Count() >= 0));
        for (uint i = 0; i < m_jitBody.GetFullStatementMapCount(); ++i)
        {
            Assert(runtimeMap->Item(i)->byteCodeSpan.begin == m_fullStatementList->Item(i)->byteCodeSpan.begin);
            Assert(runtimeMap->Item(i)->byteCodeSpan.end == m_fullStatementList->Item(i)->byteCodeSpan.end);
            Assert(runtimeMap->Item(i)->sourceSpan.begin == m_fullStatementList->Item(i)->sourceSpan.begin);
            Assert(runtimeMap->Item(i)->sourceSpan.end == m_fullStatementList->Item(i)->sourceSpan.end);
            Assert(runtimeMap->Item(i)->isSubexpression == m_fullStatementList->Item(i)->isSubexpression);
        }
    }
#else
    reader->Create(m_jitBody.GetByteCodeBuffer(), startOffset);
#endif

    bool hasSpanSequenceMap = m_jitBody.InitializeStatementMap(&m_statementMap, alloc);
    Js::SmallSpanSequence * spanSeq = hasSpanSequenceMap ? &m_statementMap : nullptr;
    statementReader->Create(m_jitBody.GetByteCodeBuffer(), startOffset, spanSeq, m_fullStatementList);
}
// Core shuffle for 8-lane (i16) and 16-lane (i8) SIMD values. Each entry of
// lanes selects from src1 when below laneCount, otherwise from src2 at
// lanes[i] - laneCount. Unused bytes of the result stay zero-initialized.
SIMDValue SIMD128InnerShuffle(SIMDValue src1, SIMDValue src2, const int32* lanes)
{
    SIMDValue result = { 0 };
    CompileAssert(laneCount == 16 || laneCount == 8);
    Assert(lanes != nullptr);
    for (uint idx = 0; idx < laneCount; ++idx)
    {
        const int32 sel = lanes[idx];
        // laneCount is a compile-time constant, so this branch folds away.
        if (laneCount == 8)
        {
            result.i16[idx] = sel < laneCount ? src1.i16[sel] : src2.i16[sel - laneCount];
        }
        else
        {
            result.i8[idx] = sel < laneCount ? src1.i8[sel] : src2.i8[sel - laneCount];
        }
    }
    return result;
}
// Slow-path shuffle for 8- and 16-lane SIMD types. Validates every lane
// selector against [0, range) — validation may throw a script error — then
// delegates to SIMD128InnerShuffle.
Var SIMD128SlowShuffle(Var src1, Var src2, Var* lanes, const uint range, ScriptContext* scriptContext)
{
    CompileAssert(laneCount == 16 || laneCount == 8);
    SIMDType *first = SIMDType::FromVar(src1);
    SIMDType *second = SIMDType::FromVar(src2);
    Assert(first);
    Assert(second);

    // Validate all selectors up front; any bad index throws before shuffling.
    int32 laneValue[16] = { 0 };
    for (uint idx = 0; idx < laneCount; ++idx)
    {
        laneValue[idx] = SIMDCheckLaneIndex(scriptContext, lanes[idx], range);
    }

    SIMDValue result = SIMD128InnerShuffle<laneCount>(first->GetValue(), second->GetValue(), laneValue);
    return SIMDType::New(&result, scriptContext);
}
// Sweeps the blocks whose sweep was deferred during concurrent collection.
// Under partial GC the blocks only have their state updated and are filed into
// the partial-swept or full lists; otherwise they get a regular concurrent
// sweep and rejoin the allocatable heap block list.
void SmallNormalHeapBucketBase<TBlockType>::SweepPendingObjects(RecyclerSweep& recyclerSweep)
{
    RECYCLER_SLOW_CHECK(VerifyHeapBlockCount(recyclerSweep.IsBackground()));
    // Leaf buckets never have pending sweeps routed here.
    CompileAssert(!BaseT::IsLeafBucket);
    TBlockType *& pendingSweepList = recyclerSweep.GetPendingSweepBlockList(this);
    TBlockType * const list = pendingSweepList;
    Recycler * const recycler = recyclerSweep.GetRecycler();
#if ENABLE_PARTIAL_GC
    bool const partialSweep = recycler->inPartialCollectMode;
#endif
    if (list)
    {
        // Take ownership of the pending list before processing it.
        pendingSweepList = nullptr;
#if ENABLE_PARTIAL_GC
        if (partialSweep)
        {
            // We did a partial sweep.
            // Blocks in the pendingSweepList are the ones we decided not to reuse.
            HeapBlockList::ForEachEditing(list, [this, recycler](TBlockType * heapBlock)
            {
                // We are not going to reuse this block.
                // SweepMode_ConcurrentPartial will not actually collect anything, it will just update some state.
                // The sweepable objects will be collected in a future Sweep.

                // Note, page heap blocks are never swept concurrently
                heapBlock->template SweepObjects<SweepMode_ConcurrentPartial>(recycler);

                // page heap mode should never reach here, so don't check pageheap enabled or not
                if (heapBlock->template HasFreeObject<false>())
                {
                    // We have pre-existing free objects, so put this in the partialSweptHeapBlockList
                    heapBlock->SetNextBlock(this->partialSweptHeapBlockList);
                    this->partialSweptHeapBlockList = heapBlock;
                }
                else
                {
                    // No free objects, so put in the fullBlockList
                    heapBlock->SetNextBlock(this->fullBlockList);
                    this->fullBlockList = heapBlock;
                }
            });
        }
        else
#endif
        {
            // We decided not to do a partial sweep.
            // Blocks in the pendingSweepList need to have a regular sweep.
            TBlockType * tail = SweepPendingObjects<SweepMode_Concurrent>(recycler, list);
            // Splice the swept blocks onto the front of the bucket's block list.
            tail->SetNextBlock(this->heapBlockList);
            this->heapBlockList = list;

            this->StartAllocationAfterSweep();
        }
        RECYCLER_SLOW_CHECK(VerifyHeapBlockCount(recyclerSweep.IsBackground()));
    }
    Assert(!this->IsAllocationStopped());
}
// Textbook Boyer-Moore substring search, specialized for a linear (small)
// last-occurrence map of up to 4 distinct characters. On success returns true
// and stores the match position in inputOffset; otherwise returns false.
bool TextbookBoyerMooreWithLinearMap<C>::Match
    ( const Char *const input
    , const CharCount inputLength
    , CharCount& inputOffset
    , const Char* pat
    , const CharCount patLen
#if ENABLE_REGEX_CONFIG_OPTIONS
    , RegexStats* stats
#endif
    ) const
{
    // This specialization only handles patterns without case-equivalence classes.
    CompileAssert(equivClassSize == 1);
    Assert(input != 0);
    Assert(inputOffset <= inputLength);
    if (inputLength < patLen)
        return false;

    // Hoist member loads into locals for the tight loops below.
    const int32* const localGoodSuffix = goodSuffix;
    const LastOccMap* const localLastOccurrence = &lastOccurrence;
    CharCount offset = inputOffset;
    const CharCount lastPatCharIndex = (patLen - 1);
    const CharCount endOffset = inputLength - lastPatCharIndex;
    // Using int size instead of Char value is faster
    const uint lastPatChar = pat[lastPatCharIndex];
    Assert(lastPatChar == localLastOccurrence->GetChar(0));

    while (offset < endOffset)
    {
        // A separate tight loop to find the last character
        while (true)
        {
#if ENABLE_REGEX_CONFIG_OPTIONS
            if (stats != 0)
                stats->numCompares++;
#endif
            uint inputChar = Chars<Char>::CTU(input[offset + lastPatCharIndex]);
            if (inputChar == lastPatChar)
            {
                // Found a match. Break out of this loop and go to the match pattern loop
                break;
            }

            // Negative case is more common,
            // Write the checks so that we have a super tight loop
            Assert(inputChar != localLastOccurrence->GetChar(0));
            int32 lastOcc;
            if (localLastOccurrence->GetChar(1) != inputChar)
            {
                if (localLastOccurrence->GetChar(2) != inputChar)
                {
                    if (localLastOccurrence->GetChar(3) != inputChar)
                    {
                        // Character not in the pattern at all: skip a whole pattern length.
                        offset += patLen;
                        if (offset >= endOffset)
                        {
                            return false;
                        }
                        continue;
                    }
                    lastOcc = localLastOccurrence->GetLastOcc(3);
                }
                else
                {
                    lastOcc = localLastOccurrence->GetLastOcc(2);
                }
            }
            else
            {
                lastOcc = localLastOccurrence->GetLastOcc(1);
            }
            // Shift by the bad-character rule; it never undercuts the good-suffix shift here.
            Assert((int)lastPatCharIndex - lastOcc >= localGoodSuffix[lastPatCharIndex]);
            offset += lastPatCharIndex - lastOcc;
            if (offset >= endOffset)
            {
                return false;
            }
        }

        // CONSIDER: we can remove this check if we stop using
        // TextbookBoyerMoore for one char pattern
        if (lastPatCharIndex == 0)
        {
            inputOffset = offset;
            return true;
        }

        // Match the rest of the pattern
        int32 j = lastPatCharIndex - 1;
        while (true)
        {
#if ENABLE_REGEX_CONFIG_OPTIONS
            if (stats != 0)
                stats->numCompares++;
#endif
            uint inputChar = Chars<Char>::CTU(input[offset + j]);
            if (inputChar != pat[j])
            {
                // Mismatch: advance by the larger of the good-suffix and
                // bad-character shifts.
                int goodSuffix = localGoodSuffix[j];
                Assert(patLen <= MaxCharCount);
                if (goodSuffix == (int)patLen)
                {
                    offset += patLen;
                }
                else
                {
                    const int32 e = j - localLastOccurrence->Get(inputChar);
                    offset += e > goodSuffix ? e : goodSuffix;
                }
                break;
            }
            if (--j < 0)
            {
                // Whole pattern matched.
                inputOffset = offset;
                return true;
            }
        }
    }
    return false;
}
// Compare against only the first (canonical) entry of each case-equivalence
// class: the pattern stores EquivClassSize characters per position, but this
// specialization matches just the canonical one.
bool MatchPatternAt<CaseInsensitive::EquivClassSize, 1>(uint inputChar, char16 const * pat, CharCount index)
{
    CompileAssert(CaseInsensitive::EquivClassSize == 4);
    // Use the named constant for the stride (the sibling specializations do the
    // same) instead of a hard-coded 4, so it stays in sync with EquivClassSize.
    return inputChar == pat[index * CaseInsensitive::EquivClassSize];
}
// Copies the marshalled IDL data into this wrapper. The wrapper must be
// layout-identical to the IDL struct so the two can alias each other.
AsmJsJITInfo::AsmJsJITInfo(AsmJsDataIDL * data) :
    m_data(*data)
{
    CompileAssert(sizeof(AsmJsJITInfo) == sizeof(AsmJsDataIDL));
}
// Returns true when op is a valid asm.js byte code opcode: either in the
// byte-sized layout table, or in the extended opcode range before ByteCodeLast.
bool OpCodeUtilAsmJs::IsValidByteCodeOpcode(OpCodeAsmJs op)
{
    // The byte-sized table plus the extended table must exactly cover the range.
    CompileAssert((int)Js::OpCodeAsmJs::MaxByteSizedOpcodes + 1 + _countof(OpCodeUtilAsmJs::ExtendedOpCodeAsmJsLayouts) == (int)Js::OpCodeAsmJs::ByteCodeLast);
    if (op < _countof(OpCodeAsmJsLayouts))
    {
        return true;
    }
    return op > Js::OpCodeAsmJs::MaxByteSizedOpcodes && op < Js::OpCodeAsmJs::ByteCodeLast;
}
// Translates user-facing config flags (interpret counts, *After flags,
// ExecutionModeLimits string, prejit/force-native switches) into the internal
// per-tier execution mode limits, keeping their sum within uint16 range.
// The order of the sections below matters: later sections override earlier
// automatic configuration.
void ConfigFlagsTable::TranslateFlagConfiguration()
{
    // Sanity-checks every limit's range and that their sum fits in uint16.
    const auto VerifyExecutionModeLimits = [this]()
    {
        const Number zero = static_cast<Number>(0);
        const Number maxUint8 = static_cast<Number>(static_cast<uint8>(-1)); // entry point call count is uint8
        const Number maxUint16 = static_cast<Number>(static_cast<uint16>(-1));

#if ENABLE_DEBUG_CONFIG_OPTIONS
        Assert(MinInterpretCount >= zero);
        Assert(MinInterpretCount <= maxUint16);
        Assert(MaxInterpretCount >= zero);
        Assert(MaxInterpretCount <= maxUint16);
        Assert(MinSimpleJitRunCount >= zero);
        Assert(MinSimpleJitRunCount <= maxUint8);
        Assert(MaxSimpleJitRunCount >= zero);
        Assert(MaxSimpleJitRunCount <= maxUint8);

        Assert(SimpleJitAfter >= zero);
        Assert(SimpleJitAfter <= maxUint8);
        Assert(FullJitAfter >= zero);
        Assert(FullJitAfter <= maxUint16);
#endif

        Assert(AutoProfilingInterpreter0Limit >= zero);
        Assert(AutoProfilingInterpreter0Limit <= maxUint16);
        Assert(ProfilingInterpreter0Limit >= zero);
        Assert(ProfilingInterpreter0Limit <= maxUint16);
        Assert(AutoProfilingInterpreter1Limit >= zero);
        Assert(AutoProfilingInterpreter1Limit <= maxUint16);
        Assert(SimpleJitLimit >= zero);
        Assert(SimpleJitLimit <= maxUint8);
        Assert(ProfilingInterpreter1Limit >= zero);
        Assert(ProfilingInterpreter1Limit <= maxUint16);
        Assert(
            (
                AutoProfilingInterpreter0Limit +
                ProfilingInterpreter0Limit +
                AutoProfilingInterpreter1Limit +
                SimpleJitLimit +
                ProfilingInterpreter1Limit
            ) <= maxUint16);
    };
    VerifyExecutionModeLimits();

#if ENABLE_DEBUG_CONFIG_OPTIONS
#if !DISABLE_JIT
    if(ForceDynamicProfile)
    {
        Force.Enable(DynamicProfilePhase);
    }
    if(ForceJITLoopBody)
    {
        Force.Enable(JITLoopBodyPhase);
    }
#endif
    if(NoDeferParse)
    {
        Off.Enable(DeferParsePhase);
    }
#endif

#if ENABLE_DEBUG_CONFIG_OPTIONS && !DISABLE_JIT
    // Map the legacy Min/MaxInterpretCount flags onto SimpleJitAfter /
    // FullJitAfter when the latter were not given explicitly.
    bool dontEnforceLimitsForSimpleJitAfterOrFullJitAfter = false;
    if((IsEnabled(MinInterpretCountFlag) || IsEnabled(MaxInterpretCountFlag)) &&
        !(IsEnabled(SimpleJitAfterFlag) || IsEnabled(FullJitAfterFlag)))
    {
        if(Off.IsEnabled(SimpleJitPhase))
        {
            // Simple JIT is off: interpret counts translate straight to full JIT.
            Enable(FullJitAfterFlag);
            if(IsEnabled(MaxInterpretCountFlag))
            {
                FullJitAfter = MaxInterpretCount;
            }
            else
            {
                FullJitAfter = MinInterpretCount;
                dontEnforceLimitsForSimpleJitAfterOrFullJitAfter = true;
            }
        }
        else
        {
            Enable(SimpleJitAfterFlag);
            if(IsEnabled(MaxInterpretCountFlag))
            {
                SimpleJitAfter = MaxInterpretCount;
            }
            else
            {
                SimpleJitAfter = MinInterpretCount;
                dontEnforceLimitsForSimpleJitAfterOrFullJitAfter = true;
            }
            if((IsEnabled(MinInterpretCountFlag) && IsEnabled(MinSimpleJitRunCountFlag)) ||
                IsEnabled(MaxSimpleJitRunCountFlag))
            {
                Enable(FullJitAfterFlag);
                FullJitAfter = SimpleJitAfter;
                if(IsEnabled(MaxSimpleJitRunCountFlag))
                {
                    FullJitAfter += MaxSimpleJitRunCount;
                }
                else
                {
                    FullJitAfter += MinSimpleJitRunCount;
                    Assert(dontEnforceLimitsForSimpleJitAfterOrFullJitAfter);
                }
            }
        }
    }

    // Configure execution mode limits
    do
    {
        // Explicit per-tier limit flags win: leave everything as given.
        if(IsEnabled(AutoProfilingInterpreter0LimitFlag) ||
            IsEnabled(ProfilingInterpreter0LimitFlag) ||
            IsEnabled(AutoProfilingInterpreter1LimitFlag) ||
            IsEnabled(SimpleJitLimitFlag) ||
            IsEnabled(ProfilingInterpreter1LimitFlag))
        {
            break;
        }

        // ExecutionModeLimits is a dotted 5-tuple of per-tier limits.
        if(IsEnabled(ExecutionModeLimitsFlag))
        {
            uint autoProfilingInterpreter0Limit;
            uint profilingInterpreter0Limit;
            uint autoProfilingInterpreter1Limit;
            uint simpleJitLimit;
            uint profilingInterpreter1Limit;
            const int scannedCount =
                swscanf_s(
                    static_cast<LPCWSTR>(ExecutionModeLimits),
                    _u("%u.%u.%u.%u.%u"),
                    &autoProfilingInterpreter0Limit,
                    &profilingInterpreter0Limit,
                    &autoProfilingInterpreter1Limit,
                    &simpleJitLimit,
                    &profilingInterpreter1Limit);
            Assert(scannedCount == 5);

            Enable(AutoProfilingInterpreter0LimitFlag);
            Enable(ProfilingInterpreter0LimitFlag);
            Enable(AutoProfilingInterpreter1LimitFlag);
            Enable(SimpleJitLimitFlag);
            Enable(ProfilingInterpreter1LimitFlag);
            AutoProfilingInterpreter0Limit = autoProfilingInterpreter0Limit;
            ProfilingInterpreter0Limit = profilingInterpreter0Limit;
            AutoProfilingInterpreter1Limit = autoProfilingInterpreter1Limit;
            SimpleJitLimit = simpleJitLimit;
            ProfilingInterpreter1Limit = profilingInterpreter1Limit;
            break;
        }

        if(!NewSimpleJit)
        {
            // Use the defaults for old simple JIT. The flags are not enabled here because the values can be changed later
            // based on other flags, only the defaults values are adjusted here.
            AutoProfilingInterpreter0Limit = DEFAULT_CONFIG_AutoProfilingInterpreter0Limit;
            ProfilingInterpreter0Limit = DEFAULT_CONFIG_ProfilingInterpreter0Limit;
            CompileAssert(
                DEFAULT_CONFIG_AutoProfilingInterpreter0Limit <= DEFAULT_CONFIG_AutoProfilingInterpreterLimit_OldSimpleJit);
            AutoProfilingInterpreter1Limit =
                DEFAULT_CONFIG_AutoProfilingInterpreterLimit_OldSimpleJit - DEFAULT_CONFIG_AutoProfilingInterpreter0Limit;
            CompileAssert(DEFAULT_CONFIG_ProfilingInterpreter0Limit <= DEFAULT_CONFIG_SimpleJitLimit_OldSimpleJit);
            SimpleJitLimit = DEFAULT_CONFIG_SimpleJitLimit_OldSimpleJit - DEFAULT_CONFIG_ProfilingInterpreter0Limit;
            ProfilingInterpreter1Limit = 0;
            VerifyExecutionModeLimits();
        }

        if (IsEnabled(SimpleJitAfterFlag))
        {
            // Distribute SimpleJitAfter iterations across the pre-simple-JIT tiers.
            Enable(AutoProfilingInterpreter0LimitFlag);
            Enable(ProfilingInterpreter0LimitFlag);
            Enable(AutoProfilingInterpreter1LimitFlag);
            Enable(EnforceExecutionModeLimitsFlag);
            {
                Js::Number iterationsNeeded = SimpleJitAfter;
                ProfilingInterpreter0Limit = min(ProfilingInterpreter0Limit, iterationsNeeded);
                iterationsNeeded -= ProfilingInterpreter0Limit;
                AutoProfilingInterpreter0Limit = iterationsNeeded;
                AutoProfilingInterpreter1Limit = 0;
            }

            if(IsEnabled(FullJitAfterFlag))
            {
                Enable(SimpleJitLimitFlag);
                Enable(ProfilingInterpreter1LimitFlag);
                Assert(SimpleJitAfter <= FullJitAfter);
                Js::Number iterationsNeeded = FullJitAfter - SimpleJitAfter;
                // Ensure enough profiling iterations happen before full JIT.
                Js::Number profilingIterationsNeeded =
                    min(NewSimpleJit ? DEFAULT_CONFIG_MinProfileIterations : DEFAULT_CONFIG_MinProfileIterations_OldSimpleJit,
                        FullJitAfter) -
                    ProfilingInterpreter0Limit;
                if(NewSimpleJit)
                {
                    ProfilingInterpreter1Limit = min(ProfilingInterpreter1Limit, iterationsNeeded);
                    iterationsNeeded -= ProfilingInterpreter1Limit;
                    profilingIterationsNeeded -= ProfilingInterpreter1Limit;
                    SimpleJitLimit = iterationsNeeded;
                }
                else
                {
                    SimpleJitLimit = iterationsNeeded;
                    profilingIterationsNeeded -= min(SimpleJitLimit, profilingIterationsNeeded);
                    ProfilingInterpreter1Limit = 0;
                }

                if(profilingIterationsNeeded != 0)
                {
                    // Convert auto-profiling iterations into profiling ones
                    // until the profiling requirement is satisfied.
                    Js::Number iterationsToMove = min(AutoProfilingInterpreter1Limit, profilingIterationsNeeded);
                    AutoProfilingInterpreter1Limit -= iterationsToMove;
                    ProfilingInterpreter0Limit += iterationsToMove;
                    profilingIterationsNeeded -= iterationsToMove;

                    iterationsToMove = min(AutoProfilingInterpreter0Limit, profilingIterationsNeeded);
                    AutoProfilingInterpreter0Limit -= iterationsToMove;
                    ProfilingInterpreter0Limit += iterationsToMove;
                    profilingIterationsNeeded -= iterationsToMove;

                    Assert(profilingIterationsNeeded == 0);
                }

                Assert(
                    (
                        AutoProfilingInterpreter0Limit +
                        ProfilingInterpreter0Limit +
                        AutoProfilingInterpreter1Limit +
                        SimpleJitLimit +
                        ProfilingInterpreter1Limit
                    ) == FullJitAfter);
            }

            Assert(
                (
                    AutoProfilingInterpreter0Limit +
                    ProfilingInterpreter0Limit +
                    AutoProfilingInterpreter1Limit
                ) == SimpleJitAfter);
            EnforceExecutionModeLimits = true;
            break;
        }

        if(IsEnabled(FullJitAfterFlag))
        {
            // Only FullJitAfter given: clamp each tier's limit in priority
            // order until the iterations sum to FullJitAfter.
            Enable(AutoProfilingInterpreter0LimitFlag);
            Enable(ProfilingInterpreter0LimitFlag);
            Enable(AutoProfilingInterpreter1LimitFlag);
            Enable(SimpleJitLimitFlag);
            Enable(ProfilingInterpreter1LimitFlag);
            Enable(EnforceExecutionModeLimitsFlag);

            Js::Number iterationsNeeded = FullJitAfter;
            if(NewSimpleJit)
            {
                ProfilingInterpreter1Limit = min(ProfilingInterpreter1Limit, iterationsNeeded);
                iterationsNeeded -= ProfilingInterpreter1Limit;
            }
            else
            {
                ProfilingInterpreter1Limit = 0;
                SimpleJitLimit = min(SimpleJitLimit, iterationsNeeded);
                iterationsNeeded -= SimpleJitLimit;
            }
            ProfilingInterpreter0Limit = min(ProfilingInterpreter0Limit, iterationsNeeded);
            iterationsNeeded -= ProfilingInterpreter0Limit;
            if(NewSimpleJit)
            {
                SimpleJitLimit = min(SimpleJitLimit, iterationsNeeded);
                iterationsNeeded -= SimpleJitLimit;
            }
            AutoProfilingInterpreter0Limit = min(AutoProfilingInterpreter0Limit, iterationsNeeded);
            iterationsNeeded -= AutoProfilingInterpreter0Limit;
            AutoProfilingInterpreter1Limit = iterationsNeeded;

            Assert(
                (
                    AutoProfilingInterpreter0Limit +
                    ProfilingInterpreter0Limit +
                    AutoProfilingInterpreter1Limit +
                    SimpleJitLimit +
                    ProfilingInterpreter1Limit
                ) == FullJitAfter);
            EnforceExecutionModeLimits = true;
            break;
        }

        if (IsEnabled(MaxTemplatizedJitRunCountFlag))
        {
            if (MaxTemplatizedJitRunCount >= 0)
            {
                MinTemplatizedJitRunCount = MaxTemplatizedJitRunCount;
            }
        }
        if (IsEnabled(MaxAsmJsInterpreterRunCountFlag))
        {
            if (MaxAsmJsInterpreterRunCount >= 0)
            {
                MinAsmJsInterpreterRunCount = MaxAsmJsInterpreterRunCount;
            }
        }
    } while(false);
#endif

    // Prejit / ForceNative: skip straight to native code generation.
    if(
        (
#ifdef ENABLE_PREJIT
            Prejit ||
#endif
            ForceNative
        ) &&
        !NoNative)
    {
        Enable(AutoProfilingInterpreter0LimitFlag);
        Enable(ProfilingInterpreter0LimitFlag);
        Enable(AutoProfilingInterpreter1LimitFlag);
        Enable(EnforceExecutionModeLimitsFlag);

        // Override any relevant automatic configuration above
        AutoProfilingInterpreter0Limit = 0;
        ProfilingInterpreter0Limit = 0;
        AutoProfilingInterpreter1Limit = 0;
#if ENABLE_DEBUG_CONFIG_OPTIONS
        if(Off.IsEnabled(SimpleJitPhase))
        {
            Enable(SimpleJitLimitFlag);
            Enable(ProfilingInterpreter1LimitFlag);
            SimpleJitLimit = 0;
            ProfilingInterpreter1Limit = 0;
        }
#endif
        EnforceExecutionModeLimits = true;
    }

    VerifyExecutionModeLimits();
}
// Copies the marshalled IDL profile data into this wrapper. The wrapper must
// be layout-identical to the IDL struct so the two can alias each other.
JITTimeProfileInfo::JITTimeProfileInfo(ProfileDataIDL * profileData) :
    m_profileData(*profileData)
{
    CompileAssert(sizeof(JITTimeProfileInfo) == sizeof(ProfileDataIDL));
}
// Copies the marshalled IDL function body data into this wrapper. The wrapper
// must be layout-identical to the IDL struct so the two can alias each other.
JITTimeFunctionBody::JITTimeFunctionBody(FunctionBodyDataIDL * bodyData) :
    m_bodyData(*bodyData)
{
    CompileAssert(sizeof(JITTimeFunctionBody) == sizeof(FunctionBodyDataIDL));
}
int DynamicTypeHandler::RoundUpAuxSlotCapacity(const int slotCapacity) { CompileAssert(4 * sizeof(Var) % HeapConstants::ObjectGranularity == 0); return ::Math::Align<int>(slotCapacity, 4); }
/* static */ void JITTimeProfileInfo::InitializeJITProfileData( __in ArenaAllocator * alloc, __in Js::DynamicProfileInfo * profileInfo, __in Js::FunctionBody *functionBody, __out ProfileDataIDL * data, bool isForegroundJIT) { if (profileInfo == nullptr) { return; } CompileAssert(sizeof(LdElemIDL) == sizeof(Js::LdElemInfo)); CompileAssert(sizeof(StElemIDL) == sizeof(Js::StElemInfo)); data->profiledLdElemCount = functionBody->GetProfiledLdElemCount(); data->profiledStElemCount = functionBody->GetProfiledStElemCount(); if (JITManager::GetJITManager()->IsOOPJITEnabled() || isForegroundJIT) { data->ldElemData = (LdElemIDL*)profileInfo->GetLdElemInfo(); data->stElemData = (StElemIDL*)profileInfo->GetStElemInfo(); } else { // for in-proc background JIT we need to explicitly copy LdElem and StElem info data->ldElemData = AnewArray(alloc, LdElemIDL, data->profiledLdElemCount); memcpy_s( data->ldElemData, data->profiledLdElemCount * sizeof(LdElemIDL), profileInfo->GetLdElemInfo(), functionBody->GetProfiledLdElemCount() * sizeof(Js::LdElemInfo) ); data->stElemData = AnewArray(alloc, StElemIDL, data->profiledStElemCount); memcpy_s( data->stElemData, data->profiledStElemCount * sizeof(StElemIDL), profileInfo->GetStElemInfo(), functionBody->GetProfiledStElemCount() * sizeof(Js::StElemInfo) ); } CompileAssert(sizeof(ArrayCallSiteIDL) == sizeof(Js::ArrayCallSiteInfo)); data->profiledArrayCallSiteCount = functionBody->GetProfiledArrayCallSiteCount(); data->arrayCallSiteData = (ArrayCallSiteIDL*)profileInfo->GetArrayCallSiteInfo(); data->arrayCallSiteDataAddr = (intptr_t)profileInfo->GetArrayCallSiteInfo(); CompileAssert(sizeof(FldIDL) == sizeof(Js::FldInfo)); data->inlineCacheCount = functionBody->GetProfiledFldCount(); data->fldData = (FldIDL*)profileInfo->GetFldInfo(); data->fldDataAddr = (intptr_t)profileInfo->GetFldInfo(); CompileAssert(sizeof(ThisIDL) == sizeof(Js::ThisInfo)); data->thisData = *reinterpret_cast<ThisIDL*>(&profileInfo->GetThisInfo()); 
CompileAssert(sizeof(CallSiteIDL) == sizeof(Js::CallSiteInfo)); data->profiledCallSiteCount = functionBody->GetProfiledCallSiteCount(); data->callSiteData = reinterpret_cast<CallSiteIDL*>(profileInfo->GetCallSiteInfo()); CompileAssert(sizeof(BVUnitIDL) == sizeof(BVUnit)); data->loopFlags = (BVFixedIDL*)profileInfo->GetLoopFlags(); CompileAssert(sizeof(ValueType) == sizeof(uint16)); data->profiledSlotCount = functionBody->GetProfiledSlotCount(); data->slotData = reinterpret_cast<uint16*>(profileInfo->GetSlotInfo()); data->profiledReturnTypeCount = functionBody->GetProfiledReturnTypeCount(); data->returnTypeData = reinterpret_cast<uint16*>(profileInfo->GetReturnTypeInfo()); data->profiledDivOrRemCount = functionBody->GetProfiledDivOrRemCount(); data->divideTypeInfo = reinterpret_cast<uint16*>(profileInfo->GetDivideTypeInfo()); data->profiledSwitchCount = functionBody->GetProfiledSwitchCount(); data->switchTypeInfo = reinterpret_cast<uint16*>(profileInfo->GetSwitchTypeInfo()); data->profiledInParamsCount = functionBody->GetProfiledInParamsCount(); data->parameterInfo = reinterpret_cast<uint16*>(profileInfo->GetParameterInfo()); data->loopCount = functionBody->GetLoopCount(); data->loopImplicitCallFlags = reinterpret_cast<byte*>(profileInfo->GetLoopImplicitCallFlags()); data->implicitCallFlags = static_cast<byte>(profileInfo->GetImplicitCallFlags()); data->flags = 0; data->flags |= profileInfo->IsAggressiveIntTypeSpecDisabled(false) ? Flags_disableAggressiveIntTypeSpec : 0; data->flags |= profileInfo->IsAggressiveIntTypeSpecDisabled(true) ? Flags_disableAggressiveIntTypeSpec_jitLoopBody : 0; data->flags |= profileInfo->IsAggressiveMulIntTypeSpecDisabled(false) ? Flags_disableAggressiveMulIntTypeSpec : 0; data->flags |= profileInfo->IsAggressiveMulIntTypeSpecDisabled(true) ? Flags_disableAggressiveMulIntTypeSpec_jitLoopBody : 0; data->flags |= profileInfo->IsDivIntTypeSpecDisabled(false) ? 
Flags_disableDivIntTypeSpec : 0; data->flags |= profileInfo->IsDivIntTypeSpecDisabled(true) ? Flags_disableDivIntTypeSpec_jitLoopBody : 0; data->flags |= profileInfo->IsLossyIntTypeSpecDisabled() ? Flags_disableLossyIntTypeSpec : 0; data->flags |= profileInfo->IsTrackCompoundedIntOverflowDisabled() ? Flags_disableTrackCompoundedIntOverflow : 0; data->flags |= profileInfo->IsFloatTypeSpecDisabled() ? Flags_disableFloatTypeSpec : 0; data->flags |= profileInfo->IsArrayCheckHoistDisabled(false) ? Flags_disableArrayCheckHoist : 0; data->flags |= profileInfo->IsArrayCheckHoistDisabled(true) ? Flags_disableArrayCheckHoist_jitLoopBody : 0; data->flags |= profileInfo->IsArrayMissingValueCheckHoistDisabled(false) ? Flags_disableArrayMissingValueCheckHoist : 0; data->flags |= profileInfo->IsArrayMissingValueCheckHoistDisabled(true) ? Flags_disableArrayMissingValueCheckHoist_jitLoopBody : 0; data->flags |= profileInfo->IsJsArraySegmentHoistDisabled(false) ? Flags_disableJsArraySegmentHoist : 0; data->flags |= profileInfo->IsJsArraySegmentHoistDisabled(true) ? Flags_disableJsArraySegmentHoist_jitLoopBody : 0; data->flags |= profileInfo->IsArrayLengthHoistDisabled(false) ? Flags_disableArrayLengthHoist : 0; data->flags |= profileInfo->IsArrayLengthHoistDisabled(true) ? Flags_disableArrayLengthHoist_jitLoopBody : 0; data->flags |= profileInfo->IsTypedArrayTypeSpecDisabled(false) ? Flags_disableTypedArrayTypeSpec : 0; data->flags |= profileInfo->IsTypedArrayTypeSpecDisabled(true) ? Flags_disableTypedArrayTypeSpec_jitLoopBody : 0; data->flags |= profileInfo->IsLdLenIntSpecDisabled() ? Flags_disableLdLenIntSpec : 0; data->flags |= profileInfo->IsBoundCheckHoistDisabled(false) ? Flags_disableBoundCheckHoist : 0; data->flags |= profileInfo->IsBoundCheckHoistDisabled(true) ? Flags_disableBoundCheckHoist_jitLoopBody : 0; data->flags |= profileInfo->IsLoopCountBasedBoundCheckHoistDisabled(false) ? 
Flags_disableLoopCountBasedBoundCheckHoist : 0; data->flags |= profileInfo->IsLoopCountBasedBoundCheckHoistDisabled(true) ? Flags_disableLoopCountBasedBoundCheckHoist_jitLoopBody : 0; data->flags |= profileInfo->IsFloorInliningDisabled() ? Flags_disableFloorInlining : 0; data->flags |= profileInfo->IsNoProfileBailoutsDisabled() ? Flags_disableNoProfileBailouts : 0; data->flags |= profileInfo->IsSwitchOptDisabled() ? Flags_disableSwitchOpt : 0; data->flags |= profileInfo->IsEquivalentObjTypeSpecDisabled() ? Flags_disableEquivalentObjTypeSpec : 0; data->flags |= profileInfo->IsObjTypeSpecDisabledInJitLoopBody() ? Flags_disableObjTypeSpec_jitLoopBody : 0; data->flags |= profileInfo->IsMemOpDisabled() ? Flags_disableMemOp : 0; data->flags |= profileInfo->IsCheckThisDisabled() ? Flags_disableCheckThis : 0; data->flags |= profileInfo->HasLdFldCallSiteInfo() ? Flags_hasLdFldCallSiteInfo : 0; data->flags |= profileInfo->IsStackArgOptDisabled() ? Flags_disableStackArgOpt : 0; data->flags |= profileInfo->IsLoopImplicitCallInfoDisabled() ? Flags_disableLoopImplicitCallInfo : 0; data->flags |= profileInfo->IsPowIntIntTypeSpecDisabled() ? Flags_disablePowIntIntTypeSpec : 0; }