GetByIdStatus GetByIdStatus::computeFor(
    CodeBlock* profiledBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap,
    StubInfoMap& dfgMap, CodeOrigin codeOrigin, StringImpl* uid)
{
#if ENABLE(DFG_JIT)
    if (dfgBlock) {
        GetByIdStatus result;
        {
            ConcurrentJITLocker locker(dfgBlock->m_lock);
            result = computeForStubInfo(locker, dfgBlock, dfgMap.get(codeOrigin), uid);
        }
        if (result.takesSlowPath())
            return result;
        {
            ConcurrentJITLocker locker(profiledBlock->m_lock);
            if (hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex, ExitFromFTL))
                return GetByIdStatus(TakesSlowPath, true);
        }
        if (result.isSet())
            return result;
    }
#else
    UNUSED_PARAM(dfgBlock);
    UNUSED_PARAM(dfgMap);
#endif

    return computeFor(profiledBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
}
GetByIdStatus GetByIdStatus::computeFor(const StructureSet& set, StringImpl* uid)
{
    // For now we only handle the super simple self access case. We could handle the
    // prototype case in the future.

    if (set.isEmpty())
        return GetByIdStatus();

    if (toUInt32FromStringImpl(uid) != PropertyName::NotAnIndex)
        return GetByIdStatus(TakesSlowPath);

    GetByIdStatus result;
    result.m_state = Simple;
    result.m_wasSeenInJIT = false;

    for (unsigned i = 0; i < set.size(); ++i) {
        Structure* structure = set[i];
        if (structure->typeInfo().overridesGetOwnPropertySlot()
            && structure->typeInfo().type() != GlobalObjectType)
            return GetByIdStatus(TakesSlowPath);

        if (!structure->propertyAccessesAreCacheable())
            return GetByIdStatus(TakesSlowPath);

        unsigned attributes;
        PropertyOffset offset = structure->getConcurrently(uid, attributes);
        if (!isValidOffset(offset))
            return GetByIdStatus(TakesSlowPath); // It's probably a prototype lookup. Give up on life for now, even though we could totally be way smarter about it.
        if (attributes & Accessor)
            return GetByIdStatus(MakesCalls); // We could be smarter here, like strength-reducing this to a Call.

        if (!result.appendVariant(GetByIdVariant(structure, offset)))
            return GetByIdStatus(TakesSlowPath);
    }

    return result;
}
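// Illustrative sketch (standalone, not JSC code): the early bail-out above rejects any
// uid that parses as an array index, because indexed accesses bypass the structure's
// named-property table and cannot be cached as a simple self access. The helper name
// looksLikeArrayIndex and the constant kNotAnIndex below are stand-ins for
// toUInt32FromStringImpl() and PropertyName::NotAnIndex, not real JSC declarations.
#include <cstdint>
#include <string>

static const uint32_t kNotAnIndex = UINT32_MAX;

static uint32_t looksLikeArrayIndex(const std::string& uid)
{
    if (uid.empty() || (uid.size() > 1 && uid[0] == '0'))
        return kNotAnIndex; // "" and "042" are treated as plain names here, not canonical indices.
    uint64_t value = 0;
    for (char c : uid) {
        if (c < '0' || c > '9')
            return kNotAnIndex; // Any non-digit makes it a plain property name.
        value = value * 10 + static_cast<uint64_t>(c - '0');
        if (value >= kNotAnIndex)
            return kNotAnIndex; // Too large to be a valid array index.
    }
    return static_cast<uint32_t>(value);
}

// looksLikeArrayIndex("42")  == 42          -> computeFor() above takes the slow path.
// looksLikeArrayIndex("foo") == kNotAnIndex -> eligible for the simple self-access case.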
GetByIdStatus GetByIdStatus::computeForStubInfo(
    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
    CodeOrigin codeOrigin, UniquedStringImpl* uid)
{
    GetByIdStatus result = GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
        locker, profiledBlock, stubInfo, uid,
        CallLinkStatus::computeExitSiteData(locker, profiledBlock, codeOrigin.bytecodeIndex));

    if (!result.takesSlowPath()
        && GetByIdStatus::hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex)) {
        return GetByIdStatus(
            result.makesCalls() ? GetByIdStatus::MakesCalls : GetByIdStatus::TakesSlowPath, true);
    }

    return result;
}
GetByIdStatus GetByIdStatus::computeFor(
    CodeBlock* profiledBlock, CodeBlock* dfgBlock, StubInfoMap& baselineMap,
    StubInfoMap& dfgMap, CodeOrigin codeOrigin, UniquedStringImpl* uid)
{
#if ENABLE(DFG_JIT)
    if (dfgBlock) {
        CallLinkStatus::ExitSiteData exitSiteData;
        {
            ConcurrentJITLocker locker(profiledBlock->m_lock);
            exitSiteData = CallLinkStatus::computeExitSiteData(
                locker, profiledBlock, codeOrigin.bytecodeIndex);
        }

        GetByIdStatus result;
        {
            ConcurrentJITLocker locker(dfgBlock->m_lock);
            result = computeForStubInfoWithoutExitSiteFeedback(
                locker, dfgBlock, dfgMap.get(codeOrigin), uid, exitSiteData);
        }

        if (result.takesSlowPath())
            return result;

        {
            ConcurrentJITLocker locker(profiledBlock->m_lock);
            if (hasExitSite(locker, profiledBlock, codeOrigin.bytecodeIndex))
                return GetByIdStatus(TakesSlowPath, true);
        }

        if (result.isSet())
            return result;
    }
#else
    UNUSED_PARAM(dfgBlock);
    UNUSED_PARAM(dfgMap);
#endif

    return computeFor(profiledBlock, baselineMap, codeOrigin.bytecodeIndex, uid);
}
GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);

    GetByIdStatus result;

#if ENABLE(DFG_JIT)
    result = computeForStubInfoWithoutExitSiteFeedback(
        locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid,
        CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));

    if (!result.takesSlowPath() && hasExitSite(locker, profiledBlock, bytecodeIndex))
        return GetByIdStatus(result.makesCalls() ? MakesCalls : TakesSlowPath, true);
#else
    UNUSED_PARAM(map);
#endif

    if (!result)
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);

    return result;
}
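// Illustrative sketch (stand-in types, not the real GetByIdStatus API) of the exit-site
// feedback pattern shared by the computeFor()/computeForStubInfo() overloads above: an
// optimistic status derived from the inline cache is downgraded once profiling says this
// bytecode has already triggered OSR exits, while remembering whether any cached access
// invoked a getter (MakesCalls) so the DFG still budgets for the call.
enum class State { NoInformation, Simple, TakesSlowPath, MakesCalls };

struct Status {
    State state = State::NoInformation;
    bool anyVariantMakesCalls = false; // e.g. a Simple status whose variant inlines a getter call

    bool takesSlowPath() const { return state == State::TakesSlowPath || state == State::MakesCalls; }
    bool makesCalls() const { return state == State::MakesCalls || anyVariantMakesCalls; }
};

static Status downgradeForExitSites(Status optimistic, bool hasExitedAtThisBytecode)
{
    // Mirrors: if (!result.takesSlowPath() && hasExitSite(...))
    //              return GetByIdStatus(result.makesCalls() ? MakesCalls : TakesSlowPath, true);
    if (!optimistic.takesSlowPath() && hasExitedAtThisBytecode) {
        Status downgraded;
        downgraded.state = optimistic.makesCalls() ? State::MakesCalls : State::TakesSlowPath;
        return downgraded;
    }
    return optimistic;
}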
GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);

    GetByIdStatus result;

#if ENABLE(DFG_JIT)
    result = computeForStubInfo(
        locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid);

    if (!result.takesSlowPath()
        && (hasExitSite(locker, profiledBlock, bytecodeIndex)
            || profiledBlock->likelyToTakeSlowCase(bytecodeIndex)))
        return GetByIdStatus(TakesSlowPath, true);
#else
    UNUSED_PARAM(map);
#endif

    if (!result)
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);

    return result;
}
bool foldConstants(BasicBlock* block)
{
    bool changed = false;
    m_state.beginBasicBlock(block);
    for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
        if (!m_state.isValid())
            break;

        Node* node = block->at(indexInBlock);

        bool alreadyHandled = false;
        bool eliminated = false;

        switch (node->op()) {
        case BooleanToNumber: {
            if (node->child1().useKind() == UntypedUse
                && !m_interpreter.needsTypeCheck(node->child1(), SpecBoolean))
                node->child1().setUseKind(BooleanUse);
            break;
        }

        case CheckArgumentsNotCreated: {
            if (!isEmptySpeculation(
                    m_state.variables().operand(
                        m_graph.argumentsRegisterFor(node->origin.semantic)).m_type))
                break;
            node->convertToPhantom();
            eliminated = true;
            break;
        }

        case CheckStructure:
        case ArrayifyToStructure: {
            AbstractValue& value = m_state.forNode(node->child1());
            StructureSet set;
            if (node->op() == ArrayifyToStructure)
                set = node->structure();
            else
                set = node->structureSet();
            if (value.m_structure.isSubsetOf(set)) {
                m_interpreter.execute(indexInBlock); // Catch the fact that we may filter on cell.
                node->convertToPhantom();
                eliminated = true;
                break;
            }
            break;
        }

        case CheckArray:
        case Arrayify: {
            if (!node->arrayMode().alreadyChecked(m_graph, node, m_state.forNode(node->child1())))
                break;
            node->convertToPhantom();
            eliminated = true;
            break;
        }

        case PutStructure: {
            if (m_state.forNode(node->child1()).m_structure.onlyStructure() != node->transition()->next)
                break;
            node->convertToPhantom();
            eliminated = true;
            break;
        }

        case CheckFunction: {
            if (m_state.forNode(node->child1()).value() != node->function()->value())
                break;
            node->convertToPhantom();
            eliminated = true;
            break;
        }

        case CheckInBounds: {
            JSValue left = m_state.forNode(node->child1()).value();
            JSValue right = m_state.forNode(node->child2()).value();
            if (left && right && left.isInt32() && right.isInt32()
                && static_cast<uint32_t>(left.asInt32()) < static_cast<uint32_t>(right.asInt32())) {
                node->convertToPhantom();
                eliminated = true;
                break;
            }
            break;
        }

        case MultiGetByOffset: {
            Edge baseEdge = node->child1();
            Node* base = baseEdge.node();
            MultiGetByOffsetData& data = node->multiGetByOffsetData();

            // First prune the variants, then check if the MultiGetByOffset can be
            // strength-reduced to a GetByOffset.

            AbstractValue baseValue = m_state.forNode(base);

            m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
            alreadyHandled = true; // Don't allow the default constant folder to do things to this.

            for (unsigned i = 0; i < data.variants.size(); ++i) {
                GetByIdVariant& variant = data.variants[i];
                variant.structureSet().filter(baseValue);
                if (variant.structureSet().isEmpty()) {
                    data.variants[i--] = data.variants.last();
                    data.variants.removeLast();
                    changed = true;
                }
            }

            if (data.variants.size() != 1)
                break;

            emitGetByOffset(
                indexInBlock, node, baseValue, data.variants[0], data.identifierNumber);
            changed = true;
            break;
        }

        case MultiPutByOffset: {
            Edge baseEdge = node->child1();
            Node* base = baseEdge.node();
            MultiPutByOffsetData& data = node->multiPutByOffsetData();

            AbstractValue baseValue = m_state.forNode(base);

            m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
            alreadyHandled = true; // Don't allow the default constant folder to do things to this.
            for (unsigned i = 0; i < data.variants.size(); ++i) {
                PutByIdVariant& variant = data.variants[i];
                variant.oldStructure().filter(baseValue);

                if (variant.oldStructure().isEmpty()) {
                    data.variants[i--] = data.variants.last();
                    data.variants.removeLast();
                    changed = true;
                    continue;
                }

                if (variant.kind() == PutByIdVariant::Transition
                    && variant.oldStructure().onlyStructure() == variant.newStructure()) {
                    variant = PutByIdVariant::replace(
                        variant.oldStructure(), variant.offset());
                    changed = true;
                }
            }

            if (data.variants.size() != 1)
                break;

            emitPutByOffset(
                indexInBlock, node, baseValue, data.variants[0], data.identifierNumber);
            changed = true;
            break;
        }

        case GetById:
        case GetByIdFlush: {
            Edge childEdge = node->child1();
            Node* child = childEdge.node();
            unsigned identifierNumber = node->identifierNumber();

            AbstractValue baseValue = m_state.forNode(child);

            m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
            alreadyHandled = true; // Don't allow the default constant folder to do things to this.

            if (baseValue.m_structure.isTop() || baseValue.m_structure.isClobbered()
                || (node->child1().useKind() == UntypedUse || (baseValue.m_type & ~SpecCell)))
                break;

            GetByIdStatus status = GetByIdStatus::computeFor(
                vm(), baseValue.m_structure.set(), m_graph.identifiers()[identifierNumber]);
            if (!status.isSimple())
                break;

            for (unsigned i = status.numVariants(); i--;) {
                if (!status[i].constantChecks().isEmpty()
                    || status[i].alternateBase()) {
                    // FIXME: We could handle prototype cases.
                    // https://bugs.webkit.org/show_bug.cgi?id=110386
                    break;
                }
            }

            if (status.numVariants() == 1) {
                emitGetByOffset(indexInBlock, node, baseValue, status[0], identifierNumber);
                changed = true;
                break;
            }

            if (!isFTL(m_graph.m_plan.mode))
                break;

            MultiGetByOffsetData* data = m_graph.m_multiGetByOffsetData.add();
            data->variants = status.variants();
            data->identifierNumber = identifierNumber;
            node->convertToMultiGetByOffset(data);
            changed = true;
            break;
        }

        case PutById:
        case PutByIdDirect:
        case PutByIdFlush: {
            NodeOrigin origin = node->origin;
            Edge childEdge = node->child1();
            Node* child = childEdge.node();
            unsigned identifierNumber = node->identifierNumber();

            ASSERT(childEdge.useKind() == CellUse);

            AbstractValue baseValue = m_state.forNode(child);

            m_interpreter.execute(indexInBlock); // Push CFA over this node after we get the state before.
            alreadyHandled = true; // Don't allow the default constant folder to do things to this.
            if (baseValue.m_structure.isTop() || baseValue.m_structure.isClobbered())
                break;

            PutByIdStatus status = PutByIdStatus::computeFor(
                vm(),
                m_graph.globalObjectFor(origin.semantic),
                baseValue.m_structure.set(),
                m_graph.identifiers()[identifierNumber],
                node->op() == PutByIdDirect);

            if (!status.isSimple())
                break;

            ASSERT(status.numVariants());

            if (status.numVariants() > 1 && !isFTL(m_graph.m_plan.mode))
                break;

            changed = true;

            for (unsigned i = status.numVariants(); i--;)
                addChecks(origin, indexInBlock, status[i].constantChecks());

            if (status.numVariants() == 1) {
                emitPutByOffset(indexInBlock, node, baseValue, status[0], identifierNumber);
                break;
            }

            ASSERT(isFTL(m_graph.m_plan.mode));

            MultiPutByOffsetData* data = m_graph.m_multiPutByOffsetData.add();
            data->variants = status.variants();
            data->identifierNumber = identifierNumber;
            node->convertToMultiPutByOffset(data);
            break;
        }

        case ToPrimitive: {
            if (m_state.forNode(node->child1()).m_type & ~(SpecFullNumber | SpecBoolean | SpecString))
                break;

            node->convertToIdentity();
            changed = true;
            break;
        }

        case GetMyArgumentByVal: {
            InlineCallFrame* inlineCallFrame = node->origin.semantic.inlineCallFrame;
            JSValue value = m_state.forNode(node->child1()).m_value;
            if (inlineCallFrame && value && value.isInt32()) {
                int32_t index = value.asInt32();
                if (index >= 0
                    && static_cast<size_t>(index + 1) < inlineCallFrame->arguments.size()) {
                    // Roll the interpreter over this.
                    m_interpreter.execute(indexInBlock);
                    eliminated = true;

                    int operand = inlineCallFrame->stackOffset
                        + m_graph.baselineCodeBlockFor(inlineCallFrame)->argumentIndexAfterCapture(index);

                    m_insertionSet.insertNode(
                        indexInBlock, SpecNone, CheckArgumentsNotCreated, node->origin);
                    m_insertionSet.insertNode(
                        indexInBlock, SpecNone, Phantom, node->origin, node->children);

                    node->convertToGetLocalUnlinked(VirtualRegister(operand));
                    break;
                }
            }
            break;
        }

        case Check: {
            alreadyHandled = true;
            m_interpreter.execute(indexInBlock);
            for (unsigned i = 0; i < AdjacencyList::Size; ++i) {
                Edge edge = node->children.child(i);
                if (!edge)
                    break;
                if (edge.isProved() || edge.willNotHaveCheck()) {
                    node->children.removeEdge(i--);
                    changed = true;
                }
            }
            break;
        }

        default:
            break;
        }

        if (eliminated) {
            changed = true;
            continue;
        }

        if (alreadyHandled)
            continue;

        m_interpreter.execute(indexInBlock);
        if (!m_state.isValid()) {
            // If we invalidated then we shouldn't attempt to constant-fold. Here's an
            // example:
            //
            //     c: JSConstant(4.2)
            //     x: ValueToInt32(Check:Int32:@const)
            //
            // It would be correct for an analysis to assume that execution cannot
            // proceed past @x. Therefore, constant-folding @x could be rather bad. But,
            // the CFA may report that it found a constant even though it also reported
            // that everything has been invalidated. This will only happen in a couple of
            // the constant folding cases; most of them are also separately defensive
            // about such things.
            break;
        }

        if (!node->shouldGenerate() || m_state.didClobber() || node->hasConstant())
            continue;

        // Interesting fact: this freezing that we do right here may turn a fragile value
        // into a weak value. See DFGValueStrength.h.
        FrozenValue* value = m_graph.freeze(m_state.forNode(node).value());
        if (!*value)
            continue;

        NodeOrigin origin = node->origin;
        AdjacencyList children = node->children;

        m_graph.convertToConstant(node, value);
        if (!children.isEmpty()) {
            m_insertionSet.insertNode(
                indexInBlock, SpecNone, Phantom, origin, children);
        }

        changed = true;
    }
    m_state.reset();
    m_insertionSet.execute(block);

    return changed;
}
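// Illustrative sketch (stand-in types, not DFG code) of the variant pruning done for
// MultiGetByOffset/MultiPutByOffset in foldConstants() above: each variant's structure
// guard is intersected with the set the abstract interpreter has proven for the base;
// variants whose guard becomes empty are unreachable and dropped (using the same
// swap-with-last idiom), and if exactly one variant survives the polymorphic node can
// be strength-reduced to a single direct offset load.
#include <algorithm>
#include <cstddef>
#include <iterator>
#include <set>
#include <vector>

struct Variant {
    std::set<int> structures; // guard: which structure ids this variant handles
    size_t offset;            // property slot to load if the guard matches
};

// Returns the offset to load directly if exactly one variant survives, or -1 if the
// node has to stay polymorphic (or becomes unreachable).
static long pruneVariants(std::vector<Variant>& variants, const std::set<int>& proven)
{
    for (size_t i = 0; i < variants.size();) {
        std::set<int> filtered;
        std::set_intersection(
            variants[i].structures.begin(), variants[i].structures.end(),
            proven.begin(), proven.end(),
            std::inserter(filtered, filtered.begin()));
        variants[i].structures = std::move(filtered);
        if (variants[i].structures.empty()) {
            variants[i] = variants.back(); // same swap-with-last trick as above
            variants.pop_back();
        } else
            ++i;
    }
    return variants.size() == 1 ? static_cast<long>(variants[0].offset) : -1;
}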
bool foldConstants(BasicBlock* block)
{
    bool changed = false;
    m_state.beginBasicBlock(block);
    for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) {
        if (!m_state.isValid())
            break;

        Node* node = block->at(indexInBlock);

        bool eliminated = false;

        switch (node->op()) {
        case BooleanToNumber: {
            if (node->child1().useKind() == UntypedUse
                && !m_interpreter.needsTypeCheck(node->child1(), SpecBoolean))
                node->child1().setUseKind(BooleanUse);
            break;
        }

        case CheckArgumentsNotCreated: {
            if (!isEmptySpeculation(
                    m_state.variables().operand(
                        m_graph.argumentsRegisterFor(node->origin.semantic)).m_type))
                break;
            node->convertToPhantom();
            eliminated = true;
            break;
        }

        case CheckStructure:
        case ArrayifyToStructure: {
            AbstractValue& value = m_state.forNode(node->child1());
            StructureSet set;
            if (node->op() == ArrayifyToStructure)
                set = node->structure();
            else
                set = node->structureSet();
            if (value.m_currentKnownStructure.isSubsetOf(set)) {
                m_interpreter.execute(indexInBlock); // Catch the fact that we may filter on cell.
                node->convertToPhantom();
                eliminated = true;
                break;
            }
            StructureAbstractValue& structureValue = value.m_futurePossibleStructure;
            if (structureValue.isSubsetOf(set) && structureValue.hasSingleton()) {
                Structure* structure = structureValue.singleton();
                m_interpreter.execute(indexInBlock); // Catch the fact that we may filter on cell.
                AdjacencyList children = node->children;
                children.removeEdge(0);
                if (!!children.child1())
                    m_insertionSet.insertNode(indexInBlock, SpecNone, Phantom, node->origin, children);
                node->children.setChild2(Edge());
                node->children.setChild3(Edge());
                node->convertToStructureTransitionWatchpoint(structure);
                eliminated = true;
                break;
            }
            break;
        }

        case CheckArray:
        case Arrayify: {
            if (!node->arrayMode().alreadyChecked(m_graph, node, m_state.forNode(node->child1())))
                break;
            node->convertToPhantom();
            eliminated = true;
            break;
        }

        case CheckFunction: {
            if (m_state.forNode(node->child1()).value() != node->function())
                break;
            node->convertToPhantom();
            eliminated = true;
            break;
        }

        case CheckInBounds: {
            JSValue left = m_state.forNode(node->child1()).value();
            JSValue right = m_state.forNode(node->child2()).value();
            if (left && right && left.isInt32() && right.isInt32()
                && static_cast<uint32_t>(left.asInt32()) < static_cast<uint32_t>(right.asInt32())) {
                node->convertToPhantom();
                eliminated = true;
                break;
            }
            break;
        }

        case MultiGetByOffset: {
            Edge childEdge = node->child1();
            Node* child = childEdge.node();
            MultiGetByOffsetData& data = node->multiGetByOffsetData();

            Structure* structure = m_state.forNode(child).bestProvenStructure();
            if (!structure)
                break;

            for (unsigned i = data.variants.size(); i--;) {
                const GetByIdVariant& variant = data.variants[i];
                if (!variant.structureSet().contains(structure))
                    continue;

                if (variant.chain())
                    break;

                emitGetByOffset(indexInBlock, node, structure, variant, data.identifierNumber);
                eliminated = true;
                break;
            }
            break;
        }

        case MultiPutByOffset: {
            Edge childEdge = node->child1();
            Node* child = childEdge.node();
            MultiPutByOffsetData& data = node->multiPutByOffsetData();

            Structure* structure = m_state.forNode(child).bestProvenStructure();
            if (!structure)
                break;

            for (unsigned i = data.variants.size(); i--;) {
                const PutByIdVariant& variant = data.variants[i];
                if (variant.oldStructure() != structure)
                    continue;

                emitPutByOffset(indexInBlock, node, structure, variant, data.identifierNumber);
                eliminated = true;
                break;
            }
            break;
        }

        case GetById:
        case GetByIdFlush: {
            Edge childEdge = node->child1();
            Node* child = childEdge.node();
            unsigned identifierNumber = node->identifierNumber();

            if (childEdge.useKind() != CellUse)
                break;

            Structure* structure = m_state.forNode(child).bestProvenStructure();
            if (!structure)
                break;

            GetByIdStatus status = GetByIdStatus::computeFor(
                vm(), structure, m_graph.identifiers()[identifierNumber]);

            if (!status.isSimple() || status.numVariants() != 1) {
                // FIXME: We could handle prototype cases.
                // https://bugs.webkit.org/show_bug.cgi?id=110386
                break;
            }

            emitGetByOffset(indexInBlock, node, structure, status[0], identifierNumber);
            eliminated = true;
            break;
        }

        case PutById:
        case PutByIdDirect: {
            NodeOrigin origin = node->origin;
            Edge childEdge = node->child1();
            Node* child = childEdge.node();
            unsigned identifierNumber = node->identifierNumber();

            ASSERT(childEdge.useKind() == CellUse);

            Structure* structure = m_state.forNode(child).bestProvenStructure();
            if (!structure)
                break;

            PutByIdStatus status = PutByIdStatus::computeFor(
                vm(),
                m_graph.globalObjectFor(origin.semantic),
                structure,
                m_graph.identifiers()[identifierNumber],
                node->op() == PutByIdDirect);

            if (!status.isSimple())
                break;
            if (status.numVariants() != 1)
                break;

            emitPutByOffset(indexInBlock, node, structure, status[0], identifierNumber);
            eliminated = true;
            break;
        }

        case ToPrimitive: {
            if (m_state.forNode(node->child1()).m_type & ~(SpecFullNumber | SpecBoolean | SpecString))
                break;

            node->convertToIdentity();
            break;
        }

        default:
            break;
        }

        if (eliminated) {
            changed = true;
            continue;
        }

        m_interpreter.execute(indexInBlock);
        if (!m_state.isValid()) {
            // If we invalidated then we shouldn't attempt to constant-fold. Here's an
            // example:
            //
            //     c: JSConstant(4.2)
            //     x: ValueToInt32(Check:Int32:@const)
            //
            // It would be correct for an analysis to assume that execution cannot
            // proceed past @x. Therefore, constant-folding @x could be rather bad. But,
            // the CFA may report that it found a constant even though it also reported
            // that everything has been invalidated. This will only happen in a couple of
            // the constant folding cases; most of them are also separately defensive
            // about such things.
            break;
        }

        if (!node->shouldGenerate() || m_state.didClobber() || node->hasConstant())
            continue;

        JSValue value = m_state.forNode(node).value();
        if (!value)
            continue;

        // Check if merging the abstract value of the constant into the abstract value
        // we've proven for this node wouldn't widen the proof. If it widens the proof
        // (i.e. says that the set contains more things in it than it previously did)
        // then we refuse to fold.
        AbstractValue oldValue = m_state.forNode(node);
        AbstractValue constantValue;
        constantValue.set(m_graph, value);
        constantValue.fixTypeForRepresentation(node);
        if (oldValue.merge(constantValue))
            continue;

        NodeOrigin origin = node->origin;
        AdjacencyList children = node->children;

        if (node->op() == GetLocal)
            m_graph.dethread();
        else
            ASSERT(!node->hasVariableAccessData(m_graph));

        m_graph.convertToConstant(node, value);
        m_insertionSet.insertNode(
            indexInBlock, SpecNone, Phantom, origin, children);

        changed = true;
    }
    m_state.reset();
    m_insertionSet.execute(block);

    return changed;
}
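// Illustrative sketch (bitmask stand-in for AbstractValue, not DFG code) of the widening
// check in the older foldConstants() above: a node is folded to a constant only if merging
// the constant's abstract value into what the CFA has already proven changes nothing --
// i.e. the constant's type set is a subset of the proven set -- so folding never weakens a
// proof that later phases rely on.
#include <cstdint>

struct SimpleAbstractValue {
    uint32_t typeBits = 0; // one bit per speculated type, by analogy with SpeculatedType

    // Returns true if merging `other` in changed (widened) this value.
    bool merge(const SimpleAbstractValue& other)
    {
        uint32_t merged = typeBits | other.typeBits;
        bool widened = merged != typeBits;
        typeBits = merged;
        return widened;
    }
};

static bool safeToFold(SimpleAbstractValue proven, SimpleAbstractValue ofConstant)
{
    return !proven.merge(ofConstant); // mirrors: if (oldValue.merge(constantValue)) continue;
}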
GetByIdStatus GetByIdStatus::computeForStubInfo(
    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
    StringImpl* uid, CallLinkStatus::ExitSiteData callExitSiteData)
{
    if (!stubInfo || !stubInfo->seen)
        return GetByIdStatus(NoInformation);

    PolymorphicGetByIdList* list = 0;
    State slowPathState = TakesSlowPath;
    if (stubInfo->accessType == access_get_by_id_list) {
        list = stubInfo->u.getByIdList.list;
        for (unsigned i = 0; i < list->size(); ++i) {
            const GetByIdAccess& access = list->at(i);
            if (access.doesCalls())
                slowPathState = MakesCalls;
        }
    }

    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_state = Simple;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo->accessType) {
    case access_unset:
        return GetByIdStatus(NoInformation);

    case access_get_by_id_self: {
        Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
        if (structure->takesSlowPathInDFGForImpureProperty())
            return GetByIdStatus(slowPathState, true);
        unsigned attributesIgnored;
        GetByIdVariant variant;
        variant.m_offset = structure->getConcurrently(uid, attributesIgnored);
        if (!isValidOffset(variant.m_offset))
            return GetByIdStatus(slowPathState, true);

        variant.m_structureSet.add(structure);
        bool didAppend = result.appendVariant(variant);
        ASSERT_UNUSED(didAppend, didAppend);
        return result;
    }

    case access_get_by_id_list: {
        for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
            Structure* structure = list->at(listIndex).structure();

            ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
                profiledBlock, structure, list->at(listIndex).chain(),
                list->at(listIndex).chainCount(), uid);

            switch (complexGetStatus.kind()) {
            case ComplexGetStatus::ShouldSkip:
                continue;

            case ComplexGetStatus::TakesSlowPath:
                return GetByIdStatus(slowPathState, true);

            case ComplexGetStatus::Inlineable: {
                std::unique_ptr<CallLinkStatus> callLinkStatus;
                switch (list->at(listIndex).type()) {
                case GetByIdAccess::SimpleInline:
                case GetByIdAccess::SimpleStub: {
                    break;
                }
                case GetByIdAccess::Getter: {
                    AccessorCallJITStubRoutine* stub = static_cast<AccessorCallJITStubRoutine*>(
                        list->at(listIndex).stubRoutine());
                    callLinkStatus = std::make_unique<CallLinkStatus>(
                        CallLinkStatus::computeFor(
                            locker, profiledBlock, *stub->m_callLinkInfo, callExitSiteData));
                    break;
                }
                case GetByIdAccess::CustomGetter:
                case GetByIdAccess::WatchedStub: {
                    // FIXME: It would be totally sweet to support this at some point in the future.
                    // https://bugs.webkit.org/show_bug.cgi?id=133052
                    return GetByIdStatus(slowPathState, true);
                }
                default:
                    RELEASE_ASSERT_NOT_REACHED();
                }

                GetByIdVariant variant(
                    StructureSet(structure), complexGetStatus.offset(),
                    complexGetStatus.chain(), std::move(callLinkStatus));

                if (!result.appendVariant(variant))
                    return GetByIdStatus(slowPathState, true);
                break;
            }
            }
        }

        return result;
    }

    default:
        return GetByIdStatus(slowPathState, true);
    }

    RELEASE_ASSERT_NOT_REACHED();
    return GetByIdStatus();
}
GetByIdStatus GetByIdStatus::computeForStubInfoWithoutExitSiteFeedback(
    const ConcurrentJITLocker& locker, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
    UniquedStringImpl* uid, CallLinkStatus::ExitSiteData callExitSiteData)
{
    if (!stubInfo || !stubInfo->everConsidered)
        return GetByIdStatus(NoInformation);

    PolymorphicAccess* list = 0;
    State slowPathState = TakesSlowPath;
    if (stubInfo->cacheType == CacheType::Stub) {
        list = stubInfo->u.stub;
        for (unsigned i = 0; i < list->size(); ++i) {
            const AccessCase& access = list->at(i);
            if (access.doesCalls())
                slowPathState = MakesCalls;
        }
    }

    if (stubInfo->tookSlowPath)
        return GetByIdStatus(slowPathState);

    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_state = Simple;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo->cacheType) {
    case CacheType::Unset:
        return GetByIdStatus(NoInformation);

    case CacheType::GetByIdSelf: {
        Structure* structure = stubInfo->u.byIdSelf.baseObjectStructure.get();
        if (structure->takesSlowPathInDFGForImpureProperty())
            return GetByIdStatus(slowPathState, true);
        unsigned attributesIgnored;
        GetByIdVariant variant;
        variant.m_offset = structure->getConcurrently(uid, attributesIgnored);
        if (!isValidOffset(variant.m_offset))
            return GetByIdStatus(slowPathState, true);

        variant.m_structureSet.add(structure);
        bool didAppend = result.appendVariant(variant);
        ASSERT_UNUSED(didAppend, didAppend);
        return result;
    }

    case CacheType::Stub: {
        for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
            const AccessCase& access = list->at(listIndex);
            if (access.viaProxy())
                return GetByIdStatus(slowPathState, true);

            Structure* structure = access.structure();
            if (!structure) {
                // The null structure cases arise due to array.length and string.length. We have no way
                // of creating a GetByIdVariant for those, and we don't really have to since the DFG
                // handles those cases in FixupPhase using value profiling. That's a bit awkward - we
                // shouldn't have to use value profiling to discover something that the AccessCase
                // could have told us. But, it works well enough. So, our only concern here is to not
                // crash on null structure.
                return GetByIdStatus(slowPathState, true);
            }

            ComplexGetStatus complexGetStatus = ComplexGetStatus::computeFor(
                structure, access.conditionSet(), uid);

            switch (complexGetStatus.kind()) {
            case ComplexGetStatus::ShouldSkip:
                continue;

            case ComplexGetStatus::TakesSlowPath:
                return GetByIdStatus(slowPathState, true);

            case ComplexGetStatus::Inlineable: {
                std::unique_ptr<CallLinkStatus> callLinkStatus;
                JSFunction* intrinsicFunction = nullptr;

                switch (access.type()) {
                case AccessCase::Load: {
                    break;
                }
                case AccessCase::IntrinsicGetter: {
                    intrinsicFunction = access.intrinsicFunction();
                    break;
                }
                case AccessCase::Getter: {
                    CallLinkInfo* callLinkInfo = access.callLinkInfo();
                    ASSERT(callLinkInfo);
                    callLinkStatus = std::make_unique<CallLinkStatus>(
                        CallLinkStatus::computeFor(
                            locker, profiledBlock, *callLinkInfo, callExitSiteData));
                    break;
                }
                default: {
                    // FIXME: It would be totally sweet to support more of these at some point in the
                    // future. https://bugs.webkit.org/show_bug.cgi?id=133052
                    return GetByIdStatus(slowPathState, true);
                }
                }

                GetByIdVariant variant(
                    StructureSet(structure), complexGetStatus.offset(),
                    complexGetStatus.conditionSet(), WTFMove(callLinkStatus),
                    intrinsicFunction);

                if (!result.appendVariant(variant))
                    return GetByIdStatus(slowPathState, true);
                break;
            }
            }
        }

        return result;
    }

    default:
        return GetByIdStatus(slowPathState, true);
    }

    RELEASE_ASSERT_NOT_REACHED();
    return GetByIdStatus();
}
GetByIdStatus GetByIdStatus::computeForStubInfo(
    const ConcurrentJITLocker&, CodeBlock* profiledBlock, StructureStubInfo* stubInfo,
    StringImpl* uid)
{
    if (!stubInfo || !stubInfo->seen)
        return GetByIdStatus(NoInformation);

    if (stubInfo->resetByGC)
        return GetByIdStatus(TakesSlowPath, true);

    PolymorphicGetByIdList* list = 0;
    if (stubInfo->accessType == access_get_by_id_list) {
        list = stubInfo->u.getByIdList.list;
        for (unsigned i = 0; i < list->size(); ++i) {
            if (list->at(i).doesCalls())
                return GetByIdStatus(MakesCalls, true);
        }
    }

    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_state = Simple;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo->accessType) {
    case access_unset:
        return GetByIdStatus(NoInformation);

    case access_get_by_id_self: {
        Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
        if (structure->takesSlowPathInDFGForImpureProperty())
            return GetByIdStatus(TakesSlowPath, true);
        unsigned attributesIgnored;
        JSCell* specificValue;
        GetByIdVariant variant;
        variant.m_offset = structure->getConcurrently(
            *profiledBlock->vm(), uid, attributesIgnored, specificValue);
        if (!isValidOffset(variant.m_offset))
            return GetByIdStatus(TakesSlowPath, true);

        if (structure->isDictionary())
            specificValue = 0;

        variant.m_structureSet.add(structure);
        variant.m_specificValue = JSValue(specificValue);
        result.appendVariant(variant);
        return result;
    }

    case access_get_by_id_list: {
        for (unsigned listIndex = 0; listIndex < list->size(); ++listIndex) {
            ASSERT(!list->at(listIndex).doesCalls());

            Structure* structure = list->at(listIndex).structure();
            if (structure->takesSlowPathInDFGForImpureProperty())
                return GetByIdStatus(TakesSlowPath, true);

            if (list->at(listIndex).chain()) {
                RefPtr<IntendedStructureChain> chain = adoptRef(new IntendedStructureChain(
                    profiledBlock, structure, list->at(listIndex).chain(),
                    list->at(listIndex).chainCount()));
                if (!result.computeForChain(profiledBlock, uid, chain))
                    return GetByIdStatus(TakesSlowPath, true);
                continue;
            }

            unsigned attributesIgnored;
            JSCell* specificValue;
            PropertyOffset myOffset = structure->getConcurrently(
                *profiledBlock->vm(), uid, attributesIgnored, specificValue);
            if (structure->isDictionary())
                specificValue = 0;

            if (!isValidOffset(myOffset))
                return GetByIdStatus(TakesSlowPath, true);

            bool found = false;
            for (unsigned variantIndex = 0; variantIndex < result.m_variants.size(); ++variantIndex) {
                GetByIdVariant& variant = result.m_variants[variantIndex];
                if (variant.m_chain)
                    continue;

                if (variant.m_offset != myOffset)
                    continue;

                found = true;
                if (variant.m_structureSet.contains(structure))
                    break;

                if (variant.m_specificValue != JSValue(specificValue))
                    variant.m_specificValue = JSValue();

                variant.m_structureSet.add(structure);
                break;
            }

            if (found)
                continue;

            if (!result.appendVariant(GetByIdVariant(StructureSet(structure), myOffset, specificValue)))
                return GetByIdStatus(TakesSlowPath, true);
        }

        return result;
    }

    case access_get_by_id_chain: {
        if (!stubInfo->u.getByIdChain.isDirect)
            return GetByIdStatus(MakesCalls, true);

        RefPtr<IntendedStructureChain> chain = adoptRef(new IntendedStructureChain(
            profiledBlock,
            stubInfo->u.getByIdChain.baseObjectStructure.get(),
            stubInfo->u.getByIdChain.chain.get(),
            stubInfo->u.getByIdChain.count));

        if (result.computeForChain(profiledBlock, uid, chain))
            return result;
        return GetByIdStatus(TakesSlowPath, true);
    }

    default:
        return GetByIdStatus(TakesSlowPath, true);
    }

    RELEASE_ASSERT_NOT_REACHED();
    return GetByIdStatus();
}