CallLinkStatus CallLinkStatus::computeFor(
    CodeBlock* profiledBlock, unsigned bytecodeIndex, const CallLinkInfoMap& map)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);

    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(map);
#if ENABLE(DFG_JIT)
    if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCache))
        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadCacheWatchpoint))
        || profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadExecutable)))
        return takesSlowPath();

    CallLinkInfo* callLinkInfo = map.get(CodeOrigin(bytecodeIndex));
    if (!callLinkInfo)
        return computeFromLLInt(locker, profiledBlock, bytecodeIndex);

    CallLinkStatus result = computeFor(locker, *callLinkInfo);
    if (!result)
        return computeFromLLInt(locker, profiledBlock, bytecodeIndex);

    if (profiledBlock->hasExitSite(locker, DFG::FrequentExitSite(bytecodeIndex, BadFunction)))
        result.makeClosureCall();

    return result;
#else
    return CallLinkStatus();
#endif
}
CallLinkStatus CallLinkStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);

    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
    if (!profiledBlock->hasBaselineJITProfiling())
        return computeFromLLInt(profiledBlock, bytecodeIndex);

    if (profiledBlock->couldTakeSlowCase(bytecodeIndex))
        return CallLinkStatus::takesSlowPath();

    CallLinkInfo& callLinkInfo = profiledBlock->getCallLinkInfo(bytecodeIndex);
    if (callLinkInfo.stub)
        return CallLinkStatus(callLinkInfo.stub->executable(), callLinkInfo.stub->structure());

    JSFunction* target = callLinkInfo.lastSeenCallee.get();
    if (!target)
        return computeFromLLInt(profiledBlock, bytecodeIndex);

    if (callLinkInfo.hasSeenClosure)
        return CallLinkStatus(target->executable(), target->structure());

    return CallLinkStatus(target);
#else
    return CallLinkStatus();
#endif
}
PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(ident);
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
    if (!profiledBlock->numberOfStructureStubInfos())
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);

    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);

    StructureStubInfo& stubInfo = profiledBlock->getStubInfo(bytecodeIndex);
    if (!stubInfo.seen)
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);

    if (stubInfo.resetByGC)
        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);

    switch (stubInfo.accessType) {
    case access_unset:
        // If the JIT saw it but didn't optimize it, then assume that this takes slow path.
        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);

    case access_put_by_id_replace: {
        PropertyOffset offset = stubInfo.u.putByIdReplace.baseObjectStructure->get(
            *profiledBlock->vm(), ident);
        if (isValidOffset(offset)) {
            return PutByIdStatus(
                SimpleReplace,
                stubInfo.u.putByIdReplace.baseObjectStructure.get(),
                0, 0,
                offset);
        }
        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
    }

    case access_put_by_id_transition_normal:
    case access_put_by_id_transition_direct: {
        ASSERT(stubInfo.u.putByIdTransition.previousStructure->transitionWatchpointSetHasBeenInvalidated());
        PropertyOffset offset = stubInfo.u.putByIdTransition.structure->get(
            *profiledBlock->vm(), ident);
        if (isValidOffset(offset)) {
            return PutByIdStatus(
                SimpleTransition,
                stubInfo.u.putByIdTransition.previousStructure.get(),
                stubInfo.u.putByIdTransition.structure.get(),
                stubInfo.u.putByIdTransition.chain.get(),
                offset);
        }
        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
    }

    default:
        return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset);
    }
#else // ENABLE(JIT)
    return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset);
#endif // ENABLE(JIT)
}
PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(ident);
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
    if (!profiledBlock->numberOfStructureStubInfos())
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);

    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
        return PutByIdStatus(TakesSlowPath, 0, 0, 0, notFound);

    StructureStubInfo& stubInfo = profiledBlock->getStubInfo(bytecodeIndex);
    if (!stubInfo.seen)
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);

    switch (stubInfo.accessType) {
    case access_unset:
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);

    case access_put_by_id_replace: {
        size_t offset = stubInfo.u.putByIdReplace.baseObjectStructure->get(
            *profiledBlock->globalData(), ident);
        if (offset != notFound) {
            return PutByIdStatus(
                SimpleReplace,
                stubInfo.u.putByIdReplace.baseObjectStructure.get(),
                0, 0,
                offset);
        }
        return PutByIdStatus(TakesSlowPath, 0, 0, 0, notFound);
    }

    case access_put_by_id_transition_normal:
    case access_put_by_id_transition_direct: {
        ASSERT(stubInfo.u.putByIdTransition.previousStructure->transitionWatchpointSetHasBeenInvalidated());
        size_t offset = stubInfo.u.putByIdTransition.structure->get(
            *profiledBlock->globalData(), ident);
        if (offset != notFound) {
            return PutByIdStatus(
                SimpleTransition,
                stubInfo.u.putByIdTransition.previousStructure.get(),
                stubInfo.u.putByIdTransition.structure.get(),
                stubInfo.u.putByIdTransition.chain.get(),
                offset);
        }
        return PutByIdStatus(TakesSlowPath, 0, 0, 0, notFound);
    }

    default:
        return PutByIdStatus(TakesSlowPath, 0, 0, 0, notFound);
    }
#else // ENABLE(JIT)
    return PutByIdStatus(NoInformation, 0, 0, 0, notFound);
#endif // ENABLE(JIT)
}
PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);

    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(uid);
#if ENABLE(DFG_JIT)
    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex)
        || hasExitSite(locker, profiledBlock, bytecodeIndex))
        return PutByIdStatus(TakesSlowPath);

    StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
    PutByIdStatus result = computeForStubInfo(
        locker, profiledBlock, stubInfo, uid,
        CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));
    if (!result)
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);

    return result;
#else // ENABLE(JIT)
    UNUSED_PARAM(map);
    return PutByIdStatus(NoInformation);
#endif // ENABLE(JIT)
}
CallLinkStatus CallLinkStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
    if (!profiledBlock->numberOfCallLinkInfos())
        return computeFromLLInt(profiledBlock, bytecodeIndex);

    if (profiledBlock->couldTakeSlowCase(bytecodeIndex))
        return CallLinkStatus(0, true);

    CallLinkInfo& callLinkInfo = profiledBlock->getCallLinkInfo(bytecodeIndex);
    JSFunction* target = callLinkInfo.lastSeenCallee.get();
    if (!target)
        return computeFromLLInt(profiledBlock, bytecodeIndex);

    return CallLinkStatus(target, false, !!callLinkInfo.stub);
#else
    return CallLinkStatus(0, false, false);
#endif
}
CallLinkStatus CallLinkStatus::computeFor(
    CodeBlock* profiledBlock, unsigned bytecodeIndex, const CallLinkInfoMap& map)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);

    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(map);
#if ENABLE(DFG_JIT)
    ExitSiteData exitSiteData = computeExitSiteData(locker, profiledBlock, bytecodeIndex);
    if (exitSiteData.m_takesSlowPath)
        return takesSlowPath();

    CallLinkInfo* callLinkInfo = map.get(CodeOrigin(bytecodeIndex));
    if (!callLinkInfo)
        return computeFromLLInt(locker, profiledBlock, bytecodeIndex);

    return computeFor(locker, *callLinkInfo, exitSiteData);
#else
    return CallLinkStatus();
#endif
}
GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, UniquedStringImpl* uid)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);

    GetByIdStatus result;

#if ENABLE(DFG_JIT)
    result = computeForStubInfoWithoutExitSiteFeedback(
        locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid,
        CallLinkStatus::computeExitSiteData(locker, profiledBlock, bytecodeIndex));

    if (!result.takesSlowPath()
        && hasExitSite(locker, profiledBlock, bytecodeIndex))
        return GetByIdStatus(result.makesCalls() ? MakesCalls : TakesSlowPath, true);
#else
    UNUSED_PARAM(map);
#endif

    if (!result)
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);

    return result;
}
GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);

    GetByIdStatus result;

#if ENABLE(DFG_JIT)
    result = computeForStubInfo(
        locker, profiledBlock, map.get(CodeOrigin(bytecodeIndex)), uid);

    if (!result.takesSlowPath()
        && (hasExitSite(locker, profiledBlock, bytecodeIndex)
            || profiledBlock->likelyToTakeSlowCase(bytecodeIndex)))
        return GetByIdStatus(TakesSlowPath, true);
#else
    UNUSED_PARAM(map);
#endif

    if (!result)
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);

    return result;
}
GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytecodeIndex, Identifier& ident)
{
    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(ident);
#if ENABLE(JIT) && ENABLE(VALUE_PROFILER)
    if (!profiledBlock->numberOfStructureStubInfos())
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);

    // First check if it makes either calls, in which case we want to be super careful, or
    // if it's not set at all, in which case we punt.
    StructureStubInfo& stubInfo = profiledBlock->getStubInfo(bytecodeIndex);
    if (!stubInfo.seen)
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);

    if (stubInfo.resetByGC)
        return GetByIdStatus(TakesSlowPath, true);

    PolymorphicAccessStructureList* list;
    int listSize;
    switch (stubInfo.accessType) {
    case access_get_by_id_self_list:
        list = stubInfo.u.getByIdSelfList.structureList;
        listSize = stubInfo.u.getByIdSelfList.listSize;
        break;
    case access_get_by_id_proto_list:
        list = stubInfo.u.getByIdProtoList.structureList;
        listSize = stubInfo.u.getByIdProtoList.listSize;
        break;
    default:
        list = 0;
        listSize = 0;
        break;
    }
    for (int i = 0; i < listSize; ++i) {
        if (!list->list[i].isDirect)
            return GetByIdStatus(MakesCalls, true);
    }

    // Next check if it takes slow case, in which case we want to be kind of careful.
    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
        return GetByIdStatus(TakesSlowPath, true);

    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo.accessType) {
    case access_unset:
        return computeFromLLInt(profiledBlock, bytecodeIndex, ident);

    case access_get_by_id_self: {
        Structure* structure = stubInfo.u.getByIdSelf.baseObjectStructure.get();
        unsigned attributesIgnored;
        JSCell* specificValue;
        result.m_offset = structure->get(
            *profiledBlock->vm(), ident, attributesIgnored, specificValue);
        if (structure->isDictionary())
            specificValue = 0;

        if (isValidOffset(result.m_offset)) {
            result.m_structureSet.add(structure);
            result.m_specificValue = JSValue(specificValue);
        }

        if (isValidOffset(result.m_offset))
            ASSERT(result.m_structureSet.size());
        break;
    }

    case access_get_by_id_self_list: {
        for (int i = 0; i < listSize; ++i) {
            ASSERT(list->list[i].isDirect);

            Structure* structure = list->list[i].base.get();
            if (result.m_structureSet.contains(structure))
                continue;

            unsigned attributesIgnored;
            JSCell* specificValue;
            PropertyOffset myOffset = structure->get(
                *profiledBlock->vm(), ident, attributesIgnored, specificValue);
            if (structure->isDictionary())
                specificValue = 0;

            if (!isValidOffset(myOffset)) {
                result.m_offset = invalidOffset;
                break;
            }

            if (!i) {
                result.m_offset = myOffset;
                result.m_specificValue = JSValue(specificValue);
            } else if (result.m_offset != myOffset) {
                result.m_offset = invalidOffset;
                break;
            } else if (result.m_specificValue != JSValue(specificValue))
                result.m_specificValue = JSValue();

            result.m_structureSet.add(structure);
        }

        if (isValidOffset(result.m_offset))
            ASSERT(result.m_structureSet.size());
        break;
    }

    case access_get_by_id_proto: {
        if (!stubInfo.u.getByIdProto.isDirect)
            return GetByIdStatus(MakesCalls, true);
        result.m_chain.append(stubInfo.u.getByIdProto.prototypeStructure.get());
        computeForChain(
            result, profiledBlock, ident,
            stubInfo.u.getByIdProto.baseObjectStructure.get());
        break;
    }

    case access_get_by_id_chain: {
        if (!stubInfo.u.getByIdChain.isDirect)
            return GetByIdStatus(MakesCalls, true);
        for (unsigned i = 0; i < stubInfo.u.getByIdChain.count; ++i)
            result.m_chain.append(stubInfo.u.getByIdChain.chain->head()[i].get());
        computeForChain(
            result, profiledBlock, ident,
            stubInfo.u.getByIdChain.baseObjectStructure.get());
        break;
    }

    default:
        ASSERT(!isValidOffset(result.m_offset));
        break;
    }

    if (!isValidOffset(result.m_offset)) {
        result.m_state = TakesSlowPath;
        result.m_structureSet.clear();
        result.m_chain.clear();
        result.m_specificValue = JSValue();
    } else
        result.m_state = Simple;

    return result;
#else // ENABLE(JIT)
    return GetByIdStatus(NoInformation, false);
#endif // ENABLE(JIT)
}
GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, StubInfoMap& map, unsigned bytecodeIndex, StringImpl* uid)
{
    ConcurrentJITLocker locker(profiledBlock->m_lock);

    UNUSED_PARAM(profiledBlock);
    UNUSED_PARAM(bytecodeIndex);
    UNUSED_PARAM(uid);
#if ENABLE(JIT)
    StructureStubInfo* stubInfo = map.get(CodeOrigin(bytecodeIndex));
    if (!stubInfo || !stubInfo->seen)
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);

    if (stubInfo->resetByGC)
        return GetByIdStatus(TakesSlowPath, true);

    PolymorphicAccessStructureList* list;
    int listSize;
    switch (stubInfo->accessType) {
    case access_get_by_id_self_list:
        list = stubInfo->u.getByIdSelfList.structureList;
        listSize = stubInfo->u.getByIdSelfList.listSize;
        break;
    case access_get_by_id_proto_list:
        list = stubInfo->u.getByIdProtoList.structureList;
        listSize = stubInfo->u.getByIdProtoList.listSize;
        break;
    default:
        list = 0;
        listSize = 0;
        break;
    }
    for (int i = 0; i < listSize; ++i) {
        if (!list->list[i].isDirect)
            return GetByIdStatus(MakesCalls, true);
    }

    // Next check if it takes slow case, in which case we want to be kind of careful.
    if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex))
        return GetByIdStatus(TakesSlowPath, true);

    // Finally figure out if we can derive an access strategy.
    GetByIdStatus result;
    result.m_wasSeenInJIT = true; // This is interesting for bytecode dumping only.
    switch (stubInfo->accessType) {
    case access_unset:
        return computeFromLLInt(profiledBlock, bytecodeIndex, uid);

    case access_get_by_id_self: {
        Structure* structure = stubInfo->u.getByIdSelf.baseObjectStructure.get();
        if (structure->takesSlowPathInDFGForImpureProperty())
            return GetByIdStatus(TakesSlowPath, true);
        unsigned attributesIgnored;
        JSCell* specificValue;
        result.m_offset = structure->getConcurrently(
            *profiledBlock->vm(), uid, attributesIgnored, specificValue);
        if (structure->isDictionary())
            specificValue = 0;

        if (isValidOffset(result.m_offset)) {
            result.m_structureSet.add(structure);
            result.m_specificValue = JSValue(specificValue);
        }

        if (isValidOffset(result.m_offset))
            ASSERT(result.m_structureSet.size());
        break;
    }

    case access_get_by_id_self_list: {
        for (int i = 0; i < listSize; ++i) {
            ASSERT(list->list[i].isDirect);

            Structure* structure = list->list[i].base.get();
            if (structure->takesSlowPathInDFGForImpureProperty())
                return GetByIdStatus(TakesSlowPath, true);
            if (result.m_structureSet.contains(structure))
                continue;

            unsigned attributesIgnored;
            JSCell* specificValue;
            PropertyOffset myOffset = structure->getConcurrently(
                *profiledBlock->vm(), uid, attributesIgnored, specificValue);
            if (structure->isDictionary())
                specificValue = 0;

            if (!isValidOffset(myOffset)) {
                result.m_offset = invalidOffset;
                break;
            }

            if (!i) {
                result.m_offset = myOffset;
                result.m_specificValue = JSValue(specificValue);
            } else if (result.m_offset != myOffset) {
                result.m_offset = invalidOffset;
                break;
            } else if (result.m_specificValue != JSValue(specificValue))
                result.m_specificValue = JSValue();

            result.m_structureSet.add(structure);
        }

        if (isValidOffset(result.m_offset))
            ASSERT(result.m_structureSet.size());
        break;
    }

    case access_get_by_id_proto: {
        if (!stubInfo->u.getByIdProto.isDirect)
            return GetByIdStatus(MakesCalls, true);
        result.m_chain = adoptRef(new IntendedStructureChain(
            profiledBlock,
            stubInfo->u.getByIdProto.baseObjectStructure.get(),
            stubInfo->u.getByIdProto.prototypeStructure.get()));
        computeForChain(result, profiledBlock, uid);
        break;
    }

    case access_get_by_id_chain: {
        if (!stubInfo->u.getByIdChain.isDirect)
            return GetByIdStatus(MakesCalls, true);
        result.m_chain = adoptRef(new IntendedStructureChain(
            profiledBlock,
            stubInfo->u.getByIdChain.baseObjectStructure.get(),
            stubInfo->u.getByIdChain.chain.get(),
            stubInfo->u.getByIdChain.count));
        computeForChain(result, profiledBlock, uid);
        break;
    }

    default:
        ASSERT(!isValidOffset(result.m_offset));
        break;
    }

    if (!isValidOffset(result.m_offset)) {
        result.m_state = TakesSlowPath;
        result.m_structureSet.clear();
        result.m_chain.clear();
        result.m_specificValue = JSValue();
    } else
        result.m_state = Simple;

    return result;
#else // ENABLE(JIT)
    UNUSED_PARAM(map);
    return GetByIdStatus(NoInformation, false);
#endif // ENABLE(JIT)
}