void Worklist::waitUntilAllPlansForVMAreReady(VM& vm)
{
    DeferGC deferGC(vm.heap);
    // Wait for all of the plans for the given VM to complete. The idea here
    // is that we want all of the caller VM's plans to be done. We don't care
    // about any other VM's plans, and we won't attempt to wait on those.
    // After we release this lock, we know that although other VMs may still
    // be adding plans, our VM will not be.
    MutexLocker locker(m_lock);
    if (Options::verboseCompilationQueue()) {
        dump(locker, WTF::dataFile());
        dataLog(": Waiting for all in VM to complete.\n");
    }
    for (;;) {
        bool allAreCompiled = true;
        PlanMap::iterator end = m_plans.end();
        for (PlanMap::iterator iter = m_plans.begin(); iter != end; ++iter) {
            if (&iter->value->vm != &vm)
                continue;
            if (!iter->value->isCompiled) {
                allAreCompiled = false;
                break;
            }
        }
        if (allAreCompiled)
            break;
        m_planCompiled.wait(m_lock);
    }
}

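// Editor's note: nearly every function below opens with a DeferGC or
// DeferGCForAWhile. A minimal sketch of that RAII idiom, modeled on the
// helpers these functions use (the Heap hooks named here are assumptions
// based on the surrounding code, not an authoritative implementation):
class DeferGC {
public:
    DeferGC(Heap& heap)
        : m_heap(heap)
    {
        // While the deferral depth is non-zero, the Heap will not start a
        // collection from an allocation slow path.
        m_heap.incrementDeferralDepth();
    }
    ~DeferGC()
    {
        // On the way out, run any collection that was requested while we
        // were deferred.
        m_heap.decrementDeferralDepthAndGCIfNeeded();
    }

private:
    Heap& m_heap;
};

class DeferGCForAWhile {
public:
    DeferGCForAWhile(Heap& heap)
        : m_heap(heap)
    {
        m_heap.incrementDeferralDepth();
    }
    ~DeferGCForAWhile()
    {
        // Unlike DeferGC, do not trigger the deferred collection here; it
        // will happen at the next natural GC opportunity instead. This is
        // what makes it safe in places that cannot tolerate a GC even at
        // scope exit.
        m_heap.decrementDeferralDepth();
    }

private:
    Heap& m_heap;
};
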
void WatchpointSet::fireAllWatchpoints(VM& vm, const FireDetail& detail)
{
    // In case there are any adaptive watchpoints, we need to make sure that they see that this
    // watchpoint has already been invalidated.
    RELEASE_ASSERT(hasBeenInvalidated());

    // Firing a watchpoint may cause a GC to happen. This GC could destroy various
    // Watchpoints themselves while they're in the process of firing. It's not safe
    // for most Watchpoints to be destructed while they're in the middle of firing.
    // This GC could also destroy us, and we're not in a safe state to be destroyed.
    // The safest thing to do is to DeferGCForAWhile to prevent this GC from happening.
    DeferGCForAWhile deferGC(vm.heap);

    while (!m_set.isEmpty()) {
        Watchpoint* watchpoint = m_set.begin();
        ASSERT(watchpoint->isOnList());

        // Removing the Watchpoint before firing it makes it possible to implement watchpoints
        // that add themselves to a different set when they fire. This kind of "adaptive"
        // watchpoint can be used to track some semantic property that is more fine-grained than
        // what the set can convey. For example, we might care if a singleton object ever has a
        // property called "foo". We can watch for this by checking if its Structure has "foo" and
        // then watching its transitions. But then the watchpoint fires if any property is added.
        // So, before the watchpoint decides to invalidate any code, it can check if it is
        // possible to add itself to the transition watchpoint set of the singleton object's new
        // Structure.
        watchpoint->remove();
        ASSERT(m_set.begin() != watchpoint);
        ASSERT(!watchpoint->isOnList());

        watchpoint->fire(detail);
        // After we fire the watchpoint, the watchpoint pointer may be a dangling pointer. That's
        // fine, because we have no use for the pointer anymore.
    }
}

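// Editor's note: a sketch of the "adaptive" watchpoint pattern described in
// the comment above. Everything here except the Watchpoint/FireDetail base
// types is hypothetical (the accessors and helpers are named for
// illustration only); the point is the shape: on fire, try to keep watching
// a narrower condition before invalidating anything.
class AdaptiveAbsenceWatchpoint : public Watchpoint {
public:
    AdaptiveAbsenceWatchpoint(Structure* structure, UniquedStringImpl* property)
        : m_structure(structure)
        , m_property(property)
    {
    }

protected:
    void fireInternal(const FireDetail& detail) override
    {
        // fireAllWatchpoints() removed us from the old set before calling
        // fire(), so we are free to re-register against the new Structure if
        // the property we care about is still absent there.
        Structure* newStructure = m_structure->lastTransition(); // hypothetical accessor
        if (newStructure && isStillAbsent(newStructure, m_property)) { // hypothetical check
            m_structure = newStructure;
            newStructure->transitionWatchpointSet().add(this);
            return;
        }
        // The property really appeared; only now invalidate dependent code.
        invalidateDependentCode(detail); // hypothetical
    }

private:
    Structure* m_structure;
    UniquedStringImpl* m_property;
};
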
JSObject* ScriptExecutable::prepareForExecutionImpl(
    ExecState* exec, JSScope* scope, CodeSpecializationKind kind)
{
    VM& vm = exec->vm();
    DeferGC deferGC(vm.heap);

    JSObject* exception = 0;
    RefPtr<CodeBlock> codeBlock = newCodeBlockFor(kind, scope, exception);
    if (!codeBlock) {
        RELEASE_ASSERT(exception);
        return exception;
    }

    bool shouldUseLLInt;
#if !ENABLE(JIT)
    // No JIT implies use of the C Loop LLINT. Override the options to reflect this.
    Options::useLLInt() = true;
    shouldUseLLInt = true;
#elif ENABLE(LLINT)
    shouldUseLLInt = Options::useLLInt();
#else
    shouldUseLLInt = false;
#endif

    if (shouldUseLLInt)
        setupLLInt(vm, codeBlock.get());
    else
        setupJIT(vm, codeBlock.get());

    installCode(codeBlock.get());
    return 0;
}

Worklist::State Worklist::completeAllReadyPlansForVM(VM& vm, CompilationKey requestedKey)
{
    DeferGC deferGC(vm.heap);
    Vector<RefPtr<Plan>, 8> myReadyPlans;

    removeAllReadyPlansForVM(vm, myReadyPlans);

    State resultingState = NotKnown;

    while (!myReadyPlans.isEmpty()) {
        RefPtr<Plan> plan = myReadyPlans.takeLast();
        CompilationKey currentKey = plan->key();

        if (Options::verboseCompilationQueue())
            dataLog(*this, ": Completing ", currentKey, "\n");

        RELEASE_ASSERT(plan->isCompiled);

        plan->finalizeAndNotifyCallback();

        if (currentKey == requestedKey)
            resultingState = Compiled;
    }

    if (!!requestedKey && resultingState == NotKnown) {
        MutexLocker locker(m_lock);
        if (m_plans.contains(requestedKey))
            resultingState = Compiling;
    }

    return resultingState;
}

void InspectorHeapAgent::getRemoteObject(ErrorString& errorString, int heapObjectId, const String* optionalObjectGroup, RefPtr<Inspector::Protocol::Runtime::RemoteObject>& result)
{
    // Prevent the cell from getting collected as we look it up.
    VM& vm = m_environment.vm();
    JSLockHolder lock(vm);
    DeferGC deferGC(vm.heap);

    unsigned heapObjectIdentifier = static_cast<unsigned>(heapObjectId);
    const Optional<HeapSnapshotNode> optionalNode = nodeForHeapObjectIdentifier(errorString, heapObjectIdentifier);
    if (!optionalNode)
        return;

    JSCell* cell = optionalNode->cell;
    Structure* structure = cell->structure(m_environment.vm());
    if (!structure) {
        errorString = ASCIILiteral("Unable to get object details");
        return;
    }

    JSGlobalObject* globalObject = structure->globalObject();
    if (!globalObject) {
        errorString = ASCIILiteral("Unable to get object details");
        return;
    }

    InjectedScript injectedScript = m_injectedScriptManager.injectedScriptFor(globalObject->globalExec());
    if (injectedScript.hasNoValue()) {
        errorString = ASCIILiteral("Unable to get object details - InjectedScript");
        return;
    }

    Deprecated::ScriptValue cellScriptValue(m_environment.vm(), JSValue(cell));
    String objectGroup = optionalObjectGroup ? *optionalObjectGroup : String();
    result = injectedScript.wrapObject(cellScriptValue, objectGroup, true);
}

void JITWorklist::compileNow(CodeBlock* codeBlock)
{
    DeferGC deferGC(codeBlock->vm()->heap);
    if (codeBlock->jitType() != JITCode::InterpreterThunk)
        return;

    bool isPlanned;
    {
        LockHolder locker(m_lock);
        isPlanned = m_planned.contains(codeBlock);
    }

    if (isPlanned) {
        RELEASE_ASSERT(Options::useConcurrentJIT());
        // This is expensive, but probably good enough.
        completeAllForVM(*codeBlock->vm());
    }

    // Now it might be compiled!
    if (codeBlock->jitType() != JITCode::InterpreterThunk)
        return;

    // We do this in case we had previously attempted, and then failed, to compile with the
    // baseline JIT.
    codeBlock->resetJITData();

    // OK, just compile it.
    JIT::compile(codeBlock->vm(), codeBlock, JITCompilationMustSucceed);
    codeBlock->ownerScriptExecutable()->installCode(codeBlock);
}

JSCell* JIT_OPERATION operationCreateDirectArgumentsDuringExit(ExecState* exec, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount)
{
    VM& vm = exec->vm();
    NativeCallFrameTracer target(&vm, exec);

    DeferGCForAWhile deferGC(vm.heap);

    CodeBlock* codeBlock;
    if (inlineCallFrame)
        codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    else
        codeBlock = exec->codeBlock();

    unsigned length = argumentCount - 1;
    unsigned capacity = std::max(length, static_cast<unsigned>(codeBlock->numParameters() - 1));
    DirectArguments* result = DirectArguments::create(
        vm, codeBlock->globalObject()->directArgumentsStructure(), length, capacity);

    result->callee().set(vm, result, callee);

    Register* arguments =
        exec->registers() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0)
        + CallFrame::argumentOffset(0);
    for (unsigned i = length; i--;)
        result->setIndexQuickly(vm, i, arguments[i].jsValue());

    return result;
}

JSObject* ScriptExecutable::prepareForExecutionImpl(
    ExecState* exec, JSFunction* function, JSScope* scope, CodeSpecializationKind kind)
{
    VM& vm = exec->vm();
    DeferGC deferGC(vm.heap);

    if (vm.getAndClearFailNextNewCodeBlock())
        return createError(exec->callerFrame(), ASCIILiteral("Forced Failure"));

    JSObject* exception = 0;
    CodeBlock* codeBlock = newCodeBlockFor(kind, function, scope, exception);
    if (!codeBlock) {
        RELEASE_ASSERT(exception);
        return exception;
    }

    if (Options::validateBytecode())
        codeBlock->validate();

    if (Options::useLLInt())
        setupLLInt(vm, codeBlock);
    else
        setupJIT(vm, codeBlock);

    installCode(*codeBlock->vm(), codeBlock, codeBlock->codeType(), codeBlock->specializationKind());
    return 0;
}

JSCell* JIT_OPERATION operationCreateClonedArgumentsDuringExit(ExecState* exec, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount)
{
    VM& vm = exec->vm();
    NativeCallFrameTracer target(&vm, exec);

    DeferGCForAWhile deferGC(vm.heap);

    CodeBlock* codeBlock;
    if (inlineCallFrame)
        codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    else
        codeBlock = exec->codeBlock();

    unsigned length = argumentCount - 1;
    ClonedArguments* result = ClonedArguments::createEmpty(
        vm, codeBlock->globalObject()->outOfBandArgumentsStructure(), callee);

    Register* arguments =
        exec->registers() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0)
        + CallFrame::argumentOffset(0);
    for (unsigned i = length; i--;)
        result->putDirectIndex(exec, i, arguments[i].jsValue());

    result->putDirect(vm, vm.propertyNames->length, jsNumber(length));

    return result;
}

bool InferredType::set(const ConcurrentJSLocker& locker, VM& vm, Descriptor newDescriptor)
{
    // We will trigger write barriers while holding our lock. Currently, write barriers don't GC, but that
    // could change. If it does, we don't want to deadlock. Note that we could have used
    // GCSafeConcurrentJSLocker in the caller, but the caller is on a fast path so maybe that wouldn't be
    // a good idea.
    DeferGCForAWhile deferGC(vm.heap);

    // Be defensive: if we're not really changing the type, then we don't have to do anything.
    if (descriptor(locker) == newDescriptor)
        return false;

    bool shouldFireWatchpointSet = false;

    // The new descriptor must be more general than the previous one.
    ASSERT(newDescriptor.subsumes(descriptor(locker)));

    // If the old and new descriptors have different structures, it can only be because one of them
    // is null.
    if (descriptor(locker).structure() != newDescriptor.structure())
        ASSERT(!descriptor(locker).structure() || !newDescriptor.structure());

    // We are changing the type, so make sure that if anyone was watching, they find out about it now. If
    // anyone is watching, we make sure to go to Top so that we don't do this sort of thing again.
    if (m_watchpointSet.state() != ClearWatchpoint) {
        // We cannot have been invalidated, since if we were, then we'd already be at Top.
        ASSERT(m_watchpointSet.state() != IsInvalidated);

        // We're about to do expensive things because some compiler thread decided to watch this type and
        // then the type changed. Assume that this property is crazy, and don't ever do any more things for
        // it.
        newDescriptor = Top;

        shouldFireWatchpointSet = true;
    }

    // Remove the old InferredStructure object if we no longer need it.
    if (!newDescriptor.structure())
        m_structure = nullptr;

    // Add a new InferredStructure object if we need one now.
    if (newDescriptor.structure()) {
        if (m_structure) {
            // We should agree on the structures if we get here.
            ASSERT(newDescriptor.structure() == m_structure->structure());
        } else {
            m_structure = adoptRef(new InferredStructure(vm, this, newDescriptor.structure()));
            newDescriptor.structure()->addTransitionWatchpoint(&m_structure->m_watchpoint);
        }
    }

    // Finally, set the descriptor kind.
    m_kind = newDescriptor.kind();

    // Assert that we did things.
    ASSERT(descriptor(locker) == newDescriptor);

    return shouldFireWatchpointSet;
}

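// Editor's note: a hypothetical call site for InferredType::set(), sketching
// one plausible discipline implied by the comments above: mutate under the
// ConcurrentJSLocker, then fire the watchpoint set based on the returned
// flag. The method name, lock member, and reason string are assumptions for
// illustration; this is not the real caller.
void InferredType::descriptorChanged(VM& vm, Descriptor newDescriptor) // hypothetical
{
    bool shouldFire;
    {
        ConcurrentJSLocker locker(m_lock); // assumes the type owns its lock
        shouldFire = set(locker, vm, newDescriptor);
    }
    // Firing can run arbitrary watchpoint code, so do it after the state
    // change is complete.
    if (shouldFire)
        m_watchpointSet.fireAll(vm, "Inferred type changed"); // reason string is illustrative
}
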
void prepareCodeOriginForOSRExit(ExecState* exec, CodeOrigin codeOrigin)
{
    VM& vm = exec->vm();
    DeferGC deferGC(vm.heap);

    for (; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->directCaller) {
        CodeBlock* codeBlock = codeOrigin.inlineCallFrame->baselineCodeBlock.get();
        JITWorklist::instance()->compileNow(codeBlock);
    }
}

char* JIT_OPERATION operationReallocateButterflyToGrowPropertyStorage(ExecState* exec, JSObject* object, size_t newSize)
{
    VM& vm = exec->vm();
    NativeCallFrameTracer tracer(&vm, exec);
    DeferGC deferGC(vm.heap);

    Butterfly* result = object->growOutOfLineStorage(vm, object->structure()->outOfLineCapacity(), newSize);
    object->setButterflyWithoutChangingStructure(vm, result);
    return reinterpret_cast<char*>(result);
}

char* JIT_OPERATION operationReallocateButterflyToHavePropertyStorageWithInitialCapacity(ExecState* exec, JSObject* object)
{
    VM& vm = exec->vm();
    NativeCallFrameTracer tracer(&vm, exec);
    ASSERT(!object->structure()->outOfLineCapacity());
    DeferGC deferGC(vm.heap);

    Butterfly* result = object->growOutOfLineStorage(vm, 0, initialOutOfLineCapacity);
    object->setButterflyWithoutChangingStructure(vm, result);
    return reinterpret_cast<char*>(result);
}

void JITWorklist::compileLater(CodeBlock* codeBlock)
{
    DeferGC deferGC(codeBlock->vm()->heap);
    RELEASE_ASSERT(codeBlock->jitType() == JITCode::InterpreterThunk);

    if (codeBlock->m_didFailJITCompilation) {
        codeBlock->dontJITAnytimeSoon();
        return;
    }

    if (!Options::useConcurrentJIT()) {
        Plan::compileNow(codeBlock);
        return;
    }

    codeBlock->jitSoon();

    {
        LockHolder locker(m_lock);

        if (m_planned.contains(codeBlock))
            return;

        if (m_numAvailableThreads) {
            m_planned.add(codeBlock);
            RefPtr<Plan> plan = adoptRef(new Plan(codeBlock));
            m_plans.append(plan);
            m_queue.append(plan);
            m_condition.notifyAll();
            return;
        }
    }

    // Compiling on the main thread if the helper thread isn't available is a defense against this
    // pathology:
    //
    // 1) Do something that is allowed to take a while, like load a giant piece of initialization
    //    code. This plans the compile of the init code, but doesn't finish it. It will take a
    //    while.
    //
    // 2) Do something that is supposed to be quick. Now all baseline compiles, and so all DFG and
    //    FTL compiles, of everything is blocked on the long-running baseline compile of that
    //    initialization code.
    //
    // The single-threaded concurrent JIT has this tendency to convoy everything while at the same
    // time postponing when it happens, which means that the convoy delays are less predictable.
    // This works around the issue. If the concurrent JIT thread is convoyed, we revert to main
    // thread compiles. This is probably not as good as if we had multiple JIT threads. Maybe we
    // can do that someday.
    Plan::compileNow(codeBlock);
}

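// Editor's note: a generic, self-contained sketch (not JSC code; all names
// are illustrative) of the fallback policy compileLater() implements above:
// queue work when a helper thread is free, otherwise run it synchronously so
// a long-running queued job cannot convoy quick ones. The worker-side loop
// that drains m_queue and notifies a condition variable is omitted.
#include <deque>
#include <functional>
#include <mutex>

class FallbackQueue {
public:
    void submit(std::function<void()> task)
    {
        {
            std::lock_guard<std::mutex> lock(m_lock);
            if (m_availableWorkers) {
                // A helper thread is idle; let it pick this up.
                m_queue.push_back(std::move(task));
                return;
            }
        }
        // No idle worker: run inline rather than piling up behind a slow job.
        task();
    }

private:
    std::mutex m_lock;
    std::deque<std::function<void()>> m_queue;
    unsigned m_availableWorkers { 0 };
};
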
extern "C" void JIT_OPERATION triggerReoptimizationNow(CodeBlock* codeBlock, OSRExitBase* exit) { // It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't // really be profitable. DeferGCForAWhile deferGC(codeBlock->vm()->heap); if (Options::verboseOSR()) dataLog(*codeBlock, ": Entered reoptimize\n"); // We must be called with the baseline code block. ASSERT(JITCode::isBaselineCode(codeBlock->jitType())); // If I am my own replacement, then reoptimization has already been triggered. // This can happen in recursive functions. if (codeBlock->replacement() == codeBlock) { if (Options::verboseOSR()) dataLog(*codeBlock, ": Not reoptimizing because we've already been jettisoned.\n"); return; } // Otherwise, the replacement must be optimized code. Use this as an opportunity // to check our logic. ASSERT(codeBlock->hasOptimizedReplacement()); CodeBlock* optimizedCodeBlock = codeBlock->replacement(); ASSERT(JITCode::isOptimizingJIT(optimizedCodeBlock->jitType())); bool didTryToEnterIntoInlinedLoops = false; for (InlineCallFrame* inlineCallFrame = exit->m_codeOrigin.inlineCallFrame; inlineCallFrame; inlineCallFrame = inlineCallFrame->caller.inlineCallFrame) { if (inlineCallFrame->executable->didTryToEnterInLoop()) { didTryToEnterIntoInlinedLoops = true; break; } } // In order to trigger reoptimization, one of two things must have happened: // 1) We exited more than some number of times. // 2) We exited and got stuck in a loop, and now we're exiting again. bool didExitABunch = optimizedCodeBlock->shouldReoptimizeNow(); bool didGetStuckInLoop = (codeBlock->checkIfOptimizationThresholdReached() || didTryToEnterIntoInlinedLoops) && optimizedCodeBlock->shouldReoptimizeFromLoopNow(); if (!didExitABunch && !didGetStuckInLoop) { if (Options::verboseOSR()) dataLog(*codeBlock, ": Not reoptimizing ", *optimizedCodeBlock, " because it either didn't exit enough or didn't loop enough after exit.\n"); codeBlock->optimizeAfterLongWarmUp(); return; } optimizedCodeBlock->jettison(Profiler::JettisonDueToOSRExit, CountReoptimization); }
void InspectorHeapAgent::getPreview(ErrorString& errorString, int heapObjectId, Inspector::Protocol::OptOutput<String>* resultString, RefPtr<Inspector::Protocol::Debugger::FunctionDetails>& functionDetails, RefPtr<Inspector::Protocol::Runtime::ObjectPreview>& objectPreview)
{
    // Prevent the cell from getting collected as we look it up.
    VM& vm = m_environment.vm();
    JSLockHolder lock(vm);
    DeferGC deferGC(vm.heap);

    unsigned heapObjectIdentifier = static_cast<unsigned>(heapObjectId);
    const Optional<HeapSnapshotNode> optionalNode = nodeForHeapObjectIdentifier(errorString, heapObjectIdentifier);
    if (!optionalNode)
        return;

    // String preview.
    JSCell* cell = optionalNode->cell;
    if (cell->isString()) {
        *resultString = cell->getString(nullptr);
        return;
    }

    // FIXME: Provide preview information for Internal Objects? CodeBlock, Executable, etc.

    Structure* structure = cell->structure(m_environment.vm());
    if (!structure) {
        errorString = ASCIILiteral("Unable to get object details - Structure");
        return;
    }

    JSGlobalObject* globalObject = structure->globalObject();
    if (!globalObject) {
        errorString = ASCIILiteral("Unable to get object details - GlobalObject");
        return;
    }

    InjectedScript injectedScript = m_injectedScriptManager.injectedScriptFor(globalObject->globalExec());
    if (injectedScript.hasNoValue()) {
        errorString = ASCIILiteral("Unable to get object details - InjectedScript");
        return;
    }

    // Function preview.
    if (cell->inherits(JSFunction::info())) {
        Deprecated::ScriptValue functionScriptValue(m_environment.vm(), JSValue(cell));
        injectedScript.functionDetails(errorString, functionScriptValue, &functionDetails);
        return;
    }

    // Object preview.
    Deprecated::ScriptValue cellScriptValue(m_environment.vm(), JSValue(cell));
    objectPreview = injectedScript.previewValue(cellScriptValue);
}

// Call this only if you know that exception fuzzing is enabled.
void doExceptionFuzzing(ExecState* exec, const char* where, void* returnPC)
{
    ASSERT(Options::useExceptionFuzz());

    DeferGCForAWhile deferGC(exec->vm().heap);

    s_numberOfExceptionFuzzChecks++;

    unsigned fireTarget = Options::fireExceptionFuzzAt();
    if (fireTarget == s_numberOfExceptionFuzzChecks) {
        printf("JSC EXCEPTION FUZZ: Throwing fuzz exception with call frame %p, seen in %s and return address %p.\n", exec, where, returnPC);
        exec->vm().throwException(
            exec, createError(exec, ASCIILiteral("Exception Fuzz")));
    }
}

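// Editor's note: a minimal standalone sketch (illustrative names, not JSC
// API) of the counter-based fault-injection pattern doExceptionFuzzing()
// uses above: every call site bumps a global counter, and the run fails at
// exactly one configured checkpoint. A driver would re-run the program with
// fireAt = 1, 2, 3, ... so that each checkpoint gets to fail once across the
// run set.
#include <cstdio>

static unsigned s_checkpointsSeen;

// Returns true when the globally configured Nth checkpoint is reached.
bool shouldInjectFailure(unsigned fireAt)
{
    ++s_checkpointsSeen;
    if (s_checkpointsSeen != fireAt)
        return false;
    std::printf("fault injection: firing at checkpoint %u\n", s_checkpointsSeen);
    return true;
}
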
extern "C" void* JIT_OPERATION compileFTLLazySlowPath(ExecState* exec, unsigned index) { VM& vm = exec->vm(); // We cannot GC. We've got pointers in evil places. DeferGCForAWhile deferGC(vm.heap); CodeBlock* codeBlock = exec->codeBlock(); JITCode* jitCode = codeBlock->jitCode()->ftl(); LazySlowPath& lazySlowPath = *jitCode->lazySlowPaths[index]; lazySlowPath.generate(codeBlock); return lazySlowPath.stub().code().executableAddress(); }
void Heap::collectAndSweep(HeapOperation collectionType)
{
    if (!m_isSafeToCollect)
        return;

    collect(collectionType);

    SamplingRegion samplingRegion("Garbage Collection: Sweeping");

    DeferGCForAWhile deferGC(*this);
    m_objectSpace.sweep();
    m_objectSpace.shrink();

    sweepAllLogicallyEmptyWeakBlocks();
}

void prepareCodeOriginForOSRExit(ExecState* exec, CodeOrigin codeOrigin)
{
    VM& vm = exec->vm();
    DeferGC deferGC(vm.heap);

    for (; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->directCaller) {
        CodeBlock* codeBlock = codeOrigin.inlineCallFrame->baselineCodeBlock();
        if (codeBlock->jitType() == JSC::JITCode::BaselineJIT)
            continue;

        ASSERT(codeBlock->jitType() == JSC::JITCode::InterpreterThunk);
        JIT::compile(&vm, codeBlock, JITCompilationMustSucceed);
        codeBlock->ownerScriptExecutable()->installCode(codeBlock);
    }
}

void Worklist::removeAllReadyPlansForVM(VM& vm, Vector<RefPtr<Plan>, 8>& myReadyPlans)
{
    DeferGC deferGC(vm.heap);
    MutexLocker locker(m_lock);
    for (size_t i = 0; i < m_readyPlans.size(); ++i) {
        RefPtr<Plan> plan = m_readyPlans[i];
        if (&plan->vm != &vm)
            continue;
        if (!plan->isCompiled)
            continue;
        myReadyPlans.append(plan);
        // Unordered removal: overwrite this slot with the last plan, shrink the
        // vector, and revisit the same index on the next iteration.
        m_readyPlans[i--] = m_readyPlans.last();
        m_readyPlans.removeLast();
        m_plans.remove(plan->key());
    }
}

void prepareCodeOriginForOSRExit(ExecState* exec, CodeOrigin codeOrigin)
{
    VM& vm = exec->vm();
    DeferGC deferGC(vm.heap);

    for (; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        FunctionExecutable* executable = static_cast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get());
        CodeBlock* codeBlock = executable->baselineCodeBlockFor(
            codeOrigin.inlineCallFrame->specializationKind());
        if (codeBlock->jitType() == JSC::JITCode::BaselineJIT)
            continue;

        ASSERT(codeBlock->jitType() == JSC::JITCode::InterpreterThunk);
        JIT::compile(&vm, codeBlock, JITCompilationMustSucceed);
        codeBlock->install();
    }
}

void JITWorklist::poll(VM& vm)
{
    DeferGC deferGC(vm.heap);
    Plans myPlans;
    {
        LockHolder locker(m_lock);
        m_plans.removeAllMatching(
            [&] (RefPtr<Plan>& plan) {
                if (plan->vm() != &vm)
                    return false;
                if (!plan->isFinishedCompiling())
                    return false;
                myPlans.append(WTFMove(plan));
                return true;
            });
    }

    finalizePlans(myPlans);
}

void WebAssemblyExecutable::prepareForExecution(ExecState* exec)
{
    if (hasJITCodeForCall())
        return;

    VM& vm = exec->vm();
    DeferGC deferGC(vm.heap);

    WebAssemblyCodeBlock* codeBlock = WebAssemblyCodeBlock::create(&vm, this, exec->lexicalGlobalObject());

    WASMFunctionParser::compile(vm, codeBlock, m_module.get(), m_source, m_functionIndex);

    m_jitCodeForCall = codeBlock->jitCode();
    m_jitCodeForCallWithArityCheck = MacroAssemblerCodePtr();
    m_numParametersForCall = codeBlock->numParameters();

    m_codeBlockForCall.set(vm, this, codeBlock);

    Heap::heap(this)->writeBarrier(this);
}

void WebAssemblyExecutable::prepareForExecution(ExecState* exec)
{
    if (hasJITCodeForCall())
        return;

    VM& vm = exec->vm();
    DeferGC deferGC(vm.heap);

    RefPtr<WebAssemblyCodeBlock> codeBlock = adoptRef(new WebAssemblyCodeBlock(
        this, vm, exec->lexicalGlobalObject()));

    WASMFunctionParser::compile(vm, codeBlock.get(), m_module.get(), m_source, m_functionIndex);

    m_jitCodeForCall = codeBlock->jitCode();
    m_jitCodeForCallWithArityCheck = MacroAssemblerCodePtr();
    m_jitCodeForCallWithArityCheckAndPreserveRegs = MacroAssemblerCodePtr();
    m_numParametersForCall = codeBlock->numParameters();

    m_codeBlockForCall = codeBlock;

    Heap::heap(this)->writeBarrier(this);
}

void JIT_OPERATION triggerTierUpNow(ExecState* exec)
{
    VM* vm = &exec->vm();
    NativeCallFrameTracer tracer(vm, exec);
    DeferGC deferGC(vm->heap);
    CodeBlock* codeBlock = exec->codeBlock();

    if (codeBlock->jitType() != JITCode::DFGJIT) {
        dataLog("Unexpected code block in DFG->FTL tier-up: ", *codeBlock, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    JITCode* jitCode = codeBlock->jitCode()->dfg();

    if (Options::verboseOSR()) {
        dataLog(
            *codeBlock, ": Entered triggerTierUpNow with executeCounter = ",
            jitCode->tierUpCounter, "\n");
    }

    triggerFTLReplacementCompile(vm, codeBlock, jitCode);
}

void JITWorklist::completeAllForVM(VM& vm)
{
    DeferGC deferGC(vm.heap);
    for (;;) {
        Vector<RefPtr<Plan>, 32> myPlans;
        {
            LockHolder locker(m_lock);
            for (;;) {
                bool didFindUnfinishedPlan = false;
                m_plans.removeAllMatching(
                    [&] (RefPtr<Plan>& plan) {
                        if (plan->vm() != &vm)
                            return false;
                        if (!plan->isFinishedCompiling()) {
                            didFindUnfinishedPlan = true;
                            return false;
                        }
                        myPlans.append(WTFMove(plan));
                        return true;
                    });

                // If we found plans then we should finalize them now.
                if (!myPlans.isEmpty())
                    break;

                // If we don't find plans, then we're either done or we need to wait, depending on
                // whether we found some unfinished plans.
                if (!didFindUnfinishedPlan)
                    return;

                m_condition.wait(m_lock);
            }
        }

        finalizePlans(myPlans);
    }
}

JSObject* ScriptExecutable::prepareForExecutionImpl(
    ExecState* exec, JSFunction* function, JSScope* scope, CodeSpecializationKind kind)
{
    VM& vm = exec->vm();
    DeferGC deferGC(vm.heap);

    JSObject* exception = 0;
    RefPtr<CodeBlock> codeBlock = newCodeBlockFor(kind, function, scope, exception);
    if (!codeBlock) {
        RELEASE_ASSERT(exception);
        return exception;
    }

    if (Options::validateBytecode())
        codeBlock->validate();

    if (Options::useLLInt())
        setupLLInt(vm, codeBlock.get());
    else
        setupJIT(vm, codeBlock.get());

    installCode(codeBlock.get());
    return 0;
}

char* JIT_OPERATION triggerOSREntryNow(
    ExecState* exec, int32_t bytecodeIndex, int32_t streamIndex)
{
    VM* vm = &exec->vm();
    NativeCallFrameTracer tracer(vm, exec);
    DeferGC deferGC(vm->heap);
    CodeBlock* codeBlock = exec->codeBlock();

    if (codeBlock->jitType() != JITCode::DFGJIT) {
        dataLog("Unexpected code block in DFG->FTL tier-up: ", *codeBlock, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    JITCode* jitCode = codeBlock->jitCode()->dfg();

    if (Options::verboseOSR()) {
        dataLog(
            *codeBlock, ": Entered triggerOSREntryNow with executeCounter = ",
            jitCode->tierUpCounter, "\n");
    }

    // - If we don't have an FTL code block, then try to compile one.
    // - If we do have an FTL code block, then try to enter for a while.
    // - If we couldn't enter for a while, then trigger OSR entry.

    triggerFTLReplacementCompile(vm, codeBlock, jitCode);

    if (!codeBlock->hasOptimizedReplacement())
        return 0;

    if (jitCode->osrEntryRetry < Options::ftlOSREntryRetryThreshold()) {
        jitCode->osrEntryRetry++;
        return 0;
    }

    // It's time to try to compile code for OSR entry.
    Worklist::State worklistState;
    if (Worklist* worklist = existingGlobalFTLWorklistOrNull()) {
        worklistState = worklist->completeAllReadyPlansForVM(
            *vm, CompilationKey(codeBlock->baselineVersion(), FTLForOSREntryMode));
    } else
        worklistState = Worklist::NotKnown;

    if (worklistState == Worklist::Compiling)
        return 0;

    if (CodeBlock* entryBlock = jitCode->osrEntryBlock.get()) {
        void* address = FTL::prepareOSREntry(
            exec, codeBlock, entryBlock, bytecodeIndex, streamIndex);
        if (address)
            return static_cast<char*>(address);

        FTL::ForOSREntryJITCode* entryCode = entryBlock->jitCode()->ftlForOSREntry();
        entryCode->countEntryFailure();
        if (entryCode->entryFailureCount() < Options::ftlOSREntryFailureCountForReoptimization())
            return 0;

        // OSR entry failed. Oh no! This implies that we need to retry. We retry
        // without exponential backoff and we only do this for the entry code block.
        jitCode->osrEntryBlock.clear();
        jitCode->osrEntryRetry = 0;
        return 0;
    }

    if (worklistState == Worklist::Compiled) {
        // This means that compilation failed and we already set the thresholds.
        if (Options::verboseOSR())
            dataLog("Code block ", *codeBlock, " was compiled but it doesn't have an optimized replacement.\n");
        return 0;
    }

    // We aren't compiling and haven't compiled anything for OSR entry. So, try to compile
    // something.
    Operands<JSValue> mustHandleValues;
    jitCode->reconstruct(
        exec, codeBlock, CodeOrigin(bytecodeIndex), streamIndex, mustHandleValues);
    RefPtr<CodeBlock> replacementCodeBlock = codeBlock->newReplacement();
    CompilationResult forEntryResult = compile(
        *vm, replacementCodeBlock.get(), codeBlock, FTLForOSREntryMode, bytecodeIndex,
        mustHandleValues, ToFTLForOSREntryDeferredCompilationCallback::create(codeBlock));

    if (forEntryResult != CompilationSuccessful) {
        ASSERT(forEntryResult == CompilationDeferred || replacementCodeBlock->hasOneRef());
        return 0;
    }

    // It's possible that the for-entry compile already succeeded. In that case OSR
    // entry will succeed unless we ran out of stack. It's not clear what we should do.
    // We signal to try again after a while if that happens.
    void* address = FTL::prepareOSREntry(
        exec, codeBlock, jitCode->osrEntryBlock.get(), bytecodeIndex, streamIndex);
    return static_cast<char*>(address);
}

void Worklist::completeAllPlansForVM(VM& vm)
{
    DeferGC deferGC(vm.heap);
    waitUntilAllPlansForVMAreReady(vm);
    completeAllReadyPlansForVM(vm);
}