inline bool compile(CompileMode compileMode, JSGlobalData& globalData, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr* jitCodeWithArityCheck)
{
    SamplingRegion samplingRegion("DFG Compilation (Driver)");

    ASSERT(codeBlock);
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->getJITType() == JITCode::BaselineJIT);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("DFG compiling code block %p(%p), number of instructions = %u.\n", codeBlock, codeBlock->alternative(), codeBlock->instructionCount());
#endif

    Graph dfg;
    if (!parse(dfg, &globalData, codeBlock))
        return false;

    if (compileMode == CompileFunction)
        dfg.predictArgumentTypes(codeBlock);

    propagate(dfg, &globalData, codeBlock);

    JITCompiler dataFlowJIT(&globalData, dfg, codeBlock);
    if (compileMode == CompileFunction) {
        ASSERT(jitCodeWithArityCheck);
        dataFlowJIT.compileFunction(jitCode, *jitCodeWithArityCheck);
    } else {
        ASSERT(compileMode == CompileOther);
        ASSERT(!jitCodeWithArityCheck);
        dataFlowJIT.compile(jitCode);
    }

    return true;
}
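Every snippet in this collection opens with the same RAII idiom: constructing a SamplingRegion on the stack attributes everything executed in the enclosing scope to a named region, and the destructor closes the region on any exit path, including early returns. Below is a minimal sketch of that shape; the class name and bookkeeping are illustrative assumptions, not JSC's actual SamplingRegion (which only does real work when sampling support is compiled in).

// Illustrative RAII region marker (an assumption for exposition; not JSC's
// SamplingRegion). Construction pushes a named region for the current thread;
// destruction pops it, so the region ends on every exit path.
class ScopedRegionSketch {
public:
    explicit ScopedRegionSketch(const char* name)
        : m_name(name)
    {
        s_current = this; // enter: this region becomes the innermost one
    }

    ~ScopedRegionSketch()
    {
        s_current = m_parent; // leave: restore the enclosing region
    }

    static const char* currentName() { return s_current ? s_current->m_name : "<none>"; }

private:
    const char* m_name;
    ScopedRegionSketch* m_parent { s_current }; // captured before the ctor body runs
    static thread_local ScopedRegionSketch* s_current;
};

thread_local ScopedRegionSketch* ScopedRegionSketch::s_current = nullptr;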
JSObject* FunctionExecutable::compileForConstructInternal(ExecState* exec, JSScope* scope, JITCode::JITType jitType, unsigned bytecodeIndex)
{
    SamplingRegion samplingRegion(samplingDescription(jitType));

#if !ENABLE(JIT)
    UNUSED_PARAM(jitType);
    UNUSED_PARAM(exec);
    UNUSED_PARAM(bytecodeIndex);
#endif
    ASSERT((jitType == JITCode::bottomTierJIT()) == !m_codeBlockForConstruct);
    JSObject* exception;
    OwnPtr<FunctionCodeBlock> newCodeBlock = produceCodeBlockFor(scope, CodeForConstruct, exception);
    if (!newCodeBlock)
        return exception;

    newCodeBlock->setAlternative(static_pointer_cast<CodeBlock>(m_codeBlockForConstruct.release()));
    m_codeBlockForConstruct = newCodeBlock.release();

    m_numParametersForConstruct = m_codeBlockForConstruct->numParameters();
    ASSERT(m_numParametersForConstruct);

#if ENABLE(JIT)
    if (!prepareFunctionForExecution(exec, m_codeBlockForConstruct, m_jitCodeForConstruct, m_jitCodeForConstructWithArityCheck, jitType, bytecodeIndex, CodeForConstruct))
        return 0;
#endif

#if ENABLE(JIT)
    Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_codeBlockForConstruct) + m_jitCodeForConstruct.size());
#else
    Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_codeBlockForConstruct));
#endif

    return 0;
}
JSArray* createRegExpMatchesArray(ExecState* exec, JSString* input, RegExp* regExp, MatchResult result)
{
    ASSERT(result);
    VM& vm = exec->vm();
    JSArray* array = JSArray::tryCreateUninitialized(vm, exec->lexicalGlobalObject()->arrayStructureForIndexingTypeDuringAllocation(ArrayWithContiguous), regExp->numSubpatterns() + 1);
    RELEASE_ASSERT(array);

    SamplingRegion samplingRegion("Reifying substring properties");

    array->initializeIndex(vm, 0, jsSubstring(exec, input, result.start, result.end - result.start), ArrayWithContiguous);

    if (unsigned numSubpatterns = regExp->numSubpatterns()) {
        Vector<int, 32> subpatternResults;
        int position = regExp->match(vm, input->value(exec), result.start, subpatternResults);
        ASSERT_UNUSED(position, position >= 0 && static_cast<size_t>(position) == result.start);
        ASSERT(result.start == static_cast<size_t>(subpatternResults[0]));
        ASSERT(result.end == static_cast<size_t>(subpatternResults[1]));

        for (unsigned i = 1; i <= numSubpatterns; ++i) {
            int start = subpatternResults[2 * i];
            if (start >= 0)
                array->initializeIndex(vm, i, jsSubstring(exec, input, start, subpatternResults[2 * i + 1] - start), ArrayWithContiguous);
            else
                array->initializeIndex(vm, i, jsUndefined(), ArrayWithContiguous);
        }
    }

    array->putDirect(vm, vm.propertyNames->index, jsNumber(result.start));
    array->putDirect(vm, vm.propertyNames->input, input);
    return array;
}
JSObject* ProgramExecutable::compileInternal(ExecState* exec, JSScope* scope, JITCode::JITType jitType, unsigned bytecodeIndex)
{
    SamplingRegion samplingRegion(samplingDescription(jitType));

#if !ENABLE(JIT)
    UNUSED_PARAM(exec);
    UNUSED_PARAM(jitType);
    UNUSED_PARAM(bytecodeIndex);
#endif
    if (!!m_programCodeBlock) {
        OwnPtr<ProgramCodeBlock> newCodeBlock = adoptPtr(new ProgramCodeBlock(CodeBlock::CopyParsedBlock, *m_programCodeBlock));
        newCodeBlock->setAlternative(static_pointer_cast<CodeBlock>(m_programCodeBlock.release()));
        m_programCodeBlock = newCodeBlock.release();
    } else {
        JSGlobalObject* globalObject = scope->globalObject();
        m_programCodeBlock = adoptPtr(new ProgramCodeBlock(this, m_unlinkedProgramCodeBlock.get(), globalObject, source().provider(), m_programCodeBlock.release()));
        m_programCodeBlock->copyPostParseDataFromAlternative();
    }

#if ENABLE(JIT)
    if (!prepareForExecution(exec, m_programCodeBlock, m_jitCodeForCall, jitType, bytecodeIndex))
        return 0;
#endif

#if ENABLE(JIT)
    Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_programCodeBlock) + m_jitCodeForCall.size());
#else
    Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_programCodeBlock));
#endif

    return 0;
}
JSObject* EvalExecutable::compileInternal(ExecState* exec, ScopeChainNode* scopeChainNode, JITCode::JITType jitType)
{
    SamplingRegion samplingRegion(jitType == JITCode::BaselineJIT ? "Baseline Compilation (TOTAL)" : "DFG Compilation (TOTAL)");

#if !ENABLE(JIT)
    UNUSED_PARAM(jitType);
#endif
    JSObject* exception = 0;
    JSGlobalData* globalData = &exec->globalData();
    JSGlobalObject* lexicalGlobalObject = exec->lexicalGlobalObject();

    if (!!m_evalCodeBlock && m_evalCodeBlock->canProduceCopyWithBytecode()) {
        BytecodeDestructionBlocker blocker(m_evalCodeBlock.get());
        OwnPtr<EvalCodeBlock> newCodeBlock = adoptPtr(new EvalCodeBlock(CodeBlock::CopyParsedBlock, *m_evalCodeBlock));
        newCodeBlock->setAlternative(static_pointer_cast<CodeBlock>(m_evalCodeBlock.release()));
        m_evalCodeBlock = newCodeBlock.release();
    } else {
        if (!lexicalGlobalObject->evalEnabled())
            return throwError(exec, createEvalError(exec, "Eval is disabled"));

        RefPtr<EvalNode> evalNode = parse<EvalNode>(globalData, lexicalGlobalObject, m_source, 0, isStrictMode() ? JSParseStrict : JSParseNormal, EvalNode::isFunctionNode ? JSParseFunctionCode : JSParseProgramCode, lexicalGlobalObject->debugger(), exec, &exception);
        if (!evalNode) {
            ASSERT(exception);
            return exception;
        }
        recordParse(evalNode->features(), evalNode->hasCapturedVariables(), evalNode->lineNo(), evalNode->lastLine());

        JSGlobalObject* globalObject = scopeChainNode->globalObject.get();

        OwnPtr<CodeBlock> previousCodeBlock = m_evalCodeBlock.release();
        ASSERT((jitType == JITCode::bottomTierJIT()) == !previousCodeBlock);
        m_evalCodeBlock = adoptPtr(new EvalCodeBlock(this, globalObject, source().provider(), scopeChainNode->localDepth(), previousCodeBlock.release()));
        OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(evalNode.get(), scopeChainNode, m_evalCodeBlock->symbolTable(), m_evalCodeBlock.get(), !!m_evalCodeBlock->alternative() ? OptimizingCompilation : FirstCompilation)));
        if ((exception = generator->generate())) {
            m_evalCodeBlock = static_pointer_cast<EvalCodeBlock>(m_evalCodeBlock->releaseAlternative());
            evalNode->destroyData();
            return exception;
        }

        evalNode->destroyData();
        m_evalCodeBlock->copyPostParseDataFromAlternative();
    }

#if ENABLE(JIT)
    if (!jitCompileIfAppropriate(*globalData, m_evalCodeBlock, m_jitCodeForCall, jitType))
        return 0;
#endif

#if ENABLE(JIT)
#if ENABLE(INTERPRETER)
    if (!m_jitCodeForCall)
        Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_evalCodeBlock));
    else
#endif
        Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_evalCodeBlock) + m_jitCodeForCall.size());
#else
    Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_evalCodeBlock));
#endif

    return 0;
}
void Heap::markRoots(double gcStartTime, void* stackOrigin, void* stackTop, MachineThreads::RegisterState& calleeSavedRegisters)
{
    SamplingRegion samplingRegion("Garbage Collection: Marking");

    GCPHASE(MarkRoots);
    ASSERT(isValidThreadState(m_vm));

#if ENABLE(GGC)
    Vector<const JSCell*> rememberedSet(m_slotVisitor.markStack().size());
    m_slotVisitor.markStack().fillVector(rememberedSet);
#else
    Vector<const JSCell*> rememberedSet;
#endif

#if ENABLE(DFG_JIT)
    DFG::clearCodeBlockMarks(*m_vm);
#endif
    if (m_operationInProgress == EdenCollection)
        m_codeBlocks.clearMarksForEdenCollection(rememberedSet);
    else
        m_codeBlocks.clearMarksForFullCollection();

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots conservativeRoots(&m_objectSpace.blocks(), &m_storageSpace);
    gatherStackRoots(conservativeRoots, stackOrigin, stackTop, calleeSavedRegisters);
    gatherJSStackRoots(conservativeRoots);
    gatherScratchBufferRoots(conservativeRoots);

    clearLivenessData();

    m_sharedData.didStartMarking();
    m_slotVisitor.didStartMarking();
    HeapRootVisitor heapRootVisitor(m_slotVisitor);

    {
        ParallelModeEnabler enabler(m_slotVisitor);

        visitExternalRememberedSet();
        visitSmallStrings();
        visitConservativeRoots(conservativeRoots);
        visitProtectedObjects(heapRootVisitor);
        visitArgumentBuffers(heapRootVisitor);
        visitException(heapRootVisitor);
        visitStrongHandles(heapRootVisitor);
        visitHandleStack(heapRootVisitor);
        traceCodeBlocksAndJITStubRoutines();
        converge();
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    visitWeakHandles(heapRootVisitor);

    clearRememberedSet(rememberedSet);
    m_sharedData.didFinishMarking();
    updateObjectCounts(gcStartTime);
    resetVisitors();
}
JSObject* ProgramExecutable::compileInternal(ExecState* exec, ScopeChainNode* scopeChainNode, JITCode::JITType jitType)
{
    SamplingRegion samplingRegion(samplingDescription(jitType));

#if !ENABLE(JIT)
    UNUSED_PARAM(jitType);
#endif
    JSObject* exception = 0;
    JSGlobalData* globalData = &exec->globalData();
    JSGlobalObject* lexicalGlobalObject = exec->lexicalGlobalObject();

    if (!!m_programCodeBlock) {
        OwnPtr<ProgramCodeBlock> newCodeBlock = adoptPtr(new ProgramCodeBlock(CodeBlock::CopyParsedBlock, *m_programCodeBlock));
        newCodeBlock->setAlternative(static_pointer_cast<CodeBlock>(m_programCodeBlock.release()));
        m_programCodeBlock = newCodeBlock.release();
    } else {
        RefPtr<ProgramNode> programNode = parse<ProgramNode>(globalData, lexicalGlobalObject, m_source, 0, isStrictMode() ? JSParseStrict : JSParseNormal, ProgramNode::isFunctionNode ? JSParseFunctionCode : JSParseProgramCode, lexicalGlobalObject->debugger(), exec, &exception);
        if (!programNode) {
            ASSERT(exception);
            return exception;
        }
        recordParse(programNode->features(), programNode->hasCapturedVariables(), programNode->lineNo(), programNode->lastLine());

        JSGlobalObject* globalObject = scopeChainNode->globalObject.get();

        OwnPtr<CodeBlock> previousCodeBlock = m_programCodeBlock.release();
        ASSERT((jitType == JITCode::bottomTierJIT()) == !previousCodeBlock);
        m_programCodeBlock = adoptPtr(new ProgramCodeBlock(this, GlobalCode, globalObject, source().provider(), previousCodeBlock.release()));
        OwnPtr<BytecodeGenerator> generator(adoptPtr(new BytecodeGenerator(programNode.get(), scopeChainNode, &globalObject->symbolTable(), m_programCodeBlock.get(), !!m_programCodeBlock->alternative() ? OptimizingCompilation : FirstCompilation)));
        if ((exception = generator->generate())) {
            m_programCodeBlock = static_pointer_cast<ProgramCodeBlock>(m_programCodeBlock->releaseAlternative());
            programNode->destroyData();
            return exception;
        }

        programNode->destroyData();
        m_programCodeBlock->copyPostParseDataFromAlternative();
    }

#if ENABLE(JIT)
    if (!prepareForExecution(exec, m_programCodeBlock, m_jitCodeForCall, jitType))
        return 0;
#endif

#if ENABLE(JIT)
#if ENABLE(CLASSIC_INTERPRETER)
    if (!m_jitCodeForCall)
        Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_programCodeBlock));
    else
#endif
        Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_programCodeBlock) + m_jitCodeForCall.size());
#else
    Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_programCodeBlock));
#endif

    return 0;
}
static CompilationResult compileImpl(
    VM& vm, CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationMode mode,
    unsigned osrEntryBytecodeIndex, const Operands<JSValue>& mustHandleValues,
    PassRefPtr<DeferredCompilationCallback> callback)
{
    SamplingRegion samplingRegion("DFG Compilation (Driver)");

    numCompilations++;

    ASSERT(codeBlock);
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);

    ASSERT(!profiledDFGCodeBlock || profiledDFGCodeBlock->jitType() == JITCode::DFGJIT);

    if (logCompilationChanges(mode))
        dataLog("DFG(Driver) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");

    // Make sure that any stubs that the DFG is going to use are initialized. We want to
    // make sure that all JIT code generation does finalization on the main thread.
    vm.getCTIStub(osrExitGenerationThunkGenerator);
    vm.getCTIStub(throwExceptionFromCallSlowPathGenerator);
    if (mode == DFGMode) {
        vm.getCTIStub(linkCallThunkGenerator);
        vm.getCTIStub(linkConstructThunkGenerator);
        vm.getCTIStub(linkClosureCallThunkGenerator);
        vm.getCTIStub(virtualCallThunkGenerator);
        vm.getCTIStub(virtualConstructThunkGenerator);
    } else {
        vm.getCTIStub(linkCallThatPreservesRegsThunkGenerator);
        vm.getCTIStub(linkConstructThatPreservesRegsThunkGenerator);
        vm.getCTIStub(linkClosureCallThatPreservesRegsThunkGenerator);
        vm.getCTIStub(virtualCallThatPreservesRegsThunkGenerator);
        vm.getCTIStub(virtualConstructThatPreservesRegsThunkGenerator);
    }

    if (CallEdgeLog::isEnabled())
        vm.ensureCallEdgeLog().processLog();

    if (vm.typeProfiler())
        vm.typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for DFG compilation."));

    RefPtr<Plan> plan = adoptRef(
        new Plan(codeBlock, profiledDFGCodeBlock, mode, osrEntryBytecodeIndex, mustHandleValues));

    if (Options::enableConcurrentJIT()) {
        Worklist* worklist = ensureGlobalWorklistFor(mode);
        plan->callback = callback;
        if (logCompilationChanges(mode))
            dataLog("Deferring DFG compilation of ", *codeBlock, " with queue length ", worklist->queueLength(), ".\n");
        worklist->enqueue(plan);
        return CompilationDeferred;
    }

    plan->compileInThread(*vm.dfgState, 0);
    return plan->finalizeWithoutNotifyingCallback();
}
void Heap::zombifyDeadObjects()
{
    // Sweep now because destructors will crash once we're zombified.
    {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        m_objectSpace.zombifySweep();
    }
    HeapIterationScope iterationScope(*this);
    m_objectSpace.forEachDeadCell<Zombify>(iterationScope);
}
void Heap::collectAndSweep(HeapOperation collectionType)
{
    if (!m_isSafeToCollect)
        return;

    collect(collectionType);

    SamplingRegion samplingRegion("Garbage Collection: Sweeping");

    DeferGCForAWhile deferGC(*this);
    m_objectSpace.sweep();
    m_objectSpace.shrink();

    sweepAllLogicallyEmptyWeakBlocks();
}
JSObject* EvalExecutable::compileInternal(ExecState* exec, JSScope* scope, JITCode::JITType jitType, unsigned bytecodeIndex)
{
    SamplingRegion samplingRegion(samplingDescription(jitType));

#if !ENABLE(JIT)
    UNUSED_PARAM(jitType);
    UNUSED_PARAM(bytecodeIndex);
#endif
    JSGlobalData* globalData = &exec->globalData();
    JSGlobalObject* lexicalGlobalObject = exec->lexicalGlobalObject();

    if (!!m_evalCodeBlock) {
        OwnPtr<EvalCodeBlock> newCodeBlock = adoptPtr(new EvalCodeBlock(CodeBlock::CopyParsedBlock, *m_evalCodeBlock));
        newCodeBlock->setAlternative(static_pointer_cast<CodeBlock>(m_evalCodeBlock.release()));
        m_evalCodeBlock = newCodeBlock.release();
    } else {
        UNUSED_PARAM(scope);
        UNUSED_PARAM(globalData);
        UNUSED_PARAM(lexicalGlobalObject);
        if (!lexicalGlobalObject->evalEnabled())
            return throwError(exec, createEvalError(exec, lexicalGlobalObject->evalDisabledErrorMessage()));

        JSObject* exception = 0;
        UnlinkedEvalCodeBlock* unlinkedEvalCode = lexicalGlobalObject->createEvalCodeBlock(exec, this, &exception);
        if (!unlinkedEvalCode)
            return exception;

        OwnPtr<CodeBlock> previousCodeBlock = m_evalCodeBlock.release();
        ASSERT((jitType == JITCode::bottomTierJIT()) == !previousCodeBlock);
        m_unlinkedEvalCodeBlock.set(*globalData, this, unlinkedEvalCode);
        m_evalCodeBlock = adoptPtr(new EvalCodeBlock(this, unlinkedEvalCode, lexicalGlobalObject, source().provider(), scope->localDepth(), previousCodeBlock.release()));
        m_evalCodeBlock->copyPostParseDataFromAlternative();
    }

#if ENABLE(JIT)
    if (!prepareForExecution(exec, m_evalCodeBlock, m_jitCodeForCall, jitType, bytecodeIndex))
        return 0;
#endif

#if ENABLE(JIT)
    Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_evalCodeBlock) + m_jitCodeForCall.size());
#else
    Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_evalCodeBlock));
#endif

    return 0;
}
inline bool compile(CompileMode compileMode, JSGlobalData& globalData, CodeBlock* codeBlock, JITCode& jitCode, MacroAssemblerCodePtr* jitCodeWithArityCheck)
{
    SamplingRegion samplingRegion("DFG Compilation (Driver)");

    ASSERT(codeBlock);
    ASSERT(codeBlock->alternative());
    ASSERT(codeBlock->alternative()->getJITType() == JITCode::BaselineJIT);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("DFG compiling code block %p(%p), number of instructions = %u.\n", codeBlock, codeBlock->alternative(), codeBlock->instructionCount());
#endif

    Graph dfg(globalData, codeBlock);
    if (!parse(dfg))
        return false;

    if (compileMode == CompileFunction)
        dfg.predictArgumentTypes();

    performRedundantPhiElimination(dfg);
    performPredictionPropagation(dfg);
    performFixup(dfg);
    performCSE(dfg);
    performVirtualRegisterAllocation(dfg);
    performCFA(dfg);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("Graph after optimization:\n");
    dfg.dump();
#endif

    JITCompiler dataFlowJIT(dfg);
    bool result;
    if (compileMode == CompileFunction) {
        ASSERT(jitCodeWithArityCheck);
        result = dataFlowJIT.compileFunction(jitCode, *jitCodeWithArityCheck);
    } else {
        ASSERT(compileMode == CompileOther);
        ASSERT(!jitCodeWithArityCheck);
        result = dataFlowJIT.compile(jitCode);
    }

    return result;
}
void Plan::compileInThread(LongLivedState& longLivedState, ThreadData* threadData)
{
    this->threadData = threadData;

    double before = 0;
    if (reportCompileTimes())
        before = currentTimeMS();

    SamplingRegion samplingRegion("DFG Compilation (Plan)");
    CompilationScope compilationScope;

    if (logCompilationChanges(mode))
        dataLog("DFG(Plan) compiling ", *codeBlock, " with ", mode, ", number of instructions = ", codeBlock->instructionCount(), "\n");

    CompilationPath path = compileInThreadImpl(longLivedState);

    RELEASE_ASSERT(finalizer);

    if (reportCompileTimes()) {
        const char* pathName;
        switch (path) {
        case FailPath:
            pathName = "N/A (fail)";
            break;
        case DFGPath:
            pathName = "DFG";
            break;
        case FTLPath:
            pathName = "FTL";
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            pathName = "";
            break;
        }
        double now = currentTimeMS();
        dataLog("Optimized ", *codeBlock, " using ", mode, " with ", pathName, " into ", finalizer->codeSize(), " bytes in ", now - before, " ms");
        if (path == FTLPath)
            dataLog(" (DFG: ", beforeFTL - before, ", LLVM: ", now - beforeFTL, ")");
        dataLog(".\n");
    }
}
void JITCompiler::compile()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();
    m_speculative = adoptPtr(new SpeculativeJIT(*this));
    compileBody();
    setEndOfMainPath();

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();
}
JSObject* FunctionExecutable::compileForConstructInternal(ExecState* exec, ScopeChainNode* scopeChainNode, JITCode::JITType jitType)
{
    SamplingRegion samplingRegion(samplingDescription(jitType));

#if !ENABLE(JIT)
    UNUSED_PARAM(jitType);
    UNUSED_PARAM(exec);
#endif
    ASSERT((jitType == JITCode::bottomTierJIT()) == !m_codeBlockForConstruct);
    JSObject* exception;
    OwnPtr<FunctionCodeBlock> newCodeBlock = produceCodeBlockFor(scopeChainNode, !!m_codeBlockForConstruct ? OptimizingCompilation : FirstCompilation, CodeForConstruct, exception);
    if (!newCodeBlock)
        return exception;

    newCodeBlock->setAlternative(static_pointer_cast<CodeBlock>(m_codeBlockForConstruct.release()));
    m_codeBlockForConstruct = newCodeBlock.release();

    m_numParametersForConstruct = m_codeBlockForConstruct->numParameters();
    ASSERT(m_numParametersForConstruct);
    m_numCapturedVariables = m_codeBlockForConstruct->m_numCapturedVars;
    m_symbolTable = m_codeBlockForConstruct->sharedSymbolTable();

#if ENABLE(JIT)
    if (!prepareFunctionForExecution(exec, m_codeBlockForConstruct, m_jitCodeForConstruct, m_jitCodeForConstructWithArityCheck, m_symbolTable, jitType, CodeForConstruct))
        return 0;
#endif

#if ENABLE(JIT)
#if ENABLE(CLASSIC_INTERPRETER)
    if (!m_jitCodeForConstruct)
        Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_codeBlockForConstruct));
    else
#endif
        Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_codeBlockForConstruct) + m_jitCodeForConstruct.size());
#else
    Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_codeBlockForConstruct));
#endif

    return 0;
}
JSObject* FunctionExecutable::compileForCallInternal(ExecState* exec, ScopeChainNode* scopeChainNode, JITCode::JITType jitType)
{
    SamplingRegion samplingRegion(jitType == JITCode::BaselineJIT ? "Baseline Compilation (TOTAL)" : "DFG Compilation (TOTAL)");

#if !ENABLE(JIT)
    UNUSED_PARAM(exec);
    UNUSED_PARAM(jitType);
#endif
    ASSERT((jitType == JITCode::bottomTierJIT()) == !m_codeBlockForCall);
    JSObject* exception;
    OwnPtr<FunctionCodeBlock> newCodeBlock = produceCodeBlockFor(scopeChainNode, !!m_codeBlockForCall ? OptimizingCompilation : FirstCompilation, CodeForCall, exception);
    if (!newCodeBlock)
        return exception;

    newCodeBlock->setAlternative(static_pointer_cast<CodeBlock>(m_codeBlockForCall.release()));
    m_codeBlockForCall = newCodeBlock.release();

    m_numParametersForCall = m_codeBlockForCall->numParameters();
    ASSERT(m_numParametersForCall);
    m_numCapturedVariables = m_codeBlockForCall->m_numCapturedVars;
    m_symbolTable = m_codeBlockForCall->sharedSymbolTable();

#if ENABLE(JIT)
    if (!jitCompileFunctionIfAppropriate(exec->globalData(), m_codeBlockForCall, m_jitCodeForCall, m_jitCodeForCallWithArityCheck, m_symbolTable, jitType))
        return 0;
#endif

#if ENABLE(JIT)
#if ENABLE(INTERPRETER)
    if (!m_jitCodeForCall)
        Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_codeBlockForCall));
    else
#endif
        Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_codeBlockForCall) + m_jitCodeForCall.size());
#else
    Heap::heap(this)->reportExtraMemoryCost(sizeof(*m_codeBlockForCall));
#endif

    return 0;
}
void JITCompiler::compile()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();
    m_speculative = adoptPtr(new SpeculativeJIT(*this));

    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();
    compileBody();
    setEndOfMainPath();

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();
}
JSObject* FunctionExecutable::compileForConstructInternal(ExecState* exec, JSScope* scope, JITCode::JITType jitType, CompilationResult* result, unsigned bytecodeIndex)
{
    SamplingRegion samplingRegion(samplingDescription(jitType));

    if (result)
        *result = CompilationFailed;

    ASSERT((jitType == JITCode::bottomTierJIT()) == !m_codeBlockForConstruct);
    JSObject* exception = 0;
    RefPtr<FunctionCodeBlock> newCodeBlock = produceCodeBlockFor(scope, CodeForConstruct, exception);
    if (!newCodeBlock)
        return exception;

    CompilationResult theResult = prepareFunctionForExecution(
        exec, m_codeBlockForConstruct, newCodeBlock.get(), m_jitCodeForConstruct,
        m_jitCodeForConstructWithArityCheck, m_numParametersForConstruct, jitType,
        bytecodeIndex, CodeForConstruct);
    if (result)
        *result = theResult;

    return 0;
}
JSObject* ProgramExecutable::compileInternal(ExecState* exec, JSScope* scope, JITCode::JITType jitType, CompilationResult* result, unsigned bytecodeIndex)
{
    SamplingRegion samplingRegion(samplingDescription(jitType));

    if (result)
        *result = CompilationFailed;

    RefPtr<ProgramCodeBlock> newCodeBlock;

    if (!!m_programCodeBlock) {
        newCodeBlock = adoptRef(new ProgramCodeBlock(CodeBlock::CopyParsedBlock, *m_programCodeBlock));
        newCodeBlock->setAlternative(static_pointer_cast<CodeBlock>(m_programCodeBlock));
    } else {
        newCodeBlock = adoptRef(new ProgramCodeBlock(this, m_unlinkedProgramCodeBlock.get(), scope, source().provider(), source().startColumn()));
    }

    CompilationResult theResult = prepareForExecution(
        exec, m_programCodeBlock, newCodeBlock.get(), m_jitCodeForCall, jitType, bytecodeIndex);
    if (result)
        *result = theResult;

    return 0;
}
bool JITCompiler::compile(JITCode& entry)
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();
    SpeculativeJIT speculative(*this);
    compileBody(speculative);
    setEndOfMainPath();

    // Generate slow path code.
    speculative.runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    speculative.createOSREntries();
    setEndOfCode();

    LinkBuffer linkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate())
        return false;
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);

    if (shouldShowDisassembly())
        m_disassembler->dump(linkBuffer);
    if (m_graph.m_compilation)
        m_disassembler->reportToProfiler(m_graph.m_compilation.get(), linkBuffer);

    entry = JITCode(
        linkBuffer.finalizeCodeWithoutDisassembly(),
        JITCode::DFGJIT);
    return true;
}
bool performFixup(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Fixup Phase");
    return runPhase<FixupPhase>(graph);
}
void Heap::markRoots()
{
    SamplingRegion samplingRegion("Garbage Collection: Tracing");

    GCPHASE(MarkRoots);
    ASSERT(isValidThreadState(m_vm));

#if ENABLE(OBJECT_MARK_LOGGING)
    double gcStartTime = WTF::monotonicallyIncreasingTime();
#endif

    void* dummy;

    // We gather conservative roots before clearing mark bits because conservative
    // gathering uses the mark bits to determine whether a reference is valid.
    ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_jitStubRoutines.clearMarks();
    {
        GCPHASE(GatherConservativeRoots);
        m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy);
    }

    ConservativeRoots stackRoots(&m_objectSpace.blocks(), &m_storageSpace);
    m_codeBlocks.clearMarks();
    {
        GCPHASE(GatherStackRoots);
        stack().gatherConservativeRoots(stackRoots, m_jitStubRoutines, m_codeBlocks);
        stack().sanitizeStack();
    }

#if ENABLE(DFG_JIT)
    ConservativeRoots scratchBufferRoots(&m_objectSpace.blocks(), &m_storageSpace);
    {
        GCPHASE(GatherScratchBufferRoots);
        m_vm->gatherConservativeRoots(scratchBufferRoots);
    }
#endif

    {
        GCPHASE(ClearLivenessData);
        m_objectSpace.clearNewlyAllocated();
        m_objectSpace.clearMarks();
    }

    m_sharedData.didStartMarking();
    SlotVisitor& visitor = m_slotVisitor;
    visitor.setup();
    HeapRootVisitor heapRootVisitor(visitor);

    {
        ParallelModeEnabler enabler(visitor);

        m_vm->smallStrings.visitStrongReferences(visitor);

        {
            GCPHASE(VisitMachineRoots);
            MARK_LOG_ROOT(visitor, "C++ Stack");
            visitor.append(machineThreadRoots);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitStackRoots);
            MARK_LOG_ROOT(visitor, "Stack");
            visitor.append(stackRoots);
            visitor.donateAndDrain();
        }
#if ENABLE(DFG_JIT)
        {
            GCPHASE(VisitScratchBufferRoots);
            MARK_LOG_ROOT(visitor, "Scratch Buffers");
            visitor.append(scratchBufferRoots);
            visitor.donateAndDrain();
        }
#endif
        {
            GCPHASE(VisitProtectedObjects);
            MARK_LOG_ROOT(visitor, "Protected Objects");
            markProtectedObjects(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitTempSortVectors);
            MARK_LOG_ROOT(visitor, "Temp Sort Vectors");
            markTempSortVectors(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(MarkingArgumentBuffers);
            if (m_markListSet && m_markListSet->size()) {
                MARK_LOG_ROOT(visitor, "Argument Buffers");
                MarkedArgumentBuffer::markLists(heapRootVisitor, *m_markListSet);
                visitor.donateAndDrain();
            }
        }
        if (m_vm->exception()) {
            GCPHASE(MarkingException);
            MARK_LOG_ROOT(visitor, "Exceptions");
            heapRootVisitor.visit(m_vm->addressOfException());
            visitor.donateAndDrain();
        }
        {
            GCPHASE(VisitStrongHandles);
            MARK_LOG_ROOT(visitor, "Strong Handles");
            m_handleSet.visitStrongHandles(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(HandleStack);
            MARK_LOG_ROOT(visitor, "Handle Stack");
            m_handleStack.visit(heapRootVisitor);
            visitor.donateAndDrain();
        }
        {
            GCPHASE(TraceCodeBlocksAndJITStubRoutines);
            MARK_LOG_ROOT(visitor, "Trace Code Blocks and JIT Stub Routines");
            m_codeBlocks.traceMarked(visitor);
            m_jitStubRoutines.traceMarkedStubRoutines(visitor);
            visitor.donateAndDrain();
        }
#if ENABLE(PARALLEL_GC)
        {
            GCPHASE(Convergence);
            visitor.drainFromShared(SlotVisitor::MasterDrain);
        }
#endif
    }

    // Weak references must be marked last because their liveness depends on
    // the liveness of the rest of the object graph.
    {
        GCPHASE(VisitingLiveWeakHandles);
        MARK_LOG_ROOT(visitor, "Live Weak Handles");
        while (true) {
            m_objectSpace.visitWeakSets(heapRootVisitor);
            harvestWeakReferences();
            if (visitor.isEmpty())
                break;
            {
                ParallelModeEnabler enabler(visitor);
                visitor.donateAndDrain();
#if ENABLE(PARALLEL_GC)
                visitor.drainFromShared(SlotVisitor::MasterDrain);
#endif
            }
        }
    }

    GCCOUNTER(VisitedValueCount, visitor.visitCount());

    m_sharedData.didFinishMarking();
#if ENABLE(OBJECT_MARK_LOGGING)
    size_t visitCount = visitor.visitCount();
#if ENABLE(PARALLEL_GC)
    visitCount += m_sharedData.childVisitCount();
#endif
    MARK_LOG_MESSAGE2("\nNumber of live Objects after full GC %lu, took %.6f secs\n", visitCount, WTF::monotonicallyIncreasingTime() - gcStartTime);
#endif

    m_totalBytesVisited = visitor.bytesVisited();
    m_totalBytesCopied = visitor.bytesCopied();
#if ENABLE(PARALLEL_GC)
    m_totalBytesVisited += m_sharedData.childBytesVisited();
    m_totalBytesCopied += m_sharedData.childBytesCopied();
#endif

    visitor.reset();
#if ENABLE(PARALLEL_GC)
    m_sharedData.resetChildren();
#endif
    m_sharedData.reset();
}
bool performDCE(Graph& graph)
{
    SamplingRegion samplingRegion("DFG DCE Phase");
    return runPhase<DCEPhase>(graph);
}

bool performPredictionPropagation(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Prediction Propagation Phase");
    return runPhase<PredictionPropagationPhase>(graph);
}

bool performConstantFolding(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Constant Folding Phase");
    return runPhase<ConstantFoldingPhase>(graph);
}

bool performSSALowering(Graph& graph)
{
    SamplingRegion samplingRegion("DFG SSA Lowering Phase");
    return runPhase<SSALoweringPhase>(graph);
}

bool performTierUpCheckInjection(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Tier-up Check Injection");
    return runPhase<TierUpCheckInjectionPhase>(graph);
}

bool performCriticalEdgeBreaking(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Critical Edge Breaking Phase");
    return runPhase<CriticalEdgeBreakingPhase>(graph);
}

bool performWatchpointCollection(Graph& graph)
{
    SamplingRegion samplingRegion("DFG Watchpoint Collection Phase");
    return runPhase<WatchpointCollectionPhase>(graph);
}
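The one-line phase drivers above all share the same shape: open a named SamplingRegion, then delegate to a runPhase template that instantiates the phase over the graph. A hedged sketch of what such a driver can look like is below; the body is an assumption for illustration (JSC's actual runPhase lives in DFGPhase.h and also handles logging and graph validation), so the helper is deliberately named runPhaseSketch.

// Sketch of a generic phase driver, under the assumption that every phase type
// exposes a PhaseType(Graph&) constructor and a bool run() method, as the call
// sites above suggest. Not JSC's actual runPhase implementation.
template<typename PhaseType>
bool runPhaseSketch(Graph& graph)
{
    PhaseType phase(graph); // phases are cheap, stack-allocated objects...
    return phase.run();     // ...that report whether they changed the graph
}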
void Heap::collect(SweepToggle sweepToggle)
{
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC starting collection.\n");
#endif

    double before = 0;
    if (Options::logGC()) {
        dataLog("[GC", sweepToggle == DoSweep ? " (eager sweep)" : "", ": ");
        before = currentTimeMS();
    }

    SamplingRegion samplingRegion("Garbage Collection");

    RELEASE_ASSERT(!m_deferralDepth);
    GCPHASE(Collect);
    ASSERT(vm()->currentThreadIsHoldingAPILock());
    RELEASE_ASSERT(vm()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);

    m_deferralDepth++; // Make sure that we don't GC in this call.
    m_vm->prepareToDiscardCode();
    m_deferralDepth--; // Decrement the deferral depth manually so we don't trigger a GC here, since we are already GCing.

    m_operationInProgress = Collection;
    m_extraMemoryUsage = 0;

    if (m_activityCallback)
        m_activityCallback->willCollect();

    double lastGCStartTime = WTF::monotonicallyIncreasingTime();
    if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
        deleteAllCompiledCode();
        m_lastCodeDiscardTime = WTF::monotonicallyIncreasingTime();
    }

    {
        GCPHASE(StopAllocation);
        m_objectSpace.stopAllocating();
    }

    markRoots();

    {
        GCPHASE(ReapingWeakHandles);
        m_objectSpace.reapWeakSets();
    }

    JAVASCRIPTCORE_GC_MARKED();

    {
        GCPHASE(SweepingArrayBuffers);
        m_arrayBuffers.sweep();
    }

    {
        m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
        MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
        m_objectSpace.forEachBlock(functor);
    }

    copyBackingStores();

    {
        GCPHASE(FinalizeUnconditionalFinalizers);
        finalizeUnconditionalFinalizers();
    }

    {
        GCPHASE(DeleteCodeBlocks);
        deleteUnmarkedCompiledCode();
    }

    {
        GCPHASE(DeleteSourceProviderCaches);
        m_vm->clearSourceProviderCaches();
    }

    if (sweepToggle == DoSweep) {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        GCPHASE(Sweeping);
        m_objectSpace.sweep();
        m_objectSpace.shrink();
    }

    m_sweeper->startSweeping(m_blockSnapshot);
    m_bytesAbandoned = 0;

    {
        GCPHASE(ResetAllocators);
        m_objectSpace.resetAllocators();
    }

    size_t currentHeapSize = sizeAfterCollect();
    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
        HeapStatistics::exitWithFailure();

    m_sizeAfterLastCollect = currentHeapSize;

    // To avoid pathological GC churn in very small and very large heaps, we set
    // the new allocation limit based on the current size of the heap, with a
    // fixed minimum.
    size_t maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
    m_bytesAllocatedLimit = maxHeapSize - currentHeapSize;

    m_bytesAllocated = 0;
    double lastGCEndTime = WTF::monotonicallyIncreasingTime();
    m_lastGCLength = lastGCEndTime - lastGCStartTime;

    if (Options::recordGCPauseTimes())
        HeapStatistics::recordGCPauseTime(lastGCStartTime, lastGCEndTime);
    RELEASE_ASSERT(m_operationInProgress == Collection);

    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();

    if (Options::useZombieMode())
        zombifyDeadObjects();

    if (Options::objectsAreImmortal())
        markDeadObjects();

    if (Options::showObjectStatistics())
        HeapStatistics::showObjectStatistics(this);

    if (Options::logGC()) {
        double after = currentTimeMS();
        dataLog(after - before, " ms, ", currentHeapSize / 1024, " kb]\n");
    }
#if ENABLE(ALLOCATION_LOGGING)
    dataLogF("JSC GC finishing collection.\n");
#endif
}