void JSGlobalObject::mark()
{
    JSVariableObject::mark();

    HashSet<ProgramCodeBlock*>::const_iterator end = codeBlocks().end();
    for (HashSet<ProgramCodeBlock*>::const_iterator it = codeBlocks().begin(); it != end; ++it)
        (*it)->mark();

    RegisterFile& registerFile = globalData()->interpreter->registerFile();
    if (registerFile.globalObject() == this)
        registerFile.markGlobals(&globalData()->heap);

    markIfNeeded(d()->regExpConstructor);
    markIfNeeded(d()->errorConstructor);
    markIfNeeded(d()->evalErrorConstructor);
    markIfNeeded(d()->rangeErrorConstructor);
    markIfNeeded(d()->referenceErrorConstructor);
    markIfNeeded(d()->syntaxErrorConstructor);
    markIfNeeded(d()->typeErrorConstructor);
    markIfNeeded(d()->URIErrorConstructor);

    markIfNeeded(d()->evalFunction);
    markIfNeeded(d()->callFunction);
    markIfNeeded(d()->applyFunction);

    markIfNeeded(d()->objectPrototype);
    markIfNeeded(d()->functionPrototype);
    markIfNeeded(d()->arrayPrototype);
    markIfNeeded(d()->booleanPrototype);
    markIfNeeded(d()->stringPrototype);
    markIfNeeded(d()->numberPrototype);
    markIfNeeded(d()->datePrototype);
    markIfNeeded(d()->regExpPrototype);

    markIfNeeded(d()->errorStructure);

    // No need to mark the other structures, because their prototypes are all
    // guaranteed to be referenced elsewhere.

    Register* registerArray = d()->registerArray.get();
    if (!registerArray)
        return;

    size_t size = d()->registerArraySize;
    for (size_t i = 0; i < size; ++i) {
        Register& r = registerArray[i];
        if (!r.marked())
            r.mark();
    }
}
void Heap::collect(SweepToggle sweepToggle)
{
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    JAVASCRIPTCORE_GC_BEGIN();

    markRoots();
    m_handleHeap.finalizeWeakHandles();
    m_globalData->smallStrings.finalizeSmallStrings();

    JAVASCRIPTCORE_GC_MARKED();

    resetAllocator();

#if ENABLE(JSC_ZOMBIES)
    sweepToggle = DoSweep;
#endif

    if (sweepToggle == DoSweep) {
        sweep();
        shrink();
    }

    // To avoid pathological GC churn in large heaps, we set the allocation high
    // water mark to be proportional to the current size of the heap. The exact
    // proportion is a bit arbitrary. A 2X multiplier gives a 1:1 (heap size :
    // new bytes allocated) proportion, and seems to work well in benchmarks.
    size_t proportionalBytes = 2 * size();
    m_newSpace.setHighWaterMark(max(proportionalBytes, minBytesPerCycle));

    JAVASCRIPTCORE_GC_END();

    (*m_activityCallback)();
}
void WorkerScriptController::disableEval(const String& errorMessage)
{
    initScriptIfNeeded();
    JSLockHolder lock(globalData());

    m_workerContextWrapper->setEvalEnabled(false, errorMessage);
}
JSGlobalObject::~JSGlobalObject()
{
    ASSERT(JSLock::currentThreadIsHoldingLock());

    if (d()->debugger)
        d()->debugger->detach(this);

    Profiler** profiler = Profiler::enabledProfilerReference();
    if (UNLIKELY(*profiler != 0)) {
        (*profiler)->stopProfiling(globalExec(), UString());
    }

    d()->next->d()->prev = d()->prev;
    d()->prev->d()->next = d()->next;
    JSGlobalObject*& headObject = head();
    if (headObject == this)
        headObject = d()->next;
    if (headObject == this)
        headObject = 0;

    HashSet<ProgramCodeBlock*>::const_iterator end = codeBlocks().end();
    for (HashSet<ProgramCodeBlock*>::const_iterator it = codeBlocks().begin(); it != end; ++it)
        (*it)->clearGlobalObject();

    RegisterFile& registerFile = globalData()->interpreter->registerFile();
    if (registerFile.globalObject() == this) {
        registerFile.setGlobalObject(0);
        registerFile.setNumGlobals(0);
    }

    delete d();
}
MarkedBlock* MarkedSpace::allocateBlock(SizeClass& sizeClass)
{
    MarkedBlock* block = MarkedBlock::create(globalData(), sizeClass.cellSize);
    sizeClass.blockList.append(block);
    sizeClass.nextBlock = block;
    m_blocks.add(block);
    return block;
}
void Heap::collect(SweepToggle sweepToggle)
{
    GCPHASE(Collect);
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();

#if ENABLE(GGC)
    bool fullGC = sweepToggle == DoSweep;
    if (!fullGC)
        fullGC = (capacity() > 4 * m_lastFullGCSize);
#else
    bool fullGC = true;
#endif

    {
        GCPHASE(Canonicalize);
        canonicalizeCellLivenessData();
    }

    markRoots(fullGC);

    {
        GCPHASE(HarvestWeakReferences);
        harvestWeakReferences();
        m_handleHeap.finalizeWeakHandles();
        m_globalData->smallStrings.finalizeSmallStrings();
    }

    JAVASCRIPTCORE_GC_MARKED();

    {
        GCPHASE(ResetAllocator);
        resetAllocator();
    }

    if (sweepToggle == DoSweep) {
        GCPHASE(Sweeping);
        sweep();
        shrink();
    }

    // To avoid pathological GC churn in large heaps, we set the allocation high
    // water mark to be proportional to the current size of the heap. The exact
    // proportion is a bit arbitrary. A 2X multiplier gives a 1:1 (heap size :
    // new bytes allocated) proportion, and seems to work well in benchmarks.
    size_t newSize = size();
    size_t proportionalBytes = 2 * newSize;
    if (fullGC) {
        m_lastFullGCSize = newSize;
        m_objectSpace.setHighWaterMark(max(proportionalBytes, m_minBytesPerCycle));
    }

    JAVASCRIPTCORE_GC_END();

    (*m_activityCallback)();
}
void WorkerScriptController::evaluate(const ScriptSourceCode& sourceCode)
{
    if (isExecutionForbidden())
        return;

    ScriptValue exception;
    evaluate(sourceCode, &exception);
    if (exception.jsValue()) {
        JSLockHolder lock(globalData());
        reportException(m_workerContextWrapper->globalExec(), exception.jsValue());
    }
}
void JSGlobalObject::resizeRegisters(int oldSize, int newSize)
{
    ASSERT(oldSize <= newSize);
    if (newSize == oldSize)
        return;
    ASSERT(newSize && newSize > oldSize);

    if (m_registerArray || !m_registers) {
        ASSERT(static_cast<size_t>(oldSize) == m_registerArraySize);
        OwnArrayPtr<WriteBarrier<Unknown> > registerArray = adoptArrayPtr(new WriteBarrier<Unknown>[newSize]);
        for (int i = 0; i < oldSize; i++)
            registerArray[newSize - oldSize + i].set(globalData(), this, m_registerArray[i].get());
        WriteBarrier<Unknown>* registers = registerArray.get() + newSize;
        setRegisters(registers, registerArray.release(), newSize);
    } else {
        ASSERT(static_cast<size_t>(newSize) < globalData().interpreter->registerFile().maxGlobals());
        globalData().interpreter->registerFile().setNumGlobals(newSize);
    }

    for (int i = -newSize; i < -oldSize; ++i)
        m_registers[i].setUndefined();
}
void JSGlobalObject::copyGlobalsFrom(RegisterFile& registerFile)
{
    ASSERT(!m_registerArray);
    ASSERT(!m_registerArraySize);

    int numGlobals = registerFile.numGlobals();
    if (!numGlobals) {
        m_registers = 0;
        return;
    }

    OwnArrayPtr<WriteBarrier<Unknown> > registerArray = copyRegisterArray(globalData(), reinterpret_cast<WriteBarrier<Unknown>*>(registerFile.lastGlobal()), numGlobals, numGlobals);
    WriteBarrier<Unknown>* registers = registerArray.get() + numGlobals;
    setRegisters(registers, registerArray.release(), numGlobals);
}
void* Heap::allocateSlowCase(size_t bytes)
{
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(JSLock::lockCount() > 0);
    ASSERT(JSLock::currentThreadIsHoldingLock());
    ASSERT(bytes <= MarkedSpace::maxCellSize);
    ASSERT(m_operationInProgress == NoOperation);

#if COLLECT_ON_EVERY_SLOW_ALLOCATION
    collectAllGarbage();
    ASSERT(m_operationInProgress == NoOperation);
#endif

    reset(DoNotSweep);

    m_operationInProgress = Allocation;
    void* result = m_markedSpace.allocate(bytes);
    m_operationInProgress = NoOperation;

    ASSERT(result);
    return result;
}
void JSDOMGlobalObject::setInjectedScript(JSObject* injectedScript)
{
    m_injectedScript.setMayBeNull(globalData(), this, injectedScript);
}
void JSGlobalObject::markChildren(MarkStack& markStack)
{
    JSVariableObject::markChildren(markStack);

    HashSet<GlobalCodeBlock*>::const_iterator end = codeBlocks().end();
    for (HashSet<GlobalCodeBlock*>::const_iterator it = codeBlocks().begin(); it != end; ++it)
        (*it)->markAggregate(markStack);

    RegisterFile& registerFile = globalData()->interpreter->registerFile();
    if (registerFile.globalObject() == this)
        registerFile.markGlobals(markStack, &globalData()->heap);

    markIfNeeded(markStack, d()->regExpConstructor);
    markIfNeeded(markStack, d()->errorConstructor);
    markIfNeeded(markStack, d()->evalErrorConstructor);
    markIfNeeded(markStack, d()->rangeErrorConstructor);
    markIfNeeded(markStack, d()->referenceErrorConstructor);
    markIfNeeded(markStack, d()->syntaxErrorConstructor);
    markIfNeeded(markStack, d()->typeErrorConstructor);
    markIfNeeded(markStack, d()->URIErrorConstructor);

    markIfNeeded(markStack, d()->evalFunction);
    markIfNeeded(markStack, d()->callFunction);
    markIfNeeded(markStack, d()->applyFunction);

    markIfNeeded(markStack, d()->objectPrototype);
    markIfNeeded(markStack, d()->functionPrototype);
    markIfNeeded(markStack, d()->arrayPrototype);
    markIfNeeded(markStack, d()->booleanPrototype);
    markIfNeeded(markStack, d()->stringPrototype);
    markIfNeeded(markStack, d()->numberPrototype);
    markIfNeeded(markStack, d()->datePrototype);
    markIfNeeded(markStack, d()->regExpPrototype);

    markIfNeeded(markStack, d()->methodCallDummy);

    markIfNeeded(markStack, d()->errorStructure);
    markIfNeeded(markStack, d()->argumentsStructure);
    markIfNeeded(markStack, d()->arrayStructure);
    markIfNeeded(markStack, d()->booleanObjectStructure);
    markIfNeeded(markStack, d()->callbackConstructorStructure);
    markIfNeeded(markStack, d()->callbackFunctionStructure);
    markIfNeeded(markStack, d()->callbackObjectStructure);
    markIfNeeded(markStack, d()->dateStructure);
    markIfNeeded(markStack, d()->emptyObjectStructure);
    markIfNeeded(markStack, d()->errorStructure);
    markIfNeeded(markStack, d()->functionStructure);
    markIfNeeded(markStack, d()->numberObjectStructure);
    markIfNeeded(markStack, d()->prototypeFunctionStructure);
    markIfNeeded(markStack, d()->regExpMatchesArrayStructure);
    markIfNeeded(markStack, d()->regExpStructure);
    markIfNeeded(markStack, d()->stringObjectStructure);

    // No need to mark the other structures, because their prototypes are all
    // guaranteed to be referenced elsewhere.

    Register* registerArray = d()->registerArray.get();
    if (!registerArray)
        return;

    size_t size = d()->registerArraySize;
    markStack.appendValues(reinterpret_cast<JSValue*>(registerArray), size);
}
void JSGlobalObject::stopTimeoutCheck()
{
    globalData()->interpreter->stopTimeoutCheck();
}
void JSGlobalObject::setTimeoutTime(unsigned timeoutTime)
{
    globalData()->interpreter->setTimeoutTime(timeoutTime);
}
void fixupNode(Node& node)
{
    if (!node.shouldGenerate())
        return;

    NodeType op = node.op();

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
    dataLog(" %s @%u: ", Graph::opName(op), m_compileIndex);
#endif

    switch (op) {
    case GetById: {
        if (!isInt32Prediction(m_graph[m_compileIndex].prediction()))
            break;
        if (codeBlock()->identifier(node.identifierNumber()) != globalData().propertyNames->length)
            break;
        bool isArray = isArrayPrediction(m_graph[node.child1()].prediction());
        bool isString = isStringPrediction(m_graph[node.child1()].prediction());
        bool isByteArray = m_graph[node.child1()].shouldSpeculateByteArray();
        bool isInt8Array = m_graph[node.child1()].shouldSpeculateInt8Array();
        bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
        bool isInt32Array = m_graph[node.child1()].shouldSpeculateInt32Array();
        bool isUint8Array = m_graph[node.child1()].shouldSpeculateUint8Array();
        bool isUint8ClampedArray = m_graph[node.child1()].shouldSpeculateUint8ClampedArray();
        bool isUint16Array = m_graph[node.child1()].shouldSpeculateUint16Array();
        bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
        bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
        bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
        if (!isArray && !isString && !isByteArray && !isInt8Array && !isInt16Array && !isInt32Array && !isUint8Array && !isUint8ClampedArray && !isUint16Array && !isUint32Array && !isFloat32Array && !isFloat64Array)
            break;

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        dataLog(" @%u -> %s", m_compileIndex, isArray ? "GetArrayLength" : "GetStringLength");
#endif
        if (isArray)
            node.setOp(GetArrayLength);
        else if (isString)
            node.setOp(GetStringLength);
        else if (isByteArray)
            node.setOp(GetByteArrayLength);
        else if (isInt8Array)
            node.setOp(GetInt8ArrayLength);
        else if (isInt16Array)
            node.setOp(GetInt16ArrayLength);
        else if (isInt32Array)
            node.setOp(GetInt32ArrayLength);
        else if (isUint8Array)
            node.setOp(GetUint8ArrayLength);
        else if (isUint8ClampedArray)
            node.setOp(GetUint8ClampedArrayLength);
        else if (isUint16Array)
            node.setOp(GetUint16ArrayLength);
        else if (isUint32Array)
            node.setOp(GetUint32ArrayLength);
        else if (isFloat32Array)
            node.setOp(GetFloat32ArrayLength);
        else if (isFloat64Array)
            node.setOp(GetFloat64ArrayLength);
        else
            ASSERT_NOT_REACHED();
        // No longer MustGenerate
        ASSERT(node.flags() & NodeMustGenerate);
        node.clearFlags(NodeMustGenerate);
        m_graph.deref(m_compileIndex);
        break;
    }
    case GetIndexedPropertyStorage: {
        PredictedType basePrediction = m_graph[node.child2()].prediction();
        if (!(basePrediction & PredictInt32) && basePrediction) {
            node.setOpAndDefaultFlags(Nop);
            m_graph.clearAndDerefChild1(node);
            m_graph.clearAndDerefChild2(node);
            m_graph.clearAndDerefChild3(node);
            node.setRefCount(0);
        }
        break;
    }
    case GetByVal:
    case StringCharAt:
    case StringCharCodeAt: {
        if (!!node.child3() && m_graph[node.child3()].op() == Nop)
            node.children.child3() = Edge();
        break;
    }
    case ValueToInt32: {
        if (m_graph[node.child1()].shouldSpeculateNumber()) {
            node.clearFlags(NodeMustGenerate);
            m_graph.deref(m_compileIndex);
        }
        break;
    }
    case BitAnd:
    case BitOr:
    case BitXor:
    case BitRShift:
    case BitLShift:
    case BitURShift: {
        fixIntEdge(node.children.child1());
        fixIntEdge(node.children.child2());
        break;
    }
    case CompareEq:
    case CompareLess:
    case CompareLessEq:
    case CompareGreater:
    case CompareGreaterEq:
    case CompareStrictEq: {
        if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]))
            break;
        if (!Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()]))
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }
    case LogicalNot: {
        if (m_graph[node.child1()].shouldSpeculateInteger())
            break;
        if (!m_graph[node.child1()].shouldSpeculateNumber())
            break;
        fixDoubleEdge(0);
        break;
    }
    case Branch: {
        if (!m_graph[node.child1()].shouldSpeculateInteger() && m_graph[node.child1()].shouldSpeculateNumber())
            fixDoubleEdge(0);

        Node& myNode = m_graph[m_compileIndex]; // reload because the graph may have changed
        Edge logicalNotEdge = myNode.child1();
        Node& logicalNot = m_graph[logicalNotEdge];
        if (logicalNot.op() == LogicalNot && logicalNot.adjustedRefCount() == 1) {
            Edge newChildEdge = logicalNot.child1();
            m_graph.ref(newChildEdge);
            m_graph.deref(logicalNotEdge);
            myNode.children.setChild1(newChildEdge);

            BlockIndex toBeTaken = myNode.notTakenBlockIndex();
            BlockIndex toBeNotTaken = myNode.takenBlockIndex();
            myNode.setTakenBlockIndex(toBeTaken);
            myNode.setNotTakenBlockIndex(toBeNotTaken);
        }
        break;
    }
    case SetLocal: {
        if (m_graph.isCaptured(node.local()))
            break;
        if (!node.variableAccessData()->shouldUseDoubleFormat())
            break;
        fixDoubleEdge(0);
        break;
    }
    case ArithAdd:
    case ValueAdd: {
        if (m_graph.addShouldSpeculateInteger(node))
            break;
        if (!Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()]))
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }
    case ArithSub: {
        if (m_graph.addShouldSpeculateInteger(node) && node.canSpeculateInteger())
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }
    case ArithNegate: {
        if (m_graph.negateShouldSpeculateInteger(node))
            break;
        fixDoubleEdge(0);
        break;
    }
    case ArithMin:
    case ArithMax:
    case ArithMul:
    case ArithDiv:
    case ArithMod: {
        if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]) && node.canSpeculateInteger())
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }
    case ArithAbs: {
        if (m_graph[node.child1()].shouldSpeculateInteger() && node.canSpeculateInteger())
            break;
        fixDoubleEdge(0);
        break;
    }
    case ArithSqrt: {
        fixDoubleEdge(0);
        break;
    }
    case PutByVal: {
        if (!m_graph[node.child1()].prediction() || !m_graph[node.child2()].prediction())
            break;
        if (!m_graph[node.child2()].shouldSpeculateInteger())
            break;
        if (isActionableIntMutableArrayPrediction(m_graph[node.child1()].prediction())) {
            if (m_graph[node.child3()].isConstant())
                break;
            if (m_graph[node.child3()].shouldSpeculateInteger())
                break;
            fixDoubleEdge(2);
            break;
        }
        if (isActionableFloatMutableArrayPrediction(m_graph[node.child1()].prediction())) {
            fixDoubleEdge(2);
            break;
        }
        break;
    }
    default:
        break;
    }

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
    if (!(node.flags() & NodeHasVarArgs)) {
        dataLog("new children: ");
        node.dumpChildren(WTF::dataFile());
    }
    dataLog("\n");
#endif
}
WorkerScriptController::~WorkerScriptController()
{
    JSLockHolder lock(globalData());
    m_workerContextWrapper.clear();
    m_globalData.clear();
}
void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLog("JIT code for %p start at [%p, %p). Size = %zu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
#endif

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    m_codeBlock->callReturnIndexVector().reserveCapacity(m_exceptionChecks.size());
    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
        CodeOrigin codeOrigin = m_exceptionChecks[i].m_codeOrigin;
        while (codeOrigin.inlineCallFrame)
            codeOrigin = codeOrigin.inlineCallFrame->caller;
        unsigned exceptionInfo = codeOrigin.bytecodeIndex;
        m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
    }

    Vector<CodeOriginAtCallReturnOffset>& codeOrigins = m_codeBlock->codeOrigins();
    codeOrigins.resize(m_exceptionChecks.size());
    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        CallExceptionRecord& record = m_exceptionChecks[i];
        unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
        codeOrigins[i].codeOrigin = record.m_codeOrigin;
        codeOrigins[i].callReturnOffset = returnAddressOffset;
        record.m_token.assertCodeOriginIndex(i);
    }

    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size());
    for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->call());
        info.codeOrigin = m_propertyAccesses[i].m_codeOrigin;
        info.callReturnLocation = callReturnLocation;
        info.patch.dfg.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_structureImm), callReturnLocation);
        info.patch.dfg.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_structureCheck));
#if USE(JSVALUE64)
        info.patch.dfg.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_loadOrStore));
#else
        info.patch.dfg.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_tagLoadOrStore));
        info.patch.dfg.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_payloadLoadOrStore));
#endif
        info.patch.dfg.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->label()));
        info.patch.dfg.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_done));
        info.patch.dfg.baseGPR = m_propertyAccesses[i].m_baseGPR;
#if USE(JSVALUE64)
        info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#else
        info.patch.dfg.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
        info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#endif
        info.patch.dfg.scratchGPR = m_propertyAccesses[i].m_scratchGPR;
        info.patch.dfg.registersFlushed = m_propertyAccesses[i].m_registerMode == PropertyAccessRecord::RegistersFlushed;
    }

    m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_jsCalls[i].m_callType;
        info.isDFG = true;
        info.callReturnLocation = CodeLocationLabel(linkBuffer.locationOf(m_jsCalls[i].m_slowCall));
        info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
        info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
    }

    MacroAssemblerCodeRef osrExitThunk = globalData()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
        OSRExit& exit = codeBlock()->osrExit(i);
        linkBuffer.link(exit.m_check.lateJump(), target);
        exit.m_check.correctLateJump(linkBuffer);
        if (exit.m_watchpointIndex != std::numeric_limits<unsigned>::max())
            codeBlock()->watchpoint(exit.m_watchpointIndex).correctLabels(linkBuffer);
    }

    codeBlock()->shrinkToFit(CodeBlock::LateShrink);
}
void fixupNode(Node& node)
{
    if (!node.shouldGenerate())
        return;

    NodeType op = node.op();

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
    dataLog(" %s @%u: ", Graph::opName(op), m_compileIndex);
#endif

    switch (op) {
    case GetById: {
        if (!isInt32Speculation(m_graph[m_compileIndex].prediction()))
            break;
        if (codeBlock()->identifier(node.identifierNumber()) != globalData().propertyNames->length)
            break;
        bool isArray = isArraySpeculation(m_graph[node.child1()].prediction());
        bool isArguments = isArgumentsSpeculation(m_graph[node.child1()].prediction());
        bool isString = isStringSpeculation(m_graph[node.child1()].prediction());
        bool isInt8Array = m_graph[node.child1()].shouldSpeculateInt8Array();
        bool isInt16Array = m_graph[node.child1()].shouldSpeculateInt16Array();
        bool isInt32Array = m_graph[node.child1()].shouldSpeculateInt32Array();
        bool isUint8Array = m_graph[node.child1()].shouldSpeculateUint8Array();
        bool isUint8ClampedArray = m_graph[node.child1()].shouldSpeculateUint8ClampedArray();
        bool isUint16Array = m_graph[node.child1()].shouldSpeculateUint16Array();
        bool isUint32Array = m_graph[node.child1()].shouldSpeculateUint32Array();
        bool isFloat32Array = m_graph[node.child1()].shouldSpeculateFloat32Array();
        bool isFloat64Array = m_graph[node.child1()].shouldSpeculateFloat64Array();
        if (!isArray && !isArguments && !isString && !isInt8Array && !isInt16Array && !isInt32Array && !isUint8Array && !isUint8ClampedArray && !isUint16Array && !isUint32Array && !isFloat32Array && !isFloat64Array)
            break;

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
        dataLog(" @%u -> %s", m_compileIndex, isArray ? "GetArrayLength" : "GetStringLength");
#endif
        if (isArray) {
            node.setOp(GetArrayLength);
            ASSERT(node.flags() & NodeMustGenerate);
            node.clearFlags(NodeMustGenerate);
            m_graph.deref(m_compileIndex);

            ArrayProfile* arrayProfile = m_graph.baselineCodeBlockFor(node.codeOrigin)->getArrayProfile(node.codeOrigin.bytecodeIndex);
            if (!arrayProfile)
                break;
            arrayProfile->computeUpdatedPrediction();
            if (!arrayProfile->hasDefiniteStructure())
                break;
            m_graph.ref(node.child1());
            Node checkStructure(CheckStructure, node.codeOrigin, OpInfo(m_graph.addStructureSet(arrayProfile->expectedStructure())), node.child1().index());
            checkStructure.ref();
            NodeIndex checkStructureIndex = m_graph.size();
            m_graph.append(checkStructure);
            m_insertionSet.append(m_indexInBlock, checkStructureIndex);
            break;
        }
        if (isArguments)
            node.setOp(GetArgumentsLength);
        else if (isString)
            node.setOp(GetStringLength);
        else if (isInt8Array)
            node.setOp(GetInt8ArrayLength);
        else if (isInt16Array)
            node.setOp(GetInt16ArrayLength);
        else if (isInt32Array)
            node.setOp(GetInt32ArrayLength);
        else if (isUint8Array)
            node.setOp(GetUint8ArrayLength);
        else if (isUint8ClampedArray)
            node.setOp(GetUint8ClampedArrayLength);
        else if (isUint16Array)
            node.setOp(GetUint16ArrayLength);
        else if (isUint32Array)
            node.setOp(GetUint32ArrayLength);
        else if (isFloat32Array)
            node.setOp(GetFloat32ArrayLength);
        else if (isFloat64Array)
            node.setOp(GetFloat64ArrayLength);
        else
            ASSERT_NOT_REACHED();
        // No longer MustGenerate
        ASSERT(node.flags() & NodeMustGenerate);
        node.clearFlags(NodeMustGenerate);
        m_graph.deref(m_compileIndex);
        break;
    }
    case GetIndexedPropertyStorage: {
        if (!m_graph[node.child1()].prediction() || !m_graph[node.child2()].shouldSpeculateInteger() || m_graph[node.child1()].shouldSpeculateArguments()) {
            node.setOpAndDefaultFlags(Nop);
            m_graph.clearAndDerefChild1(node);
            m_graph.clearAndDerefChild2(node);
            m_graph.clearAndDerefChild3(node);
            node.setRefCount(0);
        }
        break;
    }
    case GetByVal:
    case StringCharAt:
    case StringCharCodeAt: {
        if (!!node.child3() && m_graph[node.child3()].op() == Nop)
            node.children.child3() = Edge();
        break;
    }
    case ValueToInt32: {
        if (m_graph[node.child1()].shouldSpeculateNumber() && node.mustGenerate()) {
            node.clearFlags(NodeMustGenerate);
            m_graph.deref(m_compileIndex);
        }
        break;
    }
    case BitAnd:
    case BitOr:
    case BitXor:
    case BitRShift:
    case BitLShift:
    case BitURShift: {
        fixIntEdge(node.children.child1());
        fixIntEdge(node.children.child2());
        break;
    }
    case CompareEq:
    case CompareLess:
    case CompareLessEq:
    case CompareGreater:
    case CompareGreaterEq:
    case CompareStrictEq: {
        if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]))
            break;
        if (!Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()]))
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }
    case LogicalNot: {
        if (m_graph[node.child1()].shouldSpeculateInteger())
            break;
        if (!m_graph[node.child1()].shouldSpeculateNumber())
            break;
        fixDoubleEdge(0);
        break;
    }
    case Branch: {
        if (!m_graph[node.child1()].shouldSpeculateInteger() && m_graph[node.child1()].shouldSpeculateNumber())
            fixDoubleEdge(0);

        Node& myNode = m_graph[m_compileIndex]; // reload because the graph may have changed
        Edge logicalNotEdge = myNode.child1();
        Node& logicalNot = m_graph[logicalNotEdge];
        if (logicalNot.op() == LogicalNot && logicalNot.adjustedRefCount() == 1) {
            Edge newChildEdge = logicalNot.child1();
            if (m_graph[newChildEdge].hasBooleanResult()) {
                m_graph.ref(newChildEdge);
                m_graph.deref(logicalNotEdge);
                myNode.children.setChild1(newChildEdge);

                BlockIndex toBeTaken = myNode.notTakenBlockIndex();
                BlockIndex toBeNotTaken = myNode.takenBlockIndex();
                myNode.setTakenBlockIndex(toBeTaken);
                myNode.setNotTakenBlockIndex(toBeNotTaken);
            }
        }
        break;
    }
    case SetLocal: {
        if (node.variableAccessData()->isCaptured())
            break;
        if (!node.variableAccessData()->shouldUseDoubleFormat())
            break;
        fixDoubleEdge(0);
        break;
    }
    case ArithAdd:
    case ValueAdd: {
        if (m_graph.addShouldSpeculateInteger(node))
            break;
        if (!Node::shouldSpeculateNumber(m_graph[node.child1()], m_graph[node.child2()]))
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }
    case ArithSub: {
        if (m_graph.addShouldSpeculateInteger(node) && node.canSpeculateInteger())
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }
    case ArithNegate: {
        if (m_graph.negateShouldSpeculateInteger(node))
            break;
        fixDoubleEdge(0);
        break;
    }
    case ArithMin:
    case ArithMax:
    case ArithMod: {
        if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]) && node.canSpeculateInteger())
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }
    case ArithMul: {
        if (m_graph.mulShouldSpeculateInteger(node))
            break;
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }
    case ArithDiv: {
        if (Node::shouldSpeculateInteger(m_graph[node.child1()], m_graph[node.child2()]) && node.canSpeculateInteger()) {
            if (isX86())
                break;
            fixDoubleEdge(0);
            fixDoubleEdge(1);

            Node& oldDivision = m_graph[m_compileIndex];
            Node newDivision = oldDivision;
            newDivision.setRefCount(2);
            newDivision.predict(SpecDouble);
            NodeIndex newDivisionIndex = m_graph.size();

            oldDivision.setOp(DoubleAsInt32);
            oldDivision.children.initialize(Edge(newDivisionIndex, DoubleUse), Edge(), Edge());

            m_graph.append(newDivision);
            m_insertionSet.append(m_indexInBlock, newDivisionIndex);
            break;
        }
        fixDoubleEdge(0);
        fixDoubleEdge(1);
        break;
    }
    case ArithAbs: {
        if (m_graph[node.child1()].shouldSpeculateInteger() && node.canSpeculateInteger())
            break;
        fixDoubleEdge(0);
        break;
    }
    case ArithSqrt: {
        fixDoubleEdge(0);
        break;
    }
    case PutByVal:
    case PutByValSafe: {
        Edge child1 = m_graph.varArgChild(node, 0);
        Edge child2 = m_graph.varArgChild(node, 1);
        Edge child3 = m_graph.varArgChild(node, 2);
        if (!m_graph[child1].prediction() || !m_graph[child2].prediction())
            break;
        if (!m_graph[child2].shouldSpeculateInteger())
            break;
        if (isActionableIntMutableArraySpeculation(m_graph[child1].prediction())) {
            if (m_graph[child3].isConstant())
                break;
            if (m_graph[child3].shouldSpeculateInteger())
                break;
            fixDoubleEdge(2);
            break;
        }
        if (isActionableFloatMutableArraySpeculation(m_graph[child1].prediction())) {
            fixDoubleEdge(2);
            break;
        }
        break;
    }
    default:
        break;
    }

#if DFG_ENABLE(DEBUG_PROPAGATION_VERBOSE)
    if (!(node.flags() & NodeHasVarArgs)) {
        dataLog("new children: ");
        node.dumpChildren(WTF::dataFile());
    }
    dataLog("\n");
#endif
}
void Heap::collect(SweepToggle sweepToggle)
{
    SamplingRegion samplingRegion("Garbage Collection");

    GCPHASE(Collect);
    ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();

    m_activityCallback->willCollect();

    double lastGCStartTime = WTF::currentTime();
    if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
        discardAllCompiledCode();
        m_lastCodeDiscardTime = WTF::currentTime();
    }

#if ENABLE(GGC)
    bool fullGC = sweepToggle == DoSweep;
    if (!fullGC)
        fullGC = (capacity() > 4 * m_sizeAfterLastCollect);
#else
    bool fullGC = true;
#endif

    {
        GCPHASE(Canonicalize);
        canonicalizeCellLivenessData();
    }

    markRoots(fullGC);

    {
        GCPHASE(FinalizeUnconditionalFinalizers);
        finalizeUnconditionalFinalizers();
    }

    {
        GCPHASE(FinalizeWeakHandles);
        m_weakSet.sweep();
        m_globalData->smallStrings.finalizeSmallStrings();
    }

    JAVASCRIPTCORE_GC_MARKED();

    {
        GCPHASE(ResetAllocator);
        resetAllocators();
    }

    {
        GCPHASE(DeleteCodeBlocks);
        m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks();
    }

    if (sweepToggle == DoSweep) {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        GCPHASE(Sweeping);
        sweep();
        m_objectSpace.shrink();
        m_weakSet.shrink();
    }

    // To avoid pathological GC churn in large heaps, we set the new allocation
    // limit to be the current size of the heap. This heuristic is a bit
    // arbitrary. Using the current size of the heap after this collection gives
    // us a 2X multiplier, which is a 1:1 (heap size : new bytes allocated)
    // proportion, and seems to work well in benchmarks.
    size_t newSize = size();
    if (fullGC) {
        m_sizeAfterLastCollect = newSize;
        m_bytesAllocatedLimit = max(newSize, m_minBytesPerCycle);
    }
    m_bytesAllocated = 0;

    double lastGCEndTime = WTF::currentTime();
    m_lastGCLength = lastGCEndTime - lastGCStartTime;
    JAVASCRIPTCORE_GC_END();
}
void Heap::addFinalizer(JSCell* cell, Finalizer finalizer)
{
    Weak<JSCell> weak(*globalData(), cell, &m_finalizerOwner, reinterpret_cast<void*>(finalizer));
    weak.leakHandle(); // Balanced by FinalizerOwner::finalize().
}
Foam::polyMesh::polyMesh(const IOobject& io)
:
    objectRegistry(io),
    primitiveMesh(),
    allPoints_
    (
        IOobject("points", time().findInstance(meshDir(), "points"), meshSubDir, *this, IOobject::MUST_READ, IOobject::NO_WRITE)
    ),
    // To be re-sliced later.  HJ, 19/oct/2008
    points_(allPoints_, allPoints_.size()),
    allFaces_
    (
        IOobject("faces", time().findInstance(meshDir(), "faces"), meshSubDir, *this, IOobject::MUST_READ, IOobject::NO_WRITE)
    ),
    // To be re-sliced later.  HJ, 19/oct/2008
    faces_(allFaces_, allFaces_.size()),
    owner_
    (
        IOobject("owner", time().findInstance(meshDir(), "faces"), meshSubDir, *this, IOobject::READ_IF_PRESENT, IOobject::NO_WRITE)
    ),
    neighbour_
    (
        IOobject("neighbour", time().findInstance(meshDir(), "faces"), meshSubDir, *this, IOobject::READ_IF_PRESENT, IOobject::NO_WRITE)
    ),
    clearedPrimitives_(false),
    boundary_
    (
        IOobject("boundary", time().findInstance(meshDir(), "boundary"), meshSubDir, *this, IOobject::MUST_READ, IOobject::NO_WRITE),
        *this
    ),
    bounds_(allPoints_),
    geometricD_(Vector<label>::zero),
    solutionD_(Vector<label>::zero),
    pointZones_
    (
        IOobject("pointZones", time().findInstance(meshDir(), "pointZones", IOobject::READ_IF_PRESENT), meshSubDir, *this, IOobject::READ_IF_PRESENT, IOobject::NO_WRITE),
        *this
    ),
    faceZones_
    (
        IOobject("faceZones", time().findInstance(meshDir(), "faceZones", IOobject::READ_IF_PRESENT), meshSubDir, *this, IOobject::READ_IF_PRESENT, IOobject::NO_WRITE),
        *this
    ),
    cellZones_
    (
        IOobject("cellZones", time().findInstance(meshDir(), "cellZones", IOobject::READ_IF_PRESENT), meshSubDir, *this, IOobject::READ_IF_PRESENT, IOobject::NO_WRITE),
        *this
    ),
    globalMeshDataPtr_(NULL),
    moving_(false),
    changing_(false),
    curMotionTimeIndex_(time().timeIndex()),
    oldAllPointsPtr_(NULL),
    oldPointsPtr_(NULL)
{
    if (exists(owner_.objectPath()))
    {
        initMesh();
    }
    else
    {
        cellIOList cLst
        (
            IOobject
            (
                "cells",
                // Find the cells file on the basis of the faces file
                // HJ, 8/Jul/2009
                // time().findInstance(meshDir(), "cells"),
                time().findInstance(meshDir(), "faces"),
                meshSubDir,
                *this,
                IOobject::MUST_READ,
                IOobject::NO_WRITE
            )
        );

        // Set the primitive mesh
        initMesh(cLst);

        owner_.write();
        neighbour_.write();
    }

    // Calculate topology for the patches (processor-processor comms etc.)
    boundary_.updateMesh();

    // Calculate the geometry for the patches (transformation tensors etc.)
    boundary_.calcGeometry();

    // Warn if global empty mesh (constructs globalData!)
    if (globalData().nTotalPoints() == 0)
    {
        WarningIn("polyMesh(const IOobject&)")
            << "no points in mesh" << endl;
    }

    if (globalData().nTotalCells() == 0)
    {
        WarningIn("polyMesh(const IOobject&)")
            << "no cells in mesh" << endl;
    }
}
void Heap::collect(SweepToggle sweepToggle)
{
    SamplingRegion samplingRegion("Garbage Collection");

    GCPHASE(Collect);
    ASSERT(globalData()->apiLock().currentThreadIsHoldingLock());
    RELEASE_ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable());
    ASSERT(m_isSafeToCollect);
    JAVASCRIPTCORE_GC_BEGIN();
    RELEASE_ASSERT(m_operationInProgress == NoOperation);
    m_operationInProgress = Collection;

    m_activityCallback->willCollect();

    double lastGCStartTime = WTF::currentTime();
    if (lastGCStartTime - m_lastCodeDiscardTime > minute) {
        deleteAllCompiledCode();
        m_lastCodeDiscardTime = WTF::currentTime();
    }

    {
        GCPHASE(Canonicalize);
        m_objectSpace.canonicalizeCellLivenessData();
    }

    markRoots();

    {
        GCPHASE(ReapingWeakHandles);
        m_objectSpace.reapWeakSets();
    }

    JAVASCRIPTCORE_GC_MARKED();

    {
        m_blockSnapshot.resize(m_objectSpace.blocks().set().size());
        MarkedBlockSnapshotFunctor functor(m_blockSnapshot);
        m_objectSpace.forEachBlock(functor);
    }

    copyBackingStores();

    {
        GCPHASE(FinalizeUnconditionalFinalizers);
        finalizeUnconditionalFinalizers();
    }

    {
        GCPHASE(finalizeSmallStrings);
        m_globalData->smallStrings.finalizeSmallStrings();
    }

    {
        GCPHASE(DeleteCodeBlocks);
        deleteUnmarkedCompiledCode();
    }

    {
        GCPHASE(DeleteSourceProviderCaches);
        m_globalData->clearSourceProviderCaches();
    }

    if (sweepToggle == DoSweep) {
        SamplingRegion samplingRegion("Garbage Collection: Sweeping");
        GCPHASE(Sweeping);
        m_objectSpace.sweep();
        m_objectSpace.shrink();
    }

    m_sweeper->startSweeping(m_blockSnapshot);
    m_bytesAbandoned = 0;

    {
        GCPHASE(ResetAllocators);
        m_objectSpace.resetAllocators();
    }

    size_t currentHeapSize = size();
    if (Options::gcMaxHeapSize() && currentHeapSize > Options::gcMaxHeapSize())
        HeapStatistics::exitWithFailure();

    m_sizeAfterLastCollect = currentHeapSize;

    // To avoid pathological GC churn in very small and very large heaps, we set
    // the new allocation limit based on the current size of the heap, with a
    // fixed minimum.
    size_t maxHeapSize = max(minHeapSize(m_heapType, m_ramSize), proportionalHeapSize(currentHeapSize, m_ramSize));
    m_bytesAllocatedLimit = maxHeapSize - currentHeapSize;

    m_bytesAllocated = 0;
    double lastGCEndTime = WTF::currentTime();
    m_lastGCLength = lastGCEndTime - lastGCStartTime;

    if (Options::recordGCPauseTimes())
        HeapStatistics::recordGCPauseTime(lastGCStartTime, lastGCEndTime);
    RELEASE_ASSERT(m_operationInProgress == Collection);

    m_operationInProgress = NoOperation;
    JAVASCRIPTCORE_GC_END();

    if (Options::useZombieMode())
        zombifyDeadObjects();

    if (Options::objectsAreImmortal())
        markDeadObjects();

    if (Options::showObjectStatistics())
        HeapStatistics::showObjectStatistics(this);
}
void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
#if DFG_ENABLE(DEBUG_VERBOSE)
    fprintf(stderr, "JIT code for %p start at [%p, %p). Size = %lu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
#endif

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    if (m_codeBlock->needsCallReturnIndices()) {
        m_codeBlock->callReturnIndexVector().reserveCapacity(m_exceptionChecks.size());
        for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
            unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
            CodeOrigin codeOrigin = m_exceptionChecks[i].m_codeOrigin;
            while (codeOrigin.inlineCallFrame)
                codeOrigin = codeOrigin.inlineCallFrame->caller;
            unsigned exceptionInfo = codeOrigin.bytecodeIndex;
            m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
        }
    }

    unsigned numCallsFromInlineCode = 0;
    for (unsigned i = 0; i < m_exceptionChecks.size(); ++i) {
        if (m_exceptionChecks[i].m_codeOrigin.inlineCallFrame)
            numCallsFromInlineCode++;
    }

    if (numCallsFromInlineCode) {
        Vector<CodeOriginAtCallReturnOffset>& codeOrigins = m_codeBlock->codeOrigins();
        codeOrigins.resize(numCallsFromInlineCode);

        for (unsigned i = 0, j = 0; i < m_exceptionChecks.size(); ++i) {
            CallExceptionRecord& record = m_exceptionChecks[i];
            if (record.m_codeOrigin.inlineCallFrame) {
                unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_exceptionChecks[i].m_call);
                codeOrigins[j].codeOrigin = record.m_codeOrigin;
                codeOrigins[j].callReturnOffset = returnAddressOffset;
                j++;
            }
        }
    }

    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size());
    for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_functionCall);
        info.callReturnLocation = callReturnLocation;
        info.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCheckImmToCall), callReturnLocation);
        info.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToStructCheck));
#if USE(JSVALUE64)
        info.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToLoadOrStore));
#else
        info.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToTagLoadOrStore));
        info.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToPayloadLoadOrStore));
#endif
        info.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToSlowCase));
        info.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_deltaCallToDone));
        info.baseGPR = m_propertyAccesses[i].m_baseGPR;
#if USE(JSVALUE64)
        info.valueGPR = m_propertyAccesses[i].m_valueGPR;
#else
        info.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
        info.valueGPR = m_propertyAccesses[i].m_valueGPR;
#endif
        info.scratchGPR = m_propertyAccesses[i].m_scratchGPR;
    }

    m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_jsCalls[i].m_callType;
        info.isDFG = true;
        info.callReturnLocation = CodeLocationLabel(linkBuffer.locationOf(m_jsCalls[i].m_slowCall));
        info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
        info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
    }

    MacroAssemblerCodeRef osrExitThunk = globalData()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < codeBlock()->numberOfOSRExits(); ++i) {
        OSRExit& exit = codeBlock()->osrExit(i);
        linkBuffer.link(exit.m_check.lateJump(), target);
        exit.m_check.correctLateJump(linkBuffer);
    }

    codeBlock()->shrinkWeakReferencesToFit();
    codeBlock()->shrinkWeakReferenceTransitionsToFit();
}