bool run() { ASSERT(m_graph.m_form != SSA); BitVector blocksThatNeedInvalidationPoints; for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { BasicBlock* block = m_graph.block(blockIndex); if (!block) continue; for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) handle(nodeIndex, block->at(nodeIndex)); // Note: this assumes that control flow occurs at bytecode instruction boundaries. if (m_originThatHadFire.isSet()) { for (unsigned i = block->numSuccessors(); i--;) blocksThatNeedInvalidationPoints.set(block->successor(i)->index); } m_insertionSet.execute(block); } for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) { if (!blocksThatNeedInvalidationPoints.get(blockIndex)) continue; BasicBlock* block = m_graph.block(blockIndex); insertInvalidationCheck(0, block->at(0)); m_insertionSet.execute(block); } return true; }
bool BitVector::equalsSlowCase(const BitVector& other) const { // This is really cheesy, but probably good enough for now. for (unsigned i = std::max(size(), other.size()); i--;) { if (get(i) != other.get(i)) return false; } return true; }
// Finalizes a DFG compilation: copies frame/register metadata into the JITCode,
// patches switch jump tables, links outgoing calls and inline caches, wires OSR
// exits to the exit thunk, and installs DFG exception handlers on the CodeBlock.
void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_plan.inlineCallFrames->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;

#if USE(JSVALUE32_64)
    m_jitCode->common.doubleConstants = WTFMove(m_graph.m_doubleConstants);
#endif

    m_graph.registerFrozenValues();

    // Resolve integer/char switch jump tables: every table slot defaults to the
    // fall-through block, then each explicit case overwrites its slot.
    BitVector usedJumpTables;
    for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
        SwitchData& data = **iter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind == SwitchString)
            continue;

        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);

        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
                linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    // Clear jump tables the DFG did not use so the baseline entries don't linger.
    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;

        m_codeBlock->switchJumpTable(i).clear();
    }

    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and we cannot deref StringImpl's and (2) it would be weird to deref those
    // StringImpl's since we refer to them.
    for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
        SwitchData& data = **switchDataIter;
        if (!data.didUseJumpTable)
            continue;

        if (data.kind != SwitchString)
            continue;

        // Same default-then-override scheme as above, keyed by StringImpl.
        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    // Finalize property-access inline caches.
    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(linkBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(linkBuffer);

    // Record patchpoint deltas for "in" stub infos so the repatcher can find
    // the done/jump/slow-path labels relative to the slow-path call.
    for (unsigned i = 0; i < m_ins.size(); ++i) {
        StructureStubInfo& info = *m_ins[i].m_stubInfo;
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call());
        info.patch.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_done));
        info.patch.deltaCallToJump = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_jump));
        info.callReturnLocation = callReturnLocation;
        info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
    }

    // Wire each JS call site's slow path to the link-call thunk and record the
    // locations needed for later repatching.
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        JSCallRecord& record = m_jsCalls[i];
        CallLinkInfo& info = *record.m_info;
        linkBuffer.link(record.m_slowCall, FunctionPtr(m_vm->getCTIStub(linkCallThunkGenerator).code().executableAddress()));
        info.setCallLocations(linkBuffer.locationOfNearCall(record.m_slowCall),
            linkBuffer.locationOf(record.m_targetToCheck),
            linkBuffer.locationOfNearCall(record.m_fastCall));
    }

    // Point every OSR exit's patchable jump at the shared exit-generation thunk.
    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
        exit.correctJump(linkBuffer);
        if (info.m_replacementSource.isSet()) {
            m_jitCode->common.jumpReplacements.append(JumpReplacement(
                linkBuffer.locationOf(info.m_replacementSource),
                linkBuffer.locationOf(info.m_replacementDestination)));
        }
    }

    // Report OSR exit sites to the profiler, when profiling this compilation.
    if (m_graph.compilation()) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();

    // Link new DFG exception handlers and remove baseline JIT handlers.
    m_codeBlock->clearExceptionHandlers();
    for (unsigned i = 0; i < m_exceptionHandlerOSRExitCallSites.size(); i++) {
        OSRExitCompilationInfo& info = m_exceptionHandlerOSRExitCallSites[i].exitInfo;
        if (info.m_replacementDestination.isSet()) {
            // If this is *not* set, it means that we already jumped to the OSR exit in pure generated control flow.
            // i.e, we explicitly emitted an exceptionCheck that we know will be caught in this machine frame.
            // If this *is set*, it means we will be landing at this code location from genericUnwind from an
            // exception thrown in a child call frame.
            CodeLocationLabel catchLabel = linkBuffer.locationOf(info.m_replacementDestination);
            HandlerInfo newExceptionHandler = m_exceptionHandlerOSRExitCallSites[i].baselineExceptionHandler;
            CallSiteIndex callSite = m_exceptionHandlerOSRExitCallSites[i].callSiteIndex;
            // The handler covers exactly this one call site index.
            newExceptionHandler.start = callSite.bits();
            newExceptionHandler.end = callSite.bits() + 1;
            newExceptionHandler.nativeCode = catchLabel;
            m_codeBlock->appendExceptionHandler(newExceptionHandler);
        }
    }

    if (m_pcToCodeOriginMapBuilder.didBuildMapping())
        m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), linkBuffer));
}
// Serializes this declaration block to CSS text, collapsing longhands into
// shorthands where possible (border, margin, padding, background-position, ...).
// Fix: the four `positionXProp = &prop;`-style assignments had been corrupted
// into the mojibake character "∝" (the rendering of the HTML entity `&prop;`);
// restored to taking the address of the current property.
String StylePropertySet::asText() const
{
    StringBuilder result;

    // Deferred background-position/-repeat longhands, recombined after the loop.
    const CSSProperty* positionXProp = 0;
    const CSSProperty* positionYProp = 0;
    const CSSProperty* repeatXProp = 0;
    const CSSProperty* repeatYProp = 0;

    // FIXME: Stack-allocate the buffer for these BitVectors.
    // Indexed by (propertyID - firstCSSProperty): which shorthands we already
    // emitted, and which we already considered (even if serialization failed).
    BitVector shorthandPropertyUsed;
    BitVector shorthandPropertyAppeared;

    unsigned size = m_properties.size();
    for (unsigned n = 0; n < size; ++n) {
        const CSSProperty& prop = m_properties[n];
        CSSPropertyID propertyID = prop.id();
        CSSPropertyID shorthandPropertyID = CSSPropertyInvalid;
        CSSPropertyID borderFallbackShorthandProperty = CSSPropertyInvalid;
        String value;

        // Map each longhand onto the shorthand that should represent it.
        switch (propertyID) {
        case CSSPropertyBackgroundPositionX:
            positionXProp = &prop;
            continue;
        case CSSPropertyBackgroundPositionY:
            positionYProp = &prop;
            continue;
        case CSSPropertyBackgroundRepeatX:
            repeatXProp = &prop;
            continue;
        case CSSPropertyBackgroundRepeatY:
            repeatYProp = &prop;
            continue;
        case CSSPropertyBorderTopWidth:
        case CSSPropertyBorderRightWidth:
        case CSSPropertyBorderBottomWidth:
        case CSSPropertyBorderLeftWidth:
            if (!borderFallbackShorthandProperty)
                borderFallbackShorthandProperty = CSSPropertyBorderWidth;
            // Deliberate fall through: all border longhands share the logic below.
        case CSSPropertyBorderTopStyle:
        case CSSPropertyBorderRightStyle:
        case CSSPropertyBorderBottomStyle:
        case CSSPropertyBorderLeftStyle:
            if (!borderFallbackShorthandProperty)
                borderFallbackShorthandProperty = CSSPropertyBorderStyle;
            // Deliberate fall through.
        case CSSPropertyBorderTopColor:
        case CSSPropertyBorderRightColor:
        case CSSPropertyBorderBottomColor:
        case CSSPropertyBorderLeftColor:
            if (!borderFallbackShorthandProperty)
                borderFallbackShorthandProperty = CSSPropertyBorderColor;

            // FIXME: Deal with cases where only some of border-(top|right|bottom|left) are specified.
            if (!shorthandPropertyAppeared.get(CSSPropertyBorder - firstCSSProperty)) {
                // First border longhand seen: try the full 'border' shorthand.
                value = borderPropertyValue(ReturnNullOnUncommonValues);
                if (value.isNull())
                    shorthandPropertyAppeared.ensureSizeAndSet(CSSPropertyBorder - firstCSSProperty, numCSSProperties);
                else
                    shorthandPropertyID = CSSPropertyBorder;
            } else if (shorthandPropertyUsed.get(CSSPropertyBorder - firstCSSProperty))
                shorthandPropertyID = CSSPropertyBorder;
            if (!shorthandPropertyID)
                shorthandPropertyID = borderFallbackShorthandProperty;
            break;
        case CSSPropertyWebkitBorderHorizontalSpacing:
        case CSSPropertyWebkitBorderVerticalSpacing:
            shorthandPropertyID = CSSPropertyBorderSpacing;
            break;
        case CSSPropertyFontFamily:
        case CSSPropertyLineHeight:
        case CSSPropertyFontSize:
        case CSSPropertyFontStyle:
        case CSSPropertyFontVariant:
        case CSSPropertyFontWeight:
            // Don't use CSSPropertyFont because old UAs can't recognize them but are important for editing.
            break;
        case CSSPropertyListStyleType:
        case CSSPropertyListStylePosition:
        case CSSPropertyListStyleImage:
            shorthandPropertyID = CSSPropertyListStyle;
            break;
        case CSSPropertyMarginTop:
        case CSSPropertyMarginRight:
        case CSSPropertyMarginBottom:
        case CSSPropertyMarginLeft:
            shorthandPropertyID = CSSPropertyMargin;
            break;
        case CSSPropertyOutlineWidth:
        case CSSPropertyOutlineStyle:
        case CSSPropertyOutlineColor:
            shorthandPropertyID = CSSPropertyOutline;
            break;
        case CSSPropertyOverflowX:
        case CSSPropertyOverflowY:
            shorthandPropertyID = CSSPropertyOverflow;
            break;
        case CSSPropertyPaddingTop:
        case CSSPropertyPaddingRight:
        case CSSPropertyPaddingBottom:
        case CSSPropertyPaddingLeft:
            shorthandPropertyID = CSSPropertyPadding;
            break;
        case CSSPropertyWebkitAnimationName:
        case CSSPropertyWebkitAnimationDuration:
        case CSSPropertyWebkitAnimationTimingFunction:
        case CSSPropertyWebkitAnimationDelay:
        case CSSPropertyWebkitAnimationIterationCount:
        case CSSPropertyWebkitAnimationDirection:
        case CSSPropertyWebkitAnimationFillMode:
            shorthandPropertyID = CSSPropertyWebkitAnimation;
            break;
        case CSSPropertyWebkitFlexDirection:
        case CSSPropertyWebkitFlexWrap:
            shorthandPropertyID = CSSPropertyWebkitFlexFlow;
            break;
        case CSSPropertyWebkitMaskPositionX:
        case CSSPropertyWebkitMaskPositionY:
        case CSSPropertyWebkitMaskRepeatX:
        case CSSPropertyWebkitMaskRepeatY:
        case CSSPropertyWebkitMaskImage:
        case CSSPropertyWebkitMaskRepeat:
        case CSSPropertyWebkitMaskAttachment:
        case CSSPropertyWebkitMaskPosition:
        case CSSPropertyWebkitMaskClip:
        case CSSPropertyWebkitMaskOrigin:
            shorthandPropertyID = CSSPropertyWebkitMask;
            break;
        case CSSPropertyWebkitTransformOriginX:
        case CSSPropertyWebkitTransformOriginY:
        case CSSPropertyWebkitTransformOriginZ:
            shorthandPropertyID = CSSPropertyWebkitTransformOrigin;
            break;
        case CSSPropertyWebkitTransitionProperty:
        case CSSPropertyWebkitTransitionDuration:
        case CSSPropertyWebkitTransitionTimingFunction:
        case CSSPropertyWebkitTransitionDelay:
            shorthandPropertyID = CSSPropertyWebkitTransition;
            break;
        case CSSPropertyWebkitWrapFlow:
        case CSSPropertyWebkitWrapMargin:
        case CSSPropertyWebkitWrapPadding:
            shorthandPropertyID = CSSPropertyWebkitWrap;
            break;
        default:
            break;
        }

        unsigned shortPropertyIndex = shorthandPropertyID - firstCSSProperty;
        if (shorthandPropertyID) {
            // Emit each shorthand at most once; skip longhands it already covers.
            if (shorthandPropertyUsed.get(shortPropertyIndex))
                continue;
            if (!shorthandPropertyAppeared.get(shortPropertyIndex) && value.isNull())
                value = getPropertyValue(shorthandPropertyID);
            shorthandPropertyAppeared.ensureSizeAndSet(shortPropertyIndex, numCSSProperties);
        }

        if (!value.isNull()) {
            // The shorthand serialized successfully; emit it instead of the longhand.
            propertyID = shorthandPropertyID;
            shorthandPropertyUsed.ensureSizeAndSet(shortPropertyIndex, numCSSProperties);
        } else
            value = prop.value()->cssText();

        // Omitting redundant 'initial' keeps the output compact and equivalent
        // for non-inherited properties.
        if (value == "initial" && !CSSProperty::isInheritedProperty(propertyID))
            continue;

        result.append(getPropertyName(propertyID));
        result.append(": ");
        result.append(value);
        result.append(prop.isImportant() ? " !important" : "");
        result.append("; ");
    }

    // FIXME: This is a not-so-nice way to turn x/y positions into single background-position in output.
    // It is required because background-position-x/y are non-standard properties and WebKit generated output
    // would not work in Firefox (<rdar://problem/5143183>)
    // It would be a better solution if background-position was CSS_PAIR.
    if (positionXProp && positionYProp && positionXProp->isImportant() == positionYProp->isImportant()) {
        result.append("background-position: ");
        if (positionXProp->value()->isValueList() || positionYProp->value()->isValueList())
            result.append(getLayeredShorthandValue(backgroundPositionShorthand()));
        else {
            result.append(positionXProp->value()->cssText());
            result.append(" ");
            result.append(positionYProp->value()->cssText());
        }
        if (positionXProp->isImportant())
            result.append(" !important");
        result.append("; ");
    } else {
        // Mismatched importance (or only one axis present): emit what we have as-is.
        if (positionXProp)
            result.append(positionXProp->cssText());
        if (positionYProp)
            result.append(positionYProp->cssText());
    }

    // FIXME: We need to do the same for background-repeat.
    if (repeatXProp && repeatYProp && repeatXProp->isImportant() == repeatYProp->isImportant()) {
        result.append("background-repeat: ");
        if (repeatXProp->value()->isValueList() || repeatYProp->value()->isValueList())
            result.append(getLayeredShorthandValue(backgroundRepeatShorthand()));
        else {
            result.append(repeatXProp->value()->cssText());
            result.append(" ");
            result.append(repeatYProp->value()->cssText());
        }
        if (repeatXProp->isImportant())
            result.append(" !important");
        result.append("; ");
    } else {
        if (repeatXProp)
            result.append(repeatXProp->cssText());
        if (repeatYProp)
            result.append(repeatYProp->cssText());
    }

    return result.toString();
}
// Finalizes a DFG compilation (older patch.dfg-era variant): populates inline
// call frames, patches switch jump tables, fills in structure stub info and
// call link info on the CodeBlock, and links OSR exits to the exit thunk.
void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF("JIT code for %p start at [%p, %p). Size = %zu.\n", m_codeBlock, linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize(), linkBuffer.debugSize());
#endif

    // Transfer ownership of the inline call frame list to the JITCode.
    if (!m_graph.m_inlineCallFrames->isEmpty()) {
        m_graph.m_inlineCallFrames->shrinkToFit();
        m_jitCode->common.inlineCallFrames = m_graph.m_inlineCallFrames.release();
    }

    // Resolve integer/char switch jump tables: default every slot to the
    // fall-through block, then overwrite slots for explicit cases.
    BitVector usedJumpTables;
    for (unsigned i = m_graph.m_switchData.size(); i--;) {
        SwitchData& data = m_graph.m_switchData[i];
        if (!data.didUseJumpTable)
            continue;

        if (data.kind == SwitchString)
            continue;

        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);

        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue() - table.min] = linkBuffer.locationOf(m_blockHeads[myCase.target->index]);
        }
    }

    // Clear jump tables the DFG did not use.
    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;

        m_codeBlock->switchJumpTable(i).clear();
    }

    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and we cannot deref StringImpl's and (2) it would be weird to deref those
    // StringImpl's since we refer to them.
    for (unsigned i = m_graph.m_switchData.size(); i--;) {
        SwitchData& data = m_graph.m_switchData[i];
        if (!data.didUseJumpTable)
            continue;

        if (data.kind != SwitchString)
            continue;

        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target->index]);
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    // Populate structure stub infos: property accesses first, then "in" checks.
    m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccesses.size() + m_ins.size());
    for (unsigned i = 0; i < m_propertyAccesses.size(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureStubInfo(i);
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->call());
        info.codeOrigin = m_propertyAccesses[i].m_codeOrigin;
        info.callReturnLocation = callReturnLocation;
        // All deltas are recorded relative to the slow-path call so the
        // repatcher can locate the patchable instructions later.
        info.patch.dfg.deltaCheckImmToCall = differenceBetweenCodePtr(linkBuffer.locationOf(m_propertyAccesses[i].m_structureImm), callReturnLocation);
        info.patch.dfg.deltaCallToStructCheck = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_structureCheck));
#if USE(JSVALUE64)
        info.patch.dfg.deltaCallToLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_loadOrStore));
#else
        // 32-bit: tag and payload are loaded/stored separately.
        info.patch.dfg.deltaCallToTagLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_tagLoadOrStore));
        info.patch.dfg.deltaCallToPayloadLoadOrStore = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_payloadLoadOrStore));
#endif
        info.patch.dfg.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->label()));
        info.patch.dfg.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_done));
        info.patch.dfg.deltaCallToStorageLoad = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_propertyStorageLoad));
        info.patch.dfg.baseGPR = m_propertyAccesses[i].m_baseGPR;
#if USE(JSVALUE64)
        info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#else
        info.patch.dfg.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
        info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#endif
        m_propertyAccesses[i].m_usedRegisters.copyInfo(info.patch.dfg.usedRegisters);
        info.patch.dfg.registersFlushed = m_propertyAccesses[i].m_registerMode == PropertyAccessRecord::RegistersFlushed;
    }

    // "in" stub infos follow the property-access infos in the same array.
    for (unsigned i = 0; i < m_ins.size(); ++i) {
        StructureStubInfo& info = m_codeBlock->structureStubInfo(m_propertyAccesses.size() + i);
        CodeLocationLabel jump = linkBuffer.locationOf(m_ins[i].m_jump);
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call());
        info.codeOrigin = m_ins[i].m_codeOrigin;
        info.hotPathBegin = jump;
        info.callReturnLocation = callReturnLocation;
        info.patch.dfg.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
        info.patch.dfg.baseGPR = m_ins[i].m_baseGPR;
        info.patch.dfg.valueGPR = m_ins[i].m_resultGPR;
        m_ins[i].m_usedRegisters.copyInfo(info.patch.dfg.usedRegisters);
        info.patch.dfg.registersFlushed = false;
    }
    m_codeBlock->sortStructureStubInfos();

    // Populate call link infos and wire slow paths to the link thunks.
    m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_jsCalls[i].m_callType;
        info.isDFG = true;
        info.codeOrigin = m_jsCalls[i].m_codeOrigin;
        linkBuffer.link(m_jsCalls[i].m_slowCall, FunctionPtr((m_vm->getCTIStub(info.callType == CallLinkInfo::Construct ? linkConstructThunkGenerator : linkCallThunkGenerator)).code().executableAddress()));
        info.callReturnLocation = linkBuffer.locationOfNearCall(m_jsCalls[i].m_slowCall);
        info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
        info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
        info.calleeGPR = static_cast<unsigned>(m_jsCalls[i].m_callee);
    }

    // Point every OSR exit's patchable jump at the shared exit-generation thunk.
    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
        exit.correctJump(linkBuffer);
        if (exit.m_watchpointIndex != std::numeric_limits<unsigned>::max())
            m_jitCode->watchpoints[exit.m_watchpointIndex].correctLabels(linkBuffer);
    }

    // Report OSR exit sites to the profiler, when profiling this compilation.
    if (m_graph.compilation()) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();
}
// Finalizes a DFG compilation (middle-era variant): copies frame metadata and
// capture/slow-argument info into the JITCode, patches switch jump tables,
// finalizes inline caches and call links, and wires OSR exits.
void JITCompiler::link(LinkBuffer& linkBuffer)
{
    // Link the code, populate data in CodeBlock data structures.
    m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
    m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();

    if (!m_graph.m_inlineCallFrames->isEmpty())
        m_jitCode->common.inlineCallFrames = m_graph.m_inlineCallFrames.release();

    m_jitCode->common.machineCaptureStart = m_graph.m_machineCaptureStart;
    m_jitCode->common.slowArguments = std::move(m_graph.m_slowArguments);

    // Resolve integer/char switch jump tables: default every slot to the
    // fall-through block, then overwrite slots for explicit cases.
    BitVector usedJumpTables;
    for (unsigned i = m_graph.m_switchData.size(); i--;) {
        SwitchData& data = m_graph.m_switchData[i];
        if (!data.didUseJumpTable)
            continue;

        if (data.kind == SwitchString)
            continue;

        RELEASE_ASSERT(data.kind == SwitchImm || data.kind == SwitchChar);

        usedJumpTables.set(data.switchTableIndex);
        SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough->index]);
        table.ctiOffsets.grow(table.branchOffsets.size());
        for (unsigned j = table.ctiOffsets.size(); j--;)
            table.ctiOffsets[j] = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            table.ctiOffsets[myCase.value.switchLookupValue() - table.min] = linkBuffer.locationOf(m_blockHeads[myCase.target->index]);
        }
    }

    // Clear jump tables the DFG did not use.
    for (unsigned i = m_codeBlock->numberOfSwitchJumpTables(); i--;) {
        if (usedJumpTables.get(i))
            continue;

        m_codeBlock->switchJumpTable(i).clear();
    }

    // NOTE: we cannot clear string switch tables because (1) we're running concurrently
    // and we cannot deref StringImpl's and (2) it would be weird to deref those
    // StringImpl's since we refer to them.
    for (unsigned i = m_graph.m_switchData.size(); i--;) {
        SwitchData& data = m_graph.m_switchData[i];
        if (!data.didUseJumpTable)
            continue;

        if (data.kind != SwitchString)
            continue;

        StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
        table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough->index]);
        StringJumpTable::StringOffsetTable::iterator iter;
        StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
        for (iter = table.offsetTable.begin(); iter != end; ++iter)
            iter->value.ctiOffset = table.ctiDefault;
        for (unsigned j = data.cases.size(); j--;) {
            SwitchCase& myCase = data.cases[j];
            iter = table.offsetTable.find(myCase.value.stringImpl());
            RELEASE_ASSERT(iter != end);
            iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target->index]);
        }
    }

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    // Finalize property-access inline caches.
    for (unsigned i = m_getByIds.size(); i--;)
        m_getByIds[i].finalize(linkBuffer);
    for (unsigned i = m_putByIds.size(); i--;)
        m_putByIds[i].finalize(linkBuffer);

    // Record patchpoint locations for "in" stub infos relative to the
    // slow-path call, for later repatching.
    for (unsigned i = 0; i < m_ins.size(); ++i) {
        StructureStubInfo& info = *m_ins[i].m_stubInfo;
        CodeLocationLabel jump = linkBuffer.locationOf(m_ins[i].m_jump);
        CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call());
        info.hotPathBegin = jump;
        info.callReturnLocation = callReturnLocation;
        info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
    }

    // Populate call link infos and wire slow paths to the link thunks.
    m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
    for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
        CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
        info.callType = m_jsCalls[i].m_callType;
        info.isDFG = true;
        info.codeOrigin = m_jsCalls[i].m_codeOrigin;
        linkBuffer.link(m_jsCalls[i].m_slowCall, FunctionPtr((m_vm->getCTIStub(info.callType == CallLinkInfo::Construct ? linkConstructThunkGenerator : linkCallThunkGenerator)).code().executableAddress()));
        info.callReturnLocation = linkBuffer.locationOfNearCall(m_jsCalls[i].m_slowCall);
        info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
        info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
        info.calleeGPR = static_cast<unsigned>(m_jsCalls[i].m_callee);
    }

    // Point every OSR exit's patchable jump at the shared exit-generation thunk.
    MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
    CodeLocationLabel target = CodeLocationLabel(osrExitThunk.code());
    for (unsigned i = 0; i < m_jitCode->osrExit.size(); ++i) {
        OSRExit& exit = m_jitCode->osrExit[i];
        OSRExitCompilationInfo& info = m_exitCompilationInfo[i];
        linkBuffer.link(exit.getPatchableCodeOffsetAsJump(), target);
        exit.correctJump(linkBuffer);
        if (info.m_replacementSource.isSet()) {
            m_jitCode->common.jumpReplacements.append(JumpReplacement(
                linkBuffer.locationOf(info.m_replacementSource),
                linkBuffer.locationOf(info.m_replacementDestination)));
        }
    }

    // Report OSR exit sites to the profiler, when profiling this compilation.
    if (m_graph.compilation()) {
        ASSERT(m_exitSiteLabels.size() == m_jitCode->osrExit.size());
        for (unsigned i = 0; i < m_exitSiteLabels.size(); ++i) {
            Vector<Label>& labels = m_exitSiteLabels[i];
            Vector<const void*> addresses;
            for (unsigned j = 0; j < labels.size(); ++j)
                addresses.append(linkBuffer.locationOf(labels[j]).executableAddress());
            m_graph.compilation()->addOSRExitSite(addresses);
        }
    } else
        ASSERT(!m_exitSiteLabels.size());

    m_jitCode->common.compilation = m_graph.compilation();
}
// DFG stack layout phase: packs the used locals into a dense range of machine
// locals and rewrites every reference (VariableAccessData, StackAccessData,
// GetLocalUnlinked, varargs data, inline call frame recoveries) accordingly.
// Returns true unconditionally.
bool run()
{
    // This enumerates the locals that we actually care about and packs them. So for example
    // if we use local 1, 3, 4, 5, 7, then we remap them: 1->0, 3->1, 4->2, 5->3, 7->4. We
    // treat a variable as being "used" if there exists an access to it (SetLocal, GetLocal,
    // Flush, PhantomLocal).

    BitVector usedLocals;

    // Collect those variables that are used from IR.
    bool hasNodesThatNeedFixup = false;
    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        for (unsigned nodeIndex = block->size(); nodeIndex--;) {
            Node* node = block->at(nodeIndex);
            switch (node->op()) {
            case GetLocal:
            case SetLocal:
            case Flush:
            case PhantomLocal: {
                VariableAccessData* variable = node->variableAccessData();
                // Arguments are not packed; only locals participate.
                if (variable->local().isArgument())
                    break;
                usedLocals.set(variable->local().toLocal());
                break;
            }
            case GetLocalUnlinked: {
                VirtualRegister operand = node->unlinkedLocal();
                if (operand.isArgument())
                    break;
                usedLocals.set(operand.toLocal());
                // GetLocalUnlinked carries a raw local; it must be remapped
                // in the fixup pass at the end.
                hasNodesThatNeedFixup = true;
                break;
            }
            case LoadVarargs:
            case ForwardVarargs: {
                LoadVarargsData* data = node->loadVarargsData();
                if (data->count.isLocal())
                    usedLocals.set(data->count.toLocal());
                if (data->start.isLocal()) {
                    // This part really relies on the contiguity of stack layout
                    // assignments.
                    ASSERT(VirtualRegister(data->start.offset() + data->limit - 1).isLocal());
                    for (unsigned i = data->limit; i--;)
                        usedLocals.set(VirtualRegister(data->start.offset() + i).toLocal());
                } // the else case shouldn't happen.
                hasNodesThatNeedFixup = true;
                break;
            }
            case PutStack:
            case GetStack: {
                StackAccessData* stack = node->stackAccessData();
                if (stack->local.isArgument())
                    break;
                usedLocals.set(stack->local.toLocal());
                break;
            }
            default:
                break;
            }
        }
    }

    // Pin down the slots that inline call frames rely on (argument count for
    // varargs frames, and the inlined arguments themselves).
    for (InlineCallFrameSet::iterator iter = m_graph.m_plan.inlineCallFrames->begin(); !!iter; ++iter) {
        InlineCallFrame* inlineCallFrame = *iter;

        if (inlineCallFrame->isVarargs()) {
            usedLocals.set(VirtualRegister(
                JSStack::ArgumentCount + inlineCallFrame->stackOffset).toLocal());
        }

        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            usedLocals.set(VirtualRegister(
                virtualRegisterForArgument(argument).offset() +
                inlineCallFrame->stackOffset).toLocal());
        }
    }

    // Build the packed remapping: used local i -> next machine local;
    // unused locals map to UINT_MAX (i.e. no machine slot).
    Vector<unsigned> allocation(usedLocals.size());
    m_graph.m_nextMachineLocal = 0;
    for (unsigned i = 0; i < usedLocals.size(); ++i) {
        if (!usedLocals.get(i)) {
            allocation[i] = UINT_MAX;
            continue;
        }

        allocation[i] = m_graph.m_nextMachineLocal++;
    }

    // Rewrite the machine local of every root VariableAccessData.
    for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
        VariableAccessData* variable = &m_graph.m_variableAccessData[i];
        if (!variable->isRoot())
            continue;

        if (variable->local().isArgument()) {
            variable->machineLocal() = variable->local();
            continue;
        }

        size_t local = variable->local().toLocal();
        if (local >= allocation.size())
            continue;

        if (allocation[local] == UINT_MAX)
            continue;

        variable->machineLocal() = assign(allocation, variable->local());
    }

    // Rewrite the machine local of every StackAccessData.
    for (StackAccessData* data : m_graph.m_stackAccessData) {
        if (!data->local.isLocal()) {
            data->machineLocal = data->local;
            continue;
        }

        if (static_cast<size_t>(data->local.toLocal()) >= allocation.size())
            continue;

        if (allocation[data->local.toLocal()] == UINT_MAX)
            continue;

        data->machineLocal = assign(allocation, data->local);
    }

    // This register is never valid for DFG code blocks.
    codeBlock()->setActivationRegister(VirtualRegister());
    if (LIKELY(!m_graph.hasDebuggerEnabled()))
        codeBlock()->setScopeRegister(VirtualRegister());
    else
        codeBlock()->setScopeRegister(assign(allocation, codeBlock()->scopeRegister()));

    // Rebuild per-inline-frame recovery info against the new machine locals.
    for (unsigned i = m_graph.m_inlineVariableData.size(); i--;) {
        InlineVariableData data = m_graph.m_inlineVariableData[i];
        InlineCallFrame* inlineCallFrame = data.inlineCallFrame;

        if (inlineCallFrame->isVarargs()) {
            inlineCallFrame->argumentCountRegister = assign(
                allocation, VirtualRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount));
        }

        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            ArgumentPosition& position = m_graph.m_argumentPositions[
                data.argumentPositionStart + argument];
            VariableAccessData* variable = position.someVariable();
            ValueSource source;
            if (!variable)
                source = ValueSource(SourceIsDead);
            else {
                source = ValueSource::forFlushFormat(
                    variable->machineLocal(), variable->flushFormat());
            }
            inlineCallFrame->arguments[argument] = source.valueRecovery();
        }

        RELEASE_ASSERT(inlineCallFrame->isClosureCall == !!data.calleeVariable);
        if (inlineCallFrame->isClosureCall) {
            VariableAccessData* variable = data.calleeVariable->find();
            ValueSource source = ValueSource::forFlushFormat(
                variable->machineLocal(), variable->flushFormat());
            inlineCallFrame->calleeRecovery = source.valueRecovery();
        } else
            RELEASE_ASSERT(inlineCallFrame->calleeRecovery.isConstant());
    }

    // Fix GetLocalUnlinked's variable references.
    if (hasNodesThatNeedFixup) {
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case GetLocalUnlinked: {
                    node->setUnlinkedMachineLocal(assign(allocation, node->unlinkedLocal()));
                    break;
                }
                case LoadVarargs:
                case ForwardVarargs: {
                    LoadVarargsData* data = node->loadVarargsData();
                    data->machineCount = assign(allocation, data->count);
                    data->machineStart = assign(allocation, data->start);
                    break;
                }
                default:
                    break;
                }
            }
        }
    }

    return true;
}
// Assigns machine stack slots to the locals that are actually used, packing them
// densely, then rewrites every structure that names a local (VariableAccessData,
// CodeBlock special registers, InlineCallFrame metadata, slow arguments,
// GetLocalUnlinked nodes) in terms of the packed numbering. Always returns true.
bool run()
{
    SharedSymbolTable* symbolTable = codeBlock()->symbolTable();

    // This enumerates the locals that we actually care about and packs them. So for example
    // if we use local 1, 3, 4, 5, 7, then we remap them: 1->0, 3->1, 4->2, 5->3, 7->4. We
    // treat a variable as being "used" if there exists an access to it (SetLocal, GetLocal,
    // Flush, PhantomLocal).

    BitVector usedLocals;

    // Collect those variables that are used from IR.
    bool hasGetLocalUnlinked = false;
    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        for (unsigned nodeIndex = block->size(); nodeIndex--;) {
            Node* node = block->at(nodeIndex);
            switch (node->op()) {
            case GetLocal:
            case SetLocal:
            case Flush:
            case PhantomLocal: {
                VariableAccessData* variable = node->variableAccessData();
                if (variable->local().isArgument())
                    break; // Only locals are repacked; arguments keep their slots.
                usedLocals.set(variable->local().toLocal());
                break;
            }

            case GetLocalUnlinked: {
                VirtualRegister operand = node->unlinkedLocal();
                if (operand.isArgument())
                    break;
                usedLocals.set(operand.toLocal());
                // Remember that the fix-up pass over GetLocalUnlinked (bottom of this
                // function) is needed.
                hasGetLocalUnlinked = true;
                break;
            }

            default:
                break;
            }
        }
    }

    // Ensure that captured variables and captured inline arguments are pinned down.
    // They should have been because of flushes, except that the flushes can be optimized
    // away.
    if (symbolTable) {
        // The loop walks virtual-register offsets downward over (captureEnd, captureStart].
        for (int i = symbolTable->captureStart(); i > symbolTable->captureEnd(); i--)
            usedLocals.set(VirtualRegister(i).toLocal());
    }

    // Pin the arguments register and its "unmodified" twin, plus the activation
    // register if there is one, so they receive machine slots.
    if (codeBlock()->usesArguments()) {
        usedLocals.set(codeBlock()->argumentsRegister().toLocal());
        usedLocals.set(unmodifiedArgumentsRegister(codeBlock()->argumentsRegister()).toLocal());
    }

    if (codeBlock()->uncheckedActivationRegister().isValid())
        usedLocals.set(codeBlock()->activationRegister().toLocal());

    // Same pinning for every inlined call frame whose executable uses arguments.
    for (InlineCallFrameSet::iterator iter = m_graph.m_inlineCallFrames->begin(); !!iter; ++iter) {
        InlineCallFrame* inlineCallFrame = *iter;
        if (!inlineCallFrame->executable->usesArguments())
            continue;

        VirtualRegister argumentsRegister = m_graph.argumentsRegisterFor(inlineCallFrame);
        usedLocals.set(argumentsRegister.toLocal());
        usedLocals.set(unmodifiedArgumentsRegister(argumentsRegister).toLocal());

        // Pin the inlined argument slots themselves. Index 0 is deliberately skipped
        // by the `argument-- > 1` loop — presumably the 'this' entry; TODO(review)
        // confirm.
        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            usedLocals.set(VirtualRegister(
                virtualRegisterForArgument(argument).offset()
                + inlineCallFrame->stackOffset).toLocal());
        }
    }

    // Build the local -> machine-local map; unused locals map to UINT_MAX (a
    // sentinel checked by later passes), and used locals are numbered densely.
    Vector<unsigned> allocation(usedLocals.size());
    m_graph.m_nextMachineLocal = 0;
    for (unsigned i = 0; i < usedLocals.size(); ++i) {
        if (!usedLocals.get(i)) {
            allocation[i] = UINT_MAX;
            continue;
        }

        allocation[i] = m_graph.m_nextMachineLocal++;
    }

    // Stamp the machine local onto each root VariableAccessData.
    for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
        VariableAccessData* variable = &m_graph.m_variableAccessData[i];
        if (!variable->isRoot())
            continue;

        if (variable->local().isArgument()) {
            // Arguments keep their original registers unchanged.
            variable->machineLocal() = variable->local();
            continue;
        }

        size_t local = variable->local().toLocal();
        if (local >= allocation.size())
            continue;

        if (allocation[local] == UINT_MAX)
            continue;

        variable->machineLocal() = virtualRegisterForLocal(
            allocation[variable->local().toLocal()]);
    }

    // Retarget the code block's special registers at their packed locations. The
    // unmodified-arguments register is asserted to have moved in lockstep with the
    // arguments register.
    if (codeBlock()->usesArguments()) {
        VirtualRegister argumentsRegister = virtualRegisterForLocal(
            allocation[codeBlock()->argumentsRegister().toLocal()]);
        RELEASE_ASSERT(
            virtualRegisterForLocal(allocation[
                unmodifiedArgumentsRegister(
                    codeBlock()->argumentsRegister()).toLocal()])
            == unmodifiedArgumentsRegister(argumentsRegister));
        codeBlock()->setArgumentsRegister(argumentsRegister);
    }

    if (codeBlock()->uncheckedActivationRegister().isValid()) {
        codeBlock()->setActivationRegister(
            virtualRegisterForLocal(allocation[codeBlock()->activationRegister().toLocal()]));
    }

    // Rewrite inline call frame metadata (arguments register, per-argument value
    // recoveries, callee recovery) in terms of machine locals.
    for (unsigned i = m_graph.m_inlineVariableData.size(); i--;) {
        InlineVariableData data = m_graph.m_inlineVariableData[i];
        InlineCallFrame* inlineCallFrame = data.inlineCallFrame;

        if (inlineCallFrame->executable->usesArguments()) {
            inlineCallFrame->argumentsRegister = virtualRegisterForLocal(
                allocation[m_graph.argumentsRegisterFor(inlineCallFrame).toLocal()]);

            RELEASE_ASSERT(
                virtualRegisterForLocal(allocation[unmodifiedArgumentsRegister(
                    m_graph.argumentsRegisterFor(inlineCallFrame)).toLocal()])
                == unmodifiedArgumentsRegister(inlineCallFrame->argumentsRegister));
        }

        // Dead argument positions (no variable) get SourceIsDead; index 0 is again
        // skipped by the `argument-- > 1` loop.
        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            ArgumentPosition& position = m_graph.m_argumentPositions[
                data.argumentPositionStart + argument];
            VariableAccessData* variable = position.someVariable();
            ValueSource source;
            if (!variable)
                source = ValueSource(SourceIsDead);
            else {
                source = ValueSource::forFlushFormat(
                    variable->machineLocal(), variable->flushFormat());
            }
            inlineCallFrame->arguments[argument] = source.valueRecovery();
        }

        // A callee variable is recorded exactly for closure calls; non-closure calls
        // must already carry a constant callee recovery.
        RELEASE_ASSERT(inlineCallFrame->isClosureCall == !!data.calleeVariable);
        if (inlineCallFrame->isClosureCall) {
            ValueSource source = ValueSource::forFlushFormat(
                data.calleeVariable->machineLocal(),
                data.calleeVariable->flushFormat());
            inlineCallFrame->calleeRecovery = source.valueRecovery();
        } else
            RELEASE_ASSERT(inlineCallFrame->calleeRecovery.isConstant());
    }

    // Record where the captured range starts in machine-register terms. With no
    // captures the start is the (exclusive) top of the local area, i.e. local 0.
    if (symbolTable) {
        if (symbolTable->captureCount()) {
            unsigned captureStartLocal = allocation[
                VirtualRegister(codeBlock()->symbolTable()->captureStart()).toLocal()];
            ASSERT(captureStartLocal != UINT_MAX);
            m_graph.m_machineCaptureStart = virtualRegisterForLocal(captureStartLocal).offset();
        } else
            m_graph.m_machineCaptureStart = virtualRegisterForLocal(0).offset();

        // This is an abomination. If we had captured an argument then the argument ends
        // up being "slow", meaning that loads of the argument go through an extra lookup
        // table.
        if (const SlowArgument* slowArguments = symbolTable->slowArguments()) {
            // Copy the table, remapping any entry that refers to a local.
            auto newSlowArguments = std::make_unique<SlowArgument[]>(
                symbolTable->parameterCount());
            for (size_t i = symbolTable->parameterCount(); i--;) {
                newSlowArguments[i] = slowArguments[i];
                VirtualRegister reg = VirtualRegister(slowArguments[i].index);
                if (reg.isLocal())
                    newSlowArguments[i].index = virtualRegisterForLocal(allocation[reg.toLocal()]).offset();
            }
            m_graph.m_slowArguments = std::move(newSlowArguments);
        }
    }

    // Fix GetLocalUnlinked's variable references.
    if (hasGetLocalUnlinked) {
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case GetLocalUnlinked: {
                    VirtualRegister operand = node->unlinkedLocal();
                    if (operand.isLocal())
                        operand = virtualRegisterForLocal(allocation[operand.toLocal()]);
                    node->setUnlinkedMachineLocal(operand);
                    break;
                }

                default:
                    break;
                }
            }
        }
    }

    return true;
}