MacroAssemblerCodeRef baselineSetterReturnThunkGenerator(VM* vm)
{
    JSInterfaceJIT jit(vm);

    unsigned numberOfParameters = 0;
    numberOfParameters++; // The 'this' argument.
    numberOfParameters++; // The value to set.
    numberOfParameters++; // The true return PC.

    unsigned numberOfRegsForCall = JSStack::CallFrameHeaderSize + numberOfParameters;

    unsigned numberOfBytesForCall = numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC);

    unsigned alignedNumberOfBytesForCall = WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall);

    // The real return address is stored above the arguments. We passed two arguments, so
    // the argument at index 2 is the return address.
    jit.loadPtr(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (virtualRegisterForArgument(2).offset() - JSStack::CallerFrameAndPCSize) * sizeof(Register)),
        GPRInfo::regT2);

    jit.addPtr(
        AssemblyHelpers::TrustedImm32(alignedNumberOfBytesForCall),
        AssemblyHelpers::stackPointerRegister);

    jit.jump(GPRInfo::regT2);

    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
    return FINALIZE_CODE(patchBuffer, ("baseline setter return thunk"));
}
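// --- Editor's sketch (not WebKit source): the byte math above in isolation. ---
// The constants below are assumed stand-ins for sizeof(Register),
// sizeof(CallerFrameAndPC), JSStack::CallFrameHeaderSize, and
// stackAlignmentBytes() on a 64-bit target, not values read from JSC headers.
#include <cstdio>

constexpr unsigned kRegisterSize = 8;          // sizeof(Register), assumed
constexpr unsigned kCallerFrameAndPCSize = 16; // sizeof(CallerFrameAndPC), assumed
constexpr unsigned kCallFrameHeaderSize = 5;   // JSStack::CallFrameHeaderSize, assumed
constexpr unsigned kStackAlignmentBytes = 16;  // stackAlignmentBytes(), assumed

// Same rounding WTF::roundUpToMultipleOf performs for power-of-two alignments.
constexpr unsigned roundUpToMultipleOf(unsigned alignment, unsigned n)
{
    return (n + alignment - 1) & ~(alignment - 1);
}

int main()
{
    unsigned numberOfParameters = 3; // this, value, true return PC
    unsigned regsForCall = kCallFrameHeaderSize + numberOfParameters;
    unsigned bytesForCall = regsForCall * kRegisterSize - kCallerFrameAndPCSize;
    printf("thunk pops %u bytes\n", roundUpToMultipleOf(kStackAlignmentBytes, bytesForCall));
    return 0;
}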
BytecodeSequence::BytecodeSequence(CodeBlock* codeBlock)
{
    StringPrintStream out;

    for (unsigned i = 0; i < codeBlock->numberOfArgumentValueProfiles(); ++i) {
        ConcurrentJITLocker locker(codeBlock->m_lock);
        CString description = codeBlock->valueProfileForArgument(i)->briefDescription(locker);
        if (!description.length())
            continue;
        out.reset();
        out.print("arg", i, " (r", virtualRegisterForArgument(i).offset(), "): ", description);
        m_header.append(out.toCString());
    }

    StubInfoMap stubInfos;
    codeBlock->getStubInfoMap(stubInfos);

    for (unsigned bytecodeIndex = 0; bytecodeIndex < codeBlock->instructions().size();) {
        out.reset();
        codeBlock->dumpBytecode(out, bytecodeIndex, stubInfos);
        m_sequence.append(Bytecode(bytecodeIndex, codeBlock->vm()->interpreter->getOpcodeID(codeBlock->instructions()[bytecodeIndex].u.opcode), out.toCString()));
        bytecodeIndex += opcodeLength(
            codeBlock->vm()->interpreter->getOpcodeID(
                codeBlock->instructions()[bytecodeIndex].u.opcode));
    }
}
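// --- Editor's sketch (not WebKit source): why the loop above advances by
// opcodeLength() instead of by one. The instruction stream is variable-length,
// so the cursor must skip each opcode's operands. The two-opcode instruction
// set below is hypothetical, purely to show the iteration pattern.
#include <cstddef>
#include <cstdio>
#include <vector>

// Hypothetical lengths: opcode 0 is 1 slot; opcode 1 is 3 slots (two operands).
unsigned opcodeLength(int opcode) { return opcode == 1 ? 3 : 1; }

int main()
{
    std::vector<int> instructions { 1, 7, 8, 0, 1, 2, 3 };
    for (size_t bytecodeIndex = 0; bytecodeIndex < instructions.size();) {
        printf("bc#%zu: opcode %d\n", bytecodeIndex, instructions[bytecodeIndex]);
        bytecodeIndex += opcodeLength(instructions[bytecodeIndex]);
    }
    return 0;
}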
void* prepareOSREntry(
    ExecState* exec, CodeBlock* dfgCodeBlock, CodeBlock* entryCodeBlock,
    unsigned bytecodeIndex, unsigned streamIndex)
{
    VM& vm = exec->vm();
    CodeBlock* baseline = dfgCodeBlock->baselineVersion();
    DFG::JITCode* dfgCode = dfgCodeBlock->jitCode()->dfg();
    ForOSREntryJITCode* entryCode = entryCodeBlock->jitCode()->ftlForOSREntry();

    if (Options::verboseOSR()) {
        dataLog(
            "FTL OSR from ", *dfgCodeBlock, " to ", *entryCodeBlock, " at bc#",
            bytecodeIndex, ".\n");
    }

    if (bytecodeIndex != entryCode->bytecodeIndex()) {
        if (Options::verboseOSR()) {
            dataLog(
                "    OSR failed because we don't have an entrypoint for bc#",
                bytecodeIndex, "; ours is for bc#", entryCode->bytecodeIndex(), "\n");
        }
        return 0;
    }

    Operands<JSValue> values;
    dfgCode->reconstruct(
        exec, dfgCodeBlock, CodeOrigin(bytecodeIndex), streamIndex, values);

    if (Options::verboseOSR())
        dataLog("    Values at entry: ", values, "\n");

    for (int argument = values.numberOfArguments(); argument--;) {
        RELEASE_ASSERT(
            exec->r(virtualRegisterForArgument(argument).offset()).jsValue()
            == values.argument(argument));
    }

    RELEASE_ASSERT(
        static_cast<int>(values.numberOfLocals()) == baseline->m_numCalleeRegisters);

    EncodedJSValue* scratch = static_cast<EncodedJSValue*>(
        entryCode->entryBuffer()->dataBuffer());

    for (int local = values.numberOfLocals(); local--;)
        scratch[local] = JSValue::encode(values.local(local));

    int stackFrameSize = entryCode->common.requiredRegisterCountForExecutionAndExit();
    if (!vm.interpreter->stack().grow(&exec->registers()[virtualRegisterForLocal(stackFrameSize).offset()])) {
        if (Options::verboseOSR())
            dataLog("    OSR failed because stack growth failed.\n");
        return 0;
    }

    exec->setCodeBlock(entryCodeBlock);

    void* result = entryCode->addressForCall().executableAddress();
    if (Options::verboseOSR())
        dataLog("    Entry will succeed, going to address ", RawPointer(result), "\n");

    return result;
}
void jettisonBlock(BasicBlock* block, BasicBlock* jettisonedBlock, CodeOrigin boundaryCodeOrigin)
{
    for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfArguments(); ++i)
        keepOperandAlive(block, jettisonedBlock, boundaryCodeOrigin, virtualRegisterForArgument(i));
    for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfLocals(); ++i)
        keepOperandAlive(block, jettisonedBlock, boundaryCodeOrigin, virtualRegisterForLocal(i));

    fixJettisonedPredecessors(block, jettisonedBlock);
}
bool run()
{
    ASSERT(m_graph.m_form == SSA);

    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        block->ssa->availabilityAtHead.clear();
        block->ssa->availabilityAtTail.clear();
    }

    BasicBlock* root = m_graph.block(0);
    root->ssa->availabilityAtHead.m_locals.fill(Availability::unavailable());
    for (unsigned argument = m_graph.m_argumentFormats.size(); argument--;) {
        FlushedAt flushedAt = FlushedAt(
            m_graph.m_argumentFormats[argument],
            virtualRegisterForArgument(argument));
        root->ssa->availabilityAtHead.m_locals.argument(argument) = Availability(flushedAt);
    }

    // This could be made more efficient by processing blocks in reverse postorder.
    LocalOSRAvailabilityCalculator calculator;
    bool changed;
    do {
        changed = false;

        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;

            calculator.beginBlock(block);

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex)
                calculator.executeNode(block->at(nodeIndex));

            if (calculator.m_availability == block->ssa->availabilityAtTail)
                continue;

            block->ssa->availabilityAtTail = calculator.m_availability;
            changed = true;

            for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) {
                BasicBlock* successor = block->successor(successorIndex);
                successor->ssa->availabilityAtHead.merge(calculator.m_availability);
                successor->ssa->availabilityAtHead.pruneByLiveness(
                    m_graph, successor->firstOrigin().forExit);
            }
        }
    } while (changed);

    return true;
}
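// --- Editor's sketch (not WebKit source): the do/while loop above is a
// standard forward-dataflow fixpoint. Recompute each block's tail state from
// its head state; if the tail changed, merge it into every successor's head
// and iterate again. Below, a trivial bit-set lattice stands in for
// AvailabilityMap, and 'generated' stands in for the per-node transfer
// function that LocalOSRAvailabilityCalculator implements.
#include <cstddef>
#include <vector>

struct Block {
    unsigned atHead = 0;    // join of predecessor tails (bit-set lattice)
    unsigned atTail = 0;    // atHead pushed through the block
    unsigned generated = 0; // stand-in for the block's transfer function
    std::vector<size_t> successors;
};

void solveToFixpoint(std::vector<Block>& blocks)
{
    bool changed;
    do {
        changed = false;
        for (Block& block : blocks) {
            unsigned tail = block.atHead | block.generated;
            if (tail == block.atTail)
                continue;
            block.atTail = tail;
            changed = true;
            for (size_t successorIndex : block.successors)
                blocks[successorIndex].atHead |= tail; // merge()
        }
    } while (changed);
}

int main()
{
    std::vector<Block> blocks(2);
    blocks[0].generated = 1;
    blocks[0].successors = { 1 };
    solveToFixpoint(blocks);
    return blocks[1].atHead == 1 ? 0 : 1;
}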
void OSREntryData::dumpInContext(PrintStream& out, DumpContext* context) const
{
    out.print("bc#", m_bytecodeIndex, ", machine code offset = ", m_machineCodeOffset);
    out.print(", stack rules = [");

    auto printOperand = [&] (VirtualRegister reg) {
        out.print(inContext(m_expectedValues.operand(reg), context), " (");
        VirtualRegister toReg;
        bool overwritten = false;
        for (OSREntryReshuffling reshuffling : m_reshufflings) {
            if (reg == VirtualRegister(reshuffling.fromOffset)) {
                toReg = VirtualRegister(reshuffling.toOffset);
                break;
            }
            if (reg == VirtualRegister(reshuffling.toOffset))
                overwritten = true;
        }
        if (!overwritten && !toReg.isValid())
            toReg = reg;
        if (toReg.isValid()) {
            if (toReg.isLocal() && !m_machineStackUsed.get(toReg.toLocal()))
                out.print("ignored");
            else
                out.print("maps to ", toReg);
        } else
            out.print("overwritten");
        if (reg.isLocal() && m_localsForcedDouble.get(reg.toLocal()))
            out.print(", forced double");
        if (reg.isLocal() && m_localsForcedAnyInt.get(reg.toLocal()))
            out.print(", forced machine int");
        out.print(")");
    };

    CommaPrinter comma;
    for (size_t argumentIndex = m_expectedValues.numberOfArguments(); argumentIndex--;) {
        out.print(comma, "arg", argumentIndex, ":");
        printOperand(virtualRegisterForArgument(argumentIndex));
    }
    for (size_t localIndex = 0; localIndex < m_expectedValues.numberOfLocals(); ++localIndex) {
        out.print(comma, "loc", localIndex, ":");
        printOperand(virtualRegisterForLocal(localIndex));
    }

    out.print("], machine stack used = ", m_machineStackUsed);
}
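// --- Editor's sketch (not WebKit source): the virtual-register numbering the
// dump above leans on. JSC places locals at negative VirtualRegister offsets
// and arguments at or above the call-frame header; the header size used here
// is an assumed stand-in, since the real value varies across JSC versions.
#include <cassert>

constexpr int kFirstArgumentOffset = 6; // assumed stand-in for the header size

constexpr int virtualRegisterForLocal(int local) { return -1 - local; }
constexpr int virtualRegisterForArgument(int argument) { return kFirstArgumentOffset + argument; }
constexpr bool isLocal(int offset) { return offset < 0; }
constexpr int toLocal(int offset) { return -1 - offset; }

int main()
{
    assert(isLocal(virtualRegisterForLocal(4)));
    assert(toLocal(virtualRegisterForLocal(4)) == 4);
    assert(!isLocal(virtualRegisterForArgument(0)));
    return 0;
}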
void treatRegularBlock(BasicBlock* block, InsertionSet& insertionSet)
{
    Operands<VariableAccessData*> currentBlockAccessData(block->variablesAtTail.numberOfArguments(), block->variablesAtTail.numberOfLocals(), nullptr);

    // Insert a Flush before every SetLocal to properly pattern the graph such that
    // any range between SetLocal and Flush has access to the local on the stack.
    {
        for (unsigned i = 0; i < block->size(); i++) {
            Node* node = block->at(i);
            bool isPrimordialSetArgument = node->op() == SetArgument && node->local().isArgument() && node == m_graph.m_arguments[node->local().toArgument()];
            if (node->op() == SetLocal || (node->op() == SetArgument && !isPrimordialSetArgument)) {
                VirtualRegister operand = node->local();
                VariableAccessData* flushAccessData = currentBlockAccessData.operand(operand);
                if (!flushAccessData)
                    flushAccessData = newVariableAccessData(operand);

                insertionSet.insertNode(i, SpecNone, Flush, node->origin, OpInfo(flushAccessData));
            }

            if (node->hasVariableAccessData(m_graph))
                currentBlockAccessData.operand(node->local()) = node->variableAccessData();
        }
    }

    // Flush everything at the end of the block.
    {
        NodeOrigin origin = block->at(block->size() - 1)->origin;
        auto insertFlushAtEnd = [&] (VirtualRegister operand) {
            VariableAccessData* accessData = currentBlockAccessData.operand(operand);
            if (!accessData)
                accessData = newVariableAccessData(operand);

            currentBlockAccessData.operand(operand) = accessData;

            insertionSet.insertNode(block->size(), SpecNone, Flush, origin, OpInfo(accessData));
        };

        for (unsigned i = 0; i < block->variablesAtTail.numberOfLocals(); i++)
            insertFlushAtEnd(virtualRegisterForLocal(i));
        for (unsigned i = 0; i < block->variablesAtTail.numberOfArguments(); i++)
            insertFlushAtEnd(virtualRegisterForArgument(i));
    }
}
bool run()
{
    ASSERT(m_graph.m_form == SSA);

    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        block->ssa->availabilityAtHead.clear();
        block->ssa->availabilityAtTail.clear();
    }

    BasicBlock* root = m_graph.block(0);
    root->ssa->availabilityAtHead.m_locals.fill(Availability::unavailable());
    for (unsigned argument = m_graph.m_argumentFormats.size(); argument--;) {
        FlushedAt flushedAt = FlushedAt(
            m_graph.m_argumentFormats[argument],
            virtualRegisterForArgument(argument));
        root->ssa->availabilityAtHead.m_locals.argument(argument) = Availability(flushedAt);
    }

    // This could be made more efficient by processing blocks in reverse postorder.
    LocalOSRAvailabilityCalculator calculator(m_graph);
    bool changed;
    do {
        changed = false;

        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;

            calculator.beginBlock(block);

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex)
                calculator.executeNode(block->at(nodeIndex));

            if (calculator.m_availability == block->ssa->availabilityAtTail)
                continue;

            block->ssa->availabilityAtTail = calculator.m_availability;
            changed = true;

            for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) {
                BasicBlock* successor = block->successor(successorIndex);
                successor->ssa->availabilityAtHead.merge(calculator.m_availability);
                successor->ssa->availabilityAtHead.pruneByLiveness(
                    m_graph, successor->at(0)->origin.forExit);
            }
        }
    } while (changed);

    if (validationEnabled()) {
        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;

            calculator.beginBlock(block);

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                if (block->at(nodeIndex)->origin.exitOK) {
                    // If we're allowed to exit here, the heap must be in a state
                    // where exiting wouldn't crash. These particular fields are
                    // required for correctness because we use them during OSR exit
                    // to do meaningful things. It would be wrong for any of them
                    // to be dead.

                    AvailabilityMap availabilityMap = calculator.m_availability;
                    availabilityMap.pruneByLiveness(m_graph, block->at(nodeIndex)->origin.forExit);

                    for (auto heapPair : availabilityMap.m_heap) {
                        switch (heapPair.key.kind()) {
                        case ActivationScopePLoc:
                        case ActivationSymbolTablePLoc:
                        case FunctionActivationPLoc:
                        case FunctionExecutablePLoc:
                        case StructurePLoc:
                            if (heapPair.value.isDead()) {
                                dataLogLn("PromotedHeapLocation is dead, but should not be: ", heapPair.key);
                                availabilityMap.dump(WTF::dataFile());
                                CRASH();
                            }
                            break;

                        default:
                            break;
                        }
                    }
                }

                calculator.executeNode(block->at(nodeIndex));
            }
        }
    }

    return true;
}
bool run()
{
    ASSERT(m_graph.m_form == SSA);

    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        block->ssa->availabilityAtHead.fill(Availability());
        block->ssa->availabilityAtTail.fill(Availability());
    }

    BasicBlock* root = m_graph.block(0);
    for (unsigned argument = root->ssa->availabilityAtHead.numberOfArguments(); argument--;) {
        root->ssa->availabilityAtHead.argument(argument) =
            Availability::unavailable().withFlush(
                FlushedAt(FlushedJSValue, virtualRegisterForArgument(argument)));
    }
    for (unsigned local = root->ssa->availabilityAtHead.numberOfLocals(); local--;)
        root->ssa->availabilityAtHead.local(local) = Availability::unavailable();

    if (m_graph.m_plan.mode == FTLForOSREntryMode) {
        for (unsigned local = m_graph.m_profiledBlock->m_numCalleeRegisters; local--;) {
            root->ssa->availabilityAtHead.local(local) =
                Availability::unavailable().withFlush(
                    FlushedAt(FlushedJSValue, virtualRegisterForLocal(local)));
        }
    }

    // This could be made more efficient by processing blocks in reverse postorder.
    Operands<Availability> availability;
    bool changed;
    do {
        changed = false;

        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;

            availability = block->ssa->availabilityAtHead;

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex) {
                Node* node = block->at(nodeIndex);

                switch (node->op()) {
                case SetLocal: {
                    VariableAccessData* variable = node->variableAccessData();
                    availability.operand(variable->local()) =
                        Availability(node->child1().node(), variable->flushedAt());
                    break;
                }

                case GetArgument: {
                    VariableAccessData* variable = node->variableAccessData();
                    availability.operand(variable->local()) =
                        Availability(node, variable->flushedAt());
                    break;
                }

                case MovHint:
                case MovHintAndCheck: {
                    VariableAccessData* variable = node->variableAccessData();
                    availability.operand(variable->local()) =
                        Availability(node->child1().node());
                    break;
                }

                case ZombieHint: {
                    VariableAccessData* variable = node->variableAccessData();
                    availability.operand(variable->local()) = Availability::unavailable();
                    break;
                }

                default:
                    break;
                }
            }

            if (availability == block->ssa->availabilityAtTail)
                continue;

            block->ssa->availabilityAtTail = availability;
            changed = true;

            for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) {
                BasicBlock* successor = block->successor(successorIndex);
                for (unsigned i = availability.size(); i--;) {
                    successor->ssa->availabilityAtHead[i] = availability[i].merge(
                        successor->ssa->availabilityAtHead[i]);
                }
            }
        }
    } while (changed);

    return true;
}
void BytecodeGeneratorification::run()
{
    // We calculate the liveness at each merge point. This tells us which registers
    // should be saved and resumed conservatively.
    {
        GeneratorLivenessAnalysis pass(*this);
        pass.run(m_codeBlock, m_instructions);
    }

    BytecodeRewriter rewriter(m_bytecodeGenerator, m_graph, m_codeBlock, m_instructions);

    // Setup the global switch for the generator.
    {
        auto nextToEnterPoint = enterPoint().next();
        unsigned switchTableIndex = m_codeBlock->numberOfSwitchJumpTables();
        VirtualRegister state = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::State));
        auto& jumpTable = m_codeBlock->addSwitchJumpTable();
        jumpTable.min = 0;
        jumpTable.branchOffsets.resize(m_yields.size() + 1);
        jumpTable.branchOffsets.fill(0);
        jumpTable.add(0, nextToEnterPoint.offset());
        for (unsigned i = 0; i < m_yields.size(); ++i)
            jumpTable.add(i + 1, m_yields[i].point);

        rewriter.insertFragmentBefore(nextToEnterPoint, [&](BytecodeRewriter::Fragment& fragment) {
            fragment.appendInstruction<OpSwitchImm>(switchTableIndex, BoundLabel(nextToEnterPoint.offset()), state);
        });
    }

    for (const YieldData& data : m_yields) {
        VirtualRegister scope = virtualRegisterForArgument(static_cast<int32_t>(JSGeneratorFunction::GeneratorArgument::Frame));

        auto instruction = m_instructions.at(data.point);
        // Emit save sequence.
        rewriter.insertFragmentBefore(instruction, [&](BytecodeRewriter::Fragment& fragment) {
            data.liveness.forEachSetBit([&](size_t index) {
                VirtualRegister operand = virtualRegisterForLocal(index);
                Storage storage = storageForGeneratorLocal(index);

                fragment.appendInstruction<OpPutToScope>(
                    scope, // scope
                    storage.identifierIndex, // identifier
                    operand, // value
                    GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization), // info
                    m_generatorFrameSymbolTableIndex, // symbol table constant index
                    storage.scopeOffset.offset() // scope offset
                );
            });

            // Insert op_ret just after save sequence.
            fragment.appendInstruction<OpRet>(data.argument);
        });

        // Emit resume sequence.
        rewriter.insertFragmentAfter(instruction, [&](BytecodeRewriter::Fragment& fragment) {
            data.liveness.forEachSetBit([&](size_t index) {
                VirtualRegister operand = virtualRegisterForLocal(index);
                Storage storage = storageForGeneratorLocal(index);

                fragment.appendInstruction<OpGetFromScope>(
                    operand, // dst
                    scope, // scope
                    storage.identifierIndex, // identifier
                    GetPutInfo(DoNotThrowIfNotFound, LocalClosureVar, InitializationMode::NotInitialization), // info
                    0, // local scope depth
                    storage.scopeOffset.offset() // scope offset
                );
            });
        });

        // Clip the unnecessary bytecodes.
        rewriter.removeBytecode(instruction);
    }

    rewriter.execute();
}
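// --- Editor's sketch (not WebKit source): the control flow the rewrite above
// produces. The inserted op_switch_imm dispatches on the generator's State
// argument: state 0 falls through to the original entry, and state i+1 jumps
// to the resume sequence of yield i. A hand-rolled C++ analogue, purely
// illustrative:
#include <cstdio>

struct Counter {
    int state = 0; // plays the role of the State argument
    int next()
    {
        switch (state) {
        case 1: goto resume1;
        case 2: goto resume2;
        default: break; // state 0: fresh entry
        }
        state = 1;
        return 10; // first yield
    resume1:
        state = 2;
        return 20; // second yield
    resume2:
        return -1; // generator exhausted
    }
};

int main()
{
    Counter generator;
    for (int i = 0; i < 3; ++i)
        printf("%d\n", generator.next()); // prints 10, 20, -1
    return 0;
}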
bool run()
{
    ASSERT(m_graph.m_form == SSA);

    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        block->ssa->availabilityAtHead.fill(Availability());
        block->ssa->availabilityAtTail.fill(Availability());
    }

    BasicBlock* root = m_graph.block(0);
    for (unsigned argument = root->ssa->availabilityAtHead.numberOfArguments(); argument--;) {
        root->ssa->availabilityAtHead.argument(argument) =
            Availability::unavailable().withFlush(
                FlushedAt(FlushedJSValue, virtualRegisterForArgument(argument)));
    }

    if (m_graph.m_plan.mode == FTLForOSREntryMode) {
        for (unsigned local = m_graph.m_profiledBlock->m_numCalleeRegisters; local--;)
            root->ssa->availabilityAtHead.local(local) = Availability::unavailable();
    } else {
        for (unsigned local = root->ssa->availabilityAtHead.numberOfLocals(); local--;)
            root->ssa->availabilityAtHead.local(local) = Availability::unavailable();
    }

    // This could be made more efficient by processing blocks in reverse postorder.
    LocalOSRAvailabilityCalculator calculator;
    bool changed;
    do {
        changed = false;

        for (BlockIndex blockIndex = 0; blockIndex < m_graph.numBlocks(); ++blockIndex) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;

            calculator.beginBlock(block);

            for (unsigned nodeIndex = 0; nodeIndex < block->size(); ++nodeIndex)
                calculator.executeNode(block->at(nodeIndex));

            if (calculator.m_availability == block->ssa->availabilityAtTail)
                continue;

            block->ssa->availabilityAtTail = calculator.m_availability;
            changed = true;

            for (unsigned successorIndex = block->numSuccessors(); successorIndex--;) {
                BasicBlock* successor = block->successor(successorIndex);
                for (unsigned i = calculator.m_availability.size(); i--;) {
                    successor->ssa->availabilityAtHead[i] = calculator.m_availability[i].merge(
                        successor->ssa->availabilityAtHead[i]);
                }
            }
        }
    } while (changed);

    return true;
}
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()), AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

    CodeOrigin codeOrigin;
    for (codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
        void* jumpTarget = nullptr;
        void* trueReturnPC = nullptr;

        unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;

        switch (inlineCallFrame->kind) {
        case InlineCallFrame::Call:
        case InlineCallFrame::Construct:
        case InlineCallFrame::CallVarargs:
        case InlineCallFrame::ConstructVarargs: {
            CallLinkInfo* callLinkInfo = baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
            RELEASE_ASSERT(callLinkInfo);

            jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
            break;
        }

        case InlineCallFrame::GetterCall:
        case InlineCallFrame::SetterCall: {
            StructureStubInfo* stubInfo = baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
            RELEASE_ASSERT(stubInfo);

            switch (inlineCallFrame->kind) {
            case InlineCallFrame::GetterCall:
                jumpTarget = jit.vm()->getCTIStub(baselineGetterReturnThunkGenerator).code().executableAddress();
                break;
            case InlineCallFrame::SetterCall:
                jumpTarget = jit.vm()->getCTIStub(baselineSetterReturnThunkGenerator).code().executableAddress();
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            trueReturnPC = stubInfo->callReturnLocation.labelAtOffset(
                stubInfo->patch.deltaCallToDone).executableAddress();
            break;
        } }

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            jit.addPtr(AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget), AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        if (trueReturnPC)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(trueReturnPC), AssemblyHelpers::addressFor(inlineCallFrame->stackOffset + virtualRegisterForArgument(inlineCallFrame->arguments.size()).offset()));

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isVarargs())
            jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
#if USE(JSVALUE64)
        jit.store64(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall)
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))), AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
#else // USE(JSVALUE64)
        jit.storePtr(callerFrameGPR, AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall)
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()), AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
#endif // USE(JSVALUE64)
    }

#if USE(JSVALUE64)
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
#else
    Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin.bytecodeIndex;
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
#endif
    jit.store32(AssemblyHelpers::TrustedImm32(locationBits), AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
}
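// --- Editor's sketch (not WebKit source): the shape of the outer loop above.
// It walks the chain of inlined frames from the exit's code origin outward,
// reifying each one, and stops when inlineCallFrame is null, i.e. at the
// machine frame. Types below are minimal stand-ins for the JSC originals.
#include <cstdio>

struct InlineCallFrame;

struct CodeOrigin {
    unsigned bytecodeIndex;
    InlineCallFrame* inlineCallFrame; // null once we reach the machine frame
};

struct InlineCallFrame {
    CodeOrigin caller;
    int stackOffset;
};

void walkInlineFrames(CodeOrigin codeOrigin)
{
    for (; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        printf("reify inlined frame: bc#%u, stackOffset %d\n",
            codeOrigin.bytecodeIndex, codeOrigin.inlineCallFrame->stackOffset);
    }
    printf("machine frame: bc#%u\n", codeOrigin.bytecodeIndex);
}

int main()
{
    InlineCallFrame inner { { 42, nullptr }, -24 };
    walkInlineFrames(CodeOrigin { 7, &inner });
    return 0;
}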
bool run()
{
    // This enumerates the locals that we actually care about and packs them. So for example
    // if we use local 1, 3, 4, 5, 7, then we remap them: 1->0, 3->1, 4->2, 5->3, 7->4. We
    // treat a variable as being "used" if there exists an access to it (SetLocal, GetLocal,
    // Flush, PhantomLocal).

    BitVector usedLocals;

    // Collect those variables that are used from IR.
    bool hasNodesThatNeedFixup = false;
    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        for (unsigned nodeIndex = block->size(); nodeIndex--;) {
            Node* node = block->at(nodeIndex);
            switch (node->op()) {
            case GetLocal:
            case SetLocal:
            case Flush:
            case PhantomLocal: {
                VariableAccessData* variable = node->variableAccessData();
                if (variable->local().isArgument())
                    break;
                usedLocals.set(variable->local().toLocal());
                break;
            }

            case GetLocalUnlinked: {
                VirtualRegister operand = node->unlinkedLocal();
                if (operand.isArgument())
                    break;
                usedLocals.set(operand.toLocal());
                hasNodesThatNeedFixup = true;
                break;
            }

            case LoadVarargs:
            case ForwardVarargs: {
                LoadVarargsData* data = node->loadVarargsData();
                if (data->count.isLocal())
                    usedLocals.set(data->count.toLocal());
                if (data->start.isLocal()) {
                    // This part really relies on the contiguity of stack layout
                    // assignments.
                    ASSERT(VirtualRegister(data->start.offset() + data->limit - 1).isLocal());
                    for (unsigned i = data->limit; i--;)
                        usedLocals.set(VirtualRegister(data->start.offset() + i).toLocal());
                } // the else case shouldn't happen.
                hasNodesThatNeedFixup = true;
                break;
            }

            case PutStack:
            case GetStack: {
                StackAccessData* stack = node->stackAccessData();
                if (stack->local.isArgument())
                    break;
                usedLocals.set(stack->local.toLocal());
                break;
            }

            default:
                break;
            }
        }
    }

    for (InlineCallFrameSet::iterator iter = m_graph.m_plan.inlineCallFrames->begin(); !!iter; ++iter) {
        InlineCallFrame* inlineCallFrame = *iter;

        if (inlineCallFrame->isVarargs()) {
            usedLocals.set(VirtualRegister(
                JSStack::ArgumentCount + inlineCallFrame->stackOffset).toLocal());
        }

        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            usedLocals.set(VirtualRegister(
                virtualRegisterForArgument(argument).offset()
                + inlineCallFrame->stackOffset).toLocal());
        }
    }

    Vector<unsigned> allocation(usedLocals.size());
    m_graph.m_nextMachineLocal = 0;
    for (unsigned i = 0; i < usedLocals.size(); ++i) {
        if (!usedLocals.get(i)) {
            allocation[i] = UINT_MAX;
            continue;
        }

        allocation[i] = m_graph.m_nextMachineLocal++;
    }

    for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
        VariableAccessData* variable = &m_graph.m_variableAccessData[i];
        if (!variable->isRoot())
            continue;

        if (variable->local().isArgument()) {
            variable->machineLocal() = variable->local();
            continue;
        }

        size_t local = variable->local().toLocal();
        if (local >= allocation.size())
            continue;

        if (allocation[local] == UINT_MAX)
            continue;

        variable->machineLocal() = assign(allocation, variable->local());
    }

    for (StackAccessData* data : m_graph.m_stackAccessData) {
        if (!data->local.isLocal()) {
            data->machineLocal = data->local;
            continue;
        }

        if (static_cast<size_t>(data->local.toLocal()) >= allocation.size())
            continue;
        if (allocation[data->local.toLocal()] == UINT_MAX)
            continue;

        data->machineLocal = assign(allocation, data->local);
    }

    // This register is never valid for DFG code blocks.
    codeBlock()->setActivationRegister(VirtualRegister());
    if (LIKELY(!m_graph.hasDebuggerEnabled()))
        codeBlock()->setScopeRegister(VirtualRegister());
    else
        codeBlock()->setScopeRegister(assign(allocation, codeBlock()->scopeRegister()));

    for (unsigned i = m_graph.m_inlineVariableData.size(); i--;) {
        InlineVariableData data = m_graph.m_inlineVariableData[i];
        InlineCallFrame* inlineCallFrame = data.inlineCallFrame;

        if (inlineCallFrame->isVarargs()) {
            inlineCallFrame->argumentCountRegister = assign(
                allocation, VirtualRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount));
        }

        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            ArgumentPosition& position = m_graph.m_argumentPositions[
                data.argumentPositionStart + argument];
            VariableAccessData* variable = position.someVariable();
            ValueSource source;
            if (!variable)
                source = ValueSource(SourceIsDead);
            else {
                source = ValueSource::forFlushFormat(
                    variable->machineLocal(), variable->flushFormat());
            }
            inlineCallFrame->arguments[argument] = source.valueRecovery();
        }

        RELEASE_ASSERT(inlineCallFrame->isClosureCall == !!data.calleeVariable);
        if (inlineCallFrame->isClosureCall) {
            VariableAccessData* variable = data.calleeVariable->find();
            ValueSource source = ValueSource::forFlushFormat(
                variable->machineLocal(), variable->flushFormat());
            inlineCallFrame->calleeRecovery = source.valueRecovery();
        } else
            RELEASE_ASSERT(inlineCallFrame->calleeRecovery.isConstant());
    }

    // Fix GetLocalUnlinked's variable references.
    if (hasNodesThatNeedFixup) {
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case GetLocalUnlinked: {
                    node->setUnlinkedMachineLocal(assign(allocation, node->unlinkedLocal()));
                    break;
                }

                case LoadVarargs:
                case ForwardVarargs: {
                    LoadVarargsData* data = node->loadVarargsData();
                    data->machineCount = assign(allocation, data->count);
                    data->machineStart = assign(allocation, data->start);
                    break;
                }

                default:
                    break;
                }
            }
        }
    }

    return true;
}
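// --- Editor's sketch (not WebKit source): the packing step above, isolated.
// Used locals get dense machine indices; unused ones get UINT_MAX. This
// reproduces the 1->0, 3->1, 4->2, 5->3, 7->4 example from the phase's own
// header comment. packLocals() is a hypothetical helper, not JSC's assign().
#include <cassert>
#include <climits>
#include <cstddef>
#include <vector>

std::vector<unsigned> packLocals(const std::vector<bool>& usedLocals)
{
    std::vector<unsigned> allocation(usedLocals.size(), UINT_MAX);
    unsigned nextMachineLocal = 0;
    for (size_t i = 0; i < usedLocals.size(); ++i) {
        if (usedLocals[i])
            allocation[i] = nextMachineLocal++;
    }
    return allocation;
}

int main()
{
    // Locals 1, 3, 4, 5, 7 used.
    std::vector<bool> used { false, true, false, true, true, true, false, true };
    std::vector<unsigned> allocation = packLocals(used);
    assert(allocation[1] == 0 && allocation[3] == 1 && allocation[4] == 2);
    assert(allocation[5] == 3 && allocation[7] == 4);
    assert(allocation[0] == UINT_MAX); // unused local stays unallocated
    return 0;
}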
void mergeBlocks(
    BasicBlock* firstBlock, BasicBlock* secondBlock,
    Vector<BasicBlock*, 1> jettisonedBlocks)
{
    // This will add all of the nodes in secondBlock to firstBlock, but in so doing
    // it will also ensure that any GetLocals from the second block that refer to
    // SetLocals in the first block are relinked. If jettisonedBlock is not NoBlock,
    // then Phantoms are inserted for anything that the jettisonedBlock would have
    // kept alive.

    // Remove the terminal of firstBlock since we don't need it anymore. Well, we don't
    // really remove it; we actually turn it into a Phantom.
    ASSERT(firstBlock->last()->isTerminal());
    CodeOrigin boundaryCodeOrigin = firstBlock->last()->codeOrigin;
    firstBlock->last()->convertToPhantom();
    ASSERT(firstBlock->last()->refCount() == 1);

    for (unsigned i = jettisonedBlocks.size(); i--;) {
        BasicBlock* jettisonedBlock = jettisonedBlocks[i];

        // Time to insert ghosties for things that need to be kept alive in case we OSR
        // exit prior to hitting the firstBlock's terminal, and end up going down a
        // different path than secondBlock.

        for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfArguments(); ++i)
            keepOperandAlive(firstBlock, jettisonedBlock, boundaryCodeOrigin, virtualRegisterForArgument(i));
        for (size_t i = 0; i < jettisonedBlock->variablesAtHead.numberOfLocals(); ++i)
            keepOperandAlive(firstBlock, jettisonedBlock, boundaryCodeOrigin, virtualRegisterForLocal(i));
    }

    for (size_t i = 0; i < secondBlock->phis.size(); ++i)
        firstBlock->phis.append(secondBlock->phis[i]);

    for (size_t i = 0; i < secondBlock->size(); ++i)
        firstBlock->append(secondBlock->at(i));

    ASSERT(firstBlock->last()->isTerminal());

    // Fix the predecessors of my new successors. This is tricky, since we are going to reset
    // all predecessors anyway due to reachability analysis. But we need to fix the
    // predecessors eagerly to ensure that we know what they are in case the next block we
    // consider in this phase wishes to query the predecessors of one of the blocks we
    // affected.
    for (unsigned i = firstBlock->numSuccessors(); i--;) {
        BasicBlock* successor = firstBlock->successor(i);
        for (unsigned j = 0; j < successor->predecessors.size(); ++j) {
            if (successor->predecessors[j] == secondBlock)
                successor->predecessors[j] = firstBlock;
        }
    }

    // Fix the predecessors of my former successors. Again, we'd rather not do this, but it's
    // an unfortunate necessity. See above comment.
    for (unsigned i = jettisonedBlocks.size(); i--;)
        fixJettisonedPredecessors(firstBlock, jettisonedBlocks[i]);

    firstBlock->valuesAtTail = secondBlock->valuesAtTail;
    firstBlock->cfaBranchDirection = secondBlock->cfaBranchDirection;

    m_graph.killBlock(secondBlock);
}
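// --- Editor's sketch (not WebKit source): the predecessor fixup in the middle
// of mergeBlocks is a plain pointer rewrite. After firstBlock absorbs
// secondBlock, every successor that listed secondBlock as a predecessor must
// list firstBlock instead. Stand-in types, same repair:
#include <vector>

struct BasicBlock {
    std::vector<BasicBlock*> predecessors;
    std::vector<BasicBlock*> successors;
};

void repointPredecessors(BasicBlock* firstBlock, BasicBlock* secondBlock)
{
    for (BasicBlock* successor : firstBlock->successors) {
        for (BasicBlock*& predecessor : successor->predecessors) {
            if (predecessor == secondBlock)
                predecessor = firstBlock;
        }
    }
}

int main()
{
    BasicBlock first, second, successor;
    successor.predecessors = { &second };
    first.successors = { &successor };
    repointPredecessors(&first, &second);
    return successor.predecessors[0] == &first ? 0 : 1;
}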
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call), call_and_tail_call_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_varargs), call_and_tail_call_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_forward_arguments), call_and_tail_call_forward_arguments_opcodes_must_be_same_length);

    CallLinkInfo* info = nullptr;
    if (opcodeID != op_call_eval)
        info = m_codeBlock->addCallLinkInfo();
    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments)
        compileSetupVarargsFrame(opcodeID, instruction, info);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
            Jump done = emitJumpIfNotJSCell(regT0);
            load32(Address(regT0, JSCell::structureIDOffset()), regT0);
            store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
        store32(TrustedImm32(argCount), Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.

    uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
    uint32_t locationBits = CallSiteIndex(bytecodeOffset).bits();
    store32(TrustedImm32(locationBits), Address(callFrameRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + TagOffset));

    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
    store64(regT0, Address(stackPointerRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;

    if (opcodeID == op_tail_call) {
        CallFrameShuffleData shuffleData;
        shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister;
        shuffleData.numLocals =
            instruction[4].u.operand - sizeof(CallerFrameAndPC) / sizeof(Register);
        shuffleData.args.resize(instruction[3].u.operand);
        for (int i = 0; i < instruction[3].u.operand; ++i) {
            shuffleData.args[i] =
                ValueRecovery::displacedInJSStack(
                    virtualRegisterForArgument(i) - instruction[4].u.operand,
                    DataFormatJS);
        }
        shuffleData.callee =
            ValueRecovery::inGPR(regT0, DataFormatJS);
        shuffleData.setupCalleeSaveRegisters(m_codeBlock);
        info->setFrameShuffleData(shuffleData);
        CallFrameShuffler(*this, shuffleData).prepareForTailCall();
        m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
        return;
    }

    if (opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) {
        emitRestoreCalleeSaves();
        prepareForTailCallSlow();
        m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
        return;
    }

    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
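// --- Editor's sketch (not WebKit source): the stack-pointer math in the
// non-varargs path above. The callee frame starts registerOffset slots from
// the caller frame (registerOffset is negative, since the stack grows down),
// and SP is parked just past the CallerFrameAndPC slots. Sizes are assumed
// 64-bit stand-ins.
#include <cstdint>
#include <cstdio>

constexpr int kRegisterSize = 8;          // sizeof(Register), assumed
constexpr int kCallerFrameAndPCSize = 16; // sizeof(CallerFrameAndPC), assumed

// Mirrors: addPtr(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC),
//                 callFrameRegister, stackPointerRegister)
intptr_t newStackPointer(intptr_t callFrame, int registerOffset)
{
    return callFrame + registerOffset * kRegisterSize + kCallerFrameAndPCSize;
}

int main()
{
    intptr_t callFrame = 0x10000;
    int registerOffset = -10; // callee frame carved out below the caller's
    printf("sp = %#lx\n", static_cast<unsigned long>(newStackPointer(callFrame, registerOffset)));
    return 0;
}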
SUPPRESS_ASAN
void* prepareOSREntry(
    ExecState* exec, CodeBlock* dfgCodeBlock, CodeBlock* entryCodeBlock,
    unsigned bytecodeIndex, unsigned streamIndex)
{
    VM& vm = exec->vm();
    CodeBlock* baseline = dfgCodeBlock->baselineVersion();
    ExecutableBase* executable = dfgCodeBlock->ownerExecutable();
    DFG::JITCode* dfgCode = dfgCodeBlock->jitCode()->dfg();
    ForOSREntryJITCode* entryCode = entryCodeBlock->jitCode()->ftlForOSREntry();

    if (Options::verboseOSR()) {
        dataLog(
            "FTL OSR from ", *dfgCodeBlock, " to ", *entryCodeBlock, " at bc#",
            bytecodeIndex, ".\n");
    }

    if (bytecodeIndex)
        jsCast<ScriptExecutable*>(executable)->setDidTryToEnterInLoop(true);

    if (bytecodeIndex != entryCode->bytecodeIndex()) {
        if (Options::verboseOSR()) {
            dataLog(
                "    OSR failed because we don't have an entrypoint for bc#",
                bytecodeIndex, "; ours is for bc#", entryCode->bytecodeIndex(), "\n");
        }
        return 0;
    }

    Operands<JSValue> values;
    dfgCode->reconstruct(
        exec, dfgCodeBlock, CodeOrigin(bytecodeIndex), streamIndex, values);

    if (Options::verboseOSR())
        dataLog("    Values at entry: ", values, "\n");

    for (int argument = values.numberOfArguments(); argument--;) {
        JSValue valueOnStack = exec->r(virtualRegisterForArgument(argument).offset()).asanUnsafeJSValue();
        JSValue reconstructedValue = values.argument(argument);
        if (valueOnStack == reconstructedValue || !argument)
            continue;
        dataLog("Mismatch between reconstructed values and the value on the stack for argument arg", argument, " for ", *entryCodeBlock, " at bc#", bytecodeIndex, ":\n");
        dataLog("    Value on stack: ", valueOnStack, "\n");
        dataLog("    Reconstructed value: ", reconstructedValue, "\n");
        RELEASE_ASSERT_NOT_REACHED();
    }

    RELEASE_ASSERT(
        static_cast<int>(values.numberOfLocals()) == baseline->m_numCalleeRegisters);

    EncodedJSValue* scratch = static_cast<EncodedJSValue*>(
        entryCode->entryBuffer()->dataBuffer());

    for (int local = values.numberOfLocals(); local--;)
        scratch[local] = JSValue::encode(values.local(local));

    int stackFrameSize = entryCode->common.requiredRegisterCountForExecutionAndExit();
    if (!vm.interpreter->stack().ensureCapacityFor(&exec->registers()[virtualRegisterForLocal(stackFrameSize - 1).offset()])) {
        if (Options::verboseOSR())
            dataLog("    OSR failed because stack growth failed.\n");
        return 0;
    }

    exec->setCodeBlock(entryCodeBlock);

    void* result = entryCode->addressForCall(
        vm, executable, ArityCheckNotRequired,
        RegisterPreservationNotRequired).executableAddress();
    if (Options::verboseOSR())
        dataLog("    Entry will succeed, going to address ", RawPointer(result), "\n");

    return result;
}
bool run()
{
    SharedSymbolTable* symbolTable = codeBlock()->symbolTable();

    // This enumerates the locals that we actually care about and packs them. So for example
    // if we use local 1, 3, 4, 5, 7, then we remap them: 1->0, 3->1, 4->2, 5->3, 7->4. We
    // treat a variable as being "used" if there exists an access to it (SetLocal, GetLocal,
    // Flush, PhantomLocal).

    BitVector usedLocals;

    // Collect those variables that are used from IR.
    bool hasGetLocalUnlinked = false;
    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        for (unsigned nodeIndex = block->size(); nodeIndex--;) {
            Node* node = block->at(nodeIndex);
            switch (node->op()) {
            case GetLocal:
            case SetLocal:
            case Flush:
            case PhantomLocal: {
                VariableAccessData* variable = node->variableAccessData();
                if (variable->local().isArgument())
                    break;
                usedLocals.set(variable->local().toLocal());
                break;
            }

            case GetLocalUnlinked: {
                VirtualRegister operand = node->unlinkedLocal();
                if (operand.isArgument())
                    break;
                usedLocals.set(operand.toLocal());
                hasGetLocalUnlinked = true;
                break;
            }

            default:
                break;
            }
        }
    }

    // Ensure that captured variables and captured inline arguments are pinned down.
    // They should have been because of flushes, except that the flushes can be optimized
    // away.
    if (symbolTable) {
        for (int i = symbolTable->captureStart(); i > symbolTable->captureEnd(); i--)
            usedLocals.set(VirtualRegister(i).toLocal());
    }

    if (codeBlock()->usesArguments()) {
        usedLocals.set(codeBlock()->argumentsRegister().toLocal());
        usedLocals.set(unmodifiedArgumentsRegister(codeBlock()->argumentsRegister()).toLocal());
    }

    if (codeBlock()->uncheckedActivationRegister().isValid())
        usedLocals.set(codeBlock()->activationRegister().toLocal());

    for (InlineCallFrameSet::iterator iter = m_graph.m_inlineCallFrames->begin(); !!iter; ++iter) {
        InlineCallFrame* inlineCallFrame = *iter;
        if (!inlineCallFrame->executable->usesArguments())
            continue;

        VirtualRegister argumentsRegister = m_graph.argumentsRegisterFor(inlineCallFrame);
        usedLocals.set(argumentsRegister.toLocal());
        usedLocals.set(unmodifiedArgumentsRegister(argumentsRegister).toLocal());

        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            usedLocals.set(VirtualRegister(
                virtualRegisterForArgument(argument).offset()
                + inlineCallFrame->stackOffset).toLocal());
        }
    }

    Vector<unsigned> allocation(usedLocals.size());
    m_graph.m_nextMachineLocal = 0;
    for (unsigned i = 0; i < usedLocals.size(); ++i) {
        if (!usedLocals.get(i)) {
            allocation[i] = UINT_MAX;
            continue;
        }

        allocation[i] = m_graph.m_nextMachineLocal++;
    }

    for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
        VariableAccessData* variable = &m_graph.m_variableAccessData[i];
        if (!variable->isRoot())
            continue;

        if (variable->local().isArgument()) {
            variable->machineLocal() = variable->local();
            continue;
        }

        size_t local = variable->local().toLocal();
        if (local >= allocation.size())
            continue;

        if (allocation[local] == UINT_MAX)
            continue;

        variable->machineLocal() = virtualRegisterForLocal(
            allocation[variable->local().toLocal()]);
    }

    if (codeBlock()->usesArguments()) {
        VirtualRegister argumentsRegister = virtualRegisterForLocal(
            allocation[codeBlock()->argumentsRegister().toLocal()]);
        RELEASE_ASSERT(
            virtualRegisterForLocal(allocation[
                unmodifiedArgumentsRegister(
                    codeBlock()->argumentsRegister()).toLocal()])
            == unmodifiedArgumentsRegister(argumentsRegister));
        codeBlock()->setArgumentsRegister(argumentsRegister);
    }

    if (codeBlock()->uncheckedActivationRegister().isValid()) {
        codeBlock()->setActivationRegister(
            virtualRegisterForLocal(allocation[codeBlock()->activationRegister().toLocal()]));
    }

    for (unsigned i = m_graph.m_inlineVariableData.size(); i--;) {
        InlineVariableData data = m_graph.m_inlineVariableData[i];
        InlineCallFrame* inlineCallFrame = data.inlineCallFrame;

        if (inlineCallFrame->executable->usesArguments()) {
            inlineCallFrame->argumentsRegister = virtualRegisterForLocal(
                allocation[m_graph.argumentsRegisterFor(inlineCallFrame).toLocal()]);

            RELEASE_ASSERT(
                virtualRegisterForLocal(allocation[unmodifiedArgumentsRegister(
                    m_graph.argumentsRegisterFor(inlineCallFrame)).toLocal()])
                == unmodifiedArgumentsRegister(inlineCallFrame->argumentsRegister));
        }

        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            ArgumentPosition& position = m_graph.m_argumentPositions[
                data.argumentPositionStart + argument];
            VariableAccessData* variable = position.someVariable();
            ValueSource source;
            if (!variable)
                source = ValueSource(SourceIsDead);
            else {
                source = ValueSource::forFlushFormat(
                    variable->machineLocal(), variable->flushFormat());
            }
            inlineCallFrame->arguments[argument] = source.valueRecovery();
        }

        RELEASE_ASSERT(inlineCallFrame->isClosureCall == !!data.calleeVariable);
        if (inlineCallFrame->isClosureCall) {
            ValueSource source = ValueSource::forFlushFormat(
                data.calleeVariable->machineLocal(), data.calleeVariable->flushFormat());
            inlineCallFrame->calleeRecovery = source.valueRecovery();
        } else
            RELEASE_ASSERT(inlineCallFrame->calleeRecovery.isConstant());
    }

    if (symbolTable) {
        if (symbolTable->captureCount()) {
            unsigned captureStartLocal = allocation[
                VirtualRegister(codeBlock()->symbolTable()->captureStart()).toLocal()];
            ASSERT(captureStartLocal != UINT_MAX);
            m_graph.m_machineCaptureStart = virtualRegisterForLocal(captureStartLocal).offset();
        } else
            m_graph.m_machineCaptureStart = virtualRegisterForLocal(0).offset();

        // This is an abomination. If we had captured an argument then the argument ends
        // up being "slow", meaning that loads of the argument go through an extra lookup
        // table.
        if (const SlowArgument* slowArguments = symbolTable->slowArguments()) {
            auto newSlowArguments = std::make_unique<SlowArgument[]>(
                symbolTable->parameterCount());
            for (size_t i = symbolTable->parameterCount(); i--;) {
                newSlowArguments[i] = slowArguments[i];

                VirtualRegister reg = VirtualRegister(slowArguments[i].index);
                if (reg.isLocal())
                    newSlowArguments[i].index = virtualRegisterForLocal(allocation[reg.toLocal()]).offset();
            }

            m_graph.m_slowArguments = std::move(newSlowArguments);
        }
    }

    // Fix GetLocalUnlinked's variable references.
    if (hasGetLocalUnlinked) {
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case GetLocalUnlinked: {
                    VirtualRegister operand = node->unlinkedLocal();
                    if (operand.isLocal())
                        operand = virtualRegisterForLocal(allocation[operand.toLocal()]);
                    node->setUnlinkedMachineLocal(operand);
                    break;
                }

                default:
                    break;
                }
            }
        }
    }

    return true;
}