Node* emitCodeToGetArgumentsArrayLength(
    InsertionSet& insertionSet, Node* arguments, unsigned nodeIndex, NodeOrigin origin)
{
    Graph& graph = insertionSet.graph();

    DFG_ASSERT(
        graph, arguments,
        arguments->op() == CreateDirectArguments || arguments->op() == CreateScopedArguments
        || arguments->op() == CreateClonedArguments
        || arguments->op() == PhantomDirectArguments || arguments->op() == PhantomClonedArguments);

    InlineCallFrame* inlineCallFrame = arguments->origin.semantic.inlineCallFrame;

    if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
        return insertionSet.insertConstant(
            nodeIndex, origin, jsNumber(inlineCallFrame->arguments.size() - 1));
    }

    Node* argumentCount;
    if (!inlineCallFrame)
        argumentCount = insertionSet.insertNode(nodeIndex, SpecInt32, GetArgumentCount, origin);
    else {
        VirtualRegister argumentCountRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount);
        argumentCount = insertionSet.insertNode(
            nodeIndex, SpecInt32, GetStack, origin,
            OpInfo(graph.m_stackAccessData.add(argumentCountRegister, FlushedInt32)));
    }

    return insertionSet.insertNode(
        nodeIndex, SpecInt32, ArithSub, origin, OpInfo(Arith::Unchecked),
        Edge(argumentCount, Int32Use),
        insertionSet.insertConstantForUse(
            nodeIndex, origin, jsNumber(1), Int32Use));
}
Node* emitCodeToGetArgumentsArrayLength(
    InsertionSet& insertionSet, Node* arguments, unsigned nodeIndex, NodeOrigin origin)
{
    Graph& graph = insertionSet.graph();

    DFG_ASSERT(
        graph, arguments,
        arguments->op() == CreateDirectArguments || arguments->op() == CreateScopedArguments
        || arguments->op() == CreateClonedArguments || arguments->op() == CreateRest
        || arguments->op() == PhantomDirectArguments || arguments->op() == PhantomClonedArguments
        || arguments->op() == PhantomCreateRest);

    InlineCallFrame* inlineCallFrame = arguments->origin.semantic.inlineCallFrame;

    unsigned numberOfArgumentsToSkip = 0;
    if (arguments->op() == CreateRest || arguments->op() == PhantomCreateRest)
        numberOfArgumentsToSkip = arguments->numberOfArgumentsToSkip();

    if (inlineCallFrame && !inlineCallFrame->isVarargs()) {
        unsigned argumentsSize = inlineCallFrame->arguments.size() - 1;
        if (argumentsSize >= numberOfArgumentsToSkip)
            argumentsSize -= numberOfArgumentsToSkip;
        else
            argumentsSize = 0;
        return insertionSet.insertConstant(
            nodeIndex, origin, jsNumber(argumentsSize));
    }

    Node* argumentCount;
    if (!inlineCallFrame)
        argumentCount = insertionSet.insertNode(nodeIndex, SpecInt32Only, GetArgumentCountIncludingThis, origin);
    else {
        VirtualRegister argumentCountRegister(inlineCallFrame->stackOffset + CallFrameSlot::argumentCount);
        argumentCount = insertionSet.insertNode(
            nodeIndex, SpecInt32Only, GetStack, origin,
            OpInfo(graph.m_stackAccessData.add(argumentCountRegister, FlushedInt32)));
    }

    Node* result = insertionSet.insertNode(
        nodeIndex, SpecInt32Only, ArithSub, origin, OpInfo(Arith::Unchecked),
        Edge(argumentCount, Int32Use),
        insertionSet.insertConstantForUse(
            nodeIndex, origin, jsNumber(1 + numberOfArgumentsToSkip), Int32Use));

    if (numberOfArgumentsToSkip) {
        // The above subtraction may produce a negative number if this number is non-zero. We correct that here.
        result = insertionSet.insertNode(
            nodeIndex, SpecInt32Only, ArithMax, origin,
            Edge(result, Int32Use),
            insertionSet.insertConstantForUse(nodeIndex, origin, jsNumber(0), Int32Use));
        result->setResult(NodeResultInt32);
    }

    return result;
}
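At run time, the node chain emitted above reduces to one subtraction and, when arguments are skipped, one clamp. The following is a minimal standalone sketch of that arithmetic; it is a hypothetical helper for illustration, not JSC code, and it assumes the argument count including |this| and the skip count are already known.

#include <algorithm>
#include <cstdint>

// Value computed by the ArithSub + ArithMax pair in emitCodeToGetArgumentsArrayLength:
// drop the |this| slot and the skipped leading arguments, never going below zero.
static int32_t argumentsArrayLength(int32_t argumentCountIncludingThis, int32_t numberOfArgumentsToSkip)
{
    int32_t length = argumentCountIncludingThis - 1 - numberOfArgumentsToSkip;
    return std::max<int32_t>(length, 0);
}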
void LocalOSRAvailabilityCalculator::executeNode(Node* node)
{
    switch (node->op()) {
    case PutStack: {
        StackAccessData* data = node->stackAccessData();
        m_availability.m_locals.operand(data->local).setFlush(data->flushedAt());
        break;
    }

    case KillStack: {
        m_availability.m_locals.operand(node->unlinkedLocal()).setFlush(FlushedAt(ConflictingFlush));
        break;
    }

    case GetStack: {
        StackAccessData* data = node->stackAccessData();
        m_availability.m_locals.operand(data->local) = Availability(node, data->flushedAt());
        break;
    }

    case MovHint: {
        m_availability.m_locals.operand(node->unlinkedLocal()).setNode(node->child1().node());
        break;
    }

    case ZombieHint: {
        m_availability.m_locals.operand(node->unlinkedLocal()).setNodeUnavailable();
        break;
    }

    case LoadVarargs:
    case ForwardVarargs: {
        LoadVarargsData* data = node->loadVarargsData();
        m_availability.m_locals.operand(data->count) =
            Availability(FlushedAt(FlushedInt32, data->machineCount));
        for (unsigned i = data->limit; i--;) {
            m_availability.m_locals.operand(VirtualRegister(data->start.offset() + i)) =
                Availability(FlushedAt(FlushedJSValue, VirtualRegister(data->machineStart.offset() + i)));
        }
        break;
    }

    case PhantomCreateRest:
    case PhantomDirectArguments:
    case PhantomClonedArguments: {
        InlineCallFrame* inlineCallFrame = node->origin.semantic.inlineCallFrame;
        if (!inlineCallFrame) {
            // We don't need to record anything about how the arguments are to be recovered. It's just a
            // given that we can read them from the stack.
            break;
        }

        unsigned numberOfArgumentsToSkip = 0;
        if (node->op() == PhantomCreateRest)
            numberOfArgumentsToSkip = node->numberOfArgumentsToSkip();

        if (inlineCallFrame->isVarargs()) {
            // Record how to read each argument and the argument count.
            Availability argumentCount = m_availability.m_locals.operand(
                inlineCallFrame->stackOffset + CallFrameSlot::argumentCount);
            m_availability.m_heap.set(PromotedHeapLocation(ArgumentCountPLoc, node), argumentCount);
        }

        if (inlineCallFrame->isClosureCall) {
            Availability callee = m_availability.m_locals.operand(
                inlineCallFrame->stackOffset + CallFrameSlot::callee);
            m_availability.m_heap.set(PromotedHeapLocation(ArgumentsCalleePLoc, node), callee);
        }

        for (unsigned i = numberOfArgumentsToSkip; i < inlineCallFrame->arguments.size() - 1; ++i) {
            Availability argument = m_availability.m_locals.operand(
                inlineCallFrame->stackOffset + CallFrame::argumentOffset(i));
            m_availability.m_heap.set(PromotedHeapLocation(ArgumentPLoc, node, i), argument);
        }
        break;
    }

    case PutHint: {
        m_availability.m_heap.set(
            PromotedHeapLocation(node->child1().node(), node->promotedLocationDescriptor()),
            Availability(node->child2().node()));
        break;
    }

    case PhantomSpread:
        m_availability.m_heap.set(
            PromotedHeapLocation(SpreadPLoc, node), Availability(node->child1().node()));
        break;

    case PhantomNewArrayWithSpread:
        for (unsigned i = 0; i < node->numChildren(); i++) {
            Node* child = m_graph.varArgChild(node, i).node();
            m_availability.m_heap.set(
                PromotedHeapLocation(NewArrayWithSpreadArgumentPLoc, node, i), Availability(child));
        }
        break;

    default:
        break;
    }
}
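The phantom-arguments cases above effectively populate a side table keyed by (promoted location kind, allocation node, argument index) whose entries say where each value can be recovered at OSR exit. Below is a stripped-down sketch of that shape, using hypothetical stand-ins for JSC's real PromotedHeapLocation and Availability types, purely to illustrate the bookkeeping.

#include <map>
#include <tuple>

// Hypothetical illustration only; the real classes are JSC's PromotedHeapLocation and Availability.
enum class PLocKind { ArgumentCount, ArgumentsCallee, Argument };

struct Recovery {
    const void* node = nullptr; // a DFG node that still produces the value, if any
    int flushedOperand = 0;     // or the stack operand it was flushed to
};

// Keyed by (kind, phantom allocation node, argument index), mirroring calls such as
// m_availability.m_heap.set(PromotedHeapLocation(ArgumentPLoc, node, i), argument).
using HeapAvailability = std::map<std::tuple<PLocKind, const void*, unsigned>, Recovery>;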
void reifyInlinedCallFrames(CCallHelpers& jit, const OSRExitBase& exit)
{
    ASSERT(jit.baselineCodeBlock()->jitType() == JITCode::BaselineJIT);
    jit.storePtr(AssemblyHelpers::TrustedImmPtr(jit.baselineCodeBlock()),
        AssemblyHelpers::addressFor((VirtualRegister)JSStack::CodeBlock));

    CodeOrigin codeOrigin;
    for (codeOrigin = exit.m_codeOrigin; codeOrigin.inlineCallFrame; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        InlineCallFrame* inlineCallFrame = codeOrigin.inlineCallFrame;
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        CodeBlock* baselineCodeBlockForCaller = jit.baselineCodeBlockFor(inlineCallFrame->caller);
        void* jumpTarget = nullptr;
        void* trueReturnPC = nullptr;

        unsigned callBytecodeIndex = inlineCallFrame->caller.bytecodeIndex;

        switch (inlineCallFrame->kind) {
        case InlineCallFrame::Call:
        case InlineCallFrame::Construct:
        case InlineCallFrame::CallVarargs:
        case InlineCallFrame::ConstructVarargs: {
            CallLinkInfo* callLinkInfo =
                baselineCodeBlockForCaller->getCallLinkInfoForBytecodeIndex(callBytecodeIndex);
            RELEASE_ASSERT(callLinkInfo);
            jumpTarget = callLinkInfo->callReturnLocation().executableAddress();
            break;
        }

        case InlineCallFrame::GetterCall:
        case InlineCallFrame::SetterCall: {
            StructureStubInfo* stubInfo =
                baselineCodeBlockForCaller->findStubInfo(CodeOrigin(callBytecodeIndex));
            RELEASE_ASSERT(stubInfo);

            switch (inlineCallFrame->kind) {
            case InlineCallFrame::GetterCall:
                jumpTarget = jit.vm()->getCTIStub(baselineGetterReturnThunkGenerator).code().executableAddress();
                break;
            case InlineCallFrame::SetterCall:
                jumpTarget = jit.vm()->getCTIStub(baselineSetterReturnThunkGenerator).code().executableAddress();
                break;
            default:
                RELEASE_ASSERT_NOT_REACHED();
                break;
            }

            trueReturnPC = stubInfo->callReturnLocation.labelAtOffset(
                stubInfo->patch.deltaCallToDone).executableAddress();
            break;
        }
        }

        GPRReg callerFrameGPR;
        if (inlineCallFrame->caller.inlineCallFrame) {
            jit.addPtr(
                AssemblyHelpers::TrustedImm32(inlineCallFrame->caller.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)),
                GPRInfo::callFrameRegister, GPRInfo::regT3);
            callerFrameGPR = GPRInfo::regT3;
        } else
            callerFrameGPR = GPRInfo::callFrameRegister;

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(jumpTarget),
            AssemblyHelpers::addressForByteOffset(inlineCallFrame->returnPCOffset()));
        if (trueReturnPC) {
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(trueReturnPC),
                AssemblyHelpers::addressFor(
                    inlineCallFrame->stackOffset + virtualRegisterForArgument(inlineCallFrame->arguments.size()).offset()));
        }

        jit.storePtr(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock),
            AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::CodeBlock)));
        if (!inlineCallFrame->isVarargs()) {
            jit.store32(AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()),
                AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        }

#if USE(JSVALUE64)
        jit.store64(callerFrameGPR,
            AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits),
            AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        if (!inlineCallFrame->isClosureCall) {
            jit.store64(AssemblyHelpers::TrustedImm64(JSValue::encode(JSValue(inlineCallFrame->calleeConstant()))),
                AssemblyHelpers::addressFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        }
#else // USE(JSVALUE64): the 32-bit path
        jit.storePtr(callerFrameGPR,
            AssemblyHelpers::addressForByteOffset(inlineCallFrame->callerFrameOffset()));
        Instruction* instruction = baselineCodeBlock->instructions().begin() + codeOrigin.bytecodeIndex;
        uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
        jit.store32(AssemblyHelpers::TrustedImm32(locationBits),
            AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::ArgumentCount)));
        jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag),
            AssemblyHelpers::tagFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        if (!inlineCallFrame->isClosureCall) {
            jit.storePtr(AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeConstant()),
                AssemblyHelpers::payloadFor((VirtualRegister)(inlineCallFrame->stackOffset + JSStack::Callee)));
        }
#endif // USE(JSVALUE64)
    }

#if USE(JSVALUE64)
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(codeOrigin.bytecodeIndex);
#else
    Instruction* instruction = jit.baselineCodeBlock()->instructions().begin() + codeOrigin.bytecodeIndex;
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
#endif
    jit.store32(AssemblyHelpers::TrustedImm32(locationBits),
        AssemblyHelpers::tagFor((VirtualRegister)(JSStack::ArgumentCount)));
}
void OSRExitCompiler::emitRestoreArguments(const Operands<ValueRecovery>& operands)
{
    HashMap<MinifiedID, int> alreadyAllocatedArguments; // Maps phantom arguments node ID to operand.
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        int operand = operands.operandForIndex(index);

        if (recovery.technique() != DirectArgumentsThatWereNotCreated
            && recovery.technique() != ClonedArgumentsThatWereNotCreated)
            continue;

        MinifiedID id = recovery.nodeID();
        auto iter = alreadyAllocatedArguments.find(id);
        if (iter != alreadyAllocatedArguments.end()) {
            JSValueRegs regs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT1);
            m_jit.loadValue(CCallHelpers::addressFor(iter->value), regs);
            m_jit.storeValue(regs, CCallHelpers::addressFor(operand));
            continue;
        }

        InlineCallFrame* inlineCallFrame =
            m_jit.codeBlock()->jitCode()->dfg()->minifiedDFG.at(id)->inlineCallFrame();

        int stackOffset;
        if (inlineCallFrame)
            stackOffset = inlineCallFrame->stackOffset;
        else
            stackOffset = 0;

        if (!inlineCallFrame || inlineCallFrame->isClosureCall) {
            m_jit.loadPtr(
                AssemblyHelpers::addressFor(stackOffset + JSStack::Callee),
                GPRInfo::regT0);
        } else {
            m_jit.move(
                AssemblyHelpers::TrustedImmPtr(inlineCallFrame->calleeRecovery.constant().asCell()),
                GPRInfo::regT0);
        }

        if (!inlineCallFrame || inlineCallFrame->isVarargs()) {
            m_jit.load32(
                AssemblyHelpers::payloadFor(stackOffset + JSStack::ArgumentCount),
                GPRInfo::regT1);
        } else {
            m_jit.move(
                AssemblyHelpers::TrustedImm32(inlineCallFrame->arguments.size()),
                GPRInfo::regT1);
        }

        m_jit.setupArgumentsWithExecState(
            AssemblyHelpers::TrustedImmPtr(inlineCallFrame), GPRInfo::regT0, GPRInfo::regT1);
        switch (recovery.technique()) {
        case DirectArgumentsThatWereNotCreated:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateDirectArgumentsDuringExit)), GPRInfo::nonArgGPR0);
            break;
        case ClonedArgumentsThatWereNotCreated:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(bitwise_cast<void*>(operationCreateClonedArgumentsDuringExit)), GPRInfo::nonArgGPR0);
            break;
        default:
            RELEASE_ASSERT_NOT_REACHED();
            break;
        }
        m_jit.call(GPRInfo::nonArgGPR0);
        m_jit.storeCell(GPRInfo::returnValueGPR, AssemblyHelpers::addressFor(operand));

        alreadyAllocatedArguments.add(id, operand);
    }
}
bool run()
{
    // This enumerates the locals that we actually care about and packs them. So for example
    // if we use local 1, 3, 4, 5, 7, then we remap them: 1->0, 3->1, 4->2, 5->3, 7->4. We
    // treat a variable as being "used" if there exists an access to it (SetLocal, GetLocal,
    // Flush, PhantomLocal).

    BitVector usedLocals;

    // Collect those variables that are used from IR.
    bool hasNodesThatNeedFixup = false;
    for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
        BasicBlock* block = m_graph.block(blockIndex);
        if (!block)
            continue;
        for (unsigned nodeIndex = block->size(); nodeIndex--;) {
            Node* node = block->at(nodeIndex);
            switch (node->op()) {
            case GetLocal:
            case SetLocal:
            case Flush:
            case PhantomLocal: {
                VariableAccessData* variable = node->variableAccessData();
                if (variable->local().isArgument())
                    break;
                usedLocals.set(variable->local().toLocal());
                break;
            }

            case GetLocalUnlinked: {
                VirtualRegister operand = node->unlinkedLocal();
                if (operand.isArgument())
                    break;
                usedLocals.set(operand.toLocal());
                hasNodesThatNeedFixup = true;
                break;
            }

            case LoadVarargs:
            case ForwardVarargs: {
                LoadVarargsData* data = node->loadVarargsData();
                if (data->count.isLocal())
                    usedLocals.set(data->count.toLocal());
                if (data->start.isLocal()) {
                    // This part really relies on the contiguity of stack layout
                    // assignments.
                    ASSERT(VirtualRegister(data->start.offset() + data->limit - 1).isLocal());
                    for (unsigned i = data->limit; i--;)
                        usedLocals.set(VirtualRegister(data->start.offset() + i).toLocal());
                }
                // The else case shouldn't happen.
                hasNodesThatNeedFixup = true;
                break;
            }

            case PutStack:
            case GetStack: {
                StackAccessData* stack = node->stackAccessData();
                if (stack->local.isArgument())
                    break;
                usedLocals.set(stack->local.toLocal());
                break;
            }

            default:
                break;
            }
        }
    }

    for (InlineCallFrameSet::iterator iter = m_graph.m_plan.inlineCallFrames->begin(); !!iter; ++iter) {
        InlineCallFrame* inlineCallFrame = *iter;

        if (inlineCallFrame->isVarargs()) {
            usedLocals.set(VirtualRegister(
                JSStack::ArgumentCount + inlineCallFrame->stackOffset).toLocal());
        }

        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            usedLocals.set(VirtualRegister(
                virtualRegisterForArgument(argument).offset() + inlineCallFrame->stackOffset).toLocal());
        }
    }

    Vector<unsigned> allocation(usedLocals.size());
    m_graph.m_nextMachineLocal = 0;
    for (unsigned i = 0; i < usedLocals.size(); ++i) {
        if (!usedLocals.get(i)) {
            allocation[i] = UINT_MAX;
            continue;
        }
        allocation[i] = m_graph.m_nextMachineLocal++;
    }

    for (unsigned i = m_graph.m_variableAccessData.size(); i--;) {
        VariableAccessData* variable = &m_graph.m_variableAccessData[i];
        if (!variable->isRoot())
            continue;

        if (variable->local().isArgument()) {
            variable->machineLocal() = variable->local();
            continue;
        }

        size_t local = variable->local().toLocal();
        if (local >= allocation.size())
            continue;

        if (allocation[local] == UINT_MAX)
            continue;

        variable->machineLocal() = assign(allocation, variable->local());
    }

    for (StackAccessData* data : m_graph.m_stackAccessData) {
        if (!data->local.isLocal()) {
            data->machineLocal = data->local;
            continue;
        }

        if (static_cast<size_t>(data->local.toLocal()) >= allocation.size())
            continue;
        if (allocation[data->local.toLocal()] == UINT_MAX)
            continue;

        data->machineLocal = assign(allocation, data->local);
    }

    // This register is never valid for DFG code blocks.
    codeBlock()->setActivationRegister(VirtualRegister());
    if (LIKELY(!m_graph.hasDebuggerEnabled()))
        codeBlock()->setScopeRegister(VirtualRegister());
    else
        codeBlock()->setScopeRegister(assign(allocation, codeBlock()->scopeRegister()));

    for (unsigned i = m_graph.m_inlineVariableData.size(); i--;) {
        InlineVariableData data = m_graph.m_inlineVariableData[i];
        InlineCallFrame* inlineCallFrame = data.inlineCallFrame;

        if (inlineCallFrame->isVarargs()) {
            inlineCallFrame->argumentCountRegister = assign(
                allocation, VirtualRegister(inlineCallFrame->stackOffset + JSStack::ArgumentCount));
        }

        for (unsigned argument = inlineCallFrame->arguments.size(); argument-- > 1;) {
            ArgumentPosition& position = m_graph.m_argumentPositions[
                data.argumentPositionStart + argument];
            VariableAccessData* variable = position.someVariable();
            ValueSource source;
            if (!variable)
                source = ValueSource(SourceIsDead);
            else {
                source = ValueSource::forFlushFormat(
                    variable->machineLocal(), variable->flushFormat());
            }
            inlineCallFrame->arguments[argument] = source.valueRecovery();
        }

        RELEASE_ASSERT(inlineCallFrame->isClosureCall == !!data.calleeVariable);
        if (inlineCallFrame->isClosureCall) {
            VariableAccessData* variable = data.calleeVariable->find();
            ValueSource source = ValueSource::forFlushFormat(
                variable->machineLocal(), variable->flushFormat());
            inlineCallFrame->calleeRecovery = source.valueRecovery();
        } else
            RELEASE_ASSERT(inlineCallFrame->calleeRecovery.isConstant());
    }

    // Fix GetLocalUnlinked's variable references.
    if (hasNodesThatNeedFixup) {
        for (BlockIndex blockIndex = m_graph.numBlocks(); blockIndex--;) {
            BasicBlock* block = m_graph.block(blockIndex);
            if (!block)
                continue;
            for (unsigned nodeIndex = block->size(); nodeIndex--;) {
                Node* node = block->at(nodeIndex);
                switch (node->op()) {
                case GetLocalUnlinked: {
                    node->setUnlinkedMachineLocal(assign(allocation, node->unlinkedLocal()));
                    break;
                }

                case LoadVarargs:
                case ForwardVarargs: {
                    LoadVarargsData* data = node->loadVarargsData();
                    data->machineCount = assign(allocation, data->count);
                    data->machineStart = assign(allocation, data->start);
                    break;
                }

                default:
                    break;
                }
            }
        }
    }

    return true;
}
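The remapping described in the opening comment of run() (used locals 1, 3, 4, 5, 7 become 0, 1, 2, 3, 4) is just a dense renumbering of the set bits in the used-locals bitmap. Below is a standalone sketch of that packing step; it is a hypothetical helper using std::vector<bool> in place of WebKit's BitVector, not the phase's actual code.

#include <climits>
#include <vector>

// Pack used local indices densely; unused locals get UINT_MAX, mirroring the
// allocation vector built in run() before machineLocal assignment.
std::vector<unsigned> packUsedLocals(const std::vector<bool>& usedLocals)
{
    std::vector<unsigned> allocation(usedLocals.size(), UINT_MAX);
    unsigned nextMachineLocal = 0;
    for (size_t i = 0; i < usedLocals.size(); ++i) {
        if (usedLocals[i])
            allocation[i] = nextMachineLocal++;
    }
    return allocation;
}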