void AssemblyHelpers::callExceptionFuzz()
{
    if (!Options::enableExceptionFuzz())
        return;

    ASSERT(stackAlignmentBytes() >= sizeof(void*) * 2);
    subPtr(TrustedImm32(stackAlignmentBytes()), stackPointerRegister);
    poke(GPRInfo::returnValueGPR, 0);
    poke(GPRInfo::returnValueGPR2, 1);
    move(TrustedImmPtr(bitwise_cast<void*>(operationExceptionFuzz)), GPRInfo::nonPreservedNonReturnGPR);
    call(GPRInfo::nonPreservedNonReturnGPR);
    peek(GPRInfo::returnValueGPR, 0);
    peek(GPRInfo::returnValueGPR2, 1);
    addPtr(TrustedImm32(stackAlignmentBytes()), stackPointerRegister);
}
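// A minimal, self-contained sketch (not JSC code) of the pattern above: carve
// an aligned scratch area below the stack pointer, map poke/peek slot indices
// to addresses within it, and restore the stack afterwards. The alignment
// value and addresses are assumptions for the demo.
#include <cassert>
#include <cstdint>
#include <cstdio>

int main()
{
    constexpr uintptr_t kStackAlignment = 16; // assumed, as on x86-64/ARM64
    uintptr_t sp = 0x7fff0000;                // pretend stack pointer (aligned)

    sp -= kStackAlignment;                    // subPtr(stackAlignmentBytes(), sp)
    assert(kStackAlignment >= sizeof(void*) * 2); // room to spill two GPRs
    uintptr_t slot0 = sp + 0 * sizeof(void*);     // where poke(reg, 0) writes
    uintptr_t slot1 = sp + 1 * sizeof(void*);     // where poke(reg, 1) writes
    printf("slot0=%p slot1=%p\n", (void*)slot0, (void*)slot1);
    sp += kStackAlignment;                    // addPtr restores the frame
    return 0;
}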
void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
{
    if (opcodeID == op_call_eval) {
        compileCallEvalSlowCase(instruction, iter);
        return;
    }

    linkSlowCase(iter);
    linkSlowCase(iter);

    move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);

    m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).code());

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}
void JITCompiler::compileExceptionHandlers()
{
    if (!m_exceptionChecksWithCallFrameRollback.empty()) {
        m_exceptionChecksWithCallFrameRollback.link(this);
        copyCalleeSavesToVMCalleeSavesBuffer();

        // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
        addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));
        jumpToExceptionHandler();
    }

    if (!m_exceptionChecks.empty()) {
        m_exceptionChecks.link(this);
        copyCalleeSavesToVMCalleeSavesBuffer();

        // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
        move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);

#if CPU(X86)
        // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
        poke(GPRInfo::argumentGPR0);
        poke(GPRInfo::argumentGPR1, 1);
#endif
        m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
        jumpToExceptionHandler();
    }
}
template<typename Op>
std::enable_if_t<
    Op::opcodeID != op_call_varargs && Op::opcodeID != op_construct_varargs
    && Op::opcodeID != op_tail_call_varargs && Op::opcodeID != op_tail_call_forward_arguments
, void>
JIT::compileSetupFrame(const Op& bytecode, CallLinkInfo*)
{
    auto& metadata = bytecode.metadata(m_codeBlock);
    int argCount = bytecode.m_argc;
    int registerOffset = -static_cast<int>(bytecode.m_argv);

    if (Op::opcodeID == op_call && shouldEmitProfiling()) {
        emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1);
        Jump done = branchIfNotCell(regT0);
        load32(Address(regT1, JSCell::structureIDOffset()), regT1);
        store32(regT1, metadata.m_arrayProfile.addressOfLastSeenStructureID());
        done.link(this);
    }

    addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
    store32(TrustedImm32(argCount), Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
}
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
    int callee = instruction[1].u.operand;
    int argCount = instruction[2].u.operand;
    int registerOffset = instruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);

    // Fast check for JS function.
    Jump callLinkFailNotObject = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
    Jump callLinkFailNotJSFunction = emitJumpIfNotType(regT0, regT1, JSFunctionType);

    // Speculatively roll the callframe, assuming argCount will match the arity.
    store32(TrustedImm32(JSValue::CellTag), tagFor(RegisterFile::CallerFrame + registerOffset, callFrameRegister));
    storePtr(callFrameRegister, payloadFor(RegisterFile::CallerFrame + registerOffset, callFrameRegister));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argCount), regT1);

    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstructLink() : m_globalData->jitStubs->ctiVirtualCallLink());

    // Done! - return back to the hot path.
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));

    // This handles host functions
    callLinkFailNotJSFunction.link(this);
    move(TrustedImm32(JSValue::CellTag), regT1); // Restore cell tag since it was clobbered.
    callLinkFailNotObject.link(this);

    JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
    stubCall.addArgument(callee);
    stubCall.addArgument(JIT::Imm32(registerOffset));
    stubCall.addArgument(JIT::Imm32(argCount));
    stubCall.call();

    sampleCodeBlock(m_codeBlock);
}
void JITCompiler::compile()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();
    m_speculative = adoptPtr(new SpeculativeJIT(*this));
    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();
    compileBody();
    setEndOfMainPath();

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();
}
bool JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===

    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackCheck = branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), GPRInfo::regT1);
    // Return here after stack check.
    Label fromStackCheck = label();

    // === Function body code generation ===
    SpeculativeJIT speculative(*this);
    compileBody(speculative);
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the slow stack check (if the fast one in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    stackCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    CallBeginToken token;
    beginCall(CodeOrigin(0), token);
    Call callStackCheck = call();
    notifyCall(callStackCheck, CodeOrigin(0), token);
    jump(fromStackCheck);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    beginCall(CodeOrigin(0), token);
    Call callArityCheck = call();
    notifyCall(callArityCheck, CodeOrigin(0), token);
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);

    // Generate slow path code.
    speculative.runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    speculative.createOSREntries();
    setEndOfCode();

    // === Link ===
    LinkBuffer linkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer.didFailToAllocate())
        return false;
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);

    // FIXME: switch the stack check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callStackCheck, cti_stack_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

    if (shouldShowDisassembly())
        m_disassembler->dump(linkBuffer);
    if (m_graph.m_compilation)
        m_disassembler->reportToProfiler(m_graph.m_compilation.get(), linkBuffer);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = JITCode(linkBuffer.finalizeCodeWithoutDisassembly(), JITCode::DFGJIT);
    return true;
}
void JITCompiler::compileFunction()
{
    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);

    // Move the stack pointer down to accommodate locals
    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    compileSetupRegistersForEntry();
    compileEntryExecutionFlag();

    // === Function body code generation ===
    m_speculative = std::make_unique<SpeculativeJIT>(*this);
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);

    branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
    m_callArityFixup = call();
    jump(fromArityCheck);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
    m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();

    // === Link ===
    auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
    if (linkBuffer->didFailToAllocate()) {
        m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
        return;
    }
    link(*linkBuffer);
    m_speculative->linkOSREntries(*linkBuffer);

    m_jitCode->shrinkToFit();
    codeBlock()->shrinkToFit(CodeBlock::LateShrink);

    linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));

    disassemble(*linkBuffer);

    MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);

    m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(m_graph.m_plan, m_jitCode.release(), WTFMove(linkBuffer), withArityCheck);
}
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments() && VirtualRegister(arguments) == m_codeBlock->argumentsRegister() && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitLoadTag(arguments, regT1);
        slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));

        load32(payloadFor(JSStack::ArgumentCount), regT2);
        slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT2: argumentCountIncludingThis

        move(regT2, regT3);
        neg32(regT3);
        add32(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT3);
        lshift32(TrustedImm32(3), regT3);
        addPtr(callFrameRegister, regT3);
        // regT3: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT3));

        // Initialize ArgumentCount.
        store32(regT2, payloadFor(JSStack::ArgumentCount, regT3));

        // Initialize 'this'.
        emitLoad(thisValue, regT1, regT0);
        store32(regT0, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));

        // Copy arguments.
        end.append(branchSub32(Zero, TrustedImm32(1), regT2));
        // regT2: argumentCount

        Label copyLoop = label();
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT0);
        load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT1);
        store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
        branchSub32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    emitLoad(thisValue, regT1, regT0);
    emitLoad(arguments, regT3, regT2);
    callOperation(operationLoadVarargs, regT1, regT0, regT3, regT2, firstFreeRegister);
    move(returnValueRegister, regT3);

    if (canOptimize)
        end.link(this);
}
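// Illustrative arithmetic only (not JSC code): the negate/add/shift sequence
// above computes cfr + 8 * (firstFreeRegister - CallFrameHeaderSize - argCount).
// Every concrete value below is invented for the demo; the header size is an
// assumption.
#include <cassert>
#include <cstdint>

int main()
{
    const int kCallFrameHeaderSize = 6; // assumed header size, in Registers
    intptr_t cfr = 0x100000;            // pretend callFrameRegister
    int argCountIncludingThis = 3;      // what regT2 holds after the load
    int firstFreeRegister = 40;         // instruction[5].u.operand (made up)

    // The emitted sequence: t = argCount; t = -t; t += firstFree - header;
    // t <<= 3; t += cfr.
    intptr_t t = argCountIncludingThis;
    t = -t;
    t += firstFreeRegister - kCallFrameHeaderSize;
    t <<= 3;
    t += cfr;

    assert(t == cfr + 8 * (intptr_t)(firstFreeRegister - kCallFrameHeaderSize - argCountIncludingThis));
    return 0;
}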
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[1].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Caller initializes ScopeChain.
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ScopeChain; ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */

    if (opcodeID == op_call_varargs)
        compileLoadVarargs(instruction);
    else {
        int argCount = instruction[2].u.operand;
        int registerOffset = instruction[3].u.operand;

        if (opcodeID == op_call && canBeOptimized()) {
            emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
            Jump done = emitJumpIfNotJSCell(regT0);
            loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
            storePtr(regT0, instruction[5].u.arrayProfile->addressOfLastSeenStructure());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT1);
        store32(TrustedImm32(argCount), Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    } // regT1 holds newCallFrame with ArgumentCount initialized.

    store32(TrustedImm32(instruction - m_codeBlock->instructions().begin()), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.

    store64(callFrameRegister, Address(regT1, JSStack::CallerFrame * static_cast<int>(sizeof(Register))));
    store64(regT0, Address(regT1, JSStack::Callee * static_cast<int>(sizeof(Register))));
    move(regT1, callFrameRegister);

    if (opcodeID == op_call_eval) {
        compileCallEval();
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
    addSlowCase(slowCase);

    ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
    m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
    emitPutToCallFrameHeader(regT1, JSStack::ScopeChain);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    sampleCodeBlock(m_codeBlock);
}
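// A toy mock of the six call-frame header slots the caller/callee comment
// above divides up. The field order here is purely illustrative, not the
// authoritative JSC layout; it only shows which party writes each slot.
#include <cstdint>

struct MockRegister { uint64_t bits; };

struct MockCallFrameHeader {
    MockRegister codeBlock;     // JS call: filled in by the callee
    MockRegister scopeChain;    // filled in by the caller (hot path above)
    MockRegister callerFrame;   // caller: store64(callFrameRegister, ...)
    MockRegister returnPC;      // callee (JS call) or caller (non-JS call)
    MockRegister argumentCount; // caller: payload = argc, tag = bytecode offset
    MockRegister callee;        // caller: store64(regT0, ...)
};

static_assert(sizeof(MockCallFrameHeader) == 6 * sizeof(MockRegister), "six header slots");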
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
{
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);

    Jump notImm1;
    Jump notImm2;
    if (op1HasImmediateIntFastCase) {
        notImm2 = getSlowCase(iter);
    } else if (op2HasImmediateIntFastCase) {
        notImm1 = getSlowCase(iter);
    } else {
        notImm1 = getSlowCase(iter);
        notImm2 = getSlowCase(iter);
    }

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);
    emitGetVirtualRegister(op1, regT0);

    Label stubFunctionCall(this);
    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
    if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
        emitGetVirtualRegister(op1, regT0);
        emitGetVirtualRegister(op2, regT1);
    }
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(result);
    Jump end = jump();

    if (op1HasImmediateIntFastCase) {
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op1, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT2);
    } else if (op2HasImmediateIntFastCase) {
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op2, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT2);
    } else {
        // if we get here, eax is not an int32, edx not yet checked.
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT1);
        Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT2);
        Jump op2wasInteger = jump();

        // if we get here, eax IS an int32, edx is not.
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        convertInt32ToDouble(regT0, fpRegT1);
        op2isDouble.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT2);
        op2wasInteger.link(this);
    }

    if (opcodeID == op_add)
        addDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_sub)
        subDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_mul)
        mulDouble(fpRegT2, fpRegT1);
    else {
        ASSERT(opcodeID == op_div);
        divDouble(fpRegT2, fpRegT1);
    }
    moveDoubleToPtr(fpRegT1, regT0);
    subPtr(tagTypeNumberRegister, regT0);
    emitPutVirtualRegister(result, regT0);

    end.link(this);
}
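// Why "subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset":
// with the JSVALUE64 constants of this era (values assumed below), the two
// differ by exactly 2^64, so they coincide in 64-bit modular arithmetic. A
// small self-contained check of the COMPILE_ASSERT above:
#include <cassert>
#include <cstdint>

int main()
{
    const uint64_t TagTypeNumber = 0xffff000000000000ull;       // assumed constant
    const uint64_t DoubleEncodeOffset = 0x0001000000000000ull;  // assumed constant
    assert(TagTypeNumber + DoubleEncodeOffset == 0);            // the COMPILE_ASSERT, mod 2^64

    uint64_t boxed = 0x1234abcd5678ef00ull; // any encoded-double bit pattern
    assert(boxed - TagTypeNumber == boxed + DoubleEncodeOffset); // identical mod 2^64
    return 0;
}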
// parse the heap to find valid objects and initialize metadata, then
// add edges for every known root pointer and every known obj->obj ptr.
HeapGraph makeHeapGraph(bool include_free)
{
    HeapGraph g;
    PtrMap blocks;

    // parse the heap once to create a PtrMap for pointer filtering. Create
    // one node for every parsed block, including NativeData and AsyncFuncFrame
    // blocks. Only include free blocks if requested.
    MM().forEachHeader([&](Header* h, size_t alloc_size) {
        if (h->kind() != HeaderKind::Free || include_free) {
            blocks.insert(h, alloc_size); // adds interval [h, h+alloc_size)
        }
    });
    blocks.prepare();

    // initialize nodes by iterating over PtrMap's regions
    g.nodes.reserve(blocks.size());
    blocks.iterate([&](const Header* h, size_t size) {
        type_scan::Index ty;
        switch (h->kind()) {
            case HeaderKind::NativeData: ty = h->native_.typeIndex(); break;
            case HeaderKind::Resource: ty = h->res_.typeIndex(); break;
            case HeaderKind::SmallMalloc:
            case HeaderKind::BigMalloc: ty = h->malloc_.typeIndex(); break;
            default: ty = type_scan::kIndexUnknown; break;
        }
        g.nodes.push_back(HeapGraph::Node{h, size, false, ty, -1, -1});
    });

    // find root nodes
    type_scan::Scanner scanner;
    iterateRoots([&](const void* h, size_t size, type_scan::Index tyindex) {
        // it's important that we actually scan each root node before
        // returning, since at least one will be the C++ stack, and some
        // nodes will only exist for the duration of the call to this lambda,
        // for example EphemeralPtrWrapper<T>.
        addRootNode(g, blocks, scanner, h, size, tyindex);
    });

    // find heap->heap pointers
    for (size_t i = 0, n = g.nodes.size(); i < n; i++) {
        if (g.nodes[i].is_root) continue;
        auto h = g.nodes[i].h;
        scanHeader(h, scanner);
        auto from = blocks.index(h);
        assert(from == i);
        scanner.finish(
            [&](const void* p) {
                // definitely a ptr, but maybe interior, and maybe not counted
                if (auto r = blocks.region(p)) {
                    addPtr(g, from, blocks.index(r), HeapGraph::Implicit, UnknownOffset);
                }
            },
            [&](const void* p, std::size_t size) {
                conservativeScan(p, size, [&](const void** addr, const void* ptr) {
                    if (auto r = blocks.region(ptr)) {
                        auto to = blocks.index(r);
                        auto offset = uintptr_t(addr) - uintptr_t(h);
                        addPtr(g, from, to, HeapGraph::Ambiguous, offset);
                    }
                });
            },
            [&](const void** addr) {
                if (auto r = blocks.region(*addr)) {
                    auto to = blocks.index(r);
                    auto offset = uintptr_t(addr) - uintptr_t(h);
                    addPtr(g, from, to, HeapGraph::Counted, offset);
                }
            }
        );
    }
    g.nodes.shrink_to_fit();
    g.ptrs.shrink_to_fit();
    g.root_ptrs.shrink_to_fit();
    g.root_nodes.shrink_to_fit();
    return g;
}
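// A minimal stand-in (not the HHVM PtrMap) for the pointer filtering above:
// sorted half-open intervals plus binary search for the block that contains an
// arbitrary, possibly interior, pointer. The real structure finalizes itself
// via blocks.prepare(); here we simply require address-ordered insertion.
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Interval { uintptr_t start; size_t size; };

struct MiniPtrMap {
    std::vector<Interval> blocks; // must be inserted in ascending address order

    void insert(const void* h, size_t size) {
        blocks.push_back({uintptr_t(h), size}); // records interval [h, h+size)
    }
    // index of the block containing p, or -1 if p points into no known block
    ptrdiff_t index(const void* p) const {
        auto addr = uintptr_t(p);
        auto it = std::upper_bound(blocks.begin(), blocks.end(), addr,
            [](uintptr_t a, const Interval& iv) { return a < iv.start; });
        if (it == blocks.begin()) return -1;
        --it; // last block whose start <= addr
        return addr < it->start + it->size ? it - blocks.begin() : -1;
    }
};

int main()
{
    static char heap[256];
    MiniPtrMap map;
    map.insert(heap, 64);               // block 0: [heap, heap+64)
    map.insert(heap + 64, 32);          // block 1: [heap+64, heap+96)
    assert(map.index(heap + 70) == 1);  // interior pointer resolves to block 1
    assert(map.index(heap + 200) == -1); // outside every block
    return 0;
}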
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Caller initializes ScopeChain.
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ScopeChain; ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);

    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs)
        compileLoadVarargs(instruction);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
            Jump done = emitJumpIfNotJSCell(regT0);
            load32(Address(regT0, JSCell::structureIDOffset()), regT0);
            store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
        store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.

    uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(bytecodeOffset);
    store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + TagOffset));
    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.

    store64(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
    info->callType = CallLinkInfo::callTypeFor(opcodeID);
    info->codeOrigin = CodeOrigin(m_bytecodeOffset);
    info->calleeGPR = regT0;
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT2);
    store64(regT2, Address(MacroAssembler::stackPointerRegister, JSStack::ScopeChain * sizeof(Register) - sizeof(CallerFrameAndPC)));

    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}
template<typename Op>
void JIT::compileOpCall(const Instruction* instruction, unsigned callLinkInfoIndex)
{
    OpcodeID opcodeID = Op::opcodeID;
    auto bytecode = instruction->as<Op>();
    int callee = bytecode.m_callee.offset();

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */

    CallLinkInfo* info = nullptr;
    if (opcodeID != op_call_eval)
        info = m_codeBlock->addCallLinkInfo();
    compileSetupFrame(bytecode, info);
    // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.

    uint32_t locationBits = CallSiteIndex(instruction).bits();
    store32(TrustedImm32(locationBits), tagFor(CallFrameSlot::argumentCount));
    emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee.

    store32(regT0, Address(stackPointerRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    store32(regT1, Address(stackPointerRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register)) + TagOffset - sizeof(CallerFrameAndPC)));

    if (compileCallEval(bytecode))
        return;

    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
        emitRestoreCalleeSaves();

    addSlowCase(branchIfNotCell(regT1));
    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(nullptr));
    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;

    checkStackPointerAlignment();
    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) {
        prepareForTailCallSlow();
        m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
        return;
    }

    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(bytecode);
}
void JITCompiler::compileFunction()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), GPRInfo::regT1);

    // Move the stack pointer down to accommodate locals
    addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    // === Function body code generation ===
    m_speculative = adoptPtr(new SpeculativeJIT(*this));
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the stack overflow handling (if the stack check in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack overflow handling; if the stack check in the function head fails,
    // we need to call out to a helper function to throw the StackOverflowError.
    stackOverflow.link(this);

    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);

    m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);

    if (maxFrameExtentForSlowPathCall)
        addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);

    branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    move(TrustedImmPtr(m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters())), GPRInfo::regT5);
    loadPtr(BaseIndex(GPRInfo::regT5, GPRInfo::regT0, timesPtr()), GPRInfo::regT5);
    m_callArityFixup = call();
    jump(fromArityCheck);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();
}
int main (int argc, char **argv) {

    /* Local variables */
    char input[CHAR_FNAME_LENGTH+1];
    int printtime = NO;    /* print time after each step? */
    int save_tmp = DUMMY;  /* save temporary files? */
    int verbose = NO;      /* print info during processing? */
    int debug = NO;        /* print debug statements during processing? */
    int quiet = NO;        /* suppress STDOUT messages? */
    int too_many = NO;     /* too many command-line arguments? */
    int onecpu = NO;       /* suppress openmp usage by using only 1 thread? */
    int i, j;              /* loop indexes */

    /* Function definitions */
    void c_irafinit (int, char **);
    int CalWf3Run (char *, int, int, int, int, int);
    void WhichError (int);

    /* Initialize status to OK and MsgText to null */
    status = WF3_OK;
    MsgText[0] = '\0';
    input[0] = '\0';

    /* Initialize IRAF environment */
    c_irafinit(argc, argv);

    PtrRegister ptrReg;
    initPtrRegister(&ptrReg);

    /* Command line arguments:
    **       0. Check for --version option
    **       1. input file name
    **       2. print time?
    **       3. save intermediate files?
    **       4. verbose?
    */
    for (i = 1; i < argc; i++) {
        if (!(strcmp(argv[i],"--version"))) {
            printf("%s\n",WF3_CAL_VER_NUM);
            freeOnExit(&ptrReg);
            exit(0);
        }
        if (!(strcmp(argv[i],"--gitinfo"))) {
            printGitInfo();
            freeOnExit(&ptrReg);
            exit(0);
        }
        if (!(strcmp(argv[i],"--help"))) {
            printHelp();
            freeOnExit(&ptrReg);
            exit(0);
        }

        if (argv[i][0] == '-') {
            for (j = 1; argv[i][j] != '\0'; j++) {
                if (argv[i][j] == 't') {
                    printtime = YES;
                } else if (argv[i][j] == 's') {
                    save_tmp = YES;
                } else if (argv[i][j] == 'r') {
                    printf ("Current version: %s\n", WF3_CAL_VER);
                    freeOnExit(&ptrReg);
                    exit(0);
                } else if (argv[i][j] == 'v') {
                    verbose = YES;
                } else if (argv[i][j] == 'd') {
                    debug = YES; /* was NO, which silently disabled the -d flag */
                } else if (argv[i][j] == 'q') {
                    quiet = YES;
                } else if (argv[i][j] == '1') {
                    onecpu = YES;
                } else {
                    printf ("Unrecognized option %s\n", argv[i]);
                    printSyntax();
                    freeOnExit(&ptrReg);
                    exit (ERROR_RETURN);
                }
            }
        } else if (input[0] == '\0') {
            strcpy (input, argv[i]);
        } else {
            too_many = YES;
        }
    }
    if (input[0] == '\0' || too_many) {
        printSyntax();
        freeOnExit(&ptrReg);
        exit (ERROR_RETURN);
    }

    /* Initialize the structure for managing trailer file comments */
    InitTrlBuf ();
    addPtr(&ptrReg, &trlbuf, &CloseTrlBuf);
    trlGitInfo();

    /* Copy command-line value for QUIET to structure */
    SetTrlQuietMode (quiet);

    /* Call the CALWF3 main program */
    if (CalWf3Run (input, printtime, save_tmp, verbose, debug, onecpu)) {

        if (status == NOTHING_TO_DO) {
            /* If there is just nothing to do,
            ** as for ACQ images, just quit. */
            status = 0;
            sprintf (MsgText, "CALWF3 did NOT process %s", input);
            trlmessage (MsgText);
            freeOnExit(&ptrReg);
            exit(0);
        } else {
            /* Error during processing */
            sprintf (MsgText, "CALWF3 processing NOT completed for %s", input);
            trlerror (MsgText);
            /* Provide interpretation of error for user */
            WhichError (status);
            freeOnExit(&ptrReg);
            exit (ERROR_RETURN);
        }
    }

    /* Successful completion */
    sprintf (MsgText, "CALWF3 completion for %s", input);
    trlmessage (MsgText);

    freeOnExit(&ptrReg);

    /* Exit the program */
    exit (0);
}
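/* Example invocation implied by the parser above (the file name here is
** hypothetical):
**
**     calwf3 -v -s iabc01x0q_raw.fits
**
** -v enables verbose output and -s keeps intermediate files; because the
** inner loop walks every character after '-', grouped flags such as -vs
** behave the same as separate ones.
*/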
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Handle eval
    Jump wasEval;
    if (opcodeID == op_call_eval) {
        emitGetVirtualRegister(callee, X86::ecx);
        compileOpCallEvalSetupArgs(instruction);

        emitCTICall(Interpreter::cti_op_call_eval);
        wasEval = jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(JSImmediate::impossibleValue())));
    }

    // This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
    // This deliberately leaves the callee in ecx, used when setting up the stack frame below
    emitGetVirtualRegister(callee, X86::ecx);
    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump jumpToSlow = jnePtrWithPatch(X86::ecx, addressOfLinkedFunctionCheck, ImmPtr(JSValuePtr::encode(JSImmediate::impossibleValue())));
    addSlowCase(jumpToSlow);
    ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;

    // The following is the fast case, only used when a callee can be linked.

    // In the case of OpConstruct, call out to a cti_ function to create the new object.
    if (opcodeID == op_construct) {
        int proto = instruction[5].u.operand;
        int thisRegister = instruction[6].u.operand;

        emitPutJITStubArg(X86::ecx, 1);
        emitPutJITStubArgFromVirtualRegister(proto, 4, X86::eax);
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(thisRegister);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Fast version of stack frame initialization, directly relative to edi.
    // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
    storePtr(ImmPtr(JSValuePtr::encode(noValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register))));
    storePtr(X86::ecx, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
    loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node)), X86::edx); // newScopeChain
    store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
    storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
    storePtr(X86::edx, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);

    // Call to the callee
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(reinterpret_cast<void*>(unreachable));

    if (opcodeID == op_call_eval)
        wasEval.link(this);

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    linkSlowCase(iter);

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Fast check for JS function.
    Jump callLinkFailNotObject = emitJumpIfNotJSCell(X86::ecx);
    Jump callLinkFailNotJSFunction = jnePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr));

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    move(Imm32(argCount), X86::edx);

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);

    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_interpreter->m_ctiVirtualCallPreLink);

    Jump storeResultForFirstRun = jump();

    // FIXME: this label can be removed, since it is a fixed offset from 'callReturnLocation'.
    // This is the address for the cold path *after* the first run (which tries to link the call).
    m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = MacroAssembler::Label(this);

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Check for JSFunctions.
    Jump isNotObject = emitJumpIfNotJSCell(X86::ecx);
    Jump isJSFunction = jePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr));

    // This handles host functions
    isNotObject.link(this);
    callLinkFailNotObject.link(this);
    callLinkFailNotJSFunction.link(this);
    emitCTICall(((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
    Jump wasNotJSFunction = jump();

    // Next, handle JSFunctions...
    isJSFunction.link(this);

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argCount), X86::edx);

    emitNakedCall(m_interpreter->m_ctiVirtualCall);

    // Put the return value in dst. In the interpreter, op_ret does this.
    wasNotJSFunction.link(this);
    storeResultForFirstRun.link(this);
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    // === Stage 1 - Function header code generation ===
    //
    // This code currently matches the old JIT. In the function header we need to
    // pop the return address (since we do not allow any recursion on the machine
    // stack), and perform a fast register file check.

    // This is the main entry point, without performing an arity check.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
    // We'll need to convert the remaining cti_ style calls (specifically the register file
    // check) which will be dependent on stack layout. (We'd need to account for this in
    // both normal return code and when jumping to an exception handler).
    preserveReturnAddressAfterCall(GPRInfo::regT2);
    emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);

    // Setup a pointer to the codeblock in the CallFrameHeader.
    emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);

    // Plant a check that sufficient space is available in the RegisterFile.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1);
    // Return here after register file check.
    Label fromRegisterFileCheck = label();

    // === Stage 2 - Function body code generation ===
    //
    // We generate the speculative code path, followed by the non-speculative
    // code for the function. Next we need to link the two together, making
    // bail-outs from the speculative path jump to the corresponding point on
    // the non-speculative one (and generating any code necessary to juggle
    // register values around, rebox values, and ensure spilled, to match the
    // non-speculative path's requirements).

#if DFG_JIT_BREAK_ON_EVERY_FUNCTION
    // Handy debug tool!
    breakpoint();
#endif

    // First generate the speculative path.
    Label speculativePathBegin = label();
    SpeculativeJIT speculative(*this);
#if !DFG_DEBUG_LOCAL_DISBALE_SPECULATIVE
    bool compiledSpeculative = speculative.compile();
#else
    bool compiledSpeculative = false;
#endif

    // Next, generate the non-speculative path. We pass this a SpeculationCheckIndexIterator
    // to allow it to check which nodes in the graph may bail out, and may need to reenter the
    // non-speculative path.
    if (compiledSpeculative) {
        SpeculationCheckIndexIterator checkIterator(speculative.speculationChecks());
        NonSpeculativeJIT nonSpeculative(*this);
        nonSpeculative.compile(checkIterator);

        // Link the bail-outs from the speculative path to the corresponding entry points into the non-speculative one.
        linkSpeculationChecks(speculative, nonSpeculative);
    } else {
        // If compilation through the SpeculativeJIT failed, throw away the code we generated.
        m_calls.clear();
        rewindToLabel(speculativePathBegin);

        SpeculationCheckVector noChecks;
        SpeculationCheckIndexIterator checkIterator(noChecks);
        NonSpeculativeJIT nonSpeculative(*this);
        nonSpeculative.compile(checkIterator);
    }

    // === Stage 3 - Function footer code generation ===
    //
    // Generate code to lookup and jump to exception handlers, to perform the slow
    // register file check (if the fast one in the function header fails), and
    // generate the entry point with arity check.
    // Iterate over the m_calls vector, checking for exception checks,
    // and linking them to here.
    unsigned exceptionCheckCount = 0;
    for (unsigned i = 0; i < m_calls.size(); ++i) {
        Jump& exceptionCheck = m_calls[i].m_exceptionCheck;
        if (exceptionCheck.isSet()) {
            exceptionCheck.link(this);
            ++exceptionCheckCount;
        }
    }

    // If any exception checks were linked, generate code to lookup a handler.
    if (exceptionCheckCount) {
        // lookupExceptionHandler is passed two arguments, exec (the CallFrame*), and
        // an identifier for the operation that threw the exception, which we can use
        // to look up handler information. The identifier we use is the return address
        // of the call out from JIT code that threw the exception; this is still
        // available on the stack, just below the stack pointer!
        move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
        peek(GPRInfo::argumentGPR1, -1);
        m_calls.append(CallRecord(call(), lookupExceptionHandler));
        // lookupExceptionHandler leaves the handler CallFrame* in the returnValueGPR,
        // and the address of the handler in returnValueGPR2.
        jump(GPRInfo::returnValueGPR2);
    }

    // Generate the register file check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    registerFileCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callRegisterFileCheck = call();
    jump(fromRegisterFileCheck);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    preserveReturnAddressAfterCall(GPRInfo::regT2);
    emitPutToCallFrameHeader(GPRInfo::regT2, RegisterFile::ReturnPC);
    branch32(Equal, GPRInfo::regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callArityCheck = call();
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);

    // === Stage 4 - Link ===
    //
    // Link the code, populate data in CodeBlock data structures.
    LinkBuffer linkBuffer(*m_globalData, this, m_globalData->executableAllocator);

#if DFG_DEBUG_VERBOSE
    fprintf(stderr, "JIT code start at %p\n", linkBuffer.debugAddress());
#endif

    // Link all calls out from the JIT code to their respective functions.
    for (unsigned i = 0; i < m_calls.size(); ++i)
        linkBuffer.link(m_calls[i].m_call, m_calls[i].m_function);

    if (m_codeBlock->needsCallReturnIndices()) {
        m_codeBlock->callReturnIndexVector().reserveCapacity(exceptionCheckCount);
        for (unsigned i = 0; i < m_calls.size(); ++i) {
            if (m_calls[i].m_exceptionCheck.isSet()) {
                unsigned returnAddressOffset = linkBuffer.returnAddressOffset(m_calls[i].m_call);
                unsigned exceptionInfo = m_calls[i].m_exceptionInfo;
                m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(returnAddressOffset, exceptionInfo));
            }
        }
    }

    // FIXME: switch the register file check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callRegisterFileCheck, cti_register_file_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = linkBuffer.finalizeCode();
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */

    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs)
        compileSetupVarargsFrame(instruction, info);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1);
            Jump done = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
            loadPtr(Address(regT1, JSCell::structureIDOffset()), regT1);
            storePtr(regT1, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
        store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.

    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
    store32(TrustedImm32(locationBits), tagFor(JSStack::ArgumentCount, callFrameRegister));
    emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee.

    store32(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    store32(regT1, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + TagOffset - sizeof(CallerFrameAndPC)));

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;

    checkStackPointerAlignment();
    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}
void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool invert)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the slow path:
    // - floating-point number to constant int immediate
    // - constant int immediate to floating-point number
    // - floating-point number to floating-point number.
    if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(op1, regT0);
        stubCall.addArgument(op2, regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
        return;
    }

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);

            int32_t op2imm = getConstantOperand(op2).asInt32();
            move(Imm32(op2imm), regT1);
            convertInt32ToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));

            fail1.link(this);
        }

        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);

    } else if (isOperandConstantImmediateInt(op1)) {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT1, fpRegT1);

            int32_t op1imm = getConstantOperand(op1).asInt32();
            move(Imm32(op1imm), regT0);
            convertInt32ToDouble(regT0, fpRegT0);

            emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));

            fail1.link(this);
        }

        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);

    } else {
        linkSlowCase(iter);

        if (supportsFloatingPoint()) {
            Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
            Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
            Jump fail3 = emitJumpIfImmediateInteger(regT1);
            addPtr(tagTypeNumberRegister, regT0);
            addPtr(tagTypeNumberRegister, regT1);
            movePtrToDouble(regT0, fpRegT0);
            movePtrToDouble(regT1, fpRegT1);

            emitJumpSlowToHot(branchDouble(invert ? DoubleLessThanOrUnordered : DoubleGreaterThanOrEqual, fpRegT1, fpRegT0), target);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jnlesseq));

            fail1.link(this);
            fail2.link(this);
            fail3.link(this);
        }

        linkSlowCase(iter);
        JITStubCall stubCall(this, cti_op_jlesseq);
        stubCall.addArgument(regT0);
        stubCall.addArgument(regT1);
        stubCall.call();
        emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
    }
}
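// The invert case above relies on an IEEE-754 identity: NOT(a <= b) is exactly
// (b < a) OR (a, b unordered), which is why the inverted branch condition is
// DoubleLessThanOrUnordered rather than DoubleLessThan. A small self-contained
// check (plain C++, not JSC code):
#include <cassert>
#include <cmath>
#include <limits>

static bool lessThanOrUnordered(double x, double y)
{
    return x < y || std::isnan(x) || std::isnan(y);
}

int main()
{
    const double nan = std::numeric_limits<double>::quiet_NaN();
    const double samples[] = { -1.0, 0.0, 2.5, nan };
    for (double a : samples) {
        for (double b : samples) {
            // branchDouble(DoubleGreaterThanOrEqual, b, a) taken  <=>  a <= b
            // branchDouble(DoubleLessThanOrUnordered, b, a) taken <=> !(a <= b)
            assert((b >= a) == (a <= b));
            assert(lessThanOrUnordered(b, a) == !(a <= b));
        }
    }
    return 0;
}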
void JITCompiler::emitCount(AbstractSamplingCounter& counter, uint32_t increment)
{
    addPtr(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
}
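As emitted machine code this is a single read-modify-write of a counter at a fixed address. In plain C++ it amounts to the following sketch (unsynchronized by design, since sampling counters tolerate lost updates):

#include <cstdint>

uint64_t samplingCounter; // stand-in for *counter.addressOfCounter()

inline void emitCountEquivalent(uint32_t increment)
{
    samplingCounter += increment; // addPtr(TrustedImm32(increment), AbsoluteAddress(...))
}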
int allocateCTEParamsFast(CTEParamsFast * pars)
{
    PtrRegister ptrReg;
    initPtrRegister(&ptrReg);
    void * tmp = NULL;

    tmp = newAndZero((void*)&pars->iz_data, pars->nScaleTableColumns, sizeof(*pars->iz_data));
    addPtr(&ptrReg, tmp, &free);
    if (!tmp) {
        freeOnExit(&ptrReg);
        trlerror("Out of memory.\n");
        return OUT_OF_MEMORY;
    }

    tmp = newAndZero((void*)&pars->scale512, pars->nScaleTableColumns, sizeof(*pars->scale512));
    addPtr(&ptrReg, tmp, &free);
    if (!tmp) {
        freeOnExit(&ptrReg);
        trlerror("Out of memory.\n");
        return OUT_OF_MEMORY;
    }

    tmp = newAndZero((void*)&pars->scale1024, pars->nScaleTableColumns, sizeof(*pars->scale1024));
    addPtr(&ptrReg, tmp, &free);
    if (!tmp) {
        freeOnExit(&ptrReg);
        trlerror("Out of memory.\n");
        return OUT_OF_MEMORY;
    }

    tmp = newAndZero((void*)&pars->scale1536, pars->nScaleTableColumns, sizeof(*pars->scale1536));
    addPtr(&ptrReg, tmp, &free);
    if (!tmp) {
        freeOnExit(&ptrReg);
        trlerror("Out of memory.\n");
        return OUT_OF_MEMORY;
    }

    tmp = newAndZero((void*)&pars->scale2048, pars->nScaleTableColumns, sizeof(*pars->scale2048));
    addPtr(&ptrReg, tmp, &free);
    if (!tmp) {
        freeOnExit(&ptrReg);
        trlerror("Out of memory.\n");
        return OUT_OF_MEMORY;
    }

    tmp = newAndZero((void*)&pars->wcol_data, pars->nTraps, sizeof(*pars->wcol_data));
    addPtr(&ptrReg, tmp, &free);
    if (!tmp) {
        freeOnExit(&ptrReg);
        trlerror("Out of memory.\n");
        return OUT_OF_MEMORY;
    }

    tmp = newAndZero((void*)&pars->qlevq_data, pars->nTraps, sizeof(*pars->qlevq_data));
    addPtr(&ptrReg, tmp, &free);
    if (!tmp) {
        freeOnExit(&ptrReg);
        trlerror("Out of memory.\n");
        return OUT_OF_MEMORY;
    }

    tmp = newAndZero((void*)&pars->dpdew_data, pars->nTraps, sizeof(*pars->dpdew_data));
    addPtr(&ptrReg, tmp, &free);
    if (!tmp) {
        freeOnExit(&ptrReg);
        trlerror("Out of memory.\n");
        return OUT_OF_MEMORY;
    }

    freeReg(&ptrReg);
    return 0;
}
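The function above leans on a register-and-unwind idiom: every allocation is recorded in ptrReg immediately after it is attempted, so the first failure can release everything allocated so far through a single freeOnExit call, while success hands ownership to the caller. A minimal C++ rendering of the same pattern; the names here are hypothetical analogues, not the real PtrRegister API:

#include <cstdlib>
#include <vector>

// Hypothetical analogue of PtrRegister: records live allocations for bulk release.
struct PtrRegisterSketch {
    std::vector<void*> ptrs;
    void track(void* p) { if (p) ptrs.push_back(p); }
    void freeAll() { for (void* p : ptrs) std::free(p); ptrs.clear(); }
};

// newAndZero-style helper: zeroed allocation, tracked on success.
static void* allocTracked(PtrRegisterSketch& reg, size_t count, size_t size)
{
    void* p = std::calloc(count, size);
    reg.track(p);
    return p;
}

static int allocateTwoColumns(size_t n, double*& a, double*& b)
{
    PtrRegisterSketch reg;
    a = static_cast<double*>(allocTracked(reg, n, sizeof(double)));
    b = static_cast<double*>(allocTracked(reg, n, sizeof(double)));
    if (!a || !b) {
        reg.freeAll(); // freeOnExit analogue: unwind every earlier allocation
        return 1;      // OUT_OF_MEMORY analogue
    }
    return 0; // caller now owns a and b; the register is discarded without freeing
}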
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister().offset()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis

        move(regT0, regT1);
        neg64(regT1);
        add64(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT1);
        lshift64(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1));

        // Initialize ArgumentCount.
        store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));

        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

        // Copy arguments.
        signExtend32ToPtr(regT0, regT0);
        end.append(branchSub64(Zero, TrustedImm32(1), regT0));
        // regT0: argumentCount

        Label copyLoop = label();
        load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
        store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchSub64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    emitGetVirtualRegister(arguments, regT1);
    callOperation(operationSizeAndAllocFrameForVarargs, regT1, firstFreeRegister);
    emitGetVirtualRegister(thisValue, regT1);
    emitGetVirtualRegister(arguments, regT2);
    callOperation(operationLoadVarargs, returnValueGPR, regT1, regT2);
    move(returnValueGPR, regT1);

    if (canOptimize)
        end.link(this);
}
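Two details of the fast path above are worth spelling out: the new frame address is callFrame plus (firstFreeRegister - CallFrameHeaderSize - argumentCountIncludingThis) slots of 8 bytes, and the count-down loop copies slots argumentCount-1 down to 1, because 'this' at slot 0 was stored explicitly just before. A plain C++ model; the two slot constants are assumptions for illustration:

#include <cstdint>

constexpr int kCallFrameHeaderSize = 5; // JSStack::CallFrameHeaderSize, assumed
constexpr int kThisArgumentOffset = 6;  // CallFrame::thisArgumentOffset(), assumed

// Mirrors: neg64; add64(firstFreeRegister - headerSize); lshift64(3); addPtr(callFrame).
uint64_t* computeNewCallFrame(uint64_t* callFrame, int firstFreeRegister, int argCountIncludingThis)
{
    // firstFreeRegister is a (negative) local offset, so this lands below callFrame.
    return callFrame + (firstFreeRegister - kCallFrameHeaderSize - argCountIncludingThis);
}

// Mirrors the branchSub64 count-down loop: copy this-relative slots argCount-1 .. 1.
void copyArguments(const uint64_t* oldFrame, uint64_t* newFrame, int argCountIncludingThis)
{
    for (int i = argCountIncludingThis - 1; i >= 1; --i)
        newFrame[kThisArgumentOffset + i] = oldFrame[kThisArgumentOffset + i];
}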
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call), call_and_tail_call_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_varargs), call_and_tail_call_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_forward_arguments), call_and_tail_call_forward_arguments_opcodes_must_be_same_length);

    CallLinkInfo* info = nullptr;
    if (opcodeID != op_call_eval)
        info = m_codeBlock->addCallLinkInfo();
    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments)
        compileSetupVarargsFrame(opcodeID, instruction, info);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
            Jump done = emitJumpIfNotJSCell(regT0);
            load32(Address(regT0, JSCell::structureIDOffset()), regT0);
            store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
        store32(TrustedImm32(argCount), Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.

    uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
    uint32_t locationBits = CallSiteIndex(bytecodeOffset).bits();
    store32(TrustedImm32(locationBits), Address(callFrameRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + TagOffset));

    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
    store64(regT0, Address(stackPointerRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;

    if (opcodeID == op_tail_call) {
        CallFrameShuffleData shuffleData;
        shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister;
        shuffleData.numLocals = instruction[4].u.operand - sizeof(CallerFrameAndPC) / sizeof(Register);
        shuffleData.args.resize(instruction[3].u.operand);
        for (int i = 0; i < instruction[3].u.operand; ++i) {
            shuffleData.args[i] = ValueRecovery::displacedInJSStack(
                virtualRegisterForArgument(i) - instruction[4].u.operand, DataFormatJS);
        }
        shuffleData.callee = ValueRecovery::inGPR(regT0, DataFormatJS);
        shuffleData.setupCalleeSaveRegisters(m_codeBlock);
        info->setFrameShuffleData(shuffleData);
        CallFrameShuffler(*this, shuffleData).prepareForTailCall();
        m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
        return;
    }

    if (opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) {
        emitRestoreCalleeSaves();
        prepareForTailCallSlow();
        m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
        return;
    }

    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}
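Throughout the non-varargs path above, header slots of the frame being built are addressed relative to the already-adjusted stack pointer: once SP = newFrame + sizeof(CallerFrameAndPC), slot i lives at SP + i * sizeof(Register) - sizeof(CallerFrameAndPC). A small sketch of that arithmetic; the slot indices are assumptions for illustration, not the real CallFrameSlot values:

#include <cstddef>

constexpr std::ptrdiff_t kRegisterSize = 8;
constexpr std::ptrdiff_t kCallerFrameAndPCSize = 2 * kRegisterSize; // caller frame + return PC
constexpr std::ptrdiff_t kCalleeSlot = 3;        // CallFrameSlot::callee, assumed
constexpr std::ptrdiff_t kArgumentCountSlot = 4; // CallFrameSlot::argumentCount, assumed

// Address of a header slot in the new frame, given the adjusted stack pointer.
char* slotAddress(char* stackPointer, std::ptrdiff_t slot)
{
    return stackPointer + slot * kRegisterSize - kCallerFrameAndPCSize;
}

// Example: the callee store above amounts to writing 8 bytes at
// slotAddress(sp, kCalleeSlot), and the argument count to the payload half
// of slotAddress(sp, kArgumentCountSlot).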
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[2].u.operand;
    int arguments = instruction[3].u.operand;
    int firstFreeRegister = instruction[4].u.operand;

    killLastResultRegister();

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister()
        && !m_codeBlock->symbolTable()->slowArguments();

    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis

        move(regT0, regT1);
        add32(TrustedImm32(firstFreeRegister + JSStack::CallFrameHeaderSize), regT1);
        lshift32(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame

        slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->stack().addressOfEnd()), regT1));

        // Initialize ArgumentCount.
        store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));

        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

        // Copy arguments.
        neg32(regT0);
        signExtend32ToPtr(regT0, regT0);
        end.append(branchAdd64(Zero, TrustedImm32(1), regT0));
        // regT0: -argumentCount

        Label copyLoop = label();
        load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
        store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchAdd64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    JITStubCall stubCall(this, cti_op_load_varargs);
    stubCall.addArgument(thisValue, regT0);
    stubCall.addArgument(arguments, regT0);
    stubCall.addArgument(Imm32(firstFreeRegister));
    stubCall.call(regT1);

    if (canOptimize)
        end.link(this);
}
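Unlike the later version, this copy loop runs on a negative index: regT0 starts at -argumentCount, and each branchAdd64 both advances the index and produces the zero flag that terminates the loop, so a single add per iteration does double duty. Indices 1 - argumentCount through -1 are visited, all relative to the this-argument slot. The equivalent in plain C++, as a sketch with the this-relative base left symbolic:

#include <cstdint>

// Mirrors: neg32; branchAdd64(Zero, 1) to skip the empty case;
// then a loop closed by branchAdd64(NonZero, 1).
void copyArgumentsNegativeIndex(const uint64_t* src, uint64_t* dst, int argumentCount, int thisSlot)
{
    for (intptr_t i = 1 - static_cast<intptr_t>(argumentCount); i != 0; ++i)
        dst[thisSlot + i] = src[thisSlot + i]; // one increment steps the index and tests for done
}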
void JITCompiler::compileFunction()
{
    SamplingRegion samplingRegion("DFG Backend");

    setStartOfCode();
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);
    // Plant a check that sufficient space is available in the JSStack.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(TrustedImm32(-m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), GPRInfo::regT1);
    // Return here after stack check.
    Label fromStackCheck = label();

    // === Function body code generation ===
    m_speculative = adoptPtr(new SpeculativeJIT(*this));
    compileBody();
    setEndOfMainPath();

    // === Function footer code generation ===
    //
    // Generate code to perform the slow stack check (if the fast one in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the stack check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    stackCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));

    emitStoreCodeOrigin(CodeOrigin(0));
    m_callStackCheck = call();
    jump(fromStackCheck);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    m_arityCheck = label();
    compileEntry();

    load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
    branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    emitStoreCodeOrigin(CodeOrigin(0));
    m_callArityCheck = call();
    branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
    emitStoreCodeOrigin(CodeOrigin(0));
    m_callArityFixup = call();
    jump(fromArityCheck);

    // Generate slow path code.
    m_speculative->runSlowPathGenerators();

    compileExceptionHandlers();
    linkOSRExits();

    // Create OSR entry trampolines if necessary.
    m_speculative->createOSREntries();
    setEndOfCode();
}
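The fast stack check at the top computes the frame's low-water mark (callFrameRegister minus the callee-register area) and branches to the slow path when the stack's end is above it. In plain C++ terms the comparison is the following sketch; the direction assumes the downward-growing stack the negated addPtr implies:

#include <cstddef>

bool needsSlowStackCheck(char* callFrame, std::size_t numCalleeRegisters, char* stackEnd)
{
    char* lowWaterMark = callFrame - numCalleeRegisters * 8; // addPtr(TrustedImm32(-num * sizeof(Register)))
    return stackEnd > lowWaterMark; // branchPtr(Above, AbsoluteAddress(end), regT1)
}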
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Caller initializes ScopeChain.
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ScopeChain; ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */

    if (opcodeID == op_call_varargs)
        compileLoadVarargs(instruction);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1);
            Jump done = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
            loadPtr(Address(regT1, JSCell::structureOffset()), regT1);
            storePtr(regT1, instruction[6].u.arrayProfile->addressOfLastSeenStructure());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT3);
        store32(TrustedImm32(argCount), payloadFor(JSStack::ArgumentCount, regT3));
    } // regT3 holds newCallFrame with ArgumentCount initialized.

    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
    store32(TrustedImm32(locationBits), tagFor(JSStack::ArgumentCount, callFrameRegister));
    emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee.

    storePtr(callFrameRegister, Address(regT3, JSStack::CallerFrame * static_cast<int>(sizeof(Register))));
    emitStore(JSStack::Callee, regT1, regT0, regT3);
    move(regT3, callFrameRegister);

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    addSlowCase(slowCase);
    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));

    ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
    m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
    emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}
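Everything in this 32-bit path manipulates values as a (tag, payload) pair of 32-bit words: emitLoad fills two GPRs, emitStore writes two words, and cell checks compare the tag word against CellTag. A sketch of that representation; the tag constant below is hypothetical, for illustration only, not the real JSC value:

#include <cstdint>

constexpr int32_t kCellTagSketch = -1; // hypothetical tag value

struct EncodedValueSketch {
    int32_t payload; // low word: cell pointer, int32, or bool
    int32_t tag;     // high word: type tag
};

bool isCell(const EncodedValueSketch& v)
{
    // Inverse of: addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)))
    return v.tag == kCellTagSketch;
}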
void JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck)
{
    compileEntry();

    // === Function header code generation ===
    // This is the main entry point, without performing an arity check.
    // If we needed to perform an arity check we will already have moved the return address,
    // so enter after this.
    Label fromArityCheck(this);

    // Plant a check that sufficient space is available in the RegisterFile.
    // FIXME: https://bugs.webkit.org/show_bug.cgi?id=56291
    addPtr(Imm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
    Jump registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), GPRInfo::regT1);
    // Return here after register file check.
    Label fromRegisterFileCheck = label();

    // === Function body code generation ===
    SpeculativeJIT speculative(*this);
    compileBody(speculative);

    // === Function footer code generation ===
    //
    // Generate code to perform the slow register file check (if the fast one in
    // the function header fails), and generate the entry point with arity check.
    //
    // Generate the register file check; if the fast check in the function head fails,
    // we need to call out to a helper function to check whether more space is available.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    registerFileCheck.link(this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callRegisterFileCheck = call();
    jump(fromRegisterFileCheck);

    // The fast entry point into a function does not check the correct number of arguments
    // have been passed to the call (we only use the fast entry point where we can statically
    // determine the correct number of arguments have been passed, or have already checked).
    // In cases where an arity check is necessary, we enter here.
    // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
    Label arityCheck = label();
    compileEntry();

    load32(Address(GPRInfo::callFrameRegister, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))), GPRInfo::regT1);
    branch32(Equal, GPRInfo::regT1, Imm32(m_codeBlock->m_numParameters)).linkTo(fromArityCheck, this);
    move(stackPointerRegister, GPRInfo::argumentGPR0);
    poke(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
    Call callArityCheck = call();
    move(GPRInfo::regT0, GPRInfo::callFrameRegister);
    jump(fromArityCheck);

    // === Link ===
    LinkBuffer linkBuffer(*m_globalData, this);
    link(linkBuffer);
    speculative.linkOSREntries(linkBuffer);

    // FIXME: switch the register file check & arity check over to DFGOperation style calls, not JIT stubs.
    linkBuffer.link(callRegisterFileCheck, cti_register_file_check);
    linkBuffer.link(callArityCheck, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck);

    entryWithArityCheck = linkBuffer.locationOf(arityCheck);
    entry = JITCode(linkBuffer.finalizeCode(), JITCode::DFGJIT);
}
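The link step at the bottom shows the general shape of this code generator: calls are emitted with unresolved targets, each Call token is kept, and the LinkBuffer patches the recorded sites once the final executable buffer exists. A minimal sketch of that record-then-patch pattern; the types below are hypothetical, not the WebKit LinkBuffer API:

#include <cstddef>
#include <cstdio>
#include <utility>
#include <vector>

using StubFunction = void (*)();

struct CallTokenSketch { std::size_t offsetOfCall; };

struct LinkBufferSketch {
    std::vector<std::pair<CallTokenSketch, StubFunction>> patches;

    // Bind a recorded call site to its real target; applied in finalize().
    void link(CallTokenSketch call, StubFunction target) { patches.push_back({call, target}); }

    void finalize()
    {
        for (const auto& p : patches)
            std::printf("patch call at +%zu\n", p.first.offsetOfCall); // a real linker rewrites the call target here
    }
};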