void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
    if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
        // No slow cases were emitted on the fast path, so we should never get here.
#ifndef NDEBUG
        breakpoint();
#endif
        return;
    }
    if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
        if (!types.first().definitelyIsNumber())
            linkSlowCase(iter);
    }
    if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
        if (!types.second().definitelyIsNumber())
            linkSlowCase(iter);
    }
    JITStubCall stubCall(this, cti_op_div);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
}
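// A minimal sketch of the slow-case linking contract these emitSlow_* handlers rely on,
// assuming the JITInlineMethods helpers of this era (an illustration, not part of this
// file): linkSlowCase(iter) binds the next bail-out branch that the fast path recorded
// with addSlowCase(), in emission order, roughly:
//
//     ALWAYS_INLINE void JIT::linkSlowCase(Vector<SlowCaseEntry>::iterator& iter)
//     {
//         iter->from.link(this); // bind the fast path's bail-out branch to this point
//         ++iter;                // advance to the next recorded slow case
//     }
//
// Hence the guarded linkSlowCase() calls above must mirror, one for one, the
// addSlowCase() calls emitted on the fast path for op_div.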
void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32();
        // op1 = regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift)
                urshift32(Imm32(shift & 0x1f), regT0);
            if (shift < 0 || !(shift & 31))
                failures.append(branch32(LessThan, regT0, Imm32(0)));
            emitFastArithReTagImmediate(regT0, regT0);
            emitPutVirtualRegister(dst, regT0);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
            failures.link(this);
        }
        if (shift < 0 || !(shift & 31))
            linkSlowCase(iter); // failed to box in hot path
    } else {
        // op1 = regT0
        // op2 = regT1
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
                addPtr(tagTypeNumberRegister, regT0);
                movePtrToDouble(regT0, fpRegT0);
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
                emitFastArithImmToInt(regT1);
                urshift32(regT1, regT0);
                failures.append(branch32(LessThan, regT0, Imm32(0)));
                emitFastArithReTagImmediate(regT0, regT0);
                emitPutVirtualRegister(dst, regT0);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
                failures.link(this);
            }
        }
        linkSlowCase(iter); // int32 check - op2 is not an int
        linkSlowCase(iter); // Can't represent unsigned result as an immediate
    }
    JITStubCall stubCall(this, cti_op_urshift);
    stubCall.addArgument(op1, regT0);
    stubCall.addArgument(op2, regT1);
    stubCall.call(dst);
}
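// Note: the jumps back to the hot path above use OPCODE_LENGTH(op_rshift) even though
// this is the op_urshift handler; that is only sound if both opcodes occupy the same
// number of instruction slots (dst, op1, op2). A compile-time check along these lines
// would make the assumption explicit (a sketch; not present in the original):
//
//     COMPILE_ASSERT(OPCODE_LENGTH(op_rshift) == OPCODE_LENGTH(op_urshift), op_rshift_and_op_urshift_lengths_match);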
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT2);
    stubCall.call(result);
}
void JIT::emitSlow_op_post_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_post_dec);
    stubCall.addArgument(regT0);
    stubCall.addArgument(Imm32(srcDst));
    stubCall.call(result);
}
void JIT::compileOpCallVarargsSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_call_NotJSFunction);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT2);
    stubCall.addArgument(regT1);
    stubCall.call();

    sampleCodeBlock(m_codeBlock);
}
void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction*, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
{
    if (opcodeID == op_call_eval) {
        compileCallEvalSlowCase(iter);
        return;
    }

    linkSlowCase(iter);
    linkSlowCase(iter);

    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->getCTIStub(linkConstructGenerator).code() : m_globalData->getCTIStub(linkCallGenerator).code());

    sampleCodeBlock(m_codeBlock);
}
void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    UNUSED_PARAM(op1);
    UNUSED_PARAM(op2);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_lshift);
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT2);
    stubCall.call(result);
}
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
{
    int argCount = instruction[2].u.operand;
    int registerOffset = instruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);

    JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
    stubCall.addArgument(regT0);
    stubCall.addArgument(JIT::Imm32(registerOffset));
    stubCall.addArgument(JIT::Imm32(argCount));
    stubCall.call();

    sampleCodeBlock(m_codeBlock);
}
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
    int argCount = instruction[2].u.operand;
    int registerOffset = instruction[3].u.operand;

    linkSlowCase(iter);

    // Fast check for JS function.
    Jump callLinkFailNotObject = emitJumpIfNotJSCell(regT0);
    Jump callLinkFailNotJSFunction = branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr));

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argCount), regT1);

    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstructLink() : m_globalData->jitStubs->ctiVirtualCallLink());

    // Done! - return back to the hot path.
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_eval));
    ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct));
    emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_call));

    // This handles host functions
    callLinkFailNotObject.link(this);
    callLinkFailNotJSFunction.link(this);

    JITStubCall stubCall(this, opcodeID == op_construct ? cti_op_construct_NotJSConstruct : cti_op_call_NotJSFunction);
    stubCall.addArgument(regT0);
    stubCall.addArgument(JIT::Imm32(registerOffset));
    stubCall.addArgument(JIT::Imm32(argCount));
    stubCall.call();

    sampleCodeBlock(m_codeBlock);
}
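// The speculative frame roll above can be read as the following arithmetic (assuming the
// RegisterFile layout of this era, where the callee frame begins registerOffset slots
// above the caller frame):
//
//     calleeFrame[RegisterFile::CallerFrame] = callFrameRegister     // store caller frame pointer
//     callFrameRegister += registerOffset * sizeof(Register)         // make the callee frame current
//
// It is "speculative" because argCount is assumed to match the callee's arity; the
// ctiVirtualCallLink / ctiVirtualConstructLink stubs re-check and fix up the frame when
// the arity does not match.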
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
{
    int dst = instruction[1].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);

    // This handles host functions
    emitCTICall(((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
    info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0);

    linkSlowCase(iter);

    int registerOffset = -instruction[4].u.operand;

    addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);

    // Reload the callee's payload and tag from the newly rolled frame.
    loadPtr(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC) + PayloadOffset), regT0);
    loadPtr(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC) + TagOffset), regT1);
    move(TrustedImmPtr(info), regT2);

    MacroAssemblerCodeRef virtualThunk = virtualThunkFor(m_vm, *info);
    info->setSlowStub(createJITStubRoutine(virtualThunk, *m_vm, nullptr, true));
    emitNakedCall(virtualThunk.code());
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
{
    if (opcodeID == op_call_eval) {
        compileCallEvalSlowCase(instruction, iter);
        return;
    }

    linkSlowCase(iter);

    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments)
        emitRestoreCalleeSaves();

    move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);

    m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).code());

    if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
        abortWithReason(JITDidReturnFromTailCall);
        return;
    }

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
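// A tail call replaces the caller's frame, so control must never return to the naked
// call's return site above; abortWithReason(JITDidReturnFromTailCall) traps that
// invariant instead of falling through to the stack-pointer restore and result store,
// which only apply to ordinary calls.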
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if ENABLE(JIT_USE_SOFT_MODULO)
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);
    linkSlowCase(iter);
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
#else
    ASSERT_NOT_REACHED();
#endif
}
void JIT::compileCallEvalSlowCase(Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    emitLoad(RegisterFile::Callee, regT1, regT0);
    emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());

    sampleCodeBlock(m_codeBlock);
}
void JIT::compileCallEvalSlowCase(Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
    emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());

    sampleCodeBlock(m_codeBlock);
}
void JIT::compileCallEvalSlowCase(Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    emitGetFromCallFrameHeader64(JSStack::Callee, regT0);
    emitNakedCall(m_globalData->getCTIStub(virtualCallGenerator).code());

    sampleCodeBlock(m_codeBlock);
}
void JIT::compileCallEvalSlowCase(Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    emitLoad(JSStack::Callee, regT1, regT0);
    emitNakedCall(m_vm->getCTIStub(virtualCallGenerator).code());

    sampleCodeBlock(m_codeBlock);
}
void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
{
    if (opcodeID == op_call_eval) {
        compileCallEvalSlowCase(instruction, iter);
        return;
    }

    linkSlowCase(iter);
    linkSlowCase(iter);

    move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);

    m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).code());

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    emitGetFromCallFrameHeader64(JSStack::Callee, regT0);
    emitNakedCall(m_vm->getCTIStub(oldStyleVirtualCallGenerator).code());

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    emitLoad(JSStack::Callee, regT1, regT0);
    emitNakedCall(m_vm->getCTIStub(virtualCallThunkGenerator).code());

    sampleCodeBlock(m_codeBlock);
    emitPutCallResult(instruction);
}
void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}
void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    JITStubCall stubCall(this, cti_op_rshift);

    if (isOperandConstantImmediateInt(op2)) {
        linkSlowCase(iter);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
    } else {
        if (supportsFloatingPointTruncate()) {
            linkSlowCase(iter);
            linkSlowCase(iter);
            linkSlowCase(iter);
            // We're reloading op1 to regT0 as we can no longer guarantee that
            // we have not munged the operand. It may have already been shifted
            // correctly, but it still will not have been tagged.
            stubCall.addArgument(op1, regT0);
            stubCall.addArgument(regT2);
        } else {
            linkSlowCase(iter);
            linkSlowCase(iter);
            stubCall.addArgument(regT0);
            stubCall.addArgument(regT2);
        }
    }

    stubCall.call(result);
}
void JIT::compilePutByIdSlowCase(int baseVReg, Identifier* ident, int, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
{
    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

    emitPutJITStubArgConstant(reinterpret_cast<unsigned>(ident), 2);
    emitPutJITStubArg(X86::eax, 1);
    emitPutJITStubArg(X86::edx, 3);
    JmpSrc call = emitCTICall(Interpreter::cti_op_put_by_id);

    // Track the location of the call; this will be used to recover repatch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    linkSlowCase(iter);

    load64(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT0);
    move(TrustedImmPtr(&CallLinkInfo::dummy()), regT2);

    emitNakedCall(m_vm->getCTIStub(virtualCallThunkGenerator).code());
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int callee = instruction[1].u.operand;

    linkSlowCaseIfNotJSCell(iter, callee);
    linkSlowCase(iter);

    JITStubCall stubCall(this, cti_op_call_NotJSFunction);
    stubCall.addArgument(regT1, regT0);
    stubCall.addArgument(regT3);
    stubCall.addArgument(regT2);
    stubCall.call();

    sampleCodeBlock(m_codeBlock);
}
void JIT::compileOpCallVarargsSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    int callee = instruction[1].u.operand;

    linkSlowCaseIfNotJSCell(iter, callee);
    Jump notCell = jump();
    linkSlowCase(iter);
    move(TrustedImm32(JSValue::CellTag), regT1); // Need to restore cell tag in regT1 because it was clobbered.
    notCell.link(this);

    JITStubCall stubCall(this, cti_op_call_NotJSFunction);
    stubCall.addArgument(regT1, regT0);
    stubCall.addArgument(regT3);
    stubCall.addArgument(regT2);
    stubCall.call();

    sampleCodeBlock(m_codeBlock);
}
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
    info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0);

    linkSlowCase(iter);

    int registerOffset = -instruction[4].u.operand;

    addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);

    load64(Address(stackPointerRegister, sizeof(Register) * CallFrameSlot::callee - sizeof(CallerFrameAndPC)), regT0);
    emitDumbVirtualCall(info);
    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
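// emitDumbVirtualCall(info) is presumably the factored-out form of the sequence spelled
// out inline in the earlier revision of this function above: move the CallLinkInfo* into
// regT2, build a thunk with virtualThunkFor(), retain it via info->setSlowStub(), and
// emitNakedCall() into it. (An assumption from the surrounding revisions, not a verified
// reading of the helper.)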
void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the repatch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.
    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

#ifndef NDEBUG
    JmpDst coldPathBegin = __ label();
#endif
    emitPutJITStubArg(X86::eax, 1);
    emitPutJITStubArgConstant(reinterpret_cast<unsigned>(ident), 2);
    JmpSrc call = emitCTICall(Interpreter::cti_op_get_by_id);
    ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
    emitPutVirtualRegister(resultVReg);

    // Track the location of the call; this will be used to recover repatch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}
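// The assertion above pins the distance from coldPathBegin to the cti call at the
// constant repatchOffsetGetByIdSlowCaseCall; this is what lets the repatching code later
// recover the head of this slow case from the recorded callReturnLocation by subtracting
// that fixed offset, as the comment at the top of the function describes.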
void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    linkSlowCase(iter);
    if (isOperandConstantImmediateInt(op1)) {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT0);
        stubCall.call(result);
    } else if (isOperandConstantImmediateInt(op2)) {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(regT0);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
    } else {
        JITStubCall stubCall(this, cti_op_bitand);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(regT1);
        stubCall.call(result);
    }
}
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    linkSlowCase(iter);

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Fast check for JS function.
    Jump callLinkFailNotObject = emitJumpIfNotJSCell(X86::ecx);
    Jump callLinkFailNotJSFunction = jnePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr));

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    move(Imm32(argCount), X86::edx);

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);

    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_interpreter->m_ctiVirtualCallPreLink);

    Jump storeResultForFirstRun = jump();

    // FIXME: this label can be removed, since it is a fixed offset from 'callReturnLocation'.
    // This is the address for the cold path *after* the first run (which tries to link the call).
    m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = MacroAssembler::Label(this);

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Check for JSFunctions.
    Jump isNotObject = emitJumpIfNotJSCell(X86::ecx);
    Jump isJSFunction = jePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr));

    // This handles host functions
    isNotObject.link(this);
    callLinkFailNotObject.link(this);
    callLinkFailNotJSFunction.link(this);
    emitCTICall(((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
    Jump wasNotJSFunction = jump();

    // Next, handle JSFunctions...
    isJSFunction.link(this);

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argCount), X86::edx);

    emitNakedCall(m_interpreter->m_ctiVirtualCall);

    // Put the return value in dst. In the interpreter, op_ret does this.
    wasNotJSFunction.link(this);
    storeResultForFirstRun.link(this);
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
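// The cold path above is two-phase: on the first run, the naked call into
// m_ctiVirtualCallPreLink attempts to link this call site directly to the callee;
// coldPathOther marks the entry used on subsequent runs, which re-checks the callee and
// either dispatches through m_ctiVirtualCall (a JSFunction) or falls back to the
// NotJSFunction/NotJSConstruct stubs (host functions and non-callables).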