void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Slow case of urshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithImmToInt(regT0);
        int shift = getConstantOperand(op2).asInt32();
        if (shift)
            urshift32(Imm32(shift & 0x1f), regT0);
        // A shift < 0 or a shift that is a multiple of 32 can leave the value
        // unshifted, making this (essentially) a toUint conversion, which can
        // produce a value we cannot represent as an immediate int.
        if (shift < 0 || !(shift & 31))
            addSlowCase(branch32(LessThan, regT0, Imm32(0)));
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(dst, regT0);
        return;
    }

    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    if (!isOperandConstantImmediateInt(op1))
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT1);
    urshift32(regT1, regT0);
    addSlowCase(branch32(LessThan, regT0, Imm32(0)));
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(dst, regT0);
}
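// Editor's note: a minimal standalone sketch (not part of the original source) of the
// urshift fast-path semantics relied on above. ECMA-262 masks the shift amount with
// 0x1f, and the result is representable as an immediate int only if it fits in a
// signed int32 -- exactly the condition the branch32(LessThan, ...) slow case guards.
// demoUrshiftFastPath is an illustrative name, not a WebKit function.
#include <cstdint>

static inline bool demoUrshiftFastPath(int32_t lhs, int32_t shift, uint32_t& out)
{
    out = static_cast<uint32_t>(lhs) >> (shift & 0x1f); // mask as urshift32 does
    return out <= 0x7fffffffu; // false => result cannot be re-tagged; take the slow case
}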
void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        int shift = getConstantOperand(op2).asInt32();
        // op1 = regT0
        linkSlowCase(iter); // int32 check
        if (supportsFloatingPointTruncate()) {
            JumpList failures;
            failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
            if (shift)
                urshift32(Imm32(shift & 0x1f), regT0);
            if (shift < 0 || !(shift & 31))
                failures.append(branch32(LessThan, regT0, Imm32(0)));
            emitFastArithReTagImmediate(regT0, regT0);
            emitPutVirtualRegister(dst, regT0);
            emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_urshift));
            failures.link(this);
        }
        if (shift < 0 || !(shift & 31))
            linkSlowCase(iter); // failed to box in hot path
    } else {
        // op1 = regT0
        // op2 = regT1
        if (!isOperandConstantImmediateInt(op1)) {
            linkSlowCase(iter); // int32 check -- op1 is not an int
            if (supportsFloatingPointTruncate()) {
                JumpList failures;
                failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
                addPtr(tagTypeNumberRegister, regT0);
                movePtrToDouble(regT0, fpRegT0);
                failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
                failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
                emitFastArithImmToInt(regT1);
                urshift32(regT1, regT0);
                failures.append(branch32(LessThan, regT0, Imm32(0)));
                emitFastArithReTagImmediate(regT0, regT0);
                emitPutVirtualRegister(dst, regT0);
                emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_urshift));
                failures.link(this);
            }
        }
        linkSlowCase(iter); // int32 check -- op2 is not an int
        linkSlowCase(iter); // can't represent unsigned result as an immediate
    }

    JITStubCall stubCall(this, cti_op_urshift);
    stubCall.addArgument(op1, regT0);
    stubCall.addArgument(op2, regT1);
    stubCall.call(dst);
}
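// Editor's note: an approximate, illustrative model (not WebKit API) of the guard
// branchTruncateDoubleToInt32 provides in the slow path above: branch away unless
// the double truncates to a representable int32. The real x86 check is slightly
// stricter (cvttsd2si signals failure with 0x80000000, so a result of exactly
// INT32_MIN is also treated as failure); this sketch only captures range/NaN.
#include <cmath>
#include <cstdint>

static inline bool demoTruncateDoubleToInt32(double d, int32_t& out)
{
    if (std::isnan(d) || d <= -2147483649.0 || d >= 2147483648.0)
        return false; // the "failures" path: fall through to the stub call
    out = static_cast<int32_t>(std::trunc(d));
    return true;
}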
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned i, unsigned)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Handle eval
    JmpSrc wasEval;
    if (opcodeID == op_call_eval) {
        emitGetVirtualRegister(callee, X86::ecx, i);
        compileOpCallEvalSetupArgs(instruction);

        emitCTICall(i, Interpreter::cti_op_call_eval);
        __ cmpl_i32r(asInteger(JSImmediate::impossibleValue()), X86::eax);
        wasEval = __ jne();
    }

    emitGetVirtualRegister(callee, X86::ecx, i);
    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Check for JSFunctions.
    emitJumpSlowCaseIfNotJSCell(X86::ecx, i);
    __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), X86::ecx);
    m_slowCases.append(SlowCaseEntry(__ jne(), i));

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(i, Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx, i);
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
    __ addl_i32r(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_i32r(argCount, X86::edx);

    emitNakedCall(i, m_interpreter->m_ctiVirtualCall);

    if (opcodeID == op_call_eval)
        __ link(wasEval, __ label());

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Handle eval
    Jump wasEval;
    if (opcodeID == op_call_eval) {
        emitGetVirtualRegister(callee, X86::ecx);
        compileOpCallEvalSetupArgs(instruction);

        emitCTICall(Interpreter::cti_op_call_eval);
        wasEval = jnePtr(X86::eax, ImmPtr(JSImmediate::impossibleValue()));
    }

    emitGetVirtualRegister(callee, X86::ecx);
    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Check for JSFunctions.
    emitJumpSlowCaseIfNotJSCell(X86::ecx);
    addSlowCase(jnePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr)));

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argCount), X86::edx);

    emitNakedCall(m_interpreter->m_ctiVirtualCall);

    if (opcodeID == op_call_eval)
        wasEval.link(this);

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
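// Editor's note: an illustrative sketch (hypothetical names, not WebKit API) of the
// "speculative callframe roll" above: store the current frame pointer into the new
// frame's CallerFrame slot, then advance the frame register by registerOffset slots.
// This mirrors the storePtr/addPtr pair, assuming Register is one pointer-sized slot.
struct DemoRegister { void* u; };

static inline DemoRegister* demoRollCallFrame(DemoRegister* callFrame, int registerOffset, int callerFrameSlot)
{
    callFrame[registerOffset + callerFrameSlot].u = callFrame; // storePtr(callFrameRegister, ...)
    return callFrame + registerOffset;                         // addPtr(Imm32(registerOffset * sizeof(Register)), ...)
}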
void JIT::emit_op_post_dec(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchSub32(Zero, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        // isOperandConstantImmediateInt(op2) => 1 SlowCase
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (supportsFloatingPointTruncate()) {
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
            // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
            lhsIsInt.link(this);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        } else {
            // !supportsFloatingPoint() => 2 SlowCases
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        }
        emitFastArithImmToInt(regT2);
        rshift32(regT2, regT0);
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(result);
}
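// Editor's note: a one-line illustrative contrast with op_urshift (demo code, not
// WebKit API): an arithmetic right shift can never leave the int32 range, so unlike
// urshift there is no "unrepresentable result" slow case -- only operand-type checks.
// (Pre-C++20, >> on a negative int is implementation-defined; mainstream compilers
// shift arithmetically, matching rshift32.)
#include <cstdint>

static inline int32_t demoRshift(int32_t lhs, int32_t shift)
{
    return lhs >> (shift & 0x1f); // mask with 0x1f as per ecma-262 11.7.2 step 7
}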
void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
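// Editor's note: an illustrative model of the branchAdd32(Overflow, ...) fast case
// above (demo code, not WebKit API): add the int32 payloads and bail to the slow
// case on signed overflow. The GCC/Clang builtin stands in for the jo-guarded add
// the JIT actually emits.
#include <cstdint>

static inline bool demoAddFastPath(int32_t a, int32_t b, int32_t& out)
{
    return !__builtin_add_overflow(a, b, &out); // false => take the slow case
}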
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t imm = getConstantOperandImmediateInt(op1);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t imm = getConstantOperandImmediateInt(op2);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        andPtr(regT1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}
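// Editor's note: an illustrative model of the tagging trick above, assuming the
// JSVALUE64 encoding of this era (an immediate int is its zero-extended payload
// OR'd with a high tag). AND of two tagged ints preserves the tag, and AND with a
// sign-extended negative constant does too; only a non-negative constant clears
// the tag bits, which is why the code re-tags when imm >= 0. Demo names throughout.
#include <cstdint>

static const uint64_t demoTagTypeNumber = 0xffff000000000000ull; // assumed tag value

static inline uint64_t demoTagInt(int32_t v)
{
    return static_cast<uint64_t>(static_cast<uint32_t>(v)) | demoTagTypeNumber;
}

static inline uint64_t demoBitandTagged(uint64_t a, uint64_t b)
{
    return a & b; // tag & tag == tag, so the result is still a tagged int
}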
void JIT::emit_op_call_put_result(Instruction* instruction)
{
    int dst = instruction[1].u.operand;
    emitValueProfilingSite();
    emitPutVirtualRegister(dst);
    if (canBeOptimized())
        killLastResultRegister(); // Make lastResultRegister tracking simpler in the DFG.
}
void JIT::emit_op_sub(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    compileBinaryArithOp(op_sub, result, op1, op2, types);
    emitPutVirtualRegister(result);
}
void JIT::emit_op_pre_inc(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(srcDst);
}
void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateDouble(op1)) {
        emitGetVirtualRegister(op1, regT0);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitLoadInt32ToDouble(op1, fpRegT0);
    } else {
        emitGetVirtualRegister(op1, regT0);
        if (!types.first().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT0);
        Jump notInt = emitJumpIfNotImmediateInteger(regT0);
        convertInt32ToDouble(regT0, fpRegT0);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
        skipDoubleLoad.link(this);
    }

    if (isOperandConstantImmediateDouble(op2)) {
        emitGetVirtualRegister(op2, regT1);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoadInt32ToDouble(op2, fpRegT1);
    } else {
        emitGetVirtualRegister(op2, regT1);
        if (!types.second().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT1);
        Jump notInt = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
        skipDoubleLoad.link(this);
    }
    divDouble(fpRegT1, fpRegT0);

    // Double result.
    moveDoubleToPtr(fpRegT0, regT0);
    subPtr(tagTypeNumberRegister, regT0);
    emitPutVirtualRegister(dst, regT0);
}
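// Editor's note: an illustrative model (demo names, assumed constants) of the
// JSVALUE64 double boxing used above: a boxed double is its bit pattern plus
// DoubleEncodeOffset, and TagTypeNumber == -DoubleEncodeOffset, so
// addPtr(tagTypeNumberRegister, r) unboxes and subPtr(tagTypeNumberRegister, r)
// re-boxes (see the COMPILE_ASSERT in compileBinaryArithOpSlowCase below).
#include <cstdint>
#include <cstring>

static const uint64_t demoTagTypeNumberValue = 0xffff000000000000ull; // == -(1 << 48) mod 2^64

static inline double demoUnboxDouble(uint64_t boxed)
{
    uint64_t bits = boxed + demoTagTypeNumberValue; // addPtr(tagTypeNumberRegister, ...)
    double d;
    std::memcpy(&d, &bits, sizeof d);               // movePtrToDouble
    return d;
}

static inline uint64_t demoBoxDouble(double d)
{
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);            // moveDoubleToPtr
    return bits - demoTagTypeNumberValue;           // subPtr(tagTypeNumberRegister, ...)
}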
void JIT::emitPutCallResult(Instruction* instruction)
{
    int dst = instruction[1].u.operand;
    emitValueProfilingSite(regT4);
    emitPutVirtualRegister(dst);

    if (canBeOptimizedOrInlined()) {
        // Make lastResultRegister tracking simpler in the DFG. This is needed because
        // the DFG may have the SetLocal corresponding to this Call's return value in
        // a different basic block, if inlining happened. The DFG isn't smart enough to
        // track the baseline JIT's last result register across basic blocks.
        killLastResultRegister();
    }
}
void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be repatched.
    // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
    // to the array-length / prototype access trampolines), and finally we also track the property-map access offset as a
    // label to jump back to if one of these trampolines finds a match.
    emitGetVirtualRegister(baseVReg, X86::eax);
    emitPutJITStubArg(X86::eax, 1);
    emitPutJITStubArgConstant(ident, 2);
    emitCTICall(Interpreter::cti_op_get_by_id_generic);
    emitPutVirtualRegister(resultVReg);
}
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned, OpcodeID opcodeID)
{
    int dst = instruction[1].u.operand;

    linkSlowCase(iter);
    linkSlowCase(iter);

    // This handles host functions
    emitCTICall(((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JIT::emit_op_lshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT2);
    lshift32(regT2, regT0);
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(result);
}
void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, unsigned propertyAccessInstructionIndex)
{
    // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
    // so that we only need to track one pointer into the slow case code - we track a pointer to the location
    // of the call (which we can use to look up the repatch information), but should an array-length or
    // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
    // the distance from the call to the head of the slow case.
    linkSlowCaseIfNotJSCell(iter, baseVReg);
    linkSlowCase(iter);

#ifndef NDEBUG
    JmpDst coldPathBegin = __ label();
#endif
    emitPutJITStubArg(X86::eax, 1);
    emitPutJITStubArgConstant(reinterpret_cast<unsigned>(ident), 2);
    JmpSrc call = emitCTICall(Interpreter::cti_op_get_by_id);
    ASSERT(X86Assembler::getDifferenceBetweenLabels(coldPathBegin, call) == repatchOffsetGetByIdSlowCaseCall);
    emitPutVirtualRegister(resultVReg);

    // Track the location of the call; this will be used to recover repatch information.
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].callReturnLocation = call;
}
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if CPU(X86) || CPU(X86_64)
    // Make sure registers are correct for x86 IDIV instructions.
    ASSERT(regT0 == X86Registers::eax);
    ASSERT(regT1 == X86Registers::edx);
    ASSERT(regT2 == X86Registers::ecx);
#endif

    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);

    addSlowCase(branchPtr(Equal, regT2, ImmPtr(JSValue::encode(jsNumber(m_globalData, 0)))));
    m_assembler.cdq();
    m_assembler.idivl_r(regT2);
    emitFastArithReTagImmediate(regT1, regT0);
    emitPutVirtualRegister(result);
}
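// Editor's note: an illustrative model of the idiv fast path above (demo code, not
// WebKit API). cdq sign-extends eax into edx:eax; idiv leaves the quotient in eax
// and the remainder in edx (regT1), which is what gets re-tagged. An encoded-zero
// divisor is excluded up front because idiv would fault. Note that INT32_MIN % -1
// also faults on idiv and is not guarded by this revision, so this sketch inherits
// that hazard.
#include <cstdint>

static inline bool demoModFastPath(int32_t lhs, int32_t rhs, int32_t& out)
{
    if (!rhs)
        return false; // mirrors the branchPtr(Equal, ..., encoded 0) slow case above
    out = lhs % rhs;  // truncated remainder, with the sign of lhs, as idiv produces
    return true;
}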
void JIT::emit_op_mod(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

#if ENABLE(JIT_USE_SOFT_MODULO)
    emitGetVirtualRegisters(op1, regT0, op2, regT2);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT2);

    addSlowCase(branch32(Equal, regT2, Imm32(1)));

    emitNakedCall(m_globalData->jitStubs->ctiSoftModulo());

    emitPutVirtualRegister(result, regT0);
#else
    JITStubCall stubCall(this, cti_op_mod);
    stubCall.addArgument(op1, regT2);
    stubCall.addArgument(op2, regT2);
    stubCall.call(result);
#endif
}
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
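// Editor's note: an illustrative model of why the constant-operand fast case above
// requires value > 0 (demo code, not WebKit API): 0 * negative must produce -0 in
// JS, and an immediate int cannot represent -0, so constants <= 0 go through
// compileBinaryArithOp / the slow path. Overflow bails exactly as branchMul32 does,
// with the builtin standing in for the jo-guarded imul.
#include <cstdint>

static inline bool demoMulByConstantFastPath(int32_t lhs, int32_t constOperand, int32_t& out)
{
    if (constOperand <= 0)
        return false; // e.g. 0 * -5 == -0.0, unrepresentable as an immediate int
    return !__builtin_mul_overflow(lhs, constOperand, &out);
}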
void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be repatched.
    // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to jump
    // to the array-length / prototype access trampolines), and finally we also track the property-map access offset as a
    // label to jump back to if one of these trampolines finds a match.
    emitGetVirtualRegister(baseVReg, X86::eax);
    emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg);

    JmpDst hotPathBegin = __ label();
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    __ cmpl_im_force32(repatchGetByIdDefaultStructure, FIELD_OFFSET(JSCell, m_structure), X86::eax);
    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdStructure);
    addSlowCase(__ jne());
    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdBranchToSlowCase);

    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::eax);
    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdPropertyMapOffset);
    emitPutVirtualRegister(resultVReg);
}
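// Editor's note: a standalone sketch (hypothetical types and names, not WebKit's) of
// the inline cache the hot path above plants: a patched structure pointer and a
// patched storage offset. repatchGetByIdDefaultStructure / repatchGetByIdDefaultOffset
// play the role of cachedStructure / cachedOffset before the first repatch, so the
// first execution always takes the slow case.
struct DemoStructure;

struct DemoObject {
    DemoStructure* structure; // JSCell::m_structure
    void** propertyStorage;   // JSObject::m_propertyStorage
};

static inline void* demoGetByIdHotPath(DemoObject* base, DemoStructure* cachedStructure, int cachedOffset, bool& tookSlowCase)
{
    if (base->structure != cachedStructure) { // the repatched cmpl / jne above
        tookSlowCase = true;                  // handled by compileGetByIdSlowCase
        return nullptr;
    }
    tookSlowCase = false;
    return base->propertyStorage[cachedOffset]; // the two repatched movl loads
}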
void JIT::emit_op_call_put_result(Instruction* instruction)
{
    int dst = instruction[1].u.operand;
    emitPutVirtualRegister(dst);
}
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    linkSlowCase(iter);

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Fast check for JS function.
    Jump callLinkFailNotObject = emitJumpIfNotJSCell(X86::ecx);
    Jump callLinkFailNotJSFunction = jnePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr));

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    move(Imm32(argCount), X86::edx);

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);

    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_interpreter->m_ctiVirtualCallPreLink);

    Jump storeResultForFirstRun = jump();

    // FIXME: this label can be removed, since it is a fixed offset from 'callReturnLocation'.
    // This is the address for the cold path *after* the first run (which tries to link the call).
    m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = MacroAssembler::Label(this);

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Check for JSFunctions.
    Jump isNotObject = emitJumpIfNotJSCell(X86::ecx);
    Jump isJSFunction = jePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr));

    // This handles host functions
    isNotObject.link(this);
    callLinkFailNotObject.link(this);
    callLinkFailNotJSFunction.link(this);
    emitCTICall(((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
    Jump wasNotJSFunction = jump();

    // Next, handle JSFunctions...
    isJSFunction.link(this);

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argCount), X86::edx);

    emitNakedCall(m_interpreter->m_ctiVirtualCall);

    // Put the return value in dst. In the interpreter, op_ret does this.
    wasNotJSFunction.link(this);
    storeResultForFirstRun.link(this);
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Handle eval
    Jump wasEval;
    if (opcodeID == op_call_eval) {
        emitGetVirtualRegister(callee, X86::ecx);
        compileOpCallEvalSetupArgs(instruction);

        emitCTICall(Interpreter::cti_op_call_eval);
        wasEval = jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(JSImmediate::impossibleValue())));
    }

    // This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
    // This deliberately leaves the callee in ecx, used when setting up the stack frame below.
    emitGetVirtualRegister(callee, X86::ecx);
    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump jumpToSlow = jnePtrWithPatch(X86::ecx, addressOfLinkedFunctionCheck, ImmPtr(JSValuePtr::encode(JSImmediate::impossibleValue())));
    addSlowCase(jumpToSlow);
    ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;

    // The following is the fast case, only used when a callee can be linked.

    // In the case of OpConstruct, call out to a cti_ function to create the new object.
    if (opcodeID == op_construct) {
        int proto = instruction[5].u.operand;
        int thisRegister = instruction[6].u.operand;

        emitPutJITStubArg(X86::ecx, 1);
        emitPutJITStubArgFromVirtualRegister(proto, 4, X86::eax);
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(thisRegister);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Fast version of stack frame initialization, directly relative to edi.
    // Note that this omits setting up RegisterFile::CodeBlock, which is set in the callee.
    storePtr(ImmPtr(JSValuePtr::encode(noValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register))));
    storePtr(X86::ecx, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
    loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node)), X86::edx); // newScopeChain
    store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
    storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
    storePtr(X86::edx, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);

    // Call to the callee
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(reinterpret_cast<void*>(unreachable));

    if (opcodeID == op_call_eval)
        wasEval.link(this);

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
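// Editor's note: an illustrative sketch (hypothetical names, not WebKit API) of the
// call linking that the patchable compare above enables: the call site stores an
// expected callee, initialized to an impossible value so the first run always takes
// the slow path, which can then patch in the real callee and a direct call target.
struct DemoCallSite {
    void* expectedCallee;   // starts as an analogue of JSImmediate::impossibleValue()
    void (*linkedTarget)(); // patched together with expectedCallee
};

static inline void demoLinkedCall(DemoCallSite& site, void* callee, void (*slowPath)(DemoCallSite&, void*))
{
    if (site.expectedCallee != callee) { // jnePtrWithPatch(...)
        slowPath(site, callee);          // may repatch expectedCallee/linkedTarget
        return;
    }
    site.linkedTarget();                 // the fast, linked call
}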
void JIT::emitPutCallResult(Instruction* instruction)
{
    int dst = instruction[1].u.operand;
    emitValueProfilingSite(regT4);
    emitPutVirtualRegister(dst);
}
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    linkSlowCase(iter);

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Fast check for JS function.
    __ testl_i32r(JSImmediate::TagMask, X86::ecx);
    JmpSrc callLinkFailNotObject = __ jne();
    __ cmpl_im(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), 0, X86::ecx);
    JmpSrc callLinkFailNotJSFunction = __ jne();

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    __ movl_i32r(argCount, X86::edx);

    // Speculatively roll the callframe, assuming argCount will match the arity.
    __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
    __ addl_ir(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);

    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_interpreter->m_ctiVirtualCallPreLink);

    JmpSrc storeResultForFirstRun = __ jmp();

    // This is the address for the cold path *after* the first run (which tries to link the call).
    m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = __ label();

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Check for JSFunctions.
    __ testl_i32r(JSImmediate::TagMask, X86::ecx);
    JmpSrc isNotObject = __ jne();
    __ cmpl_im(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), 0, X86::ecx);
    JmpSrc isJSFunction = __ je();

    // This handles host functions
    JmpDst notJSFunctionlabel = __ label();
    __ link(isNotObject, notJSFunctionlabel);
    __ link(callLinkFailNotObject, notJSFunctionlabel);
    __ link(callLinkFailNotJSFunction, notJSFunctionlabel);
    emitCTICall(((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
    JmpSrc wasNotJSFunction = __ jmp();

    // Next, handle JSFunctions...
    __ link(isJSFunction, __ label());

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
    __ addl_ir(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_i32r(argCount, X86::edx);

    emitNakedCall(m_interpreter->m_ctiVirtualCall);

    // Put the return value in dst. In the interpreter, op_ret does this.
    JmpDst storeResult = __ label();
    __ link(wasNotJSFunction, storeResult);
    __ link(storeResultForFirstRun, storeResult);
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Handle eval
    JmpSrc wasEval;
    if (opcodeID == op_call_eval) {
        emitGetVirtualRegister(callee, X86::ecx);
        compileOpCallEvalSetupArgs(instruction);

        emitCTICall(Interpreter::cti_op_call_eval);
        __ cmpl_ir(asInteger(JSImmediate::impossibleValue()), X86::eax);
        wasEval = __ jne();
    }

    // This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
    // This deliberately leaves the callee in ecx, used when setting up the stack frame below.
    emitGetVirtualRegister(callee, X86::ecx);
    __ cmpl_ir_force32(asInteger(JSImmediate::impossibleValue()), X86::ecx);
    JmpDst addressOfLinkedFunctionCheck = __ label();
    addSlowCase(__ jne());
    ASSERT(X86Assembler::getDifferenceBetweenLabels(addressOfLinkedFunctionCheck, __ label()) == repatchOffsetOpCallCall);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;

    // The following is the fast case, only used when a callee can be linked.

    // In the case of OpConstruct, call out to a cti_ function to create the new object.
    if (opcodeID == op_construct) {
        int proto = instruction[5].u.operand;
        int thisRegister = instruction[6].u.operand;

        emitPutJITStubArg(X86::ecx, 1);
        emitPutJITStubArgFromVirtualRegister(proto, 4, X86::eax);
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(thisRegister);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Fast version of stack frame initialization, directly relative to edi.
    // Note that this omits setting up RegisterFile::CodeBlock, which is set in the callee.
    __ movl_i32m(asInteger(noValue()), (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_rm(X86::ecx, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_mr(FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node), X86::ecx, X86::edx); // newScopeChain
    __ movl_i32m(argCount, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_rm(X86::edi, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_rm(X86::edx, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register)), X86::edi);
    __ addl_ir(registerOffset * sizeof(Register), X86::edi);

    // Call to the callee
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(reinterpret_cast<void*>(unreachable));

    if (opcodeID == op_call_eval)
        __ link(wasEval, __ label());

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
{
    // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
    COMPILE_ASSERT(((JSImmediate::TagTypeNumber + JSImmediate::DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);

    Jump notImm1;
    Jump notImm2;
    if (op1HasImmediateIntFastCase) {
        notImm2 = getSlowCase(iter);
    } else if (op2HasImmediateIntFastCase) {
        notImm1 = getSlowCase(iter);
    } else {
        notImm1 = getSlowCase(iter);
        notImm2 = getSlowCase(iter);
    }

    linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
    if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
        linkSlowCase(iter);
    emitGetVirtualRegister(op1, regT0);

    Label stubFunctionCall(this);
    JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
    if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
        emitGetVirtualRegister(op1, regT0);
        emitGetVirtualRegister(op2, regT1);
    }
    stubCall.addArgument(regT0);
    stubCall.addArgument(regT1);
    stubCall.call(result);
    Jump end = jump();

    if (op1HasImmediateIntFastCase) {
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op1, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT2);
    } else if (op2HasImmediateIntFastCase) {
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        emitGetVirtualRegister(op2, regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT2);
    } else {
        // If we get here, regT0 (eax) is not an int32; regT1 (edx) has not yet been checked.
        notImm1.link(this);
        if (!types.first().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT1);
        Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT2);
        Jump op2wasInteger = jump();

        // If we get here, regT0 (eax) IS an int32; regT1 (edx) is not.
        notImm2.link(this);
        if (!types.second().definitelyIsNumber())
            emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
        convertInt32ToDouble(regT0, fpRegT1);
        op2isDouble.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT2);
        op2wasInteger.link(this);
    }

    if (opcodeID == op_add)
        addDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_sub)
        subDouble(fpRegT2, fpRegT1);
    else if (opcodeID == op_mul)
        mulDouble(fpRegT2, fpRegT1);
    else {
        ASSERT(opcodeID == op_div);
        divDouble(fpRegT2, fpRegT1);
    }
    moveDoubleToPtr(fpRegT1, regT0);
    subPtr(tagTypeNumberRegister, regT0);
    emitPutVirtualRegister(result, regT0);

    end.link(this);
}
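// Editor's note: a one-line check (with assumed constant values for this era's
// JSVALUE64 encoding) of the invariant the COMPILE_ASSERT above pins down: the tag
// and the double-encode offset cancel in 64-bit arithmetic, so adding and then
// subtracting tagTypeNumberRegister round-trips a boxed double.
#include <cstdint>

static_assert(0xffff000000000000ull + (uint64_t(1) << 48) == 0,
    "TagTypeNumber + DoubleEncodeOffset wraps to zero in 64-bit arithmetic");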
void JIT::emit_op_call_put_result(Instruction* instruction)
{
    int dst = instruction[1].u.operand;
    emitValueProfilingSite(FirstProfilingSite);
    emitPutVirtualRegister(dst);
}