void JIT::compileSetupVarargsFrame(Instruction* instruction, CallLinkInfo* info)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    emitGetVirtualRegister(arguments, regT1);
    callOperation(operationSizeFrameForVarargs, regT1, -firstFreeRegister, firstVarArgOffset);
    move(TrustedImm32(-firstFreeRegister), regT1);
    emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
    addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister);
    emitGetVirtualRegister(arguments, regT2);
    callOperation(operationSetupVarargsFrame, regT1, regT2, firstVarArgOffset, regT0);
    move(returnValueGPR, regT1);

    // Profile the argument count.
    load32(Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
    load8(info->addressOfMaxNumArguments(), regT0);
    Jump notBiggest = branch32(Above, regT0, regT2);
    Jump notSaturated = branch32(BelowOrEqual, regT2, TrustedImm32(255));
    move(TrustedImm32(255), regT2);
    notSaturated.link(this);
    store8(regT2, info->addressOfMaxNumArguments());
    notBiggest.link(this);

    // Initialize 'this'.
    emitGetVirtualRegister(thisValue, regT0);
    store64(regT0, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
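// The stack-pointer adjustment above reserves the CallerFrameAndPC slots plus
// an alignment-padded scratch area below the frame being built. A minimal
// sketch of the same size arithmetic, assuming 16-byte stack alignment and
// 8-byte pointers (the constants and helper here are illustrative stand-ins,
// not JSC's definitions):
#include <cstddef>

constexpr size_t roundUpToMultipleOf(size_t divisor, size_t x)
{
    return (x + divisor - 1) / divisor * divisor; // same contract as WTF's helper
}

constexpr size_t varargsFrameAdjustment()
{
    const size_t callerFrameAndPCSize = 2 * sizeof(void*); // assumed layout
    const size_t stackAlignment = 16;                      // assumed
    return callerFrameAndPCSize + roundUpToMultipleOf(stackAlignment, 5 * sizeof(void*));
}
// With 8-byte pointers: 16 + roundUp(40 -> 48) = 64 bytes subtracted from
// regT1 to form the new stack pointer.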
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[2].u.operand;
    int arguments = instruction[3].u.operand;
    int firstFreeRegister = instruction[4].u.operand;

    killLastResultRegister();

    JumpList slowCase;
    JumpList end;
    if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister()) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(RegisterFile::ArgumentCount, regT0);
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis

        move(regT0, regT1);
        add32(TrustedImm32(firstFreeRegister + RegisterFile::CallFrameHeaderSize), regT1);
        lshift32(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame

        slowCase.append(branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1));

        // Initialize ArgumentCount.
        emitFastArithReTagImmediate(regT0, regT2);
        storePtr(regT2, Address(regT1, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register))));

        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        storePtr(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

        // Copy arguments.
        neg32(regT0);
        signExtend32ToPtr(regT0, regT0);
        end.append(branchAddPtr(Zero, Imm32(1), regT0));
        // regT0: -argumentCount

        Label copyLoop = label();
        loadPtr(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
        storePtr(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchAddPtr(NonZero, Imm32(1), regT0).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister())
        slowCase.link(this);

    JITStubCall stubCall(this, cti_op_load_varargs);
    stubCall.addArgument(thisValue, regT0);
    stubCall.addArgument(arguments, regT0);
    stubCall.addArgument(Imm32(firstFreeRegister));
    stubCall.call(regT1);

    if (m_codeBlock->usesArguments() && arguments == m_codeBlock->argumentsRegister())
        end.link(this);
}
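// A scalar sketch of the copy loop above: regT0 starts at -argumentCount and
// is pre-incremented before the first iteration, so the loop runs over
// indices -(argumentCount - 1) through -1 relative to the 'this' slot,
// copying every argument except 'this' (which was stored just before).
// Register is stood in for by a 64-bit word; the names are illustrative.
#include <cstdint>

static void copyVarargs(const uint64_t* callerThisSlot, uint64_t* newThisSlot,
                        intptr_t argumentCountIncludingThis)
{
    for (intptr_t i = -argumentCountIncludingThis + 1; i != 0; ++i)
        newThisSlot[i] = callerThisSlot[i];
}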
void JIT::emit_op_bitand(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t imm = getConstantOperandImmediateInt(op1);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t imm = getConstantOperandImmediateInt(op2);
        andPtr(Imm32(imm), regT0);
        if (imm >= 0)
            emitFastArithIntToImmNoCheck(regT0, regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        andPtr(regT1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    }
    emitPutVirtualRegister(result);
}
void JIT::compileOpCallVarargs(Instruction* instruction)
{
    int callee = instruction[1].u.operand;
    int argCountRegister = instruction[2].u.operand;
    int registerOffset = instruction[3].u.operand;

    emitGetVirtualRegister(argCountRegister, regT1);
    emitFastArithImmToInt(regT1);
    emitGetVirtualRegister(callee, regT0);
    addPtr(Imm32(registerOffset), regT1, regT2);

    // Check for JSFunctions.
    emitJumpSlowCaseIfNotJSCell(regT0);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr)));

    // Speculatively roll the callframe, assuming argCount will match the arity.
    mul32(TrustedImm32(sizeof(Register)), regT2, regT2);
    intptr_t offset = (intptr_t)sizeof(Register) * (intptr_t)RegisterFile::CallerFrame;
    addPtr(Imm32((int32_t)offset), regT2, regT3);
    addPtr(callFrameRegister, regT3);
    storePtr(callFrameRegister, regT3);
    addPtr(regT2, callFrameRegister);
    emitNakedCall(m_globalData->jitStubs->ctiVirtualCall());

    sampleCodeBlock(m_codeBlock);
}
void JIT::compileLoadVarargs(Instruction* instruction)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister().offset()
        && !m_codeBlock->symbolTable()->slowArguments();
    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
        slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
        // regT0: argumentCountIncludingThis

        move(regT0, regT1);
        neg64(regT1);
        add64(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT1);
        lshift64(TrustedImm32(3), regT1);
        addPtr(callFrameRegister, regT1);
        // regT1: newCallFrame

        slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1));

        // Initialize ArgumentCount.
        store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));

        // Initialize 'this'.
        emitGetVirtualRegister(thisValue, regT2);
        store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

        // Copy arguments.
        signExtend32ToPtr(regT0, regT0);
        end.append(branchSub64(Zero, TrustedImm32(1), regT0));
        // regT0: argumentCount

        Label copyLoop = label();
        load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
        store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
        branchSub64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);

        end.append(jump());
    }

    if (canOptimize)
        slowCase.link(this);

    emitGetVirtualRegister(thisValue, regT0);
    emitGetVirtualRegister(arguments, regT1);
    callOperation(operationLoadVarargs, regT0, regT1, firstFreeRegister);
    move(returnValueGPR, regT1);

    if (canOptimize)
        end.link(this);
}
void JIT::emit_op_add(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
        JITStubCall stubCall(this, cti_op_add);
        stubCall.addArgument(op1, regT2);
        stubCall.addArgument(op2, regT2);
        stubCall.call(result);
        return;
    }

    if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op1)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchAdd32(Overflow, Imm32(getConstantOperandImmediateInt(op2)), regT0));
        emitFastArithIntToImmNoCheck(regT0, regT0);
    } else
        compileBinaryArithOp(op_add, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned i, unsigned)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Handle eval
    JmpSrc wasEval;
    if (opcodeID == op_call_eval) {
        emitGetVirtualRegister(callee, X86::ecx, i);
        compileOpCallEvalSetupArgs(instruction);

        emitCTICall(i, Interpreter::cti_op_call_eval);
        __ cmpl_i32r(asInteger(JSImmediate::impossibleValue()), X86::eax);
        wasEval = __ jne();
    }

    emitGetVirtualRegister(callee, X86::ecx, i);
    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Check for JSFunctions.
    emitJumpSlowCaseIfNotJSCell(X86::ecx, i);
    __ cmpl_i32m(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), X86::ecx);
    m_slowCases.append(SlowCaseEntry(__ jne(), i));

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(i, Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx, i);
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
    __ addl_i32r(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_i32r(argCount, X86::edx);

    emitNakedCall(i, m_interpreter->m_ctiVirtualCall);

    if (opcodeID == op_call_eval)
        __ link(wasEval, __ label());

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JIT::emit_op_div(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    if (isOperandConstantImmediateDouble(op1)) {
        emitGetVirtualRegister(op1, regT0);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitLoadInt32ToDouble(op1, fpRegT0);
    } else {
        emitGetVirtualRegister(op1, regT0);
        if (!types.first().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT0);
        Jump notInt = emitJumpIfNotImmediateInteger(regT0);
        convertInt32ToDouble(regT0, fpRegT0);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT0);
        movePtrToDouble(regT0, fpRegT0);
        skipDoubleLoad.link(this);
    }

    if (isOperandConstantImmediateDouble(op2)) {
        emitGetVirtualRegister(op2, regT1);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
    } else if (isOperandConstantImmediateInt(op2)) {
        emitLoadInt32ToDouble(op2, fpRegT1);
    } else {
        emitGetVirtualRegister(op2, regT1);
        if (!types.second().definitelyIsNumber())
            emitJumpSlowCaseIfNotImmediateNumber(regT1);
        Jump notInt = emitJumpIfNotImmediateInteger(regT1);
        convertInt32ToDouble(regT1, fpRegT1);
        Jump skipDoubleLoad = jump();
        notInt.link(this);
        addPtr(tagTypeNumberRegister, regT1);
        movePtrToDouble(regT1, fpRegT1);
        skipDoubleLoad.link(this);
    }
    divDouble(fpRegT1, fpRegT0);

    // Double result.
    moveDoubleToPtr(fpRegT0, regT0);
    subPtr(tagTypeNumberRegister, regT0);

    emitPutVirtualRegister(dst, regT0);
}
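// The addPtr(tagTypeNumberRegister, ...) / subPtr(tagTypeNumberRegister, ...)
// pairs above are the 64-bit NaN-boxing decode/encode steps: with
// TagTypeNumber = 0xFFFF000000000000, adding it modulo 2^64 is the same as
// subtracting the double-encode offset 2^48, which recovers the raw IEEE 754
// bits; subtracting it re-applies the offset. A sketch of the encoding
// (constants as in JSC's JSVALUE64 scheme):
#include <cstdint>
#include <cstring>

constexpr uint64_t TagTypeNumber = 0xFFFF000000000000ull;
constexpr uint64_t DoubleEncodeOffset = 1ull << 48;

static uint64_t encodeDouble(double d)
{
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    return bits + DoubleEncodeOffset; // JIT: subPtr(tagTypeNumberRegister, ...)
}

static double decodeDouble(uint64_t encoded)
{
    uint64_t bits = encoded + TagTypeNumber; // == encoded - DoubleEncodeOffset (mod 2^64)
    double d;
    std::memcpy(&d, &bits, sizeof d);
    return d;
}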
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Handle eval
    Jump wasEval;
    if (opcodeID == op_call_eval) {
        emitGetVirtualRegister(callee, X86::ecx);
        compileOpCallEvalSetupArgs(instruction);

        emitCTICall(Interpreter::cti_op_call_eval);
        wasEval = jnePtr(X86::eax, ImmPtr(JSImmediate::impossibleValue()));
    }

    emitGetVirtualRegister(callee, X86::ecx);
    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Check for JSFunctions.
    emitJumpSlowCaseIfNotJSCell(X86::ecx);
    addSlowCase(jnePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr)));

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argCount), X86::edx);

    emitNakedCall(m_interpreter->m_ctiVirtualCall);

    if (opcodeID == op_call_eval)
        wasEval.link(this);

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JIT::compileSetupVarargsFrame(Instruction* instruction, CallLinkInfo* info)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    JumpList slowCase;
    JumpList end;
    bool canOptimize = m_codeBlock->usesArguments()
        && arguments == m_codeBlock->argumentsRegister().offset()
        && !m_codeBlock->symbolTable()->slowArguments();
    if (canOptimize) {
        emitGetVirtualRegister(arguments, regT0);
        slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));

        move(TrustedImm32(-firstFreeRegister), regT1);
        emitSetupVarargsFrameFastCase(*this, regT1, regT0, regT1, regT2, firstVarArgOffset, slowCase);
        end.append(jump());
        slowCase.link(this);
    }

    emitGetVirtualRegister(arguments, regT1);
    callOperation(operationSizeFrameForVarargs, regT1, -firstFreeRegister, firstVarArgOffset);
    move(TrustedImm32(-firstFreeRegister), regT1);
    emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
    addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister);
    emitGetVirtualRegister(arguments, regT2);
    callOperation(operationSetupVarargsFrame, regT1, regT2, firstVarArgOffset, regT0);
    move(returnValueGPR, regT1);

    if (canOptimize)
        end.link(this);

    // Profile the argument count.
    load32(Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
    load8(&info->maxNumArguments, regT0);
    Jump notBiggest = branch32(Above, regT0, regT2);
    Jump notSaturated = branch32(BelowOrEqual, regT2, TrustedImm32(255));
    move(TrustedImm32(255), regT2);
    notSaturated.link(this);
    store8(regT2, &info->maxNumArguments);
    notBiggest.link(this);

    // Initialize 'this'.
    emitGetVirtualRegister(thisValue, regT0);
    store64(regT0, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
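// The two-branch sequence above maintains a saturating high-water mark in a
// single byte: the observed count is clamped to 255 and only stored when it
// beats the recorded maximum. Equivalent scalar logic (a sketch; the uint8_t
// type for maxNumArguments follows from the load8/store8 pair):
#include <algorithm>
#include <cstdint>

static void profileArgumentCount(uint8_t& maxNumArguments, uint32_t argumentCount)
{
    if (maxNumArguments > argumentCount) // the "notBiggest" branch
        return;
    maxNumArguments = static_cast<uint8_t>(std::min<uint32_t>(argumentCount, 255));
}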
void JIT::emit_op_rshift(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    if (isOperandConstantImmediateInt(op2)) {
        // isOperandConstantImmediateInt(op2) => 1 SlowCase
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        // Mask with 0x1f as per ecma-262 11.7.2 step 7.
        rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT2);
        if (supportsFloatingPointTruncate()) {
            Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
            // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
            addSlowCase(emitJumpIfNotImmediateNumber(regT0));
            addPtr(tagTypeNumberRegister, regT0);
            movePtrToDouble(regT0, fpRegT0);
            addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
            lhsIsInt.link(this);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        } else {
            // !supportsFloatingPoint() => 2 SlowCases
            emitJumpSlowCaseIfNotImmediateInteger(regT0);
            emitJumpSlowCaseIfNotImmediateInteger(regT2);
        }
        emitFastArithImmToInt(regT2);
        rshift32(regT2, regT0);
    }
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(result);
}
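// ECMA-262 defines '>>' to use only the low five bits of the shift count,
// which is why the constant case above can bake (constant & 0x1f) directly
// into the emitted instruction. A sketch of the semantics being compiled:
#include <cstdint>

static int32_t jsRightShift(int32_t lhs, uint32_t shiftCount)
{
    return lhs >> (shiftCount & 0x1f); // ecma-262 11.7.2 step 7
}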
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned)
{
    int callee = instruction[1].u.operand;
    int argCount = instruction[2].u.operand;
    int registerOffset = instruction[3].u.operand;

    // Handle eval
    Jump wasEval;
    if (opcodeID == op_call_eval) {
        JITStubCall stubCall(this, cti_op_call_eval);
        stubCall.addArgument(callee, regT0);
        stubCall.addArgument(JIT::Imm32(registerOffset));
        stubCall.addArgument(JIT::Imm32(argCount));
        stubCall.call();
        wasEval = branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue())));
    }

    emitGetVirtualRegister(callee, regT0);

    // Check for JSFunctions.
    emitJumpSlowCaseIfNotJSCell(regT0);
    addSlowCase(branchPtr(NotEqual, Address(regT0), TrustedImmPtr(m_globalData->jsFunctionVPtr)));

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argCount), regT1);

    emitNakedCall(opcodeID == op_construct ? m_globalData->jitStubs->ctiVirtualConstruct() : m_globalData->jitStubs->ctiVirtualCall());

    if (opcodeID == op_call_eval)
        wasEval.link(this);

    sampleCodeBlock(m_codeBlock);
}
void JIT::emit_op_urshift(Instruction* currentInstruction)
{
    unsigned dst = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;

    // Slow case of urshift makes assumptions about what registers hold the
    // shift arguments, so any changes must be updated there as well.
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitFastArithImmToInt(regT0);
        int shift = getConstantOperand(op2).asInt32();
        if (shift)
            urshift32(Imm32(shift & 0x1f), regT0);
        // A shift < 0 or shift = k*2^32 may result in (essentially) a toUint
        // conversion, which can result in a value we cannot represent as an
        // immediate int.
        if (shift < 0 || !(shift & 31))
            addSlowCase(branch32(LessThan, regT0, Imm32(0)));
        emitFastArithReTagImmediate(regT0, regT0);
        emitPutVirtualRegister(dst, regT0);
        return;
    }
    emitGetVirtualRegisters(op1, regT0, op2, regT1);
    if (!isOperandConstantImmediateInt(op1))
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT1);
    emitFastArithImmToInt(regT0);
    emitFastArithImmToInt(regT1);
    urshift32(regT1, regT0);
    addSlowCase(branch32(LessThan, regT0, Imm32(0)));
    emitFastArithReTagImmediate(regT0, regT0);
    emitPutVirtualRegister(dst, regT0);
}
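// The branch32(LessThan, regT0, Imm32(0)) checks above guard the one result
// an immediate int cannot hold: a logical shift whose 32-bit result has the
// top bit set, e.g. (-1) >>> 0 == 4294967295 in JavaScript. A sketch of the
// representability test:
#include <cstdint>

static bool fitsImmediateInt(uint32_t urshiftResult)
{
    // Results >= 2^31 read as negative when reinterpreted as int32, so they
    // must take the slow case, which boxes them as doubles instead.
    return static_cast<int32_t>(urshiftResult) >= 0;
}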
void JIT::emit_op_jlesseq(Instruction* currentInstruction, bool invert)
{
    unsigned op1 = currentInstruction[1].u.operand;
    unsigned op2 = currentInstruction[2].u.operand;
    unsigned target = currentInstruction[3].u.operand;

    // We generate inline code for the following cases in the fast path:
    // - int immediate to constant int immediate
    // - constant int immediate to int immediate
    // - int immediate to int immediate

    if (isOperandConstantImmediateChar(op1)) {
        emitGetVirtualRegister(op2, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateChar(op2)) {
        emitGetVirtualRegister(op1, regT0);
        addSlowCase(emitJumpIfNotJSCell(regT0));
        JumpList failures;
        emitLoadCharacterString(regT0, regT0, failures);
        addSlowCase(failures);
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
        return;
    }
    if (isOperandConstantImmediateInt(op2)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        int32_t op2imm = getConstantOperandImmediateInt(op2);
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, Imm32(op2imm)), target);
    } else if (isOperandConstantImmediateInt(op1)) {
        emitGetVirtualRegister(op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
        int32_t op1imm = getConstantOperandImmediateInt(op1);
        addJump(branch32(invert ? LessThan : GreaterThanOrEqual, regT1, Imm32(op1imm)), target);
    } else {
        emitGetVirtualRegisters(op1, regT0, op2, regT1);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT1);
        addJump(branch32(invert ? GreaterThan : LessThanOrEqual, regT0, regT1), target);
    }
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[1].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Caller initializes ScopeChain.
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ScopeChain; ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */

    if (opcodeID == op_call_varargs)
        compileLoadVarargs(instruction);
    else {
        int argCount = instruction[2].u.operand;
        int registerOffset = instruction[3].u.operand;

        addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT1);

        store32(TrustedImm32(argCount), Address(regT1, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    } // regT1 holds newCallFrame with ArgumentCount initialized.

    store32(TrustedImm32(instruction - m_codeBlock->instructions().begin()), Address(regT1, RegisterFile::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.

    storePtr(callFrameRegister, Address(regT1, RegisterFile::CallerFrame * static_cast<int>(sizeof(Register))));
    storePtr(regT0, Address(regT1, RegisterFile::Callee * static_cast<int>(sizeof(Register))));
    move(regT1, callFrameRegister);

    if (opcodeID == op_call_eval) {
        compileCallEval();
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);

    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(JSValue::encode(JSValue())));

    END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);

    addSlowCase(slowCase);

    ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
    m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT1);
    emitPutToCallFrameHeader(regT1, RegisterFile::ScopeChain);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    sampleCodeBlock(m_codeBlock);
}
void JIT::emit_op_pre_dec(Instruction* currentInstruction)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchSub32(Zero, Imm32(1), regT0));
    emitFastArithIntToImmNoCheck(regT0, regT0);
    emitPutVirtualRegister(srcDst);
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[1].u.operand;
    int argCount = instruction[2].u.operand;
    int registerOffset = instruction[3].u.operand;

    // Handle eval
    Jump wasEval;
    if (opcodeID == op_call_eval) {
        JITStubCall stubCall(this, cti_op_call_eval);
        stubCall.addArgument(callee, regT0);
        stubCall.addArgument(JIT::Imm32(registerOffset));
        stubCall.addArgument(JIT::Imm32(argCount));
        stubCall.call();
        wasEval = branchPtr(NotEqual, regT0, TrustedImmPtr(JSValue::encode(JSValue())));
    }

    // This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
    // This deliberately leaves the callee in regT0, used when setting up the stack frame below
    emitGetVirtualRegister(callee, regT0);

    DataLabelPtr addressOfLinkedFunctionCheck;

    BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);

    Jump jumpToSlow = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(JSValue::encode(JSValue())));

    END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);

    addSlowCase(jumpToSlow);
    ASSERT_JIT_OFFSET(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow), patchOffsetOpCallCompareToJump);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callStructureStubCompilationInfo[callLinkInfoIndex].isCall = opcodeID != op_construct;

    // The following is the fast case, only used when a callee can be linked.

    // Fast version of stack frame initialization.
    // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scopeChain)), regT1); // newScopeChain

    store32(TrustedImm32(Int32Tag), intTagFor(registerOffset + RegisterFile::ArgumentCount));
    store32(Imm32(argCount), intPayloadFor(registerOffset + RegisterFile::ArgumentCount));
    storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
    storePtr(regT0, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
    storePtr(regT1, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);

    // Call to the callee
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    if (opcodeID == op_call_eval)
        wasEval.link(this);

    sampleCodeBlock(m_codeBlock);
}
void JIT::emitSlow_op_pre_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
    unsigned srcDst = currentInstruction[1].u.operand;

    Jump notImm = getSlowCase(iter);
    linkSlowCase(iter);
    emitGetVirtualRegister(srcDst, regT0);
    notImm.link(this);
    JITStubCall stubCall(this, cti_op_pre_dec);
    stubCall.addArgument(regT0);
    stubCall.call(srcDst);
}
void JIT::emit_op_post_inc(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned srcDst = currentInstruction[2].u.operand;

    emitGetVirtualRegister(srcDst, regT0);
    move(regT0, regT1);
    emitJumpSlowCaseIfNotImmediateInteger(regT0);
    addSlowCase(branchAdd32(Overflow, Imm32(1), regT1));
    emitFastArithIntToImmNoCheck(regT1, regT1);
    emitPutVirtualRegister(srcDst, regT1);
    emitPutVirtualRegister(result);
}
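// post_inc hands the original value back as 'result' (regT0) while writing
// the incremented value (regT1) to srcDst; int32 overflow defers to the slow
// case. A scalar sketch of the same contract, using the GCC/Clang overflow
// builtin (names illustrative):
#include <cstdint>

static bool postIncrementFastPath(int32_t& srcDst, int32_t& result)
{
    int32_t original = srcDst;
    int32_t incremented;
    if (__builtin_add_overflow(original, 1, &incremented))
        return false; // slow case re-boxes the result as a double
    srcDst = incremented;
    result = original;
    return true;
}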
void JIT::compileSetupVarargsFrame(OpcodeID opcode, Instruction* instruction, CallLinkInfo* info)
{
    int thisValue = instruction[3].u.operand;
    int arguments = instruction[4].u.operand;
    int firstFreeRegister = instruction[5].u.operand;
    int firstVarArgOffset = instruction[6].u.operand;

    emitGetVirtualRegister(arguments, regT1);
    Z_JITOperation_EJZZ sizeOperation;
    if (opcode == op_tail_call_forward_arguments)
        sizeOperation = operationSizeFrameForForwardArguments;
    else
        sizeOperation = operationSizeFrameForVarargs;
    callOperation(sizeOperation, regT1, -firstFreeRegister, firstVarArgOffset);
    move(TrustedImm32(-firstFreeRegister), regT1);
    emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
    addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister);
    emitGetVirtualRegister(arguments, regT2);
    F_JITOperation_EFJZZ setupOperation;
    if (opcode == op_tail_call_forward_arguments)
        setupOperation = operationSetupForwardArgumentsFrame;
    else
        setupOperation = operationSetupVarargsFrame;
    callOperation(setupOperation, regT1, regT2, firstVarArgOffset, regT0);
    move(returnValueGPR, regT1);

    // Profile the argument count.
    load32(Address(regT1, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
    load32(info->addressOfMaxNumArguments(), regT0);
    Jump notBiggest = branch32(Above, regT0, regT2);
    store32(regT2, info->addressOfMaxNumArguments());
    notBiggest.link(this);

    // Initialize 'this'.
    emitGetVirtualRegister(thisValue, regT0);
    store64(regT0, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));

    addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier* ident, unsigned)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be repatched.
    // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to
    // jump to the array-length / prototype access trampolines), and we also record the property-map access offset
    // as a label to jump back to if one of these trampolines finds a match.

    emitGetVirtualRegister(baseVReg, X86::eax);

    emitPutJITStubArg(X86::eax, 1);
    emitPutJITStubArgConstant(ident, 2);
    emitCTICall(Interpreter::cti_op_get_by_id_generic);
    emitPutVirtualRegister(resultVReg);
}
void JIT::emit_op_mul(Instruction* currentInstruction)
{
    unsigned result = currentInstruction[1].u.operand;
    unsigned op1 = currentInstruction[2].u.operand;
    unsigned op2 = currentInstruction[3].u.operand;
    OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);

    // For now, only plant a fast int case if the constant operand is greater than zero.
    int32_t value;
    if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
        emitGetVirtualRegister(op2, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
        emitGetVirtualRegister(op1, regT0);
        emitJumpSlowCaseIfNotImmediateInteger(regT0);
        addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT0));
        emitFastArithReTagImmediate(regT0, regT0);
    } else
        compileBinaryArithOp(op_mul, result, op1, op2, types);

    emitPutVirtualRegister(result);
}
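// The restriction to positive constants above is most plausibly about
// negative zero: with a constant c <= 0, an integer multiply can yield 0
// where JavaScript requires -0.0 (e.g. 0 * -5), and branchMul32's Overflow
// check cannot detect that. The slow path computes in doubles, where the
// sign of zero is preserved:
static double jsMultiplySlowPath(int32_t a, int32_t b)
{
    return static_cast<double>(a) * static_cast<double>(b); // 0 * -5 == -0.0
}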
void JIT::compileGetByIdHotPath(int resultVReg, int baseVReg, Identifier*, unsigned propertyAccessInstructionIndex)
{
    // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be repatched.
    // Additionally, for get_by_id we need to repatch the offset of the branch to the slow case (we repatch this to
    // jump to the array-length / prototype access trampolines), and we also record the property-map access offset
    // as a label to jump back to if one of these trampolines finds a match.

    emitGetVirtualRegister(baseVReg, X86::eax);
    emitJumpSlowCaseIfNotJSCell(X86::eax, baseVReg);

    JmpDst hotPathBegin = __ label();
    m_propertyAccessCompilationInfo[propertyAccessInstructionIndex].hotPathBegin = hotPathBegin;

    __ cmpl_im_force32(repatchGetByIdDefaultStructure, FIELD_OFFSET(JSCell, m_structure), X86::eax);
    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdStructure);
    addSlowCase(__ jne());
    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdBranchToSlowCase);

    __ movl_mr(FIELD_OFFSET(JSObject, m_propertyStorage), X86::eax, X86::eax);
    __ movl_mr(repatchGetByIdDefaultOffset, X86::eax, X86::eax);
    ASSERT(X86Assembler::getDifferenceBetweenLabels(hotPathBegin, __ label()) == repatchOffsetGetByIdPropertyMapOffset);
    emitPutVirtualRegister(resultVReg);
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);

    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs)
        compileSetupVarargsFrame(instruction, info);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
            Jump done = emitJumpIfNotJSCell(regT0);
            load32(Address(regT0, JSCell::structureIDOffset()), regT0);
            store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
        store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.

    uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(bytecodeOffset);
    store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + TagOffset));

    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
    store64(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    info->callType = CallLinkInfo::callTypeFor(opcodeID);
    info->codeOrigin = CodeOrigin(m_bytecodeOffset);
    info->calleeGPR = regT0;
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;
    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    linkSlowCase(iter);

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Fast check for JS function.
    Jump callLinkFailNotObject = emitJumpIfNotJSCell(X86::ecx);
    Jump callLinkFailNotJSFunction = jnePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr));

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    move(Imm32(argCount), X86::edx);

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);

    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_interpreter->m_ctiVirtualCallPreLink);

    Jump storeResultForFirstRun = jump();

    // FIXME: this label can be removed, since it is a fixed offset from 'callReturnLocation'.
    // This is the address for the cold path *after* the first run (which tries to link the call).
    m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = MacroAssembler::Label(this);

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Check for JSFunctions.
    Jump isNotObject = emitJumpIfNotJSCell(X86::ecx);
    Jump isJSFunction = jePtr(Address(X86::ecx), ImmPtr(m_interpreter->m_jsFunctionVptr));

    // This handles host functions
    isNotObject.link(this);
    callLinkFailNotObject.link(this);
    callLinkFailNotJSFunction.link(this);
    emitCTICall(((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
    Jump wasNotJSFunction = jump();

    // Next, handle JSFunctions...
    isJSFunction.link(this);

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    storePtr(callFrameRegister, Address(callFrameRegister, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * static_cast<int>(sizeof(Register))), callFrameRegister);
    move(Imm32(argCount), X86::edx);

    emitNakedCall(m_interpreter->m_ctiVirtualCall);

    // Put the return value in dst. In the interpreter, op_ret does this.
    wasNotJSFunction.link(this);
    storeResultForFirstRun.link(this);
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Handle eval
    Jump wasEval;
    if (opcodeID == op_call_eval) {
        emitGetVirtualRegister(callee, X86::ecx);
        compileOpCallEvalSetupArgs(instruction);

        emitCTICall(Interpreter::cti_op_call_eval);
        wasEval = jnePtr(X86::eax, ImmPtr(JSValuePtr::encode(JSImmediate::impossibleValue())));
    }

    // This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
    // This deliberately leaves the callee in ecx, used when setting up the stack frame below
    emitGetVirtualRegister(callee, X86::ecx);
    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump jumpToSlow = jnePtrWithPatch(X86::ecx, addressOfLinkedFunctionCheck, ImmPtr(JSValuePtr::encode(JSImmediate::impossibleValue())));
    addSlowCase(jumpToSlow);
    ASSERT(differenceBetween(addressOfLinkedFunctionCheck, jumpToSlow) == patchOffsetOpCallCompareToJump);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;

    // The following is the fast case, only used when a callee can be linked.

    // In the case of OpConstruct, call out to a cti_ function to create the new object.
    if (opcodeID == op_construct) {
        int proto = instruction[5].u.operand;
        int thisRegister = instruction[6].u.operand;

        emitPutJITStubArg(X86::ecx, 1);
        emitPutJITStubArgFromVirtualRegister(proto, 4, X86::eax);
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(thisRegister);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Fast version of stack frame initialization, directly relative to edi.
    // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
    storePtr(ImmPtr(JSValuePtr::encode(noValue())), Address(callFrameRegister, (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register))));
    storePtr(X86::ecx, Address(callFrameRegister, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register))));
    loadPtr(Address(X86::ecx, FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node)), X86::edx); // newScopeChain
    store32(Imm32(argCount), Address(callFrameRegister, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register))));
    storePtr(callFrameRegister, Address(callFrameRegister, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register))));
    storePtr(X86::edx, Address(callFrameRegister, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register))));
    addPtr(Imm32(registerOffset * sizeof(Register)), callFrameRegister);

    // Call to the callee
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(reinterpret_cast<void*>(unreachable));

    if (opcodeID == op_call_eval)
        wasEval.link(this);

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    storePtr(ImmPtr(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JIT::compileOpCallSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex, OpcodeID opcodeID)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    linkSlowCase(iter);

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Fast check for JS function.
    __ testl_i32r(JSImmediate::TagMask, X86::ecx);
    JmpSrc callLinkFailNotObject = __ jne();
    __ cmpl_im(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), 0, X86::ecx);
    JmpSrc callLinkFailNotJSFunction = __ jne();

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    __ movl_i32r(argCount, X86::edx);

    // Speculatively roll the callframe, assuming argCount will match the arity.
    __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
    __ addl_ir(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);

    m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_interpreter->m_ctiVirtualCallPreLink);

    JmpSrc storeResultForFirstRun = __ jmp();

    // This is the address for the cold path *after* the first run (which tries to link the call).
    m_callStructureStubCompilationInfo[callLinkInfoIndex].coldPathOther = __ label();

    // The arguments have been set up on the hot path for op_call_eval
    if (opcodeID == op_call)
        compileOpCallSetupArgs(instruction);
    else if (opcodeID == op_construct)
        compileOpConstructSetupArgs(instruction);

    // Check for JSFunctions.
    __ testl_i32r(JSImmediate::TagMask, X86::ecx);
    JmpSrc isNotObject = __ jne();
    __ cmpl_im(reinterpret_cast<unsigned>(m_interpreter->m_jsFunctionVptr), 0, X86::ecx);
    JmpSrc isJSFunction = __ je();

    // This handles host functions
    JmpDst notJSFunctionlabel = __ label();
    __ link(isNotObject, notJSFunctionlabel);
    __ link(callLinkFailNotObject, notJSFunctionlabel);
    __ link(callLinkFailNotJSFunction, notJSFunctionlabel);
    emitCTICall(((opcodeID == op_construct) ? Interpreter::cti_op_construct_NotJSConstruct : Interpreter::cti_op_call_NotJSFunction));
    JmpSrc wasNotJSFunction = __ jmp();

    // Next, handle JSFunctions...
    __ link(isJSFunction, __ label());

    // First, in the case of a construct, allocate the new object.
    if (opcodeID == op_construct) {
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(registerOffset - RegisterFile::CallFrameHeaderSize - argCount);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Speculatively roll the callframe, assuming argCount will match the arity.
    __ movl_rm(X86::edi, (RegisterFile::CallerFrame + registerOffset) * static_cast<int>(sizeof(Register)), X86::edi);
    __ addl_ir(registerOffset * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_i32r(argCount, X86::edx);

    emitNakedCall(m_interpreter->m_ctiVirtualCall);

    // Put the return value in dst. In the interpreter, op_ret does this.
    JmpDst storeResult = __ label();
    __ link(wasNotJSFunction, storeResult);
    __ link(storeResultForFirstRun, storeResult);
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int dst = instruction[1].u.operand;
    int callee = instruction[2].u.operand;
    int argCount = instruction[3].u.operand;
    int registerOffset = instruction[4].u.operand;

    // Handle eval
    JmpSrc wasEval;
    if (opcodeID == op_call_eval) {
        emitGetVirtualRegister(callee, X86::ecx);
        compileOpCallEvalSetupArgs(instruction);

        emitCTICall(Interpreter::cti_op_call_eval);
        __ cmpl_ir(asInteger(JSImmediate::impossibleValue()), X86::eax);
        wasEval = __ jne();
    }

    // This plants a check for a cached JSFunction value, so we can plant a fast link to the callee.
    // This deliberately leaves the callee in ecx, used when setting up the stack frame below
    emitGetVirtualRegister(callee, X86::ecx);
    __ cmpl_ir_force32(asInteger(JSImmediate::impossibleValue()), X86::ecx);
    JmpDst addressOfLinkedFunctionCheck = __ label();
    addSlowCase(__ jne());
    ASSERT(X86Assembler::getDifferenceBetweenLabels(addressOfLinkedFunctionCheck, __ label()) == repatchOffsetOpCallCall);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;

    // The following is the fast case, only used when a callee can be linked.

    // In the case of OpConstruct, call out to a cti_ function to create the new object.
    if (opcodeID == op_construct) {
        int proto = instruction[5].u.operand;
        int thisRegister = instruction[6].u.operand;

        emitPutJITStubArg(X86::ecx, 1);
        emitPutJITStubArgFromVirtualRegister(proto, 4, X86::eax);
        emitCTICall(Interpreter::cti_op_construct_JSConstruct);
        emitPutVirtualRegister(thisRegister);
        emitGetVirtualRegister(callee, X86::ecx);
    }

    // Fast version of stack frame initialization, directly relative to edi.
    // Note that this omits to set up RegisterFile::CodeBlock, which is set in the callee
    __ movl_i32m(asInteger(noValue()), (registerOffset + RegisterFile::OptionalCalleeArguments) * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_rm(X86::ecx, (registerOffset + RegisterFile::Callee) * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_mr(FIELD_OFFSET(JSFunction, m_scopeChain) + FIELD_OFFSET(ScopeChain, m_node), X86::ecx, X86::edx); // newScopeChain
    __ movl_i32m(argCount, (registerOffset + RegisterFile::ArgumentCount) * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_rm(X86::edi, (registerOffset + RegisterFile::CallerFrame) * static_cast<int>(sizeof(Register)), X86::edi);
    __ movl_rm(X86::edx, (registerOffset + RegisterFile::ScopeChain) * static_cast<int>(sizeof(Register)), X86::edi);
    __ addl_ir(registerOffset * sizeof(Register), X86::edi);

    // Call to the callee
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(reinterpret_cast<void*>(unreachable));

    if (opcodeID == op_call_eval)
        __ link(wasEval, __ label());

    // Put the return value in dst. In the interpreter, op_ret does this.
    emitPutVirtualRegister(dst);

#if ENABLE(CODEBLOCK_SAMPLING)
    __ movl_i32m(reinterpret_cast<unsigned>(m_codeBlock), m_interpreter->sampler()->codeBlockSlot());
#endif
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Caller initializes ScopeChain.
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ScopeChain; ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */

    if (opcodeID == op_call_varargs)
        compileLoadVarargs(instruction);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
            Jump done = emitJumpIfNotJSCell(regT0);
            loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
            storePtr(regT0, instruction[6].u.arrayProfile->addressOfLastSeenStructure());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT1);

        store32(TrustedImm32(argCount), Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
    } // regT1 holds newCallFrame with ArgumentCount initialized.

    uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
    uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(bytecodeOffset);
    store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.

    store64(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset()));
    store64(regT0, Address(regT1, JSStack::Callee * static_cast<int>(sizeof(Register))));
    move(regT1, callFrameRegister);

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    addSlowCase(slowCase);

    ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
    m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;

    loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
    emitPutToCallFrameHeader(regT1, JSStack::ScopeChain);
    m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
    int callee = instruction[2].u.operand;

    /* Caller always:
        - Updates callFrameRegister to callee callFrame.
        - Initializes ArgumentCount; CallerFrame; Callee.
       For a JS call:
        - Callee initializes ReturnPC; CodeBlock.
        - Callee restores callFrameRegister before return.
       For a non-JS call:
        - Caller initializes ReturnPC; CodeBlock.
        - Caller restores callFrameRegister after return.
    */
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call), call_and_tail_call_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_varargs), call_and_tail_call_varargs_opcodes_must_be_same_length);
    COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_forward_arguments), call_and_tail_call_forward_arguments_opcodes_must_be_same_length);

    CallLinkInfo* info = nullptr;
    if (opcodeID != op_call_eval)
        info = m_codeBlock->addCallLinkInfo();
    if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments)
        compileSetupVarargsFrame(opcodeID, instruction, info);
    else {
        int argCount = instruction[3].u.operand;
        int registerOffset = -instruction[4].u.operand;

        if (opcodeID == op_call && shouldEmitProfiling()) {
            emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
            Jump done = emitJumpIfNotJSCell(regT0);
            load32(Address(regT0, JSCell::structureIDOffset()), regT0);
            store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
            done.link(this);
        }

        addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
        store32(TrustedImm32(argCount), Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
    } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.

    uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
    uint32_t locationBits = CallSiteIndex(bytecodeOffset).bits();
    store32(TrustedImm32(locationBits), Address(callFrameRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + TagOffset));

    emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
    store64(regT0, Address(stackPointerRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));

    if (opcodeID == op_call_eval) {
        compileCallEval(instruction);
        return;
    }

    DataLabelPtr addressOfLinkedFunctionCheck;
    Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
    addSlowCase(slowCase);

    ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
    info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
    m_callCompilationInfo.append(CallCompilationInfo());
    m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
    m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;

    if (opcodeID == op_tail_call) {
        CallFrameShuffleData shuffleData;
        shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister;
        shuffleData.numLocals = instruction[4].u.operand - sizeof(CallerFrameAndPC) / sizeof(Register);
        shuffleData.args.resize(instruction[3].u.operand);
        for (int i = 0; i < instruction[3].u.operand; ++i) {
            shuffleData.args[i] = ValueRecovery::displacedInJSStack(
                virtualRegisterForArgument(i) - instruction[4].u.operand,
                DataFormatJS);
        }
        shuffleData.callee = ValueRecovery::inGPR(regT0, DataFormatJS);
        shuffleData.setupCalleeSaveRegisters(m_codeBlock);
        info->setFrameShuffleData(shuffleData);
        CallFrameShuffler(*this, shuffleData).prepareForTailCall();
        m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
        return;
    }

    if (opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) {
        emitRestoreCalleeSaves();
        prepareForTailCallSlow();
        m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
        return;
    }

    m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();

    addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
    checkStackPointerAlignment();

    sampleCodeBlock(m_codeBlock);

    emitPutCallResult(instruction);
}