void MethodOfGettingAValueProfile::emitReportValue(CCallHelpers& jit, JSValueRegs regs) const
{
    switch (m_kind) {
    case None:
        return;

    case Ready:
        jit.storeValue(regs, u.profile->specFailBucket(0));
        return;

    case LazyOperand: {
        LazyOperandValueProfileKey key(u.lazyOperand.bytecodeOffset, VirtualRegister(u.lazyOperand.operand));

        ConcurrentJSLocker locker(u.lazyOperand.codeBlock->m_lock);
        LazyOperandValueProfile* profile =
            u.lazyOperand.codeBlock->lazyOperandValueProfiles(locker).add(locker, key);
        jit.storeValue(regs, profile->specFailBucket(0));
        return;
    }

    case ArithProfileReady: {
        u.arithProfile->emitObserveResult(jit, regs, DoNotHaveTagRegisters);
        return;
    }
    }

    RELEASE_ASSERT_NOT_REACHED();
}
void emitSetVarargsFrame(CCallHelpers& jit, GPRReg lengthGPR, bool lengthIncludesThis, GPRReg numUsedSlotsGPR, GPRReg resultGPR)
{
    jit.move(numUsedSlotsGPR, resultGPR);
    // We really want to make sure the size of the new call frame is a multiple of
    // stackAlignmentRegisters(), however it is easier to accomplish this by
    // rounding numUsedSlotsGPR to the next multiple of stackAlignmentRegisters().
    // Together with the rounding below, we will assure that the new call frame is
    // located on a stackAlignmentRegisters() boundary and a multiple of
    // stackAlignmentRegisters() in size.
    jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), resultGPR);
    jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), resultGPR);

    jit.addPtr(lengthGPR, resultGPR);
    jit.addPtr(CCallHelpers::TrustedImm32(JSStack::CallFrameHeaderSize + (lengthIncludesThis ? 0 : 1)), resultGPR);

    // resultGPR now has the required frame size in Register units.
    // Round resultGPR to the next multiple of stackAlignmentRegisters().
    jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), resultGPR);
    jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), resultGPR);

    // Now resultGPR has the right stack frame offset in Register units.
    jit.negPtr(resultGPR);
    jit.lshiftPtr(CCallHelpers::Imm32(3), resultGPR); // Convert Register units to bytes (one Register is 8 bytes).
    jit.addPtr(GPRInfo::callFrameRegister, resultGPR);
}
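// Hedged sketch (illustration only, not part of the JIT): each addPtr/andPtr pair
// above is the standard round-up idiom at the C level. This assumes, as the JIT
// does, that stackAlignmentRegisters() is a power of two.
static inline uintptr_t roundUpToMultipleOfPowerOfTwo(uintptr_t x, uintptr_t alignment)
{
    // (x + alignment - 1) & ~(alignment - 1) rounds x up to the next multiple of alignment.
    return (x + alignment - 1) & ~(alignment - 1);
}
// Example: roundUpToMultipleOfPowerOfTwo(5, 2) == 6, matching what the emitted
// addPtr/andPtr pair computes for 5 used slots when stackAlignmentRegisters() is 2.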
void restoreRegisters(CCallHelpers& jit)
{
    if (m_origResult != m_result)
        jit.move(m_result, m_origResult);
    if (m_origLeft != m_left && m_origLeft != m_origResult)
        jit.move(m_left, m_origLeft);
    if (m_origRight != m_right && m_origRight != m_origResult && m_origRight != m_origLeft)
        jit.move(m_right, m_origRight);

    // We are guaranteed that the tag registers are not the same as the original input
    // or output registers. Otherwise, we would not have allocated a scratch for them.
    // Hence, we don't need to check for overlap like we do for the input registers.
    if (m_savedTagMaskRegister != InvalidGPRReg) {
        ASSERT(GPRInfo::tagMaskRegister != m_origLeft);
        ASSERT(GPRInfo::tagMaskRegister != m_origRight);
        ASSERT(GPRInfo::tagMaskRegister != m_origResult);
        jit.move(m_savedTagMaskRegister, GPRInfo::tagMaskRegister);
    }
    if (m_savedTagTypeNumberRegister != InvalidGPRReg) {
        ASSERT(GPRInfo::tagTypeNumberRegister != m_origLeft);
        ASSERT(GPRInfo::tagTypeNumberRegister != m_origRight);
        ASSERT(GPRInfo::tagTypeNumberRegister != m_origResult);
        jit.move(m_savedTagTypeNumberRegister, GPRInfo::tagTypeNumberRegister);
    }
}
void BasicBlockLocation::emitExecuteCode(CCallHelpers& jit, MacroAssembler::RegisterID scratch) const
{
    static_assert(sizeof(size_t) == 4, "Assuming size_t is 32 bits on 32 bit platforms.");
    jit.load32(&m_executionCount, scratch);
    // Saturate at the maximum count: if the increment wraps the counter to zero, skip the store.
    CCallHelpers::Jump done = jit.branchAdd32(CCallHelpers::Zero, scratch, CCallHelpers::TrustedImm32(1), scratch);
    jit.store32(scratch, bitwise_cast<void*>(&m_executionCount));
    done.link(&jit);
}
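// Hedged sketch (illustration only): the branchAdd32/store32 pair above behaves
// like this C-level saturating increment, so a counter that reaches UINT32_MAX
// stays there instead of wrapping back to zero.
static inline void saturatingIncrement(uint32_t& executionCount)
{
    uint32_t count = executionCount;
    if (count + 1 != 0) // The emitted branch skips the store when the add wraps to zero.
        executionCount = count + 1;
}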
void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
{
#if ENABLE(GGC)
    // Write-barrier the owner executables because we're jumping into a different block.
    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
        CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(codeOrigin);
        jit.move(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock->ownerExecutable()), GPRInfo::nonArgGPR0);
        osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1, GPRInfo::nonArgGPR2);
        if (!codeOrigin.inlineCallFrame)
            break;
    }
#endif

    if (exit.m_codeOrigin.inlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);

    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(baselineCodeBlock) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);

    jit.jitAssertTagsInPlace();

    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);
}
static void dispatch(CCallHelpers& jit, FTL::State* state, const B3::StackmapGenerationParams& params, DFG::Node* node, Box<CCallHelpers::JumpList> exceptions, CCallHelpers::JumpList from, OperationType operation, ResultType result, Arguments arguments, std::index_sequence<ArgumentsIndex...>)
{
    CCallHelpers::Label done = jit.label();
    params.addLatePath([=] (CCallHelpers& jit) {
        AllowMacroScratchRegisterUsage allowScratch(jit);

        from.link(&jit);
        callOperation(
            *state, params.unavailableRegisters(), jit, node->origin.semantic,
            exceptions.get(), operation, extractResult(result), std::get<ArgumentsIndex>(arguments)...);
        jit.jump().linkTo(done, &jit);
    });
}
void initializeRegisters(CCallHelpers& jit)
{
    if (m_left != m_origLeft)
        jit.move(m_origLeft, m_left);
    if (m_right != m_origRight && m_origRight != m_origLeft)
        jit.move(m_origRight, m_right);

    if (m_savedTagMaskRegister != InvalidGPRReg)
        jit.move(GPRInfo::tagMaskRegister, m_savedTagMaskRegister);
    if (m_savedTagTypeNumberRegister != InvalidGPRReg)
        jit.move(GPRInfo::tagTypeNumberRegister, m_savedTagTypeNumberRegister);

    jit.emitMaterializeTagCheckRegisters();
}
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
#if !ASSERT_DISABLED
    // Trap if the pointer is null, then probe it with a byte load so that a bogus
    // pointer faults here rather than at some harder-to-diagnose point later.
    CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
    jit.breakpoint();
    isNonZero.link(&jit);

    jit.push(pointerGPR);
    jit.load8(pointerGPR, pointerGPR);
    jit.pop(pointerGPR);
#else
    UNUSED_PARAM(jit);
    UNUSED_PARAM(pointerGPR);
#endif
}
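// Hedged sketch (illustration only): in C terms, the assembly above amounts to the
// following check. The byte load is a deliberate probe whose value is discarded;
// __builtin_trap() stands in for jit.breakpoint() and assumes GCC/Clang.
static inline void validatePointer(const char* pointer)
{
    if (!pointer)
        __builtin_trap(); // Corresponds to jit.breakpoint() on the null path.
    volatile char probe = *pointer; // Fault now, not later, if the pointer is bogus.
    (void)probe;
}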
CCallHelpers::JumpList generateImpl(AccessGenerationState& state, const RegisterSet& usedRegistersBySnippet, CCallHelpers& jit, std::index_sequence<ArgumentsIndex...>)
{
    CCallHelpers::JumpList exceptions;
    // We spill (1) the registers used by the IC and (2) the registers used by the snippet.
    AccessGenerationState::SpillState spillState = state.preserveLiveRegistersToStackForCall(usedRegistersBySnippet);

    jit.store32(
        CCallHelpers::TrustedImm32(state.callSiteIndexForExceptionHandlingOrOriginal().bits()),
        CCallHelpers::tagFor(static_cast<VirtualRegister>(CallFrameSlot::argumentCount)));

    jit.makeSpaceOnStackForCCall();

    jit.setupArguments<FunctionType>(std::get<ArgumentsIndex>(m_arguments)...);

    CCallHelpers::Call operationCall = jit.call(OperationPtrTag);
    auto function = m_function;
    jit.addLinkTask([=] (LinkBuffer& linkBuffer) {
        linkBuffer.link(operationCall, FunctionPtr<OperationPtrTag>(function));
    });

    jit.setupResults(m_result);
    jit.reclaimSpaceOnStackForCCall();

    CCallHelpers::Jump noException = jit.emitExceptionCheck(state.m_vm, CCallHelpers::InvertedExceptionCheck);

    state.restoreLiveRegistersFromStackForCallWithThrownException(spillState);
    exceptions.append(jit.jump());

    noException.link(&jit);
    RegisterSet dontRestore;
    dontRestore.set(m_result);
    state.restoreLiveRegistersFromStackForCall(spillState, dontRestore);

    return exceptions;
}
void ArithProfile::emitObserveResult(CCallHelpers& jit, JSValueRegs regs, TagRegistersMode mode)
{
    if (!shouldEmitSetDouble() && !shouldEmitSetNonNumber())
        return;

    CCallHelpers::Jump isInt32 = jit.branchIfInt32(regs, mode);
    CCallHelpers::Jump notDouble = jit.branchIfNotDoubleKnownNotInt32(regs, mode);
    emitSetDouble(jit);
    CCallHelpers::Jump done = jit.jump();

    notDouble.link(&jit);
    emitSetNonNumber(jit);

    done.link(&jit);
    isInt32.link(&jit);
}
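// Hedged sketch (illustration only): the branch structure above classifies the
// result into three cases; an int32 result updates no flags. The real flag
// setters are ArithProfile's emitSetDouble()/emitSetNonNumber(); the enum and
// bools here are hypothetical stand-ins.
enum class ObservedResultKind { Int32, Double, NonNumber };

static inline void observeResult(ObservedResultKind kind, bool& sawDouble, bool& sawNonNumber)
{
    switch (kind) {
    case ObservedResultKind::Int32:
        break; // Nothing to record.
    case ObservedResultKind::Double:
        sawDouble = true;
        break;
    case ObservedResultKind::NonNumber:
        sawNonNumber = true;
        break;
    }
}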
CCallHelpers::JumpList generate(AccessGenerationState& state, const RegisterSet& usedRegistersBySnippet, CCallHelpers& jit) override
{
    m_from.link(&jit);
    CCallHelpers::JumpList exceptions = generateImpl(state, usedRegistersBySnippet, jit, std::make_index_sequence<std::tuple_size<std::tuple<Arguments...>>::value>());
    jit.jump().linkTo(m_to, &jit);
    return exceptions;
}
Box<CCallHelpers::JumpList> ExceptionTarget::jumps(CCallHelpers& jit)
{
    Box<CCallHelpers::JumpList> result = Box<CCallHelpers::JumpList>::create();
    if (m_isDefaultHandler) {
        Box<CCallHelpers::Label> defaultHandler = m_defaultHandler;
        jit.addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(*result, linkBuffer.locationOf(*defaultHandler));
            });
    } else {
        RefPtr<OSRExitHandle> handle = m_handle;
        jit.addLinkTask(
            [=] (LinkBuffer& linkBuffer) {
                linkBuffer.link(*result, linkBuffer.locationOf(handle->label));
            });
    }
    return result;
}
void storeCodeOrigin(State& state, CCallHelpers& jit, CodeOrigin codeOrigin)
{
    if (!codeOrigin.isSet())
        return;

    unsigned index = state.jitCode->common.addCodeOrigin(codeOrigin);
    unsigned locationBits = CallFrame::Location::encodeAsCodeOriginIndex(index);
    jit.store32(
        CCallHelpers::TrustedImm32(locationBits),
        CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
}
void OSRExitHandle::emitExitThunk(State& state, CCallHelpers& jit)
{
    Profiler::Compilation* compilation = state.graph.compilation();
    CCallHelpers::Label myLabel = jit.label();
    label = myLabel;
    jit.pushToSaveImmediateWithoutTouchingRegisters(CCallHelpers::TrustedImm32(index));
    CCallHelpers::PatchableJump jump = jit.patchableJump();
    RefPtr<OSRExitHandle> self = this;
    VM& vm = state.vm();
    jit.addLinkTask(
        [self, jump, myLabel, compilation, &vm] (LinkBuffer& linkBuffer) {
            self->exit.m_patchableJump = CodeLocationJump<JSInternalPtrTag>(linkBuffer.locationOf<JSInternalPtrTag>(jump));

            linkBuffer.link(
                jump.m_jump,
                CodeLocationLabel<JITThunkPtrTag>(vm.getCTIStub(osrExitGenerationThunkGenerator).code()));
            if (compilation)
                compilation->addOSRExitSite({ linkBuffer.locationOf<JSInternalPtrTag>(myLabel) });
        });
}
CCallHelpers::Jump CCallSpecial::generate(Inst& inst, CCallHelpers& jit, GenerationContext&)
{
    switch (inst.args[calleeArgOffset].kind()) {
    case Arg::Imm:
    case Arg::Imm64:
        jit.move(inst.args[calleeArgOffset].asTrustedImmPtr(), scratchRegister);
        jit.call(scratchRegister);
        break;
    case Arg::Tmp:
        jit.call(inst.args[calleeArgOffset].gpr());
        break;
    case Arg::Addr:
        jit.call(inst.args[calleeArgOffset].asAddress());
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }
    return CCallHelpers::Jump();
}
void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
{
    if (exit.m_codeOrigin.inlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);

    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);

#if DFG_ENABLE(DEBUG_VERBOSE)
    dataLogF(" -> %p\n", jumpTarget);
#endif
}
MacroAssembler::Call callOperation(
    State& state, const RegisterSet& usedRegisters, CCallHelpers& jit,
    CodeOrigin codeOrigin, MacroAssembler::JumpList* exceptionTarget,
    V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, GPRReg value,
    GPRReg object, StringImpl* uid)
{
    storeCodeOrigin(state, jit, codeOrigin);
    CallContext context(state, usedRegisters, jit, 5, InvalidGPRReg);
    jit.setupArgumentsWithExecState(
        CCallHelpers::TrustedImmPtr(stubInfo), value, object,
        CCallHelpers::TrustedImmPtr(uid));
    return context.makeCall(bitwise_cast<void*>(operation), exceptionTarget);
}
bool JITNegGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile* arithProfile, bool shouldEmitProfiling)
{
    ASSERT(m_scratchGPR != m_src.payloadGPR());
    ASSERT(m_scratchGPR != m_result.payloadGPR());
    ASSERT(m_scratchGPR != InvalidGPRReg);
#if USE(JSVALUE32_64)
    ASSERT(m_scratchGPR != m_src.tagGPR());
    ASSERT(m_scratchGPR != m_result.tagGPR());
#endif

    jit.moveValueRegs(m_src, m_result);
    CCallHelpers::Jump srcNotInt = jit.branchIfNotInt32(m_src);

    // -0 should produce a double, and hence cannot be negated as an int.
    // The negative int32 0x80000000 doesn't have a positive int32 representation, and hence cannot be negated as an int.
    slowPathJumpList.append(jit.branchTest32(CCallHelpers::Zero, m_src.payloadGPR(), CCallHelpers::TrustedImm32(0x7fffffff)));

    jit.neg32(m_result.payloadGPR());
#if USE(JSVALUE64)
    jit.boxInt32(m_result.payloadGPR(), m_result);
#endif
    endJumpList.append(jit.jump());

    srcNotInt.link(&jit);
    slowPathJumpList.append(jit.branchIfNotNumber(m_src, m_scratchGPR));

    // For a double, all we need to do is to invert the sign bit.
#if USE(JSVALUE64)
    jit.move(CCallHelpers::TrustedImm64((int64_t)(1ull << 63)), m_scratchGPR);
    jit.xor64(m_scratchGPR, m_result.payloadGPR());
#else
    jit.xor32(CCallHelpers::TrustedImm32(1 << 31), m_result.tagGPR());
#endif
    // The flags of ArithNegate are basic in DFG.
    // We only need to know if we ever produced a number.
    if (shouldEmitProfiling && arithProfile && !arithProfile->lhsObservedType().sawNumber() && !arithProfile->didObserveDouble())
        arithProfile->emitSetDouble(jit);
    return true;
}
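// Hedged sketch (illustration only): flipping bit 63 of an IEEE 754 double's bit
// pattern negates it, which is what the xor64 above does to the payload. This
// also maps 0.0 to -0.0 and flips the sign of infinities and NaNs.
static inline double negateBySignBit(double value)
{
    uint64_t bits;
    memcpy(&bits, &value, sizeof(bits)); // Assumes <cstring> and <cstdint>.
    bits ^= 1ull << 63; // Invert only the sign bit.
    memcpy(&value, &bits, sizeof(value));
    return value; // negateBySignBit(2.5) == -2.5
}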
void JITBitOrGenerator::generateFastPath(CCallHelpers& jit)
{
    ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());

    m_didEmitFastPath = true;

    if (m_leftOperand.isConstInt32() || m_rightOperand.isConstInt32()) {
        JSValueRegs var = m_leftOperand.isConstInt32() ? m_right : m_left;
        SnippetOperand& constOpr = m_leftOperand.isConstInt32() ? m_leftOperand : m_rightOperand;

        // Try to do intVar | intConstant.
        m_slowPathJumpList.append(jit.branchIfNotInt32(var));

        jit.moveValueRegs(var, m_result);
        if (constOpr.asConstInt32()) {
#if USE(JSVALUE64)
            jit.or32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
            jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
#else
            jit.or32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
#endif
        }
    } else {
        ASSERT(!m_leftOperand.isConstInt32() && !m_rightOperand.isConstInt32());

        // Try to do intVar | intVar.
        m_slowPathJumpList.append(jit.branchIfNotInt32(m_left));
        m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));

        jit.moveValueRegs(m_left, m_result);
#if USE(JSVALUE64)
        jit.or64(m_right.payloadGPR(), m_result.payloadGPR());
#else
        jit.or32(m_right.payloadGPR(), m_result.payloadGPR());
#endif
    }
}
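// Hedged sketch (illustration only): on JSVALUE64 platforms a 32-bit or32 into
// the payload register clears the upper half of the 64-bit register, so the
// int32 tag has to be re-established, which is what the or64 with
// tagTypeNumberRegister does above. This models that re-tagging; the constant is
// assumed to match JSC's usual TagTypeNumber encoding.
static const uint64_t TagTypeNumber = 0xffff000000000000ull; // Assumed encoding.

static inline uint64_t boxInt32(int32_t value)
{
    // Zero-extend the payload, then OR in the int32 tag, as or64 does above.
    return static_cast<uint32_t>(value) | TagTypeNumber;
}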
static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_JITOperation_E slowPathFunction)
{
    jit.preserveReturnAddressAfterCall(GPRInfo::nonArgGPR2);
    emitPointerValidation(jit, GPRInfo::nonArgGPR2);
    jit.emitPutReturnPCToCallFrameHeader(GPRInfo::nonArgGPR2);
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
    jit.setupArgumentsExecState();
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    jit.emitGetReturnPCFromCallFrameHeaderPtr(GPRInfo::nonPreservedNonReturnGPR);
    jit.emitPutReturnPCToCallFrameHeader(CCallHelpers::TrustedImmPtr(0));
    emitPointerValidation(jit, GPRInfo::nonPreservedNonReturnGPR);
    jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR);
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.jump(GPRInfo::returnValueGPR);
}
void emitSetVarargsFrame(CCallHelpers& jit, GPRReg lengthGPR, bool lengthIncludesThis, GPRReg numUsedSlotsGPR, GPRReg resultGPR)
{
    jit.move(numUsedSlotsGPR, resultGPR);
    jit.addPtr(lengthGPR, resultGPR);
    jit.addPtr(CCallHelpers::TrustedImm32(JSStack::CallFrameHeaderSize + (lengthIncludesThis ? 0 : 1)), resultGPR);

    // resultGPR now has the required frame size in Register units.
    // Round resultGPR to the next multiple of stackAlignmentRegisters().
    jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), resultGPR);
    jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), resultGPR);

    // Now resultGPR has the right stack frame offset in Register units.
    jit.negPtr(resultGPR);
    jit.lshiftPtr(CCallHelpers::Imm32(3), resultGPR); // Convert Register units to bytes.
    jit.addPtr(GPRInfo::callFrameRegister, resultGPR);
}
void generateBinaryArithOpFastPath(BinaryOpDescriptor& ic, CCallHelpers& jit, GPRReg result, GPRReg left, GPRReg right, RegisterSet usedRegisters, CCallHelpers::Jump& done, CCallHelpers::Jump& slowPathStart)
{
    ScratchRegisterAllocator allocator(usedRegisters);
    BinarySnippetRegisterContext context(allocator, result, left, right);

    GPRReg scratchGPR = allocator.allocateScratchGPR();
    FPRReg leftFPR = allocator.allocateScratchFPR();
    FPRReg rightFPR = allocator.allocateScratchFPR();
    FPRReg scratchFPR = InvalidFPRReg;
    if (scratchFPRUsage == NeedScratchFPR)
        scratchFPR = allocator.allocateScratchFPR();

    BinaryArithOpGenerator gen(ic.leftOperand(), ic.rightOperand(), JSValueRegs(result), JSValueRegs(left), JSValueRegs(right), leftFPR, rightFPR, scratchGPR, scratchFPR);

    auto numberOfBytesUsedToPreserveReusedRegisters = allocator.preserveReusedRegistersByPushing(jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);

    context.initializeRegisters(jit);
    gen.generateFastPath(jit);
    ASSERT(gen.didEmitFastPath());

    gen.endJumpList().link(&jit);
    context.restoreRegisters(jit);
    allocator.restoreReusedRegistersByPopping(jit, numberOfBytesUsedToPreserveReusedRegisters, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
    done = jit.jump();

    gen.slowPathJumpList().link(&jit);
    context.restoreRegisters(jit);
    allocator.restoreReusedRegistersByPopping(jit, numberOfBytesUsedToPreserveReusedRegisters, ScratchRegisterAllocator::ExtraStackSpace::SpaceForCCall);
    slowPathStart = jit.jump();
}
static void slowPathFor(
    CCallHelpers& jit, VM* vm, P_JITOperation_E slowPathFunction)
{
    jit.emitFunctionPrologue();
    jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
    jit.setupArgumentsExecState();
    jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
    emitPointerValidation(jit, GPRInfo::nonArgGPR0);
    jit.call(GPRInfo::nonArgGPR0);
    if (maxFrameExtentForSlowPathCall)
        jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);

    // This slow call will return the address of one of the following:
    // 1) Exception throwing thunk.
    // 2) Host call return value returner thingy.
    // 3) The function to call.
    emitPointerValidation(jit, GPRInfo::returnValueGPR);
    jit.emitFunctionEpilogue();
    jit.jump(GPRInfo::returnValueGPR);
}
void JSCallBase::emit(CCallHelpers& jit)
{
    m_callLinkInfo = jit.codeBlock()->addCallLinkInfo();

    CCallHelpers::Jump slowPath = jit.branchPtrWithPatch(
        CCallHelpers::NotEqual, GPRInfo::regT0, m_targetToCheck,
        CCallHelpers::TrustedImmPtr(0));

    m_fastCall = jit.nearCall();
    CCallHelpers::Jump done = jit.jump();

    slowPath.link(&jit);

    jit.move(CCallHelpers::TrustedImmPtr(m_callLinkInfo), GPRInfo::regT2);
    m_slowCall = jit.nearCall();

    done.link(&jit);
}
static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch)
{
    AssemblyHelpers::Jump ownerIsRememberedOrInEden = jit.jumpIfIsRememberedOrInEden(owner);

    // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
#if CPU(X86)
    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    jit.setupArgumentsWithExecState(owner);
    jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch);
    jit.call(scratch);

#if CPU(X86)
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    ownerIsRememberedOrInEden.link(&jit);
}
static void osrWriteBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2)
{
    AssemblyHelpers::Jump definitelyNotMarked = jit.genericWriteBarrier(owner, scratch1, scratch2);

    // We need these extra slots because setupArgumentsWithExecState will use poke on x86.
#if CPU(X86)
    jit.subPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    jit.setupArgumentsWithExecState(owner);
    jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationOSRWriteBarrier)), scratch1);
    jit.call(scratch1);

#if CPU(X86)
    jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*) * 3), MacroAssembler::stackPointerRegister);
#endif

    definitelyNotMarked.link(&jit);
}
void JSCallVarargs::emit(CCallHelpers& jit, State& state, int32_t spillSlotsOffset, int32_t osrExitFromGenericUnwindSpillSlots)
{
    // We are passed three pieces of information:
    // - The callee.
    // - The arguments object, if it's not a forwarding call.
    // - The "this" value, if it's a constructor call.

    CallVarargsData* data = m_node->callVarargsData();

    GPRReg calleeGPR = GPRInfo::argumentGPR0;
    GPRReg argumentsGPR = InvalidGPRReg;
    GPRReg thisGPR = InvalidGPRReg;
    bool forwarding = false;

    switch (m_node->op()) {
    case CallVarargs:
    case TailCallVarargs:
    case TailCallVarargsInlinedCaller:
    case ConstructVarargs:
        argumentsGPR = GPRInfo::argumentGPR1;
        thisGPR = GPRInfo::argumentGPR2;
        break;
    case CallForwardVarargs:
    case TailCallForwardVarargs:
    case TailCallForwardVarargsInlinedCaller:
    case ConstructForwardVarargs:
        thisGPR = GPRInfo::argumentGPR1;
        forwarding = true;
        break;
    default:
        RELEASE_ASSERT_NOT_REACHED();
        break;
    }

    const unsigned calleeSpillSlot = 0;
    const unsigned argumentsSpillSlot = 1;
    const unsigned thisSpillSlot = 2;
    const unsigned stackPointerSpillSlot = 3;

    // Get some scratch registers.
    RegisterSet usedRegisters;
    usedRegisters.merge(RegisterSet::stackRegisters());
    usedRegisters.merge(RegisterSet::reservedHardwareRegisters());
    usedRegisters.merge(RegisterSet::calleeSaveRegisters());
    usedRegisters.set(calleeGPR);
    if (argumentsGPR != InvalidGPRReg)
        usedRegisters.set(argumentsGPR);
    ASSERT(thisGPR != InvalidGPRReg);
    usedRegisters.set(thisGPR);
    ScratchRegisterAllocator allocator(usedRegisters);
    GPRReg scratchGPR1 = allocator.allocateScratchGPR();
    GPRReg scratchGPR2 = allocator.allocateScratchGPR();
    GPRReg scratchGPR3 = allocator.allocateScratchGPR();
    RELEASE_ASSERT(!allocator.numberOfReusedRegisters());

    auto computeUsedStack = [&] (GPRReg targetGPR, unsigned extra) {
        if (isARM64()) {
            // Have to do this the weird way because $sp on ARM64 means zero when used in a subtraction.
            jit.move(CCallHelpers::stackPointerRegister, targetGPR);
            jit.negPtr(targetGPR);
            jit.addPtr(GPRInfo::callFrameRegister, targetGPR);
        } else {
            jit.move(GPRInfo::callFrameRegister, targetGPR);
            jit.subPtr(CCallHelpers::stackPointerRegister, targetGPR);
        }
        if (extra)
            jit.subPtr(CCallHelpers::TrustedImm32(extra), targetGPR);
        jit.urshiftPtr(CCallHelpers::Imm32(3), targetGPR); // Convert bytes to Register units.
    };

    auto callWithExceptionCheck = [&] (void* callee) {
        jit.move(CCallHelpers::TrustedImmPtr(callee), GPRInfo::nonPreservedNonArgumentGPR);
        jit.call(GPRInfo::nonPreservedNonArgumentGPR);
        m_exceptions.append(jit.emitExceptionCheck(AssemblyHelpers::NormalExceptionCheck, AssemblyHelpers::FarJumpWidth));
    };

    if (isARM64()) {
        jit.move(CCallHelpers::stackPointerRegister, scratchGPR1);
        jit.storePtr(scratchGPR1, CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot));
    } else
        jit.storePtr(CCallHelpers::stackPointerRegister, CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot));

    unsigned extraStack = sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*));

    if (forwarding) {
        CCallHelpers::JumpList slowCase;
        computeUsedStack(scratchGPR2, 0);
        emitSetupVarargsFrameFastCase(jit, scratchGPR2, scratchGPR1, scratchGPR2, scratchGPR3, m_node->child2()->origin.semantic.inlineCallFrame, data->firstVarArgOffset, slowCase);

        CCallHelpers::Jump done = jit.jump();
        slowCase.link(&jit);
        jit.subPtr(CCallHelpers::TrustedImm32(extraStack), CCallHelpers::stackPointerRegister);
        jit.setupArgumentsExecState();
        callWithExceptionCheck(bitwise_cast<void*>(operationThrowStackOverflowForVarargs));
        jit.abortWithReason(DFGVarargsThrowingPathDidNotThrow);

        done.link(&jit);
        jit.move(calleeGPR, GPRInfo::regT0);
    } else {
        // Gotta spill the callee, arguments, and this because we will need them later and we will have some
        // calls that clobber them.
        jit.store64(calleeGPR, CCallHelpers::addressFor(spillSlotsOffset + calleeSpillSlot));
        jit.store64(argumentsGPR, CCallHelpers::addressFor(spillSlotsOffset + argumentsSpillSlot));
        jit.store64(thisGPR, CCallHelpers::addressFor(spillSlotsOffset + thisSpillSlot));

        computeUsedStack(scratchGPR1, 0);
        jit.subPtr(CCallHelpers::TrustedImm32(extraStack), CCallHelpers::stackPointerRegister);
        jit.setupArgumentsWithExecState(argumentsGPR, scratchGPR1, CCallHelpers::TrustedImm32(data->firstVarArgOffset));
        callWithExceptionCheck(bitwise_cast<void*>(operationSizeFrameForVarargs));

        jit.move(GPRInfo::returnValueGPR, scratchGPR1);
        computeUsedStack(scratchGPR2, extraStack);
        jit.load64(CCallHelpers::addressFor(spillSlotsOffset + argumentsSpillSlot), argumentsGPR);
        emitSetVarargsFrame(jit, scratchGPR1, false, scratchGPR2, scratchGPR2);
        jit.addPtr(CCallHelpers::TrustedImm32(-extraStack), scratchGPR2, CCallHelpers::stackPointerRegister);
        jit.setupArgumentsWithExecState(scratchGPR2, argumentsGPR, CCallHelpers::TrustedImm32(data->firstVarArgOffset), scratchGPR1);
        callWithExceptionCheck(bitwise_cast<void*>(operationSetupVarargsFrame));

        jit.move(GPRInfo::returnValueGPR, scratchGPR2);

        jit.load64(CCallHelpers::addressFor(spillSlotsOffset + thisSpillSlot), thisGPR);
        jit.load64(CCallHelpers::addressFor(spillSlotsOffset + calleeSpillSlot), GPRInfo::regT0);
    }

    jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), scratchGPR2, CCallHelpers::stackPointerRegister);

    jit.store64(thisGPR, CCallHelpers::calleeArgumentSlot(0));

    // Henceforth we make the call. The base FTL call machinery expects the callee in regT0 and for the
    // stack frame to already be set up, which it is.
    jit.store64(GPRInfo::regT0, CCallHelpers::calleeFrameSlot(JSStack::Callee));

    m_callBase.emit(jit, state, osrExitFromGenericUnwindSpillSlots);

    // Undo the damage we've done.
    if (isARM64()) {
        GPRReg scratchGPRAtReturn = CCallHelpers::selectScratchGPR(GPRInfo::returnValueGPR);
        jit.loadPtr(CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot), scratchGPRAtReturn);
        jit.move(scratchGPRAtReturn, CCallHelpers::stackPointerRegister);
    } else
        jit.loadPtr(CCallHelpers::addressFor(spillSlotsOffset + stackPointerSpillSlot), CCallHelpers::stackPointerRegister);
}
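// Hedged sketch (illustration only): computeUsedStack above evaluates
// (callFrameRegister - stackPointer - extra) >> 3, i.e. the number of 8-byte
// Register slots in use between the frame base and the stack pointer.
static inline uintptr_t usedStackInRegisters(uintptr_t callFrame, uintptr_t stackPointer, unsigned extra)
{
    // The ARM64 path computes (-sp) + fp instead of fp - sp because sp cannot be
    // used directly as the subtrahend there; the result is the same.
    return (callFrame - stackPointer - extra) >> 3; // Bytes to Register units.
}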
CCallHelpers::Jump CheckSpecial::generate(Inst& inst, CCallHelpers& jit, GenerationContext& context)
{
    CCallHelpers::Jump fail = hiddenBranch(inst).generate(jit, context);
    ASSERT(fail.isSet());

    StackmapValue* value = inst.origin->as<StackmapValue>();
    ASSERT(value);

    Vector<ValueRep> reps = repsImpl(context, numB3Args(inst), m_numCheckArgs + 1, inst);

    // Set aside the args that are relevant to undoing the operation. This is because we don't want to
    // capture all of inst in the closure below.
    Vector<Arg, 3> args;
    for (unsigned i = 0; i < m_numCheckArgs; ++i)
        args.append(inst.args[1 + i]);

    context.latePaths.append(
        createSharedTask<GenerationContext::LatePathFunction>(
            [=] (CCallHelpers& jit, GenerationContext& context) {
                fail.link(&jit);

                // If necessary, undo the operation.
                switch (m_checkKind.opcode) {
                case BranchAdd32:
                    if ((m_numCheckArgs == 4 && args[1] == args[2] && args[2] == args[3])
                        || (m_numCheckArgs == 3 && args[1] == args[2])) {
                        // This is ugly, but that's fine - we won't have to do this very often.
                        ASSERT(args[1].isGPR());
                        GPRReg valueGPR = args[1].gpr();
                        GPRReg scratchGPR = CCallHelpers::selectScratchGPR(valueGPR);
                        jit.pushToSave(scratchGPR);
                        jit.setCarry(scratchGPR);
                        jit.lshift32(CCallHelpers::TrustedImm32(31), scratchGPR);
                        jit.urshift32(CCallHelpers::TrustedImm32(1), valueGPR);
                        jit.or32(scratchGPR, valueGPR);
                        jit.popToRestore(scratchGPR);
                        break;
                    }
                    if (m_numCheckArgs == 4) {
                        if (args[1] == args[3])
                            Inst(Sub32, nullptr, args[2], args[3]).generate(jit, context);
                        else if (args[2] == args[3])
                            Inst(Sub32, nullptr, args[1], args[3]).generate(jit, context);
                    } else if (m_numCheckArgs == 3)
                        Inst(Sub32, nullptr, args[1], args[2]).generate(jit, context);
                    break;
                case BranchAdd64:
                    if ((m_numCheckArgs == 4 && args[1] == args[2] && args[2] == args[3])
                        || (m_numCheckArgs == 3 && args[1] == args[2])) {
                        // This is ugly, but that's fine - we won't have to do this very often.
                        ASSERT(args[1].isGPR());
                        GPRReg valueGPR = args[1].gpr();
                        GPRReg scratchGPR = CCallHelpers::selectScratchGPR(valueGPR);
                        jit.pushToSave(scratchGPR);
                        jit.setCarry(scratchGPR);
                        jit.lshift64(CCallHelpers::TrustedImm32(63), scratchGPR);
                        jit.urshift64(CCallHelpers::TrustedImm32(1), valueGPR);
                        jit.or64(scratchGPR, valueGPR);
                        jit.popToRestore(scratchGPR);
                        break;
                    }
                    if (m_numCheckArgs == 4) {
                        if (args[1] == args[3])
                            Inst(Sub64, nullptr, args[2], args[3]).generate(jit, context);
                        else if (args[2] == args[3])
                            Inst(Sub64, nullptr, args[1], args[3]).generate(jit, context);
                    } else if (m_numCheckArgs == 3)
                        Inst(Sub64, nullptr, args[1], args[2]).generate(jit, context);
                    break;
                case BranchSub32:
                    Inst(Add32, nullptr, args[1], args[2]).generate(jit, context);
                    break;
                case BranchSub64:
                    Inst(Add64, nullptr, args[1], args[2]).generate(jit, context);
                    break;
                case BranchNeg32:
                    Inst(Neg32, nullptr, args[1]).generate(jit, context);
                    break;
                case BranchNeg64:
                    Inst(Neg64, nullptr, args[1]).generate(jit, context);
                    break;
                default:
                    break;
                }

                value->m_generator->run(jit, StackmapGenerationParams(value, reps, context));
            }));

    // As far as Air thinks, we are not a terminal.
    return CCallHelpers::Jump();
}
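// Hedged sketch (illustration only): the setCarry/lshift/urshift/or sequence above
// recovers the original operand after an overflowing x += x where the destination
// aliased both sources. The true 33-bit sum is carry:result, and halving it yields
// the original value.
static inline uint32_t recoverAddendAfterOverflow(uint32_t wrappedResult, bool carry)
{
    // original = (carry * 2^32 + wrappedResult) / 2
    return (static_cast<uint32_t>(carry) << 31) | (wrappedResult >> 1);
}
// Example: x = 0x90000000; x + x wraps to 0x20000000 with the carry set, and
// recoverAddendAfterOverflow(0x20000000, true) == 0x90000000.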
void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, ValueRecovery argCountRecovery, VirtualRegister firstArgumentReg, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
{
    CCallHelpers::JumpList end;

    if (argCountRecovery.isConstant()) {
        // FIXME: We could constant-fold a lot of the computation below in this case.
        // https://bugs.webkit.org/show_bug.cgi?id=141486
        jit.move(CCallHelpers::TrustedImm32(argCountRecovery.constant().asInt32()), scratchGPR1);
    } else
        jit.load32(CCallHelpers::payloadFor(argCountRecovery.virtualRegister()), scratchGPR1);
    if (firstVarArgOffset) {
        CCallHelpers::Jump sufficientArguments = jit.branch32(CCallHelpers::GreaterThan, scratchGPR1, CCallHelpers::TrustedImm32(firstVarArgOffset + 1));
        jit.move(CCallHelpers::TrustedImm32(1), scratchGPR1);
        CCallHelpers::Jump endVarArgs = jit.jump();
        sufficientArguments.link(&jit);
        jit.sub32(CCallHelpers::TrustedImm32(firstVarArgOffset), scratchGPR1);
        endVarArgs.link(&jit);
    }
    slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR1, CCallHelpers::TrustedImm32(maxArguments + 1)));

    emitSetVarargsFrame(jit, scratchGPR1, true, numUsedSlotsGPR, scratchGPR2);

    slowCase.append(jit.branchPtr(CCallHelpers::Above, CCallHelpers::AbsoluteAddress(jit.vm()->addressOfStackLimit()), scratchGPR2));

    // Initialize ArgumentCount.
    jit.store32(scratchGPR1, CCallHelpers::Address(scratchGPR2, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset));

    // Copy arguments.
    jit.signExtend32ToPtr(scratchGPR1, scratchGPR1);
    CCallHelpers::Jump done = jit.branchSubPtr(CCallHelpers::Zero, CCallHelpers::TrustedImm32(1), scratchGPR1);
    // scratchGPR1: argumentCount

    CCallHelpers::Label copyLoop = jit.label();
    int argOffset = (firstArgumentReg.offset() - 1 + firstVarArgOffset) * static_cast<int>(sizeof(Register));

#if USE(JSVALUE64)
    jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset), scratchGPR3);
    jit.store64(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
#else // The 32-bit case: copy the tag and payload halves separately.
    jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + TagOffset), scratchGPR3);
    jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + TagOffset));
    jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + PayloadOffset), scratchGPR3);
    jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + PayloadOffset));
#endif // USE(JSVALUE64)

    jit.branchSubPtr(CCallHelpers::NonZero, CCallHelpers::TrustedImm32(1), scratchGPR1).linkTo(copyLoop, &jit);

    done.link(&jit);
}
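// Hedged sketch (illustration only): the copy loop above amounts to this C loop
// over 8-byte Register slots, walking the index down from argumentCount - 1 to 0
// so that a single decrement-and-branch both advances and terminates the loop.
// The slot-offset parameters are hypothetical stand-ins for argOffset and
// CallFrame::thisArgumentOffset().
static inline void copyVarargs(uint64_t* newFrame, const uint64_t* callerFrame, int64_t argumentCount, int srcOffsetInSlots, int dstOffsetInSlots)
{
    for (int64_t i = argumentCount - 1; i >= 0; --i)
        newFrame[dstOffsetInSlots + i] = callerFrame[srcOffsetInSlots + i];
}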
void JITRightShiftGenerator::generateFastPath(CCallHelpers& jit)
{
    ASSERT(m_scratchGPR != InvalidGPRReg);
    ASSERT(m_scratchGPR != m_left.payloadGPR());
    ASSERT(m_scratchGPR != m_right.payloadGPR());
#if USE(JSVALUE32_64)
    ASSERT(m_scratchGPR != m_left.tagGPR());
    ASSERT(m_scratchGPR != m_right.tagGPR());
    ASSERT(m_scratchFPR != InvalidFPRReg);
#endif
    ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());

    m_didEmitFastPath = true;

    if (m_rightOperand.isConstInt32()) {
        // Try to do (intVar >> intConstant).
        CCallHelpers::Jump notInt = jit.branchIfNotInt32(m_left);

        jit.moveValueRegs(m_left, m_result);
        int32_t shiftAmount = m_rightOperand.asConstInt32() & 0x1f;
        if (shiftAmount) {
            if (m_shiftType == SignedShift)
                jit.rshift32(CCallHelpers::Imm32(shiftAmount), m_result.payloadGPR());
            else
                jit.urshift32(CCallHelpers::Imm32(shiftAmount), m_result.payloadGPR());
#if USE(JSVALUE64)
            jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
#endif
        }

        if (jit.supportsFloatingPointTruncate()) {
            m_endJumpList.append(jit.jump()); // Terminate the above case before emitting more code.

            // Try to do (doubleVar >> intConstant).
            notInt.link(&jit);

            m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));

            jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
            m_slowPathJumpList.append(jit.branchTruncateDoubleToInt32(m_leftFPR, m_scratchGPR));

            if (shiftAmount) {
                if (m_shiftType == SignedShift)
                    jit.rshift32(CCallHelpers::Imm32(shiftAmount), m_scratchGPR);
                else
                    jit.urshift32(CCallHelpers::Imm32(shiftAmount), m_scratchGPR);
            }
            jit.boxInt32(m_scratchGPR, m_result);

        } else
            m_slowPathJumpList.append(notInt);

    } else {
        // Try to do (intConstant >> intVar) or (intVar >> intVar).
        m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));

        CCallHelpers::Jump notInt;
        if (m_leftOperand.isConstInt32()) {
#if USE(JSVALUE32_64)
            jit.move(m_right.tagGPR(), m_result.tagGPR());
#endif
            jit.move(CCallHelpers::Imm32(m_leftOperand.asConstInt32()), m_result.payloadGPR());
        } else {
            notInt = jit.branchIfNotInt32(m_left);
            jit.moveValueRegs(m_left, m_result);
        }

        if (m_shiftType == SignedShift)
            jit.rshift32(m_right.payloadGPR(), m_result.payloadGPR());
        else
            jit.urshift32(m_right.payloadGPR(), m_result.payloadGPR());
#if USE(JSVALUE64)
        jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
#endif
        if (m_leftOperand.isConstInt32())
            return;

        if (jit.supportsFloatingPointTruncate()) {
            m_endJumpList.append(jit.jump()); // Terminate the above case before emitting more code.

            // Try to do (doubleVar >> intVar).
            notInt.link(&jit);

            m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));

            jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
            m_slowPathJumpList.append(jit.branchTruncateDoubleToInt32(m_leftFPR, m_scratchGPR));

            if (m_shiftType == SignedShift)
                jit.rshift32(m_right.payloadGPR(), m_scratchGPR);
            else
                jit.urshift32(m_right.payloadGPR(), m_scratchGPR);
            jit.boxInt32(m_scratchGPR, m_result);

        } else
            m_slowPathJumpList.append(notInt);
    }
}
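// Hedged sketch (illustration only): the `& 0x1f` masking of the constant shift
// amount above matches ECMAScript semantics, where the shift count is taken
// modulo 32. The two shift flavors map onto C++ like this.
static inline int32_t jsSignedRightShift(int32_t left, int32_t right)
{
    return left >> (right & 0x1f); // SignedShift: sign-extending.
}

static inline uint32_t jsUnsignedRightShift(uint32_t left, int32_t right)
{
    return left >> (right & 0x1f); // UnsignedShift: zero-filling.
}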