CodeBlock * CodeBlockManager::buildCodeBlockAt(MemlocData * growFrom)
{
    MemlocData * current_start = growFrom;
    MemlocData * current_start_prev = growFrom;
    MemlocData * current_end = growFrom;
    MemlocData * current_end_next = growFrom;

    if (!dynamic_cast<Instruction *>(growFrom))
        return NULL;

    // While we're not at the beginning of the memory segment,
    // and while we don't have any code xrefs to this location,
    // we need to continue to loop.
    // We break out of the loop if the insn before has a code xref from it.
    // Note: each hasNonLinkCodeXref call takes the begin/end iterator pair of
    // a single memloc's xref list; the original passed mismatched (and in the
    // forward loop, identical) iterators, which made the range empty.
    while ((current_start_prev = dynamic_cast<Instruction *>(current_start->getPreviousContiguous()))
           && !hasNonLinkCodeXref(current_start->begin_xref_to(), current_start->end_xref_to())
           && !hasNonLinkCodeXref(current_start_prev->begin_xref_from(), current_start_prev->end_xref_from()))
        current_start = current_start_prev;

    while ((current_end_next = dynamic_cast<Instruction *>(current_end->getNextContiguous()))
           && !hasNonLinkCodeXref(current_end_next->begin_xref_to(), current_end_next->end_xref_to())
           && !hasNonLinkCodeXref(current_end->begin_xref_from(), current_end->end_xref_from()))
        current_end = current_end_next;

    CodeBlock * cb = new CodeBlock(m_trace, current_start->get_addr(), current_end->get_addr());
    m_blocks[cb->getStart()] = cb;

    return cb;
}

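// A minimal, self-contained sketch of the growth rule above, over a toy
// instruction array rather than the real MemlocData/Instruction API (ToyInsn
// and its fields are hypothetical stand-ins): grow backward while the current
// instruction has no incoming code xref and its predecessor is code that falls
// through, then grow forward symmetrically.
#include <cstddef>
#include <utility>
#include <vector>

struct ToyInsn {
    bool is_code;            // decodes as an instruction
    bool has_code_xref_to;   // some code location jumps/calls here (a leader)
    bool has_code_xref_from; // this insn jumps/calls elsewhere (a terminator)
};

// Returns inclusive [start, end] indices of the straight-line block around `at`.
std::pair<std::size_t, std::size_t>
growBlockAt(const std::vector<ToyInsn>& insns, std::size_t at)
{
    std::size_t start = at, end = at;
    while (start > 0 && insns[start - 1].is_code
           && !insns[start].has_code_xref_to         // no one branches into us
           && !insns[start - 1].has_code_xref_from)  // predecessor falls through
        --start;
    while (end + 1 < insns.size() && insns[end + 1].is_code
           && !insns[end + 1].has_code_xref_to
           && !insns[end].has_code_xref_from)
        ++end;
    return {start, end};
}
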
void CodeBlockSet::deleteUnmarkedAndUnreferenced()
{
    // This needs to be a fixpoint because code blocks that are unmarked may
    // refer to each other. For example, a DFG code block that is owned by
    // the GC may refer to an FTL for-entry code block that is also owned by
    // the GC.
    Vector<CodeBlock*, 16> toRemove;
    if (verbose)
        dataLog("Fixpointing over unmarked, set size = ", m_set.size(), "...\n");
    for (;;) {
        HashSet<CodeBlock*>::iterator iter = m_set.begin();
        HashSet<CodeBlock*>::iterator end = m_set.end();
        for (; iter != end; ++iter) {
            CodeBlock* codeBlock = *iter;
            if (!codeBlock->hasOneRef())
                continue;
            if (codeBlock->m_mayBeExecuting)
                continue;
            codeBlock->deref();
            toRemove.append(codeBlock);
        }
        if (verbose)
            dataLog("    Removing ", toRemove.size(), " blocks.\n");
        if (toRemove.isEmpty())
            break;
        for (unsigned i = toRemove.size(); i--;)
            m_set.remove(toRemove[i]);
        toRemove.resize(0);
    }
}

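// Why the fixpoint above is needed, in a self-contained toy (std::shared_ptr
// stands in for JSC's ref counting; ToyBlock is hypothetical): erasing one
// unmarked block drops the reference it holds, which can make a second
// unmarked block newly unreferenced, so a single pass is not enough.
#include <memory>
#include <unordered_set>

struct ToyBlock {
    bool mayBeExecuting = false;
    std::shared_ptr<ToyBlock> refersTo; // e.g. DFG block -> OSR-entry block
};

void deleteUnmarkedToy(std::unordered_set<std::shared_ptr<ToyBlock>>& set)
{
    bool removedAny = true;
    while (removedAny) { // fixpoint loop
        removedAny = false;
        for (auto it = set.begin(); it != set.end();) {
            // use_count() == 1: the set holds the only reference, the
            // analogue of hasOneRef() in the real code.
            if (it->use_count() == 1 && !(*it)->mayBeExecuting) {
                it = set.erase(it); // destroys refersTo; may orphan another block
                removedAny = true;
            } else
                ++it;
        }
    }
}
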
JSCell* JIT_OPERATION operationCreateClonedArgumentsDuringExit(ExecState* exec, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount)
{
    VM& vm = exec->vm();
    NativeCallFrameTracer target(&vm, exec);

    DeferGCForAWhile deferGC(vm.heap);

    CodeBlock* codeBlock;
    if (inlineCallFrame)
        codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    else
        codeBlock = exec->codeBlock();

    unsigned length = argumentCount - 1;
    ClonedArguments* result = ClonedArguments::createEmpty(
        vm, codeBlock->globalObject()->outOfBandArgumentsStructure(), callee);

    Register* arguments =
        exec->registers() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0) +
        CallFrame::argumentOffset(0);
    for (unsigned i = length; i--;)
        result->putDirectIndex(exec, i, arguments[i].jsValue());

    result->putDirect(vm, vm.propertyNames->length, jsNumber(length));

    return result;
}

void BytecodeLivenessAnalysis::computeKills(BytecodeKills& result)
{
    FastBitVector out;

    CodeBlock* codeBlock = m_graph.codeBlock();
    result.m_codeBlock = codeBlock;
    result.m_killSets = std::make_unique<BytecodeKills::KillSet[]>(codeBlock->instructions().size());
    for (std::unique_ptr<BytecodeBasicBlock>& block : m_graph.basicBlocksInReverseOrder()) {
        if (block->isEntryBlock() || block->isExitBlock())
            continue;

        out = block->out();

        for (unsigned i = block->offsets().size(); i--;) {
            unsigned bytecodeOffset = block->offsets()[i];
            stepOverInstruction(
                m_graph, bytecodeOffset, out,
                [&] (unsigned index) {
                    // This is for uses.
                    if (out.get(index))
                        return;
                    result.m_killSets[bytecodeOffset].add(index);
                    out.set(index);
                },
                [&] (unsigned index) {
                    // This is for defs.
                    out.clear(index);
                });
        }
    }
}

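// The same backward kill computation restated over toy data, with
// std::vector<bool> in place of FastBitVector (ToyInsn and its use/def lists
// are hypothetical): walking a block in reverse, undo each instruction's defs,
// then treat the first backward sighting of a use as that local's kill point.
#include <cstddef>
#include <vector>

struct ToyInsn {
    std::vector<unsigned> uses; // locals read
    std::vector<unsigned> defs; // locals written
};

// killSets[i] receives the locals whose live ranges end at instruction i.
void computeKillsToy(const std::vector<ToyInsn>& block,
                     std::vector<bool> out, // live-out of the block, by value
                     std::vector<std::vector<unsigned>>& killSets)
{
    killSets.assign(block.size(), {});
    for (std::size_t i = block.size(); i--;) {
        for (unsigned index : block[i].defs)
            out[index] = false;              // reverse-execute the def
        for (unsigned index : block[i].uses) {
            if (out[index])
                continue;                    // already live below: not the last use
            killSets[i].push_back(index);    // last use => kill point
            out[index] = true;               // live from here upward
        }
    }
}
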
void align(CodeBlock& cb, Alignment alignment, AlignContext context,
           bool fixups /* = false */) {
  vixl::MacroAssembler a { cb };

  switch (alignment) {
    case Alignment::CacheLine:
    case Alignment::CacheLineRoundUp:
    case Alignment::JmpTarget:
      break;

    case Alignment::SmashCmpq:
    case Alignment::SmashMovq:
    case Alignment::SmashJmp:
      // Smashable movs and jmps are two instructions plus inline 64-bit data,
      // so they need to be 8-byte aligned.
      if (!cb.isFrontierAligned(8)) a.Nop();
      break;

    case Alignment::SmashCall:
      // A smashable call is 8 instructions plus inline 64-bit data, so it must
      // be 8-byte aligned.
      if (!cb.isFrontierAligned(8)) a.Nop();
      break;

    case Alignment::SmashJcc:
    case Alignment::SmashJccAndJmp:
      // Other smashable control-flow instructions are three instructions plus
      // inline 64-bit data, so they need to be one instruction off from 8-byte
      // alignment.
      if (cb.isFrontierAligned(8)) a.Nop();
      break;
  }
}

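// Why a single Nop() always suffices in align() above: A64 instructions are 4
// bytes, so the frontier is always 4-byte aligned and one nop toggles 8-byte
// alignment. In every smashable case the point is that the inline 64-bit data
// word lands 8-byte aligned, presumably so it can be rewritten with a single
// aligned store. A standalone illustration with the frontier as a raw address:
#include <cassert>
#include <cstdint>

constexpr std::uintptr_t kA64InsnSize = 4;

// An even number of instructions precedes the data word, so the frontier
// itself must be 8-byte aligned (SmashMovq/SmashJmp/SmashCall).
std::uintptr_t padForEvenPrefix(std::uintptr_t frontier)
{
    assert(frontier % kA64InsnSize == 0);
    return frontier % 8 == 0 ? frontier : frontier + kA64InsnSize;
}

// Three instructions (12 bytes) precede the data word, so the frontier must
// be one instruction *off* 8-byte alignment (SmashJcc): 12 % 8 == 4.
std::uintptr_t padForOddPrefix(std::uintptr_t frontier)
{
    assert(frontier % kA64InsnSize == 0);
    return frontier % 8 != 0 ? frontier : frontier + kA64InsnSize;
}
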
static void fixupPCforExceptionIfNeeded(ExecState* exec)
{
    CodeBlock* codeBlock = exec->codeBlock();
    ASSERT(!!codeBlock);
    Instruction* pc = exec->currentVPC();
    exec->setCurrentVPC(codeBlock->adjustPCIfAtCallSite(pc));
}

static void appendSourceToError(CallFrame* callFrame, ErrorInstance* exception, unsigned bytecodeOffset)
{
    ErrorInstance::SourceAppender appender = exception->sourceAppender();
    exception->clearSourceAppender();
    RuntimeType type = exception->runtimeTypeForCause();
    exception->clearRuntimeTypeForCause();

    if (!callFrame->codeBlock()->hasExpressionInfo())
        return;

    int startOffset = 0;
    int endOffset = 0;
    int divotPoint = 0;
    unsigned line = 0;
    unsigned column = 0;

    CodeBlock* codeBlock;
    CodeOrigin codeOrigin = callFrame->codeOrigin();
    if (codeOrigin && codeOrigin.inlineCallFrame)
        codeBlock = baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame);
    else
        codeBlock = callFrame->codeBlock();

    codeBlock->expressionRangeForBytecodeOffset(bytecodeOffset, divotPoint, startOffset, endOffset, line, column);

    int expressionStart = divotPoint - startOffset;
    int expressionStop = divotPoint + endOffset;

    StringView sourceString = codeBlock->source()->source();
    if (!expressionStop || expressionStart > static_cast<int>(sourceString.length()))
        return;

    VM* vm = &callFrame->vm();
    JSValue jsMessage = exception->getDirect(*vm, vm->propertyNames->message);
    if (!jsMessage || !jsMessage.isString())
        return;

    String message = asString(jsMessage)->value(callFrame);
    if (expressionStart < expressionStop)
        message = appender(message, codeBlock->source()->getRange(expressionStart, expressionStop).toString(), type, ErrorInstance::FoundExactSource);
    else {
        // No range information, so give a few characters of context.
        int dataLength = sourceString.length();
        int start = expressionStart;
        int stop = expressionStart;
        // Get up to 20 characters of context to the left and right of the divot, clamping to the line.
        // Then strip whitespace.
        while (start > 0 && (expressionStart - start < 20) && sourceString[start - 1] != '\n')
            start--;
        while (start < (expressionStart - 1) && isStrWhiteSpace(sourceString[start]))
            start++;
        while (stop < dataLength && (stop - expressionStart < 20) && sourceString[stop] != '\n')
            stop++;
        while (stop > expressionStart && isStrWhiteSpace(sourceString[stop - 1]))
            stop--;
        message = appender(message, codeBlock->source()->getRange(start, stop).toString(), type, ErrorInstance::FoundApproximateSource);
    }
    exception->putDirect(*vm, vm->propertyNames->message, jsString(vm, message));
}

JSLexicalEnvironment* CallFrame::lexicalEnvironment() const
{
    CodeBlock* codeBlock = this->codeBlock();
    RELEASE_ASSERT(codeBlock->needsActivation());
    VirtualRegister activationRegister = codeBlock->activationRegister();
    return registers()[activationRegister.offset()].Register::lexicalEnvironment();
}

void CallFrame::setActivation(JSLexicalEnvironment* lexicalEnvironment)
{
    CodeBlock* codeBlock = this->codeBlock();
    RELEASE_ASSERT(codeBlock->needsActivation());
    VirtualRegister activationRegister = codeBlock->activationRegister();
    registers()[activationRegister.offset()] = lexicalEnvironment;
}

void relocateStubs(TransLoc& loc, TCA frozenStart, TCA frozenEnd,
                   RelocationInfo& rel, CodeCache::View cache, CGMeta& fixups) {
  auto const stubSize = svcreq::stub_size();

  for (auto addr : fixups.reusedStubs) {
    if (!loc.contains(addr)) continue;
    always_assert(frozenStart <= addr);

    CodeBlock dest;
    dest.init(cache.frozen().frontier(), stubSize, "New Stub");
    x64::relocate(rel, dest, addr, addr + stubSize, fixups, nullptr);
    cache.frozen().skip(stubSize);
    if (addr != frozenStart) {
      rel.recordRange(frozenStart, addr, frozenStart, addr);
    }
    frozenStart = addr + stubSize;
  }
  if (frozenStart != frozenEnd) {
    rel.recordRange(frozenStart, frozenEnd, frozenStart, frozenEnd);
  }

  x64::adjustForRelocation(rel);
  x64::adjustMetaDataForRelocation(rel, nullptr, fixups);
  x64::adjustCodeForRelocation(rel, fixups);
}

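// The range bookkeeping in relocateStubs, in isolation (recordGaps and Addr
// are hypothetical; the real code interleaves this with x64::relocate): given
// stub starts sorted ascending within [start, end), every maximal gap between
// relocated stubs is recorded as an identity-mapped range.
#include <cstdint>
#include <functional>
#include <vector>

using Addr = std::uintptr_t;

void recordGaps(Addr start, Addr end, std::size_t stubSize,
                const std::vector<Addr>& sortedStubs,
                const std::function<void(Addr, Addr)>& record)
{
    for (Addr addr : sortedStubs) {
        if (addr != start)
            record(start, addr);     // the bytes before this stub stay in place
        start = addr + stubSize;     // skip over the stub being relocated
    }
    if (start != end)
        record(start, end);          // the tail after the last stub
}
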
void recordGdbTranslation(SrcKey sk, const Func* srcFunc, const CodeBlock& cb,
                          const TCA start, const TCA end, bool exit,
                          bool inPrologue) {
  assertx(cb.contains(start) && cb.contains(end));
  if (start != end) {
    assertOwnsCodeLock();
    if (!RuntimeOption::EvalJitNoGdb) {
      Debug::DebugInfo::Get()->recordTracelet(
        Debug::TCRange(start, end, &cb == &code().cold()),
        srcFunc,
        srcFunc->unit() ? srcFunc->unit()->at(sk.offset()) : nullptr,
        exit, inPrologue
      );
    }
    if (RuntimeOption::EvalPerfPidMap) {
      Debug::DebugInfo::Get()->recordPerfMap(
        Debug::TCRange(start, end, &cb == &code().cold()),
        sk, srcFunc, exit, inPrologue
      );
    }
  }
}

static EncodedJSValue JSC_HOST_CALL functionPrintByteCodeFor(ExecState* exec)
{
    CodeBlock* codeBlock = codeBlockFromArg(exec);
    if (codeBlock)
        codeBlock->dumpBytecode();
    return JSValue::encode(jsUndefined());
}

void emitCheckSurpriseFlagsEnter(CodeBlock& mainCode, CodeBlock& stubsCode,
                                 bool inTracelet, FixupMap& fixupMap,
                                 Fixup fixup) {
  Asm a { mainCode };
  Asm astubs { stubsCode };

  emitTestSurpriseFlags(a);
  a.  jnz  (stubsCode.frontier());

  astubs.  movq  (rVmFp, argNumToRegName[0]);
  if (false) { // typecheck
    const ActRec* ar = nullptr;
    functionEnterHelper(ar);
  }
  emitCall(astubs, (TCA)&functionEnterHelper);
  if (inTracelet) {
    fixupMap.recordSyncPoint(stubsCode.frontier(),
                             fixup.m_pcOffset, fixup.m_spOffset);
  } else {
    // If we're being called while generating a func prologue, we
    // have to record the fixup directly in the fixup map instead of
    // going through the pending fixup path like normal.
    fixupMap.recordFixup(stubsCode.frontier(), fixup);
  }
  astubs.  jmp  (mainCode.frontier());
}

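// The `if (false)` block above is a compile-time signature check, a common
// idiom when a helper is invoked through a raw code address: the call is never
// executed, but the compiler still typechecks it, so a signature change in the
// helper breaks the build instead of silently corrupting the emitted call.
// Generic form (someHelper and the commented-out emitCall are illustrative):
long someHelper(const void* arg) { return arg != nullptr; }

void emitCallToHelper() {
  if (false) { // never runs; only forces a typecheck of the call expression
    someHelper(nullptr);
  }
  // emitCall(a, (TCA)&someHelper); // the raw-address call the check protects
}
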
void adjustAndJumpToTarget(CCallHelpers& jit, const OSRExitBase& exit)
{
#if ENABLE(GGC)
    jit.move(AssemblyHelpers::TrustedImmPtr(jit.codeBlock()->ownerExecutable()), GPRInfo::nonArgGPR0);
    osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
    InlineCallFrameSet* inlineCallFrames = jit.codeBlock()->jitCode()->dfgCommon()->inlineCallFrames.get();
    if (inlineCallFrames) {
        for (InlineCallFrame* inlineCallFrame : *inlineCallFrames) {
            ScriptExecutable* ownerExecutable = inlineCallFrame->executable.get();
            jit.move(AssemblyHelpers::TrustedImmPtr(ownerExecutable), GPRInfo::nonArgGPR0);
            osrWriteBarrier(jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1);
        }
    }
#endif

    if (exit.m_codeOrigin.inlineCallFrame)
        jit.addPtr(AssemblyHelpers::TrustedImm32(exit.m_codeOrigin.inlineCallFrame->stackOffset * sizeof(EncodedJSValue)), GPRInfo::callFrameRegister);

    CodeBlock* baselineCodeBlock = jit.baselineCodeBlockFor(exit.m_codeOrigin);
    Vector<BytecodeAndMachineOffset>& decodedCodeMap = jit.decodedCodeMapFor(baselineCodeBlock);

    BytecodeAndMachineOffset* mapping = binarySearch<BytecodeAndMachineOffset, unsigned>(decodedCodeMap, decodedCodeMap.size(), exit.m_codeOrigin.bytecodeIndex, BytecodeAndMachineOffset::getBytecodeIndex);

    ASSERT(mapping);
    ASSERT(mapping->m_bytecodeIndex == exit.m_codeOrigin.bytecodeIndex);

    void* jumpTarget = baselineCodeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset);

    jit.addPtr(AssemblyHelpers::TrustedImm32(JIT::stackPointerOffsetFor(baselineCodeBlock) * sizeof(Register)), GPRInfo::callFrameRegister, AssemblyHelpers::stackPointerRegister);

    jit.jitAssertTagsInPlace();

    jit.move(AssemblyHelpers::TrustedImmPtr(jumpTarget), GPRInfo::regT2);
    jit.jump(GPRInfo::regT2);
}

JSObject* ScriptExecutable::prepareForExecutionImpl(
    ExecState* exec, JSFunction* function, JSScope* scope, CodeSpecializationKind kind)
{
    VM& vm = exec->vm();
    DeferGC deferGC(vm.heap);

    if (vm.getAndClearFailNextNewCodeBlock())
        return createError(exec->callerFrame(), ASCIILiteral("Forced Failure"));

    JSObject* exception = 0;
    CodeBlock* codeBlock = newCodeBlockFor(kind, function, scope, exception);
    if (!codeBlock) {
        RELEASE_ASSERT(exception);
        return exception;
    }

    if (Options::validateBytecode())
        codeBlock->validate();

    if (Options::useLLInt())
        setupLLInt(vm, codeBlock);
    else
        setupJIT(vm, codeBlock);

    installCode(*codeBlock->vm(), codeBlock, codeBlock->codeType(), codeBlock->specializationKind());
    return 0;
}

JSValue CallFrame::uncheckedActivation() const
{
    CodeBlock* codeBlock = this->codeBlock();
    RELEASE_ASSERT(codeBlock->needsActivation());
    VirtualRegister activationRegister = codeBlock->activationRegister();
    return registers()[activationRegister.offset()].jsValue();
}

JSValue JSC_HOST_CALL callHostFunctionAsConstructor(ExecState* exec, JSObject* constructor, JSValue, const ArgList&)
{
    CodeBlock* codeBlock = exec->callerFrame()->codeBlock();
    unsigned vPCIndex = codeBlock->bytecodeOffset(exec, exec->returnPC());
    exec->setException(createNotAConstructorError(exec, constructor, vPCIndex, codeBlock));
    return JSValue();
}

static bool attemptToOptimizeClosureCall(ExecState* execCallee, JSCell* calleeAsFunctionCell, CallLinkInfo& callLinkInfo)
{
    if (!calleeAsFunctionCell)
        return false;

    JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell);
    JSFunction* oldCallee = callLinkInfo.callee.get();

    if (!oldCallee
        || oldCallee->structure() != callee->structure()
        || oldCallee->executable() != callee->executable())
        return false;

    ASSERT(callee->executable()->hasJITCodeForCall());
    MacroAssemblerCodePtr codePtr = callee->executable()->generatedJITCodeForCall()->addressForCall();

    CodeBlock* codeBlock;
    if (callee->executable()->isHostFunction())
        codeBlock = 0;
    else {
        codeBlock = jsCast<FunctionExecutable*>(callee->executable())->codeBlockForCall();
        if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()))
            return false;
    }

    linkClosureCall(
        execCallee, callLinkInfo, codeBlock,
        callee->structure(), callee->executable(), codePtr);

    return true;
}

void DFG_OPERATION debugOperationPrintSpeculationFailure(ExecState*, void* debugInfoRaw)
{
    SpeculationFailureDebugInfo* debugInfo = static_cast<SpeculationFailureDebugInfo*>(debugInfoRaw);
    CodeBlock* codeBlock = debugInfo->codeBlock;
    CodeBlock* alternative = codeBlock->alternative();
    printf(
        "Speculation failure in %p at 0x%x with executeCounter = %d, "
        "reoptimizationRetryCounter = %u, optimizationDelayCounter = %u, "
        "success/fail %u/%u\n",
        codeBlock,
        debugInfo->debugOffset,
        alternative ? alternative->executeCounter() : 0,
        alternative ? alternative->reoptimizationRetryCounter() : 0,
        alternative ? alternative->optimizationDelayCounter() : 0,
        codeBlock->speculativeSuccessCounter(),
        codeBlock->speculativeFailCounter());
}

void writeMethodClassWrapperMethodBody(const CppMethod * cppMethod, CodeBlock * codeBlock)
{
    string s;

    s = Poco::format("cpgf::GScopedInterface<cpgf::IScriptFunction> func(this->getScriptFunction(\"%s\"));",
        cppMethod->getName());
    codeBlock->appendLine(s);
    codeBlock->appendLine("if(func)");

    CodeBlock * bodyBlock = codeBlock->appendBlock(cbsBracketAndIndent);
    s = "cpgf::invokeScriptFunction(func.get(), this";
    if(cppMethod->getArity() > 0) {
        s = Poco::format("%s, %s", s, cppMethod->getTextOfParamList(itoWithArgName));
    }
    s.append(");");
    if(cppMethod->hasResult()) {
        bodyBlock->appendLine("return " + s);
    }
    else {
        bodyBlock->appendLine(s);
        bodyBlock->appendLine("return;");
    }

    writeMethodClassWrapperCallSuperMethod(cppMethod, codeBlock);
}

static void genBlock(IRUnit& unit, CodeBlock& cb, CodeBlock& stubsCode,
                     MCGenerator* mcg, CodegenState& state, Block* block,
                     std::vector<TransBCMapping>* bcMap) {
  FTRACE(6, "genBlock: {}\n", block->id());

  std::unique_ptr<CodeGenerator> cg(
    mcg->backEnd().newCodeGenerator(unit, cb, stubsCode, mcg, state));

  BCMarker prevMarker;
  for (IRInstruction& instr : *block) {
    IRInstruction* inst = &instr;
    // If we're on the first instruction of the block or we have a new
    // marker since the last instruction, update the bc mapping.
    if ((!prevMarker.valid() || inst->marker() != prevMarker) &&
        (mcg->tx().isTransDBEnabled() ||
         RuntimeOption::EvalJitUseVtuneAPI) && bcMap) {
      bcMap->push_back(TransBCMapping{inst->marker().func()->unit()->md5(),
                                      inst->marker().bcOff(),
                                      cb.frontier(),
                                      stubsCode.frontier()});
      prevMarker = inst->marker();
    }
    auto* addr = cg->cgInst(inst);
    if (state.asmInfo && addr) {
      state.asmInfo->updateForInstruction(inst, addr, cb.frontier());
    }
  }
}

extern "C" void* compileFTLOSRExit(ExecState* exec, unsigned exitID) { SamplingRegion samplingRegion("FTL OSR Exit Compilation"); CodeBlock* codeBlock = exec->codeBlock(); ASSERT(codeBlock); ASSERT(codeBlock->jitType() == JITCode::FTLJIT); VM* vm = &exec->vm(); // It's sort of preferable that we don't GC while in here. Anyways, doing so wouldn't // really be profitable. DeferGCForAWhile deferGC(vm->heap); JITCode* jitCode = codeBlock->jitCode()->ftl(); OSRExit& exit = jitCode->osrExit[exitID]; prepareCodeOriginForOSRExit(exec, exit.m_codeOrigin); if (Options::ftlUsesStackmaps()) compileStubWithOSRExitStackmap(exitID, jitCode, exit, vm, codeBlock); else compileStubWithoutOSRExitStackmap(exitID, exit, vm, codeBlock); RepatchBuffer repatchBuffer(codeBlock); repatchBuffer.relink( exit.codeLocationForRepatch(codeBlock), CodeLocationLabel(exit.m_code.code())); return exit.m_code.code().executableAddress(); }
void CallLinkStatus::computeDFGStatuses(
    CodeBlock* dfgCodeBlock, CallLinkStatus::ContextMap& map)
{
#if ENABLE(DFG_JIT)
    RELEASE_ASSERT(dfgCodeBlock->jitType() == JITCode::DFGJIT);
    CodeBlock* baselineCodeBlock = dfgCodeBlock->alternative();
    for (auto iter = dfgCodeBlock->callLinkInfosBegin(); !!iter; ++iter) {
        CallLinkInfo& info = **iter;
        CodeOrigin codeOrigin = info.codeOrigin;

        bool takeSlowPath;
        bool badFunction;

        // Check if we had already previously made a terrible mistake in the FTL for this
        // code origin. Note that this is approximate because we could have a monovariant
        // inline in the FTL that ended up failing. We should fix that at some point by
        // having data structures to track the context of frequent exits. This is currently
        // challenging because it would require creating a CodeOrigin-based database in
        // baseline CodeBlocks, but those CodeBlocks don't really have a place to put the
        // InlineCallFrames.
        CodeBlock* currentBaseline =
            baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock);
        {
            ConcurrentJITLocker locker(currentBaseline->m_lock);
            takeSlowPath =
                currentBaseline->hasExitSite(locker, DFG::FrequentExitSite(codeOrigin.bytecodeIndex, BadCache, ExitFromFTL))
                || currentBaseline->hasExitSite(locker, DFG::FrequentExitSite(codeOrigin.bytecodeIndex, BadCacheWatchpoint, ExitFromFTL))
                || currentBaseline->hasExitSite(locker, DFG::FrequentExitSite(codeOrigin.bytecodeIndex, BadExecutable, ExitFromFTL));
            badFunction =
                currentBaseline->hasExitSite(locker, DFG::FrequentExitSite(codeOrigin.bytecodeIndex, BadFunction, ExitFromFTL));
        }

        {
            ConcurrentJITLocker locker(dfgCodeBlock->m_lock);
            if (takeSlowPath)
                map.add(info.codeOrigin, takesSlowPath());
            else {
                CallLinkStatus status = computeFor(locker, info);
                if (status.isSet()) {
                    if (badFunction)
                        status.makeClosureCall();
                    map.add(info.codeOrigin, status);
                }
            }
        }
    }
#else
    UNUSED_PARAM(dfgCodeBlock);
#endif // ENABLE(DFG_JIT)

    if (verbose) {
        dataLog("Context map:\n");
        ContextMap::iterator iter = map.begin();
        ContextMap::iterator end = map.end();
        for (; iter != end; ++iter) {
            dataLog("    ", iter->key, ":\n");
            dataLog("        ", iter->value, "\n");
        }
    }
}

static void genBlock(IRUnit& unit, CodeBlock& cb, CodeBlock& coldCode,
                     CodeBlock& frozenCode, CodegenState& state, Block* block,
                     std::vector<TransBCMapping>* bcMap) {
  FTRACE(6, "genBlock: {}\n", block->id());

  std::unique_ptr<CodeGenerator> cg(
    mcg->backEnd().newCodeGenerator(unit, cb, coldCode, frozenCode, state));

  for (IRInstruction& instr : *block) {
    IRInstruction* inst = &instr;

    if (instr.is(EndGuards)) state.pastGuards = true;
    if (bcMap && state.pastGuards &&
        (mcg->tx().isTransDBEnabled() || RuntimeOption::EvalJitUseVtuneAPI)) {
      // Don't insert an entry in bcMap if the marker corresponds to the last
      // entry in there.
      if (bcMap->empty() ||
          bcMap->back().md5 != inst->marker().func()->unit()->md5() ||
          bcMap->back().bcStart != inst->marker().bcOff()) {
        bcMap->push_back(TransBCMapping{
          inst->marker().func()->unit()->md5(),
          inst->marker().bcOff(),
          mcg->cgFixups().m_tletMain->frontier(),
          mcg->cgFixups().m_tletCold->frontier(),
          mcg->cgFixups().m_tletFrozen->frontier()});
      }
    }
    auto* start = cb.frontier();
    cg->cgInst(inst);
    if (state.asmInfo && start < cb.frontier()) {
      state.asmInfo->updateForInstruction(inst, start, cb.frontier());
    }
  }
}

int32_t emitBindCall(CodeBlock& mainCode, CodeBlock& stubsCode,
                     SrcKey srcKey, const Func* funcd, int numArgs) {
  // If this is a call to a builtin and we don't need any argument
  // munging, we can skip the prologue system and do it inline.
  if (isNativeImplCall(funcd, numArgs)) {
    StoreImmPatcher patchIP(mainCode, (uint64_t)mainCode.frontier(), reg::rax,
                            cellsToBytes(numArgs) + AROFF(m_savedRip), rVmSp);
    assert(funcd->numLocals() == funcd->numParams());
    assert(funcd->numIterators() == 0);
    Asm a { mainCode };
    emitLea(a, rVmSp[cellsToBytes(numArgs)], rVmFp);
    emitCheckSurpriseFlagsEnter(mainCode, stubsCode, true, tx64->fixupMap(),
                                Fixup(0, numArgs));
    // rVmSp is already correctly adjusted, because there's no locals
    // other than the arguments passed.
    auto retval = emitNativeImpl(mainCode, funcd,
                                 false /* don't jump to return */);
    patchIP.patch(uint64_t(mainCode.frontier()));
    return retval;
  }

  Asm a { mainCode };
  if (debug) {
    a.  storeq (kUninitializedRIP,
                rVmSp[cellsToBytes(numArgs) + AROFF(m_savedRip)]);
  }
  // Stash callee's rVmFp into rStashedAR for the callee's prologue.
  emitLea(a, rVmSp[cellsToBytes(numArgs)], rStashedAR);
  emitBindCallHelper(mainCode, stubsCode, srcKey, funcd, numArgs);
  return 0;
}

// While a b.cc can be overwritten on ARM, if the cc and the target are both
// changed, a concurrent thread can observe the old cc jumping to the new
// target, or the new cc jumping to the old target. Therefore we keep the
// branch as an indirect branch through a target stored in the instruction
// stream. This way we can at least guarantee that the old cc won't jump to
// the new target. We can still have an issue where the new cc jumps to the
// old target, but that old target is *likely* a stub.
TCA emitSmashableJcc(CodeBlock& cb, CGMeta& meta, TCA target,
                     ConditionCode cc) {
  align(cb, &meta, Alignment::SmashJcc, AlignContext::Live);

  vixl::MacroAssembler a { cb };
  vixl::Label target_data;
  vixl::Label after_data;

  auto const start = cb.frontier();
  meta.smashableLocations.insert(start);

  // If the condition fails, branch past the smashable jump and its inline data.
  a.    B    (&after_data, InvertCondition(arm::convertCC(cc)));

  // Emit the smashable jump.
  a.    Ldr  (rAsm, &target_data);
  a.    Br   (rAsm);

  // Emit the jmp target into the instruction stream.
  a.    bind (&target_data);
  a.    dc64 (target);
  a.    bind (&after_data);

  __builtin___clear_cache(reinterpret_cast<char*>(start),
                          reinterpret_cast<char*>(cb.frontier()));
  return start;
}

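// What the inline data word buys a later smasher (a hedged sketch, not the
// codebase's actual smashing API): because align() guarantees the dc64 slot
// is 8-byte aligned, one 64-bit store retargets the branch in a single step,
// whereas rewriting a direct b.cc plus its target would take two non-atomic
// instruction writes.
#include <atomic>
#include <cstdint>

// `slot` is the address of the dc64 word emitted by emitSmashableJcc.
void smashJccTarget(std::uint64_t* slot, std::uint64_t newTarget)
{
    reinterpret_cast<std::atomic<std::uint64_t>*>(slot)
        ->store(newTarget, std::memory_order_release);
    // Any cache maintenance required for other cores is elided here.
}
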
JSCell* JIT_OPERATION operationCreateDirectArgumentsDuringExit(ExecState* exec, InlineCallFrame* inlineCallFrame, JSFunction* callee, int32_t argumentCount)
{
    VM& vm = exec->vm();
    NativeCallFrameTracer target(&vm, exec);

    DeferGCForAWhile deferGC(vm.heap);

    CodeBlock* codeBlock;
    if (inlineCallFrame)
        codeBlock = baselineCodeBlockForInlineCallFrame(inlineCallFrame);
    else
        codeBlock = exec->codeBlock();

    unsigned length = argumentCount - 1;
    unsigned capacity = std::max(length, static_cast<unsigned>(codeBlock->numParameters() - 1));
    DirectArguments* result = DirectArguments::create(
        vm, codeBlock->globalObject()->directArgumentsStructure(), length, capacity);

    result->callee().set(vm, result, callee);

    Register* arguments =
        exec->registers() + (inlineCallFrame ? inlineCallFrame->stackOffset : 0) +
        CallFrame::argumentOffset(0);
    for (unsigned i = length; i--;)
        result->setIndexQuickly(vm, i, arguments[i].jsValue());

    return result;
}

TCA emitSmashableCall(CodeBlock& cb, CGMeta& meta, TCA target) {
  align(cb, &meta, Alignment::SmashCall, AlignContext::Live);

  vixl::MacroAssembler a { cb };
  vixl::Label target_data;
  vixl::Label after_data;

  auto const start = cb.frontier();
  meta.smashableLocations.insert(start);

  // Jump over the inline data.
  a.    B    (&after_data);

  // Emit the call target into the instruction stream.
  a.    bind (&target_data);
  a.    dc64 (target);
  a.    bind (&after_data);

  // Load the target address and call it.
  a.    Ldr  (rAsm, &target_data);
  a.    Blr  (rAsm);

  __builtin___clear_cache(reinterpret_cast<char*>(start),
                          reinterpret_cast<char*>(cb.frontier()));
  return start;
}

static void updateResultProfileForBinaryArithOp(ExecState* exec, Instruction* pc, JSValue result, JSValue left, JSValue right)
{
    CodeBlock* codeBlock = exec->codeBlock();
    unsigned bytecodeOffset = codeBlock->bytecodeOffset(pc);
    ResultProfile* profile = codeBlock->ensureResultProfile(bytecodeOffset);

    if (result.isNumber()) {
        if (!result.isInt32()) {
            if (left.isInt32() && right.isInt32())
                profile->setObservedInt32Overflow();

            double doubleVal = result.asNumber();
            if (!doubleVal && std::signbit(doubleVal))
                profile->setObservedNegZeroDouble();
            else {
                profile->setObservedNonNegZeroDouble();

                // The Int52 overflow check here intentionally omits -(1ll << 51) as a
                // valid negative Int52 value. Therefore, we will get a false positive
                // if the result is that value. This is intentionally done to simplify
                // the checking algorithm.
                static const int64_t int52OverflowPoint = (1ll << 51);
                int64_t int64Val = static_cast<int64_t>(std::abs(doubleVal));
                if (int64Val >= int52OverflowPoint)
                    profile->setObservedInt52Overflow();
            }
        }
    } else
        profile->setObservedNonNumber();
}

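// The Int52 overflow test above in isolation, including its deliberate false
// positive: -(1ll << 51) is a representable Int52, but folding the value
// through std::abs flags it anyway, keeping the test to a single compare.
#include <cmath>
#include <cstdint>

bool observesInt52Overflow(double result)
{
    static const int64_t int52OverflowPoint = 1ll << 51;
    return static_cast<int64_t>(std::abs(result)) >= int52OverflowPoint;
}

// observesInt52Overflow(2251799813685247.0)  == false  // 2^51 - 1 fits
// observesInt52Overflow(-2251799813685248.0) == true   // -2^51: the false positive
// observesInt52Overflow(2251799813685248.0)  == true   // 2^51 overflows
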
TCA emitServiceReqWork(CodeBlock& cb, TCA start, bool persist, SRFlags flags,
                       ServiceRequest req, const ServiceReqArgVec& argv) {
  MacroAssembler a { cb };
  folly::Optional<CodeCursor> maybeCc = folly::none;
  if (start != cb.frontier()) {
    maybeCc.emplace(cb, start);
  }

  // There are 6 instructions after the argument-shuffling, and they're all
  // single instructions (i.e. not macros). There are up to 4 instructions per
  // argument (it may take up to 4 instructions to move a 64-bit immediate into
  // a register).
  constexpr auto kMaxStubSpace = 6 * vixl::kInstructionSize +
    (4 * maxArgReg()) * vixl::kInstructionSize;

  for (size_t i = 0; i < argv.size(); ++i) {
    auto reg = serviceReqArgReg(i);
    auto const& arg = argv[i];
    switch (arg.m_kind) {
      case ServiceReqArgInfo::Immediate:
        a.  Mov  (reg, arg.m_imm);
        break;
      case ServiceReqArgInfo::CondCode:
        not_implemented();
        break;
      default: not_reached();
    }
  }

  // Save VM regs.
  a.    Str  (rVmFp, rGContextReg[offsetof(VMExecutionContext, m_fp)]);
  a.    Str  (rVmSp, rGContextReg[offsetof(VMExecutionContext, m_stack) +
                                  Stack::topOfStackOffset()]);

  if (persist) {
    a.  Mov  (rAsm, 0);
  } else {
    a.  Mov  (rAsm, reinterpret_cast<intptr_t>(start));
  }
  a.    Mov  (argReg(0), req);

  // The x64 equivalent loads to rax. I knew this was a trap.
  if (flags & SRFlags::JmpInsteadOfRet) {
    a.  Ldr  (rAsm, MemOperand(sp, 8, PostIndex));
    a.  Br   (rAsm);
  } else {
    a.  Ret  ();
  }
  a.    Brk  (0);

  if (!persist) {
    assert(cb.frontier() - start <= kMaxStubSpace);
    while (cb.frontier() - start < kMaxStubSpace) {
      a.  Nop  ();
    }
  }

  return start;
}