void disassemble(const MacroAssemblerCodePtr<DisassemblyPtrTag>& codePtr, size_t size, const char* prefix, PrintStream& out)
{
    if (tryToDisassemble(codePtr, size, prefix, out))
        return;

    out.printf("%sdisassembly not available for range %p...%p\n", prefix, codePtr.untaggedExecutableAddress(), codePtr.untaggedExecutableAddress<char*>() + size);
}
void disassemble(const MacroAssemblerCodePtr& codePtr, size_t size, const char* prefix, PrintStream& out)
{
    if (tryToDisassemble(codePtr, size, prefix, out))
        return;

    out.printf("%sdisassembly not available for range %p...%p\n", prefix, codePtr.executableAddress(), static_cast<char*>(codePtr.executableAddress()) + size);
}
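// Hedged usage sketch (not part of the original source): how a caller might
// route freshly generated code through disassemble() above. `codeRef` stands
// in for any MacroAssemblerCodeRef produced by a LinkBuffer; WTF::dataFile()
// is the usual PrintStream sink for JSC diagnostics. If no disassembler
// backend is compiled in, tryToDisassemble() returns false and the fallback
// printf reports only the raw address range.
static void dumpGeneratedCode(const MacroAssemblerCodeRef& codeRef)
{
    disassemble(codeRef.code(), codeRef.size(), "    ", WTF::dataFile());
}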
void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
{
    JumpList slowCases;

    // Guard the speculation: the callee must be a cell with the expected
    // Structure and the expected executable.
    slowCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
    slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(expectedStructure)));
    slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable)));

    loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1);
    emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);

    Call call = nearCall();
    Jump done = jump();

    slowCases.link(this);
    move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2);
    restoreReturnAddressBeforeReturn(regT2);
    Jump slow = jump();

    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);

    patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
    patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
    patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallThunkGenerator).code()));

    RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
        FINALIZE_CODE(
            patchBuffer,
            ("Baseline closure call stub for %s, return point %p, target %p (%s)",
                toCString(*m_codeBlock).data(),
                callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(),
                codePtr.executableAddress(),
                toCString(pointerDump(calleeCodeBlock)).data())),
        *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable,
        callLinkInfo->codeOrigin));

    RepatchBuffer repatchBuffer(m_codeBlock);

    repatchBuffer.replaceWithJump(
        RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin),
        CodeLocationLabel(stubRoutine->code().code()));
    repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallThunkGenerator).code());

    callLinkInfo->stub = stubRoutine.release();
}
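// Hedged sketch (illustrative only, not JSC code): the speculation the stub
// above encodes, restated as plain C++. A closure call caches both the
// Structure and the ExecutableBase of the callee, so distinct JSFunction
// objects that share one executable still take the fast nearCall() path;
// everything else bails to the virtual call thunk. All names below are
// invented for the sketch.
struct SketchCallee {
    const void* structure;
    const void* executable;
};

static bool closureCallGuardsPass(const SketchCallee& callee, const void* expectedStructure, const void* expectedExecutable)
{
    if (callee.structure != expectedStructure)
        return false; // corresponds to the structureOffset() branchPtr above
    if (callee.executable != expectedExecutable)
        return false; // corresponds to the offsetOfExecutable() branchPtr above
    return true; // fall through to the nearCall() fast path
}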
inline void* linkFor(ExecState* execCallee, ReturnAddressPtr returnAddress, CodeSpecializationKind kind)
{
    ExecState* exec = execCallee->callerFrame();
    JSGlobalData* globalData = &exec->globalData();
    NativeCallFrameTracer tracer(globalData, exec);

    JSValue calleeAsValue = execCallee->calleeAsValue();
    JSCell* calleeAsFunctionCell = getJSFunction(calleeAsValue);
    if (!calleeAsFunctionCell)
        return handleHostCall(execCallee, calleeAsValue, kind);

    JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell);
    execCallee->setScopeChain(callee->scopeUnchecked());
    ExecutableBase* executable = callee->executable();

    MacroAssemblerCodePtr codePtr;
    CodeBlock* codeBlock = 0;
    if (executable->isHostFunction())
        codePtr = executable->generatedJITCodeFor(kind).addressForCall();
    else {
        FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
        JSObject* error = functionExecutable->compileFor(execCallee, callee->scope(), kind);
        if (error) {
            globalData->exception = createStackOverflowError(exec);
            return 0;
        }
        codeBlock = &functionExecutable->generatedBytecodeFor(kind);
        // Use the arity-check entry point when too few arguments were passed.
        if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()))
            codePtr = functionExecutable->generatedJITCodeWithArityCheckFor(kind);
        else
            codePtr = functionExecutable->generatedJITCodeFor(kind).addressForCall();
    }

    CallLinkInfo& callLinkInfo = exec->codeBlock()->getCallLinkInfo(returnAddress);
    // Only link the call site once it has already been seen; the first call
    // just records that the site is warm.
    if (!callLinkInfo.seenOnce())
        callLinkInfo.setSeen();
    else
        dfgLinkFor(execCallee, callLinkInfo, codeBlock, callee, codePtr, kind);
    return codePtr.executableAddress();
}
inline char* linkFor(ExecState* execCallee, CodeSpecializationKind kind)
{
    ExecState* exec = execCallee->callerFrame();
    VM* vm = &exec->vm();
    NativeCallFrameTracer tracer(vm, exec);

    JSValue calleeAsValue = execCallee->calleeAsValue();
    JSCell* calleeAsFunctionCell = getJSFunction(calleeAsValue);
    if (!calleeAsFunctionCell)
        return reinterpret_cast<char*>(handleHostCall(execCallee, calleeAsValue, kind));

    JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell);
    execCallee->setScope(callee->scopeUnchecked());
    ExecutableBase* executable = callee->executable();

    MacroAssemblerCodePtr codePtr;
    CodeBlock* codeBlock = 0;
    if (executable->isHostFunction())
        codePtr = executable->generatedJITCodeFor(kind)->addressForCall();
    else {
        FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
        JSObject* error = functionExecutable->prepareForExecution(execCallee, callee->scope(), kind);
        if (error) {
            vm->throwException(exec, createStackOverflowError(exec));
            return reinterpret_cast<char*>(vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress());
        }
        codeBlock = functionExecutable->codeBlockFor(kind);
        // Use the arity-check entry point when too few arguments were passed.
        if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()))
            codePtr = functionExecutable->generatedJITCodeWithArityCheckFor(kind);
        else
            codePtr = functionExecutable->generatedJITCodeFor(kind)->addressForCall();
    }

    CallLinkInfo& callLinkInfo = exec->codeBlock()->getCallLinkInfo(execCallee->returnPC());
    // Only link the call site once it has already been seen; the first call
    // just records that the site is warm.
    if (!callLinkInfo.seenOnce())
        callLinkInfo.setSeen();
    else
        linkFor(execCallee, callLinkInfo, codeBlock, callee, codePtr, kind);
    return reinterpret_cast<char*>(codePtr.executableAddress());
}
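// Hedged, self-contained model (not the JSC API): both linkFor() variants
// above apply the same "link on the second call" heuristic. The first time a
// call site reaches the slow path it is merely marked as seen; only a repeat
// visit pays the cost of repatching the machine-code call site to jump
// straight to the callee. A toy version of that control flow:
#include <cstdio>

struct SketchCallLinkInfo {
    bool seen { false };
    bool seenOnce() const { return seen; }
    void setSeen() { seen = true; }
};

static void sketchLinkFor(SketchCallLinkInfo& info, int callNumber)
{
    if (!info.seenOnce()) {
        // First call: stay unlinked; just record that the site is warm.
        info.setSeen();
        std::printf("call %d: slow path, marking call site as seen\n", callNumber);
    } else {
        // Second call: this is where the real code invokes dfgLinkFor()/linkFor()
        // to repatch the call instruction to target codePtr directly.
        std::printf("call %d: repatching call site to the callee\n", callNumber);
    }
}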
bool tryToDisassemble(const MacroAssemblerCodePtr& codePtr, size_t size, const char* prefix, PrintStream& out)
{
    ud_t disassembler;
    ud_init(&disassembler);
    ud_set_input_buffer(&disassembler, static_cast<unsigned char*>(codePtr.executableAddress()), size);
#if CPU(X86_64)
    ud_set_mode(&disassembler, 64);
#else
    ud_set_mode(&disassembler, 32);
#endif
    ud_set_pc(&disassembler, bitwise_cast<uintptr_t>(codePtr.executableAddress()));
    ud_set_syntax(&disassembler, UD_SYN_ATT);

    uint64_t currentPC = disassembler.pc;
    while (ud_disassemble(&disassembler)) {
        // Print each instruction prefixed with the PC it was decoded at.
        char pcString[20];
        snprintf(pcString, sizeof(pcString), "0x%lx", static_cast<unsigned long>(currentPC));
        out.printf("%s%16s: %s\n", prefix, pcString, ud_insn_asm(&disassembler));
        currentPC = disassembler.pc;
    }

    return true;
}
bool tryToDisassembleWithLLVM(
    const MacroAssemblerCodePtr& codePtr, size_t size, const char* prefix,
    PrintStream& out, InstructionSubsetHint)
{
    initializeLLVM();

    const char* triple;
#if CPU(X86_64)
    triple = "x86_64-apple-darwin";
#elif CPU(X86)
    triple = "x86-apple-darwin";
#elif CPU(ARM64)
    triple = "arm64-apple-darwin";
#else
#error "LLVM disassembler currently not supported on this CPU."
#endif

    char symbolString[symbolStringSize];
    LLVMDisasmContextRef disassemblyContext =
        llvm->CreateDisasm(triple, symbolString, 0, 0, symbolLookupCallback);
    RELEASE_ASSERT(disassemblyContext);

    char pcString[20];
    char instructionString[1000];
    uint8_t* pc = static_cast<uint8_t*>(codePtr.executableAddress());
    uint8_t* end = pc + size;
    while (pc < end) {
        snprintf(
            pcString, sizeof(pcString), "0x%lx",
            static_cast<unsigned long>(bitwise_cast<uintptr_t>(pc)));
        size_t instructionSize = llvm->DisasmInstruction(
            disassemblyContext, pc, end - pc, bitwise_cast<uintptr_t>(pc),
            instructionString, sizeof(instructionString));
        if (!instructionSize)
            snprintf(instructionString, sizeof(instructionString), ".byte 0x%02x", *pc++);
        else
            pc += instructionSize;
        out.printf("%s%16s: %s\n", prefix, pcString, instructionString);
    }

    llvm->DisasmDispose(disassemblyContext);
    return true;
}
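// Hedged standalone sketch: the same decode loop written against the plain
// LLVM-C API instead of the llvm-> soft-link wrapper used above. The
// LLVMCreateDisasm/LLVMDisasmInstruction/LLVMDisasmDispose sequence and the
// ".byte" fallback for undecodable bytes mirror the function above; the
// target-initialization calls and the bare printf sink are assumptions of
// this sketch.
#include <llvm-c/Disassembler.h>
#include <llvm-c/Target.h>
#include <cstdint>
#include <cstdio>

static void disassembleBuffer(uint8_t* pc, size_t size, const char* triple)
{
    LLVMInitializeAllTargetInfos();
    LLVMInitializeAllTargetMCs();
    LLVMInitializeAllDisassemblers();

    LLVMDisasmContextRef context = LLVMCreateDisasm(triple, nullptr, 0, nullptr, nullptr);
    if (!context)
        return;

    char instructionString[1000];
    uint8_t* end = pc + size;
    while (pc < end) {
        size_t instructionSize = LLVMDisasmInstruction(
            context, pc, end - pc, reinterpret_cast<uintptr_t>(pc),
            instructionString, sizeof(instructionString));
        if (!instructionSize) {
            // Undecodable byte: emit it raw and advance by one, as above.
            std::printf("%16p: .byte 0x%02x\n", static_cast<void*>(pc), *pc);
            pc++;
        } else {
            std::printf("%16p: %s\n", static_cast<void*>(pc), instructionString);
            pc += instructionSize;
        }
    }

    LLVMDisasmDispose(context);
}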
MacroAssemblerCodeRef generateRegisterPreservationWrapper(VM& vm, ExecutableBase* executable, MacroAssemblerCodePtr target)
{
#if ENABLE(FTL_JIT)
    // We shouldn't ever be generating wrappers for native functions.
    RegisterSet toSave = registersToPreserve();
    ptrdiff_t offset = registerPreservationOffset();

    AssemblyHelpers jit(&vm, 0);

    jit.preserveReturnAddressAfterCall(GPRInfo::regT1);
    jit.load32(
        AssemblyHelpers::Address(
            AssemblyHelpers::stackPointerRegister,
            (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) + PayloadOffset),
        GPRInfo::regT2);

    // Place the stack pointer where we want it to be.
    jit.subPtr(AssemblyHelpers::TrustedImm32(offset), AssemblyHelpers::stackPointerRegister);

    // Compute the number of things we will be copying.
    jit.add32(
        AssemblyHelpers::TrustedImm32(
            JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize),
        GPRInfo::regT2);

    ASSERT(!toSave.get(GPRInfo::regT4));
    jit.move(AssemblyHelpers::stackPointerRegister, GPRInfo::regT4);

    AssemblyHelpers::Label loop = jit.label();
    jit.sub32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2);
    jit.load64(AssemblyHelpers::Address(GPRInfo::regT4, offset), GPRInfo::regT0);
    jit.store64(GPRInfo::regT0, GPRInfo::regT4);
    jit.addPtr(AssemblyHelpers::TrustedImm32(sizeof(Register)), GPRInfo::regT4);
    jit.branchTest32(AssemblyHelpers::NonZero, GPRInfo::regT2).linkTo(loop, &jit);

    // At this point regT4 + offset points to where we save things.
    ptrdiff_t currentOffset = 0;
    jit.storePtr(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT4, currentOffset));

    for (GPRReg gpr = AssemblyHelpers::firstRegister(); gpr <= AssemblyHelpers::lastRegister(); gpr = static_cast<GPRReg>(gpr + 1)) {
        if (!toSave.get(gpr))
            continue;
        currentOffset += sizeof(Register);
        jit.store64(gpr, AssemblyHelpers::Address(GPRInfo::regT4, currentOffset));
    }

    // Assume that there aren't any saved FP registers.

    // Restore the tag registers.
    jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
    jit.add64(AssemblyHelpers::TrustedImm32(TagMask - TagTypeNumber), GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);

    jit.move(
        AssemblyHelpers::TrustedImmPtr(
            vm.getCTIStub(registerRestorationThunkGenerator).code().executableAddress()),
        GPRInfo::nonArgGPR0);
    jit.restoreReturnAddressBeforeReturn(GPRInfo::nonArgGPR0);
    AssemblyHelpers::Jump jump = jit.jump();

    LinkBuffer linkBuffer(vm, &jit, GLOBAL_THUNK_ID);
    linkBuffer.link(jump, CodeLocationLabel(target));

    if (Options::verboseFTLToJSThunk())
        dataLog("Need a thunk for calls from FTL to non-FTL version of ", *executable, "\n");

    return FINALIZE_DFG_CODE(
        linkBuffer,
        ("Register preservation wrapper for %s/%s, %p",
            toCString(executable->hashFor(CodeForCall)).data(),
            toCString(executable->hashFor(CodeForConstruct)).data(),
            target.executableAddress()));
#else // ENABLE(FTL_JIT)
    UNUSED_PARAM(vm);
    UNUSED_PARAM(executable);
    UNUSED_PARAM(target);
    // We don't support non-FTL builds for two reasons:
    // - It just so happens that currently only the FTL bottoms out in this code.
    // - The code above uses 64-bit instructions. It doesn't necessarily have to; it would be
    //   easy to change it so that it doesn't. But obviously making that change would be a
    //   prerequisite to removing this #if.
    UNREACHABLE_FOR_PLATFORM();
    return MacroAssemblerCodeRef();
#endif // ENABLE(FTL_JIT)
}
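// Hedged illustration (plain C++, no JIT; all names invented): the net effect
// of the emitted copy loop above. The frame header and arguments are slid
// down by the preservation offset, and the gap this opens just above the
// copied frame receives the return address followed by the saved GPRs, in
// the same order the store64 loop emits them.
#include <cstring>
#include <cstdint>
#include <vector>

static void slideFrameAndSaveRegisters(
    uint64_t* newStackPointer, size_t frameWords, uint64_t returnAddress,
    const std::vector<uint64_t>& savedRegisters)
{
    size_t gapWords = 1 + savedRegisters.size(); // models registerPreservationOffset()

    // Equivalent of the load64/store64 loop: copy the frame down by gapWords slots.
    std::memmove(newStackPointer, newStackPointer + gapWords, frameWords * sizeof(uint64_t));

    // Equivalent of the storePtr/store64 sequence: fill the freed gap with the
    // preserved return address and then each callee register.
    uint64_t* saveArea = newStackPointer + frameWords;
    saveArea[0] = returnAddress;
    for (size_t i = 0; i < savedRegisters.size(); i++)
        saveArea[1 + i] = savedRegisters[i];
}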