void
wasm::ToggleProfiling(const Code& code, const CallSite& callSite, bool enabled)
{
    if (callSite.kind() != CallSite::FuncDef)
        return;

    uint8_t* callerRetAddr = code.segment().base() + callSite.returnAddressOffset();

#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
    void* callee = X86Encoding::GetRel32Target(callerRetAddr);
#elif defined(JS_CODEGEN_ARM)
    uint8_t* caller = callerRetAddr - 4;
    Instruction* callerInsn = reinterpret_cast<Instruction*>(caller);
    BOffImm calleeOffset;
    callerInsn->as<InstBLImm>()->extractImm(&calleeOffset);
    void* callee = calleeOffset.getDest(callerInsn);
#elif defined(JS_CODEGEN_ARM64)
    MOZ_CRASH();
    void* callee = nullptr;
    (void)callerRetAddr;
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    uint8_t* caller = callerRetAddr - 2 * sizeof(uint32_t);
    InstImm* callerInsn = reinterpret_cast<InstImm*>(caller);
    BOffImm16 calleeOffset;
    callerInsn->extractImm16(&calleeOffset);
    void* callee = calleeOffset.getDest(reinterpret_cast<Instruction*>(caller));
#elif defined(JS_CODEGEN_NONE)
    MOZ_CRASH();
    void* callee = nullptr;
#else
# error "Missing architecture"
#endif

    const CodeRange* codeRange = code.lookupRange(callee);
    if (!codeRange->isFunction())
        return;

    uint8_t* from = code.segment().base() + codeRange->funcNonProfilingEntry();
    uint8_t* to = code.segment().base() + codeRange->funcProfilingEntry();
    if (!enabled)
        Swap(from, to);

    MOZ_ASSERT(callee == from);

#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
    X86Encoding::SetRel32(callerRetAddr, to);
#elif defined(JS_CODEGEN_ARM)
    new (caller) InstBLImm(BOffImm(to - caller), Assembler::Always);
#elif defined(JS_CODEGEN_ARM64)
    (void)to;
    MOZ_CRASH();
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
    new (caller) InstImm(op_regimm, zero, rt_bgezal, BOffImm16(to - caller));
#elif defined(JS_CODEGEN_NONE)
    MOZ_CRASH();
#else
# error "Missing architecture"
#endif
}
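// Note on the swap idiom used above (and in the helpers below): "from" is the
// entry a caller is expected to target right now and "to" is the entry it
// should target afterwards. When profiling is being turned off the two are
// swapped, so one patching path handles both directions, and the MOZ_ASSERT
// checks that the call really does target "from" before it is redirected.

// UpdateEntry applies the same idiom to a single stored code pointer,
// redirecting it between the non-profiling and profiling entry of its
// function's CodeRange.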
static void
UpdateEntry(const Code& code, bool profilingEnabled, void** entry)
{
    const CodeRange& codeRange = *code.lookupRange(*entry);
    void* from = code.segment().base() + codeRange.funcNonProfilingEntry();
    void* to = code.segment().base() + codeRange.funcProfilingEntry();

    if (!profilingEnabled)
        Swap(from, to);

    MOZ_ASSERT(*entry == from);
    *entry = to;
}
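// Illustrative only: a minimal sketch of how UpdateEntry above could be applied
// to a flat array of stored code pointers when the profiling mode flips. The
// helper name and the "entries"/"numEntries" parameters are hypothetical and
// not part of the surrounding code.
static void
UpdateEntries(const Code& code, bool profilingEnabled, void** entries, size_t numEntries)
{
    for (size_t i = 0; i < numEntries; i++)
        UpdateEntry(code, profilingEnabled, &entries[i]);
}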
void
LIRGeneratorX86Shared::lowerForCompFx4(LSimdBinaryCompFx4* ins, MSimdBinaryComp* mir,
                                       MDefinition* lhs, MDefinition* rhs)
{
    // Swap the operands around to fit the instructions that x86 actually has.
    // We do this here, before register allocation, so that we don't need
    // temporaries and copying afterwards.
    switch (mir->operation()) {
      case MSimdBinaryComp::greaterThan:
      case MSimdBinaryComp::greaterThanOrEqual:
        mir->reverse();
        Swap(lhs, rhs);
        break;
      default:
        break;
    }

    lowerForFPU(ins, mir, lhs, rhs);
}
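// Example of the rewrite above: greaterThan(lhs, rhs) is lowered as
// lessThan(rhs, lhs), since the SSE comparison predicates only cover the
// eq/lt/le forms (and their negations), not gt/ge.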
void
MBasicBlock::setLoopHeader(MBasicBlock* newBackedge)
{
    MOZ_ASSERT(!isLoopHeader());
    kind_ = LOOP_HEADER;

    size_t numPreds = numPredecessors();
    MOZ_ASSERT(numPreds != 0);

    size_t lastIndex = numPreds - 1;
    size_t oldIndex = 0;
    for (; ; ++oldIndex) {
        MOZ_ASSERT(oldIndex < numPreds);
        MBasicBlock* pred = getPredecessor(oldIndex);
        if (pred == newBackedge)
            break;
    }

    // Set the loop backedge to be the last element in predecessors_.
    Swap(predecessors_[oldIndex], predecessors_[lastIndex]);

    // If we have phis, reorder their operands accordingly.
    if (!phisEmpty()) {
        getPredecessor(oldIndex)->setSuccessorWithPhis(this, oldIndex);
        getPredecessor(lastIndex)->setSuccessorWithPhis(this, lastIndex);
        for (MPhiIterator iter(phisBegin()), end(phisEnd()); iter != end; ++iter) {
            MPhi* phi = *iter;
            MDefinition* last = phi->getOperand(oldIndex);
            MDefinition* old = phi->getOperand(lastIndex);
            phi->replaceOperand(oldIndex, old);
            phi->replaceOperand(lastIndex, last);
        }
    }

    MOZ_ASSERT(newBackedge->loopHeaderOfBackedge() == this);
    MOZ_ASSERT(backedge() == newBackedge);
}
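// Worked example for setLoopHeader above: with predecessors [A, B, C] where B
// is the new backedge, oldIndex == 1 and lastIndex == 2. After the Swap the
// order is [A, C, B], and each phi's operands at indices 1 and 2 are exchanged
// the same way, so getOperand(i) still corresponds to getPredecessor(i).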
bool
Module::setProfilingEnabled(JSContext* cx, bool enabled)
{
    MOZ_ASSERT(dynamicallyLinked_);
    MOZ_ASSERT(!activation());

    if (profilingEnabled_ == enabled)
        return true;

    // When enabled, generate profiling labels for every name in funcNames_
    // that is the name of some Function CodeRange. This involves malloc() so
    // do it now since, once we start sampling, we'll be in a signal-handling
    // context where we cannot malloc.
    if (enabled) {
        if (!funcLabels_.resize(module_->numFuncs)) {
            ReportOutOfMemory(cx);
            return false;
        }
        for (const CodeRange& codeRange : module_->codeRanges) {
            if (!codeRange.isFunction())
                continue;

            UniqueChars owner;
            const char* funcName = getFuncName(cx, codeRange.funcIndex(), &owner);
            if (!funcName)
                return false;

            UniqueChars label(JS_smprintf("%s (%s:%u)", funcName, module_->filename.get(),
                                          codeRange.funcLineOrBytecode()));
            if (!label) {
                ReportOutOfMemory(cx);
                return false;
            }

            funcLabels_[codeRange.funcIndex()] = Move(label);
        }
    } else {
        funcLabels_.clear();
    }

    // Patch callsites and returns to execute profiling prologues/epilogues.
    {
        AutoWritableJitCode awjc(cx->runtime(), code(), codeBytes());
        AutoFlushICache afc("Module::setProfilingEnabled");
        AutoFlushICache::setRange(uintptr_t(code()), codeBytes());

        for (const CallSite& callSite : module_->callSites)
            EnableProfilingPrologue(*this, callSite, enabled);

        for (const CodeRange& codeRange : module_->codeRanges)
            EnableProfilingEpilogue(*this, codeRange, enabled);
    }

    // Update the function-pointer tables to point to profiling prologues.
    for (FuncPtrTable& funcPtrTable : funcPtrTables_) {
        auto array = reinterpret_cast<void**>(globalData() + funcPtrTable.globalDataOffset);
        for (size_t i = 0; i < funcPtrTable.numElems; i++) {
            const CodeRange* codeRange = lookupCodeRange(array[i]);
            void* from = code() + codeRange->funcNonProfilingEntry();
            void* to = code() + codeRange->funcProfilingEntry();

            if (!enabled)
                Swap(from, to);

            MOZ_ASSERT(array[i] == from);
            array[i] = to;
        }
    }

    profilingEnabled_ = enabled;
    return true;
}
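// Illustrative only: the labels built in setProfilingEnabled are meant to be
// read later without any allocation (e.g. from the sampler's signal-handler
// context), so consuming them is nothing more than an indexed lookup. The
// accessor below is a sketch for illustration; its name and exact signature
// are not taken from the code above.
const char*
Module::profilingLabel(uint32_t funcIndex) const
{
    MOZ_ASSERT(profilingEnabled_);
    return funcLabels_[funcIndex].get();
}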
// Patch all internal (asm.js->asm.js) callsites to call the profiling
// prologues:
void
wasm::EnableProfilingPrologue(const Module& module, const CallSite& callSite, bool enabled)
{
    if (callSite.kind() != CallSite::Relative)
        return;

    uint8_t* callerRetAddr = module.code() + callSite.returnAddressOffset();

#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
    void* callee = X86Encoding::GetRel32Target(callerRetAddr);
#elif defined(JS_CODEGEN_ARM)
    uint8_t* caller = callerRetAddr - 4;
    Instruction* callerInsn = reinterpret_cast<Instruction*>(caller);
    BOffImm calleeOffset;
    callerInsn->as<InstBLImm>()->extractImm(&calleeOffset);
    void* callee = calleeOffset.getDest(callerInsn);
#elif defined(JS_CODEGEN_ARM64)
    MOZ_CRASH();
    void* callee = nullptr;
    (void)callerRetAddr;
#elif defined(JS_CODEGEN_MIPS32)
    Instruction* instr = (Instruction*)(callerRetAddr - 4 * sizeof(uint32_t));
    void* callee = (void*)Assembler::ExtractLuiOriValue(instr, instr->next());
#elif defined(JS_CODEGEN_MIPS64)
    Instruction* instr = (Instruction*)(callerRetAddr - 6 * sizeof(uint32_t));
    void* callee = (void*)Assembler::ExtractLoad64Value(instr);
#elif defined(JS_CODEGEN_NONE)
    MOZ_CRASH();
    void* callee = nullptr;
#else
# error "Missing architecture"
#endif

    const CodeRange* codeRange = module.lookupCodeRange(callee);
    if (!codeRange->isFunction())
        return;

    uint8_t* from = module.code() + codeRange->funcNonProfilingEntry();
    uint8_t* to = module.code() + codeRange->funcProfilingEntry();
    if (!enabled)
        Swap(from, to);

    MOZ_ASSERT(callee == from);

#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
    X86Encoding::SetRel32(callerRetAddr, to);
#elif defined(JS_CODEGEN_ARM)
    new (caller) InstBLImm(BOffImm(to - caller), Assembler::Always);
#elif defined(JS_CODEGEN_ARM64)
    (void)to;
    MOZ_CRASH();
#elif defined(JS_CODEGEN_MIPS32)
    Assembler::WriteLuiOriInstructions(instr, instr->next(), ScratchRegister, (uint32_t)to);
    instr[2] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
#elif defined(JS_CODEGEN_MIPS64)
    Assembler::WriteLoad64Instructions(instr, ScratchRegister, (uint64_t)to);
    instr[4] = InstReg(op_special, ScratchRegister, zero, ra, ff_jalr);
#elif defined(JS_CODEGEN_NONE)
    MOZ_CRASH();
#else
# error "Missing architecture"
#endif
}
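// For reference: on x86/x64 the call's rel32 operand occupies the four bytes
// just before the return address and is encoded relative to the return address
// itself. Assuming that encoding, the patching in the x86/x64 branches above
// amounts to (sketch only; the real work is done by the X86Encoding helpers):
//   callee = callerRetAddr + *reinterpret_cast<int32_t*>(callerRetAddr - 4);
//   *reinterpret_cast<int32_t*>(callerRetAddr - 4) = int32_t(to - callerRetAddr);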