// If +disable+ is set, then the method is tagged as not being
// available for JIT.
void MachineCode::deoptimize(STATE, CompiledCode* original,
                             jit::RuntimeDataHolder* rd,
                             bool disable)
{
#ifdef ENABLE_LLVM
  LLVMState* ls = LLVMState::get(state);
  ls->start_method_update();

  bool still_others = false;

  for(int i = 0; i < cMaxSpecializations; i++) {
    if(!rd) {
      specializations[i].class_id = 0;
      specializations[i].execute = 0;
      specializations[i].jit_data = 0;
    } else if(specializations[i].jit_data == rd) {
      specializations[i].class_id = 0;
      specializations[i].execute = 0;
      specializations[i].jit_data = 0;
    } else if(specializations[i].jit_data) {
      still_others = true;
    }
  }

  if(!rd || original->jit_data() == rd) {
    unspecialized = 0;
    original->set_jit_data(0);
  }

  if(original->jit_data()) still_others = true;

  if(!still_others) {
    execute_status_ = eInterpret;

    // This resets execute to use the interpreter
    original->set_executor(fallback);
  }

  if(disable) {
    execute_status_ = eJITDisable;
    original->set_executor(fallback);
  } else if(execute_status_ == eJITDisable && still_others) {
    execute_status_ = eJIT;
  }

  if(original->execute == CompiledCode::specialized_executor) {
    bool found = false;

    for(int i = 0; i < cMaxSpecializations; i++) {
      if(specializations[i].execute) found = true;
    }

    if(unspecialized) found = true;

    if(!found) rubinius::bug("no specializations!");
  }

  ls->end_method_update();
#endif
}
void RuntimeDataHolder::cleanup(State* state, CodeManager* cm) {
  LLVMState* ls = cm->shared()->llvm_state;
  assert(ls);

  if(ls->config().jit_removal_print) {
    void* fin = (void*)((intptr_t)native_func_ + native_size_);

    std::cout << "Remove function: " << function_ << " / "
              << native_func_ << "-" << fin << "\n";
  }

  ls->remove(function_);
}
void* LLVMCompiler::function_pointer(STATE) {
  if(!mci_) {
    if(!function_) return NULL;
    mci_ = new llvm::MachineCodeInfo();
    LLVMState* ls = LLVMState::get(state);
    ls->engine()->runJITOnFunction(function_, mci_);

    if(state->shared.config.jit_dump_code & cMachineCode) {
      llvm::outs() << "[[[ JIT Machine Code: " << function_->getName() << " ]]]\n";
      assembler_x86::AssemblerX86::show_buffer(mci_->address(), mci_->size(), false, NULL);
    }

    ls->add_code_bytes(mci_->size());
  }

  return mci_->address();
}
Object* System::vm_jit_info(STATE) {
  if(state->shared.config.jit_disabled) return Qnil;

#ifdef ENABLE_LLVM
  LLVMState* ls = LLVMState::get(state);
  Array* ary = Array::create(state, 5);

  ary->set(state, 0, Integer::from(state, ls->jitted_methods()));
  ary->set(state, 1, Integer::from(state, ls->code_bytes()));
  ary->set(state, 2, Integer::from(state, ls->time_spent));
  ary->set(state, 3, Integer::from(state, ls->accessors_inlined()));
  ary->set(state, 4, Integer::from(state, ls->uncommons_taken()));

  return ary;
#else
  return Qnil;
#endif
}
void pause() {
  utilities::thread::Mutex::LockGuard guard(mutex_);

  // it's idle, i.e. paused.
  if(state == cIdle || state == cPaused) return;

  pause_ = true;

  while(!paused_ &&
        (ls_->run_state() == ManagedThread::eRunning ||
         ls_->run_state() == ManagedThread::eIndependent)) {
    pause_condition_.wait(mutex_);
  }
}
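// The pause() above is one half of a pause handshake with the compiler
// thread's perform() loop: the caller sets pause_ and waits on
// pause_condition_ until the worker acknowledges by setting paused_ and
// signalling. A minimal, self-contained sketch of that handshake using
// standard C++ primitives (hypothetical names, not the Rubinius API):
#include <condition_variable>
#include <mutex>

class PausableWorker {
  std::mutex mutex_;
  std::condition_variable cond_;        // worker waits here while paused
  std::condition_variable pause_cond_;  // pausing thread waits here for the ack
  bool pause_ = false;
  bool paused_ = false;
  bool stop_ = false;

public:
  void pause() {
    std::unique_lock<std::mutex> lock(mutex_);
    pause_ = true;
    // Wait until the worker acknowledges the pause request.
    pause_cond_.wait(lock, [this] { return paused_; });
  }

  void unpause() {
    std::lock_guard<std::mutex> lock(mutex_);
    pause_ = false;
    cond_.notify_all();
  }

  void run() {
    for(;;) {
      std::unique_lock<std::mutex> lock(mutex_);
      if(pause_) {
        paused_ = true;
        pause_cond_.notify_all();                  // acknowledge the pause
        cond_.wait(lock, [this] { return !pause_; });
        paused_ = false;
      }
      if(stop_) return;
      // ... dequeue one request under the lock, drop the lock, process it ...
    }
  }
};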
static std::vector<Instruction> computeAliasingInstructions(
    const LLVMState &State, const Instruction &Instr,
    size_t MaxAliasingInstructions) {
  // Randomly iterate the set of instructions.
  std::vector<unsigned> Opcodes;
  Opcodes.resize(State.getInstrInfo().getNumOpcodes());
  std::iota(Opcodes.begin(), Opcodes.end(), 0U);
  std::shuffle(Opcodes.begin(), Opcodes.end(), randomGenerator());

  std::vector<Instruction> AliasingInstructions;
  for (const unsigned OtherOpcode : Opcodes) {
    if (OtherOpcode == Instr.Description->getOpcode())
      continue;
    const Instruction &OtherInstr = State.getIC().getInstr(OtherOpcode);
    if (OtherInstr.hasMemoryOperands())
      continue;
    if (Instr.hasAliasingRegistersThrough(OtherInstr))
      AliasingInstructions.push_back(std::move(OtherInstr));
    if (AliasingInstructions.size() >= MaxAliasingInstructions)
      break;
  }
  return AliasingInstructions;
}
static std::vector<InstructionTemplate> generateSnippetUsingStaticRenaming(
    const LLVMState &State, const InstructionTemplate &IT,
    const ArrayRef<const Variable *> TiedVariables,
    const BitVector *ScratchSpaceAliasedRegs) {
  std::vector<InstructionTemplate> Instructions;
  // Assign registers to variables in a round-robin manner. This is simple but
  // ensures that the most register-constrained variable does not get starved.
  std::vector<BitVector> PossibleRegsForVar;
  for (const Variable *Var : TiedVariables) {
    assert(Var);
    const Operand &Op = IT.Instr.getPrimaryOperand(*Var);
    assert(Op.isReg());
    BitVector PossibleRegs = State.getRATC().emptyRegisters();
    if (ScratchSpaceAliasedRegs) {
      PossibleRegs |= *ScratchSpaceAliasedRegs;
    }
    PossibleRegs.flip();
    PossibleRegs &= Op.getRegisterAliasing().sourceBits();
    PossibleRegsForVar.push_back(std::move(PossibleRegs));
  }
  SmallVector<int, 2> Iterators(TiedVariables.size(), 0);
  while (true) {
    InstructionTemplate TmpIT = IT;
    // Find a possible register for each variable in turn, marking the
    // register as taken.
    for (size_t VarId = 0; VarId < TiedVariables.size(); ++VarId) {
      const int NextPossibleReg =
          PossibleRegsForVar[VarId].find_next(Iterators[VarId]);
      if (NextPossibleReg <= 0) {
        return Instructions;
      }
      TmpIT.getValueFor(*TiedVariables[VarId]) =
          llvm::MCOperand::createReg(NextPossibleReg);
      // Bump iterator.
      Iterators[VarId] = NextPossibleReg;
      // Prevent other variables from using the register.
      for (BitVector &OtherPossibleRegs : PossibleRegsForVar) {
        OtherPossibleRegs.reset(NextPossibleReg);
      }
    }
    Instructions.push_back(std::move(TmpIT));
  }
}
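// The round-robin assignment above keeps one cursor per tied variable and
// advances each cursor past registers that earlier assignments already
// claimed, stopping when any variable runs out of candidates. A minimal,
// self-contained illustration of that allocation loop over plain bit masks
// (hypothetical types, not the llvm-exegesis API):
#include <cstddef>
#include <vector>

// Returns one register assignment per round; each round gives every variable
// a distinct register, or the function stops when a variable is exhausted.
static std::vector<std::vector<int>>
assignRoundRobin(std::vector<std::vector<bool>> PossibleRegsForVar) {
  std::vector<std::vector<int>> Rounds;
  std::vector<int> Cursors(PossibleRegsForVar.size(), 0);
  while (true) {
    std::vector<int> Assignment;
    for (size_t VarId = 0; VarId < PossibleRegsForVar.size(); ++VarId) {
      const std::vector<bool> &Mask = PossibleRegsForVar[VarId];
      int Reg = -1;
      for (int R = Cursors[VarId] + 1; R < static_cast<int>(Mask.size()); ++R) {
        if (Mask[R]) { Reg = R; break; }   // next register still allowed
      }
      if (Reg < 0) return Rounds;          // this variable is out of registers
      Assignment.push_back(Reg);
      Cursors[VarId] = Reg;                // bump this variable's cursor
      // Prevent other variables from reusing this register later.
      for (std::vector<bool> &Other : PossibleRegsForVar) Other[Reg] = false;
    }
    Rounds.push_back(Assignment);
  }
}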
virtual void perform() {
  for(;;) { // forever
    BackgroundCompileRequest* req = 0;

    // Lock, wait, get a request, unlock
    {
      thread::Mutex::LockGuard guard(mutex_);

      if(pause_) {
        state = cPaused;

        paused_ = true;
        pause_condition_.signal();

        while(pause_) {
          condition_.wait(mutex_);
        }

        state = cUnknown;
        paused_ = false;
      }

      // If we've been asked to stop, do so now.
      if(stop_) return;

      while(pending_requests_.size() == 0) {
        state = cIdle;

        // unlock and wait...
        condition_.wait(mutex_);

        if(stop_) return;
      }

      // now locked again, shift a request
      req = pending_requests_.front();
      pending_requests_.pop_front();

      state = cRunning;
    }

    // mutex now unlocked, allowing others to push more requests
    //

    LLVMCompiler* jit = new LLVMCompiler();

    {
      timer::Running timer(ls_->time_spent);
      jit->compile(ls_, req->vmmethod(), req->is_block());
      jit->generate_function(ls_);
    }

    if(show_machine_code_) {
      jit->show_machine_code();
    }

    // Ok, compiled, generated machine code, now update MachineMethod

    // Ok, now we are manipulating managed memory, so make
    // sure the GC doesn't run.
    ls_->shared().gc_dependent();

    req->vmmethod()->set_jitted(jit->llvm_function(),
                                jit->code_bytes(),
                                jit->function_pointer());

    if(req->is_block()) {
      BlockEnvironment* be = req->block_env();
      if(!be) {
        llvm::outs() << "Fatal error in JIT. Expected a BlockEnvironment.\n";
      } else {
        be->set_native_function(jit->function_pointer());
      }
    } else {
      MachineMethod* mm = req->machine_method();
      if(!mm) {
        llvm::outs() << "Fatal error in JIT. Expected a MachineMethod.\n";
      } else {
        mm->update(req->vmmethod(), jit);
        mm->activate();
      }
    }

    int which = ls_->add_jitted_method();
    if(ls_->config().jit_show_compiling) {
      llvm::outs() << "[[[ JIT finished background compiling "
                   << which
                   << (req->is_block() ? " (block)" : " (method)")
                   << " ]]]\n";
    }

    delete req;

    // We don't depend on the GC here, so let it run independent
    // of us.
    ls_->shared().gc_independent();
  }
}
// Installed by default in BlockEnvironment::execute, it runs the bytecodes
// for the block in the interpreter.
//
// Future code will detect hot blocks and queue them in the JIT, whereby the
// JIT will install a newly minted machine function into ::execute.
Object* BlockEnvironment::execute_interpreter(STATE, CallFrame* previous,
                                              BlockEnvironment* const env, Arguments& args,
                                              BlockInvocation& invocation)
{
  VMMethod* const vmm = env->vmmethod(state);

  if(!vmm) {
    Exception::internal_error(state, previous, "invalid bytecode method");
    return 0;
  }

#ifdef ENABLE_LLVM
  if(vmm->call_count >= 0) {
    if(vmm->call_count >= state->shared.config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);
      ls->compile_soon(state, env->code(), env);
    } else {
      vmm->call_count++;
    }
  }
#endif

  size_t scope_size = sizeof(StackVariables) +
    (vmm->number_of_locals * sizeof(Object*));
  StackVariables* scope =
    reinterpret_cast<StackVariables*>(alloca(scope_size));

  Module* mod = invocation.module;
  if(!mod) mod = env->module();
  scope->initialize(invocation.self, env->top_scope_->block(),
                    mod, vmm->number_of_locals);
  scope->set_parent(env->scope_);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(vmm->stack_size);

  frame->prepare(vmm->stack_size);

  frame->previous = previous;
  frame->static_scope_ = invocation.static_scope;

  frame->arguments = &args;
  frame->dispatch_data = reinterpret_cast<BlockEnvironment*>(env);
  frame->cm = env->code_;
  frame->scope = scope;
  frame->top_scope_ = env->top_scope_;
  frame->flags = invocation.flags | CallFrame::cCustomStaticScope
               | CallFrame::cMultipleScopes
               | CallFrame::cBlock;

  // Check the stack and interrupts here rather than in the interpreter
  // loop itself.
  if(state->detect_stack_condition(frame)) {
    if(!state->check_interrupts(frame, frame)) return NULL;
  }

  state->global_lock().checkpoint(state, frame);

  if(unlikely(state->interrupts.check)) {
    state->interrupts.checked();
    if(state->interrupts.perform_gc) {
      state->interrupts.perform_gc = false;
      state->collect_maybe(frame);
    }
  }

#ifdef RBX_PROFILER
  if(unlikely(state->tooling())) {
    Module* mod = scope->module();
    if(SingletonClass* sc = try_as<SingletonClass>(mod)) {
      if(Module* ma = try_as<Module>(sc->attached_instance())) {
        mod = ma;
      }
    }

    tooling::BlockEntry method(state, env, mod);
    return (*vmm->run)(state, vmm, frame);
  } else {
    return (*vmm->run)(state, vmm, frame);
  }
#else
  return (*vmm->run)(state, vmm, frame);
#endif
}
virtual void perform() {
  const char* thread_name = "rbx.jit";
  ManagedThread::set_current(ls_, thread_name);

  ls_->set_run_state(ManagedThread::eIndependent);

  RUBINIUS_THREAD_START(thread_name, ls_->thread_id(), 1);

#ifndef RBX_WINDOWS
  sigset_t set;
  sigfillset(&set);
  pthread_sigmask(SIG_SETMASK, &set, NULL);
#endif

  for(;;) { // forever
    BackgroundCompileRequest* req = 0;

    // Lock, wait, get a request, unlock
    {
      utilities::thread::Mutex::LockGuard guard(mutex_);

      if(pause_) {
        state = cPaused;

        paused_ = true;
        pause_condition_.broadcast();

        if(stop_) goto halt;

        while(pause_) {
          condition_.wait(mutex_);
          if(stop_) goto halt;
        }

        state = cUnknown;
        paused_ = false;
      }

      // If we've been asked to stop, do so now.
      if(stop_) goto halt;

      while(pending_requests_.empty()) {
        state = cIdle;

        // unlock and wait...
        condition_.wait(mutex_);

        if(stop_) goto halt;
      }

      // now locked again, shift a request
      req = pending_requests_.front();

      state = cRunning;
    }

    // This isn't ideal, but it's the safest. Keep the GC from
    // running while we're building the IR.
    ls_->gc_dependent();

    Context ctx(ls_);
    jit::Compiler jit(&ctx);

    // mutex now unlocked, allowing others to push more requests
    //

    current_req_ = req;
    current_compiler_ = &jit;

    int spec_id = 0;
    Class* cls = req->receiver_class();
    if(cls && !cls->nil_p()) {
      spec_id = cls->class_id();
    }

    void* func = 0;

    {
      timer::Running<1000000> timer(ls_->shared().stats.jit_time_spent);

      jit.compile(req);
      func = jit.generate_function();
    }

    // We were unable to compile this function, likely
    // because it's got something we don't support.
    if(!func) {
      if(ls_->config().jit_show_compiling) {
        CompiledCode* code = req->method();
        llvm::outs() << "[[[ JIT error background compiling "
                     << ls_->enclosure_name(code) << "#" << ls_->symbol_debug_str(code->name())
                     << (req->is_block() ? " (block)" : " (method)")
                     << " ]]]\n";
      }

      // If someone was waiting on this, wake them up.
      if(utilities::thread::Condition* cond = req->waiter()) {
        cond->signal();
      }

      current_req_ = 0;
      current_compiler_ = 0;
      pending_requests_.pop_front();
      delete req;

      // We don't depend on the GC here, so let it run independent
      // of us.
      ls_->gc_independent();

      continue;
    }

    if(show_machine_code_) {
      jit.show_machine_code();
    }

    // If the method has had jit'ing request disabled since we started
    // JIT'ing it, discard our work.
    if(!req->machine_code()->jit_disabled()) {

      jit::RuntimeDataHolder* rd = ctx.runtime_data_holder();

      atomic::memory_barrier();
      ls_->start_method_update();

      if(!req->is_block()) {
        if(spec_id) {
          req->method()->add_specialized(spec_id, reinterpret_cast<executor>(func), rd);
        } else {
          req->method()->set_unspecialized(reinterpret_cast<executor>(func), rd);
        }
      } else {
        req->method()->set_unspecialized(reinterpret_cast<executor>(func), rd);
      }

      req->machine_code()->clear_compiling();

      // assert(req->method()->jit_data());

      ls_->end_method_update();

      rd->run_write_barrier(ls_->write_barrier(), req->method());

      ls_->shared().stats.jitted_methods++;

      if(ls_->config().jit_show_compiling) {
        CompiledCode* code = req->method();
        llvm::outs() << "[[[ JIT finished background compiling "
                     << ls_->enclosure_name(code) << "#" << ls_->symbol_debug_str(code->name())
                     << (req->is_block() ? " (block)" : " (method)")
                     << " ]]]\n";
      }
    }

    // If someone was waiting on this, wake them up.
    if(utilities::thread::Condition* cond = req->waiter()) {
      cond->signal();
    }

    current_req_ = 0;
    current_compiler_ = 0;
    pending_requests_.pop_front();
    delete req;

    // We don't depend on the GC here, so let it run independent
    // of us.
    ls_->gc_independent();
  }

halt:
  RUBINIUS_THREAD_STOP(thread_name, ls_->thread_id(), 1);
}
void RuntimeDataHolder::cleanup(CodeManager* cm) {
  LLVMState* ls = cm->shared()->llvm_state;
  assert(ls);

  ls->remove(function_);
}
llvm::Function* function(const char* name) {
  return llvm::cast<llvm::Function>(
      ls_->module()->getOrInsertFunction(name, type()));
}
Object* MachineCode::execute_specialized(STATE, CallFrame* previous,
                                         Executable* exec, Module* mod, Arguments& args)
{
  CompiledCode* code = as<CompiledCode>(exec);
  MachineCode* mcode = code->machine_code();

  StackVariables* scope = ALLOCA_STACKVARIABLES(mcode->number_of_locals);
  // Originally, I tried using msg.module directly, but what happens is if
  // super is used, that field is read. If you combine that with the method
  // being called recursively, msg.module can change, causing super() to
  // look in the wrong place.
  //
  // Thus, we have to cache the value in the StackVariables.
  scope->initialize(args.recv(), args.block(), mod, mcode->number_of_locals);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(mcode->stack_size);

  // If argument handling fails..
  if(ArgumentHandler::call(state, mcode, scope, args) == false) {
    Exception* exc =
      Exception::make_argument_error(state, mcode->total_args, args.total(), args.name());
    exc->locations(state, Location::from_call_stack(state, previous));
    state->raise_exception(exc);
    return NULL;
  }

  frame->prepare(mcode->stack_size);

  frame->previous = previous;
  frame->constant_scope_ = 0;
  frame->dispatch_data = 0;
  frame->compiled_code = code;
  frame->flags = 0;
  frame->optional_jit_data = 0;
  frame->top_scope_ = 0;
  frame->scope = scope;
  frame->arguments = &args;

  GCTokenImpl gct;

#ifdef ENABLE_LLVM
  // A negative call_count means we've disabled usage based JIT
  // for this method.
  if(mcode->call_count >= 0) {
    if(mcode->call_count >= state->shared().config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);

      OnStack<3> os(state, exec, mod, code);

      ls->compile_callframe(state, gct, code, frame);
    } else {
      mcode->call_count++;
    }
  }
#endif

  OnStack<3> os(state, exec, mod, code);

#ifdef RBX_PROFILER
  if(unlikely(state->vm()->tooling())) {
    // Check the stack and interrupts here rather than in the interpreter
    // loop itself.
    if(!state->check_interrupts(gct, frame, frame)) return NULL;

    state->checkpoint(gct, frame);

    tooling::MethodEntry method(state, exec, mod, args, code);

    RUBINIUS_METHOD_ENTRY_HOOK(state, mod, args.name(), previous);
    Object* result = (*mcode->run)(state, mcode, frame);
    RUBINIUS_METHOD_RETURN_HOOK(state, mod, args.name(), previous);
    return result;
  } else {
    if(!state->check_interrupts(gct, frame, frame)) return NULL;

    state->checkpoint(gct, frame);

    RUBINIUS_METHOD_ENTRY_HOOK(state, mod, args.name(), previous);
    Object* result = (*mcode->run)(state, mcode, frame);
    RUBINIUS_METHOD_RETURN_HOOK(state, mod, args.name(), previous);
    return result;
  }
#else
  if(!state->check_interrupts(gct, frame, frame)) return NULL;

  state->checkpoint(gct, frame);

  RUBINIUS_METHOD_ENTRY_HOOK(state, mod, args.name(), previous);
  Object* result = (*mcode->run)(state, mcode, frame);
  RUBINIUS_METHOD_RETURN_HOOK(state, mod, args.name(), previous);
  return result;
#endif
}
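// The call_count checks above implement a simple hotness heuristic: every
// interpreted entry increments a per-method counter until it crosses the
// configurable jit_call_til_compile threshold, at which point the method is
// queued for background compilation; a negative counter permanently opts the
// method out of usage-based JIT. A minimal, self-contained sketch of that
// policy (hypothetical names, not the Rubinius API):
#include <cstdint>

struct MethodCounters {
  int64_t call_count = 0;  // < 0 means usage-based JIT is disabled for this method
};

// Returns true when the caller should enqueue the method for compilation.
inline bool should_compile(MethodCounters& m, int64_t call_til_compile) {
  if(m.call_count < 0) return false;              // JIT disabled for this method
  if(m.call_count >= call_til_compile) return true;
  m.call_count++;                                 // still warming up
  return false;
}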
virtual void perform() {
  sigset_t set;
  sigfillset(&set);
  pthread_sigmask(SIG_SETMASK, &set, NULL);

  for(;;) { // forever
    BackgroundCompileRequest* req = 0;

    // Lock, wait, get a request, unlock
    {
      thread::Mutex::LockGuard guard(mutex_);

      if(pause_) {
        state = cPaused;

        paused_ = true;
        pause_condition_.signal();

        while(pause_) {
          condition_.wait(mutex_);
        }

        state = cUnknown;
        paused_ = false;
      }

      // If we've been asked to stop, do so now.
      if(stop_) return;

      while(pending_requests_.size() == 0) {
        state = cIdle;

        // unlock and wait...
        condition_.wait(mutex_);

        if(stop_) return;
      }

      // now locked again, shift a request
      req = pending_requests_.front();
      pending_requests_.pop_front();

      state = cRunning;
    }

    // This isn't ideal, but it's the safest. Keep the GC from
    // running while we're building the IR.
    ls_->shared().gc_dependent();

    // mutex now unlocked, allowing others to push more requests
    //

    jit::Compiler jit;

    void* func = 0;
    {
      timer::Running<size_t, 1000000> timer(ls_->shared().stats.jit_time_spent);

      if(req->is_block()) {
        jit.compile_block(ls_, req->method(), req->vmmethod());
      } else {
        jit.compile_method(ls_, req->method(), req->vmmethod());
      }

      func = jit.generate_function(ls_);
    }

    // We were unable to compile this function, likely
    // because it's got something we don't support.
    if(!func) {
      if(ls_->config().jit_show_compiling) {
        llvm::outs() << "[[[ JIT error in background compiling ]]]\n";
      }

      // If someone was waiting on this, wake them up.
      if(thread::Condition* cond = req->waiter()) {
        cond->signal();
      }

      delete req;

      // We don't depend on the GC here, so let it run independent
      // of us.
      ls_->shared().gc_independent();

      continue;
    }

    if(show_machine_code_) {
      jit.show_machine_code();
    }

    req->vmmethod()->set_jitted(jit.llvm_function(),
                                jit.code_bytes(),
                                func);

    if(!req->is_block()) {
      req->method()->execute = reinterpret_cast<executor>(func);
    }

    assert(req->method()->jit_data());

    req->method()->jit_data()->run_write_barrier(ls_->write_barrier(), req->method());

    ls_->shared().stats.jitted_methods++;

    if(ls_->config().jit_show_compiling) {
      llvm::outs() << "[[[ JIT finished background compiling "
                   << (req->is_block() ? " (block)" : " (method)")
                   << " ]]]\n";
    }

    // If someone was waiting on this, wake them up.
    if(thread::Condition* cond = req->waiter()) {
      cond->signal();
    }

    delete req;

    // We don't depend on the GC here, so let it run independent
    // of us.
    ls_->shared().gc_independent();
  }
}
// Installed by default in BlockEnvironment::execute, it runs the bytecodes
// for the block in the interpreter.
//
// Future code will detect hot blocks and queue them in the JIT, whereby the
// JIT will install a newly minted machine function into ::execute.
Object* BlockEnvironment::execute_interpreter(STATE, CallFrame* previous,
                                              BlockEnvironment* env, Arguments& args,
                                              BlockInvocation& invocation)
{
  // Don't use env->machine_code() because it might lock and the work should
  // already be done.
  MachineCode* const mcode = env->compiled_code_->machine_code();

  if(!mcode) {
    Exception::internal_error(state, previous, "invalid bytecode method");
    return 0;
  }

#ifdef ENABLE_LLVM
  if(mcode->call_count >= 0) {
    if(mcode->call_count >= state->shared().config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);

      GCTokenImpl gct;
      OnStack<1> os(state, env);
      ls->compile_soon(state, gct, env->compiled_code(), previous,
                       invocation.self->lookup_begin(state), env, true);
    } else {
      mcode->call_count++;
    }
  }
#endif

  StackVariables* scope = ALLOCA_STACKVARIABLES(mcode->number_of_locals);

  Module* mod = invocation.module;
  if(!mod) mod = env->module();
  scope->initialize(invocation.self, env->top_scope_->block(),
                    mod, mcode->number_of_locals);
  scope->set_parent(env->scope_);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(mcode->stack_size);

  frame->prepare(mcode->stack_size);

  frame->previous = previous;
  frame->constant_scope_ = invocation.constant_scope;

  frame->arguments = &args;
  frame->dispatch_data = env;
  frame->compiled_code = env->compiled_code_;
  frame->scope = scope;
  frame->top_scope_ = env->top_scope_;
  frame->flags = invocation.flags | CallFrame::cCustomConstantScope
               | CallFrame::cMultipleScopes
               | CallFrame::cBlock;

  // TODO: this is a quick hack to process block arguments in 1.9.
  if(!LANGUAGE_18_ENABLED(state)) {
    if(!GenericArguments::call(state, frame, mcode, scope, args, invocation.flags)) {
      return NULL;
    }
  }

#ifdef RBX_PROFILER
  if(unlikely(state->vm()->tooling())) {
    Module* mod = scope->module();
    if(SingletonClass* sc = try_as<SingletonClass>(mod)) {
      if(Module* ma = try_as<Module>(sc->attached_instance())) {
        mod = ma;
      }
    }

    OnStack<2> os(state, env, mod);

    // Check the stack and interrupts here rather than in the interpreter
    // loop itself.
    GCTokenImpl gct;

    if(!state->check_interrupts(gct, frame, frame)) return NULL;

    state->checkpoint(gct, frame);

    tooling::BlockEntry method(state, env, mod);
    return (*mcode->run)(state, mcode, frame);
  } else {
    // Check the stack and interrupts here rather than in the interpreter
    // loop itself.
    GCTokenImpl gct;

    if(!state->check_interrupts(gct, frame, frame)) return NULL;

    state->checkpoint(gct, frame);

    return (*mcode->run)(state, mcode, frame);
  }
#else
  // Check the stack and interrupts here rather than in the interpreter
  // loop itself.
  GCTokenImpl gct;

  if(!state->check_interrupts(gct, frame, frame)) return NULL;

  state->checkpoint(gct, frame);

  return (*mcode->run)(state, mcode, frame);
#endif
}
// Installed by default in BlockEnvironment::execute, it runs the bytecodes
// for the block in the interpreter.
//
// Future code will detect hot blocks and queue them in the JIT, whereby the
// JIT will install a newly minted machine function into ::execute.
Object* BlockEnvironment::execute_interpreter(STATE, CallFrame* previous,
                                              BlockEnvironment* env, Arguments& args,
                                              BlockInvocation& invocation)
{
  // Don't use env->vmmethod() because it might lock and the work should already
  // be done.
  VMMethod* const vmm = env->code_->backend_method();

  if(!vmm) {
    Exception::internal_error(state, previous, "invalid bytecode method");
    return 0;
  }

#ifdef ENABLE_LLVM
  if(vmm->call_count >= 0) {
    if(vmm->call_count >= state->shared().config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);
      ls->compile_soon(state, env->code(), env, true);
    } else {
      vmm->call_count++;
    }
  }
#endif

  size_t scope_size = sizeof(StackVariables) +
    (vmm->number_of_locals * sizeof(Object*));
  StackVariables* scope =
    reinterpret_cast<StackVariables*>(alloca(scope_size));

  Module* mod = invocation.module;
  if(!mod) mod = env->module();
  scope->initialize(invocation.self, env->top_scope_->block(),
                    mod, vmm->number_of_locals);
  scope->set_parent(env->scope_);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(vmm->stack_size);

  frame->prepare(vmm->stack_size);

  frame->previous = previous;
  frame->static_scope_ = invocation.static_scope;

  frame->arguments = &args;
  frame->dispatch_data = reinterpret_cast<BlockEnvironment*>(env);
  frame->cm = env->code_;
  frame->scope = scope;
  frame->top_scope_ = env->top_scope_;
  frame->flags = invocation.flags | CallFrame::cCustomStaticScope
               | CallFrame::cMultipleScopes
               | CallFrame::cBlock;
  frame->stack_top_ptr_ptr = NULL;

  // TODO: this is a quick hack to process block arguments in 1.9.
  if(!LANGUAGE_18_ENABLED(state)) {
    if(!GenericArguments::call(state, frame, vmm, scope, args, invocation.flags)) {
      return NULL;
    }
  }

  // Check the stack and interrupts here rather than in the interpreter
  // loop itself.
  GCTokenImpl gct;

  if(state->detect_stack_condition(frame)) {
    if(!state->check_interrupts(gct, frame, frame)) return NULL;
  }

  state->checkpoint(gct, frame);

#ifdef RBX_PROFILER
  if(unlikely(state->vm()->tooling())) {
    Module* mod = scope->module();
    if(SingletonClass* sc = try_as<SingletonClass>(mod)) {
      if(Module* ma = try_as<Module>(sc->attached_instance())) {
        mod = ma;
      }
    }

    tooling::BlockEntry method(state, env, mod);
    return (*vmm->run)(state, vmm, frame);
  } else {
    return (*vmm->run)(state, vmm, frame);
  }
#else
  return (*vmm->run)(state, vmm, frame);
#endif
}
NoAccessManagedMemory(LLVMState* ls)
  : ls_(ls)
{
  ls_->shared().gc_independent();
}
Signature& operator<<(const char* name) {
  types_.push_back(ls_->ptr_type(name));
  return *this;
}
// Installed by default in BlockEnvironment::execute, it runs the bytecodes
// for the block in the interpreter.
//
// Future code will detect hot blocks and queue them in the JIT, whereby the
// JIT will install a newly minted machine function into ::execute.
Object* BlockEnvironment::execute_interpreter(STATE, CallFrame* previous,
                                              BlockEnvironment* const env, Arguments& args,
                                              BlockInvocation& invocation)
{
  if(!env->vmm) {
    env->method_->formalize(state, false);
    env->vmm = env->method_->backend_method();

    // Not sure why we hit this case currently, so just disable the JIT
    // for them altogether.
    env->vmm->call_count = -1;
  }

  VMMethod* const vmm = env->vmm;

#ifdef ENABLE_LLVM
  if(vmm->call_count >= 0) {
    if(vmm->call_count >= state->shared.config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);

      if(state->shared.config.jit_inline_blocks) {
        if(VMMethod* parent = vmm->parent()) {
          while(VMMethod* next = parent->parent()) {
            parent = next;
          }

          if(parent->call_count >= 200) {
            ls->compile_soon(state, parent);
          }
        }
      }

      ls->compile_soon(state, vmm, env);
    } else {
      vmm->call_count++;
    }
  }
#endif

  size_t scope_size = sizeof(StackVariables) +
    (vmm->number_of_locals * sizeof(Object*));
  StackVariables* scope =
    reinterpret_cast<StackVariables*>(alloca(scope_size));

  Module* mod = invocation.module;
  if(!mod) mod = env->module();
  scope->initialize(invocation.self, env->top_scope_->block(),
                    mod, vmm->number_of_locals);
  scope->set_parent(env->scope_);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(vmm->stack_size);

  frame->prepare(vmm->stack_size);

  frame->previous = previous;
  frame->static_scope_ = invocation.static_scope;
  frame->msg = NULL;
  frame->cm = env->method_;
  frame->scope = scope;
  frame->top_scope_ = env->top_scope_;
  frame->flags = invocation.flags | CallFrame::cCustomStaticScope
               | CallFrame::cMultipleScopes;

#ifdef RBX_PROFILER
  if(unlikely(state->shared.profiling())) {
    profiler::MethodEntry method(state,
                                 env->top_scope_->method()->name(),
                                 scope->module(), env->method_);
    return (*vmm->run)(state, vmm, frame, args);
  } else {
    return (*vmm->run)(state, vmm, frame, args);
  }
#else
  return (*vmm->run)(state, vmm, frame, args);
#endif
}
Object* VMMethod::execute_specialized(STATE, CallFrame* previous,
                                      Dispatch& msg, Arguments& args)
{
  CompiledMethod* cm = as<CompiledMethod>(msg.method);
  VMMethod* vmm = cm->backend_method();

#ifdef ENABLE_LLVM
  // A negative call_count means we've disabled usage based JIT
  // for this method.
  if(vmm->call_count >= 0) {
    if(vmm->call_count >= state->shared.config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);
      ls->compile_callframe(state, cm, previous);
    } else {
      vmm->call_count++;
    }
  }
#endif

  size_t scope_size = sizeof(StackVariables) +
    (vmm->number_of_locals * sizeof(Object*));
  StackVariables* scope =
    reinterpret_cast<StackVariables*>(alloca(scope_size));
  // Originally, I tried using msg.module directly, but what happens is if
  // super is used, that field is read. If you combine that with the method
  // being called recursively, msg.module can change, causing super() to
  // look in the wrong place.
  //
  // Thus, we have to cache the value in the StackVariables.
  scope->initialize(args.recv(), args.block(), msg.module, vmm->number_of_locals);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(vmm->stack_size);

  // If argument handling fails..
  if(ArgumentHandler::call(state, vmm, scope, args) == false) {
    Exception* exc =
      Exception::make_argument_error(state, vmm->required_args, args.total(), msg.name);
    exc->locations(state, Location::from_call_stack(state, previous));
    state->thread_state()->raise_exception(exc);
    return NULL;
  }

  frame->prepare(vmm->stack_size);

  frame->previous = previous;
  frame->flags = 0;
  frame->arguments = &args;
  frame->dispatch_data = &msg;
  frame->cm = cm;
  frame->scope = scope;

#ifdef RBX_PROFILER
  if(unlikely(state->shared.profiling())) {
    profiler::MethodEntry method(state, msg, args, cm);
    return (*vmm->run)(state, vmm, frame);
  } else {
    return (*vmm->run)(state, vmm, frame);
  }
#else
  return (*vmm->run)(state, vmm, frame);
#endif
}
~NoAccessManagedMemory() {
  ls_->shared().gc_dependent();
}
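// NoAccessManagedMemory is an RAII guard: the constructor above marks the
// current thread as GC-independent (it promises not to touch managed memory
// while the guard lives), and the destructor flips it back to GC-dependent.
// A hypothetical usage sketch of the guard around a heap-free region
// (the function name is illustrative, not part of the original source):
void compile_without_touching_heap(LLVMState* ls) {
  NoAccessManagedMemory guard(ls);  // GC may now run without waiting on us
  // ... long-running work that never reads or writes managed objects ...
}                                   // destructor re-registers us as GC-dependent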