// Pick a compile target for the JIT starting from 'start' (or from a
// primitive when 'start' is null) and queue it for background compilation.
void LLVMState::compile_callframe(STATE, CompiledMethod* start, CallFrame* call_frame, int primitive) {
  // Trace where the target search begins when inline debugging is enabled.
  if(config().jit_inline_debug) {
    if(start) {
      log() << "JIT: target search from "
            << symbol_cstr(start->name()) << "\n";
    } else {
      log() << "JIT: target search from primitive\n";
    }
  }

  CompiledMethod* candidate = find_candidate(start, call_frame);

  if(!candidate) {
    if(config().jit_inline_debug) {
      log() << "JIT: unable to find candidate\n";
    }
    return;
  }

  // A block must never be selected as the compile target here.
  assert(!candidate->backend_method()->parent());

  // A negative call_count marks a method whose usage-based JIT has been
  // disabled; fall back to compiling the trigger method itself.
  if(candidate->backend_method()->call_count < 0) {
    if(!start) return;
    candidate = start;
  }

  compile_soon(state, candidate);
}
// Walk up the call stack from 'call_frame' looking for the outermost frame
// whose method is still a good inlining root: small, frequently called, not
// yet jitted, splat-free, and not marked no-inline. Returns the best frame
// found (never walks more than cInlineMaxDepth frames).
CallFrame* LLVMState::find_candidate(CompiledMethod* start, CallFrame* call_frame) {
  // With generic inlining disabled, the frame we were handed is the target.
  if(!config_.jit_inline_generic) {
    return call_frame;
  }

  int depth = cInlineMaxDepth;

  // No explicit trigger method: use the current frame's method and begin
  // the search at its caller (consuming one level of depth).
  if(!start) {
    start = call_frame->cm;
    call_frame = call_frame->previous;
    depth--;
  }

  // A large trigger method (or a missing caller) is not worth walking from.
  if(!call_frame || start->backend_method()->total > SMALL_METHOD_SIZE) {
    return call_frame;
  }

  CallFrame* caller = call_frame;

  while(depth-- > 0) {
    CompiledMethod* cur = call_frame->cm;
    VMMethod* vmm = cur->backend_method();

    // Stop (returning the best frame so far) if this caller can't serve
    // as an inlining root.
    //
    // BUG FIX: the no-inline test was previously written as
    // '!vmm->no_inline_p()', which rejected every method NOT marked
    // no-inline (stopping the walk at the first caller) and accepted the
    // ones that WERE marked — exactly inverted. The STATE overload of this
    // function uses the non-negated form.
    if(vmm->required_args != vmm->total_args // has a splat
        || vmm->call_count < 200             // not called much
        || vmm->jitted()                     // already jitted
        || vmm->no_inline_p()                // method marked as not inlinable
      ) return caller;

    CallFrame* next = call_frame->previous;
    if(!next || cur->backend_method()->total > SMALL_METHOD_SIZE) return call_frame;

    caller = call_frame;
    call_frame = next;
  }

  return caller;
}
// Drive the JIT compilation of a single method from a background compile
// request: set up the method info, root it in the context, and hand it to
// the method builder.
void Compiler::compile_method(LLVMState* ls, BackgroundCompileRequest* req) {
  CompiledMethod* meth = req->method();

  // Announce the compile, stamped with the current wall-clock time.
  if(ls->config().jit_inline_debug) {
    struct timeval now;
    gettimeofday(&now, NULL);

    ls->log() << "JIT: compiling "
              << ls->enclosure_name(meth) << "#"
              << ls->symbol_debug_str(meth->name())
              << " (" << now.tv_sec << "." << now.tv_usec << ")\n";
  }

  JITMethodInfo method_info(ctx_, meth, meth->backend_method());
  method_info.is_block = false;

  // When the request pins a receiver class, specialize on it.
  if(Class* recv_class = req->receiver_class()) {
    method_info.set_self_class(recv_class);
  }

  ctx_.set_root(&method_info);

  jit::MethodBuilder builder(ls, method_info);
  builder.setup();

  compile_builder(ctx_, ls, method_info, builder);
}
// Dispatch to the executor specialized for the receiver's class, if one
// exists; otherwise run the unspecialized executor (or, as a last resort,
// the fallback).
Object* CompiledMethod::specialized_executor(STATE, CallFrame* call_frame, Executable* exec, Module* mod, Arguments& args) {
  CompiledMethod* cm = as<CompiledMethod>(exec);

  Class* cls = args.recv()->class_object(state);
  int id = cls->class_id();

  VMMethod* v = cm->backend_method();
  executor target = v->unspecialized;

  // Linear scan of the fixed-size specialization table for an executor
  // compiled against this receiver's class id.
  for(int i = 0; i < VMMethod::cMaxSpecializations; i++) {
    int c_id = v->specializations[i].class_id;
    executor x = v->specializations[i].execute;

    if(c_id == id && x != 0) {
      target = x;
      break;
    }
  }

  // This is a bug. We should not have this setup if there are no
  // specializations. FIX THIS BUG!
  // (Guard: 'unspecialized' can be null when this executor was installed
  // without any specializations being populated.)
  if(!target) target = v->fallback;

  return target(state, call_frame, exec, mod, args);
}
// Return a tagged id for an internalized method: the VMMethod's id shifted
// left one bit with the low bit set, so a valid id can never be 0. Returns
// 0 when the method has no backend VMMethod.
r_mint Env::method_id(rmethod meth) {
  CompiledMethod* compiled = i(meth);

  VMMethod* code = compiled->backend_method();
  if(code) {
    return (code->method_id() << 1) | 1;
  }

  return 0;
}
// Debug dump of a CompiledMethod: prints each user-visible attribute at
// increasing indent, then — when the method has been internalized and LLVM
// support is compiled in — the generated LLVM IR and machine code for every
// populated specialization slot.
void CompiledMethod::Info::show(STATE, Object* self, int level) {
  CompiledMethod* cm = as<CompiledMethod>(self);

  class_header(state, self);
  indent_attribute(++level, "file"); cm->file()->show(state, level);
  indent_attribute(level, "iseq"); cm->iseq()->show(state, level);
  indent_attribute(level, "lines"); cm->lines()->show_simple(state, level);
  indent_attribute(level, "literals"); cm->literals()->show_simple(state, level);
  indent_attribute(level, "local_count"); cm->local_count()->show(state, level);
  indent_attribute(level, "local_names"); cm->local_names()->show_simple(state, level);
  indent_attribute(level, "name"); cm->name()->show(state, level);
  indent_attribute(level, "required_args"); cm->required_args()->show(state, level);
  indent_attribute(level, "scope"); cm->scope()->show(state, level);
  indent_attribute(level, "splat"); cm->splat()->show(state, level);
  indent_attribute(level, "stack_size"); cm->stack_size()->show(state, level);
  indent_attribute(level, "total_args"); cm->total_args()->show(state, level);

  indent_attribute(level, "internalized");
  // backend_method_ is only set once the method has been internalized into
  // a VMMethod, so its presence doubles as the "internalized" flag.
  if(!cm->backend_method_) {
    std::cout << "no\n";
  } else {
    std::cout << "yes\n";

#ifdef ENABLE_LLVM
    VMMethod* v = cm->backend_method();

    // Emit IR and native code for each specialization that has jit data.
    for(int i = 0; i < VMMethod::cMaxSpecializations; i++) {
      if(!v->specializations[i].jit_data) continue;

      llvm::Function* func = v->specializations[i].jit_data->llvm_function();

      llvm::outs() << "<LLVM>\n" << *func << "</LLVM>\n<MachineCode>\n";
      LLVMState::show_machine_code(
          v->specializations[i].jit_data->native_func(),
          v->specializations[i].jit_data->native_size());
      llvm::outs() << "</MachineCode>\n";
    }
#endif
  }

  close_body(level);
}
// Walk up the call stack from 'call_frame' looking for the outermost caller
// that is still profitable to compile with its callees inlined. Each failed
// check stops the search; most return 'callee' (the best frame found so
// far), a few return 'call_frame' (the frame being examined).
CallFrame* LLVMState::find_candidate(STATE, CompiledMethod* start, CallFrame* call_frame) {
  // With generic inlining disabled, the handed-in frame is the target.
  if(!config_.jit_inline_generic) {
    return call_frame;
  }

  int depth = cInlineMaxDepth;

  if(!start) rubinius::bug("null start");
  if(!call_frame) rubinius::bug("null call_frame");

  // if(!start) {
  //   start = call_frame->cm;
  //   call_frame = call_frame->previous;
  //   depth--;
  // }

  if(debug_search) {
    std::cout << "> call_count: " << call_frame->cm->backend_method()->call_count
      << " size: " << call_frame->cm->backend_method()->total
      << " sends: " << call_frame->cm->backend_method()->inline_cache_count()
      << std::endl;

    call_frame->print_backtrace(state, 1);
  }

  // The trigger method itself must be small enough to be inlined.
  if(start->backend_method()->total > (size_t)config_.jit_max_method_inline_size) {
    if(debug_search) {
      std::cout << "JIT: STOP. reason: trigger method isn't small: "
        << start->backend_method()->total << " > "
        << config_.jit_max_method_inline_size
        << std::endl;
    }

    return call_frame;
  }

  VMMethod* vmm = start->backend_method();

  // A splat (required_args != total_args) blocks inlining of the trigger.
  if(vmm->required_args != vmm->total_args) {
    if(debug_search) {
      std::cout << "JIT: STOP. reason: trigger method req_args != total_args" << std::endl;
    }

    return call_frame;
  }

  if(vmm->no_inline_p()) {
    if(debug_search) {
      std::cout << "JIT: STOP. reason: trigger method no_inline_p() = true" << std::endl;
    }

    return call_frame;
  }

  CallFrame* callee = call_frame;
  call_frame = call_frame->previous;

  if(!call_frame) return callee;

  // Now start looking at callers.
  while(depth-- > 0) {
    CompiledMethod* cur = call_frame->cm;

    // A frame without a CompiledMethod is synthetic (not Ruby code);
    // the walk cannot continue past it.
    if(!cur) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: synthetic CallFrame hit" << std::endl;
      }

      return callee;
    }

    VMMethod* vmm = cur->backend_method();

    if(debug_search) {
      std::cout << "> call_count: " << vmm->call_count
        << " size: " << vmm->total
        << " sends: " << vmm->inline_cache_count()
        << std::endl;

      call_frame->print_backtrace(state, 1);
    }

    /*
    if(call_frame->block_p()
        || vmm->required_args != vmm->total_args // has a splat
        || vmm->call_count < 200 // not called much
        || vmm->jitted() // already jitted
        || vmm->parent() // is a block
      ) return callee;
    */

    if(vmm->required_args != vmm->total_args) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: req_args != total_args" << std::endl;
      }

      return callee;
    }

    if(vmm->call_count < config_.jit_call_inline_threshold) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: call_count too small: "
          << vmm->call_count << " < "
          << config_.jit_call_inline_threshold
          << std::endl;
      }

      return callee;
    }

    if(vmm->jitted()) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: already jitted" << std::endl;
      }

      return callee;
    }

    if(vmm->no_inline_p()) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: no_inline_p() = true" << std::endl;
      }

      return callee;
    }

    // NOTE(review): unlike the checks above, a caller with too many sends
    // returns call_frame (the frame being examined) rather than callee —
    // confirm this asymmetry is intentional.
    if(vmm->inline_cache_count() > eMaxInlineSendCount) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: high send count" << std::endl;
      }

      return call_frame;
    }

    // if(vmm->required_args != vmm->total_args // has a splat
    //     || vmm->call_count < 200 // not called much
    //     || vmm->jitted() // already jitted
    //     || !vmm->no_inline_p() // method marked as not inlineable
    //   ) return callee;

    CallFrame* prev = call_frame->previous;

    if(!prev) {
      if(debug_search) {
        std::cout << "JIT: STOP. reason: toplevel method" << std::endl;
      }
      return call_frame;
    }

    // if(cur->backend_method()->total > SMALL_METHOD_SIZE) {
    //   if(debug_search) {
    //     std::cout << "JIT: STOP. reason: big method: "
    //       << cur->backend_method()->total << " > "
    //       << SMALL_METHOD_SIZE
    //       << "\n";
    //   }
    //   return call_frame;
    // }

    // if(!next || cur->backend_method()->total > SMALL_METHOD_SIZE) return call_frame;

    // Zero the callee's counter so it won't independently re-trigger
    // compilation, then step up to the caller.
    callee->cm->backend_method()->call_count = 0;

    callee = call_frame;
    call_frame = prev;
  }

  return callee;
}
// Recover the interpreter instruction pointer from a native position:
// 'pos' points into the method's opcode address table, so its offset from
// the start of that table is the ip. (Assumes 'pos' lies within
// cm->backend_method()->addresses — TODO confirm at the call sites.)
void calculate_ip(void** pos) {
  ip_ = pos - cm->backend_method()->addresses;
}
// Interpreter entry point for a method call: bumps the JIT call counter
// (possibly triggering background compilation), builds the stack scope and
// interpreter frame on the C stack, checks arguments, and runs the method.
// Returns NULL with a raised ArgumentError when the arguments don't fit.
Object* VMMethod::execute_specialized(STATE, CallFrame* previous, Dispatch& msg, Arguments& args) {
  CompiledMethod* method = as<CompiledMethod>(msg.method);
  VMMethod* v = method->backend_method();

#ifdef ENABLE_LLVM
  // A negative call_count means we've disabled usage based JIT
  // for this method.
  if(v->call_count >= 0) {
    if(v->call_count >= state->shared.config.jit_call_til_compile) {
      LLVMState* ls = LLVMState::get(state);
      ls->compile_callframe(state, method, previous);
    } else {
      v->call_count++;
    }
  }
#endif

  size_t vars_size = sizeof(StackVariables) + (v->number_of_locals * sizeof(Object*));
  StackVariables* vars = reinterpret_cast<StackVariables*>(alloca(vars_size));

  // Cache the module in the StackVariables instead of reading msg.module
  // directly: super reads that field, and a recursive call can change
  // msg.module, which would make super() look in the wrong place.
  vars->initialize(args.recv(), args.block(), msg.module, v->number_of_locals);

  InterpreterCallFrame* frame = ALLOCA_CALLFRAME(v->stack_size);

  // Argument handling failed: raise ArgumentError with the call location.
  if(ArgumentHandler::call(state, v, vars, args) == false) {
    Exception* exc =
      Exception::make_argument_error(state, v->required_args, args.total(), msg.name);
    exc->locations(state, Location::from_call_stack(state, previous));
    state->thread_state()->raise_exception(exc);
    return NULL;
  }

  frame->prepare(v->stack_size);

  frame->previous = previous;
  frame->flags = 0;
  frame->arguments = &args;
  frame->dispatch_data = &msg;
  frame->cm = method;
  frame->scope = vars;

#ifdef RBX_PROFILER
  if(unlikely(state->shared.profiling())) {
    // MethodEntry's destructor records the method exit for the profiler.
    profiler::MethodEntry profile(state, msg, args, method);
    return (*v->run)(state, v, frame);
  } else {
    return (*v->run)(state, v, frame);
  }
#else
  return (*v->run)(state, v, frame);
#endif
}