// set_breakpoint_flags must store the flag byte in an opcode word's high
// bits, leave the instruction id in the low bits, overwrite (not
// accumulate) on re-set, clear on zero, and leave operand slots alone.
void test_set_breakpoint_flags() {
  CompiledMethod* cm = CompiledMethod::create(state);
  Tuple* tup = Tuple::from(state, 1, state->symbol("@blah"));
  cm->literals(state, tup);

  // Bytecode under test: push_ivar <literal 0>; push_nil
  InstructionSequence* iseq = InstructionSequence::create(state, 3);
  iseq->opcodes()->put(state, 0, Fixnum::from(InstructionSequence::insn_push_ivar));
  iseq->opcodes()->put(state, 1, Fixnum::from(0));
  iseq->opcodes()->put(state, 2, Fixnum::from(InstructionSequence::insn_push_nil));
  cm->iseq(state, iseq);

  VMMethod* vmm = new VMMethod(state, cm);

  // A single flag bit lands in the top byte, instruction preserved below.
  vmm->set_breakpoint_flags(state, 0, 1 << 24);
  TS_ASSERT_EQUALS(vmm->opcodes[0],
      (1U << 24) | static_cast<unsigned int>(InstructionSequence::insn_push_ivar));

  // Setting again replaces the previous flag byte.
  vmm->set_breakpoint_flags(state, 0, 7 << 24);
  TS_ASSERT_EQUALS(vmm->opcodes[0],
      (7U << 24) | static_cast<unsigned int>(InstructionSequence::insn_push_ivar));

  // Zero clears all breakpoint flags.
  vmm->set_breakpoint_flags(state, 0, 0);
  TS_ASSERT_EQUALS(vmm->opcodes[0],
      static_cast<unsigned int>(InstructionSequence::insn_push_ivar));

  // Index 1 is an operand, not an instruction start: the word stays 0.
  vmm->set_breakpoint_flags(state, 1, 1);
  TS_ASSERT_EQUALS(vmm->opcodes[1], 0U);
}
// specialize() must rewrite push_ivar for an ivar with a known slot into
// push_my_offset carrying the slot's byte offset, and leave unrelated
// instructions (push_nil) untouched.
void test_specialize_transforms_ivars_to_slots() {
  CompiledMethod* cm = CompiledMethod::create(state);
  Tuple* tup = Tuple::from(state, 1, state->symbol("@blah"));
  cm->literals(state, tup);

  // Bytecode under test: push_ivar <literal 0>; push_nil
  InstructionSequence* iseq = InstructionSequence::create(state, 3);
  iseq->opcodes()->put(state, 0, Fixnum::from(InstructionSequence::insn_push_ivar));
  iseq->opcodes()->put(state, 1, Fixnum::from(0));
  iseq->opcodes()->put(state, 2, Fixnum::from(InstructionSequence::insn_push_nil));
  cm->iseq(state, iseq);

  VMMethod* vmm = new VMMethod(state, cm);

  // Fake type info mapping @blah to slot 5 stored at byte offset 33.
  Object::Info ti(ObjectType);
  ti.slots[state->symbol("@blah")->index()] = 5;
  ti.slot_locations.resize(6);
  ti.slot_locations[5] = 33;
  vmm->specialize(state, cm, &ti);

  TS_ASSERT_EQUALS(vmm->total, 3U);
  TS_ASSERT_EQUALS(vmm->opcodes[0],
      static_cast<unsigned int>(InstructionSequence::insn_push_my_offset));
  TS_ASSERT_EQUALS(vmm->opcodes[1], 33U);
  TS_ASSERT_EQUALS(vmm->opcodes[2],
      static_cast<unsigned int>(InstructionSequence::insn_push_nil));
}
// Builds the BlockEnvironment for block `cm` created at `index` inside the
// executing `call_frame`. Block VMMethods are cached per-index on the
// enclosing (caller) VMMethod so formalization and specialization happen
// only once per block site.
BlockEnvironment* BlockEnvironment::under_call_frame(STATE, CompiledMethod* cm,
    VMMethod* caller, CallFrame* call_frame, size_t index)
{
  BlockEnvironment* be = state->new_object<BlockEnvironment>(G(blokenv));

  VMMethod* vmm = caller->blocks.at(index);
  if(!vmm) {
    vmm = cm->formalize(state);
    // Propagate the caller's type specialization into the block so its
    // ivar accesses can use the same slot layout.
    if(caller->type) {
      vmm->specialize(state, caller->type);
    }
    caller->blocks[index] = vmm;
    vmm->set_parent(caller);
  }

  be->scope(state, call_frame->promote_scope(state));
  be->top_scope(state, call_frame->top_scope(state));
  be->method(state, cm);
  be->module(state, call_frame->module());
  be->local_count(state, cm->local_count());
  be->vmm = vmm;

  // Prefer the JIT-compiled entry point when one exists; otherwise the
  // block runs through the bytecode interpreter.
  BlockExecutor native = reinterpret_cast<BlockExecutor>(vmm->native_function);
  if(native) {
    be->execute = native;
  } else {
    be->execute = &BlockEnvironment::execute_interpreter;
  }
  return be;
}
// GC mark phase for CompiledMethod: marks the backend VMMethod and its JIT
// runtime data, and rewrites object pointers embedded in the inline caches
// when the collector moves them.
void CompiledMethod::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);
  mark_inliners(obj, mark);

  CompiledMethod* cm = as<CompiledMethod>(obj);
  if(!cm->backend_method_) return;

  VMMethod* vmm = cm->backend_method_;
  vmm->set_mark();

  Object* tmp;

#ifdef ENABLE_LLVM
  if(cm->jit_data()) {
    cm->jit_data()->set_mark();
    cm->jit_data()->mark_all(cm, mark);
  }

  // Each per-class specialization carries its own JIT runtime data that
  // must be kept alive as well.
  for(int i = 0; i < VMMethod::cMaxSpecializations; i++) {
    if(vmm->specializations[i].jit_data) {
      vmm->specializations[i].jit_data->set_mark();
      vmm->specializations[i].jit_data->mark_all(cm, mark);
    }
  }
#endif

  for(size_t i = 0; i < vmm->inline_cache_count(); i++) {
    InlineCache* cache = &vmm->caches[i];

    // mark.call() returns the object's new address when it moved; write
    // it back and notify the write barrier via just_set.
    MethodCacheEntry* mce = cache->cache_;
    if(mce) {
      tmp = mark.call(mce);
      if(tmp) {
        cache->cache_ = (MethodCacheEntry*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    if(cache->call_unit_) {
      tmp = mark.call(cache->call_unit_);
      if(tmp) {
        cache->call_unit_ = (CallUnit*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    // Receiver classes recorded at this call site.
    for(int i = 0; i < cTrackedICHits; i++) {
      Module* mod = cache->seen_classes_[i].klass();
      if(mod) {
        tmp = mark.call(mod);
        if(tmp) {
          cache->seen_classes_[i].set_klass(force_as<Class>(tmp));
          mark.just_set(obj, tmp);
        }
      }
    }
  }
}
// Handles the SUPER_SEND bytecode: the lookup starts in the superclass of
// the class that *holds* the current method (not the receiver's class).
// When the selector is not found, falls back to #doesNotUnderstand:.
void Interpreter::doSuperSend(long bytecodeIndex) {
  VMSymbol* signature = static_cast<VMSymbol*>(method->GetConstant(bytecodeIndex));

  VMFrame* ctxt = GetFrame()->GetOuterContext();
  VMMethod* realMethod = ctxt->GetMethod();
  VMClass* holder = realMethod->GetHolder();
  VMClass* super = holder->GetSuperClass();
  VMInvokable* invokable = static_cast<VMInvokable*>(super->LookupInvokable(signature));

  if (invokable != nullptr)
    (*invokable)(GetFrame());
  else {
    // Lookup failed: pop the arguments off the stack into an array
    // (receiver is the deepest element) and send #doesNotUnderstand:.
    long numOfArgs = Signature::GetNumberOfArguments(signature);
    vm_oop_t receiver = GetFrame()->GetStackElement(numOfArgs - 1);
    VMArray* argumentsArray = GetUniverse()->NewArray(numOfArgs);

    // Fill from the back so array order matches argument order.
    for (long i = numOfArgs - 1; i >= 0; --i) {
      vm_oop_t o = GetFrame()->Pop();
      argumentsArray->SetIndexableField(i, o);
    }
    vm_oop_t arguments[] = {signature, argumentsArray};
    AS_OBJ(receiver)->Send(doesNotUnderstand, arguments, 2);
  }
}
// GC visit-phase counterpart of Info::mark(): walks the same JIT data and
// inline-cache references but only reports them — no pointers are
// rewritten here.
void CompiledMethod::Info::visit(Object* obj, ObjectVisitor& visit) {
  auto_visit(obj, visit);
  visit_inliners(obj, visit);

  CompiledMethod* cm = as<CompiledMethod>(obj);
  if(!cm->backend_method_) return;

  VMMethod* vmm = cm->backend_method_;

#ifdef ENABLE_LLVM
  if(cm->jit_data()) {
    cm->jit_data()->visit_all(visit);
  }

  // Visit JIT data attached to each per-class specialization.
  for(int i = 0; i < VMMethod::cMaxSpecializations; i++) {
    if(vmm->specializations[i].jit_data) {
      vmm->specializations[i].jit_data->visit_all(visit);
    }
  }
#endif

  for(size_t i = 0; i < vmm->inline_cache_count(); i++) {
    InlineCache* cache = &vmm->caches[i];

    MethodCacheEntry* mce = cache->cache_;
    if(mce) visit.call(mce);

    // Receiver classes recorded at this call site.
    for(int i = 0; i < cTrackedICHits; i++) {
      Module* mod = cache->seen_classes_[i].klass();
      if(mod) visit.call(mod);
    }
  }
}
// Installs a JIT-compiled executor specialized for receiver class
// `spec_id`, keeping its runtime data (`rd`) reachable for the GC.
// The generic executor is stashed in `unspecialized` the first time so
// the dispatcher can fall back to it. Logs when all slots are taken.
void CompiledMethod::add_specialized(int spec_id, executor exec, jit::RuntimeDataHolder* rd) {
  if(!backend_method_) rubinius::bug("specializing with no backend");

  VMMethod* v = backend_method_;

  // Must happen only on the first specialization
  if(!v->unspecialized) {
    if(execute == specialized_executor) {
      rubinius::bug("cant setup unspecialized from specialized");
    }

    v->unspecialized = execute;
  }

  // Reuse an existing slot for this class id, or claim the first free
  // slot (class_id == 0).
  for(int i = 0; i < VMMethod::cMaxSpecializations; i++) {
    int id = v->specializations[i].class_id;
    if(id == 0 || id == spec_id) {
      v->specializations[i].class_id = spec_id;
      v->specializations[i].execute = exec;
      v->specializations[i].jit_data = rd;

      v->set_execute_status(VMMethod::eJIT);
      execute = specialized_executor;
      return;
    }
  }

  // No room for the specialization, this is bad.
  std::cerr << "No room for specialization!\n";
}
// Kicks off JIT compilation from a running call frame: searches the call
// chain for the best enclosing method to compile (so hot callees can be
// inlined into it) and queues it for background compilation.
void LLVMState::compile_callframe(STATE, VMMethod* start, CallFrame* call_frame, int primitive) {
  if(config().jit_inline_debug) {
    if(start) {
      llvm::errs() << "JIT: target search from "
        << symbol_cstr(start->original->scope()->module()->name())
        << "#"
        << symbol_cstr(start->original->name()) << "\n";
    } else {
      llvm::errs() << "JIT: target search from primitive\n";
    }
  }

  VMMethod* candidate = find_candidate(start, call_frame);
  if(!candidate) {
    if(config().jit_inline_debug) {
      llvm::errs() << "JIT: unable to find candidate\n";
    }
    return;
  }

  // find_candidate never returns a block; it resolves to the defining method.
  assert(!candidate->parent());

  // A negative call_count marks a method we should not compile; fall back
  // to compiling the triggering method itself (if there is one).
  if(candidate->call_count < 0) {
    if(!start) return;
    // Ignore it. compile this one.
    candidate = start;
  }

  compile_soon(state, candidate);
}
// Pop the finished frame, drop the callee's arguments from the caller's
// operand stack, and leave `result` on top in their place.
void Interpreter::popFrameAndPushResult(vm_oop_t result) {
    VMFrame* finished = popFrame();
    VMMethod* invoked = finished->GetMethod();

    long remaining = invoked->GetNumberOfArguments();
    while (remaining-- > 0) {
        GetFrame()->Pop();
    }

    GetFrame()->Push(result);
}
// Walk up the call-frame chain until we reach a VMMethod that is not a
// block (i.e. has no parent method). Returns 0 when the chain runs out
// before a non-block method is found.
static VMMethod* find_first_non_block(CallFrame* cf) {
  for(;;) {
    VMMethod* method = cf->cm->backend_method();
    if(!method->parent()) return method;

    cf = cf->previous;
    if(!cf) return 0;
  }
}
// Resolves a class by name. Handles three special cases: classes loaded
// earlier without initialization (runs their <clinit> now), array classes
// ('[' prefix), and single-character primitive type descriptors.
// Returns NULL when the class file cannot be read or loading raised an
// exception (pending on ctx in the latter case).
VMClass* BootstrapLoader::find(VMContext* ctx, const std::string& name, bool initClass){
  TR_USE(Java_Loader);

  // Previously loaded but uninitialized: promote to the initialized map
  // and execute the delayed class initializer.
  std::map<std::string,VMClass*>::iterator iter = mUninitializedClasses.find(name);
  if (iter != mUninitializedClasses.end()){
    VMClass* cls = iter->second;
    mUninitializedClasses.erase(iter);
    mClasses[name] = cls;
    //delayed class init
    unsigned idx = cls->findMethodIndex("<clinit>", "()V");
    VMMethod* mthd = cls->getMethod(idx);
    if (mthd){
      TR_INFO("Delayed execution of class init method");
      mthd->execute(ctx, -1);
    }
    return cls;
  }

  VMClass* entry = mClasses[name];
  if (entry == 0){
    //array functions
    if (name[0] == '['){
      entry = new VMArrayClass(this, name);
      mClasses[name] = entry;
      return entry;
    }
    else if (name.size() == 1){
      //primitive types
      return getPrimitiveClass(ctx, name);
    }
    //Java::ClassFile* clfile = new Java::ClassFile();
    CGE::Reader* rdr = filenameToReader(name);
    if (!rdr)
      return NULL;
    entry = new VMClass(ctx, this, *rdr);
    delete rdr;
    if (ctx->getException() != NULL){
      // Construction raised: discard the half-built class, keep the
      // exception pending on ctx for the caller.
      delete entry;
      return NULL;
    }
    // Registration map depends on whether the caller wants init now.
    if (!initClass)
      mUninitializedClasses[name] = entry;
    else
      mClasses[name] = entry;
    entry->initClass(ctx, initClass);
  }
  return entry;
}
// Clone() on a VMBlock must yield a distinct object whose fields —
// including the wrapped method and the captured context — are shallow
// copies of the original's.
void CloneObjectsTest::testCloneBlock() {
    VMSymbol* methodSymbol = GetUniverse()->NewSymbol("someMethod");
    VMMethod* method = GetUniverse()->NewMethod(methodSymbol, 0, 0);
    VMBlock* orig = GetUniverse()->NewBlock(method,
            GetUniverse()->GetInterpreter()->GetFrame(),
            method->GetNumberOfArguments());
    VMBlock* clone = orig->Clone();

    // Different identity, identical contents.
    CPPUNIT_ASSERT((intptr_t)orig != (intptr_t)clone);
    CPPUNIT_ASSERT_EQUAL_MESSAGE("class differs!!", orig->clazz, clone->clazz);
    CPPUNIT_ASSERT_EQUAL_MESSAGE("objectSize differs!!", orig->objectSize, clone->objectSize);
    CPPUNIT_ASSERT_EQUAL_MESSAGE("numberOfFields differs!!", orig->numberOfFields, clone->numberOfFields);
    CPPUNIT_ASSERT_EQUAL_MESSAGE("blockMethod differs!!", orig->blockMethod, clone->blockMethod);
    CPPUNIT_ASSERT_EQUAL_MESSAGE("context differs!!", orig->context, clone->context);
}
// Lazily JIT-compiles the VM method backing `GV` when its body has not
// been materialized yet. Always reports success (false = no error).
bool N3ModuleProvider::Materialize(GlobalValue *GV, std::string *ErrInfo) {
  Function* fn = dyn_cast<Function>(GV);
  assert(fn && "Not a function.");

  // External or already-populated functions need no materialization.
  if (fn->getLinkage() == GlobalValue::ExternalLinkage || !fn->empty())
    return false;

  // No mapping means this is a VT method — nothing to compile.
  if (VMMethod* meth = functions->lookup(fn))
    meth->compileToNative();

  return false;
}
// Walks up the call chain (bounded by cInlineMaxDepth) looking for the
// outermost small, suitable caller to use as the JIT compilation root so
// that `start` and other hot callees can be inlined into it.
// Returns the chosen frame, or `call_frame` unchanged when generic
// inlining is disabled or the starting method is too big.
CallFrame* LLVMState::find_candidate(CompiledMethod* start, CallFrame* call_frame) {
  // With generic inlining off there is no way to inline back to start,
  // so just compile the triggering frame.
  if(!config_.jit_inline_generic) {
    return call_frame;
  }

  int depth = cInlineMaxDepth;

  if(!start) {
    start = call_frame->cm;
    call_frame = call_frame->previous;
    depth--;
  }

  // A large starting method is compiled on its own; don't hunt for a caller.
  if(!call_frame || start->backend_method()->total > SMALL_METHOD_SIZE) {
    return call_frame;
  }

  CallFrame* caller = call_frame;

  while(depth-- > 0) {
    CompiledMethod* cur = call_frame->cm;
    VMMethod* vmm = cur->backend_method();

    /*
    if(call_frame->block_p()
        || vmm->required_args != vmm->total_args // has a splat
        || vmm->call_count < 200 // not called much
        || vmm->jitted() // already jitted
        || vmm->parent() // is a block
        ) return caller;
    */

    // Stop ascending when this caller is unusable as an inlining root.
    // BUG FIX: the no-inline test was inverted (`!vmm->no_inline_p()`),
    // which bailed out for every *inlinable* method and never for methods
    // actually marked no-inline — defeating the whole search.
    if(vmm->required_args != vmm->total_args // has a splat
        || vmm->call_count < 200 // not called much
        || vmm->jitted() // already jitted
        || vmm->no_inline_p() // method marked as not inlinable
        ) return caller;

    CallFrame* next = call_frame->previous;

    // Stop below a caller that is itself too big to be a root.
    if(!next || vmm->total > SMALL_METHOD_SIZE) return call_frame;

    caller = call_frame;
    call_frame = next;
  }

  return caller;
}
// Primitive for Method>>#invokeOn:with:. Rearranges the operand stack
// from [method, receiver, argsArray] into [receiver, arg0 .. argN-1] and
// then invokes the popped method on the rebuilt frame.
// REM: this is a clone with _Primitive::InvokeOn_With_
void _Method::InvokeOn_With_(Interpreter* interp, VMFrame* frame) {
    VMArray* argumentArray = static_cast<VMArray*>(frame->Pop());
    vm_oop_t receiver = static_cast<vm_oop_t>(frame->Pop());
    VMMethod* target = static_cast<VMMethod*>(frame->Pop());

    frame->Push(receiver);

    size_t count = argumentArray->GetNumberOfIndexableFields();
    for (size_t idx = 0; idx < count; ++idx) {
        frame->Push(argumentArray->GetIndexableField(idx));
    }

    target->Invoke(interp, frame);
}
// Creates the managed System.Threading.Thread object representing the
// bootstrap thread, runs its constructor, and links it to the current
// native VMThread.
static void mapInitialThread(N3* vm) {
  VMClass* cl = (VMClass*)vm->coreAssembly->loadTypeFromName(
      vm->asciizToUTF8("Thread"),
      vm->asciizToUTF8("System.Threading"),
      true, true, true, true);
  declare_gcroot(VMObject*, appThread) = cl->doNew();

  // Signature used to resolve the constructor overload.
  std::vector<VMCommonClass*> args;
  args.push_back(MSCorlib::pVoid);
  args.push_back(cl);
  args.push_back(MSCorlib::pIntPtr);
  VMMethod* ctor = cl->lookupMethod(vm->asciizToUTF8(".ctor"), args, false, false);

  VMThread* myth = VMThread::get();
  // JIT the constructor if needed and invoke it on the new object.
  ctor->compileToNative()->invokeVoid(appThread, myth);
  myth->ooo_appThread = appThread;
}
// validate_ip must accept only offsets that begin an instruction:
// 0 (push_ivar) and 2 (push_nil) are valid; 1 is push_ivar's operand.
void test_validate_ip() {
  CompiledMethod* cm = CompiledMethod::create(state);
  Tuple* tup = Tuple::from(state, 1, state->symbol("@blah"));
  cm->literals(state, tup);

  // Bytecode under test: push_ivar <literal 0>; push_nil
  InstructionSequence* iseq = InstructionSequence::create(state, 3);
  iseq->opcodes()->put(state, 0, Fixnum::from(InstructionSequence::insn_push_ivar));
  iseq->opcodes()->put(state, 1, Fixnum::from(0));
  iseq->opcodes()->put(state, 2, Fixnum::from(InstructionSequence::insn_push_nil));
  cm->iseq(state, iseq);

  VMMethod* vmm = new VMMethod(state, cm);

  TS_ASSERT_EQUALS(vmm->validate_ip(state, 0), true);
  TS_ASSERT_EQUALS(vmm->validate_ip(state, 1), false);
  TS_ASSERT_EQUALS(vmm->validate_ip(state, 2), true);
}
// Lazily builds (and caches) the VMMethod backend for this CompiledMethod,
// verifying the bytecode first. Thread-safe: double-checked under
// hard_lock with explicit memory barriers so no thread observes a
// partially constructed VMMethod.
// Returns 0 on verification failure; `reason`/`ip` (when non-NULL)
// receive the failure description and offset.
VMMethod* CompiledMethod::internalize(STATE, GCToken gct, const char** reason, int* ip) {
  VMMethod* vmm = backend_method_;

  atomic::memory_barrier();

  // Fast path: already internalized, no lock needed.
  if(vmm) return vmm;

  CompiledMethod* self = this;
  OnStack<1> os(state, self);

  self->hard_lock(state, gct);

  // Re-check under the lock: another thread may have finished while we
  // were waiting.
  vmm = self->backend_method_;
  if(!vmm) {
    {
      BytecodeVerification bv(self);
      if(!bv.verify(state)) {
        if(reason) *reason = bv.failure_reason();
        if(ip) *ip = bv.failure_ip();
        std::cerr << "Error validating bytecode: " << bv.failure_reason() << "\n";
        // BUG FIX: release the lock before bailing out. The original
        // returned while still holding hard_lock, deadlocking any later
        // internalize attempt on this method.
        self->hard_unlock(state, gct);
        return 0;
      }
    }

    vmm = new VMMethod(state, self);

    if(self->resolve_primitive(state)) {
      vmm->fallback = execute;
    } else {
      vmm->setup_argument_handler(self);
    }

    // We need to have an explicit memory barrier here, because we need to
    // be sure that vmm is completely initialized before it's set.
    // Otherwise another thread might see a partially initialized
    // VMMethod.
    atomic::memory_barrier();
    backend_method_ = vmm;
  }
  self->hard_unlock(state, gct);
  return vmm;
}
// Clone() on a VMMethod must yield a distinct object with all metadata
// fields (locals, bytecode length, stack depth, arg/constant counts,
// holder, signature) shallow-copied from the original.
void CloneObjectsTest::testCloneMethod() {
    VMSymbol* methodSymbol = GetUniverse()->NewSymbol("myMethod");
    VMMethod* orig = GetUniverse()->NewMethod(methodSymbol, 0, 0);
    VMMethod* clone = orig->Clone();

    // Different identity, identical contents.
    CPPUNIT_ASSERT((intptr_t)orig != (intptr_t)clone);
    CPPUNIT_ASSERT_EQUAL_MESSAGE("class differs!!", orig->clazz, clone->clazz);
    CPPUNIT_ASSERT_EQUAL_MESSAGE("objectSize differs!!", orig->objectSize, clone->objectSize);
    CPPUNIT_ASSERT_EQUAL_MESSAGE("numberOfFields differs!!", orig->numberOfFields, clone->numberOfFields);
    // Tagged-integer fields are compared by decoded value.
    CPPUNIT_ASSERT_EQUAL_MESSAGE("numberOfLocals differs!!",
            INT_VAL(load_ptr(orig->numberOfLocals)), INT_VAL(load_ptr(clone->numberOfLocals)));
    CPPUNIT_ASSERT_EQUAL_MESSAGE("bcLength differs!!",
            INT_VAL(load_ptr(orig->bcLength)), INT_VAL(load_ptr(clone->bcLength)));
    CPPUNIT_ASSERT_EQUAL_MESSAGE("maximumNumberOfStackElements differs!!",
            INT_VAL(load_ptr(orig->maximumNumberOfStackElements)),
            INT_VAL(load_ptr(clone->maximumNumberOfStackElements)));
    CPPUNIT_ASSERT_EQUAL_MESSAGE("numberOfArguments differs!!",
            INT_VAL(load_ptr(orig->numberOfArguments)), INT_VAL(load_ptr(clone->numberOfArguments)));
    CPPUNIT_ASSERT_EQUAL_MESSAGE("numberOfConstants differs!!",
            INT_VAL(load_ptr(orig->numberOfConstants)), INT_VAL(load_ptr(clone->numberOfConstants)));
    CPPUNIT_ASSERT_EQUAL_MESSAGE("GetHolder() differs!!", orig->GetHolder(), clone->GetHolder());
    CPPUNIT_ASSERT_EQUAL_MESSAGE("GetSignature() differs!!", orig->GetSignature(), clone->GetSignature());
}
// get_breakpoint_flags must return the high-byte flags embedded in an
// instruction word, and 0 both for flag-free instructions and for
// operand positions.
void test_get_breakpoint_flags() {
  CompiledMethod* cm = CompiledMethod::create(state);
  Tuple* tup = Tuple::from(state, 1, state->symbol("@blah"));
  cm->literals(state, tup);

  // push_nil at index 2 is pre-seeded with flag value 4 in its top byte.
  InstructionSequence* iseq = InstructionSequence::create(state, 3);
  iseq->opcodes()->put(state, 0, Fixnum::from(InstructionSequence::insn_push_ivar));
  iseq->opcodes()->put(state, 1, Fixnum::from(0));
  iseq->opcodes()->put(state, 2, Fixnum::from(4 << 24 | InstructionSequence::insn_push_nil));
  cm->iseq(state, iseq);

  VMMethod* vmm = new VMMethod(state, cm);

  TS_ASSERT_EQUALS(vmm->get_breakpoint_flags(state, 0), 0U);
  TS_ASSERT_EQUALS(vmm->get_breakpoint_flags(state, 2), (4U << 24));
  // Index 1 is an operand slot: no flags reported.
  TS_ASSERT_EQUALS(vmm->get_breakpoint_flags(state, 1), 0U);
}
// Returns the VMClass for a primitive type descriptor, creating, caching
// and initializing it on first request.
VMClass* BootstrapLoader::getPrimitiveClass(VMContext* ctx, std::string name){
  VMClass* entry = mClasses[name];
  if (entry == 0){
    entry = new VMClass(this);
    entry->setName(name);
    // Cache before init so recursive lookups find it.
    mClasses[name] = entry;
    //entry->print(std::cout);
    //entry->initFields(ctx);

    // Wire up the java/lang/Class mirror and run its <init> on it.
    VMClass* cls = find(ctx, "java/lang/Class");
    VMMethod* clsmthd = cls->getMethod(cls->findMethodIndex("<init>", "()V"));
    entry->init(ctx, cls);
    ctx->push((VMObject*)cls);
    clsmthd->execute(ctx, -1);
  }
  return entry;
}
// Invokes a block: when built with the JIT, prefers the block's compiled
// native function; otherwise (or when no native code exists) runs the
// bytecode interpreter. Returns 0 after raising an internal error if the
// block has no valid bytecode method.
Object* BlockEnvironment::invoke(STATE, CallFrame* previous,
    BlockEnvironment* const env, Arguments& args,
    BlockInvocation& invocation)
{
#ifdef ENABLE_LLVM
  VMMethod* vmm = env->vmmethod(state);
  if(!vmm) {
    Exception::internal_error(state, previous, "invalid bytecode method");
    return 0;
  }

  if(void* ptr = vmm->native_function()) {
    return (*((BlockExecutor)ptr))(state, previous, env, args, invocation);
  }
#endif

  return execute_interpreter(state, previous, env, args, invocation);
}
// Creates the BlockEnvironment for block `cm` being defined inside
// `call_frame`. Returns 0 (after raising an internal error) when the
// block's bytecode fails to internalize.
BlockEnvironment* BlockEnvironment::under_call_frame(STATE, CompiledMethod* cm,
    VMMethod* caller, CallFrame* call_frame, size_t index)
{
  BlockEnvironment* be = state->new_object<BlockEnvironment>(G(blokenv));

  VMMethod* vmm = cm->internalize(state);
  if(!vmm) {
    Exception::internal_error(state, call_frame, "invalid bytecode method");
    return 0;
  }

  // Link the block to its enclosing method so scope lookups walk outward.
  vmm->set_parent(caller);

  be->scope(state, call_frame->promote_scope(state));
  be->top_scope(state, call_frame->top_scope(state));
  be->code(state, cm);
  be->module(state, call_frame->module());
  return be;
}
// Handles PUSH_BLOCK. Peephole: when the next bytecode is a send of
// #ifTrue:/#ifFalse: whose receiver already fails the test, push nil and
// skip allocating the block object altogether.
void Interpreter::doPushBlock(long bytecodeIndex) {
    // Short cut the negative case of #ifTrue: and #ifFalse:
    if (currentBytecodes[bytecodeIndexGlobal] == BC_SEND) {
        if (GetFrame()->GetStackElement(0) == load_ptr(falseObject) &&
                method->GetConstant(bytecodeIndexGlobal) == load_ptr(symbolIfTrue)) {
            GetFrame()->Push(load_ptr(nilObject));
            return;
        } else if (GetFrame()->GetStackElement(0) == load_ptr(trueObject) &&
                method->GetConstant(bytecodeIndexGlobal) == load_ptr(symbolIfFalse)) {
            GetFrame()->Push(load_ptr(nilObject));
            return;
        }
    }

    // Normal case: wrap the literal method in a block capturing this frame.
    VMMethod* blockMethod = static_cast<VMMethod*>(method->GetConstant(bytecodeIndex));

    long numOfArgs = blockMethod->GetNumberOfArguments();

    GetFrame()->Push(GetUniverse()->NewBlock(blockMethod, GetFrame(), numOfArgs));
}
// MethodContext-era variant: builds a BlockEnvironment for block `cm`
// defined at `index` in the active context. Block VMMethods are cached
// per-index on the active context's VMMethod.
BlockEnvironment* BlockEnvironment::under_context(STATE, CompiledMethod* cm,
    MethodContext* parent, MethodContext* active, size_t index)
{
  BlockEnvironment* be = (BlockEnvironment*)state->new_object(G(blokenv));

  VMMethod* vmm;
  if((vmm = active->vmm->blocks[index]) == NULL) {
    vmm = new VMMethod(state, cm);
    // Inherit the enclosing method's type specialization if present.
    if(active->vmm->type) {
      vmm->specialize(state, active->vmm->type);
    }
    active->vmm->blocks[index] = vmm;
  }

  be->home(state, parent);
  be->home_block(state, active);
  be->method(state, cm);
  be->local_count(state, cm->local_count());
  be->vmm = vmm;
  return be;
}
// Builds a Location object describing `call_frame` for backtraces,
// delegating to the NativeMethodFrame overload for native frames.
// Optionally captures the frame's variable scope.
Location* Location::create(STATE, CallFrame* call_frame, bool include_variables) {
  if(NativeMethodFrame* nmf = call_frame->native_method_frame()) {
    return create(state, nmf);
  }

  Location* loc = state->new_object<Location>(G(location));
  loc->method_module(state, call_frame->module());
  loc->receiver(state, call_frame->self());
  loc->method(state, call_frame->cm);
  loc->ip(state, Fixnum::from(call_frame->ip()));
  loc->flags(state, Fixnum::from(0));

  if(call_frame->is_block_p(state)) {
    // Blocks report the name of the method they were defined in.
    loc->name(state, call_frame->top_scope(state)->method()->name());
    loc->set_is_block(state);
  } else {
    loc->name(state, call_frame->name());
  }

  VMMethod* vmm = call_frame->cm->backend_method();
  if(vmm && vmm->jitted()) {
    loc->set_is_jit(state);
  }

  if(include_variables) {
    // Use promote_scope because it can figure out of the generated
    // VariableScope should be isolated by default (true atm for JITd
    // frames)
    loc->variables(state, call_frame->promote_scope(state));
  }

  loc->static_scope(state, call_frame->static_scope());

  return loc;
}
// Handles RETURN_NON_LOCAL: unwinds down to the block's home context and
// returns from it. If the home context has already returned (the block
// escaped), sends #escapedBlock: to the home's sender instead.
void Interpreter::doReturnNonLocal() {
    vm_oop_t result = GetFrame()->Pop();

    VMFrame* context = GetFrame()->GetOuterContext();

    if (!context->HasPreviousFrame()) {
        // Home frame is gone: the block escaped its dynamic extent.
        VMBlock* block = static_cast<VMBlock*>(GetFrame()->GetArgument(0, 0));
        VMFrame* prevFrame = GetFrame()->GetPreviousFrame();
        VMFrame* outerContext = prevFrame->GetOuterContext();
        vm_oop_t sender = outerContext->GetArgument(0, 0);
        vm_oop_t arguments[] = {block};

        popFrame();

        // Pop old arguments from stack
        VMMethod* method = GetFrame()->GetMethod();
        long numberOfArgs = method->GetNumberOfArguments();
        for (long i = 0; i < numberOfArgs; ++i) GetFrame()->Pop();

        // check if current frame is big enough for this unplanned send
        // #escapedBlock: needs 2 slots, one for self, and one for the block
        long additionalStackSlots = 2 - GetFrame()->RemainingStackSize();
        if (additionalStackSlots > 0) {
            GetFrame()->SetBytecodeIndex(bytecodeIndexGlobal);
            // copy current frame into a bigger one, and replace it
            SetFrame(VMFrame::EmergencyFrameFrom(GetFrame(), additionalStackSlots));
        }

        AS_OBJ(sender)->Send(escapedBlock, arguments, 1);
        return;
    }

    // Normal path: discard every frame above the home context, then
    // return from the home context itself.
    while (GetFrame() != context) popFrame();

    popFrameAndPushResult(result);
}
// Older-API variant: builds a Location for `call_frame` using explicit
// Qtrue/Qfalse flag setters. Note the ip is recorded as ip() - 1 (the
// instruction already executed).
Location* Location::create(STATE, CallFrame* call_frame) {
  Location* loc = state->new_object<Location>(G(location));
  loc->method_module(state, call_frame->module());
  loc->receiver(state, call_frame->self());
  loc->method(state, call_frame->cm);
  loc->ip(state, Fixnum::from(call_frame->ip() - 1));

  if(call_frame->is_block_p(state)) {
    // Blocks report the name of the method they were defined in.
    loc->name(state, call_frame->top_scope(state)->method()->name());
    loc->is_block(state, Qtrue);
  } else {
    loc->name(state, call_frame->name());
    loc->is_block(state, Qfalse);
  }

  VMMethod* vmm = call_frame->cm->backend_method();
  if(vmm && vmm->jitted()) {
    loc->is_jit(state, Qtrue);
  } else {
    loc->is_jit(state, Qfalse);
  }

  return loc;
}
// VMMethod-returning variant: walks up the call chain (bounded by
// cInlineMaxDepth) to choose the best method to JIT-compile so `start`
// and other hot callees can be inlined into it. Blocks may be resolved
// to their defining methods when jit_inline_blocks is enabled.
VMMethod* LLVMState::find_candidate(VMMethod* start, CallFrame* call_frame) {
  VMMethod* found = start;
  int depth = 0;
  bool consider_block_parents = config_.jit_inline_blocks;

  // No upper call_frames or generic inlining is off, use the start.
  // With generic inlining off, there is no way to inline back to start,
  // so we don't both trying.
  if(!config_.jit_inline_generic) {
    if(!start) start = call_frame->cm->backend_method();
    return find_first_non_block(call_frame);
  }

  /*
  std::cerr << "JIT target search:\n";

  if(start) {
    show_method(this, start);
  } else {
    std::cerr << " <primitive>\n";
  }
  */

  VMMethod* next = call_frame->cm->backend_method();
  VMMethod* parent = 0;

  while(depth < cInlineMaxDepth) {
    // show_method(this, next);

    // Basic requirements
    if(next->required_args != next->total_args
        || next->call_count < 200
        || next->jitted()) break;

    // Jump to defining methods of blocks?
    parent = next->parent();
    if(parent) {
      if(consider_block_parents) {
        // See if parent is in this call_frame chain properly..
        if(CallFrame* pf = validate_block_parent(call_frame, parent)) {
          depth++;

          // Method parents are valuable, so always use them if we find them.
          if(!parent->parent()) {
            found = parent;
          }

          // show_method(this, parent, " parent!");

          call_frame = pf;
        }
      } else {
        // We hit a block, just bail.
        break;
      }
    } else {
      found = next;
    }

    call_frame = call_frame->previous;
    if(!call_frame) break;
    next = call_frame->cm->backend_method();

    depth++;
  }

  // Nothing suitable found: fall back to the first caller examined,
  // provided it is not itself a block.
  if(!found && !next->parent()) return next;

  return found;
}
// Older-revision GC mark for CompiledMethod: rewrites the module/method/
// klass_ pointers cached in each inline cache, plus call units, recorded
// receiver classes, and the VMMethod's indirect literal slots.
void CompiledMethod::Info::mark(Object* obj, ObjectMark& mark) {
  auto_mark(obj, mark);
  mark_inliners(obj, mark);

  CompiledMethod* cm = as<CompiledMethod>(obj);
  if(!cm->backend_method_) return;

  VMMethod* vmm = cm->backend_method_;
  vmm->set_mark();

  Object* tmp;

#ifdef ENABLE_LLVM
  if(cm->jit_data()) {
    cm->jit_data()->set_mark();
    cm->jit_data()->mark_all(cm, mark);
  }
#endif

  for(size_t i = 0; i < vmm->inline_cache_count(); i++) {
    InlineCache* cache = &vmm->caches[i];

    // mark.call() returns the new address when the object moved; write
    // it back and notify the write barrier via just_set.
    if(cache->module) {
      tmp = mark.call(cache->module);
      if(tmp) {
        cache->module = (Module*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    if(cache->method) {
      tmp = mark.call(cache->method);
      if(tmp) {
        cache->method = (Executable*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    if(cache->klass_) {
      tmp = mark.call(cache->klass_);
      if(tmp) {
        cache->klass_ = (Class*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    if(cache->call_unit_) {
      tmp = mark.call(cache->call_unit_);
      if(tmp) {
        cache->call_unit_ = (CallUnit*)tmp;
        mark.just_set(obj, tmp);
      }
    }

    // Receiver classes recorded at this call site.
    for(int i = 0; i < cTrackedICHits; i++) {
      Module* mod = cache->seen_classes_[i].klass();
      if(mod) {
        tmp = mark.call(mod);
        if(tmp) {
          cache->seen_classes_[i].set_klass(force_as<Class>(tmp));
          mark.just_set(obj, tmp);
        }
      }
    }
  }

  // Object pointers embedded indirectly in generated code.
  for(IndirectLiterals::iterator i = vmm->indirect_literals().begin();
      i != vmm->indirect_literals().end();
      ++i) {
    Object** ptr = (*i);
    if((tmp = mark.call(*ptr)) != NULL) {
      *ptr = tmp;
      mark.just_set(obj, tmp);
    }
  }
}