// Generate an ExceptionHandlerTable from the exception handler
// information accumulated during the compilation.
void Compilation::generate_exception_handler_table() {
  ExceptionInfoList* info_list = exception_info_list();

  // Nothing to emit if no handlers were recorded during compilation.
  if (info_list->length() == 0) {
    return;
  }

  // allocate some arrays for use by the collection code.
  const int num_handlers = 5;
  GrowableArray<intptr_t>* bcis = new GrowableArray<intptr_t>(num_handlers);
  GrowableArray<intptr_t>* scope_depths = new GrowableArray<intptr_t>(num_handlers);
  GrowableArray<intptr_t>* pcos = new GrowableArray<intptr_t>(num_handlers);

  // One subtable is emitted per call site (pco) that has handlers.
  for (int i = 0; i < info_list->length(); i++) {
    ExceptionInfo* info = info_list->at(i);
    XHandlers* handlers = info->exception_handlers();

    // empty the arrays (reused across iterations to avoid reallocation)
    bcis->trunc_to(0);
    scope_depths->trunc_to(0);
    pcos->trunc_to(0);

    // NOTE: the inner 'i' deliberately shadows the outer loop index;
    // it is the handler index within this call site's handler list.
    for (int i = 0; i < handlers->length(); i++) {
      XHandler* handler = handlers->handler_at(i);
      assert(handler->entry_pco() != -1, "must have been generated");

      int e = bcis->find(handler->handler_bci());
      if (e >= 0 && scope_depths->at(e) == handler->scope_count()) {
        // two different handlers are declared to dispatch to the same
        // catch bci. During parsing we created edges for each
        // handler but we really only need one. The exception handler
        // table will also get unhappy if we try to declare both since
        // it's nonsensical. Just skip this handler.
        continue;
      }

      bcis->append(handler->handler_bci());
      if (handler->handler_bci() == -1) {
        // insert a wildcard handler at scope depth 0 so that the
        // exception lookup logic will find it.
        scope_depths->append(0);
      } else {
        scope_depths->append(handler->scope_count());
      }
      pcos->append(handler->entry_pco());

      // stop processing once we hit a catch any
      // (a catch-all subsumes everything after it, so it must be last)
      if (handler->is_catch_all()) {
        assert(i == handlers->length() - 1, "catch all must be last handler");
      }
    }
    exception_handler_table()->add_subtable(info->pco(), bcis, scope_depths, pcos);
  }
}
// Walk the code cache and build the list of non-nmethod code blob
// descriptors. Stubs are appended first so that do_blob's duplicate
// filter suppresses their enclosing BufferBlobs.
void CodeBlobCollector::collect() {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(_global_code_blobs == NULL, "checking");

  // create the global list (C-heap allocated: outlives the resource mark)
  _global_code_blobs = new (ResourceObj::C_HEAP, mtInternal) GrowableArray<JvmtiCodeBlobDesc*>(50,true);

  // iterate over the stub code descriptors and put them in the list first.
  for (StubCodeDesc* desc = StubCodeDesc::first(); desc != NULL; desc = StubCodeDesc::next(desc)) {
    _global_code_blobs->append(new JvmtiCodeBlobDesc(desc->name(), desc->begin(), desc->end()));
  }

  // Vtable stubs are not described with StubCodeDesc,
  // process them separately
  VtableStubs::vtable_stub_do(do_vtable_stub);

  // next iterate over all the non-nmethod code blobs and add them to
  // the list - as noted above this will filter out duplicates and
  // enclosing blobs.
  CodeCache::blobs_do(do_blob);

  // make the global list the instance list so that it can be used
  // for other iterations.
  _code_blobs = _global_code_blobs;
  _global_code_blobs = NULL;
}
// Wrap a raw histogram into HistoEntry objects and return them sorted
// by HistoEntry::compare.
static GrowableArray<HistoEntry*>* sorted_array(int* array, int length) {
  GrowableArray<HistoEntry*>* entries = new GrowableArray<HistoEntry*>(length);
  for (int idx = length - 1; idx >= 0; idx--) {
    entries->append(new HistoEntry(idx, array[idx]));
  }
  entries->sort(HistoEntry::compare);
  return entries;
}
// Return the monitors actually held (locked) in this frame, oldest to
// youngest. Eliminated and unowned monitors are skipped, as is the one
// monitor the thread is currently blocked entering or waiting on.
GrowableArray<MonitorInfo*>* javaVFrame::locked_monitors() {
  assert(SafepointSynchronize::is_at_safepoint() || JavaThread::current() == thread(),
         "must be at safepoint or it's a java frame of the current thread");

  GrowableArray<MonitorInfo*>* mons = monitors();
  GrowableArray<MonitorInfo*>* result = new GrowableArray<MonitorInfo*>(mons->length());
  if (mons->is_empty()) return result;

  bool found_first_monitor = false;
  // The monitor the thread is blocked on (contended enter) or waiting on,
  // if any - it is not "held" and must be excluded below.
  ObjectMonitor *pending_monitor = thread()->current_pending_monitor();
  ObjectMonitor *waiting_monitor = thread()->current_waiting_monitor();
  oop pending_obj = (pending_monitor != NULL ? (oop) pending_monitor->object() : (oop) NULL);
  oop waiting_obj = (waiting_monitor != NULL ? (oop) waiting_monitor->object() : (oop) NULL);

  // Walk from the last (innermost) monitor backwards.
  for (int index = (mons->length()-1); index >= 0; index--) {
    MonitorInfo* monitor = mons->at(index);
    if (monitor->eliminated() && is_compiled_frame()) continue; // skip eliminated monitor
    oop obj = monitor->owner();
    if (obj == NULL) continue; // skip unowned monitor
    //
    // Skip the monitor that the thread is blocked to enter or waiting on
    // (only the innermost such monitor can be in that state, hence the flag)
    //
    if (!found_first_monitor && (obj == pending_obj || obj == waiting_obj)) {
      continue;
    }
    found_first_monitor = true;
    result->append(monitor);
  }
  return result;
}
// Callback for CodeCache::blobs_do: record one non-nmethod code blob,
// skipping vtable stub chunks and blobs that enclose an already-seen stub.
void CodeBlobCollector::do_blob(CodeBlob* cb) {
  // nmethods are reported through a different mechanism - ignore them
  if (cb->is_nmethod()) {
    return;
  }
  // exclude VtableStubs, which are processed separately
  if (cb->is_buffer_blob() && strcmp(cb->name(), "vtable chunks") == 0) {
    return;
  }

  // Duplicate filter: stubs were inserted into the list before their
  // enclosing BufferBlobs, so a blob whose start address is already
  // present is an enclosing blob and must be dropped.
  address start = cb->code_begin();
  int count = _global_code_blobs->length();
  for (int idx = 0; idx < count; idx++) {
    if (start == _global_code_blobs->at(idx)->code_begin()) {
      return;
    }
  }

  // record the CodeBlob details as a JvmtiCodeBlobDesc
  _global_code_blobs->append(new JvmtiCodeBlobDesc(cb->name(), cb->code_begin(), cb->code_end()));
}
// Returns MonitorInfos for all objects locked on this thread in youngest to oldest order static GrowableArray<MonitorInfo*>* get_or_compute_monitor_info(JavaThread* thread) { GrowableArray<MonitorInfo*>* info = thread->cached_monitor_info(); if (info != NULL) { return info; } info = new GrowableArray<MonitorInfo*>(); // It's possible for the thread to not have any Java frames on it, // i.e., if it's the main thread and it's already returned from main() if (thread->has_last_Java_frame()) { RegisterMap rm(thread); for (javaVFrame* vf = thread->last_java_vframe(&rm); vf != NULL; vf = vf->java_sender()) { GrowableArray<MonitorInfo*> *monitors = vf->monitors(); if (monitors != NULL) { int len = monitors->length(); // Walk monitors youngest to oldest for (int i = len - 1; i >= 0; i--) { MonitorInfo* mon_info = monitors->at(i); if (mon_info->eliminated()) continue; oop owner = mon_info->owner(); if (owner != NULL) { info->append(mon_info); } } } } } thread->set_cached_monitor_info(info); return info; }
// Register this thread in the global 'threads' list, reusing a vacated
// (NULL) slot when one exists so slot indices stay stable.
Thread(HANDLE handle, int id, void* stackLimit)
    : thread_handle(handle), thread_id(id), stack_limit(stackLimit) {
  int slot = threads->find(NULL, equals);
  if (slot >= 0) {
    threads->at_put(slot, this);
  } else {
    threads->append(this);
  }
}
// Build the list of non-nmethod code blob descriptors. Stubs go first so
// that duplicate filtering can suppress enclosing blobs.
// NOTE: the code-cache walk is still Unimplemented() in this port.
void CodeBlobCollector::collect() {
  assert_locked_or_safepoint(CodeCache_lock);
  assert(_global_code_blobs == NULL, "checking");

  // create the global list (C-heap allocated: outlives the resource mark)
  _global_code_blobs = new (ResourceObj::C_HEAP) GrowableArray<JvmtiCodeBlobDesc*>(50,true);

  // iterate over the stub code descriptors and put them in the list first.
  // Descriptors are looked up by index; a NULL result terminates the scan.
  int index = 0;
  StubCodeDesc* desc;
  while ((desc = StubCodeDesc::desc_for_index(++index)) != NULL) {
    _global_code_blobs->append(new JvmtiCodeBlobDesc(desc->name(), desc->begin(), desc->end()));
  }

  // next iterate over all the non-nmethod code blobs and add them to
  // the list - as noted above this will filter out duplicates and
  // enclosing blobs.
  Unimplemented(); //CodeCache::blobs_do(do_blob);

  // make the global list the instance list so that it can be used
  // for other iterations.
  _code_blobs = _global_code_blobs;
  _global_code_blobs = NULL;
}
// Collect the receiver klasses recorded in this inline cache, in
// iteration order.
GrowableArray<klassOop>* IC::receiver_klasses() const {
  GrowableArray<klassOop>* klasses = new GrowableArray<klassOop>();
  IC_Iterator* it = iterator();
  for (it->init_iteration(); !it->at_end(); it->advance()) {
    klasses->append(it->klass());
  }
  return klasses;
}
// Append one loaded class (as a KlassHandle) to the _loaded_classes list.
static void add_loaded_class(klassOop k) {
  // FIXME: For now - don't include array klasses
  // The spec is unclear at this point to count array klasses or not
  // and also indirect creation of array of super class and secondaries
  //
  // for (klassOop l = k; l != NULL; l = Klass::cast(l)->array_klass_or_null()) {
  //  KlassHandle h(_current_thread, l);
  //  _loaded_classes->append(h);
  // }
  KlassHandle h(_current_thread, k);
  _loaded_classes->append(h);
}
// Put node on worklist if it is (or was) not there. inline void add_to_worklist(PointsToNode* pt) { PointsToNode* ptf = pt; uint pidx_bias = 0; if (PointsToNode::is_base_use(pt)) { // Create a separate entry in _in_worklist for a marked base edge // because _worklist may have an entry for a normal edge pointing // to the same node. To separate them use _next_pidx as bias. ptf = PointsToNode::get_use_node(pt)->as_Field(); pidx_bias = _next_pidx; } if (!_in_worklist.test_set(ptf->pidx() + pidx_bias)) { _worklist.append(pt); } }
// Callback for a code-cache walk: record one non-nmethod code blob,
// synthesizing a name when the blob type carries one.
void CodeBlobCollector::do_blob(CodeBlob* cb) {
  // ignore nmethods
  if (cb->is_nmethod()) {
    return;
  }

  // check if this starting address has been seen already - the
  // assumption is that stubs are inserted into the list before the
  // enclosing BufferBlobs.
  address addr = cb->instructions_begin();
  for (int i=0; i<_global_code_blobs->length(); i++) {
    JvmtiCodeBlobDesc* scb = _global_code_blobs->at(i);
    if (addr == scb->code_begin()) {
      return;
    }
  }

  // we must name the CodeBlob - some CodeBlobs already have names :-
  // - stubs used by compiled code to call a (static) C++ runtime routine
  // - non-relocatable machine code such as the interpreter, stubroutines, etc.
  // - various singleton blobs
  //
  // others are unnamed so we create a name :-
  // - OSR adapter (interpreter frame that has been on-stack replaced)
  // - I2C and C2I adapters
  // Kept as independent if's (not else-if) to preserve original semantics;
  // blob kinds are expected to be mutually exclusive, so at most one fires.
  const char* name = NULL;
  if (cb->is_runtime_stub()) {
    name = ((RuntimeStub*)cb)->name();
  }
  if (cb->is_buffer_blob()) {
    name = ((BufferBlob*)cb)->name();
  }
  if (cb->is_deoptimization_stub() || cb->is_safepoint_stub()) {
    name = ((SingletonBlob*)cb)->name();
  }
  if (cb->is_uncommon_trap_stub() || cb->is_exception_stub()) {
    name = ((SingletonBlob*)cb)->name();
  }

  // record the CodeBlob details as a JvmtiCodeBlobDesc
  JvmtiCodeBlobDesc* scb = new JvmtiCodeBlobDesc(name, cb->instructions_begin(), cb->instructions_end());
  _global_code_blobs->append(scb);
}
void CompiledLoop::hoistTypeTests() { // collect all type tests that can be hoisted out of the loop _loopHeader->_tests = _hoistableTests = new GrowableArray<HoistedTypeTest*>(10); TTHoister tth(this, _hoistableTests); _scope->subScopesDo(&tth); // add type tests to loop header for testing (avoid duplicates) // (currently quadratic algorithm but there should be very few) GrowableArray<HoistedTypeTest*>* headerTests = new GrowableArray<HoistedTypeTest*>(_hoistableTests->length()); for (int i = _hoistableTests->length() - 1; i >= 0; i--) { HoistedTypeTest* t = _hoistableTests->at(i); PReg* tested = t->testedPR; for (int j = headerTests->length() - 1; j >= 0; j--) { if (headerTests->at(j)->testedPR == tested) { // already testing this PReg if (isEquivalentType(headerTests->at(j)->klasses, t->klasses)) { // nothing to do } else { // Whoa! The same PReg is tested for different types in different places. // Possible but rare. headerTests->at(j)->invalid = t->invalid = true; if (WizardMode) { compiler_warning("CompiledLoop::hoistTypeTests: PReg tested for different types\n"); t->print(); headerTests->at(j)->print(); } } tested = NULL; // don't add it to list break; } } if (tested) headerTests->append(t); } // now delete all hoisted type tests from loop body for (i = _hoistableTests->length() - 1; i >= 0; i--) { HoistedTypeTest* t = _hoistableTests->at(i); if (!t->invalid) { t->node->assert_preg_type(t->testedPR, t->klasses, _loopHeader); } } if (!_loopHeader->isActivated()) _loopHeader->activate(); }
// Record one live nmethod: build its address/location map and append a
// descriptor to the global list.
void nmethodCollector::do_nmethod(nmethod* nm) {
  // zombies carry no reportable code - skip them
  if (!nm->is_alive()) {
    return;
  }
  assert(nm->method() != NULL, "checking");

  // create the location map for the nmethod.
  jvmtiAddrLocationMap* map;
  jint map_length;
  JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nm, &map, &map_length);

  // record the nmethod details
  methodHandle mh(nm->method());
  _global_nmethods->append(new nmethodDesc(mh, nm->code_begin(), nm->code_end(), map, map_length));
}
// Callback for a code-cache walk: record one non-methodCode blob.
void CodeBlobCollector::do_blob(CodeBlob* cb) {
  // compiled-method code is reported elsewhere - skip it here
  if(cb->is_methodCode()){
    return;
  }

  // check if this starting address has been seen already - the
  // assumption is that stubs are inserted into the list before the
  // enclosing BufferBlobs.
  address addr=cb->code_begins();
  for (int i=0; i<_global_code_blobs->length(); i++) {
    JvmtiCodeBlobDesc* scb = _global_code_blobs->at(i);
    if (addr == scb->code_begin()) {
      return;
    }
  }

  // we must name the CodeBlob - some CodeBlobs already have names :-
  // - stubs used by compiled code to call a (static) C++ runtime routine
  // - non-relocatable machine code such as the interpreter, stubroutines, etc.
  // - various singleton blobs
  //
  // others are unnamed so we create a name :-
  // - OSR adapter (interpreter frame that has been on-stack replaced)
  // - I2C and C2I adapters
  //
  // CodeBlob::name() should return any defined name string, first.
  // NOTE(review): since methodCode blobs were filtered out above,
  // methodname() is expected to be NULL here; a non-NULL result trips
  // ShouldNotReachHere() below - confirm that invariant against the port.
  const char* name = cb->methodname(); // returns NULL or methodname+signature
  if (! name ) {
    name = cb->name(); // returns generic name...
  } else {
    ShouldNotReachHere();
  }

  // record the CodeBlob details as a JvmtiCodeBlobDesc
  JvmtiCodeBlobDesc*scb=new JvmtiCodeBlobDesc(name,cb->code_begins(), cb->code_ends());
  _global_code_blobs->append(scb);
}
// Visit one inlined scope: find its type tests whose tested PReg is not
// defined inside the loop, and queue them as hoistable.
void do_it(InlinedScope* s) {
  GrowableArray<NonTrivialNode*>* tests = s->typeTests();
  int len = tests->length();
  for (int i = 0; i < len; i++) {
    NonTrivialNode* n = tests->at(i);
    assert(n->doesTypeTests(), "shouldn't be in list");
    if (n->deleted) continue;
    if (n->hasUnknownCode()) continue; // can't optimize - expects other klasses, so would get uncommon trap at run-time
    if (!theLoop->isInLoop(n)) continue; // not in this loop

    // A single node may test several PRegs; collect them all.
    GrowableArray<PReg*> regs(4);
    GrowableArray<GrowableArray<klassOop>*> klasses(4);
    n->collectTypeTests(regs, klasses);
    for (int j = 0; j < regs.length(); j++) {
      PReg* r = regs.at(j);
      if (theLoop->defsInLoop(r) == 0) {
        // PReg is loop-invariant: this test can be hoisted
        if (CompilerDebug || PrintLoopOpts) cout(PrintLoopOpts)->print("*moving type test of %s at N%d out of loop\n", r->name(), n->id());
        hoistableTests->append(new HoistedTypeTest(n, r, klasses.at(j)));
      }
    }
  }
}
// Record one nmethod (port in progress: liveness filtering is still
// Unimplemented, so every call currently stops there).
void nmethodCollector::do_nmethod(CodeBlob* /* nmethod* */ nm) {
  Unimplemented();//FIXME - if nmethod/methodCodeOop is not alive, return....
  // if (nm->is_methodCode()) {
  //   return;
  // }
  // // verify that there is code...
  // assert(nm->code_size() != 0, "checking");

  // create the location map for the nmethod.
  jvmtiAddrLocationMap* map;
  jint map_length;
  JvmtiCodeBlobEvents::build_jvmti_addr_location_map(nm, &map, &map_length);

  // record the nmethod details
  methodHandle mh(nm->method());
  nmethodDesc* snm = new nmethodDesc(mh, nm->code_begins(), nm->code_ends(), map, map_length);
  _global_nmethods->append(snm);
}
// Filter callback: keep only instance-class entries.
void do_cinfo(KlassInfoEntry* cie) {
  // ignore array classes
  if (!cie->klass()->oop_is_instance()) return;
  _elements->append(cie);
}
// Update entry void add_jsr (int return_bci) { _jsrs->append(return_bci); }
// Heap-walk callback: collect objects that are instances of _klass.
void do_object(oop obj) {
  if (!obj->is_a(_klass)) return;
  _result->append(obj);
}
// Register a flag range with the global range list.
static void add(CommandLineFlagRange* range) { _ranges->append(range); }
// Closure over a loop and PReg; smi_type holds the single klass (smi)
// that untagging certifies.
UntagClosure(CompiledLoop* l, PReg* r) {
  theLoop = l;
  theLoopPReg = r;
  smi_type = new GrowableArray<klassOop>(1);
  smi_type->append(smiKlassObj);
}
// Heap-walk callback: collect every klass object encountered.
void do_object(oop obj) {
  if (!obj->is_klass()) return;
  _klass_objects->append(klassOop(obj));
}
// Append a pseudo-scope mark to this scope's mark list.
void add_mark(PseudoScopeMark* psm) { _marks.append(psm); }
// Collect vtable slots that still need a default-method implementation:
// all mirandas, plus superclass overpass/static and default methods that
// this klass does not genuinely implement itself.
static GrowableArray<EmptyVtableSlot*>* find_empty_vtable_slots(
    InstanceKlass* klass, GrowableArray<Method*>* mirandas, TRAPS) {

  assert(klass != NULL, "Must be valid class");

  GrowableArray<EmptyVtableSlot*>* slots = new GrowableArray<EmptyVtableSlot*>();

  // All miranda methods are obvious candidates
  for (int i = 0; i < mirandas->length(); ++i) {
    Method* m = mirandas->at(i);
    if (!already_in_vtable_slots(slots, m)) {
      slots->append(new EmptyVtableSlot(m));
    }
  }

  // Also any overpasses in our superclasses, that we haven't implemented.
  // (can't use the vtable because it is not guaranteed to be initialized yet)
  InstanceKlass* super = klass->java_super();
  while (super != NULL) {
    for (int i = 0; i < super->methods()->length(); ++i) {
      Method* m = super->methods()->at(i);
      if (m->is_overpass() || m->is_static()) {
        // m is a method that would have been a miranda if not for the
        // default method processing that occurred on behalf of our superclass,
        // so it's a method we want to re-examine in this new context. That is,
        // unless we have a real implementation of it in the current class.
        Method* impl = klass->lookup_method(m->name(), m->signature());
        if (impl == NULL || impl->is_overpass() || impl->is_static()) {
          if (!already_in_vtable_slots(slots, m)) {
            slots->append(new EmptyVtableSlot(m));
          }
        }
      }
    }
    // also any default methods in our superclasses
    if (super->default_methods() != NULL) {
      for (int i = 0; i < super->default_methods()->length(); ++i) {
        Method* m = super->default_methods()->at(i);
        // m is a method that would have been a miranda if not for the
        // default method processing that occurred on behalf of our superclass,
        // so it's a method we want to re-examine in this new context. That is,
        // unless we have a real implementation of it in the current class.
        Method* impl = klass->lookup_method(m->name(), m->signature());
        if (impl == NULL || impl->is_overpass() || impl->is_static()) {
          if (!already_in_vtable_slots(slots, m)) {
            slots->append(new EmptyVtableSlot(m));
          }
        }
      }
    }
    super = super->java_super();
  }

#ifndef PRODUCT
  if (TraceDefaultMethods) {
    tty->print_cr("Slots that need filling:");
    streamIndentor si(tty);
    for (int i = 0; i < slots->length(); ++i) {
      tty->indent();
      slots->at(i)->print_on(tty);
      tty->cr();
    }
  }
#endif // ndef PRODUCT
  return slots;
}
// Record an object locked via JNI MonitorEnter on this thread.
void add_jni_locked_monitor(oop object) { _jni_locked_monitors->append(object); }
// Append a Java thread to the collection list.
void add_thread(JavaThread* t) { _threads->append(t); }
//---------------------------catch_call_exceptions----------------------------- // Put a Catch and CatchProj nodes behind a just-created call. // Send their caught exceptions to the proper handler. // This may be used after a call to the rethrow VM stub, // when it is needed to process unloaded exception classes. void Parse::catch_call_exceptions(ciExceptionHandlerStream& handlers) { // Exceptions are delivered through this channel: Node* i_o = this->i_o(); // Add a CatchNode. GrowableArray<int>* bcis = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, -1); GrowableArray<const Type*>* extypes = new (C->node_arena()) GrowableArray<const Type*>(C->node_arena(), 8, 0, NULL); GrowableArray<int>* saw_unloaded = new (C->node_arena()) GrowableArray<int>(C->node_arena(), 8, 0, 0); for (; !handlers.is_done(); handlers.next()) { ciExceptionHandler* h = handlers.handler(); int h_bci = h->handler_bci(); ciInstanceKlass* h_klass = h->is_catch_all() ? env()->Throwable_klass() : h->catch_klass(); const TypePtr* h_extype = TypeOopPtr::make_from_klass_unique(h_klass)->cast_away_null(); // Ignore exceptions with no implementors. These cannot be thrown // (without class loading anyways, which will deopt this code). if( h_extype->empty() ) continue; // Do not introduce unloaded exception types into the graph: if (!h_klass->is_loaded()) { if (saw_unloaded->contains(h_bci)) { /* We've already seen an unloaded exception with h_bci, so don't duplicate. Duplication will cause the CatchNode to be unnecessarily large. See 4713716. */ continue; } else { saw_unloaded->append(h_bci); } } // Note: It's OK if the BCIs repeat themselves. bcis->append(h_bci); extypes->append(h_extype); } int len = bcis->length(); CatchNode *cn = new (C, 2) CatchNode(control(), i_o, len+1); Node *catch_ = _gvn.transform(cn); // now branch with the exception state to each of the (potential) // handlers for(int i=0; i < len; i++) { // Setup JVM state to enter the handler. 
PreserveJVMState pjvms(this); // Locals are just copied from before the call. // Get control from the CatchNode. int handler_bci = bcis->at(i); Node* ctrl = _gvn.transform( new (C, 1) CatchProjNode(catch_, i+1,handler_bci)); // This handler cannot happen? if (ctrl == top()) continue; set_control(ctrl); // Create exception oop const TypeInstPtr* extype = extypes->at(i)->is_instptr(); Node *thread = _gvn.transform( new (C, 1) ThreadLocalNode() ); Node*ex_adr=basic_plus_adr(top(),thread,in_bytes(JavaThread::pending_exception_offset())); int pending_ex_alias_idx = C->get_alias_index(ex_adr->bottom_type()->is_ptr()); Node *ex_oop = make_load( NULL, ex_adr, extype, T_OBJECT, pending_ex_alias_idx ); Node *ex_st = store_to_memory( ctrl, ex_adr, null(), T_OBJECT, pending_ex_alias_idx ); record_for_igvn(ex_st); // Handle unloaded exception classes. if (saw_unloaded->contains(handler_bci)) { // An unloaded exception type is coming here. Do an uncommon trap. // We do not expect the same handler bci to take both cold unloaded // and hot loaded exceptions. But, watch for it. if(PrintOpto&&extype->is_loaded()){ C2OUT->print_cr("Warning: Handler @%d takes mixed loaded/unloaded exceptions in ",handler_bci); method()->print_name(C2OUT);C2OUT->cr(); } // Emit an uncommon trap instead of processing the block. set_bci(handler_bci); push_ex_oop(ex_oop); uncommon_trap(Deoptimization::Reason_unloaded,extype->klass(),"not loaded exception",false); set_bci(iter().cur_bci()); // put it back continue; } // go to the exception handler if (handler_bci < 0) { // merge with corresponding rethrow node throw_to_exit(make_exception_state(ex_oop)); } else { // Else jump to corresponding handle push_ex_oop(ex_oop); // Clear stack and push just the oop. merge_exception(handler_bci); } } // The first CatchProj is for the normal return. // (Note: If this is a call to rethrow_Java, this node goes dead.) 
set_control(_gvn.transform( new (C, 1) CatchProjNode(catch_, CatchProjNode::fall_through_index, CatchProjNode::no_handler_bci))); }
// Register a flag constraint with the global constraint list.
static void add(CommandLineFlagConstraint* constraint) { _constraints->append(constraint); }
// Record a (method, qualification-state) pair, indexing the method by its
// position in _members so later lookups can find the entry in O(1).
void add_method(Method* method, QualifiedState state) {
  Pair<Method*,QualifiedState> entry(method, state);
  // index must be captured before the append so it points at the new slot
  _member_index.put(method, _members.length());
  _members.append(entry);
}