// Mark-and-push (parallel compaction) all references reachable from a
// java.lang.Class mirror: its instance fields, the Klass it mirrors,
// and the static oop fields it embeds.
void InstanceMirrorKlass::oop_pc_follow_contents(oop obj, ParCompactionManager* cm) {
  // First follow the ordinary instance fields of the mirror object itself.
  InstanceKlass::oop_pc_follow_contents(obj, cm);

  // Follow the klass field in the mirror.
  Klass* klass = java_lang_Class::as_Klass(obj);
  if (klass != NULL) {
    // An anonymous class doesn't have its own class loader, so the call
    // to follow_klass will mark and push its java mirror instead of the
    // class loader. When handling the java mirror for an anonymous class
    // we need to make sure its class loader data is claimed, this is done
    // by calling follow_class_loader explicitly. For non-anonymous classes
    // the call to follow_class_loader is made when the class loader itself
    // is handled.
    if (klass->is_instance_klass() && InstanceKlass::cast(klass)->is_anonymous()) {
      cm->follow_class_loader(klass->class_loader_data());
    } else {
      cm->follow_klass(klass);
    }
  } else {
    // If klass is NULL then this a mirror for a primitive type.
    // We don't have to follow them, since they are handled as strong
    // roots in Universe::oops_do.
    assert(java_lang_Class::is_primitive(obj), "Sanity check");
  }

  // Finally mark and push the static oop fields stored inside the mirror.
  ParCompactionManager::MarkAndPushClosure cl(cm);
  oop_oop_iterate_statics<true>(obj, &cl);
}
// Apply the closure to every klass linked into this loader's klass list.
// Readers may run concurrently with list insertion, hence the acquire load.
void ClassLoaderData::classes_do(KlassClosure* klass_closure) {
  // Lock-free access requires load_ptr_acquire on the list head.
  Klass* current = load_ptr_acquire(&_klasses);
  while (current != NULL) {
    klass_closure->do_klass(current);
    assert(current != current->next_link(), "no loops!");
    current = current->next_link();
  }
}
// Print this Java vframe: method, bci, locals, expression stack, and the
// list of monitors owned at this frame (innermost first).
void javaVFrame::print() {
  ResourceMark rm;
  vframe::print();
  tty->print("\t");
  method()->print_value();
  tty->cr();
  tty->print_cr("\tbci: %d", bci());

  print_stack_values("locals", locals());
  print_stack_values("expressions", expressions());

  GrowableArray<MonitorInfo*>* list = monitors();
  if (list->is_empty()) return;
  tty->print_cr("\tmonitor list:");
  // Walk from the end of the list so the most recently entered monitor
  // is printed first.
  for (int index = (list->length()-1); index >= 0; index--) {
    MonitorInfo* monitor = list->at(index);
    tty->print("\t obj\t");
    if (monitor->owner_is_scalar_replaced()) {
      // The lock's owner object was scalar-replaced by the compiler;
      // only its klass is available.
      Klass* k = java_lang_Class::as_Klass(monitor->owner_klass());
      tty->print("( is scalar replaced %s)", k->external_name());
    } else if (monitor->owner() == NULL) {
      tty->print("( null )");
    } else {
      monitor->owner()->print_value();
      tty->print("(" INTPTR_FORMAT ")", (address)monitor->owner());
    }
    if (monitor->eliminated() && is_compiled_frame())
      tty->print(" ( lock is eliminated )");
    tty->cr();
    tty->print("\t ");
    monitor->lock()->print_on(tty);
    tty->cr();
  }
}
// Dump the system dictionary: for every bucket, each (klass, loader) entry,
// followed by the protection-domain cache table.
void Dictionary::print() {
  ResourceMark rm;
  HandleMark hm;

  tty->print_cr("Java system dictionary (table_size=%d, classes=%d)",
                table_size(), number_of_entries());
  tty->print_cr("^ indicates that initiating loader is different from "
                "defining loader");

  for (int index = 0; index < table_size(); index++) {
    for (DictionaryEntry* probe = bucket(index);
                          probe != NULL;
                          probe = probe->next()) {
      if (Verbose) tty->print("%4d: ", index);
      Klass* e = probe->klass();
      ClassLoaderData* loader_data = probe->loader_data();
      // An entry is "defining" when its loader is the klass's own defining
      // loader; otherwise the entry's loader merely initiated loading.
      bool is_defining_class =
         (loader_data == InstanceKlass::cast(e)->class_loader_data());
      tty->print("%s%s", is_defining_class ? " " : "^", e->external_name());
      tty->print(", loader ");
      loader_data->print_value();
      tty->cr();
    }
  }
  tty->cr();
  _pd_cache_table->print();
  tty->cr();
}
} UNSAFE_END

// Unsafe.getJavaMirror: convert a raw metaspace Klass* (smuggled through a
// jlong) back into its java.lang.Class mirror.
// NOTE(review): the incoming pointer is not validated — callers must pass a
// value previously handed out by the VM.
UNSAFE_ENTRY(jclass, Unsafe_GetJavaMirror(JNIEnv *env, jobject unsafe, jlong metaspace_klass)) {
  Klass* klass = (Klass*) (address) metaspace_klass;

  return (jclass) JNIHandles::make_local(klass->java_mirror());
} UNSAFE_END
void Dictionary::verify() { guarantee(number_of_entries() >= 0, "Verify of system dictionary failed"); int element_count = 0; for (int index = 0; index < table_size(); index++) { for (DictionaryEntry* probe = bucket(index); probe != NULL; probe = probe->next()) { Klass* e = probe->klass(); ClassLoaderData* loader_data = probe->loader_data(); guarantee(e->oop_is_instance(), "Verify of system dictionary failed"); // class loader must be present; a null class loader is the // boostrap loader guarantee(loader_data != NULL || DumpSharedSpaces || loader_data->class_loader() == NULL || loader_data->class_loader()->is_instance(), "checking type of class_loader"); e->verify(); probe->verify_protection_domain_set(); element_count++; } } guarantee(number_of_entries() == element_count, "Verify of system dictionary failed"); debug_only(verify_lookup_length((double)number_of_entries() / table_size())); _pd_cache_table->verify(); }
// Return the oop (a klass's java mirror) for the given metadata-GOT index.
// GOT entries are lazily resolved: an unresolved entry holds an index into
// the AOT heap's name table; a resolved entry holds a Klass* tagged with the
// low bit set.
oop AOTCompiledMethod::oop_at(int index) const {
  if (index == 0) {  // 0 is reserved
    return NULL;
  }
  Metadata** entry = _metadata_got + (index - 1);
  intptr_t meta = (intptr_t)*entry;
  if ((meta & 1) == 1) {
    // already resolved
    Klass* k = (Klass*)(meta & ~1);
    return k->java_mirror();
  }
  // The entry is string which we need to resolve.
  const char* meta_name = _heap->get_name_at((int)meta);
  // Name record layout: u2 klass-name length, then the klass name bytes,
  // then a u2 method-name length (which must be 0 here).
  int klass_len = build_u2_from((address)meta_name);
  const char* klass_name = meta_name + 2;
  // Quick check the current method's holder.
  Klass* k = _method->method_holder();
  ResourceMark rm; // for signature_name()
  if (strncmp(k->signature_name(), klass_name, klass_len) != 0) { // Does not match?
    // Search klass in got cells in DSO which have this compiled method.
    k = _heap->get_klass_from_got(klass_name, klass_len, _method);
  }
  int method_name_len = build_u2_from((address)klass_name + klass_len);
  guarantee(method_name_len == 0, "only klass is expected here");
  // Cache the resolved Klass*, tagged with the low bit, so the next call
  // takes the fast path above.
  meta = ((intptr_t)k) | 1;
  *entry = (Metadata*)meta; // Should be atomic on x64
  return k->java_mirror();
}
// increment the count for the given basic type array class (and any // multi-dimensional arrays). For example, for [B we check for // [[B, [[[B, .. and the count is incremented for each one that exists. static void increment_for_basic_type_arrays(Klass* k) { JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this(); assert(that != NULL, "no JvmtiGetLoadedClassesClosure"); for (Klass* l = k; l != NULL; l = l->array_klass_or_null()) { that->set_count(that->get_count() + 1); } }
// ------------------------------------------------------------------ // ciObjectFactory::create_new_object // // Create a new ciObject from a Metadata*. // // Implementation note: this functionality could be virtual behavior // of the oop itself. For now, we explicitly marshal the object. ciMetadata* ciObjectFactory::create_new_object(Metadata* o) { EXCEPTION_CONTEXT; if (o->is_klass()) { KlassHandle h_k(THREAD, (Klass*)o); Klass* k = (Klass*)o; if (k->oop_is_instance()) { return new (arena()) ciInstanceKlass(h_k); } else if (k->oop_is_objArray()) { return new (arena()) ciObjArrayKlass(h_k); } else if (k->oop_is_typeArray()) { return new (arena()) ciTypeArrayKlass(h_k); } } else if (o->is_method()) { methodHandle h_m(THREAD, (Method*)o); return new (arena()) ciMethod(h_m); } else if (o->is_methodData()) { // Hold methodHandle alive - might not be necessary ??? methodHandle h_m(THREAD, ((MethodData*)o)->method()); return new (arena()) ciMethodData((MethodData*)o); } // The oop is of some type not supported by the compiler interface. ShouldNotReachHere(); return NULL; }
// Replace the generic allocation primitive with a size-specialized one when
// the receiver is a compile-time constant, non-indexable klass. Returns NULL
// if no inlining is possible.
Expr* PrimInliner::obj_new() {
  Expr* rcvr = parameter(0);
  if (!rcvr->isConstantExpr() || !rcvr->constant()->is_klass()) return NULL;

  Klass* klass = klassOop(rcvr->constant())->klass_part();  // class being instantiated
  if (klass->oop_is_indexable()) return NULL;               // would fail (extremely unlikely)

  int size = klass->non_indexable_size();                   // size in words
  if (klass->can_inline_allocation()) {
    // These special compiler primitives only work for memOop klasses.
    int nof_ivars = size - memOopDesc::header_size();
    switch (nof_ivars) {
      case 0:  _pdesc = primitives::new0(); break;
      case 1:  _pdesc = primitives::new1(); break;
      case 2:  _pdesc = primitives::new2(); break;
      case 3:  _pdesc = primitives::new3(); break;
      case 4:  _pdesc = primitives::new4(); break;
      case 5:  _pdesc = primitives::new5(); break;
      case 6:  _pdesc = primitives::new6(); break;
      case 7:  _pdesc = primitives::new7(); break;
      case 8:  _pdesc = primitives::new8(); break;
      case 9:  _pdesc = primitives::new9(); break;
      default: break;  // too many ivars: keep the generic primitive
    }
  }
  Expr* call_result = genCall(true);
  return new KlassExpr(klass->as_klassOop(), call_result->preg(), call_result->node());
}
// Reset allocation statistics on klass objects; all other oops are ignored.
void do_object(oop obj) {
  if (!obj->is_klass()) return;
  Klass* k = Klass::cast(klassOop(obj));
  k->set_alloc_count(0);
  k->set_alloc_size(0);
}
// Sets the do_print flag for every superclass and subclass of the specified class. void KlassHierarchy::set_do_print_for_class_hierarchy(KlassInfoEntry* cie, KlassInfoTable* cit, bool print_subclasses) { // Set do_print for all superclasses of this class. Klass* super = ((InstanceKlass*)cie->klass())->java_super(); while (super != NULL) { KlassInfoEntry* super_cie = cit->lookup(super); super_cie->set_do_print(true); super = super->super(); } // Set do_print for this class and all of its subclasses. Stack <KlassInfoEntry*, mtClass> class_stack; class_stack.push(cie); while (!class_stack.is_empty()) { KlassInfoEntry* curr_cie = class_stack.pop(); curr_cie->set_do_print(true); if (print_subclasses && curr_cie->subclasses() != NULL) { // Current class has subclasses, so push all of them onto the stack. for (int i = 0; i < curr_cie->subclasses()->length(); i++) { KlassInfoEntry* cie = curr_cie->subclasses()->at(i); class_stack.push(cie); } } } }
// ------------------------------------------------------------------ // ciKlass::least_common_ancestor // // Get the shared parent of two klasses. // // Implementation note: this method currently goes "over the wall" // and does all of the work on the VM side. It could be rewritten // to use the super() method and do all of the work (aside from the // lazy computation of super()) in native mode. This may be // worthwhile if the compiler is repeatedly requesting the same lca // computation or possibly if most of the superklasses have already // been created as ciObjects anyway. Something to think about... ciKlass* ciKlass::least_common_ancestor(ciKlass* that) { assert(is_loaded() && that->is_loaded(), "must be loaded"); assert(is_java_klass() && that->is_java_klass(), "must be java klasses"); // Check to see if the klasses are identical. if (this == that) { return this; } VM_ENTRY_MARK; Klass* this_klass = get_Klass(); Klass* that_klass = that->get_Klass(); Klass* lca = this_klass->LCA(that_klass); // Many times the LCA will be either this_klass or that_klass. // Treat these as special cases. if (lca == that_klass) { return that; } if (this_klass == lca) { return this; } // Create the ciInstanceKlass for the lca. ciKlass* result = CURRENT_THREAD_ENV->get_object(lca->as_klassOop())->as_klass(); return result; }
// Number of static oop fields embedded in the given java.lang.Class mirror.
// Mirrors of primitive types (NULL klass) and of non-instance klasses have
// no static oop fields.
int InstanceMirrorKlass::compute_static_oop_field_count(oop obj) {
  Klass* k = java_lang_Class::as_Klass(obj);
  if (k == NULL || !k->is_instance_klass()) {
    return 0;
  }
  return InstanceKlass::cast(k)->static_oop_field_count();
}
void ClassLoaderData::methods_do(void f(Method*)) { for (Klass* k = _klasses; k != NULL; k = k->next_link()) { if (k->is_instance_klass()) { InstanceKlass::cast(k)->methods_do(f); } } }
void ClassLoaderData::methods_do(void f(Method*)) { // Lock-free access requires load_ptr_acquire for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) { if (k->is_instance_klass()) { InstanceKlass::cast(k)->methods_do(f); } } }
// Clear every receiver row whose klass's class loader is no longer alive,
// so the profile does not keep dead classes reachable.
void ReceiverTypeData::clean_weak_klass_links(BoolObjectClosure* is_alive_cl) {
  for (uint row = 0; row < row_limit(); row++) {
    Klass* recv = receiver(row);
    if (recv == NULL) continue;
    if (!recv->is_loader_alive(is_alive_cl)) {
      clear_row(row);
    }
  }
}
// ------------------------------------------------------------------ // ciKlass::ciKlass ciKlass::ciKlass(KlassHandle h_k) : ciType(h_k) { assert(get_Klass()->is_klass(), "wrong type"); Klass* k = get_Klass(); _layout_helper = k->layout_helper(); Symbol* klass_name = k->name(); assert(klass_name != NULL, "wrong ciKlass constructor"); _name = CURRENT_ENV->get_symbol(klass_name); }
// Push this object's oop contents onto the promotion manager's work queue.
inline void oopDesc::push_contents(PSPromotionManager* pm) {
  Klass* klass = blueprint();
  if (klass->oop_is_typeArray()) {
    // typeArrays never contain oops beyond the header; nothing to scavenge.
    return;
  }
  // Might contain oops beyond the header, so take the virtual call.
  klass->oop_push_contents(pm, this);
}
void ClassLoaderData::classes_do(void f(InstanceKlass*)) { for (Klass* k = _klasses; k != NULL; k = k->next_link()) { if (k->oop_is_instance()) { f(InstanceKlass::cast(k)); } assert(k != k->next_link(), "no loops!"); } }
// Count k and all of its multi-dimensional array classes, but only when the
// given loader matches the closure's requested initiating loader.
static void increment_with_loader(Klass* k, ClassLoaderData* loader_data) {
  JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this();
  oop class_loader = loader_data->class_loader();
  if (class_loader != JNIHandles::resolve(that->get_initiatingLoader())) {
    return;
  }
  for (Klass* current = k; current != NULL; current = current->array_klass_or_null()) {
    that->set_count(that->get_count() + 1);
  }
}
void ClassLoaderData::classes_do(void f(InstanceKlass*)) { // Lock-free access requires load_ptr_acquire for (Klass* k = load_ptr_acquire(&_klasses); k != NULL; k = k->next_link()) { if (k->is_instance_klass()) { f(InstanceKlass::cast(k)); } assert(k != k->next_link(), "no loops!"); } }
// Generate RdfTypeInfo.h and RdfTypeInfo.cpp: a registry that maps each
// ontology class IRI to the set of its ancestors' rdf:type IRIs.
void generateRdfTypeInfo(const ontology::Ontology& ontology) {
    // --- Header: declares the RdfTypeInfo class with a static DATA map. ---
    std::ofstream oifs;
    createFile(RdfsEntity::outdir + "/RdfTypeInfo.h", &oifs);
    startInternal(oifs);
    generateCodeProtectorBegin(oifs, "", "RdfTypeInfo");
    oifs << "class RdfTypeInfo {" << std::endl;
    oifs << "public:" << std::endl;
    indent(oifs, 1) << "RdfTypeInfo();" << std::endl;
    oifs << std::endl;
    indent(oifs, 1) << "static const std::map<std::string, std::set<std::string> >& data() { return DATA; }" << std::endl;
    oifs << "private:" << std::endl;
    indent(oifs, 1) << "static std::map<std::string, std::set<std::string> > DATA;" << std::endl;
    oifs << "};" << std::endl;
    oifs << std::endl;
    generateCodeProtectorEnd(oifs, "", "RdfTypeInfo");
    stopInternal(oifs);

    // --- Implementation file. ---
    std::ofstream ofs;
    createFile(RdfsEntity::outdir + "/RdfTypeInfo.cpp", &ofs);
    startInternal(ofs);
    addBoilerPlate(ofs);
    ofs << std::endl;
    ofs << "#include <map>" << std::endl;
    ofs << "#include <set>" << std::endl;
    ofs << "#include <string>" << std::endl;
    ofs << std::endl;
    if ( RdfsEntity::outdir == ".") {
        ofs << "#include \"RdfTypeInfo.h\"" << std::endl;
    } else {
        ofs << "#include \"" << RdfsEntity::outdir << "/RdfTypeInfo.h\"" << std::endl;
    }
    ofs << std::endl;
    // Include the generated header of every ontology class so their
    // ancestorsRdfTypeIRI() functions are visible below.
    for ( auto const& klassMapItem: ontology.classUri2Ptr()) {
        const Klass cls(*klassMapItem.second);
        ofs << "#include \"" << cls.genCppNameSpaceInclusionPath() << "/" << klassMapItem.second->prettyIRIName() << ".h" << "\"" << std::endl;
    }
    ofs << std::endl;

    ofs << "std::map<std::string, std::set<std::string> > RdfTypeInfo::DATA;" << std::endl;
    ofs << std::endl;
    // The constructor populates DATA on first construction only.
    ofs << "RdfTypeInfo::RdfTypeInfo() {" << std::endl;
    indent(ofs, 1) << "if ( DATA.empty() ) {" << std::endl;
    for ( auto const& klassMapItem: ontology.classUri2Ptr()) {
        const Klass& cls = *klassMapItem.second;
        indent(ofs, 2) << "DATA[\"" << klassMapItem.first << "\"] = " << cls.genCppNameSpaceFullyQualified() << "::" << klassMapItem.second->prettyIRIName() << "::ancestorsRdfTypeIRI();" << std::endl;
    }
    indent(ofs, 1) << "};" << std::endl;
    ofs << std::endl;
    ofs << "}" << std::endl;
    ofs << std::endl;

    // A file-local static instance forces DATA to be populated at startup.
    ofs << "namespace {" << std::endl;
    ofs << "RdfTypeInfo __loader;" << std::endl;
    ofs << "}" << std::endl;

    stopInternal(ofs);
}
// Return self, except for abstract classes with exactly 1 // implementor. Then return the 1 concrete implementation. Klass *Klass::up_cast_abstract() { Klass *r = this; while( r->is_abstract() ) { // Receiver is abstract? Klass *s = r->subklass(); // Check for exactly 1 subklass if( !s || s->next_sibling() ) // Oops; wrong count; give up return this; // Return 'this' as a no-progress flag r = s; // Loop till find concrete class } return r; // Return the 1 concrete class }
// Copy the accumulated oops into a freshly allocated objArray and return it.
objArrayOop HCodeBuffer::oops() {
  BlockScavenge bs;  // keep the scavenger away while we allocate and fill
  Klass* klass = Universe::objArrayKlassObj()->klass_part();
  objArrayOop result = objArrayOop(klass->allocateObjectSize(oopLength()));
  // NOTE(review): stores at index + 1 — presumably objArrayOop indices are
  // 1-based in this VM; confirm against obj_at_put's definition.
  for (int index = 0; index < oopLength(); index++)
    result->obj_at_put(index + 1, _oops->at(index));
  return result;
}
// add the basic type array class and its multi-dimensional array classes to the list static void add_for_basic_type_arrays(Klass* k) { JvmtiGetLoadedClassesClosure* that = JvmtiGetLoadedClassesClosure::get_this(); assert(that != NULL, "no JvmtiGetLoadedClassesClosure"); assert(that->available(), "no list"); for (Klass* l = k; l != NULL; l = l->array_klass_or_null()) { oop mirror = l->java_mirror(); that->set_element(that->get_index(), mirror); that->set_index(that->get_index() + 1); } }
// Apply the closure to every fully loaded klass owned by this loader:
// all array klasses plus instance klasses that have finished loading.
void ClassLoaderData::loaded_classes_do(KlassClosure* klass_closure) {
  // Lock to avoid classes being modified/added/removed during iteration
  MutexLockerEx ml(metaspace_lock(), Mutex::_no_safepoint_check_flag);
  for (Klass* k = _klasses; k != NULL; k = k->next_link()) {
    // Do not filter ArrayKlass oops here...
    bool visit = k->oop_is_array() ||
                 (k->oop_is_instance() && InstanceKlass::cast(k)->is_loaded());
    if (visit) {
      klass_closure->do_klass(k);
    }
  }
}
// Apply the closure to every oop field embedded in a klassOop's Klass part
// (super chain, mirror, name, ...) and in its object header. Returns the
// object's size in words.
int klassKlass::oop_oop_iterate(oop obj, OopClosure* blk) {
  // Get size before changing pointers
  int size = oop_size(obj);
  Klass* k = Klass::cast(klassOop(obj));
  blk->do_oop(k->adr_super());
  for (juint i = 0; i < Klass::primary_super_limit(); i++)
    blk->do_oop(k->adr_primary_supers()+i);
  blk->do_oop(k->adr_secondary_super_cache());
  blk->do_oop(k->adr_secondary_supers());
  blk->do_oop(k->adr_java_mirror());
  blk->do_oop(k->adr_name());
  // The following are in the perm gen and are treated
  // specially in a later phase of a perm gen collection; ...
  assert(oop(k)->is_perm(), "should be in perm");
  assert(oop(k->subklass())->is_perm_or_null(), "should be in perm");
  assert(oop(k->next_sibling())->is_perm_or_null(), "should be in perm");
  // ... don't scan them normally, but remember this klassKlass
  // for later (see, for instance, oop_follow_contents above
  // for what MarkSweep does with it.
  if (blk->should_remember_klasses()) {
    blk->remember_klass(k);
  }
  obj->oop_iterate_header(blk);
  return size;
}
// Copy the accumulated bytes into a freshly allocated byteArray and return it.
byteArrayOop HCodeBuffer::bytes() {
  BlockScavenge bs;  // keep the scavenger away while we allocate and fill
  align();           // pad the byte stream before measuring its length
  Klass* klass = Universe::byteArrayKlassObj()->klass_part();
  byteArrayOop result = byteArrayOop(klass->allocateObjectSize(byteLength()));
  // NOTE(review): stores at index + 1 — presumably byteArrayOop indices are
  // 1-based in this VM; confirm against byte_at_put's definition.
  for (int index = 0; index < byteLength(); index++)
    result->byte_at_put(index + 1, (unsigned char) _bytes->at(index));
  return result;
}
// Adjust (forward) every oop in the klass's contiguous oop block during
// parallel compaction. Returns the object's size in words.
int klassKlass::oop_update_pointers(ParCompactionManager* cm, oop obj) {
  Klass* k = Klass::cast(klassOop(obj));
  oop* cursor = k->oop_block_beg();
  oop* const limit = k->oop_block_end();
  while (cursor < limit) {
    PSParallelCompact::adjust_pointer(cursor);
    ++cursor;
  }
  return oop_size(obj);
}