void do_object(oop obj) {
  // instanceKlass objects need some adjustment.
  if (obj->blueprint()->oop_is_instanceKlass()) {
    instanceKlass* ik = instanceKlass::cast((klassOop)obj);
    sort_methods(ik, _thread);
  }
}
void CollectedHeap::post_allocation_install_obj_klass(KlassHandle klass, oop obj) {
  // These asserts are kind of complicated because of klassKlass
  // and the beginning of the world.
  assert(klass() != NULL || !Universe::is_fully_initialized(), "NULL klass");
  assert(klass() == NULL || klass()->is_klass(), "not a klass");
  assert(klass() == NULL || klass()->klass_part() != NULL, "not a klass");
  assert(obj != NULL, "NULL object pointer");
  obj->set_klass(klass());
  assert(!Universe::is_fully_initialized() || obj->blueprint() != NULL,
         "missing blueprint");
}
object_type ClassifyObjectClosure::classify_object(oop obj, bool count) {
  object_type type = unknown_type;
  Klass* k = obj->blueprint();
  if (k->as_klassOop() == SystemDictionary::Object_klass()) {
    tty->print_cr("Found the class!");
  }
  if (count) {
    k->set_alloc_count(k->alloc_count() + 1);
  }
  if (obj->is_instance()) {
    if (k->oop_is_instanceRef()) {
      type = instanceRef_type;
    } else {
      type = instance_type;
    }
  } else if (obj->is_typeArray()) {
    type = typeArray_type;
  } else if (obj->is_objArray()) {
    type = objArray_type;
  } else if (obj->is_symbol()) {
    type = symbol_type;
  } else if (obj->is_klass()) {
    Klass* k = ((klassOop)obj)->klass_part();
    if (k->oop_is_instance()) {
      type = instanceKlass_type;
    } else {
      type = klass_type;
    }
  } else if (obj->is_method()) {
    type = method_type;
  } else if (obj->is_constMethod()) {
    type = constMethod_type;
  } else if (obj->is_methodData()) {
    ShouldNotReachHere();
  } else if (obj->is_constantPool()) {
    type = constantPool_type;
  } else if (obj->is_constantPoolCache()) {
    type = constantPoolCache_type;
  } else if (obj->is_compiledICHolder()) {
    type = compiledICHolder_type;
  } else {
    ShouldNotReachHere();
  }

  assert(type != unknown_type, "found object of unknown type.");
  return type;
}
static HeuristicsResult update_heuristics(oop o, bool allow_rebias) {
  markOop mark = o->mark();
  if (!mark->has_bias_pattern()) {
    return HR_NOT_BIASED;
  }

  // Heuristics to attempt to throttle the number of revocations.
  // Stages:
  // 1. Revoke the biases of all objects in the heap of this type,
  //    but allow rebiasing of those objects if unlocked.
  // 2. Revoke the biases of all objects in the heap of this type
  //    and don't allow rebiasing of these objects. Disable
  //    allocation of objects of that type with the bias bit set.
  Klass* k = o->blueprint();
  jlong cur_time = os::javaTimeMillis();
  jlong last_bulk_revocation_time = k->last_biased_lock_bulk_revocation_time();
  int revocation_count = k->biased_lock_revocation_count();
  if ((revocation_count >= BiasedLockingBulkRebiasThreshold) &&
      (revocation_count <  BiasedLockingBulkRevokeThreshold) &&
      (last_bulk_revocation_time != 0) &&
      (cur_time - last_bulk_revocation_time >= BiasedLockingDecayTime)) {
    // This is the first revocation we've seen in a while of an
    // object of this type since the last time we performed a bulk
    // rebiasing operation. The application is allocating objects in
    // bulk which are biased toward a thread and then handing them
    // off to another thread. We can cope with this allocation
    // pattern via the bulk rebiasing mechanism so we reset the
    // klass's revocation count rather than allow it to increase
    // monotonically. If we see the need to perform another bulk
    // rebias operation later, we will, and if subsequently we see
    // many more revocation operations in a short period of time we
    // will completely disable biasing for this type.
    k->set_biased_lock_revocation_count(0);
    revocation_count = 0;
  }

  // Make revocation count saturate just beyond BiasedLockingBulkRevokeThreshold
  if (revocation_count <= BiasedLockingBulkRevokeThreshold) {
    revocation_count = k->atomic_incr_biased_lock_revocation_count();
  }

  if (revocation_count == BiasedLockingBulkRevokeThreshold) {
    return HR_BULK_REVOKE;
  }

  if (revocation_count == BiasedLockingBulkRebiasThreshold) {
    return HR_BULK_REBIAS;
  }

  return HR_SINGLE_REVOKE;
}
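// Illustrative only: a minimal, self-contained sketch of the saturating
// revocation-count heuristic in update_heuristics() above, outside the VM.
// The constants kBulkRebiasThreshold and kBulkRevokeThreshold are hypothetical
// stand-ins for the BiasedLockingBulkRebiasThreshold and
// BiasedLockingBulkRevokeThreshold flags (values assumed here), and the
// time-based decay/reset path is omitted. This is not the HotSpot code.
#include <cstdio>

enum Decision { SINGLE_REVOKE, BULK_REBIAS, BULK_REVOKE };

static const int kBulkRebiasThreshold = 20;   // assumed default
static const int kBulkRevokeThreshold = 40;   // assumed default

// Counts revocations per type and saturates just past the bulk-revoke
// threshold, mirroring the staging described in update_heuristics().
static Decision classify_revocation(int* revocation_count) {
  if (*revocation_count <= kBulkRevokeThreshold) {
    ++*revocation_count;
  }
  if (*revocation_count == kBulkRevokeThreshold) return BULK_REVOKE;
  if (*revocation_count == kBulkRebiasThreshold) return BULK_REBIAS;
  return SINGLE_REVOKE;
}

int main() {
  int count = 0;
  // After 20 revocations of a type we try a bulk rebias; after 40 we give
  // up and bulk-revoke (disable biasing for the type); later revocations
  // leave the saturated count unchanged and fall back to single revokes.
  for (int i = 0; i < 45; i++) {
    Decision d = classify_revocation(&count);
    if (d != SINGLE_REVOKE) {
      printf("revocation #%d -> %s\n", count,
             d == BULK_REBIAS ? "bulk rebias" : "bulk revoke");
    }
  }
  return 0;
}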
void do_object(oop obj) {
  // The METHODS() OBJARRAYS CANNOT BE MADE READ-ONLY, even though
  // they are never modified. Otherwise they would be pre-marked,
  // the GC marking phase would skip them, and the method objects
  // referenced by those arrays would never get marked.
  if (obj->is_klass()) {
    mark_object(obj);
    Klass* k = klassOop(obj)->klass_part();
    mark_object(k->java_mirror());
    if (obj->blueprint()->oop_is_instanceKlass()) {
      instanceKlass* ik = (instanceKlass*)k;
      mark_object(ik->methods());
      mark_object(ik->constants());
    }
    if (obj->blueprint()->oop_is_javaArray()) {
      arrayKlass* ak = (arrayKlass*)k;
      mark_object(ak->component_mirror());
    }
    return;
  }
  // Mark constantPool tags and the constantPoolCache.
  else if (obj->is_constantPool()) {
    constantPoolOop pool = constantPoolOop(obj);
    mark_object(pool->cache());
    pool->shared_tags_iterate(&mark_objects);
    return;
  }

  // Mark all method objects.
  if (obj->is_method()) {
    mark_object(obj);
  }
}
void do_object(oop obj) {
  if (obj->is_klass() && obj->blueprint()->oop_is_instanceKlass()) {
    instanceKlass* ik = instanceKlass::cast((klassOop)obj);
    int i;

    mark_and_move_for_policy(OP_favor_startup, ik->name(), _move_ro);

    if (ik->super() != NULL) {
      do_object(ik->super());
    }

    objArrayOop interfaces = ik->local_interfaces();
    mark_and_move_for_policy(OP_favor_startup, interfaces, _move_ro);
    for (i = 0; i < interfaces->length(); i++) {
      klassOop k = klassOop(interfaces->obj_at(i));
      mark_and_move_for_policy(OP_favor_startup, k->klass_part()->name(), _move_ro);
      do_object(k);
    }

    objArrayOop methods = ik->methods();
    for (i = 0; i < methods->length(); i++) {
      methodOop m = methodOop(methods->obj_at(i));
      mark_and_move_for_policy(OP_favor_startup, m->constMethod(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, m->constMethod()->exception_table(), _move_ro);
      mark_and_move_for_policy(OP_favor_runtime, m->constMethod()->stackmap_data(), _move_ro);

      // We don't move the name symbolOop here because it may invalidate
      // method ordering, which is dependent on the address of the name
      // symbolOop. It will get promoted later with the other symbols.
      // Method name is rarely accessed during classloading anyway.
      // mark_and_move_for_policy(OP_balanced, m->name(), _move_ro);

      mark_and_move_for_policy(OP_favor_startup, m->signature(), _move_ro);
    }

    mark_and_move_for_policy(OP_favor_startup, ik->transitive_interfaces(), _move_ro);
    mark_and_move_for_policy(OP_favor_startup, ik->fields(), _move_ro);

    mark_and_move_for_policy(OP_favor_runtime, ik->secondary_supers(), _move_ro);
    mark_and_move_for_policy(OP_favor_runtime, ik->method_ordering(), _move_ro);
    mark_and_move_for_policy(OP_favor_runtime, ik->class_annotations(), _move_ro);
    mark_and_move_for_policy(OP_favor_runtime, ik->fields_annotations(), _move_ro);
    mark_and_move_for_policy(OP_favor_runtime, ik->methods_annotations(), _move_ro);
    mark_and_move_for_policy(OP_favor_runtime, ik->methods_parameter_annotations(), _move_ro);
    mark_and_move_for_policy(OP_favor_runtime, ik->methods_default_annotations(), _move_ro);
    mark_and_move_for_policy(OP_favor_runtime, ik->inner_classes(), _move_ro);
    mark_and_move_for_policy(OP_favor_runtime, ik->secondary_supers(), _move_ro);
  }
}
void do_object(oop obj) {
  // Mark symbols referred to by method objects.
  if (obj->is_method()) {
    methodOop m = methodOop(obj);
    mark_object(m->name());
    mark_object(m->signature());
  }
  // Mark symbols referenced by klass objects which are read-only.
  else if (obj->is_klass()) {
    if (obj->blueprint()->oop_is_instanceKlass()) {
      instanceKlass* ik = instanceKlass::cast((klassOop)obj);
      mark_object(ik->name());
      mark_object(ik->generic_signature());
      mark_object(ik->source_file_name());
      mark_object(ik->source_debug_extension());

      typeArrayOop inner_classes = ik->inner_classes();
      if (inner_classes != NULL) {
        int length = inner_classes->length();
        for (int i = 0; i < length; i += instanceKlass::inner_class_next_offset) {
          int ioff = i + instanceKlass::inner_class_inner_name_offset;
          int index = inner_classes->ushort_at(ioff);
          if (index != 0) {
            mark_object(ik->constants()->symbol_at(index));
          }
        }
      }
      ik->field_names_and_sigs_iterate(&mark_all);
    }
  }

  // Mark symbols referenced by other constant pool entries.
  if (obj->is_constantPool()) {
    constantPoolOop(obj)->shared_symbols_iterate(&mark_all);
  }
}
void do_object(oop obj) {
  // Mark all constMethod objects.
  if (obj->is_constMethod()) {
    mark_object(obj);
    mark_object(constMethodOop(obj)->stackmap_data());
    // Exception tables are needed by ci code during compilation.
    mark_object(constMethodOop(obj)->exception_table());
  }
  // Mark objects referenced by klass objects which are read-only.
  else if (obj->is_klass()) {
    Klass* k = Klass::cast((klassOop)obj);
    mark_object(k->secondary_supers());

    // The METHODS() OBJARRAYS CANNOT BE MADE READ-ONLY, even though
    // they are never modified. Otherwise they would be pre-marked,
    // the GC marking phase would skip them, and the method objects
    // referenced by those arrays would never get marked.
    if (obj->blueprint()->oop_is_instanceKlass()) {
      instanceKlass* ik = instanceKlass::cast((klassOop)obj);
      mark_object(ik->method_ordering());
      mark_object(ik->local_interfaces());
      mark_object(ik->transitive_interfaces());
      mark_object(ik->fields());

      mark_object(ik->class_annotations());

      mark_object_recursive_skipping_klasses(ik->fields_annotations());
      mark_object_recursive_skipping_klasses(ik->methods_annotations());
      mark_object_recursive_skipping_klasses(ik->methods_parameter_annotations());
      mark_object_recursive_skipping_klasses(ik->methods_default_annotations());

      typeArrayOop inner_classes = ik->inner_classes();
      if (inner_classes != NULL) {
        mark_object(inner_classes);
      }
    }
  }
}
void do_object(oop obj) {
  if (obj->is_klass() && obj->blueprint()->oop_is_instanceKlass()) {
    instanceKlass* ik = instanceKlass::cast((klassOop)obj);
    int i;

    mark_and_move_for_policy(OP_favor_startup, ik->as_klassOop(), _move_rw);

    if (ik->super() != NULL) {
      do_object(ik->super());
    }

    objArrayOop interfaces = ik->local_interfaces();
    for (i = 0; i < interfaces->length(); i++) {
      klassOop k = klassOop(interfaces->obj_at(i));
      mark_and_move_for_policy(OP_favor_startup, k, _move_rw);
      do_object(k);
    }

    objArrayOop methods = ik->methods();
    mark_and_move_for_policy(OP_favor_startup, methods, _move_rw);
    for (i = 0; i < methods->length(); i++) {
      methodOop m = methodOop(methods->obj_at(i));
      mark_and_move_for_policy(OP_favor_startup, m, _move_rw);
      mark_and_move_for_policy(OP_favor_startup, ik->constants(), _move_rw);          // idempotent
      mark_and_move_for_policy(OP_balanced, ik->constants()->cache(), _move_rw);      // idempotent
      mark_and_move_for_policy(OP_balanced, ik->constants()->tags(), _move_rw);       // idempotent
    }

    mark_and_move_for_policy(OP_favor_startup, ik->as_klassOop()->klass(), _move_rw);
    mark_and_move_for_policy(OP_favor_startup, ik->constants()->klass(), _move_rw);

    // Although Java mirrors are marked in MarkReadWriteObjects,
    // apparently they were never moved into shared spaces since
    // MoveMarkedObjects skips marked instance oops. This may
    // be a bug in the original implementation or simply the vestige
    // of an abandoned experiment. Nevertheless we leave a hint
    // here in case this capability is ever correctly implemented.
    //
    // mark_and_move_for_policy(OP_favor_runtime, ik->java_mirror(), _move_rw);
  }
}
void arrayKlassKlass::oop_push_contents(PSPromotionManager* pm, oop obj) {
  assert(obj->blueprint()->oop_is_arrayKlass(), "must be an array klass");
}
void do_object(oop obj) {
  Klass* k = obj->blueprint();
  k->set_alloc_count(k->alloc_count() + 1);
  k->set_alloc_size(k->alloc_size() + obj->size());
}
static BiasedLocking::Condition bulk_revoke_or_rebias_at_safepoint(oop o,
                                                                   bool bulk_rebias,
                                                                   bool attempt_rebias_of_object,
                                                                   JavaThread* requesting_thread) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be done at safepoint");

  if (TraceBiasedLocking) {
    tty->print_cr("* Beginning bulk revocation (kind == %s) because of object "
                  INTPTR_FORMAT " , mark " INTPTR_FORMAT " , type %s",
                  (bulk_rebias ? "rebias" : "revoke"),
                  (intptr_t) o, (intptr_t) o->mark(),
                  Klass::cast(o->klass())->external_name());
  }

  jlong cur_time = os::javaTimeMillis();
  o->blueprint()->set_last_biased_lock_bulk_revocation_time(cur_time);

  klassOop k_o = o->klass();
  Klass* klass = Klass::cast(k_o);

  if (bulk_rebias) {
    // Use the epoch in the klass of the object to implicitly revoke
    // all biases of objects of this data type and force them to be
    // reacquired. However, we also need to walk the stacks of all
    // threads and update the headers of lightweight locked objects
    // with biases to have the current epoch.

    // If the prototype header doesn't have the bias pattern, don't
    // try to update the epoch -- assume another VM operation came in
    // and reset the header to the unbiased state, which will
    // implicitly cause all existing biases to be revoked.
    if (klass->prototype_header()->has_bias_pattern()) {
      int prev_epoch = klass->prototype_header()->bias_epoch();
      klass->set_prototype_header(klass->prototype_header()->incr_bias_epoch());
      int cur_epoch = klass->prototype_header()->bias_epoch();

      // Now walk all threads' stacks and adjust epochs of any biased
      // and locked objects of this data type we encounter.
      for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
        GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
        for (int i = 0; i < cached_monitor_info->length(); i++) {
          MonitorInfo* mon_info = cached_monitor_info->at(i);
          oop owner = mon_info->owner();
          markOop mark = owner->mark();
          if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
            // We might have encountered this object already in the case of recursive locking.
            assert(mark->bias_epoch() == prev_epoch || mark->bias_epoch() == cur_epoch,
                   "error in bias epoch adjustment");
            owner->set_mark(mark->set_bias_epoch(cur_epoch));
          }
        }
      }
    }

    // At this point we're done. All we have to do is potentially
    // adjust the header of the given object to revoke its bias.
    revoke_bias(o, attempt_rebias_of_object && klass->prototype_header()->has_bias_pattern(),
                true, requesting_thread);
  } else {
    if (TraceBiasedLocking) {
      ResourceMark rm;
      tty->print_cr("* Disabling biased locking for type %s", klass->external_name());
    }

    // Disable biased locking for this data type. Not only will this
    // cause future instances to not be biased, but existing biased
    // instances will notice that this implicitly caused their biases
    // to be revoked.
    klass->set_prototype_header(markOopDesc::prototype());

    // Now walk all threads' stacks and forcibly revoke the biases of
    // any locked and biased objects of this data type we encounter.
    for (JavaThread* thr = Threads::first(); thr != NULL; thr = thr->next()) {
      GrowableArray<MonitorInfo*>* cached_monitor_info = get_or_compute_monitor_info(thr);
      for (int i = 0; i < cached_monitor_info->length(); i++) {
        MonitorInfo* mon_info = cached_monitor_info->at(i);
        oop owner = mon_info->owner();
        markOop mark = owner->mark();
        if ((owner->klass() == k_o) && mark->has_bias_pattern()) {
          revoke_bias(owner, false, true, requesting_thread);
        }
      }
    }

    // The bias of the passed object must be forcibly revoked as well,
    // to honor the guarantees made to callers.
    revoke_bias(o, false, true, requesting_thread);
  }

  if (TraceBiasedLocking) {
    tty->print_cr("* Ending bulk revocation");
  }

  BiasedLocking::Condition status_code = BiasedLocking::BIAS_REVOKED;

  if (attempt_rebias_of_object &&
      o->mark()->has_bias_pattern() &&
      klass->prototype_header()->has_bias_pattern()) {
    markOop new_mark = markOopDesc::encode(requesting_thread, o->mark()->age(),
                                           klass->prototype_header()->bias_epoch());
    o->set_mark(new_mark);
    status_code = BiasedLocking::BIAS_REVOKED_AND_REBIASED;
    if (TraceBiasedLocking) {
      tty->print_cr("  Rebiased object toward thread " INTPTR_FORMAT,
                    (intptr_t) requesting_thread);
    }
  }

  assert(!o->mark()->has_bias_pattern() ||
         (attempt_rebias_of_object && (o->mark()->biased_locker() == requesting_thread)),
         "bug in bulk bias revocation");

  return status_code;
}
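// Illustrative only: a minimal, self-contained model of the epoch mechanism
// used in the bulk-rebias branch above. Incrementing a per-class epoch
// implicitly invalidates every bias that still carries the old epoch, without
// rewriting each object's header. All type and field names here are
// hypothetical; this is not the HotSpot mark-word layout.
#include <cstdint>
#include <cassert>

struct FakeKlass {
  uint8_t bias_epoch;   // per-type epoch, analogous to the prototype header's epoch bits
};

struct FakeObject {
  const FakeKlass* klass;
  bool    biased;
  uint8_t bias_epoch;   // epoch captured when the bias was installed
};

// A bias is only valid while the object's captured epoch matches the
// class's current epoch.
static bool bias_is_valid(const FakeObject& o) {
  return o.biased && o.bias_epoch == o.klass->bias_epoch;
}

// A bulk rebias just bumps the class epoch; every existing bias of this
// type becomes stale at once and must be reacquired (or revoked) on the
// next synchronization attempt.
static void bulk_rebias(FakeKlass& k) {
  k.bias_epoch++;
}

int main() {
  FakeKlass k = { 0 };
  FakeObject o = { &k, true, k.bias_epoch };
  assert(bias_is_valid(o));
  bulk_rebias(k);
  assert(!bias_is_valid(o));  // stale epoch: bias no longer trusted
  return 0;
}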