// Return false if the entry could not be recorded on account
// of running out of space required to create a new entry.
bool KlassInfoTable::record_instance(const oop obj) {
  Klass*          k   = obj->klass();
  KlassInfoEntry* elt = lookup(k);
  // elt may be NULL if it's a new klass for which we
  // could not allocate space for a new entry in the hashtable.
  if (elt != NULL) {
    elt->set_count(elt->count() + 1);
    elt->set_words(elt->words() + obj->size());
    _size_of_instances_in_words += obj->size();
    return true;
  } else {
    return false;
  }
}
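The contract above (return false once a new hashtable entry can no longer be created, so the caller can stop the heap walk and report a partial histogram) can be illustrated with a minimal, self-contained sketch. This is not HotSpot code; the class, capacity, and linear probe below are hypothetical and chosen only to keep the example short.

// Sketch only: a fixed-capacity histogram whose record() returns false
// when a new entry would be needed but no slot is left.
#include <cstddef>

struct Entry { const void* key; size_t count; size_t words; };

class FixedHistogram {
  static const size_t CAP = 1024;
  Entry  _entries[CAP];
  size_t _used;
public:
  FixedHistogram() : _used(0) {}

  // Returns false if the entry could not be recorded (table is full).
  bool record(const void* key, size_t words) {
    for (size_t i = 0; i < _used; i++) {
      if (_entries[i].key == key) {
        _entries[i].count++;
        _entries[i].words += words;
        return true;
      }
    }
    if (_used == CAP) return false;       // out of space for a new entry
    _entries[_used++] = Entry{key, 1, words};
    return true;
  }
};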
void do_object(oop obj) {
  if (obj->is_shared()) {
    return;
  }
  if (obj->is_gc_marked() && obj->forwardee() == NULL) {
    int s = obj->size();
    oop sh_obj = (oop)_space->allocate(s);
    if (sh_obj == NULL) {
      if (_read_only) {
        warning("\nThe permanent generation read only space is not large "
                "enough to \npreload requested classes. Use "
                "-XX:SharedReadOnlySize= to increase \nthe initial "
                "size of the read only space.\n");
      } else {
        warning("\nThe permanent generation read write space is not large "
                "enough to \npreload requested classes. Use "
                "-XX:SharedReadWriteSize= to increase \nthe initial "
                "size of the read write space.\n");
      }
      exit(2);
    }
    if (PrintSharedSpaces && Verbose && WizardMode) {
      tty->print_cr("\nMoveMarkedObjects: " PTR_FORMAT " -> " PTR_FORMAT " %s",
                    obj, sh_obj, (_read_only ? "ro" : "rw"));
    }
    Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)sh_obj, s);
    obj->forward_to(sh_obj);
    if (_read_only) {
      // Readonly objects: set hash value to self pointer and make gc_marked.
      sh_obj->forward_to(sh_obj);
    } else {
      sh_obj->init_mark();
    }
  }
}
void ClassifyObjectClosure::do_object(oop obj) {
  int i = classify_object(obj, true);
  ++object_count[i];
  ++total_object_count;
  size_t size = obj->size() * HeapWordSize;
  object_size[i] += size;
  total_object_size += size;
}
void DefNewGeneration::handle_promotion_failure(oop old) {
  log_debug(gc, promotion)("Promotion failure size = %d", old->size());

  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
inline bool PSParallelCompact::mark_obj(oop obj) {
  const int obj_size = obj->size();
  if (mark_bitmap()->mark_obj(obj, obj_size)) {
    _summary_data.add_obj(obj, obj_size);
    return true;
  } else {
    return false;
  }
}
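The reason mark_obj() returns a bool is that marking must be idempotent: only the first caller to mark an object does the follow-up bookkeeping (add_obj() above). A minimal, self-contained sketch of that idea follows; it is not the HotSpot ParMarkBitMap, the names and single-threaded layout are assumptions, and the real bitmap uses atomic bit sets.

// Sketch only: one mark bit per heap word; marking returns true only for
// the first caller, so per-object work runs exactly once.
#include <cstdint>
#include <vector>

class MarkBitmap {
  const uintptr_t       _base;   // lowest covered address, word aligned (assumed)
  std::vector<uint64_t> _bits;   // one bit per word in the covered range
public:
  MarkBitmap(uintptr_t base, size_t words)
    : _base(base), _bits((words + 63) / 64, 0) {}

  // Returns true if this call marked the object, false if already marked.
  bool mark_obj(const void* obj) {
    size_t word_index = (reinterpret_cast<uintptr_t>(obj) - _base) / sizeof(void*);
    uint64_t  mask = uint64_t(1) << (word_index % 64);
    uint64_t& slot = _bits[word_index / 64];
    if (slot & mask) return false;
    slot |= mask;
    return true;
  }
};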
void DefNewGeneration::handle_promotion_failure(oop old) {
  if (PrintPromotionFailure && !_promotion_failed) {
    gclog_or_tty->print(" (promotion failure size = " SIZE_FORMAT ") ",
                        old->size());
  }
  _promotion_failed = true;
  _promotion_failed_info.register_copy_failure(old->size());
  preserve_mark_if_necessary(old, old->mark());
  // forward to self
  old->forward_to(old);

  _promo_failure_scan_stack.push(old);

  if (!_promo_failure_drain_in_progress) {
    // prevent recursion in copy_to_survivor_space()
    _promo_failure_drain_in_progress = true;
    drain_promo_failure_scan_stack();
    _promo_failure_drain_in_progress = false;
  }
}
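Both versions of handle_promotion_failure() use the same recursion guard: failed objects are pushed onto a scan stack, and only the outermost call drains it, so processing that uncovers further failures pushes more work instead of recursing through copy_to_survivor_space(). Below is a minimal, self-contained sketch of that pattern; it is not HotSpot code, and the class and item type are hypothetical.

// Sketch only: push work, drain at the outermost call, never recurse.
#include <stack>

class PromotionFailureHandler {
  std::stack<int> _scan_stack;          // stands in for the failed-object stack
  bool _drain_in_progress = false;

  void process(int item) {
    // Processing may discover more failures; model that by reporting a
    // follow-up item instead of recursing directly.
    if (item > 0) handle_failure(item - 1);
  }

public:
  void handle_failure(int item) {
    _scan_stack.push(item);
    if (!_drain_in_progress) {          // prevent recursion
      _drain_in_progress = true;
      while (!_scan_stack.empty()) {
        int next = _scan_stack.top();
        _scan_stack.pop();
        process(next);
      }
      _drain_in_progress = false;
    }
  }
};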
bool PSPromotionLAB::unallocate_object(oop obj) {
  assert(Universe::heap()->is_in(obj), "Object outside heap");

  if (contains(obj)) {
    HeapWord* object_end = (HeapWord*)obj + obj->size();
    assert(object_end <= top(), "Object crosses promotion LAB boundary");

    if (object_end == top()) {
      set_top((HeapWord*)obj);
      return true;
    }
  }

  return false;
}
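A bump-pointer LAB can only take back an allocation if it was the most recent one, i.e. the object ends exactly at top(); otherwise the space is simply left behind. A minimal, self-contained sketch of that rule follows; it is not the HotSpot PSPromotionLAB, and the byte-based interface is an assumption for brevity.

// Sketch only: roll the bump pointer back only for the last allocation.
#include <cstddef>

class BumpLab {
  char* _bottom;
  char* _top;
  char* _end;
public:
  BumpLab(char* buf, size_t bytes) : _bottom(buf), _top(buf), _end(buf + bytes) {}

  void* allocate(size_t bytes) {
    if (_end - _top < static_cast<ptrdiff_t>(bytes)) return nullptr;
    void* result = _top;
    _top += bytes;
    return result;
  }

  // Returns true only if p was the last thing handed out by allocate().
  bool unallocate(void* p, size_t bytes) {
    char* obj = static_cast<char*>(p);
    if (obj >= _bottom && obj < _end && obj + bytes == _top) {
      _top = obj;                       // undo the bump
      return true;
    }
    return false;
  }
};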
// Ignores "ref" and calls allocate().
oop Generation::promote(oop obj, size_t obj_size) {
  assert(obj_size == (size_t)obj->size(), "bad obj_size passed in");

#ifndef PRODUCT
  if (Universe::heap()->promotion_should_fail()) {
    return NULL;
  }
#endif  // #ifndef PRODUCT

  HeapWord* result = allocate(obj_size, false);
  if (result != NULL) {
    Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
    return oop(result);
  } else {
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    return gch->handle_failed_promotion(this, obj, obj_size);
  }
}
oop DefNewGeneration::copy_to_survivor_space(oop old, oop* from) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old or won't fit or JVMPI
  // enabled)
  if (old->age() < tenuring_threshold() &&
      !Universe::jvmpi_slow_allocation()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s, from);
    if (obj == NULL) {
      // A failed promotion likely means the MaxLiveObjectEvacuationRatio flag
      // is incorrectly set. In any case, it's seriously wrong to be here!
      vm_exit_out_of_memory(s*wordSize, "promotion");
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    atomic::prefetch_write(obj, interval);

    // Copy obj
    Memory::copy_words_aligned((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  if (Universe::jvmpi_move_event_enabled()) {
    Universe::jvmpi_object_move(old, obj);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);
  return obj;
}
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      if (!HandlePromotionFailure) {
        // A failed promotion likely means the MaxLiveObjectEvacuationRatio
        // flag is incorrectly set. In any case, it's seriously wrong to be
        // here!
        vm_exit_out_of_memory(s*wordSize, "promotion");
      }

      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);
  return obj;
}
oop DefNewGeneration::copy_to_survivor_space(oop old) {
  assert(is_in_reserved(old) && !old->is_forwarded(),
         "shouldn't be scavenging this oop");
  size_t s = old->size();
  oop obj = NULL;

  // Try allocating obj in to-space (unless too old)
  if (old->age() < tenuring_threshold()) {
    obj = (oop) to()->allocate(s);
  }

  // Otherwise try allocating obj tenured
  if (obj == NULL) {
    obj = _next_gen->promote(old, s);
    if (obj == NULL) {
      handle_promotion_failure(old);
      return old;
    }
  } else {
    // Prefetch beyond obj
    const intx interval = PrefetchCopyIntervalInBytes;
    Prefetch::write(obj, interval);

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)old, (HeapWord*)obj, s);

    // Increment age if obj still in new generation
    obj->incr_age();
    age_table()->add(obj, s);
  }

  // Done, insert forward pointer to obj in this header
  old->forward_to(obj);
  return obj;
}
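All of the copy_to_survivor_space() and handle_promotion_failure() variants rely on the same forwarding protocol: the address of the copy (or the object itself, on failure) is stored in the old object's header with a tag, which works because object addresses are at least word aligned. The sketch below is a minimal, self-contained illustration of that protocol, not the HotSpot markOop; the struct, tag value, and field layout are assumptions.

// Sketch only: forward_to()/is_forwarded()/forwardee() via a tagged header word.
#include <cassert>
#include <cstdint>

struct FakeObject {
  uintptr_t header;   // normally holds hash/age/lock bits; here just forwarding

  static const uintptr_t FORWARDED_TAG = 0x3;  // low bits are free due to alignment

  bool is_forwarded() const {
    return (header & FORWARDED_TAG) == FORWARDED_TAG;
  }
  void forward_to(FakeObject* copy) {
    assert((reinterpret_cast<uintptr_t>(copy) & FORWARDED_TAG) == 0, "alignment");
    header = reinterpret_cast<uintptr_t>(copy) | FORWARDED_TAG;
  }
  FakeObject* forwardee() const {
    assert(is_forwarded(), "only valid after forward_to()");
    return reinterpret_cast<FakeObject*>(header & ~FORWARDED_TAG);
  }
};

// Note: forward_to(this) gives the "self-forwarded" state that the
// promotion-failure and evacuation-failure paths above check for.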
// <original comment>
// The original idea here was to coalesce evacuated and dead objects.
// However that caused complications with the block offset table (BOT).
// In particular if there were two TLABs, one of them partially refined.
// |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
// The BOT entries of the unrefined part of TLAB_2 point to the start
// of TLAB_2. If the last object of the TLAB_1 and the first object
// of TLAB_2 are coalesced, then the cards of the unrefined part
// would point into the middle of the filler object.
// The current approach is to not coalesce and leave the BOT contents intact.
// </original comment>
//
// We now reset the BOT when we start the object iteration over the
// region and refine its entries for every object we come across. So
// the above comment is not really relevant and we should be able
// to coalesce dead objects if we want to.
void do_object(oop obj) {
  HeapWord* obj_addr = (HeapWord*) obj;
  assert(_hr->is_in(obj_addr), "sanity");
  size_t obj_size = obj->size();
  HeapWord* obj_end = obj_addr + obj_size;

  if (_end_of_last_gap != obj_addr) {
    // there was a gap before obj_addr
    _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
  }

  if (obj->is_forwarded() && obj->forwardee() == obj) {
    // The object failed to move.

    // We consider all objects that we find self-forwarded to be
    // live. What we'll do is that we'll update the prev marking
    // info so that they are all under PTAMS and explicitly marked.
    if (!_cm->isPrevMarked(obj)) {
      _cm->markPrev(obj);
    }
    if (_during_initial_mark) {
      // For the next marking info we'll only mark the
      // self-forwarded objects explicitly if we are during
      // initial-mark (since, normally, we only mark objects pointed
      // to by roots if we succeed in copying them). By marking all
      // self-forwarded objects we ensure that we mark any that are
      // still pointed to by roots. During concurrent marking, and
      // after initial-mark, we don't need to mark any objects
      // explicitly and all objects in the CSet are considered
      // (implicitly) live. So, we won't mark them explicitly and
      // we'll leave them over NTAMS.
      _cm->grayRoot(obj, obj_size, _worker_id, _hr);
    }
    _marked_bytes += (obj_size * HeapWordSize);
    obj->set_mark(markOopDesc::prototype());

    // While we were processing RSet buffers during the collection,
    // we actually didn't scan any cards on the collection set,
    // since we didn't want to update remembered sets with entries
    // that point into the collection set, given that live objects
    // from the collection set are about to move and such entries
    // will be stale very soon.
    // This change also dealt with a reliability issue which
    // involved scanning a card in the collection set and coming
    // across an array that was being chunked and looking malformed.
    // The problem is that, if evacuation fails, we might have
    // remembered set entries missing given that we skipped cards on
    // the collection set. So, we'll recreate such entries now.
    obj->oop_iterate(_update_rset_cl);
  } else {
    // The object has been either evacuated or is dead. Fill it with a
    // dummy object.
    MemRegion mr(obj_addr, obj_size);
    CollectedHeap::fill_with_object(mr);

    // must nuke all dead objects which we skipped when iterating over the region
    _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
  }
  _end_of_last_gap = obj_end;
  _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
}
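The "fill it with a dummy object" step above exists so the region stays parseable: a region is walked by adding each object's size to its start address, so every dead or evacuated range must still look like an object with a valid size. The standalone sketch below illustrates that idea only; it is not CollectedHeap::fill_with_object, and the one-word size header and cell layout are assumptions made for the example.

// Sketch only: fillers keep a linearly-parsed region walkable.
#include <cstddef>
#include <cstdio>

typedef size_t HeapCell;            // first cell of every "object" holds its size in cells

static void fill_with_dummy(HeapCell* start, size_t size_in_cells) {
  start[0] = size_in_cells;         // a filler is just a size header plus ignored payload
}

static void walk_region(HeapCell* bottom, HeapCell* top) {
  for (HeapCell* p = bottom; p < top; p += p[0]) {
    std::printf("object at %p, %zu cells\n", static_cast<void*>(p), p[0]);
  }
}

int main() {
  HeapCell region[16] = {0};
  region[0] = 4;                    // live object of 4 cells
  fill_with_dummy(region + 4, 6);   // dead/evacuated range turned into a filler
  region[10] = 6;                   // another live object
  walk_region(region, region + 16);
  return 0;
}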
void KlassInfoTable::record_instance(const oop obj) {
  klassOop        k   = obj->klass();
  KlassInfoEntry* elt = lookup(k);
  elt->set_count(elt->count() + 1);
  elt->set_words(elt->words() + obj->size());
}
size_t G1CMObjArrayProcessor::process_obj(oop obj) {
  assert(should_be_sliced(obj), "Must be an array object %d and large " SIZE_FORMAT,
         obj->is_objArray(), (size_t)obj->size());

  return process_array_slice(objArrayOop(obj), (HeapWord*)obj,
                             (size_t)objArrayOop(obj)->size());
}
oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
                                                 oop const old,
                                                 markOop const old_mark) {
  const size_t word_sz = old->size();
  HeapRegion* const from_region = _g1h->heap_region_containing_raw(old);
  // +1 to make the -1 indexes valid...
  const int young_index = from_region->young_index_in_cset()+1;
  assert( (from_region->is_young() && young_index >  0) ||
         (!from_region->is_young() && young_index == 0), "invariant" );
  const AllocationContext_t context = from_region->allocation_context();

  uint age = 0;
  InCSetState dest_state = next_state(state, old_mark, age);
  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
  if (obj_ptr == NULL) {
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
    if (obj_ptr == NULL) {
      obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark);
      }
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method too.
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

  const oop obj = oop(obj_ptr);
  const oop forward_ptr = old->forward_to_atomic(obj);
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

    if (dest_state.is_young()) {
      if (age < markOopDesc::max_age) {
        age++;
      }
      if (old_mark->has_displaced_mark_helper()) {
        // In this case, we have to install the mark word first,
        // otherwise obj looks to be forwarded (the old mark word,
        // which contains the forward pointer, was copied)
        obj->set_mark(old_mark);
        markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
        old_mark->set_displaced_mark_helper(new_mark);
      } else {
        obj->set_mark(old_mark->set_age(age));
      }
      age_table()->add(age, word_sz);
    } else {
      obj->set_mark(old_mark);
    }

    if (G1StringDedup::is_enabled()) {
      const bool is_from_young = state.is_young();
      const bool is_to_young = dest_state.is_young();
      assert(is_from_young == _g1h->heap_region_containing_raw(old)->is_young(),
             "sanity");
      assert(is_to_young == _g1h->heap_region_containing_raw(obj)->is_young(),
             "sanity");
      G1StringDedup::enqueue_from_evacuation(is_from_young,
                                             is_to_young,
                                             _worker_id,
                                             obj);
    }

    size_t* const surv_young_words = surviving_young_words();
    surv_young_words[young_index] += word_sz;

    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
      // We keep track of the next start index in the length field of
      // the to-space object. The actual length can be found in the
      // length field of the from-space object.
      arrayOop(obj)->set_length(0);
      oop* old_p = set_partial_array_mask(old);
      push_on_queue(old_p);
    } else {
      HeapRegion* const to_region = _g1h->heap_region_containing_raw(obj_ptr);
      _scanner.set_region(to_region);
      obj->oop_iterate_backwards(&_scanner);
    }
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
    return forward_ptr;
  }
}
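The central race in the function above is forward_to_atomic(): several GC workers may copy the same object speculatively, but only the one whose compare-exchange installs the forwarding pointer keeps its copy; the losers undo their PLAB allocation and use the winner's result. The sketch below is a minimal, self-contained illustration of that shape, not oopDesc::forward_to_atomic; the struct, the "0 means not forwarded" encoding, and the comment-only caller outline are assumptions.

// Sketch only: CAS-installed forwarding pointer, losers see the winner's copy.
#include <atomic>
#include <cstdint>

struct RelocObject {
  std::atomic<uintptr_t> header{0};    // 0 stands for "not forwarded yet"

  // Returns nullptr if we installed 'copy', otherwise the previously installed copy.
  RelocObject* forward_to_atomic(RelocObject* copy) {
    uintptr_t expected = 0;
    uintptr_t desired  = reinterpret_cast<uintptr_t>(copy);
    if (header.compare_exchange_strong(expected, desired)) {
      return nullptr;                                   // we won the race
    }
    return reinterpret_cast<RelocObject*>(expected);    // somebody else won
  }
};

// Typical caller shape, mirroring the function above (pseudocode in comments):
//   RelocObject* copy = /* speculative allocation */;
//   RelocObject* prev = old->forward_to_atomic(copy);
//   if (prev == nullptr) { /* fill in copy, then scan it */ }
//   else                 { /* undo the allocation, use prev instead */ }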
inline bool ParMarkBitMap::mark_obj(oop obj) {
  return mark_obj(obj, obj->size());
}
void do_object(oop obj) {
  Klass* k = obj->blueprint();
  k->set_alloc_count(k->alloc_count() + 1);
  k->set_alloc_size(k->alloc_size() + obj->size());
}
inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    // Find the object's age, MT safe.
    uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
      test_mark->displaced_mark_helper()->age() : test_mark->age();

    if (!promote_immediately) {
      // Try allocating obj in to-space (unless too old)
      if (age < PSScavenge::tenuring_threshold()) {
        new_obj = (oop) _young_lab.allocate(new_obj_size);
        if (new_obj == NULL && !_young_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (YoungPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
            promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL);
          } else {
            // Flush and fill
            _young_lab.flush();

            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
            if (lab_base != NULL) {
              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
              // Try the young lab allocation again.
              new_obj = (oop) _young_lab.allocate(new_obj_size);
              promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab);
            } else {
              _young_gen_is_full = true;
            }
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      if (ParallelScavengeHeap::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif  // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
            promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if (lab_base != NULL) {
#ifdef ASSERT
              // Delay the initialization of the promotion lab (plab).
              // This exposes uninitialized plabs to card table processing.
              if (GCWorkerDelayMillis > 0) {
                os::sleep(Thread::current(), GCWorkerDelayMillis, false);
              }
#endif
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
              promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab);
            }
          }
        }

        // This is the promotion failed test, and code handling.
        // The code belongs here for two reasons. It is slightly
        // different than the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.

        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non mt safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is_objArray() test would be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
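The chunking branch at the end of the snippet above pushes a "masked" pointer rather than scanning the whole array at once, so large object arrays are processed in slices and the work can be stolen by other GC threads. A minimal, self-contained sketch of that pointer-tagging idea follows; it is not the HotSpot task-queue code, and the tag bit, chunk size, and queue type are assumptions.

// Sketch only: a tag bit in the task pointer marks "partial array" work.
#include <cstdint>
#include <deque>

static const uintptr_t PARTIAL_ARRAY_TAG = 0x1;  // object addresses are aligned, bit 0 is free
static const int       CHUNK             = 128;  // elements scanned per task (assumed)

struct BigArray { int scanned; int length; };

static uintptr_t make_partial_task(BigArray* a) {
  return reinterpret_cast<uintptr_t>(a) | PARTIAL_ARRAY_TAG;
}

static void process_task(uintptr_t task, std::deque<uintptr_t>& queue) {
  if (task & PARTIAL_ARRAY_TAG) {
    BigArray* a = reinterpret_cast<BigArray*>(task & ~PARTIAL_ARRAY_TAG);
    int end = (a->scanned + CHUNK < a->length) ? a->scanned + CHUNK : a->length;
    // ... scan elements [a->scanned, end) here ...
    a->scanned = end;
    if (end < a->length) {
      queue.push_back(task);                      // more chunks remain
    }
  } else {
    // ... scan an ordinary (untagged) object ...
  }
}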