void do_object(oop obj) {
  if (obj->is_shared()) {
    return;
  }
  if (obj->is_gc_marked() && obj->forwardee() == NULL) {
    int s = obj->size();
    oop sh_obj = (oop)_space->allocate(s);
    if (sh_obj == NULL) {
      if (_read_only) {
        warning("\nThe permanent generation read only space is not large "
                "enough to \npreload requested classes. Use "
                "-XX:SharedReadOnlySize= to increase \nthe initial "
                "size of the read only space.\n");
      } else {
        warning("\nThe permanent generation read write space is not large "
                "enough to \npreload requested classes. Use "
                "-XX:SharedReadWriteSize= to increase \nthe initial "
                "size of the read write space.\n");
      }
      exit(2);
    }
    if (PrintSharedSpaces && Verbose && WizardMode) {
      tty->print_cr("\nMoveMarkedObjects: " PTR_FORMAT " -> " PTR_FORMAT " %s",
                    obj, sh_obj, (_read_only ? "ro" : "rw"));
    }
    Copy::aligned_disjoint_words((HeapWord*)obj, (HeapWord*)sh_obj, s);
    obj->forward_to(sh_obj);
    if (_read_only) {
      // Readonly objects: set hash value to self pointer and make gc_marked.
      sh_obj->forward_to(sh_obj);
    } else {
      sh_obj->init_mark();
    }
  }
}
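// ---------------------------------------------------------------------------
// A minimal standalone sketch (not HotSpot code) of the forwarding-pointer
// idiom the closure above relies on: during copying, an object's mark word is
// overwritten with the tagged address of its copy, forwardee() == NULL
// identifies objects not yet moved, and self-forwarding (forward_to(self)) is
// used as a terminal state for the read-only copies. All names below are toy
// placeholders, and the 0x3 tag is an assumed encoding for illustration only.
#include <cassert>
#include <cstring>
#include <stdint.h>

struct ToyObj {
  uintptr_t mark;                         // header bits or tagged forwarding ptr

  bool is_forwarded() const { return (mark & 0x3) == 0x3; }
  ToyObj* forwardee() const {
    return is_forwarded() ? reinterpret_cast<ToyObj*>(mark & ~uintptr_t(0x3))
                          : NULL;
  }
  void forward_to(ToyObj* copy) {
    mark = reinterpret_cast<uintptr_t>(copy) | 0x3;  // tag the low bits
  }
};

int main() {
  ToyObj from, to;
  from.mark = 0x1;                        // plain unlocked header
  assert(from.forwardee() == NULL);       // first visit: not yet copied
  std::memcpy(&to, &from, sizeof from);   // move the body
  from.forward_to(&to);                   // later visits find the copy
  to.forward_to(&to);                     // read-only copy: self-forwarded
  assert(from.forwardee() == &to && to.forwardee() == &to);
  return 0;
}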
void G1ParCopyHelper::mark_forwarded_object(oop from_obj, oop to_obj) {
  assert(from_obj->is_forwarded(), "from obj should be forwarded");
  assert(from_obj->forwardee() == to_obj, "to obj should be the forwardee");
  assert(from_obj != to_obj, "should not be self-forwarded");

  assert(_g1->heap_region_containing(from_obj)->in_collection_set(), "from obj should be in the CSet");
  assert(!_g1->heap_region_containing(to_obj)->in_collection_set(), "should not mark objects in the CSet");

  // The object might be in the process of being copied by another
  // worker, so we cannot trust that its to-space image is
  // well-formed. So we have to read its size from its from-space
  // image, which we know should not be changing.
  _cm->grayRoot(to_obj);
}
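// ---------------------------------------------------------------------------
// mark_forwarded_object() hands the freshly evacuated copy to the concurrent
// marker as a "gray" root: known live, fields not yet scanned. A toy tricolor
// sketch of that gray-set discipline (not G1's ConcurrentMark API; Node,
// gray_root, and drain are made-up names used only to illustrate the idea):
#include <cstddef>
#include <vector>

struct Node {
  int color;                              // 0 = white, 1 = gray, 2 = black
  std::vector<Node*> fields;
};

static std::vector<Node*> gray_stack;

// Roughly what a call like _cm->grayRoot(obj) stands for: record a live
// object whose fields still need scanning.
void gray_root(Node* obj) {
  if (obj != NULL && obj->color == 0) {
    obj->color = 1;
    gray_stack.push_back(obj);
  }
}

// Marking threads drain the gray set, blackening objects and graying the
// objects they reference.
void drain() {
  while (!gray_stack.empty()) {
    Node* n = gray_stack.back();
    gray_stack.pop_back();
    n->color = 2;                         // object and its fields are done
    for (size_t i = 0; i < n->fields.size(); i++) {
      gray_root(n->fields[i]);
    }
  }
}

int main() {
  Node a, b;
  a.color = b.color = 0;
  a.fields.push_back(&b);
  gray_root(&a);                          // like graying an evacuated copy
  drain();
  return (a.color == 2 && b.color == 2) ? 0 : 1;
}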
inline oop PSPromotionManager::copy_to_survivor_space(oop o) {
  assert(should_scavenge(&o), "Sanity");

  oop new_obj = NULL;

  // NOTE! We must be very careful with any methods that access the mark
  // in o. There may be multiple threads racing on it, and it may be forwarded
  // at any time. Do not use oop methods for accessing the mark!
  markOop test_mark = o->mark();

  // The same test as "o->is_forwarded()"
  if (!test_mark->is_marked()) {
    bool new_obj_is_tenured = false;
    size_t new_obj_size = o->size();

    // Find the object's age, MT safe.
    uint age = (test_mark->has_displaced_mark_helper() /* o->has_displaced_mark() */) ?
      test_mark->displaced_mark_helper()->age() : test_mark->age();

    if (!promote_immediately) {
      // Try allocating obj in to-space (unless too old)
      if (age < PSScavenge::tenuring_threshold()) {
        new_obj = (oop) _young_lab.allocate(new_obj_size);
        if (new_obj == NULL && !_young_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (YoungPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)young_space()->cas_allocate(new_obj_size);
            promotion_trace_event(new_obj, o, new_obj_size, age, false, NULL);
          } else {
            // Flush and fill
            _young_lab.flush();

            HeapWord* lab_base = young_space()->cas_allocate(YoungPLABSize);
            if (lab_base != NULL) {
              _young_lab.initialize(MemRegion(lab_base, YoungPLABSize));
              // Try the young lab allocation again.
              new_obj = (oop) _young_lab.allocate(new_obj_size);
              promotion_trace_event(new_obj, o, new_obj_size, age, false, &_young_lab);
            } else {
              _young_gen_is_full = true;
            }
          }
        }
      }
    }

    // Otherwise try allocating obj tenured
    if (new_obj == NULL) {
#ifndef PRODUCT
      if (ParallelScavengeHeap::heap()->promotion_should_fail()) {
        return oop_promotion_failed(o, test_mark);
      }
#endif  // #ifndef PRODUCT

      new_obj = (oop) _old_lab.allocate(new_obj_size);
      new_obj_is_tenured = true;

      if (new_obj == NULL) {
        if (!_old_gen_is_full) {
          // Do we allocate directly, or flush and refill?
          if (new_obj_size > (OldPLABSize / 2)) {
            // Allocate this object directly
            new_obj = (oop)old_gen()->cas_allocate(new_obj_size);
            promotion_trace_event(new_obj, o, new_obj_size, age, true, NULL);
          } else {
            // Flush and fill
            _old_lab.flush();

            HeapWord* lab_base = old_gen()->cas_allocate(OldPLABSize);
            if (lab_base != NULL) {
#ifdef ASSERT
              // Delay the initialization of the promotion lab (plab).
              // This exposes uninitialized plabs to card table processing.
              if (GCWorkerDelayMillis > 0) {
                os::sleep(Thread::current(), GCWorkerDelayMillis, false);
              }
#endif
              _old_lab.initialize(MemRegion(lab_base, OldPLABSize));
              // Try the old lab allocation again.
              new_obj = (oop) _old_lab.allocate(new_obj_size);
              promotion_trace_event(new_obj, o, new_obj_size, age, true, &_old_lab);
            }
          }
        }

        // This is the promotion failed test, and code handling.
        // The code belongs here for two reasons. It is slightly
        // different than the code below, and cannot share the
        // CAS testing code. Keeping the code here also minimizes
        // the impact on the common case fast path code.

        if (new_obj == NULL) {
          _old_gen_is_full = true;
          return oop_promotion_failed(o, test_mark);
        }
      }
    }

    assert(new_obj != NULL, "allocation should have succeeded");

    // Copy obj
    Copy::aligned_disjoint_words((HeapWord*)o, (HeapWord*)new_obj, new_obj_size);

    // Now we have to CAS in the header.
    if (o->cas_forward_to(new_obj, test_mark)) {
      // We won any races, we "own" this object.
      assert(new_obj == o->forwardee(), "Sanity");

      // Increment age if obj still in new generation. Now that
      // we're dealing with a markOop that cannot change, it is
      // okay to use the non mt safe oop methods.
      if (!new_obj_is_tenured) {
        new_obj->incr_age();
        assert(young_space()->contains(new_obj), "Attempt to push non-promoted obj");
      }

      // Do the size comparison first with new_obj_size, which we
      // already have. Hopefully, only a few objects are larger than
      // _min_array_size_for_chunking, and most of them will be arrays.
      // So, the is_objArray() test would be very infrequent.
      if (new_obj_size > _min_array_size_for_chunking &&
          new_obj->is_objArray() &&
          PSChunkLargeArrays) {
        // we'll chunk it
        oop* const masked_o = mask_chunked_array_oop(o);
        push_depth(masked_o);
        TASKQUEUE_STATS_ONLY(++_arrays_chunked; ++_masked_pushes);
      } else {
        // we'll just push its contents
        push_contents(new_obj);
      }
    } else {
      // We lost, someone else "owns" this object.
      guarantee(o->is_forwarded(), "Object must be forwarded if the cas failed.");

      // Try to deallocate the space. If it was directly allocated we cannot
      // deallocate it, so we have to test. If the deallocation fails,
      // overwrite with a filler object.
      if (new_obj_is_tenured) {
        if (!_old_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
          CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
        }
      } else if (!_young_lab.unallocate_object((HeapWord*) new_obj, new_obj_size)) {
        CollectedHeap::fill_with_object((HeapWord*) new_obj, new_obj_size);
      }

      // Don't update this before the unallocation!
      new_obj = o->forwardee();
    }
  } else {
    assert(o->is_forwarded(), "Sanity");
    new_obj = o->forwardee();
  }

  return new_obj;
}
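// ---------------------------------------------------------------------------
// The pivotal step above is o->cas_forward_to(new_obj, test_mark): several GC
// threads may copy the same object concurrently, but only the thread whose
// compare-and-swap installs the forwarding pointer wins; losers must discard
// their speculative copy (unallocate it or overwrite it with a filler) and
// adopt the winner's forwardee. A standalone sketch of that race using
// std::atomic; TinyObj, decode, and forward_once are toy names, and the 0x3
// forwarding tag is an assumed encoding, not HotSpot's:
#include <atomic>
#include <cassert>
#include <stdint.h>

struct TinyObj {
  std::atomic<uintptr_t> mark;            // header bits or tagged forwarding ptr
  uintptr_t payload;
};

static TinyObj* decode(uintptr_t mark) {
  return reinterpret_cast<TinyObj*>(mark & ~uintptr_t(0x3));
}

// Copy obj into 'copy' and try to install the forwarding pointer; returns the
// unique forwardee whether or not we won the race.
TinyObj* forward_once(TinyObj* obj, TinyObj* copy) {
  uintptr_t old_mark = obj->mark.load(std::memory_order_acquire);
  if ((old_mark & 0x3) == 0x3) {
    return decode(old_mark);              // someone already forwarded it
  }
  copy->payload = obj->payload;           // speculative copy (may be wasted)
  uintptr_t fwd = reinterpret_cast<uintptr_t>(copy) | 0x3;
  if (obj->mark.compare_exchange_strong(old_mark, fwd)) {
    return copy;                          // we won: we "own" the new copy
  }
  // We lost: compare_exchange_strong reloaded old_mark with the winner's
  // forwarding pointer. A real collector would unallocate 'copy' here or
  // fill it with a dummy object, as the code above does.
  return decode(old_mark);
}

int main() {
  TinyObj a, b;
  a.mark.store(0x1);                      // plain unlocked header
  a.payload = 42;
  TinyObj* first = forward_once(&a, &b);
  assert(first == &b && forward_once(&a, &b) == &b);
  return 0;
}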
// <original comment>
// The original idea here was to coalesce evacuated and dead objects.
// However that caused complications with the block offset table (BOT).
// In particular if there were two TLABs, one of them partially refined.
// |----- TLAB_1--------|----TLAB_2-~~~(partially refined part)~~~|
// The BOT entries of the unrefined part of TLAB_2 point to the start
// of TLAB_2. If the last object of TLAB_1 and the first object
// of TLAB_2 are coalesced, then the cards of the unrefined part
// would point into the middle of the filler object.
// The current approach is to not coalesce and leave the BOT contents intact.
// </original comment>
//
// We now reset the BOT when we start the object iteration over the
// region and refine its entries for every object we come across. So
// the above comment is not really relevant and we should be able
// to coalesce dead objects if we want to.
void do_object(oop obj) {
  HeapWord* obj_addr = (HeapWord*) obj;
  assert(_hr->is_in(obj_addr), "sanity");
  size_t obj_size = obj->size();
  HeapWord* obj_end = obj_addr + obj_size;

  if (_end_of_last_gap != obj_addr) {
    // there was a gap before obj_addr
    _last_gap_threshold = _hr->cross_threshold(_end_of_last_gap, obj_addr);
  }

  if (obj->is_forwarded() && obj->forwardee() == obj) {
    // The object failed to move.

    // We consider all objects that we find self-forwarded to be
    // live. What we'll do is that we'll update the prev marking
    // info so that they are all under PTAMS and explicitly marked.
    if (!_cm->isPrevMarked(obj)) {
      _cm->markPrev(obj);
    }
    if (_during_initial_mark) {
      // For the next marking info we'll only mark the
      // self-forwarded objects explicitly if we are during
      // initial-mark (since, normally, we only mark objects pointed
      // to by roots if we succeed in copying them). By marking all
      // self-forwarded objects we ensure that we mark any that are
      // still pointed to by roots. During concurrent marking, and
      // after initial-mark, we don't need to mark any objects
      // explicitly and all objects in the CSet are considered
      // (implicitly) live. So, we won't mark them explicitly and
      // we'll leave them over NTAMS.
      _cm->grayRoot(obj, obj_size, _worker_id, _hr);
    }
    _marked_bytes += (obj_size * HeapWordSize);
    obj->set_mark(markOopDesc::prototype());

    // While we were processing RSet buffers during the collection,
    // we actually didn't scan any cards on the collection set,
    // since we didn't want to update remembered sets with entries
    // that point into the collection set, given that live objects
    // from the collection set are about to move and such entries
    // will be stale very soon.
    // This change also dealt with a reliability issue which
    // involved scanning a card in the collection set and coming
    // across an array that was being chunked and looking malformed.
    //
    // The problem is that, if evacuation fails, we might have
    // remembered set entries missing given that we skipped cards on
    // the collection set. So, we'll recreate such entries now.
    obj->oop_iterate(_update_rset_cl);
  } else {
    // The object has been either evacuated or is dead. Fill it with a
    // dummy object.
    MemRegion mr(obj_addr, obj_size);
    CollectedHeap::fill_with_object(mr);

    // We must nuke all dead objects that we skipped when iterating over the region.
    _cm->clearRangePrevBitmap(MemRegion(_end_of_last_gap, obj_end));
  }
  _end_of_last_gap = obj_end;
  _last_obj_threshold = _hr->cross_threshold(obj_addr, obj_end);
}
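// ---------------------------------------------------------------------------
// In the else-branch above, evacuated or dead ranges are overwritten with
// dummy objects so the region stays linearly parseable: every address belongs
// to a well-formed object carrying a valid size. A toy sketch of why fillers
// matter for heap walking; the one-word-header encoding below is assumed for
// illustration and is not HotSpot's object layout:
#include <cassert>
#include <cstddef>

typedef size_t Word;                      // heap word; word[0] of each object
                                          // holds its size in words

// Counterpart in spirit of CollectedHeap::fill_with_object(mr): make a dead
// range look like a single well-formed object.
void fill_with_object(Word* start, size_t word_size) {
  start[0] = word_size;                   // a real heap also installs a filler klass
}

// Walk the region object by object; this only works if there are no
// unformatted gaps between objects.
size_t count_objects(Word* bottom, Word* top) {
  size_t n = 0;
  for (Word* p = bottom; p < top; p += p[0]) {
    n++;
  }
  return n;
}

int main() {
  Word heap[16];
  heap[0] = 4;                            // live 4-word object
  fill_with_object(heap + 4, 10);         // dead 10-word gap becomes a filler
  heap[14] = 2;                           // live 2-word object
  assert(count_objects(heap, heap + 16) == 3);
  return 0;
}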
inline bool HRInto_G1RemSet::self_forwarded(oop obj) {
  bool result = (obj->is_forwarded() && (obj->forwardee() == obj));
  return result;
}