// Decide where an object should be copied: a young object stays in the
// survivor spaces while its age is below the tenuring threshold,
// otherwise it goes to the destination that dest() picks for its state.
InCSetState G1ParScanThreadState::next_state(InCSetState const state, markOop const m, uint& age) {
  if (state.is_young()) {
    age = !m->has_displaced_mark_helper() ? m->age()
                                          : m->displaced_mark_helper()->age();
    if (age < _tenuring_threshold) {
      return state;
    }
  }
  return dest(state);
}
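// A self-contained sketch of the age bit field that next_state() reads,
// assuming the classic 64-bit mark word layout (2 lock bits, 1 biased-lock
// bit, then 4 age bits); the constants restate markOopDesc locally and are
// illustrative, not authoritative.
#include <cassert>
#include <cstdint>

const uintptr_t age_shift = 3;
const uintptr_t age_bits  = 4;
const uintptr_t age_mask  = ((uintptr_t(1) << age_bits) - 1) << age_shift;
const unsigned  max_age   = (1u << age_bits) - 1;  // 15

unsigned age_of(uintptr_t mark) {
  return (unsigned)((mark & age_mask) >> age_shift);
}

uintptr_t with_age(uintptr_t mark, unsigned v) {
  assert(v <= max_age && "age overflows its 4-bit field");
  return (mark & ~age_mask) | ((uintptr_t)v << age_shift);
}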
// This method is called whenever an attempt to promote an object
// fails. Some markOops will need preservation, some will not. Note
// that the entire eden is traversed after a failed promotion, with
// all forwarded headers replaced by the default markOop. This means
// it is not necessary to preserve most markOops.
void PSScavenge::oop_promotion_failed(oop obj, markOop obj_mark) {
  if (obj_mark->must_be_preserved_for_promotion_failure(obj)) {
    // Should use per-worker private stacks here rather than
    // locking a common pair of stacks.
    ThreadCritical tc;
    _preserved_oop_stack.push(obj);
    _preserved_mark_stack.push(obj_mark);
  }
}
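// To see why most marks need no preservation: after the eden walk resets
// every forwarded header to the default ("prototype") mark, only headers
// that the walk cannot reconstruct are worth saving. A minimal sketch of
// that test against the bit layout assumed above; the real
// must_be_preserved_for_promotion_failure() also accounts for biased
// locking epochs and flag settings, which are omitted here.
bool must_be_preserved_sketch(uintptr_t mark) {
  const uintptr_t lock_mask      = 0x3;  // bits 0-1
  const uintptr_t unlocked_value = 0x1;
  const uintptr_t biased_bit     = 0x4;  // bit 2
  const uintptr_t hash_shift     = 8;
  const uintptr_t hash_bits      = 31;
  const uintptr_t hash_mask      = ((uintptr_t(1) << hash_bits) - 1) << hash_shift;

  bool is_neutral = (mark & (lock_mask | biased_bit)) == unlocked_value;
  bool has_hash   = (mark & hash_mask) != 0;
  // Locked, biased, or hashed headers carry state the default mark lacks.
  return !is_neutral || has_hash;
}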
void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    // Allocate the two parallel side stacks lazily, on the first mark
    // that actually needs preserving.
    if (_objs_with_preserved_marks == NULL) {
      assert(_preserved_marks_of_objs == NULL, "Both or none.");
      _objs_with_preserved_marks = new (ResourceObj::C_HEAP)
        GrowableArray<oop>(PreserveMarkStackSize, true);
      _preserved_marks_of_objs = new (ResourceObj::C_HEAP)
        GrowableArray<markOop>(PreserveMarkStackSize, true);
    }
    _objs_with_preserved_marks->push(obj);
    _preserved_marks_of_objs->push(m);
  }
}
// Only save marks that the post-failure cleanup cannot reconstruct.
void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    preserve_mark(obj, m);
  }
}
// The assert guards against oversaving: callers must only hand in marks
// during a failed promotion, and only ones that truly need preserving.
void DefNewGeneration::preserve_mark(oop obj, markOop m) {
  assert(_promotion_failed && m->must_be_preserved_for_promotion_failure(obj),
         "Oversaving!");
  _objs_with_preserved_marks.push(obj);
  _preserved_marks_of_objs.push(m);
}
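// The companion step these stacks exist for: once the failed collection
// unwinds, the saved headers go back over the forwarding pointers. A
// hedged, self-contained model of that restore loop (DefNewGeneration
// does this in its promotion-failure cleanup; the names here are
// illustrative).
#include <cstdint>
#include <vector>

struct ObjModel { uintptr_t mark; };

std::vector<ObjModel*> objs_with_preserved_marks;
std::vector<uintptr_t> preserved_marks_of_objs;

void restore_preserved_marks() {
  // Pop both stacks in lock step, reinstalling each saved header.
  while (!objs_with_preserved_marks.empty()) {
    ObjModel* obj = objs_with_preserved_marks.back();
    objs_with_preserved_marks.pop_back();
    obj->mark = preserved_marks_of_objs.back();
    preserved_marks_of_objs.pop_back();
  }
}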
oop G1ParScanThreadState::copy_to_survivor_space(InCSetState const state,
                                                 oop const old,
                                                 markOop const old_mark) {
  const size_t word_sz = old->size();
  HeapRegion* const from_region = _g1h->heap_region_containing_raw(old);
  // +1 to make the -1 indexes valid...
  const int young_index = from_region->young_index_in_cset()+1;
  assert( (from_region->is_young() && young_index >  0) ||
         (!from_region->is_young() && young_index == 0), "invariant" );
  const AllocationContext_t context = from_region->allocation_context();

  uint age = 0;
  InCSetState dest_state = next_state(state, old_mark, age);
  HeapWord* obj_ptr = _plab_allocator->plab_allocate(dest_state, word_sz, context);

  // PLAB allocations should succeed most of the time, so we'll
  // normally check against NULL once and that's it.
  if (obj_ptr == NULL) {
    obj_ptr = _plab_allocator->allocate_direct_or_new_plab(dest_state, word_sz, context);
    if (obj_ptr == NULL) {
      obj_ptr = allocate_in_next_plab(state, &dest_state, word_sz, context);
      if (obj_ptr == NULL) {
        // This will either forward-to-self, or detect that someone else has
        // installed a forwarding pointer.
        return handle_evacuation_failure_par(old, old_mark);
      }
    }
  }

  assert(obj_ptr != NULL, "when we get here, allocation should have succeeded");
  assert(_g1h->is_in_reserved(obj_ptr), "Allocated memory should be in the heap");

#ifndef PRODUCT
  // Should this evacuation fail?
  if (_g1h->evacuation_should_fail()) {
    // Doing this after all the allocation attempts also tests the
    // undo_allocation() method too.
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
    return handle_evacuation_failure_par(old, old_mark);
  }
#endif // !PRODUCT

  // We're going to allocate linearly, so might as well prefetch ahead.
  Prefetch::write(obj_ptr, PrefetchCopyIntervalInBytes);

  const oop obj = oop(obj_ptr);
  const oop forward_ptr = old->forward_to_atomic(obj);
  if (forward_ptr == NULL) {
    Copy::aligned_disjoint_words((HeapWord*) old, obj_ptr, word_sz);

    if (dest_state.is_young()) {
      if (age < markOopDesc::max_age) {
        age++;
      }
      if (old_mark->has_displaced_mark_helper()) {
        // In this case, we have to install the mark word first,
        // otherwise obj looks to be forwarded (the old mark word,
        // which contains the forward pointer, was copied)
        obj->set_mark(old_mark);
        markOop new_mark = old_mark->displaced_mark_helper()->set_age(age);
        old_mark->set_displaced_mark_helper(new_mark);
      } else {
        obj->set_mark(old_mark->set_age(age));
      }
      age_table()->add(age, word_sz);
    } else {
      obj->set_mark(old_mark);
    }

    if (G1StringDedup::is_enabled()) {
      const bool is_from_young = state.is_young();
      const bool is_to_young = dest_state.is_young();
      assert(is_from_young == _g1h->heap_region_containing_raw(old)->is_young(),
             "sanity");
      assert(is_to_young == _g1h->heap_region_containing_raw(obj)->is_young(),
             "sanity");
      G1StringDedup::enqueue_from_evacuation(is_from_young,
                                             is_to_young,
                                             _worker_id,
                                             obj);
    }

    size_t* const surv_young_words = surviving_young_words();
    surv_young_words[young_index] += word_sz;

    if (obj->is_objArray() && arrayOop(obj)->length() >= ParGCArrayScanChunk) {
      // We keep track of the next start index in the length field of
      // the to-space object. The actual length can be found in the
      // length field of the from-space object.
      arrayOop(obj)->set_length(0);
      oop* old_p = set_partial_array_mask(old);
      push_on_queue(old_p);
    } else {
      HeapRegion* const to_region = _g1h->heap_region_containing_raw(obj_ptr);
      _scanner.set_region(to_region);
      obj->oop_iterate_backwards(&_scanner);
    }
    return obj;
  } else {
    _plab_allocator->undo_allocation(dest_state, obj_ptr, word_sz, context);
    return forward_ptr;
  }
}
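// The set_partial_array_mask() call above tags a queue entry by setting a
// low pointer bit, so workers can tell "scan a chunk of this array" apart
// from "scan this object". A self-contained sketch of that tagging,
// assuming object alignment of at least 4 bytes leaves bit 1 free (G1 uses
// a mask like this; the constant is illustrative).
#include <cassert>
#include <cstdint>

const uintptr_t PARTIAL_ARRAY_MASK = 0x2;

void* tag_partial_array(void* obj) {
  assert(((uintptr_t)obj & PARTIAL_ARRAY_MASK) == 0 && "already tagged");
  return (void*)((uintptr_t)obj | PARTIAL_ARRAY_MASK);
}

bool has_partial_array_mask(void* entry) {
  return ((uintptr_t)entry & PARTIAL_ARRAY_MASK) != 0;
}

void* untag_partial_array(void* entry) {
  return (void*)((uintptr_t)entry & ~PARTIAL_ARRAY_MASK);
}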
// Per-worker entry point: delegates the preservation test to the same
// markOop predicate used by the collectors above.
inline bool PreservedMarks::should_preserve_mark(oop obj, markOop m) const {
  return m->must_be_preserved_for_promotion_failure(obj);
}
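// PreservedMarks addresses the bottleneck flagged in the PSScavenge
// comment above: each GC worker owns a private stack of (object, header)
// pairs, so pushes need no ThreadCritical lock. A minimal self-contained
// model of that pattern; the predicate is simplified to "any non-default
// mark", a stand-in for should_preserve_mark().
#include <cstdint>
#include <vector>

struct ObjRef { uintptr_t mark; };

class PreservedMarksModel {
  struct Entry { ObjRef* obj; uintptr_t mark; };
  std::vector<Entry> _stack;  // owned by exactly one worker: no locking
public:
  void push_if_necessary(ObjRef* obj, uintptr_t m, uintptr_t prototype) {
    if (m != prototype) {  // stand-in for the real preservation predicate
      _stack.push_back({obj, m});
    }
  }
  void restore_all() {
    while (!_stack.empty()) {
      Entry e = _stack.back();
      _stack.pop_back();
      e.obj->mark = e.mark;
    }
  }
};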
// Stamp the process-local hash counter into the mark word and return
// the freshly installed hash value.
int32 assign_hash(markOop& m) {
  m = m->set_hash(currentProcess->current_hash++);
  return m->hash();
}
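// assign_hash() above writes the next counter value into the header's hash
// field and returns it. A self-contained sketch against the same assumed
// layout (hash at bits 8..38); next_hash stands in for
// currentProcess->current_hash and is purely illustrative.
#include <cstdint>

const uintptr_t hash_shift_s = 8;
const uintptr_t hash_bits_s  = 31;
const uintptr_t hash_mask_s  = ((uintptr_t(1) << hash_bits_s) - 1) << hash_shift_s;

static uint32_t next_hash = 1;

uint32_t hash_of(uintptr_t mark) {
  return (uint32_t)((mark & hash_mask_s) >> hash_shift_s);
}

uint32_t assign_hash_sketch(uintptr_t& mark) {
  // Truncate the counter to the field width, then splice it in.
  uintptr_t h = (uintptr_t)(next_hash++) & ((uintptr_t(1) << hash_bits_s) - 1);
  mark = (mark & ~hash_mask_s) | (h << hash_shift_s);
  return hash_of(mark);
}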
// Save the (object, mark) pair so the header can be restored if this
// scavenge fails.
void DefNewGeneration::preserve_mark_if_necessary(oop obj, markOop m) {
  if (m->must_be_preserved_for_promotion_failure(obj)) {
    _objs_with_preserved_marks.push(obj);
    _preserved_marks_of_objs.push(m);
  }
}