void do_code_blob(CodeBlob* cb) {
  nmethod* nm = (cb == NULL) ? NULL : cb->as_nmethod_or_null();
  if (nm != NULL) {
    // Verify that the nmethod is live
    if (!nm->is_alive()) {
      gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has dead nmethod "
                             PTR_FORMAT" in its strong code roots",
                             _hr->bottom(), _hr->end(), nm);
      _failures = true;
    } else {
      VerifyStrongCodeRootOopClosure oop_cl(_hr, nm);
      nm->oops_do(&oop_cl);
      if (!oop_cl.has_oops_in_region()) {
        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has nmethod "
                               PTR_FORMAT" in its strong code roots "
                               "with no pointers into region",
                               _hr->bottom(), _hr->end(), nm);
        _failures = true;
      } else if (oop_cl.failures()) {
        gclog_or_tty->print_cr("region ["PTR_FORMAT","PTR_FORMAT"] has other "
                               "failures for nmethod "PTR_FORMAT,
                               _hr->bottom(), _hr->end(), nm);
        _failures = true;
      }
    }
  }
}
bool YoungList::check_list_well_formed() {
  bool ret = true;

  uint length = 0;
  HeapRegion* curr = _head;
  HeapRegion* last = NULL;
  while (curr != NULL) {
    if (!curr->is_young()) {
      log_info(gc, verify)("### YOUNG REGION " PTR_FORMAT "-" PTR_FORMAT " "
                           "incorrectly tagged (y: %d, surv: %d)",
                           p2i(curr->bottom()), p2i(curr->end()),
                           curr->is_young(), curr->is_survivor());
      ret = false;
    }
    ++length;
    last = curr;
    curr = curr->get_next_young_region();
  }
  ret = ret && (length == _length);

  if (!ret) {
    log_info(gc, verify)("### YOUNG LIST seems not well formed!");
    log_info(gc, verify)("### list has %u entries, _length is %u",
                         length, _length);
  }

  return ret;
}
HeapRegion* OldGCAllocRegion::release() {
  HeapRegion* cur = get();
  if (cur != NULL) {
    // Determine how far we are from the next card boundary. If it is smaller than
    // the minimum object size we can allocate into, expand into the next card.
    HeapWord* top = cur->top();
    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);

    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);

    if (to_allocate_words != 0) {
      // We are not at a card boundary. Fill up, possibly into the next card,
      // taking the end of the region and the minimum object size into account.
      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));

      // Skip allocation if there is not enough space to allocate even the smallest
      // possible object. In this case this region will not be retained, so the
      // original problem cannot occur.
      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
        CollectedHeap::fill_with_object(dummy, to_allocate_words);
      }
    }
  }
  return G1AllocRegion::release();
}
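The card-boundary arithmetic in release() is easy to get lost in, so here is a standalone sketch of it (not part of the HotSpot sources) that walks one concrete case. The constants are assumptions for illustration only: 512-byte cards, 8-byte HeapWords, and a 2-word minimum filler object; the real values come from G1BlockOffsetSharedArray::N_bytes, HeapWordSize, and G1CollectedHeap::min_fill_size().

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t card_bytes     = 512; // assumed card size
  const uintptr_t word_bytes     = 8;   // assumed HeapWord size
  const size_t    min_fill_words = 2;   // assumed minimum filler object size

  uintptr_t top = 0x101f8;  // example top, one word below card boundary 0x10200
  uintptr_t end = 0x20000;  // example region end

  // Round top up to the next card boundary and express the gap in words.
  uintptr_t aligned_top = (top + card_bytes - 1) & ~(card_bytes - 1);
  size_t to_allocate = (aligned_top - top) / word_bytes;  // 1 word here

  if (to_allocate != 0) {
    // Grow the fill to at least the minimum object size, possibly crossing
    // into the next card, but never past the region end. This mirrors the
    // MIN2/MAX2 step in release().
    size_t room = (end - top) / word_bytes;
    to_allocate = std::min(room, std::max(to_allocate, min_fill_words));
    if (to_allocate >= min_fill_words) {
      printf("fill %zu words starting at top\n", to_allocate);  // prints 2
    }
  }
  return 0;
}

In this example the one-word gap is too small for a filler object, so the fill grows to two words and spills into the next card, which is exactly the "expand into the next card" case the comment in release() describes.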
template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
  oop obj = oopDesc::load_decode_heap_oop(p);
#ifdef ASSERT
  // can't do because of races
  // assert(obj == NULL || obj->is_oop(), "expected an oop");

  // Do the safe subset of is_oop
  if (obj != NULL) {
#ifdef CHECK_UNHANDLED_OOPS
    oopDesc* o = obj.obj();
#else
    oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
  }
#endif // ASSERT

  assert(from == NULL || from->is_in_reserved(p), "p is not in from");

  HeapRegion* to = _g1->heap_region_containing(obj);
  // The test below could be optimized by applying a bit op to to and from.
  if (to != NULL && from != NULL && from != to) {
    // The _traversal_in_progress flag is true during the collection pause,
    // false during the evacuation failure handling. This should avoid a
    // potential loop if we were to add the card containing 'p' to the DCQS
    // that's used to regenerate the remembered sets for the collection set,
    // in the event of an evacuation failure, here. The UpdateRSImmediate
    // closure will eventually call this routine.
    if (_traversal_in_progress &&
        to->in_collection_set() && !self_forwarded(obj)) {

      assert(_cset_rs_update_cl[tid] != NULL, "should have been set already");
      _cset_rs_update_cl[tid]->do_oop(p);

      // Deferred updates to the CSet are either discarded (in the normal case),
      // or processed (if an evacuation failure occurs) at the end
      // of the collection.
      // See HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do().
    } else {
#if G1_REM_SET_LOGGING
      gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
                             " for region [" PTR_FORMAT ", " PTR_FORMAT ")",
                             p, obj, to->bottom(), to->end());
#endif
      assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
      to->rem_set()->add_reference(p, tid);
    }
  }
}
template <class T> inline void HRInto_G1RemSet::par_write_ref_nv(HeapRegion* from, T* p, int tid) {
  oop obj = oopDesc::load_decode_heap_oop(p);
#ifdef ASSERT
  // can't do because of races
  // assert(obj == NULL || obj->is_oop(), "expected an oop");

  // Do the safe subset of is_oop
  if (obj != NULL) {
#ifdef CHECK_UNHANDLED_OOPS
    oopDesc* o = obj.obj();
#else
    oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
    assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
    assert(Universe::heap()->is_in_reserved(obj), "must be in heap");
  }
#endif // ASSERT

  assert(from == NULL || from->is_in_reserved(p), "p is not in from");

  HeapRegion* to = _g1->heap_region_containing(obj);
  // The test below could be optimized by applying a bit op to to and from.
  if (to != NULL && from != NULL && from != to) {
    // There is a tricky infinite loop if we keep pushing
    // self-forwarding pointers onto our _new_refs list.
    // The _par_traversal_in_progress flag is true during the collection pause,
    // false during the evacuation failure handling.
    if (_par_traversal_in_progress &&
        to->in_collection_set() && !self_forwarded(obj)) {
      _new_refs[tid]->push((void*)p);
      // Deferred updates to the CSet are either discarded (in the normal case),
      // or processed (if an evacuation failure occurs) at the end
      // of the collection.
      // See HRInto_G1RemSet::cleanup_after_oops_into_collection_set_do().
    } else {
#if G1_REM_SET_LOGGING
      gclog_or_tty->print_cr("Adding " PTR_FORMAT " (" PTR_FORMAT ") to RS"
                             " for region [" PTR_FORMAT ", " PTR_FORMAT ")",
                             p, obj, to->bottom(), to->end());
#endif
      assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
      to->rem_set()->add_reference(p, tid);
    }
  }
}
MemRegion HeapRegionSeq::expand_by(HeapWord* old_end,
                                   HeapWord* new_end,
                                   FreeRegionList* list) {
  assert(old_end < new_end, "don't call it otherwise");
  G1CollectedHeap* g1h = G1CollectedHeap::heap();

  HeapWord* next_bottom = old_end;
  assert(heap_bottom() <= next_bottom, "invariant");
  while (next_bottom < new_end) {
    assert(next_bottom < heap_end(), "invariant");
    uint index = length();

    assert(index < max_length(), "otherwise we cannot expand further");
    if (index == 0) {
      // We have not allocated any regions so far
      assert(next_bottom == heap_bottom(), "invariant");
    } else {
      // next_bottom should match the end of the last/previous region
      assert(next_bottom == at(index - 1)->end(), "invariant");
    }

    if (index == _allocated_length) {
      // We have to allocate a new HeapRegion.
      HeapRegion* new_hr = g1h->new_heap_region(index, next_bottom);
      if (new_hr == NULL) {
        // allocation failed, we bail out and return what we have done so far
        return MemRegion(old_end, next_bottom);
      }
      assert(_regions.get_by_index(index) == NULL, "invariant");
      _regions.set_by_index(index, new_hr);
      increment_allocated_length();
    }

    // Have to increment the length first, otherwise we will get an
    // assert failure in at(index) below.
    increment_length();
    HeapRegion* hr = at(index);
    list->add_as_tail(hr);

    next_bottom = hr->end();
  }
  assert(next_bottom == new_end, "post-condition");
  return MemRegion(old_end, next_bottom);
}
void HeapRegionSeq::verify_optional() {
  guarantee(length() <= _allocated_length,
            err_msg("invariant: _length: %u _allocated_length: %u",
                    length(), _allocated_length));
  guarantee(_allocated_length <= max_length(),
            err_msg("invariant: _allocated_length: %u _max_length: %u",
                    _allocated_length, max_length()));
  guarantee(_next_search_index <= length(),
            err_msg("invariant: _next_search_index: %u _length: %u",
                    _next_search_index, length()));

  HeapWord* prev_end = heap_bottom();
  for (uint i = 0; i < _allocated_length; i += 1) {
    HeapRegion* hr = _regions.get_by_index(i);
    guarantee(hr != NULL, err_msg("invariant: i: %u", i));
    guarantee(hr->bottom() == prev_end,
              err_msg("invariant i: %u "HR_FORMAT" prev_end: "PTR_FORMAT,
                      i, HR_FORMAT_PARAMS(hr), p2i(prev_end)));
    guarantee(hr->hrs_index() == i,
              err_msg("invariant: i: %u hrs_index(): %u", i, hr->hrs_index()));
    if (i < length()) {
      // Asserts will fire if i is >= _length
      HeapWord* addr = hr->bottom();
      guarantee(addr_to_region(addr) == hr, "sanity");
      guarantee(addr_to_region_unsafe(addr) == hr, "sanity");
    } else {
      guarantee(hr->is_empty(), "sanity");
      guarantee(!hr->isHumongous(), "sanity");
      // using assert instead of guarantee here since containing_set()
      // is only available in non-product builds.
      assert(hr->containing_set() == NULL, "sanity");
    }
    if (hr->startsHumongous()) {
      prev_end = hr->orig_end();
    } else {
      prev_end = hr->end();
    }
  }
  for (uint i = _allocated_length; i < max_length(); i += 1) {
    guarantee(_regions.get_by_index(i) == NULL, err_msg("invariant i: %u", i));
  }
}
void HeapRegionManager::verify() {
  guarantee(length() <= _allocated_heapregions_length,
            "invariant: _length: %u _allocated_length: %u",
            length(), _allocated_heapregions_length);
  guarantee(_allocated_heapregions_length <= max_length(),
            "invariant: _allocated_length: %u _max_length: %u",
            _allocated_heapregions_length, max_length());

  bool prev_committed = true;
  uint num_committed = 0;
  HeapWord* prev_end = heap_bottom();
  for (uint i = 0; i < _allocated_heapregions_length; i++) {
    if (!is_available(i)) {
      prev_committed = false;
      continue;
    }
    num_committed++;
    HeapRegion* hr = _regions.get_by_index(i);
    guarantee(hr != NULL, "invariant: i: %u", i);
    guarantee(!prev_committed || hr->bottom() == prev_end,
              "invariant i: %u " HR_FORMAT " prev_end: " PTR_FORMAT,
              i, HR_FORMAT_PARAMS(hr), p2i(prev_end));
    guarantee(hr->hrm_index() == i,
              "invariant: i: %u hrm_index(): %u", i, hr->hrm_index());
    // Asserts will fire if i is >= _length
    HeapWord* addr = hr->bottom();
    guarantee(addr_to_region(addr) == hr, "sanity");
    // We cannot check whether the region is part of a particular set: at the time
    // this method may be called, we have only completed allocation of the regions,
    // but not yet put them into a region set.
    prev_committed = true;
    prev_end = hr->end();
  }
  for (uint i = _allocated_heapregions_length; i < max_length(); i++) {
    guarantee(_regions.get_by_index(i) == NULL, "invariant i: %u", i);
  }

  guarantee(num_committed == _num_committed,
            "Found %u committed regions, but should be %u",
            num_committed, _num_committed);
  _free_list.verify();
}
template <class T> void do_oop_work(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);

    // Note: not all the oops embedded in the nmethod are in the
    // current region. We only look at those which are.
    if (_hr->is_in(obj)) {
      // Object is in the region. Check that it's below top.
      if (_hr->top() <= (HeapWord*)obj) {
        // Object is above top
        gclog_or_tty->print_cr("Object "PTR_FORMAT" in region "
                               "["PTR_FORMAT", "PTR_FORMAT") is above "
                               "top "PTR_FORMAT,
                               (void *)obj, _hr->bottom(), _hr->end(), _hr->top());
        _failures = true;
        return;
      }
      // Nmethod has at least one oop in the current region
      _has_oops_in_region = true;
    }
  }
}
template <class T> void do_oop_work(T* p) {
  assert(_containing_obj != NULL, "Precondition");
  assert(!_g1h->is_obj_dead_cond(_containing_obj, _use_prev_marking),
         "Precondition");
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    bool failed = false;
    if (!_g1h->is_in_closed_subset(obj) ||
        _g1h->is_obj_dead_cond(obj, _use_prev_marking)) {
      if (!_failures) {
        gclog_or_tty->print_cr("");
        gclog_or_tty->print_cr("----------");
      }
      if (!_g1h->is_in_closed_subset(obj)) {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        gclog_or_tty->print_cr("Field "PTR_FORMAT
                               " of live obj "PTR_FORMAT" in region "
                               "["PTR_FORMAT", "PTR_FORMAT")",
                               p, (void*) _containing_obj,
                               from->bottom(), from->end());
        print_object(gclog_or_tty, _containing_obj);
        gclog_or_tty->print_cr("points to obj "PTR_FORMAT" not in the heap",
                               (void*) obj);
      } else {
        HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
        HeapRegion* to   = _g1h->heap_region_containing((HeapWord*)obj);
        gclog_or_tty->print_cr("Field "PTR_FORMAT
                               " of live obj "PTR_FORMAT" in region "
                               "["PTR_FORMAT", "PTR_FORMAT")",
                               p, (void*) _containing_obj,
                               from->bottom(), from->end());
        print_object(gclog_or_tty, _containing_obj);
        gclog_or_tty->print_cr("points to dead obj "PTR_FORMAT" in region "
                               "["PTR_FORMAT", "PTR_FORMAT")",
                               (void*) obj, to->bottom(), to->end());
        print_object(gclog_or_tty, obj);
      }
      gclog_or_tty->print_cr("----------");
      _failures = true;
      failed = true;
      _n_failures++;
    }

    if (!_g1h->full_collection()) {
      HeapRegion* from = _g1h->heap_region_containing((HeapWord*)p);
      HeapRegion* to   = _g1h->heap_region_containing(obj);
      if (from != NULL && to != NULL &&
          from != to &&
          !to->isHumongous()) {
        jbyte cv_obj = *_bs->byte_for_const(_containing_obj);
        jbyte cv_field = *_bs->byte_for_const(p);
        const jbyte dirty = CardTableModRefBS::dirty_card_val();

        bool is_bad = !(from->is_young()
                        || to->rem_set()->contains_reference(p)
                        || !G1HRRSFlushLogBuffersOnVerify && // buffers were not flushed
                            (_containing_obj->is_objArray() ?
                                cv_field == dirty
                             : cv_obj == dirty || cv_field == dirty));

        if (is_bad) {
          if (!_failures) {
            gclog_or_tty->print_cr("");
            gclog_or_tty->print_cr("----------");
          }
          gclog_or_tty->print_cr("Missing rem set entry:");
          gclog_or_tty->print_cr("Field "PTR_FORMAT
                                 " of obj "PTR_FORMAT
                                 ", in region %d ["PTR_FORMAT
                                 ", "PTR_FORMAT"),",
                                 p, (void*) _containing_obj,
                                 from->hrs_index(),
                                 from->bottom(),
                                 from->end());
          _containing_obj->print_on(gclog_or_tty);
          gclog_or_tty->print_cr("points to obj "PTR_FORMAT
                                 " in region %d ["PTR_FORMAT
                                 ", "PTR_FORMAT").",
                                 (void*) obj, to->hrs_index(),
                                 to->bottom(), to->end());
          obj->print_on(gclog_or_tty);
          gclog_or_tty->print_cr("Obj head CTE = %d, field CTE = %d.",
                                 cv_obj, cv_field);
          gclog_or_tty->print_cr("----------");
          _failures = true;
          if (!failed) _n_failures++;
        }
      }
    }
  }
}