inline HeapRegion* HeapRegionManager::at(uint index) const {
  assert(is_available(index), "pre-condition");
  HeapRegion* hr = _regions.get_by_index(index);
  assert(hr != NULL, "sanity");
  assert(hr->hrm_index() == index, "sanity");
  return hr;
}
uint HeapRegionManager::find_highest_free(bool* expanded) {
  // Loop downwards from the highest region index, looking for an
  // entry which is either free or not yet committed. If not yet
  // committed, expand_at that index.
  uint curr = max_length() - 1;
  while (true) {
    HeapRegion *hr = _regions.get_by_index(curr);
    if (hr == NULL) {
      uint res = expand_at(curr, 1);
      if (res == 1) {
        *expanded = true;
        return curr;
      }
    } else {
      if (hr->is_free()) {
        *expanded = false;
        return curr;
      }
    }
    if (curr == 0) {
      return G1_NO_HRM_INDEX;
    }
    curr--;
  }
}
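The function prefers the highest-numbered region it can hand out, committing an uncommitted slot on demand. Below is a minimal standalone sketch of the same downward scan, assuming a toy slot table with three states; the type and function names are illustrative, not HotSpot APIs.

#include <cstdint>
#include <vector>

enum class Slot { Uncommitted, Free, Used };
constexpr uint32_t NO_INDEX = UINT32_MAX;   // stands in for G1_NO_HRM_INDEX

// Scan from the highest index downwards; return the first slot that is
// already Free, or an Uncommitted slot that we "commit" on the fly.
uint32_t find_highest_free(std::vector<Slot>& slots, bool* expanded) {
  for (uint32_t curr = static_cast<uint32_t>(slots.size()); curr-- > 0; ) {
    if (slots[curr] == Slot::Uncommitted) {
      slots[curr] = Slot::Free;   // stands in for expand_at(curr, 1)
      *expanded = true;
      return curr;
    }
    if (slots[curr] == Slot::Free) {
      *expanded = false;
      return curr;
    }
  }
  return NO_INDEX;
}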
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = heap_region_containing_raw(addr);
  if (hr->is_continues_humongous()) {
    return hr->humongous_start_region();
  }
  return hr;
}
void G1MarkSweep::mark_sweep_phase2() {
  // Now all live objects are marked, compute the new object addresses.

  // It is imperative that we traverse perm_gen LAST. If dead space is
  // allowed, a range of dead objects may get overwritten by a dead int
  // array. If perm_gen is not traversed last a klassOop may get
  // overwritten. This is fine since it is dead, but if the class has dead
  // instances we have to skip them, and in order to find their size we
  // need the klassOop!
  //
  // It is not required that we traverse spaces in the same order in
  // phase2, phase3 and phase4, but the ValidateMarkSweep live oops
  // tracking expects us to do so. See comment under phase4.

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  Generation* pg = g1h->perm_gen();

  EventMark m("2 compute new addresses");
  TraceTime tm("phase 2", PrintGC && Verbose, true, gclog_or_tty);
  GenMarkSweep::trace("2");

  FindFirstRegionClosure cl;
  g1h->heap_region_iterate(&cl);
  HeapRegion *r = cl.result();
  CompactibleSpace* sp = r;
  if (r->isHumongous() && oop(r->bottom())->is_gc_marked()) {
    sp = r->next_compaction_space();
  }

  G1PrepareCompactClosure blk(sp);
  g1h->heap_region_iterate(&blk);

  CompactPoint perm_cp(pg, NULL, NULL);
  pg->prepare_for_compaction(&perm_cp);
}
void CSetChooserCache::insert(HeapRegion *hr) {
  guarantee(false, "CSetChooserCache::insert(): don't call this any more");

  assert(!is_full(), "cache should not be full");
  hr->calc_gc_efficiency();

  int empty_index;
  if (_occupancy == 0) {
    empty_index = _first;
  } else {
    empty_index = trim_index(_first + _occupancy);
    assert(_cache[empty_index] == NULL, "last slot should be empty");
    int last_index = trim_index(empty_index - 1);
    HeapRegion *last = _cache[last_index];
    assert(last != NULL,
           "as the cache is not empty, last should not be empty");
    while (empty_index != _first &&
           last->gc_efficiency() < hr->gc_efficiency()) {
      _cache[empty_index] = last;
      last->set_sort_index(get_sort_index(empty_index));
      empty_index = last_index;
      last_index = trim_index(last_index - 1);
      last = _cache[last_index];
    }
  }

  _cache[empty_index] = hr;
  hr->set_sort_index(get_sort_index(empty_index));

  ++_occupancy;
  assert(verify(), "cache should be consistent");
}
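insert() keeps the cache ordered by GC efficiency by shifting less efficient entries one slot towards the tail of a circular buffer until the new region falls into place. A minimal sketch of that shift-down insertion, using plain doubles for the efficiency key; CacheLen and the field names are illustrative, not the HotSpot ones.

#include <cassert>

constexpr int CacheLen = 8;

struct EfficiencyCache {
  double _cache[CacheLen] = {};
  int    _first = 0;        // index of the most efficient entry
  int    _occupancy = 0;

  static int trim(int i) { return ((i % CacheLen) + CacheLen) % CacheLen; }
  bool is_full() const { return _occupancy == CacheLen; }

  // Keep entries ordered by decreasing efficiency starting at _first:
  // shift less efficient entries one slot towards the tail until the
  // new value can be placed.
  void insert(double eff) {
    assert(!is_full() && "cache should not be full");
    int empty = trim(_first + _occupancy);
    while (empty != _first && _cache[trim(empty - 1)] < eff) {
      _cache[empty] = _cache[trim(empty - 1)];
      empty = trim(empty - 1);
    }
    _cache[empty] = eff;
    ++_occupancy;
  }
};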
oop G1ParScanThreadState::handle_evacuation_failure_par(oop old, markOop m) {
  assert(_g1h->obj_in_cs(old),
         err_msg("Object " PTR_FORMAT " should be in the CSet", p2i(old)));

  oop forward_ptr = old->forward_to_atomic(old);
  if (forward_ptr == NULL) {
    // Forward-to-self succeeded. We are the "owner" of the object.
    HeapRegion* r = _g1h->heap_region_containing(old);

    if (!r->evacuation_failed()) {
      r->set_evacuation_failed(true);
      _g1h->hr_printer()->evac_failure(r);
    }

    _g1h->preserve_mark_during_evac_failure(_worker_id, old, m);

    _scanner.set_region(r);
    old->oop_iterate_backwards(&_scanner);

    return old;
  } else {
    // Forward-to-self failed. Either someone else managed to allocate
    // space for this object (old != forward_ptr) or they beat us in
    // self-forwarding it (old == forward_ptr).
    assert(old == forward_ptr || !_g1h->obj_in_cs(forward_ptr),
           err_msg("Object " PTR_FORMAT " forwarded to: " PTR_FORMAT " "
                   "should not be in the CSet",
                   p2i(old), p2i(forward_ptr)));
    return forward_ptr;
  }
}
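The key step is forward_to_atomic(old): whichever thread first installs the self-forwarding pointer "owns" the failed object and performs the fix-up work; every other thread simply uses the forwarding value that won. A minimal sketch of that claim-by-CAS idea with std::atomic; the Obj type and handle_failure name are illustrative, not the HotSpot mark-word protocol.

#include <atomic>
#include <cstdio>

struct Obj {
  std::atomic<Obj*> forwardee{nullptr};
};

// Returns the address every thread must agree on for 'old_obj'.
Obj* handle_failure(Obj* old_obj) {
  Obj* expected = nullptr;
  // Try to forward the object to itself; only one thread can succeed.
  if (old_obj->forwardee.compare_exchange_strong(expected, old_obj)) {
    // We won: we are responsible for the evacuation-failure bookkeeping.
    std::printf("claimed %p\n", static_cast<void*>(old_obj));
    return old_obj;
  }
  // Somebody else forwarded it first (possibly to a real copy); use theirs.
  return expected;
}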
void HeapRegionManager::make_regions_available(uint start, uint num_regions) {
  guarantee(num_regions > 0, "No point in calling this for zero regions");
  commit_regions(start, num_regions);
  for (uint i = start; i < start + num_regions; i++) {
    if (_regions.get_by_index(i) == NULL) {
      HeapRegion* new_hr = new_heap_region(i);
      _regions.set_by_index(i, new_hr);
      _allocated_heapregions_length = MAX2(_allocated_heapregions_length, i + 1);
    }
  }

  _available_map.par_set_range(start, start + num_regions, BitMap::unknown_range);

  for (uint i = start; i < start + num_regions; i++) {
    assert(is_available(i), "Just made region %u available but is apparently not.", i);
    HeapRegion* hr = at(i);
    if (G1CollectedHeap::heap()->hr_printer()->is_active()) {
      G1CollectedHeap::heap()->hr_printer()->commit(hr);
    }
    HeapWord* bottom = G1CollectedHeap::heap()->bottom_addr_for_region(i);
    MemRegion mr(bottom, bottom + HeapRegion::GrainWords);

    hr->initialize(mr);
    insert_into_free_list(at(i));
  }
}
template <class T>
inline void G1RemSet::par_write_ref(HeapRegion* from, T* p, uint tid) {
  oop obj = oopDesc::load_decode_heap_oop(p);
  if (obj == NULL) {
    return;
  }

#ifdef ASSERT
  // can't do because of races
  // assert(obj == NULL || obj->is_oop(), "expected an oop");

  // Do the safe subset of is_oop
#ifdef CHECK_UNHANDLED_OOPS
  oopDesc* o = obj.obj();
#else
  oopDesc* o = obj;
#endif // CHECK_UNHANDLED_OOPS
  assert((intptr_t)o % MinObjAlignmentInBytes == 0, "not oop aligned");
  assert(_g1->is_in_reserved(obj), "must be in heap");
#endif // ASSERT

  assert(from == NULL || from->is_in_reserved(p), "p is not in from");

  HeapRegion* to = _g1->heap_region_containing(obj);
  if (from != to) {
    assert(to->rem_set() != NULL, "Need per-region 'into' remsets.");
    to->rem_set()->add_reference(p, tid);
  }
}
bool G1ArchiveAllocator::alloc_new_region() {
  // Allocate the highest free region in the reserved heap,
  // and add it to our list of allocated regions. It is marked
  // archive and added to the old set.
  HeapRegion* hr = _g1h->alloc_highest_free_region();
  if (hr == NULL) {
    return false;
  }
  assert(hr->is_empty(), "expected empty region (index %u)", hr->hrm_index());
  hr->set_archive();
  _g1h->old_set_add(hr);
  _g1h->hr_printer()->alloc(hr);
  _allocated_regions.append(hr);
  _allocation_region = hr;

  // Set up _bottom and _max to begin allocating in the lowest
  // min_region_size'd chunk of the allocated G1 region.
  _bottom = hr->bottom();
  _max = _bottom + HeapRegion::min_region_size_in_words();

  // Tell mark-sweep that objects in this region are not to be marked.
  G1MarkSweep::set_range_archive(MemRegion(_bottom, HeapRegion::GrainWords), true);

  // Since we've modified the old set, call update_sizes.
  _g1h->g1mm()->update_sizes();
  return true;
}
uint HeapRegionManager::find_contiguous(size_t num, bool empty_only) {
  uint found = 0;
  size_t length_found = 0;
  uint cur = 0;

  while (length_found < num && cur < max_length()) {
    HeapRegion* hr = _regions.get_by_index(cur);
    if ((!empty_only && !is_available(cur)) ||
        (is_available(cur) && hr != NULL && hr->is_empty())) {
      // This region is a potential candidate for allocation into.
      length_found++;
    } else {
      // This region is not a candidate. The next region is the next possible one.
      found = cur + 1;
      length_found = 0;
    }
    cur++;
  }

  if (length_found == num) {
    for (uint i = found; i < (found + num); i++) {
      HeapRegion* hr = _regions.get_by_index(i);
      // sanity check
      guarantee((!empty_only && !is_available(i)) ||
                (is_available(i) && hr != NULL && hr->is_empty()),
                "Found region sequence starting at " UINT32_FORMAT ", length " SIZE_FORMAT
                " that is not empty at " UINT32_FORMAT ". Hr is " PTR_FORMAT,
                found, num, i, p2i(hr));
    }
    return found;
  } else {
    return G1_NO_HRM_INDEX;
  }
}
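find_contiguous() is a plain linear scan that restarts the candidate run whenever it meets a non-qualifying region and succeeds once the run reaches the requested length. A minimal standalone sketch of the same scan over a boolean candidate vector; the names are illustrative.

#include <cstddef>
#include <vector>

constexpr std::size_t NOT_FOUND = static_cast<std::size_t>(-1);

std::size_t find_contiguous(const std::vector<bool>& candidate, std::size_t num) {
  std::size_t found = 0;         // start of the current run
  std::size_t length_found = 0;  // length of the current run
  std::size_t cur = 0;
  while (length_found < num && cur < candidate.size()) {
    if (candidate[cur]) {
      length_found++;            // extend the current run
    } else {
      found = cur + 1;           // restart the run after this slot
      length_found = 0;
    }
    cur++;
  }
  return (length_found == num) ? found : NOT_FOUND;
}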
HeapRegion* OldGCAllocRegion::release() {
  HeapRegion* cur = get();
  if (cur != NULL) {
    // Determine how far we are from the next card boundary. If it is smaller than
    // the minimum object size we can allocate into, expand into the next card.
    HeapWord* top = cur->top();
    HeapWord* aligned_top = (HeapWord*)align_ptr_up(top, G1BlockOffsetSharedArray::N_bytes);

    size_t to_allocate_words = pointer_delta(aligned_top, top, HeapWordSize);

    if (to_allocate_words != 0) {
      // We are not at a card boundary. Fill up, possibly into the next, taking the
      // end of the region and the minimum object size into account.
      to_allocate_words = MIN2(pointer_delta(cur->end(), cur->top(), HeapWordSize),
                               MAX2(to_allocate_words, G1CollectedHeap::min_fill_size()));

      // Skip allocation if there is not enough space to allocate even the smallest
      // possible object. In this case this region will not be retained, so the
      // original problem cannot occur.
      if (to_allocate_words >= G1CollectedHeap::min_fill_size()) {
        HeapWord* dummy = attempt_allocation(to_allocate_words, true /* bot_updates */);
        CollectedHeap::fill_with_object(dummy, to_allocate_words);
      }
    }
  }
  return G1AllocRegion::release();
}
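The arithmetic pads the region's current top up to the next card boundary, growing the pad to the minimum fill object when necessary but never past the end of the region. A minimal sketch of just that padding computation in plain size_t arithmetic; the parameter names and constants are illustrative, not the HotSpot ones.

#include <algorithm>
#include <cstddef>

std::size_t padding_words(std::size_t top_words,       // current top, in words
                          std::size_t end_words,       // region end, in words
                          std::size_t card_words,      // words per card
                          std::size_t min_fill_words)  // smallest fillable object
{
  std::size_t aligned_top = ((top_words + card_words - 1) / card_words) * card_words;
  std::size_t to_allocate = aligned_top - top_words;
  if (to_allocate == 0) {
    return 0;  // already at a card boundary
  }
  // Possibly grow to the minimum fill size, but never past the region end.
  to_allocate = std::min(end_words - top_words,
                         std::max(to_allocate, min_fill_words));
  // Too little room even for the smallest object: skip the padding.
  return (to_allocate >= min_fill_words) ? to_allocate : 0;
}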
inline HeapRegion* HeapRegionSeq::at(uint index) const {
  assert(index < length(), "pre-condition");
  HeapRegion* hr = _regions.get_by_index(index);
  assert(hr != NULL, "sanity");
  assert(hr->hrs_index() == index, "sanity");
  return hr;
}
size_t G1CMObjArrayProcessor::process_slice(oop obj) {
  HeapWord* const decoded_address = decode_array_slice(obj);

  // Find the start address of the objArrayOop.
  // Shortcut the BOT access if the given address is from a humongous object. The BOT
  // slide is fast enough for "smaller" objects in non-humongous regions, but is slower
  // than directly using the heap region table.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  HeapRegion* r = g1h->heap_region_containing(decoded_address);

  HeapWord* const start_address = r->is_humongous() ?
                                  r->humongous_start_region()->bottom() :
                                  g1h->block_start(decoded_address);

  assert(oop(start_address)->is_objArray(),
         "Address " PTR_FORMAT " does not refer to an object array", p2i(start_address));
  assert(start_address < decoded_address,
         "Object start address " PTR_FORMAT " must be smaller than decoded address " PTR_FORMAT,
         p2i(start_address), p2i(decoded_address));

  objArrayOop objArray = objArrayOop(start_address);

  size_t already_scanned = decoded_address - start_address;
  size_t remaining = objArray->size() - already_scanned;

  return process_array_slice(objArray, decoded_address, remaining);
}
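The remaining work for a slice is simply the array's total size minus the portion that lies before the resume address. A minimal sketch of that arithmetic, with plain pointers and a word count standing in for the oop/HeapWord types; names are illustrative.

#include <cassert>
#include <cstddef>

std::size_t remaining_words(const char* array_start,
                            std::size_t array_size_words,
                            const char* resume_point,
                            std::size_t bytes_per_word) {
  assert(array_start < resume_point);
  // Portion already covered by earlier slices, measured in words.
  std::size_t already_scanned =
      static_cast<std::size_t>(resume_point - array_start) / bytes_per_word;
  return array_size_words - already_scanned;
}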
HeapRegion* allocate_free_region(bool is_old) {
  HeapRegion* hr = _free_list.remove_region(is_old);

  if (hr != NULL) {
    assert(hr->next() == NULL, "Single region should not have next");
    assert(is_available(hr->hrm_index()), "Must be committed");
  }
  return hr;
}
template <class T>
inline HeapRegion* G1CollectedHeap::heap_region_containing(const T addr) const {
  HeapRegion* hr = _hrs.addr_to_region((HeapWord*) addr);
  // hr can be null if addr in perm_gen
  if (hr != NULL && hr->continuesHumongous()) {
    hr = hr->humongous_start_region();
  }
  return hr;
}
void CSetChooserCache::clear() {
  _occupancy = 0;
  _first = 0;
  for (int i = 0; i < CacheLength; ++i) {
    HeapRegion *hr = _cache[i];
    if (hr != NULL) {
      hr->set_sort_index(-1);
    }
    _cache[i] = NULL;
  }
}
template <class T> void do_oop_work(T* p) {
  _work->do_oop(p);
  T oop_or_narrowoop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(oop_or_narrowoop)) {
    oop o = oopDesc::decode_heap_oop_not_null(oop_or_narrowoop);
    HeapRegion* hr = _g1h->heap_region_containing_raw(o);
    assert(!_g1h->obj_in_cs(o) || hr->rem_set()->strong_code_roots_list_contains(_nm),
           "if o still in CS then evacuation failed and nm must already be in the remset");
    hr->add_strong_code_root(_nm);
  }
}
inline HeapRegion* FreeRegionList::remove_from_head_impl() {
  HeapRegion* result = _head;
  _head = result->next();
  if (_head == NULL) {
    _tail = NULL;
  } else {
    _head->set_prev(NULL);
  }
  result->set_next(NULL);
  return result;
}
inline HeapRegion* FreeRegionList::remove_from_tail_impl() {
  HeapRegion* result = _tail;
  _tail = result->prev();
  if (_tail == NULL) {
    _head = NULL;
  } else {
    _tail->set_next(NULL);
  }
  result->set_prev(NULL);
  return result;
}
void CollectionSetChooser::clearMarkedHeapRegions() {
  for (int i = 0; i < _markedRegions.length(); i++) {
    HeapRegion* r = _markedRegions.at(i);
    if (r != NULL) {
      r->set_sort_index(-1);
    }
  }
  _markedRegions.clear();
  _curr_index = 0;
  _length = 0;
  _remainingReclaimableBytes = 0;
}
virtual size_t used_in_alloc_regions() {
  assert(Heap_lock->owner() != NULL, "Should be owned on this thread's behalf.");
  size_t result = 0;

  // Read only once in case it is set to NULL concurrently
  HeapRegion* hr = mutator_alloc_region(AllocationContext::current())->get();
  if (hr != NULL) {
    result += hr->used();
  }
  return result;
}
HeapRegion* HRInto_G1RemSet::calculateStartRegion(int worker_i) {
  HeapRegion* result = _g1p->collection_set();
  if (ParallelGCThreads > 0) {
    size_t cs_size = _g1p->collection_set_size();
    int n_workers = _g1->workers()->total_workers();
    size_t cs_spans = cs_size / n_workers;
    size_t ind = cs_spans * worker_i;
    for (size_t i = 0; i < ind; i++) {
      result = result->next_in_collection_set();
    }
  }
  return result;
}
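Each worker derives its starting region by hopping cs_size / n_workers * worker_i links into the collection-set list, so the workers spread roughly evenly over it. A minimal standalone sketch of that partitioning over a singly linked list; the Node type and names are illustrative.

#include <cstddef>

struct Node {
  Node* next;
};

Node* calculate_start(Node* head, std::size_t list_size,
                      unsigned n_workers, unsigned worker_i) {
  Node* result = head;
  if (n_workers > 1) {
    std::size_t span = list_size / n_workers;   // elements per worker
    std::size_t skip = span * worker_i;         // this worker's offset
    for (std::size_t i = 0; i < skip && result != nullptr; i++) {
      result = result->next;
    }
  }
  return result;
}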
template <class T>
inline void FilterAndMarkInHeapRegionAndIntoCSClosure::do_oop_nv(T* p) {
  T heap_oop = oopDesc::load_heap_oop(p);
  if (!oopDesc::is_null(heap_oop)) {
    oop obj = oopDesc::decode_heap_oop_not_null(heap_oop);
    HeapRegion* hr = _g1->heap_region_containing((HeapWord*) obj);
    if (hr != NULL) {
      if (hr->in_collection_set()) {
        _oc->do_oop(p);
      } else if (!hr->is_young()) {
        _cm->grayRoot(obj);
      }
    }
  }
}
bool CollectionSetChooser::verify() {
  guarantee(_length >= 0, err_msg("_length: %d", _length));
  guarantee(0 <= _curr_index && _curr_index <= _length,
            err_msg("_curr_index: %d _length: %d", _curr_index, _length));
  int index = 0;
  size_t sum_of_reclaimable_bytes = 0;
  while (index < _curr_index) {
    guarantee(_markedRegions.at(index) == NULL,
              "all entries before _curr_index should be NULL");
    index += 1;
  }
  HeapRegion *prev = NULL;
  while (index < _length) {
    HeapRegion *curr = _markedRegions.at(index++);
    guarantee(curr != NULL, "Regions in _markedRegions array cannot be NULL");
    int si = curr->sort_index();
    guarantee(!curr->is_young(), "should not be young!");
    guarantee(!curr->isHumongous(), "should not be humongous!");
    guarantee(si > -1 && si == (index-1), "sort index invariant");
    if (prev != NULL) {
      guarantee(orderRegions(prev, curr) != 1,
                err_msg("GC eff prev: %1.4f GC eff curr: %1.4f",
                        prev->gc_efficiency(), curr->gc_efficiency()));
    }
    sum_of_reclaimable_bytes += curr->reclaimable_bytes();
    prev = curr;
  }
  guarantee(sum_of_reclaimable_bytes == _remainingReclaimableBytes,
            err_msg("reclaimable bytes inconsistent, "
                    "remaining: " SIZE_FORMAT " sum: " SIZE_FORMAT,
                    _remainingReclaimableBytes, sum_of_reclaimable_bytes));
  return true;
}
CompactibleSpace* HeapRegion::next_compaction_space() const {
  // We're not using an iterator given that it will wrap around when
  // it reaches the last region and this is not what we want here.
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  uint index = hrs_index() + 1;
  while (index < g1h->n_regions()) {
    HeapRegion* hr = g1h->region_at(index);
    if (!hr->isHumongous()) {
      return hr;
    }
    index += 1;
  }
  return NULL;
}
void HRInto_G1RemSet::concurrentRefineOneCard_impl(jbyte* card_ptr, int worker_i) {
  // Construct the region representing the card.
  HeapWord* start = _ct_bs->addr_for(card_ptr);
  // And find the region containing it.
  HeapRegion* r = _g1->heap_region_containing(start);
  assert(r != NULL, "unexpected null");

  HeapWord* end = _ct_bs->addr_for(card_ptr + 1);
  MemRegion dirtyRegion(start, end);

#if CARD_REPEAT_HISTO
  init_ct_freq_table(_g1->g1_reserved_obj_bytes());
  ct_freq_note_card(_ct_bs->index_for(start));
#endif

  UpdateRSOopClosure update_rs_oop_cl(this, worker_i);
  update_rs_oop_cl.set_from(r);
  FilterOutOfRegionClosure filter_then_update_rs_oop_cl(r, &update_rs_oop_cl);

  // Undirty the card.
  *card_ptr = CardTableModRefBS::clean_card_val();
  // We must complete this write before we do any of the reads below.
  OrderAccess::storeload();
  // And process it, being careful of unallocated portions of TLAB's.
  HeapWord* stop_point =
    r->oops_on_card_seq_iterate_careful(dirtyRegion,
                                        &filter_then_update_rs_oop_cl);

  // If stop_point is non-null, then we encountered an unallocated region
  // (perhaps the unfilled portion of a TLAB.) For now, we'll dirty the
  // card and re-enqueue: if we put off the card until a GC pause, then the
  // unallocated portion will be filled in. Alternatively, we might try
  // the full complexity of the technique used in "regular" precleaning.
  if (stop_point != NULL) {
    // The card might have gotten re-dirtied and re-enqueued while we
    // worked. (In fact, it's pretty likely.)
    if (*card_ptr != CardTableModRefBS::dirty_card_val()) {
      *card_ptr = CardTableModRefBS::dirty_card_val();
      MutexLockerEx x(Shared_DirtyCardQ_lock,
                      Mutex::_no_safepoint_check_flag);
      DirtyCardQueue* sdcq =
        JavaThread::dirty_card_queue_set().shared_dirty_card_queue();
      sdcq->enqueue(card_ptr);
    }
  } else {
    out_of_histo.add_entry(filter_then_update_rs_oop_cl.out_of_region());
    _conc_refine_cards++;
  }
}
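The ordering matters here: the card is cleaned before the scan and a storeload barrier orders that write before the subsequent reads, so a concurrent re-dirtying by a mutator is not lost; if the scan has to stop early the card is re-dirtied and re-queued for later. A minimal sketch of that clean/fence/scan/requeue shape using std::atomic; everything here is illustrative, not the HotSpot card-table types, and the compare-exchange stands in for the "only re-dirty if nobody else already did" check.

#include <atomic>
#include <cstdint>
#include <deque>

constexpr uint8_t clean_card = 0;
constexpr uint8_t dirty_card = 1;

void refine_one_card(std::atomic<uint8_t>& card,
                     std::deque<std::atomic<uint8_t>*>& redo_queue,
                     bool (*scan_card)() /* returns false if it stopped early */) {
  // Undirty the card, and make sure the write is ordered before the scan.
  card.store(clean_card, std::memory_order_seq_cst);
  std::atomic_thread_fence(std::memory_order_seq_cst);  // storeload stand-in

  if (!scan_card()) {
    // Hit an unallocated portion: put the card back for a later retry,
    // unless a mutator already re-dirtied (and hence re-enqueued) it.
    uint8_t expected = clean_card;
    if (card.compare_exchange_strong(expected, dirty_card)) {
      redo_queue.push_back(&card);
    }
  }
}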
size_t G1Allocator::unsafe_max_tlab_alloc(AllocationContext_t context) {
  // Return the remaining space in the cur alloc region, but not less than
  // the min TLAB size.
  //
  // Also, this value can be at most the humongous object threshold,
  // since we can't allow tlabs to grow big enough to accommodate
  // humongous objects.
  HeapRegion* hr = mutator_alloc_region(context)->get();
  size_t max_tlab = _g1h->max_tlab_size() * wordSize;
  if (hr == NULL) {
    return max_tlab;
  } else {
    return MIN2(MAX2(hr->free(), (size_t) MinTLABSize), max_tlab);
  }
}
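The returned limit is just a clamp: the free space in the current allocation region, raised to at least the minimum TLAB size and capped at the maximum (humongous-threshold-derived) TLAB size. A minimal sketch of the rule as plain arithmetic; the byte values in the usage example are illustrative.

#include <algorithm>
#include <cstddef>
#include <cstdio>

std::size_t unsafe_max_tlab(std::size_t region_free_bytes,
                            std::size_t min_tlab_bytes,
                            std::size_t max_tlab_bytes) {
  return std::min(std::max(region_free_bytes, min_tlab_bytes), max_tlab_bytes);
}

int main() {
  // e.g. 12 KB free, 2 KB minimum, 512 KB cap -> 12 KB
  std::printf("%zu\n", unsafe_max_tlab(12 * 1024, 2 * 1024, 512 * 1024));
  return 0;
}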
inline bool requires_marking(const void* entry, G1CollectedHeap* heap) {
  // Includes rejection of NULL pointers.
  assert(heap->is_in_reserved(entry),
         err_msg("Non-heap pointer in SATB buffer: " PTR_FORMAT, p2i(entry)));

  HeapRegion* region = heap->heap_region_containing_raw(entry);
  assert(region != NULL, err_msg("No region for " PTR_FORMAT, p2i(entry)));
  if (entry >= region->next_top_at_mark_start()) {
    return false;
  }

  assert(((oop)entry)->is_oop(true /* ignore mark word */),
         err_msg("Invalid oop in SATB buffer: " PTR_FORMAT, p2i(entry)));

  return true;
}
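An entry at or above the region's next_top_at_mark_start() was allocated after marking began, so it is treated as implicitly live and the SATB entry can be dropped. A minimal sketch of that TAMS filter as pointer arithmetic; the types and names are illustrative.

#include <cstdint>

bool requires_marking(const void* entry, const void* top_at_mark_start) {
  // Entries at or above TAMS were allocated since marking began; they do
  // not need to be pushed onto the mark stack.
  return reinterpret_cast<std::uintptr_t>(entry) <
         reinterpret_cast<std::uintptr_t>(top_at_mark_start);
}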
HeapRegion *CSetChooserCache::remove_first() {
  guarantee(false, "CSetChooserCache::remove_first(): "
                   "don't call this any more");

  if (_occupancy > 0) {
    assert(_cache[_first] != NULL, "cache should have at least one region");
    HeapRegion *ret = _cache[_first];
    _cache[_first] = NULL;
    ret->set_sort_index(-1);
    --_occupancy;
    _first = trim_index(_first + 1);
    assert(verify(), "cache should be consistent");
    return ret;
  } else {
    return NULL;
  }
}
void CollectionSetChooser::fillCache() {
  guarantee(false, "fillCache: don't call this any more");

  while (!_cache.is_full() && (_curr_index < _length)) {
    HeapRegion* hr = _markedRegions.at(_curr_index);
    assert(hr != NULL,
           err_msg("Unexpected NULL hr in _markedRegions at index %d",
                   _curr_index));
    _curr_index += 1;
    assert(!hr->is_young(), "should not be young!");
    assert(hr->sort_index() == _curr_index-1, "sort_index invariant");
    _markedRegions.at_put(hr->sort_index(), NULL);
    _cache.insert(hr);
    assert(!_cache.is_empty(), "cache should not be empty");
  }
  assert(verify(), "cache should be consistent");
}