void VM_G1CollectForAllocation::doit() { G1CollectedHeap* g1h = G1CollectedHeap::heap(); GCCauseSetter x(g1h, _gc_cause); _result = g1h->satisfy_failed_allocation(_word_size, allocation_context(), &_pause_succeeded); assert(_result == NULL || _pause_succeeded, "if we get back a result, the pause should have succeeded"); }
void HeapRegion::print_on(outputStream* st) const {
  // One-line region summary: allocation context, short type tag,
  // collection-set membership, GC time stamp and the prev/next
  // top-at-mark-start pointers; then delegate to the underlying space.
  st->print("AC%4u", allocation_context());
  st->print(" %2s", get_short_type_str());
  st->print(in_collection_set() ? " CS" : "   ");
  st->print(" TS %5d", _gc_time_stamp);
  st->print(" PTAMS "PTR_FORMAT" NTAMS "PTR_FORMAT,
            prev_top_at_mark_start(), next_top_at_mark_start());
  G1OffsetTableContigSpace::print_on(st);
}
void G1AllocRegion::update_alloc_region(HeapRegion* alloc_region) {
  trace("update");
  // Invariant: the active alloc region is never empty, so reject a
  // missing or empty replacement up front.
  assert(alloc_region != NULL && !alloc_region->is_empty(),
         ar_ext_msg(this, "pre-condition"));

  // Install the new region, tag it with our allocation context, and
  // bump the count of regions this G1AllocRegion has handed out.
  _alloc_region = alloc_region;
  _alloc_region->set_allocation_context(allocation_context());
  ++_count;
  trace("updated");
}
// Run an incremental G1 collection pause at a safepoint. May instead be
// satisfied by a direct allocation, may kick off a concurrent marking
// cycle, and sets _should_retry_gc when the caller ought to retry.
void VM_G1IncCollectionPause::doit() {
  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  assert(!_should_initiate_conc_mark ||
         g1h->should_do_concurrent_full_gc(_gc_cause),
         "only a GC locker, a System.gc(), stats update, whitebox, or a hum allocation induced GC should start a cycle");

  if (_word_size > 0) {
    // An allocation has been requested. So, try to do that first.
    _result = g1h->attempt_allocation_at_safepoint(_word_size,
                                                   allocation_context(),
                                                   false /* expect_null_cur_alloc_region */);
    if (_result != NULL) {
      // If we can successfully allocate before we actually do the
      // pause then we will consider this pause successful.
      _pause_succeeded = true;
      return;
    }
  }

  GCCauseSetter x(g1h, _gc_cause);
  if (_should_initiate_conc_mark) {
    // It's safer to read old_marking_cycles_completed() here, given
    // that no one else will be updating it concurrently. Since we'll
    // only need it if we're initiating a marking cycle, no point in
    // setting it earlier.
    _old_marking_cycles_completed_before = g1h->old_marking_cycles_completed();

    // At this point we are supposed to start a concurrent cycle. We
    // will do so if one is not already in progress.
    bool res = g1h->g1_policy()->force_initial_mark_if_outside_cycle(_gc_cause);

    // The above routine returns true if we were able to force the
    // next GC pause to be an initial mark; it returns false if a
    // marking cycle is already in progress.
    //
    // If a marking cycle is already in progress just return and skip the
    // pause below - if the reason for requesting this initial mark pause
    // was due to a System.gc() then the requesting thread should block in
    // doit_epilogue() until the marking cycle is complete.
    //
    // If this initial mark pause was requested as part of a humongous
    // allocation then we know that the marking cycle must just have
    // been started by another thread (possibly also allocating a humongous
    // object) as there was no active marking cycle when the requesting
    // thread checked before calling collect() in
    // attempt_allocation_humongous(). Retrying the GC, in this case,
    // will cause the requesting thread to spin inside collect() until the
    // just started marking cycle is complete - which may be a while. So
    // we do NOT retry the GC.
    if (!res) {
      assert(_word_size == 0, "Concurrent Full GC/Humongous Object IM shouldn't be allocating");
      if (_gc_cause != GCCause::_g1_humongous_allocation) {
        _should_retry_gc = true;
      }
      return;
    }
  }

  _pause_succeeded =
    g1h->do_collection_pause_at_safepoint(_target_pause_time_ms);
  if (_pause_succeeded && _word_size > 0) {
    // An allocation had been requested. Retry it now that the pause
    // has freed space; this time the current alloc region must be NULL.
    _result = g1h->attempt_allocation_at_safepoint(_word_size,
                                                   allocation_context(),
                                                   true /* expect_null_cur_alloc_region */);
  } else {
    assert(_result == NULL, "invariant");
    if (!_pause_succeeded) {
      // Another possible reason for the pause to not be successful
      // is that, again, the GC locker is active (and has become active
      // since the prologue was executed). In this case we should retry
      // the pause after waiting for the GC locker to become inactive.
      _should_retry_gc = true;
    }
  }
}