// Invoked in the VM thread; runs an explicitly requested full collection
// of all generations up to _max_level.
void VM_GenCollectFull::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  gch->do_full_collection(gch->must_clear_all_soft_refs(), _max_level);
}
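
#if 0
// Illustrative sketch only, not part of this file: how a full collection
// request is typically queued from a Java thread, assuming the JDK7/8-era
// VM_GenCollectFull constructor
// (gc_count_before, full_gc_count_before, gc_cause, max_level).
static void request_full_gc_sketch(GCCause::Cause cause, int max_level) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  unsigned int gc_count_before;
  unsigned int full_gc_count_before;
  {
    // Snapshot the collection counts under the Heap_lock so the operation's
    // prologue can detect whether another GC already ran in the meantime.
    MutexLocker ml(Heap_lock);
    gc_count_before      = gch->total_collections();
    full_gc_count_before = gch->total_full_collections();
  }
  VM_GenCollectFull op(gc_count_before, full_gc_count_before, cause, max_level);
  VMThread::execute(&op);  // blocks until doit() has run in the VM thread
}
#endif

// Runs a full collection, dispatching on the heap kind (in this era both
// the generational heap and G1 keep a permanent generation), then retries
// the failed perm gen allocation.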
void VM_GenCollectForPermanentAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  SharedHeap* heap = (SharedHeap*)Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);
  switch (heap->kind()) {
    case (CollectedHeap::GenCollectedHeap): {
      GenCollectedHeap* gch = (GenCollectedHeap*)heap;
      gch->do_full_collection(gch->must_clear_all_soft_refs(),
                              gch->n_gens() - 1);
      break;
    }
#ifndef SERIALGC
    case (CollectedHeap::G1CollectedHeap): {
      G1CollectedHeap* g1h = (G1CollectedHeap*)heap;
      g1h->do_full_collection(_gc_cause == GCCause::_last_ditch_collection);
      break;
    }
#endif // SERIALGC
    default:
      ShouldNotReachHere();
  }
  _res = heap->perm_gen()->allocate(_size, false);
  assert(heap->is_in_reserved_or_null(_res), "result not in heap");
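  // If the allocation failed because the GC_locker is active and has
  // deferred the needed collection, record that fact so the caller knows
  // to retry once the GC_locker-induced collection has run. The
  // allocation operations below follow the same protocol.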
  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}
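
// Invoked in the VM thread when a Java thread's heap allocation has
// failed: runs a collection via satisfy_failed_allocation() and passes
// the (possibly NULL) result back to the requester through _res.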
void VM_GenCollectForAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::MINOR);

  GenCollectedHeap* gch = GenCollectedHeap::heap();
  GCCauseSetter gccs(gch, _gc_cause);
  _res = gch->satisfy_failed_allocation(_size, _tlab);
  assert(gch->is_in_reserved_or_null(_res), "result not in heap");

  if (_res == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}
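
#if 0
// Illustrative sketch only: the usual caller-side retry loop around
// VM_GenCollectForAllocation, modeled on the JDK7/8-era
// GenCollectorPolicy::mem_allocate_work slow path. The gc_count_before
// snapshot lets the prologue skip the GC if another thread collected
// first; gc_locked() tells the caller to retry after the pending
// GC_locker-induced collection.
static HeapWord* allocate_slow_sketch(size_t size, bool is_tlab) {
  for (;;) {
    unsigned int gc_count_before;
    {
      MutexLocker ml(Heap_lock);
      gc_count_before = GenCollectedHeap::heap()->total_collections();
    }
    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      if (op.gc_locked()) {
        continue;  // GC was locked out; loop and retry
      }
      return op.result();  // may still be NULL if the heap is exhausted
    }
    // Prologue failed: another GC ran in between; retry with a fresh count.
  }
}
#endif

// Invoked in the VM thread when a metaspace allocation has failed; walks
// the escalating retry ladder described below before giving up.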
void VM_CollectForMetadataAllocation::doit() {
  SvcGCMarker sgcm(SvcGCMarker::FULL);

  CollectedHeap* heap = Universe::heap();
  GCCauseSetter gccs(heap, _gc_cause);

  // Check again if the space is available.  Another thread
  // may have similarly failed a metadata allocation and induced
  // a GC that freed space for the allocation.
  if (!MetadataAllocationFailALot) {
    _result = _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
  }

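  // Retry ladder, in order of increasing cost:
  //   1. plain allocate() (above) -- another thread's GC may have freed space
  //   2. CMS only: request a concurrent collect, then expand_and_allocate()
  //   3. full GC that keeps soft refs, then allocate()
  //   4. expand_and_allocate() to grow the metaspace
  //   5. last-ditch full GC that clears soft refs, then allocate()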
  if (_result == NULL) {
    if (UseConcMarkSweepGC) {
      if (CMSClassUnloadingEnabled) {
        MetaspaceGC::set_should_concurrent_collect(true);
      }
      // For CMS, expand the metaspace eagerly, since the requested
      // collection runs concurrently and will not free space right away.
      _result =
        _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
    }
    if (_result == NULL) {
      // Don't clear the soft refs yet.
      if (Verbose && PrintGCDetails && UseConcMarkSweepGC) {
        gclog_or_tty->print_cr("\nCMS full GC for Metaspace");
      }
      heap->collect_as_vm_thread(GCCause::_metadata_GC_threshold);
      // After a GC, try to allocate without expanding. This could fail;
      // expansion will be tried below.
      _result =
        _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
    }
    if (_result == NULL) {
      // If still failing, allow the Metaspace to expand.
      // See delta_capacity_until_GC() for explanation of the
      // amount of the expansion.
      // This should work unless there really is no more space
      // or a MaxMetaspaceSize has been specified on the command line.
      _result =
        _loader_data->metaspace_non_null()->expand_and_allocate(_size, _mdtype);
      if (_result == NULL) {
        // If expansion failed, do a last-ditch collection and try allocating
        // again.  A last-ditch collection will clear softrefs.  This
        // behavior is similar to the last-ditch collection done for perm
        // gen when it was full and a collection for failed allocation
        // did not free perm gen space.
        heap->collect_as_vm_thread(GCCause::_last_ditch_collection);
        _result =
          _loader_data->metaspace_non_null()->allocate(_size, _mdtype);
      }
    }
    if (Verbose && PrintGCDetails && _result == NULL) {
      gclog_or_tty->print_cr("\nAfter Metaspace GC failed to allocate size "
                             SIZE_FORMAT, _size);
    }
  }

  if (_result == NULL && GC_locker::is_active_and_needs_gc()) {
    set_gc_locked();
  }
}
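
#if 0
// Illustrative sketch only: the caller-side loop that drives this
// operation, modeled on the JDK8-era
// CollectorPolicy::satisfy_failed_metadata_allocation().
static MetaWord* satisfy_failed_metadata_sketch(ClassLoaderData* loader_data,
                                                size_t word_size,
                                                Metaspace::MetadataType mdtype) {
  do {
    uint gc_count, full_gc_count;
    {
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();
    }
    VM_CollectForMetadataAllocation op(loader_data, word_size, mdtype,
                                       gc_count, full_gc_count,
                                       GCCause::_metadata_GC_threshold);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      return op.result();  // NULL here means metaspace is truly exhausted
    }
    // Prologue failed: a GC ran in between; loop and try again.
  } while (true);
}
#endif

// The concurrent marking thread's main loop. Each cycle: wait to be
// started, scan the root regions, mark concurrently (restarting on mark
// stack overflow), schedule the remark and cleanup pauses in the VM
// thread, free the regions that cleanup found empty, and finally clear
// the next marking bitmap for the following cycle.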
void ConcurrentMarkThread::run() {
  initialize_in_thread();
  _vtime_start = os::elapsedVTime();
  wait_for_universe_init();

  G1CollectedHeap* g1h = G1CollectedHeap::heap();
  G1CollectorPolicy* g1_policy = g1h->g1_policy();
  G1MMUTracker *mmu_tracker = g1_policy->mmu_tracker();
  Thread *current_thread = Thread::current();

  while (!_should_terminate) {
    // wait until started is set.
    sleepBeforeNextCycle();
    {
      ResourceMark rm;
      HandleMark   hm;
      
      SvcGCMarker sgcm(SvcGCMarker::OTHER); // serviceability notification for this cycle
      
      double cycle_start = os::elapsedVTime();

      // We have to ensure that we finish scanning the root regions
      // before the next GC takes place. To ensure this we have to
      // make sure that we do not join the STS until the root regions
      // have been scanned. If we did then it's possible that a
      // subsequent GC could block us from joining the STS and proceed
      // without the root regions being scanned, which would be a
      // correctness issue.

      double scan_start = os::elapsedTime();
      if (!cm()->has_aborted()) {
        if (G1Log::fine()) {
          gclog_or_tty->date_stamp(PrintGCDateStamps);
          gclog_or_tty->stamp(PrintGCTimeStamps);
          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-start]");
        }

        _cm->scanRootRegions();

        double scan_end = os::elapsedTime();
        if (G1Log::fine()) {
          gclog_or_tty->date_stamp(PrintGCDateStamps);
          gclog_or_tty->stamp(PrintGCTimeStamps);
          gclog_or_tty->print_cr("[GC concurrent-root-region-scan-end, %1.7lf secs]",
                                 scan_end - scan_start);
        }
      }

      double mark_start_sec = os::elapsedTime();
      if (G1Log::fine()) {
        gclog_or_tty->date_stamp(PrintGCDateStamps);
        gclog_or_tty->stamp(PrintGCTimeStamps);
        gclog_or_tty->print_cr("[GC concurrent-mark-start]");
      }

      int iter = 0;
      do {
        iter++;
        if (!cm()->has_aborted()) {
          _cm->markFromRoots();
        }

        double mark_end_time = os::elapsedVTime();
        double mark_end_sec = os::elapsedTime();
        _vtime_mark_accum += (mark_end_time - cycle_start);
        if (!cm()->has_aborted()) {
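          // Honor the MMU (minimum mutator utilization) goal before the
          // remark pause: when_ms() returns how long to wait so that a
          // pause of the predicted length can start without violating it.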
          if (g1_policy->adaptive_young_list_length()) {
            double now = os::elapsedTime();
            double remark_prediction_ms = g1_policy->predict_remark_time_ms();
            jlong sleep_time_ms = mmu_tracker->when_ms(now, remark_prediction_ms);
            os::sleep(current_thread, sleep_time_ms, false);
          }

          if (G1Log::fine()) {
            gclog_or_tty->date_stamp(PrintGCDateStamps);
            gclog_or_tty->stamp(PrintGCTimeStamps);
            gclog_or_tty->print_cr("[GC concurrent-mark-end, %1.7lf secs]",
                                      mark_end_sec - mark_start_sec);
          }

          CMCheckpointRootsFinalClosure final_cl(_cm);
          VM_CGC_Operation op(&final_cl, "GC remark", true /* needs_pll */);
          VMThread::execute(&op);
        }
        if (cm()->restart_for_overflow()) {
          if (G1TraceMarkStackOverflow) {
            gclog_or_tty->print_cr("Restarting conc marking because of MS overflow "
                                   "in remark (restart #%d).", iter);
          }
          if (G1Log::fine()) {
            gclog_or_tty->date_stamp(PrintGCDateStamps);
            gclog_or_tty->stamp(PrintGCTimeStamps);
            gclog_or_tty->print_cr("[GC concurrent-mark-restart-for-overflow]");
          }
        }
      } while (cm()->restart_for_overflow());

      double end_time = os::elapsedVTime();
      // Update the total virtual time before scheduling the cleanup pause
      // below, since the cleanup will try to measure it to get the vtime
      // for this marking. We purposely neglect the presumably-short
      // "completeCleanup" phase here.
      _vtime_accum = (end_time - _vtime_start);

      if (!cm()->has_aborted()) {
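        // As before the remark pause, delay the cleanup pause if starting
        // it now would violate the MMU goal.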
        if (g1_policy->adaptive_young_list_length()) {
          double now = os::elapsedTime();
          double cleanup_prediction_ms = g1_policy->predict_cleanup_time_ms();
          jlong sleep_time_ms = mmu_tracker->when_ms(now, cleanup_prediction_ms);
          os::sleep(current_thread, sleep_time_ms, false);
        }

        CMCleanUp cl_cl(_cm);
        VM_CGC_Operation op(&cl_cl, "GC cleanup", false /* needs_pll */);
        VMThread::execute(&op);
      } else {
        // We don't want to update the marking status if a GC pause
        // is already underway.
        _sts.join();
        g1h->set_marking_complete();
        _sts.leave();
      }

      // Check if cleanup set the free_regions_coming flag. If it
      // hasn't, we can just skip the next step.
      if (g1h->free_regions_coming()) {
        // The following will finish freeing up any regions that we
        // found to be empty during cleanup. We'll do this part
        // without joining the suspendible set. If an evacuation pause
        // takes place, then we would carry on freeing regions in
        // case they are needed by the pause. If a Full GC takes
        // place, it would wait for us to process the regions
        // reclaimed by cleanup.

        double cleanup_start_sec = os::elapsedTime();
        if (G1Log::fine()) {
          gclog_or_tty->date_stamp(PrintGCDateStamps);
          gclog_or_tty->stamp(PrintGCTimeStamps);
          gclog_or_tty->print_cr("[GC concurrent-cleanup-start]");
        }

        // Now do the concurrent cleanup operation.
        _cm->completeCleanup();

        // Notify anyone who's waiting that there are no more free
        // regions coming. We have to do this before we join the STS
        // (in fact, we should not attempt to join the STS in the
        // interval between finishing the cleanup pause and clearing
        // the free_regions_coming flag); otherwise we might deadlock:
        // a GC worker could be blocked waiting for the notification,
        // while this thread is blocked waiting for the pause to finish
        // as it tries to join the STS, which is conditional on the GC
        // workers finishing.
        g1h->reset_free_regions_coming();

        double cleanup_end_sec = os::elapsedTime();
        if (G1Log::fine()) {
          gclog_or_tty->date_stamp(PrintGCDateStamps);
          gclog_or_tty->stamp(PrintGCTimeStamps);
          gclog_or_tty->print_cr("[GC concurrent-cleanup-end, %1.7lf secs]",
                                 cleanup_end_sec - cleanup_start_sec);
        }
      }
      guarantee(cm()->cleanup_list_is_empty(),
                "at this point there should be no regions on the cleanup list");

      // There is a tricky race before recording that the concurrent
      // cleanup has completed and a potential Full GC starting around
      // the same time. We want to make sure that the Full GC calls
      // abort() on concurrent mark after
      // record_concurrent_mark_cleanup_completed(), since abort() is
      // the method that will reset the concurrent mark state. If we
      // end up calling record_concurrent_mark_cleanup_completed()
      // after abort() then we might incorrectly undo some of the work
      // abort() did. Checking the has_aborted() flag after joining
      // the STS allows the correct ordering of the two methods. There
      // are two scenarios:
      //
      // a) If we reach here before the Full GC, the fact that we have
      // joined the STS means that the Full GC cannot start until we
      // leave the STS, so record_concurrent_mark_cleanup_completed()
      // will complete before abort() is called.
      //
      // b) If we reach here during the Full GC, we'll be held up from
      // joining the STS until the Full GC is done, which means that
      // abort() will have completed and has_aborted() will return
      // true to prevent us from calling
      // record_concurrent_mark_cleanup_completed() (and, in fact, it's
      // not needed any more as the concurrent mark state has been
      // already reset).
      _sts.join();
      if (!cm()->has_aborted()) {
        g1_policy->record_concurrent_mark_cleanup_completed();
      }
      _sts.leave();

      if (cm()->has_aborted()) {
        if (G1Log::fine()) {
          gclog_or_tty->date_stamp(PrintGCDateStamps);
          gclog_or_tty->stamp(PrintGCTimeStamps);
          gclog_or_tty->print_cr("[GC concurrent-mark-abort]");
        }
      }

      // We now want to allow clearing of the marking bitmap to be
      // suspended by a collection pause.
      _sts.join();
      _cm->clearNextBitmap();
      _sts.leave();
    }

    // Update the number of full collections that have been
    // completed. This will also notify the FullGCCount_lock in case a
    // Java thread is waiting for a full GC to happen (e.g., it
    // called System.gc() with +ExplicitGCInvokesConcurrent).
    _sts.join();
    g1h->increment_old_marking_cycles_completed(true /* concurrent */);
    g1h->register_concurrent_cycle_end();
    _sts.leave();
  }
  assert(_should_terminate, "just checking");

  terminate();
}