Example #1
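PSMarkSweep::invoke_no_policy() from HotSpot's ParallelScavenge collector: the entry point for a full (major) mark-compact collection once all policy decisions have been made. As the leading comment notes, callers normally go through invoke(), which holds the policy.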
// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool& notify_ref_lock, bool clear_all_softrefs) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
    assert(ref_processor() != NULL, "Sanity");

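    // No collection is possible while a thread is inside a JNI critical
    // region (GC_locker active), so bail out.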
    if (GC_locker::is_active()) return;

    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    PSYoungGen* young_gen = heap->young_gen();
    PSOldGen* old_gen = heap->old_gen();
    PSPermGen* perm_gen = heap->perm_gen();

    // Increment the invocation count
    heap->increment_total_collections();

    // We need to track unique mark sweep invocations as well.
    _total_invocations++;

    if (PrintHeapAtGC) {
        gclog_or_tty->print_cr(" {Heap before GC invocations=%d:", heap->total_collections());
        Universe::print();
    }

    // Fill in TLABs
    heap->ensure_parseability();

    if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
        HandleMark hm;  // Discard invalid handles created during verification
        tty->print(" VerifyBeforeGC:");
        Universe::verify(true);
    }

    {
        HandleMark hm;
        TraceTime t1("Full GC", PrintGC, true, gclog_or_tty);
        TraceCollectorStats tcs(counters());
        if (TraceGen1Time) accumulated_time()->start();

        // Let the size policy know we're starting
        AdaptiveSizePolicy* size_policy = heap->size_policy();
        size_policy->major_collection_begin();

        // When collecting the permanent generation methodOops may be moving,
        // so we either have to flush all bcp data or convert it into bci.
        NOT_CORE(CodeCache::gc_prologue());
        Threads::gc_prologue();

        // Capture heap size before collection for printing.
        size_t prev_used = heap->used();

        // Capture perm gen size before collection for sizing.
        size_t perm_gen_prev_used = perm_gen->used_in_bytes();

        bool marked_for_unloading = false;

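    // Allocate the marking stack and preserved-mark buffers used by the
    // phases below.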
        allocate_stacks();

        NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
        COMPILER2_ONLY(DerivedPointerTable::clear());

        ref_processor()->enable_discovery();

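    // The collection proper: phase1 marks all live objects, phase2 computes
    // each live object's new (compacted) address, phase3 adjusts pointers
    // to refer to the new addresses, and phase4 moves the objects.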
        mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

        mark_sweep_phase2();

        // Don't add any more derived pointers during phase3
        COMPILER2_ONLY(assert(DerivedPointerTable::is_active(), "Sanity"));
        COMPILER2_ONLY(DerivedPointerTable::set_active(false));

        mark_sweep_phase3();

        mark_sweep_phase4();

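    // Restore the object headers that were saved away when the collector
    // overwrote them during marking and forwarding.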
        restore_marks();

        deallocate_stacks();

        // "free at last gc" is calculated from these.
        Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
        Universe::set_heap_used_at_last_gc(Universe::heap()->used());

        bool all_empty = young_gen->eden_space()->is_empty() &&
                         young_gen->from_space()->is_empty() &&
                         young_gen->to_space()->is_empty();

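    // The card table is stale after compaction. If the entire young gen is
    // empty there can be no old->young pointers and the cards can simply be
    // cleared; otherwise they must be conservatively invalidated (dirtied)
    // across the old and perm generations.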
        BarrierSet* bs = heap->barrier_set();
        if (bs->is_a(BarrierSet::ModRef)) {
            ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
            MemRegion old_mr = heap->old_gen()->reserved();
            MemRegion perm_mr = heap->perm_gen()->reserved();
            assert(old_mr.end() <= perm_mr.start(), "Generations out of order");

            if (all_empty) {
                modBS->clear(MemRegion(old_mr.start(), perm_mr.end()));
            } else {
                modBS->invalidate(MemRegion(old_mr.start(), perm_mr.end()));
            }
        }

        Threads::gc_epilogue();
        NOT_CORE(CodeCache::gc_epilogue());

        COMPILER2_ONLY(DerivedPointerTable::update_pointers());

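    // Hand discovered references over to the pending list; the flag tells
    // the caller whether the reference handler thread needs to be notified.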
        notify_ref_lock |= ref_processor()->enqueue_discovered_references();

        // Update time of last GC
        reset_millis_since_last_gc();

        // Let the size policy know we're done
        size_policy->major_collection_end(old_gen->used_in_bytes());

        if (UseAdaptiveSizePolicy) {

            if (PrintAdaptiveSizePolicy) {
                tty->print_cr("AdaptiveSizeStart: collection: %d ",
                              heap->total_collections());
            }

            // Calculate optimal free space amounts
            size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                    old_gen->used_in_bytes(),
                    perm_gen->used_in_bytes(),
                    true /* full gc*/);

            // Resize old and young generations
            old_gen->resize(size_policy->calculated_old_free_size_in_bytes());

            young_gen->resize(size_policy->calculated_eden_size_in_bytes(),
                              size_policy->calculated_survivor_size_in_bytes());

            if (PrintAdaptiveSizePolicy) {
                tty->print_cr("AdaptiveSizeStop: collection: %d ",
                              heap->total_collections());
            }
        }

        // We collected the perm gen, so we'll resize it here.
        perm_gen->compute_new_size(perm_gen_prev_used);

        if (TraceGen1Time) accumulated_time()->stop();

        if (PrintGC) {
            heap->print_heap_change(prev_used);
        }

        heap->update_counters();
    }

    if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
        HandleMark hm;  // Discard invalid handles created during verification
        tty->print(" VerifyAfterGC:");
        Universe::verify(false);
    }

    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

    if (PrintHeapAtGC) {
        gclog_or_tty->print_cr(" Heap after GC invocations=%d:", heap->total_collections());
        Universe::print();
        gclog_or_tty->print("} ");
    }
}
Example #2
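PSScavenge::invoke_no_policy() is the minor-collection counterpart: a parallel copying collection (scavenge) of the young generation only, run on the VM thread at a safepoint.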
// This method contains no policy. You should probably
// be calling invoke() instead. 
void PSScavenge::invoke_no_policy(bool& notify_ref_lock) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::is_active()) return;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return;
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  AdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  if (PrintHeapAtGC) {
    gclog_or_tty->print_cr(" {Heap before GC invocations=%d:", heap->total_collections());
    Universe::print();
  }

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->ensure_parseability();

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    TraceTime t1("GC", PrintGC, true, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();
    
    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
      CardTableExtension::verify_all_young_refs_imprecise();
    }
    
    assert(young_gen->to_space()->is_empty(), "Attempt to scavenge with live objects in to_space");
    young_gen->to_space()->clear();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    COMPILER2_ONLY(DerivedPointerTable::clear());

    reference_processor()->enable_discovery();
    
    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->object_space()->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);
    
    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

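    // Reset the promotion managers (one per GC thread, plus one for the
    // VM thread) before the parallel phase begins.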
    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // TraceTime("Roots");
      
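      // Root scanning is packaged as GCTasks: queue them all up, hand the
      // batch to the gc_task_manager's worker threads, and wait on the
      // barrier task for completion.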
      GCTaskQueue* q = GCTaskQueue::create();
      
      for (uint i = 0; i < ParallelGCThreads; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
      }

      q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));

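      // With more than one worker, add work-stealing tasks so threads that
      // exhaust their own queues can take work from others; the last steal
      // task is flagged as the final one.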
      if (ParallelGCThreads > 1) {
        for (uint j = 0; j < ParallelGCThreads - 1; j++) {
          q->enqueue(new StealTask(false));
        }
        q->enqueue(new StealTask(true));
      }

      WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
      q->enqueue(fin);

      gc_task_manager()->add_list(q);
      
      fin->wait_for();

      // We have to release the barrier tasks!
      WaitForBarrierGCTask::destroy(fin);
    }

    scavenge_midpoint.update();

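    // Choose the SoftReference clearing policy: LRU based on current heap
    // size in non-compiler2 builds, on maximum heap size in compiler2 builds.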
    NOT_COMPILER2(ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy());
    COMPILER2_ONLY(ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy());
    
    PSIsAliveClosure is_alive;
    PSKeepAliveClosure keep_alive(promotion_manager);
    PSEvacuateFollowersClosure evac_followers(promotion_manager);
    
    // Process reference objects discovered during scavenge
    reference_processor()->process_discovered_references(soft_ref_policy, &is_alive,
                                                         &keep_alive, &evac_followers);
    
    // Enqueue reference objects discovered during scavenge.
    notify_ref_lock = reference_processor()->enqueue_discovered_references();
    
    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    assert(promotion_manager->claimed_stack()->size() == 0, "Sanity");
    PSPromotionManager::post_scavenge();

    bool scavenge_promotion_failure = promotion_failed();
    if (scavenge_promotion_failure) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're implicitly
    // saying it's mutator time).
    size_policy->minor_collection_end();

    if (!scavenge_promotion_failure) {
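      // Every live young object was copied into to_space, so eden and the
      // old from_space now contain only dead objects and can be cleared.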
      // Swap the survivor spaces.
      young_gen->eden_space()->clear();
      young_gen->from_space()->clear();
      young_gen->swap_spaces();

      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold
        size_t survived = young_gen->from_space()->used_in_bytes();
        size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;

        if (PrintAdaptiveSizePolicy) {
          tty->print_cr("AdaptiveSizeStart: collection: %d ",
                         heap->total_collections());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(survived,
                                                                 promoted,
                                                                 _survivor_overflow,
                                                                 _tenuring_threshold,
                                                                 survivor_limit);

        // Calculate optimal free space amounts
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                                   old_gen->used_in_bytes(),
                                                   perm_gen->used_in_bytes(),
                                                   false  /* full gc*/);
        
        // Resize the old and young generations
        old_gen->resize(size_policy->calculated_old_free_size_in_bytes());
        
        young_gen->resize(size_policy->calculated_eden_size_in_bytes(),
                          size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          tty->print_cr("AdaptiveSizeStop: collection: %d ",
                         heap->total_collections());
        }
      }

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_ONLY(DerivedPointerTable::update_pointers());
    
    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
        
    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();
    
    if (PrintGC) {
      heap->print_heap_change(prev_used);
    }

    heap->update_counters();
  }