Example #1
void ProvReg_Dump(
    ProvReg* self,
    FILE* os)
{
    ProvRegEntry* p;

    for (p = self->head; p; p = p->next)
    {
        Ftprintf(os, PAL_T("==== ProvRegEntry\n"));
        Ftprintf(os, PAL_T("provInterface[%u]\n"), p->provInterface);
        Ftprintf(os, PAL_T("nameSpace[%T]\n"), tcs(p->nameSpace));
        Ftprintf(os, PAL_T("className[%T]\n"), tcs(p->className));
        Ftprintf(os, PAL_T("libraryName[%s]\n"), scs(p->libraryName));
    }
}
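ProvReg_Dump simply walks an intrusive singly linked list and prints each registration entry. As a minimal, self-contained sketch of the same pattern (plain stdio stands in for the PAL's Ftprintf/PAL_T/tcs wrappers, and the Entry fields here are illustrative):

#include <cstdio>

struct Entry {
    unsigned provInterface;
    const char* nameSpace;
    Entry* next;
};

static void DumpEntries(const Entry* head, FILE* os) {
    // Same traversal as ProvReg_Dump: follow next until the list ends.
    for (const Entry* p = head; p; p = p->next) {
        std::fprintf(os, "==== Entry\n");
        std::fprintf(os, "provInterface[%u]\n", p->provInterface);
        std::fprintf(os, "nameSpace[%s]\n", p->nameSpace);
    }
}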
Example #2
char*
unrfc2047(char *s)
{
	char *p, *q, *t, *u, *v;
	int len;
	Rune r;
	Fmt fmt;
	
	if(s == nil)
		return nil;

	if(strstr(s, "=?") == nil)
		return s;
	
	fmtstrinit(&fmt);
	for(p=s; *p; ){
		/* =?charset?e?text?= */
		if(*p=='=' && *(p+1)=='?'){
			p += 2;
			q = strchr(p, '?');
			if(q == nil)
				goto emit;
			q++;
			if(*q == '?' || *(q+1) != '?')
				goto emit;
			t = q+2;
			u = strchr(t, '?');
			if(u == nil || *(u+1) != '=')
				goto emit;
			switch(*q){
			case 'q':
			case 'Q':
				*u = 0;
				v = decode(QuotedPrintableU, t, &len);
				break;
			case 'b':
			case 'B':
				*u = 0;
				v = decode(Base64, t, &len);
				break;
			default:
				goto emit;
			}
			*(q-1) = 0;
			v = tcs(p, v);
			fmtstrcpy(&fmt, v);
			free(v);
			p = u+2;
			continue;	/* resume scan; avoids reading past the NUL when ?= ends the string */
		}
	emit:
		p += chartorune(&r, p);
		fmtrune(&fmt, r);
	}
	p = fmtstrflush(&fmt);
	if(p == nil)
		sysfatal("out of memory");
	free(s);
	return p;
}
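unrfc2047 splits each =?charset?e?text?= encoded word, decodes the Q or B payload with Plan 9's decode(), and converts the charset with tcs(). As a rough standard-C++ illustration of just the Q encoding ('_' means space, =XX is a hex-encoded byte); charset conversion is assumed to happen elsewhere:

#include <cctype>
#include <string>

std::string decodeQ(const std::string& text) {
    std::string out;
    for (size_t i = 0; i < text.size(); ++i) {
        if (text[i] == '_') {
            out += ' ';                       // '_' encodes a space
        } else if (text[i] == '=' && i + 2 < text.size()
                   && std::isxdigit((unsigned char)text[i + 1])
                   && std::isxdigit((unsigned char)text[i + 2])) {
            out += (char)std::stoi(text.substr(i + 1, 2), nullptr, 16);
            i += 2;                           // consume the two hex digits
        } else {
            out += text[i];                   // everything else is literal
        }
    }
    return out;
}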
Example #3
    /// run all registered test cases; returns EXIT_SUCCESS, or EXIT_FAILURE if any test failed
    static int RunAll()
    {
        int error_count = 0;
        UnitTestCase* test_case = Head();

        while (test_case)
        {
            if (!test_case->Run())
                ++error_count;

            test_case = test_case->m_next;
        }

        const std::vector<Fail>& fails = Fails();

        if (!fails.empty())
        {
            fprintf(
                stderr,
                "============================================================================\n"
                "Summary:\n"
                "----------------------------------------------------------------------------\n"
            );

            TextColorSwitcher tcs(FOREGROUND_RED);
            fprintf(stderr, "Total %u errors found:\n", (unsigned int) fails.size());

            for (size_t i = 0; i < fails.size(); ++i)
            {
                fprintf(stderr, "[FAIL]  %s:%d %s\n", fails[i].file, fails[i].line, fails[i].expr);
            }
            return EXIT_FAILURE;
        }
        else
        {
            TextColorSwitcher tcs(FOREGROUND_GREEN);
            fprintf(stderr, "All pass\n");
            return EXIT_SUCCESS;
        }
    }
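RunAll walks an intrusive list of registered test cases (Head() returns the chain that each UnitTestCase links itself into) and reports a colored summary. A minimal sketch of that self-registration pattern, with hypothetical names and the coloring left out:

#include <cstdio>

struct TestCase {
    const char* name;
    bool (*fn)();
    TestCase* next;
    // The head lives in a function-local static so registration works
    // regardless of global construction order.
    static TestCase*& Head() { static TestCase* head = nullptr; return head; }
    TestCase(const char* n, bool (*f)()) : name(n), fn(f), next(Head()) { Head() = this; }
};

static int RunAll() {
    int failures = 0;
    for (TestCase* tc = TestCase::Head(); tc; tc = tc->next)
        if (!tc->fn()) { ++failures; std::fprintf(stderr, "[FAIL] %s\n", tc->name); }
    return failures;
}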
Example #4
/*
 * servers can lie about the charset,
 * so we use the charset based on priority.
 */
char *
convert(Runestr ctype, char *s, long *np)
{
	char t[25], buf[256];

	*t = '\0';
	if(ctype.nr){
		snprint(buf, sizeof(buf), "%.*S", ctype.nr, ctype.r);
		findctype(t, sizeof(t), "charset", buf);
	}
	if(findxmltype(buf, sizeof(buf), s)==0)
		findctype(t, sizeof(t), "encoding", buf);
	if(finddocctype(buf, sizeof(buf), s) == 0)
		findctype(t, sizeof(t), "charset", buf);

	if(*t == '\0')
		strcpy(t, charset);
	return tcs(t, s, np);
}
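Each probe in convert() overwrites t when it finds something, so an in-document declaration ends up beating the server-supplied header (assuming the ==0 returns mean the probe succeeded). The equivalent precedence as a tiny sketch with hypothetical names:

#include <string>

std::string pickCharset(const std::string& docCharset,     // <!DOCTYPE ...>
                        const std::string& xmlEncoding,    // <?xml encoding=...?>
                        const std::string& headerCharset,  // Content-Type header
                        const std::string& fallback) {     // configured default
    if (!docCharset.empty())    return docCharset;
    if (!xmlEncoding.empty())   return xmlEncoding;
    if (!headerCharset.empty()) return headerCharset;
    return fallback;
}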
Example #5
    bool Run() const
    {
        bool result = false;

        try
        {
            fprintf(stderr, "[INFO]  Test %s start\n", m_name);
            m_routine();
            fprintf(stderr, "[INFO]  Test %s complete\n", m_name);
            result = true;
        }
        catch (UnitTestError& e)
        {
            TextColorSwitcher tcs(FOREGROUND_RED);
            fprintf(stderr, "%s\n", e.what());
            fprintf(stderr, "[FAIL]  TestCase %s\n", m_name);
        }
        printf("----------------------------------------------------------------------------\n");

        return result;
    }
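TextColorSwitcher is evidently an RAII guard: the constructor sets the console color and the destructor restores it, so the color reverts even when the test throws. A minimal sketch of the same idea (ANSI escapes stand in for the Windows console attributes that FOREGROUND_RED suggests):

#include <cstdio>

struct ColorGuard {
    explicit ColorGuard(const char* ansi) { std::fprintf(stderr, "%s", ansi); }
    ~ColorGuard() { std::fprintf(stderr, "\033[0m"); }  // reset on scope exit
};

// Usage:
//   ColorGuard red("\033[31m");
//   std::fprintf(stderr, "[FAIL]  ...\n");  // printed in red, then reset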
Example #6
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  elapsedTimer scavenge_time;

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();
  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }
  // We need to track unique scavenge invocations as well.
  _total_invocations++;

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  assert(!NeverTenure || _tenuring_threshold == markWord::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1("GC", PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */);

    if (TraceGen0Time) scavenge_time.start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();
    
    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }
    
    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
	     "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear();
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    DerivedPointerTable::clear();

    reference_processor()->enable_discovery();
    
    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);
    
    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // TraceTime("Roots");
      
      GCTaskQueue* q = GCTaskQueue::create();
      
      for (uint i = 0; i < ParallelGCThreads; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
        q->enqueue(new OldToYoungRootsTask(perm_gen, perm_top, i));
      }

      // q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      // FIX ME! We should have a NoResourceMarkVerifier here!
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));

      // NOTE! ArtaObjects are not normal roots. During scavenges, they are
      // considered strong roots. During a mark sweep they are weak roots.
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::arta_objects));

      ParallelTaskTerminator terminator(
        gc_task_manager()->workers(),
        promotion_manager->depth_first() ?
            (TaskQueueSetSuper*)promotion_manager->stack_array_depth()
          : (TaskQueueSetSuper*)promotion_manager->stack_array_breadth());
      if (ParallelGCThreads > 1) {
        for (uint j = 0; j < ParallelGCThreads; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy();    
    
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      assert(soft_ref_policy != NULL, "No soft reference policy");
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->process_discovered_references(
          soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers, 
          &task_executor);
      } else {
        reference_processor()->process_discovered_references(
          soft_ref_policy, &_is_alive_closure, &keep_alive, &evac_followers,
          NULL);
      }
    }
    
    // Enqueue reference objects discovered during scavenge.
    if (reference_processor()->processing_is_mt()) {
      PSRefProcTaskExecutor task_executor;
      reference_processor()->enqueue_discovered_references(&task_executor);
    } else {
      reference_processor()->enqueue_discovered_references(NULL);
    }
    
    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    assert(promotion_manager->claimed_stack_empty(), "Sanity");
    PSPromotionManager::post_scavenge();

    promotion_failure_occurred = promotion_failed();
    if (promotion_failure_occurred) {
      _total_promotion_failures++;
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done.  Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear();
      young_gen->from_space()->clear();
      young_gen->swap_spaces();

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                         heap->total_collections());

          if (Verbose) {
gclog_or_tty->print("old_gen_capacity: %zd young_gen_capacity: %zd"
" perm_gen_capacity: %zd ",
              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
              perm_gen->capacity_in_bytes());
          }
        }


        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size %ld bytes, new threshold %d (max %ld)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }
    
        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveSizePolicy &&
            UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");
          size_t max_eden_size = young_gen->max_size() -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();
          size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                   young_gen->eden_space()->used_in_bytes(),
                                   old_gen->used_in_bytes(),
                                   perm_gen->used_in_bytes(),
                                   young_gen->eden_space()->capacity_in_bytes(),
                                   old_gen->max_gen_size(),
                                   max_eden_size,
                                   false  /* full gc */,
                                   gc_cause);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collections can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                         heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging
      // or offlining can change the heap layout, so make sure eden is
      // reshaped if that's the case. Also, update() will cause adaptive
      // NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    DerivedPointerTable::update_pointers();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    
    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) {
      scavenge_time.stop();
      if (promotion_failure_occurred)
        accumulated_undo_time()->add(scavenge_time);
      else
        accumulated_gc_time()->add(scavenge_time);
    }

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
tty->print_cr("VM-Thread %lld %lld %lld",
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
		  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

  return !promotion_failure_occurred;
}
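The heart of the success path above is the survivor-space bookkeeping: eden and from-space are cleared, the spaces are swapped so the survivors end up in from-space, and the survived/promoted byte counts feed the adaptive size policy. A toy sketch of just that step; every name here is illustrative, not the HotSpot API:

#include <cstddef>

struct Space { std::size_t used = 0; void clear() { used = 0; } };
struct YoungGen {
    Space eden, from, to;
    void swap_spaces() { Space tmp = from; from = to; to = tmp; }
};

void postScavenge(YoungGen& yg, std::size_t oldUsedBefore, std::size_t oldUsedAfter) {
    yg.eden.clear();        // eden was fully evacuated
    yg.from.clear();
    yg.swap_spaces();       // survivors (copied into to-space) now sit in from-space
    std::size_t survived = yg.from.used;
    std::size_t promoted = oldUsedAfter - oldUsedBefore;
    // feed survived/promoted into the adaptive size policy here
    (void)survived; (void)promoted;
}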
Example #7
// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if (PrintHeapAtGC) {
    Universe::print_heap_before_gc();
  }

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump();

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;
    const bool is_system_gc = gc_cause == GCCause::_java_lang_system_gc;
    // This is useful for debugging but don't change the output
    // the customer sees.
    const char* gc_cause_str = "Full GC";
    if (is_system_gc && PrintGCDetails) {
      gc_cause_str = "Full GC (System)";
    }
    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    TraceTime t1(gc_cause_str, PrintGC, !PrintGCDetails, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */);

    if (TraceGen1Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    // When collecting the permanent generation methodOops may be moving,
    // so we either have to flush all bcp data or convert it into bci.
    CodeCache::gc_prologue();
    Threads::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture perm gen size before collection for sizing.
    size_t perm_gen_prev_used = perm_gen->used_in_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    BarrierSet* bs = heap->barrier_set();
    if (bs->is_a(BarrierSet::ModRef)) {
      ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
      MemRegion old_mr = heap->old_gen()->reserved();
      MemRegion perm_mr = heap->perm_gen()->reserved();
      assert(perm_mr.end() <= old_mr.start(), "Generations out of order");

      if (young_gen_empty) {
        modBS->clear(MemRegion(perm_mr.start(), old_mr.end()));
      } else {
        modBS->invalidate(MemRegion(perm_mr.start(), old_mr.end()));
      }
    }

    BiasedLocking::restore_marks();
    Threads::gc_epilogue();
    CodeCache::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                       heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d"
            " perm_gen_capacity: %d ",
            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes(),
            perm_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                 young_gen->eden_space()->used_in_bytes(),
                                 old_gen->used_in_bytes(),
                                 perm_gen->used_in_bytes(),
                                 young_gen->eden_space()->capacity_in_bytes(),
                                 old_gen->max_gen_size(),
                                 max_eden_size,
                                 true /* full gc*/,
                                 gc_cause);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        // Don't resize the young generation at a major collection.  A
        // desired young generation size may have been calculated but
        // resizing the young generation complicates the code because the
        // resizing of the old generation may have moved the boundary
        // between the young generation and the old generation.  Let the
        // young generation resizing happen at the minor collections.
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                       heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the perm gen, so we'll resize it here.
    perm_gen->compute_new_size(perm_gen_prev_used);

    if (TraceGen1Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      // Do perm gen after heap because prev_used does
      // not include the perm gen (done this way in the other
      // collectors).
      if (PrintGCDetails) {
        perm_gen->print_used_change(perm_gen_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    if (PrintGCDetails) {
      if (size_policy->print_gc_time_limit_would_be_exceeded()) {
        if (size_policy->gc_time_limit_exceeded()) {
          gclog_or_tty->print_cr("      GC time is exceeding GCTimeLimit "
            "of %d%%", GCTimeLimit);
        } else {
          gclog_or_tty->print_cr("      GC time would exceed GCTimeLimit "
            "of %d%%", GCTimeLimit);
        }
      }
      size_policy->set_print_gc_time_limit_would_be_exceeded(false);
    }
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    gclog_or_tty->print(" VerifyAfterGC:");
    Universe::verify(false);
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
    perm_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
    perm_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  if (PrintHeapAtGC) {
    Universe::print_heap_after_gc();
  }

  heap->post_full_gc_dump();

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif
}
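mark_sweep_phase1 through phase4 implement mark-compact; as a toy illustration of just the mark/sweep core (compaction omitted), over a flat object table instead of a real heap:

#include <vector>

struct Obj { std::vector<int> refs; bool marked = false; bool live = false; };

void markSweep(std::vector<Obj>& heap, const std::vector<int>& roots) {
    // Phase 1: mark everything reachable from the roots.
    std::vector<int> stack(roots);
    while (!stack.empty()) {
        int i = stack.back(); stack.pop_back();
        if (heap[i].marked) continue;
        heap[i].marked = true;
        for (int r : heap[i].refs) stack.push_back(r);
    }
    // Sweep: unmarked objects are garbage; clear marks for the next cycle.
    for (Obj& o : heap) { o.live = o.marked; o.marked = false; }
}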
Example #8
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSMarkSweep::invoke_no_policy(bool clear_all_softrefs) {
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
  assert(ref_processor() != NULL, "Sanity");

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = ParallelScavengeHeap::heap();
  GCCause::Cause gc_cause = heap->gc_cause();

  _gc_timer->register_gc_start();
  _gc_tracer->report_gc_start(gc_cause, _gc_timer->gc_start());

  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  // The scope of casr should end after code that can change
  // CollectorPolicy::_should_clear_all_soft_refs.
  ClearedAllSoftRefs casr(clear_all_softrefs, heap->collector_policy());

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();

  // Increment the invocation count
  heap->increment_total_collections(true /* full */);

  // Save information needed to minimize mangling
  heap->record_gen_tops_before_GC();

  // We need to track unique mark sweep invocations as well.
  _total_invocations++;

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(_gc_tracer);

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  // Verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyBeforeGC) {
    old_gen->verify_object_start_array();
  }

  heap->pre_full_gc_dump(_gc_timer);

  // Filled in below to track the state of the young gen after the collection.
  bool eden_empty;
  bool survivors_empty;
  bool young_gen_empty;

  {
    HandleMark hm;

    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("Full GC", gc_cause), PrintGC, !PrintGCDetails, NULL, _gc_tracer->gc_id());
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(true /* Full GC */, gc_cause);

    if (TraceOldGenTime) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->major_collection_begin();

    CodeCache::gc_prologue();
    BiasedLocking::preserve_marks();

    // Capture heap size before collection for printing.
    size_t prev_used = heap->used();

    // Capture metadata size before collection for sizing.
    size_t metadata_prev_used = MetaspaceAux::used_bytes();

    // For PrintGCDetails
    size_t old_gen_prev_used = old_gen->used_in_bytes();
    size_t young_gen_prev_used = young_gen->used_in_bytes();

    allocate_stacks();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    ref_processor()->enable_discovery();
    ref_processor()->setup_policy(clear_all_softrefs);

    mark_sweep_phase1(clear_all_softrefs);

    mark_sweep_phase2();

    // Don't add any more derived pointers during phase3
    COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
    COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

    mark_sweep_phase3();

    mark_sweep_phase4();

    restore_marks();

    deallocate_stacks();

    if (ZapUnusedHeapArea) {
      // Do a complete mangle (top to end) because the usage for
      // scratch does not maintain a top pointer.
      young_gen->to_space()->mangle_unused_area_complete();
    }

    eden_empty = young_gen->eden_space()->is_empty();
    if (!eden_empty) {
      eden_empty = absorb_live_data_from_eden(size_policy, young_gen, old_gen);
    }

    // Update heap occupancy information which is used as
    // input to soft ref clearing policy at the next gc.
    Universe::update_heap_info_at_gc();

    survivors_empty = young_gen->from_space()->is_empty() &&
                      young_gen->to_space()->is_empty();
    young_gen_empty = eden_empty && survivors_empty;

    ModRefBarrierSet* modBS = barrier_set_cast<ModRefBarrierSet>(heap->barrier_set());
    MemRegion old_mr = heap->old_gen()->reserved();
    if (young_gen_empty) {
      modBS->clear(MemRegion(old_mr.start(), old_mr.end()));
    } else {
      modBS->invalidate(MemRegion(old_mr.start(), old_mr.end()));
    }

    // Delete metaspaces for unloaded class loaders and clean up loader_data graph
    ClassLoaderDataGraph::purge();
    MetaspaceAux::verify_metrics();

    BiasedLocking::restore_marks();
    CodeCache::gc_epilogue();
    JvmtiExport::gc_epilogue();

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    ref_processor()->enqueue_discovered_references(NULL);

    // Update time of last GC
    reset_millis_since_last_gc();

    // Let the size policy know we're done
    size_policy->major_collection_end(old_gen->used_in_bytes(), gc_cause);

    if (UseAdaptiveSizePolicy) {

      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print("AdaptiveSizeStart: ");
        gclog_or_tty->stamp();
        gclog_or_tty->print_cr(" collection: %d ",
                       heap->total_collections());
        if (Verbose) {
          gclog_or_tty->print("old_gen_capacity: " SIZE_FORMAT
            " young_gen_capacity: " SIZE_FORMAT,
            old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
        }
      }

      // Don't check if the size_policy is ready here.  Let
      // the size_policy check that internally.
      if (UseAdaptiveGenerationSizePolicyAtMajorCollection &&
          ((gc_cause != GCCause::_java_lang_system_gc) ||
            UseAdaptiveSizePolicyWithSystemGC)) {
        // Swap the survivor spaces if from_space is empty. The
        // resize_young_gen() called below is normally used after
        // a successful young GC and swapping of survivor spaces;
        // otherwise, it will fail to resize the young gen with
        // the current implementation.
        if (young_gen->from_space()->is_empty()) {
          young_gen->from_space()->clear(SpaceDecorator::Mangle);
          young_gen->swap_spaces();
        }

        // Calculate optimal free space amounts
        assert(young_gen->max_size() >
          young_gen->from_space()->capacity_in_bytes() +
          young_gen->to_space()->capacity_in_bytes(),
          "Sizes of space in young gen are out-of-bounds");

        size_t young_live = young_gen->used_in_bytes();
        size_t eden_live = young_gen->eden_space()->used_in_bytes();
        size_t old_live = old_gen->used_in_bytes();
        size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
        size_t max_old_gen_size = old_gen->max_gen_size();
        size_t max_eden_size = young_gen->max_size() -
          young_gen->from_space()->capacity_in_bytes() -
          young_gen->to_space()->capacity_in_bytes();

        // Used for diagnostics
        size_policy->clear_generation_free_space_flags();

        size_policy->compute_generations_free_space(young_live,
                                                    eden_live,
                                                    old_live,
                                                    cur_eden,
                                                    max_old_gen_size,
                                                    max_eden_size,
                                                    true /* full gc*/);

        size_policy->check_gc_overhead_limit(young_live,
                                             eden_live,
                                             max_old_gen_size,
                                             max_eden_size,
                                             true /* full gc*/,
                                             gc_cause,
                                             heap->collector_policy());

        size_policy->decay_supplemental_growth(true /* full gc*/);

        heap->resize_old_gen(size_policy->calculated_old_free_size_in_bytes());

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                               size_policy->calculated_survivor_size_in_bytes());
      }
      if (PrintAdaptiveSizePolicy) {
        gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                       heap->total_collections());
      }
    }

    if (UsePerfData) {
      heap->gc_policy_counters()->update_counters();
      heap->gc_policy_counters()->update_old_capacity(
        old_gen->capacity_in_bytes());
      heap->gc_policy_counters()->update_young_capacity(
        young_gen->capacity_in_bytes());
    }

    heap->resize_all_tlabs();

    // We collected the heap, recalculate the metaspace capacity
    MetaspaceGC::compute_new_size();

    if (TraceOldGenTime) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_prev_used);
        old_gen->print_used_change(old_gen_prev_used);
      }
      heap->print_heap_change(prev_used);
      if (PrintGCDetails) {
        MetaspaceAux::print_metaspace_change(metadata_prev_used);
      }
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  // Re-verify object start arrays
  if (VerifyObjectStartArray &&
      VerifyAfterGC) {
    old_gen->verify_object_start_array();
  }

  if (ZapUnusedHeapArea) {
    old_gen->object_space()->check_mangled_unused_area_complete();
  }

  NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(_gc_tracer);

  heap->post_full_gc_dump(_gc_timer);

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif

  _gc_timer->register_gc_end();

  _gc_tracer->report_gc_end(_gc_timer->gc_end(), _gc_timer->time_partitions());

  return true;
}
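This newer variant brackets the whole collection with _gc_timer/_gc_tracer start and end events. The shape of that bracketing, reduced to an RAII sketch with std::chrono in place of the HotSpot timer types:

#include <chrono>
#include <cstdio>

struct GCTimerScope {
    std::chrono::steady_clock::time_point start = std::chrono::steady_clock::now();
    ~GCTimerScope() {   // fires when the collection scope exits
        auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(
            std::chrono::steady_clock::now() - start).count();
        std::fprintf(stderr, "full GC took %lld ms\n", (long long)ms);
    }
};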
Example #9
/// TODO: We need a way to load the (scoped) environment variables;
/// for now we just add some variables for some common languages.
static void initHardCodedDynamicScopes( DynamicVariables* env )
{
    QString tcs("TM_COMMENT_START");
    QString tce("TM_COMMENT_END");
    QString tcs2("TM_COMMENT_START_2");
    QString tce2("TM_COMMENT_END_2");
    QString tcs3("TM_COMMENT_START_3");
    QString tce3("TM_COMMENT_END_3");
    env->setAndGiveScopedSelector( tcs, "# ", "source.yaml");

    env->setAndGiveScopedSelector( tcs, "// ", "source.c, source.c++, source.objc, source.objc++");
    env->setAndGiveScopedSelector( tcs2, "/*", "source.c, source.c++, source.objc, source.objc++");
    env->setAndGiveScopedSelector( tce2, "*/", "source.c, source.c++, source.objc, source.objc++");

    env->setAndGiveScopedSelector( tcs, "-- ", "source.lua");
    env->setAndGiveScopedSelector( tcs2, "--[[", "source.lua");
    env->setAndGiveScopedSelector( tce2, "]]", "source.lua");

    env->setAndGiveScopedSelector( tcs, "/*", "source.css");
    env->setAndGiveScopedSelector( tce, "*/", "source.css");

    env->setAndGiveScopedSelector( tcs, "; ", "source.clojure");

    env->setAndGiveScopedSelector( tcs, "# ", "source.coffee");
    env->setAndGiveScopedSelector( tcs2, "###", "source.coffee");
    env->setAndGiveScopedSelector( tce2, "###", "source.coffee");

    env->setAndGiveScopedSelector( tcs, "<!-- ", "text.html");
    env->setAndGiveScopedSelector( tce, " -->", "text.html");

    env->setAndGiveScopedSelector( tcs, "<!-- ", "text.xml");
    env->setAndGiveScopedSelector( tce, " -->", "text.xml");

    env->setAndGiveScopedSelector( tcs, "// ", "source.java");
    env->setAndGiveScopedSelector( tcs2, "/*", "source.java");
    env->setAndGiveScopedSelector( tce2, "*/", "source.java");

    env->setAndGiveScopedSelector( tcs, "// ", "source.js, source.json");
    env->setAndGiveScopedSelector( tcs2, "/*", "source.js, source.json");
    env->setAndGiveScopedSelector( tce2, "*/", "source.js, source.json");

    env->setAndGiveScopedSelector( tcs, "// ", "source.php");
    env->setAndGiveScopedSelector( tcs2, "# ", "source.php");
    env->setAndGiveScopedSelector( tcs3, "/*", "source.php");
    env->setAndGiveScopedSelector( tce3, "*/", "source.php");

    env->setAndGiveScopedSelector( tcs, "# ", "source.perl");

    env->setAndGiveScopedSelector( tcs, "-# ", "text.haml");    // I hate the default '/'

    env->setAndGiveScopedSelector( tcs, "# ", "source.js, source.ruby");
    env->setAndGiveScopedSelector( tcs2, "=begin", "source.js, source.ruby");
    env->setAndGiveScopedSelector( tce2, "=end", "source.js, source.ruby");

    env->setAndGiveScopedSelector( tcs, "// ", "source.scss");
    env->setAndGiveScopedSelector( tcs2, "/*", "source.scss");
    env->setAndGiveScopedSelector( tce2, "*/", "source.scss");

    env->setAndGiveScopedSelector( tcs, "-- ", "source.sql");
    env->setAndGiveScopedSelector( tcs2, "/*", "source.sql");
    env->setAndGiveScopedSelector( tce2, "*/", "source.sql");

    env->setAndGiveScopedSelector( tcs, "# ", "source.shell");

}
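setAndGiveScopedSelector presumably keys each variable by a TextMate scope selector, so looking up TM_COMMENT_START for source.lua yields "-- ". A naive sketch of such a structure; real selector matching is far more involved than the prefix test used here:

#include <map>
#include <string>

using ScopedVars = std::map<std::string, std::map<std::string, std::string>>;

void set(ScopedVars& env, const std::string& name,
         const std::string& value, const std::string& selector) {
    env[name][selector] = value;
}

const std::string* lookup(const ScopedVars& env, const std::string& name,
                          const std::string& scope) {
    auto it = env.find(name);
    if (it == env.end()) return nullptr;
    for (const auto& kv : it->second)
        if (scope.rfind(kv.first, 0) == 0)  // selector is a prefix of the scope
            return &kv.second;
    return nullptr;
}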
Example #10
void
test_mgr::run()
{
    // TODO: complete tidy-up of non-summary (verbose) results, then pull through
    // a command-line summary to control this.
    // Idea: --summary: prints short results afterwards
    //       --verbose: prints long version as test progresses
    //       default: none of these, similar to current summary = true version.
    const bool summary = true;

    std::cout << "CSV file: \"" << _args.test_case_csv_file_name << "\"";
    test_case_set tcs(_args.test_case_csv_file_name, _args.recover_mode);

    if (tcs.size())
    {
        std::cout << " (found " << tcs.size() << " test case" << (tcs.size() != 1 ? "s" : "") <<
                ")" << std::endl;
        if (tcs.ignored())
            std::cout << "WARNING: " << tcs.ignored() << " test cases were ignored. (All test "
                    "cases without auto-dequeue are ignored when recover-mode is selected.)" <<
                    std::endl;
        _args.print_args();
    }
    else if (tcs.ignored())
    {
        std::cout << " WARNING: All " << tcs.ignored() << " test case(s) were ignored. (All test "
                "cases without auto-dequeue are ignored when recover-mode is selected.)" <<
                std::endl;
    }
    else
        std::cout << " (WARNING: This CSV file is empty or does not exist.)" << std::endl;

    do
    {
        unsigned u = 0;
        if (_args.randomize)
            random_shuffle(tcs.begin(), tcs.end(), _random_fn_ptr);
        for (test_case_set::tcl_itr tci = tcs.begin(); tci != tcs.end(); tci++, u++)
        {
            if (summary)
                std::cout << "Test case " << (*tci)->test_case_num() << ": \"" <<
                        (*tci)->comment() << "\"" << std::endl;
            else
                std::cout << (*tci)->str() << std::endl;
            if (!_args.reuse_instance || _ji_list.empty())
                initialize_jrnls();
            for (ji_list_citr jii=_ji_list.begin(); jii!=_ji_list.end(); jii++)
                (*jii)->init_tc(*tci, &_args);
            for (ji_list_citr jii=_ji_list.begin(); jii!=_ji_list.end(); jii++)
                (*jii)->run_tc();
            for (ji_list_citr jii=_ji_list.begin(); jii!=_ji_list.end(); jii++)
                (*jii)->tc_wait_compl();

            if (_args.format_chk)
            {
                for (ji_list_citr jii=_ji_list.begin(); jii!=_ji_list.end(); jii++)
                {
                    jrnl_init_params::shared_ptr jpp = (*jii)->params();
                    std::string ja = _args.jfile_analyzer;
                    if (ja.empty()) ja = "./jfile_chk.py";
                    if (!exists(ja))
                    {
                        std::ostringstream oss;
                        oss << "ERROR: Validation program \"" << ja << "\" does not exist" << std::endl;
                        throw std::runtime_error(oss.str());
                    }
                    std::ostringstream oss;
                    oss << ja << " -b " << jpp->base_filename();
                    // TODO: When jfile_check.py can handle previously recovered journals for
                    // specific tests, then remove this exclusion.
                    if (!_args.recover_mode)
                    {
                        oss << " -c " << _args.test_case_csv_file_name;
                        oss << " -t " << (*tci)->test_case_num();
                    }
                    oss << " -q " << jpp->jdir();
                    bool res = system(oss.str().c_str()) != 0;
                    (*tci)->set_fmt_chk_res(res, jpp->jid());
                    if (res) _err_flag = true;
                }
            }

            if (!_args.recover_mode && !_args.keep_jrnls)
                for (ji_list_citr jii=_ji_list.begin(); jii!=_ji_list.end(); jii++)
                    try { mrg::journal::jdir::delete_dir((*jii)->jrnl_dir()); }
                    catch (...) {} // TODO - work out exception strategy for failure here...

            print_results(*tci, summary);
            if ((*tci)->average().exception())
                _err_flag = true;
            if (_abort || (!_args.repeat_flag && _signal))
                break;
            if (_args.pause_secs && tci != tcs.end())
                ::usleep(_args.pause_secs * 1000000);
        }
    }
    while (_args.repeat_flag && !_signal);
}
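Stripped of journal setup and format checking, run() is a shuffle/run/repeat driver. Its skeleton, with hypothetical names:

#include <algorithm>
#include <random>
#include <vector>

template <typename TC>
void runLoop(std::vector<TC>& cases, bool randomize, bool repeat,
             volatile bool& stop, void (*runOne)(TC&)) {
    std::mt19937 rng(std::random_device{}());
    do {
        if (randomize) std::shuffle(cases.begin(), cases.end(), rng);
        for (TC& tc : cases) {
            runOne(tc);
            if (stop) break;        // honor the signal/abort flags
        }
    } while (repeat && !stop);
}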
Example #11
	void initialize() override
	{
		OpenGLWindow::initialize();

		//load shaders
		{
			QOpenGLShader vs(QOpenGLShader::Vertex);
				vs.compileSourceFile("TessellationTerrain/main.vs.glsl");
				mProgram.addShader(&vs);

			QOpenGLShader tcs(QOpenGLShader::TessellationControl);
				tcs.compileSourceFile("TessellationTerrain/main.tcs.glsl");
				mProgram.addShader(&tcs);

			QOpenGLShader tes(QOpenGLShader::TessellationEvaluation);
				tes.compileSourceFile("TessellationTerrain/main.tes.glsl");
				mProgram.addShader(&tes);

			QOpenGLShader fs(QOpenGLShader::Fragment);
				fs.compileSourceFile("TessellationTerrain/main.fs.glsl");
				mProgram.addShader(&fs);

			if (!mProgram.link())
				qFatal("Error linking shaders");
		}

		//grab uniform locations
		{
			mUniforms.mvMatrix = mProgram.uniformLocation("mvMatrix");
			mUniforms.mvpMatrix = mProgram.uniformLocation("mvpMatrix");
			mUniforms.projMatrix = mProgram.uniformLocation("projMatrix");
			mUniforms.dmapDepth = mProgram.uniformLocation("dmapDepth");
		}

		mVao.bind();
		glPatchParameteri(GL_PATCH_VERTICES, 4);
		glPolygonMode(GL_FRONT_AND_BACK, GL_LINE);

		//build displacement map
		{
			QVector<GLubyte> displacementMap;
			displacementMap.reserve(mSize*mSize);

			std::srand(35456);

			for (int i=0; i<mSize*mSize; i++)
				displacementMap.append(randInt(0, 255));

			glGenTextures(1, &tex);
			glBindTexture(GL_TEXTURE_2D, tex);
			glTexStorage2D(GL_TEXTURE_2D, 1, GL_R8, mSize, mSize);
			glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, mSize, mSize, GL_RED, GL_UNSIGNED_BYTE, &displacementMap[0]);
		}

		//load terrain texture
		{
			glActiveTexture(GL_TEXTURE1);
			mTerrainTexture = QSharedPointer<QOpenGLTexture>(new QOpenGLTexture(QImage("./Common/dirt.png").mirrored()));
			mTerrainTexture->setMinificationFilter(QOpenGLTexture::LinearMipMapLinear);
			mTerrainTexture->setMagnificationFilter(QOpenGLTexture::Linear);
		}
	}
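With GL_PATCH_VERTICES set to 4, the matching draw call submits quad patches for the tessellation control stage. A sketch of the render side, assuming a current OpenGL 4.x context (e.g. via QOpenGLFunctions_4_0_Core) and a hypothetical gridSize:

void renderTerrain(int gridSize) {
    glPatchParameteri(GL_PATCH_VERTICES, 4);   // quad patches feed the TCS
    glDrawArraysInstanced(GL_PATCHES, 0, 4, gridSize * gridSize);
}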
Example #12
// This method contains no policy. You should probably
// be calling invoke() instead.
bool PSScavenge::invoke_no_policy() {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  assert(_preserved_mark_stack.is_empty(), "should be empty");
  assert(_preserved_oop_stack.is_empty(), "should be empty");

  _gc_timer.register_gc_start();

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::check_active_before_gc()) {
    return false;
  }

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  GCCause::Cause gc_cause = heap->gc_cause();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return false;
  }

  _gc_tracer.report_gc_start(heap->gc_cause(), _gc_timer.gc_start());

  bool promotion_failure_occurred = false;

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSAdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  AdaptiveSizePolicyOutput(size_policy, heap->total_collections());

  if ((gc_cause != GCCause::_java_lang_system_gc) ||
       UseAdaptiveSizePolicyWithSystemGC) {
    // Gather the feedback data for eden occupancy.
    young_gen->eden_space()->accumulate_statistics();
  }

  if (ZapUnusedHeapArea) {
    // Save information needed to minimize mangling
    heap->record_gen_tops_before_GC();
  }

  heap->print_heap_before_gc();
  heap->trace_heap_before_gc(&_gc_tracer);

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();

  // Fill in TLABs
  heap->accumulate_statistics_all_tlabs();
  heap->ensure_parsability(true);  // retire TLABs

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyBeforeGC:");
  }

  {
    ResourceMark rm;
    HandleMark hm;

    gclog_or_tty->date_stamp(PrintGC && PrintGCDateStamps);
    TraceCPUTime tcpu(PrintGCDetails, true, gclog_or_tty);
    GCTraceTime t1(GCCauseString("GC", gc_cause), PrintGC, !PrintGCDetails, NULL);
    TraceCollectorStats tcs(counters());
    TraceMemoryManagerStats tms(false /* not full GC */, gc_cause);

    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();

    // Verify the object start arrays.
    if (VerifyObjectStartArray &&
        VerifyBeforeGC) {
      old_gen->verify_object_start_array();
    }

    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (!ScavengeWithObjectsInToSpace) {
      assert(young_gen->to_space()->is_empty(),
             "Attempt to scavenge with live objects in to_space");
      young_gen->to_space()->clear(SpaceDecorator::Mangle);
    } else if (ZapUnusedHeapArea) {
      young_gen->to_space()->mangle_unused_area();
    }
    save_to_space_top_before_gc();

    COMPILER2_PRESENT(DerivedPointerTable::clear());

    reference_processor()->enable_discovery(true /*verify_disabled*/, true /*verify_no_refs*/);
    reference_processor()->setup_policy(false);

    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->used_in_bytes();

    // For PrintGCDetails
    size_t young_gen_used_before = young_gen->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);

    // We need to save the old top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    // Set the number of GC threads to be used in this collection
    gc_task_manager()->set_active_gang();
    gc_task_manager()->task_idle_workers();
    // Get the active number of workers here and use that value
    // throughout the methods.
    uint active_workers = gc_task_manager()->active_workers();
    heap->set_par_threads(active_workers);

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      GCTraceTime tm("Scavenge", false, false, &_gc_timer);
      ParallelScavengeHeap::ParStrongRootsScope psrs;

      GCTaskQueue* q = GCTaskQueue::create();

      if (!old_gen->object_space()->is_empty()) {
        // There are only old-to-young pointers if there are objects
        // in the old gen.
        uint stripe_total = active_workers;
        for(uint i=0; i < stripe_total; i++) {
          q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i, stripe_total));
        }
      }

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::management));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::class_loader_data));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jvmti));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::code_cache));

      ParallelTaskTerminator terminator(
        active_workers,
        (TaskQueueSetSuper*) promotion_manager->stack_array_depth());
      if (active_workers > 1) {
        for (uint j = 0; j < active_workers; j++) {
          q->enqueue(new StealTask(&terminator));
        }
      }

      gc_task_manager()->execute_and_wait(q);
    }

    scavenge_midpoint.update();

    // Process reference objects discovered during scavenge
    {
      GCTraceTime tm("References", false, false, &_gc_timer);

      reference_processor()->setup_policy(false); // not always_clear
      reference_processor()->set_active_mt_degree(active_workers);
      PSKeepAliveClosure keep_alive(promotion_manager);
      PSEvacuateFollowersClosure evac_followers(promotion_manager);
      ReferenceProcessorStats stats;
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, &task_executor,
          &_gc_timer);
      } else {
        stats = reference_processor()->process_discovered_references(
          &_is_alive_closure, &keep_alive, &evac_followers, NULL, &_gc_timer);
      }

      _gc_tracer.report_gc_reference_stats(stats);

      // Enqueue reference objects discovered during scavenge.
      if (reference_processor()->processing_is_mt()) {
        PSRefProcTaskExecutor task_executor;
        reference_processor()->enqueue_discovered_references(&task_executor);
      } else {
        reference_processor()->enqueue_discovered_references(NULL);
      }
    }

    {
      GCTraceTime tm("StringTable", false, false, &_gc_timer);
      // Unlink any dead interned Strings and process the remaining live ones.
      PSScavengeRootsClosure root_closure(promotion_manager);
      StringTable::unlink_or_oops_do(&_is_alive_closure, &root_closure);
    }

    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    promotion_failure_occurred = PSPromotionManager::post_scavenge(_gc_tracer);
    if (promotion_failure_occurred) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done.  Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're
    // implicitly saying it's mutator time).
    size_policy->minor_collection_end(gc_cause);

    if (!promotion_failure_occurred) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear(SpaceDecorator::Mangle);
      young_gen->from_space()->clear(SpaceDecorator::Mangle);
      young_gen->swap_spaces();
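      // After the swap, the survivors (copied into to-space during the
      // scavenge) live in from-space, and to-space is empty again.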

      size_t survived = young_gen->from_space()->used_in_bytes();
      size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;
      size_policy->update_averages(_survivor_overflow, survived, promoted);

      // A successful scavenge should restart the GC time limit count which is
      // for full GC's.
      size_policy->reset_gc_overhead_limit_count();
      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print("AdaptiveSizeStart: ");
          gclog_or_tty->stamp();
          gclog_or_tty->print_cr(" collection: %d ",
                         heap->total_collections());

          if (Verbose) {
            gclog_or_tty->print("old_gen_capacity: %d young_gen_capacity: %d",
              old_gen->capacity_in_bytes(), young_gen->capacity_in_bytes());
          }
        }


        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_old_eden_size(
            size_policy->calculated_eden_size_in_bytes());
          counters->update_old_promo_size(
            size_policy->calculated_promo_size_in_bytes());
          counters->update_old_capacity(old_gen->capacity_in_bytes());
          counters->update_young_capacity(young_gen->capacity_in_bytes());
          counters->update_survived(survived);
          counters->update_promoted(promoted);
          counters->update_survivor_overflowed(_survivor_overflow);
        }

        size_t max_young_size = young_gen->max_size();

        // Deciding a free ratio in the young generation is tricky, so if
        // MinHeapFreeRatio or MaxHeapFreeRatio are in use (implicating
        // that the old generation size may have been limited because of them) we
        // should then limit our young generation size using NewRatio to have it
        // follow the old generation size.
        if (MinHeapFreeRatio != 0 || MaxHeapFreeRatio != 100) {
          max_young_size = MIN2(old_gen->capacity_in_bytes() / NewRatio, young_gen->max_size());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(max_young_size);
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(
                                                           _survivor_overflow,
                                                           _tenuring_threshold,
                                                           survivor_limit);
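        // The returned threshold is sized so the expected survivor volume
        // fits within survivor_limit; objects that reach it are promoted
        // directly to the old generation.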

        if (PrintTenuringDistribution) {
          gclog_or_tty->cr();
          gclog_or_tty->print_cr("Desired survivor size " SIZE_FORMAT " bytes, new threshold %u (max %u)",
                                 size_policy->calculated_survivor_size_in_bytes(),
                                 _tenuring_threshold, MaxTenuringThreshold);
        }

        if (UsePerfData) {
          PSGCAdaptivePolicyCounters* counters = heap->gc_policy_counters();
          counters->update_tenuring_threshold(_tenuring_threshold);
          counters->update_survivor_size_counters();
        }

        // Do call at minor collections?
        // Don't check if the size_policy is ready at this
        // level.  Let the size_policy check that internally.
        if (UseAdaptiveGenerationSizePolicyAtMinorCollection &&
            ((gc_cause != GCCause::_java_lang_system_gc) ||
              UseAdaptiveSizePolicyWithSystemGC)) {

          // Calculate optimal free space amounts
          assert(young_gen->max_size() >
            young_gen->from_space()->capacity_in_bytes() +
            young_gen->to_space()->capacity_in_bytes(),
            "Sizes of space in young gen are out-of-bounds");

          size_t young_live = young_gen->used_in_bytes();
          size_t eden_live = young_gen->eden_space()->used_in_bytes();
          size_t cur_eden = young_gen->eden_space()->capacity_in_bytes();
          size_t max_old_gen_size = old_gen->max_gen_size();
          size_t max_eden_size = max_young_size -
            young_gen->from_space()->capacity_in_bytes() -
            young_gen->to_space()->capacity_in_bytes();

          // Used for diagnostics
          size_policy->clear_generation_free_space_flags();

          size_policy->compute_eden_space_size(young_live,
                                               eden_live,
                                               cur_eden,
                                               max_eden_size,
                                               false /* not full gc*/);

          size_policy->check_gc_overhead_limit(young_live,
                                               eden_live,
                                               max_old_gen_size,
                                               max_eden_size,
                                               false /* not full gc*/,
                                               gc_cause,
                                               heap->collector_policy());

          size_policy->decay_supplemental_growth(false /* not full gc*/);
        }
        // Resize the young generation at every collection
        // even if new sizes have not been calculated.  This is
        // to allow resizes that may have been inhibited by the
        // relative location of the "to" and "from" spaces.

        // Resizing the old gen at minor collects can cause increases
        // that don't feed back to the generation sizing policy until
        // a major collection.  Don't resize the old gen here.

        heap->resize_young_gen(size_policy->calculated_eden_size_in_bytes(),
                        size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          gclog_or_tty->print_cr("AdaptiveSizeStop: collection: %d ",
                         heap->total_collections());
        }
      }

      // Update the structure of the eden. With NUMA-eden, CPU hotplugging or
      // offlining can change the heap layout. Make sure eden is reshaped if
      // that's the case. update() also causes adaptive NUMA chunk resizing.
      assert(young_gen->eden_space()->is_empty(), "eden space should be empty now");
      young_gen->eden_space()->update();

      heap->gc_policy_counters()->update_counters();

      heap->resize_all_tlabs();

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_PRESENT(DerivedPointerTable::update_pointers());

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());

    {
      GCTraceTime tm("Prune Scavenge Root Methods", false, false, &_gc_timer);

      CodeCache::prune_scavenge_root_nmethods();
    }

    // Re-verify object start arrays
    if (VerifyObjectStartArray &&
        VerifyAfterGC) {
      old_gen->verify_object_start_array();
    }

    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();

    if (PrintGC) {
      if (PrintGCDetails) {
        // Don't print a GC timestamp here.  This is after the GC so
        // would be confusing.
        young_gen->print_used_change(young_gen_used_before);
      }
      heap->print_heap_change(prev_used);
    }

    // Track memory usage and detect low memory
    MemoryService::track_memory_usage();
    heap->update_counters();

    gc_task_manager()->release_idle_workers();
  }

  if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    Universe::verify(" VerifyAfterGC:");
  }

  heap->print_heap_after_gc();
  heap->trace_heap_after_gc(&_gc_tracer);
  _gc_tracer.report_tenuring_threshold(tenuring_threshold());

  if (ZapUnusedHeapArea) {
    young_gen->eden_space()->check_mangled_unused_area_complete();
    young_gen->from_space()->check_mangled_unused_area_complete();
    young_gen->to_space()->check_mangled_unused_area_complete();
  }

  scavenge_exit.update();

  if (PrintGCTaskTimeStamps) {
    tty->print_cr("VM-Thread " INT64_FORMAT " " INT64_FORMAT " " INT64_FORMAT,
                  scavenge_entry.ticks(), scavenge_midpoint.ticks(),
                  scavenge_exit.ticks());
    gc_task_manager()->print_task_time_stamps();
  }

#ifdef TRACESPINNING
  ParallelTaskTerminator::print_termination_counts();
#endif


  _gc_timer.register_gc_end();

  _gc_tracer.report_gc_end(_gc_timer.gc_end(), _gc_timer.time_partitions());

  return !promotion_failure_occurred;
}
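
Each phase above is bracketed by a GCTraceTime/TraceTime scope, which times a block purely through construction and destruction. A minimal self-contained sketch of that RAII pattern follows; ScopedPhaseTimer and its output format are illustrative stand-ins, not HotSpot classes:

#include <chrono>
#include <cstdio>

// Starts a clock on construction and prints the elapsed time when the
// enclosing scope exits, mirroring how GCTraceTime is used above.
class ScopedPhaseTimer {
public:
    explicit ScopedPhaseTimer(const char* name)
        : m_name(name), m_start(std::chrono::steady_clock::now()) {}
    ~ScopedPhaseTimer() {
        const double secs = std::chrono::duration<double>(
            std::chrono::steady_clock::now() - m_start).count();
        std::fprintf(stderr, "[%s, %.7f secs]\n", m_name, secs);
    }
private:
    const char* m_name;
    std::chrono::steady_clock::time_point m_start;
};

void process_references() {
    ScopedPhaseTimer tm("References");  // timing starts here
    // ... phase work ...
}                                       // timing stops and prints here
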
Exemplo n.º 13
0
// This method contains no policy. You should probably
// be calling invoke() instead.
void PSMarkSweep::invoke_no_policy(bool& notify_ref_lock, bool clear_all_softrefs) {
    assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");
    assert(ref_processor() != NULL, "Sanity");

    if (GC_locker::is_active()) return;

    ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
    assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

    PSYoungGen* young_gen = heap->young_gen();
    PSOldGen* old_gen = heap->old_gen();
    PSPermGen* perm_gen = heap->perm_gen();

    // Increment the invocation count
    heap->increment_total_collections();

    // We need to track unique mark sweep invocations as well.
    _total_invocations++;

    if (PrintHeapAtGC) {
        gclog_or_tty->print_cr(" {Heap before GC invocations=%d:", heap->total_collections());
        Universe::print();
    }

    // Fill in TLABs
    heap->ensure_parseability();

    if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
        HandleMark hm;  // Discard invalid handles created during verification
        tty->print(" VerifyBeforeGC:");
        Universe::verify(true);
    }

    {
        HandleMark hm;
        TraceTime t1("Full GC", PrintGC, true, gclog_or_tty);
        TraceCollectorStats tcs(counters());
        if (TraceGen1Time) accumulated_time()->start();

        // Let the size policy know we're starting
        AdaptiveSizePolicy* size_policy = heap->size_policy();
        size_policy->major_collection_begin();

        // When collecting the permanent generation methodOops may be moving,
        // so we either have to flush all bcp data or convert it into bci.
        NOT_CORE(CodeCache::gc_prologue());
        Threads::gc_prologue();

        // Capture heap size before collection for printing.
        size_t prev_used = heap->used();

        // Capture perm gen size before collection for sizing.
        size_t perm_gen_prev_used = perm_gen->used_in_bytes();

        bool marked_for_unloading = false;

        allocate_stacks();

        NOT_PRODUCT(ref_processor()->verify_no_references_recorded());
        COMPILER2_ONLY(DerivedPointerTable::clear());

        ref_processor()->enable_discovery();

        mark_sweep_phase1(marked_for_unloading, clear_all_softrefs);

        mark_sweep_phase2();

        // Don't add any more derived pointers during phase3
        COMPILER2_ONLY(assert(DerivedPointerTable::is_active(), "Sanity"));
        COMPILER2_ONLY(DerivedPointerTable::set_active(false));

        mark_sweep_phase3();

        mark_sweep_phase4();

        restore_marks();

        deallocate_stacks();

        // "free at last gc" is calculated from these.
        Universe::set_heap_capacity_at_last_gc(Universe::heap()->capacity());
        Universe::set_heap_used_at_last_gc(Universe::heap()->used());

        bool all_empty = young_gen->eden_space()->is_empty() &&
                         young_gen->from_space()->is_empty() &&
                         young_gen->to_space()->is_empty();

        BarrierSet* bs = heap->barrier_set();
        if (bs->is_a(BarrierSet::ModRef)) {
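            // After compaction every card mark is stale. If the young gen is
            // completely empty there can be no old->young pointers left, so
            // the card table can simply be cleared; otherwise dirty the whole
            // old/perm range so the next scavenge re-examines it.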
            ModRefBarrierSet* modBS = (ModRefBarrierSet*)bs;
            MemRegion old_mr = heap->old_gen()->reserved();
            MemRegion perm_mr = heap->perm_gen()->reserved();
            assert(old_mr.end() <= perm_mr.start(), "Generations out of order");

            if (all_empty) {
                modBS->clear(MemRegion(old_mr.start(), perm_mr.end()));
            } else {
                modBS->invalidate(MemRegion(old_mr.start(), perm_mr.end()));
            }
        }

        Threads::gc_epilogue();
        NOT_CORE(CodeCache::gc_epilogue());

        COMPILER2_ONLY(DerivedPointerTable::update_pointers());

        notify_ref_lock |= ref_processor()->enqueue_discovered_references();

        // Update time of last GC
        reset_millis_since_last_gc();

        // Let the size policy know we're done
        size_policy->major_collection_end(old_gen->used_in_bytes());

        if (UseAdaptiveSizePolicy) {

            if (PrintAdaptiveSizePolicy) {
                tty->print_cr("AdaptiveSizeStart: collection: %d ",
                              heap->total_collections());
            }

            // Calculate optimal free space amounts
            size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                    old_gen->used_in_bytes(),
                    perm_gen->used_in_bytes(),
                    true /* full gc*/);

            // Resize old and young generations
            old_gen->resize(size_policy->calculated_old_free_size_in_bytes());

            young_gen->resize(size_policy->calculated_eden_size_in_bytes(),
                              size_policy->calculated_survivor_size_in_bytes());

            if (PrintAdaptiveSizePolicy) {
                tty->print_cr("AdaptiveSizeStop: collection: %d ",
                              heap->total_collections());
            }
        }

        // We collected the perm gen, so we'll resize it here.
        perm_gen->compute_new_size(perm_gen_prev_used);

        if (TraceGen1Time) accumulated_time()->stop();

        if (PrintGC) {
            heap->print_heap_change(prev_used);
        }

        heap->update_counters();
    }

    if (VerifyAfterGC && heap->total_collections() >= VerifyGCStartAt) {
        HandleMark hm;  // Discard invalid handles created during verification
        tty->print(" VerifyAfterGC:");
        Universe::verify(false);
    }

    NOT_PRODUCT(ref_processor()->verify_no_references_recorded());

    if (PrintHeapAtGC) {
        gclog_or_tty->print_cr(" Heap after GC invocations=%d:", heap->total_collections());
        Universe::print();
        gclog_or_tty->print("} ");
    }
}
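
The four mark_sweep_phase calls above follow the classic mark-compact recipe: mark the live objects, compute forwarding addresses, adjust every pointer through those addresses, then slide the objects into place. A toy sketch of that ordering over a flat array heap (illustrative only, not HotSpot code):

#include <cstddef>
#include <vector>

// Toy heap: each slot holds at most one outgoing reference (index, -1 = none).
struct Obj {
    bool live    = false;  // set by the mark phase
    int  ref     = -1;     // index of the referenced object
    int  forward = -1;     // new index, computed by the forwarding phase
};

// Phase 1: mark everything reachable from the roots.
void mark(std::vector<Obj>& heap, const std::vector<int>& roots) {
    std::vector<int> stack(roots);
    while (!stack.empty()) {
        int i = stack.back(); stack.pop_back();
        if (i < 0 || heap[i].live) continue;
        heap[i].live = true;
        stack.push_back(heap[i].ref);
    }
}

// Phase 2: slide live objects left; record each one's destination.
void compute_forwarding(std::vector<Obj>& heap) {
    int next = 0;
    for (Obj& o : heap) o.forward = o.live ? next++ : -1;
}

// Phase 3: rewrite every reference (and root) through the forwarding info.
void adjust_pointers(std::vector<Obj>& heap, std::vector<int>& roots) {
    for (Obj& o : heap)
        if (o.live && o.ref >= 0) o.ref = heap[o.ref].forward;
    for (int& r : roots)
        if (r >= 0) r = heap[r].forward;
}

// Phase 4: move objects to their destinations and drop the dead tail.
// Moving in ascending order is safe because destinations never exceed
// source indices when sliding left.
void compact(std::vector<Obj>& heap) {
    int live = 0;
    for (const Obj& o : heap) live += o.live ? 1 : 0;
    for (std::size_t i = 0; i < heap.size(); ++i)
        if (heap[i].live) heap[heap[i].forward] = heap[i];
    heap.resize(live);
}
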
Exemplo n.º 14
0
/*
 * Discover registration info for the given class and all of its
 * child classes, adding each entry to the list.
 */
static int _DiscoverClassAndChildrenClasses(
    _In_opt_z_ const MI_Char* cn,
    _In_z_ const MI_Char* ns,
    _Inout_ IndicationClassList *list,
    _Inout_ MI_Uint32* classcount,
    _In_ MI_Boolean children,
    _In_ ProvRegType regtype)
{
    /* Find the class first */
    MI_Result r;
    ProvRegPosition pos;
    const ProvRegEntry* e = ProvReg_FindProviderForClassByType(list->provreg, ns, cn, regtype, &r);
    if (e)
    {
        if (IndicationClassList_AddEntry(list, e))
        {
            trace_DiscoverClassesFoundTypeClass( GetRegClassTypeName(regtype), e->className );
            (*classcount)++;
        }
        else
            trace_DiscoverClassesFailedToAdd(e->className, r);
    }
    else
        trace_DiscoverClassesNoRegInfo(GetRegClassTypeName(regtype), (cn) ? cn : PAL_T(""), ns);

    /* Return if there is no need to discover child classes */
    if (MI_FALSE == children)
        return 0;

    /* Begin enumeration of classes for this request */
    {
        r = ProvReg_BeginClasses(list->provreg, ns, cn, MI_TRUE, &pos, MI_FALSE);
        if (MI_RESULT_OK != r)
        {
            trace_DiscoverClassesBeginFailed(r, tcs(Result_ToString(r)), tcs(ns), tcs(cn));
            return -1;
        }
    }

    /* While more classes */
    for (;;)
    {
        const ZChar* derived = NULL;
        MI_Boolean done;

        r = ProvReg_NextClass(&pos, &derived, &done);
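        /* Note: `done` is checked before `r` below; a clean end of the
           enumeration is reported through `done`, not as an error result. */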

        if (done)
            break;

        if (MI_RESULT_OK != r)
        {
            trace_DiscoverClassesNextFailed(r, tcs(Result_ToString(r)), tcs(ns), tcs(cn));
            return -1;
        }
        e = ProvReg_FindProviderForClassByType(list->provreg, ns, derived, regtype, &r);
        if (e)
        {
            if (IndicationClassList_AddEntry(list, e))
            {
                trace_DiscoverClassesFoundTypeClass(GetRegClassTypeName(regtype), e->className);
                (*classcount)++;
            }
            else
                trace_DiscoverClassesFailedToAdd(e->className, r);
        }
    }

    /* Finalize enumeration */
    {
        r = ProvReg_EndClasses(&pos);
        if (MI_RESULT_OK != r)
        {
            trace_DiscoverClassesEndFailed(r, tcs(Result_ToString(r)));
            return -1;
        }
    }
    return 0;
}
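
The registry walk above is a begin/next/end cursor: ProvReg_BeginClasses opens a position, ProvReg_NextClass yields derived classes until `done` is set, and ProvReg_EndClasses releases the position. A minimal self-contained sketch of the same contract (all names below are illustrative, not OMI's):

#include <cstdio>

// Illustrative cursor over a fixed list, mirroring the shape of
// ProvReg_BeginClasses / ProvReg_NextClass / ProvReg_EndClasses.
struct ClassCursor { const char** items; int i, n; };

int Cursor_Begin(ClassCursor* c, const char** items, int n) {
    c->items = items; c->i = 0; c->n = n;
    return 0;                                      // 0 == success
}
int Cursor_Next(ClassCursor* c, const char** out, bool* done) {
    if (c->i >= c->n) { *done = true; return 0; }  // clean end, not an error
    *out = c->items[c->i++]; *done = false;
    return 0;
}
int Cursor_End(ClassCursor* c) { c->n = 0; return 0; }

int main() {
    const char* classes[] = { "Base", "Derived1", "Derived2" };
    ClassCursor cur;
    if (Cursor_Begin(&cur, classes, 3) != 0) return 1;
    for (;;) {
        const char* name; bool done;
        if (Cursor_Next(&cur, &name, &done) != 0) return 1;
        if (done) break;
        std::printf("%s\n", name);
    }
    return Cursor_End(&cur);                       // always close the position
}
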
Exemplo n.º 15
0
// This method contains no policy. You should probably
// be calling invoke() instead. 
void PSScavenge::invoke_no_policy(bool& notify_ref_lock) {
  assert(SafepointSynchronize::is_at_safepoint(), "should be at safepoint");
  assert(Thread::current() == (Thread*)VMThread::vm_thread(), "should be in vm thread");

  TimeStamp scavenge_entry;
  TimeStamp scavenge_midpoint;
  TimeStamp scavenge_exit;

  scavenge_entry.update();

  if (GC_locker::is_active()) return;

  ParallelScavengeHeap* heap = (ParallelScavengeHeap*)Universe::heap();
  assert(heap->kind() == CollectedHeap::ParallelScavengeHeap, "Sanity");

  // Check for potential problems.
  if (!should_attempt_scavenge()) {
    return;
  }

  PSYoungGen* young_gen = heap->young_gen();
  PSOldGen* old_gen = heap->old_gen();
  PSPermGen* perm_gen = heap->perm_gen();
  AdaptiveSizePolicy* size_policy = heap->size_policy();

  heap->increment_total_collections();

  if (PrintHeapAtGC) {
    gclog_or_tty->print_cr(" {Heap before GC invocations=%d:", heap->total_collections());
    Universe::print();
  }

  assert(!NeverTenure || _tenuring_threshold == markOopDesc::max_age + 1, "Sanity");
  assert(!AlwaysTenure || _tenuring_threshold == 0, "Sanity");

  size_t prev_used = heap->used();
  assert(promotion_failed() == false, "Sanity");

  // Fill in TLABs
  heap->ensure_parseability();

  if (VerifyBeforeGC && heap->total_collections() >= VerifyGCStartAt) {
    HandleMark hm;  // Discard invalid handles created during verification
    tty->print(" VerifyBeforeGC:");
    Universe::verify(true);
  }

  {
    ResourceMark rm;
    HandleMark hm;

    TraceTime t1("GC", PrintGC, true, gclog_or_tty);
    TraceCollectorStats tcs(counters());
    if (TraceGen0Time) accumulated_time()->start();

    // Let the size policy know we're starting
    size_policy->minor_collection_begin();
    
    // Verify no unmarked old->young roots
    if (VerifyRememberedSets) {
      old_gen->verify_object_start_array();
      perm_gen->verify_object_start_array();
      CardTableExtension::verify_all_young_refs_imprecise();
    }
    
    assert(young_gen->to_space()->is_empty(), "Attempt to scavenge with live objects in to_space");
    young_gen->to_space()->clear();

    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
    COMPILER2_ONLY(DerivedPointerTable::clear(););

    reference_processor()->enable_discovery();
    
    // We track how much was promoted to the next generation for
    // the AdaptiveSizePolicy.
    size_t old_gen_used_before = old_gen->object_space()->used_in_bytes();

    // Reset our survivor overflow.
    set_survivor_overflow(false);
    
    // We need to save the old/perm top values before
    // creating the promotion_manager. We pass the top
    // values to the card_table, to prevent it from
    // straying into the promotion labs.
    HeapWord* old_top = old_gen->object_space()->top();
    HeapWord* perm_top = perm_gen->object_space()->top();

    // Release all previously held resources
    gc_task_manager()->release_all_resources();

    PSPromotionManager::pre_scavenge();

    // We'll use the promotion manager again later.
    PSPromotionManager* promotion_manager = PSPromotionManager::vm_thread_promotion_manager();
    {
      // TraceTime("Roots");
      
      GCTaskQueue* q = GCTaskQueue::create();
      
      for(uint i=0; i<ParallelGCThreads; i++) {
        q->enqueue(new OldToYoungRootsTask(old_gen, old_top, i));
      }

      q->enqueue(new SerialOldToYoungRootsTask(perm_gen, perm_top));

      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::universe));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::jni_handles));
      // We scan the thread roots in parallel
      Threads::create_thread_roots_tasks(q);
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::object_synchronizer));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::flat_profiler));
      q->enqueue(new ScavengeRootsTask(ScavengeRootsTask::system_dictionary));

      if (ParallelGCThreads>1) {
        for (uint j=0; j<ParallelGCThreads-1; j++) {
          q->enqueue(new StealTask(false));
        }
        q->enqueue(new StealTask(true));
      }

      WaitForBarrierGCTask* fin = WaitForBarrierGCTask::create();
      q->enqueue(fin);
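      // A barrier task does not begin until it is the only task left running,
      // so waiting on `fin` below blocks this thread until every root task
      // enqueued above has completed.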

      gc_task_manager()->add_list(q);
      
      fin->wait_for();

      // We have to release the barrier tasks!
      WaitForBarrierGCTask::destroy(fin);
    }

    scavenge_midpoint.update();

    NOT_COMPILER2(ReferencePolicy *soft_ref_policy = new LRUCurrentHeapPolicy());
    COMPILER2_ONLY(ReferencePolicy *soft_ref_policy = new LRUMaxHeapPolicy());
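    // Both LRU policies clear a soft reference whose last access is older
    // than a grace period proportional to free heap: scaled by the current
    // heap size without COMPILER2, by the maximum heap size with it.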
    
    PSIsAliveClosure is_alive;
    PSKeepAliveClosure keep_alive(promotion_manager);
    PSEvacuateFollowersClosure evac_followers(promotion_manager);
    
    // Process reference objects discovered during scavenge
    reference_processor()->process_discovered_references(soft_ref_policy, &is_alive,
                                                         &keep_alive, &evac_followers);
    
    // Enqueue reference objects discovered during scavenge.
    notify_ref_lock = reference_processor()->enqueue_discovered_references();
    
    // Finally, flush the promotion_manager's labs, and deallocate its stacks.
    assert(promotion_manager->claimed_stack()->size() == 0, "Sanity");
    PSPromotionManager::post_scavenge();

    bool scavenge_promotion_failure = promotion_failed();
    if (scavenge_promotion_failure) {
      clean_up_failed_promotion();
      if (PrintGC) {
        gclog_or_tty->print("--");
      }
    }

    // Let the size policy know we're done. Note that we count promotion
    // failure cleanup time as part of the collection (otherwise, we're implicitly
    // saying it's mutator time).
    size_policy->minor_collection_end();

    if (!scavenge_promotion_failure) {
      // Swap the survivor spaces.
      young_gen->eden_space()->clear();
      young_gen->from_space()->clear();
      young_gen->swap_spaces();

      if (UseAdaptiveSizePolicy) {
        // Calculate the new survivor size and tenuring threshold
        size_t survived = young_gen->from_space()->used_in_bytes();
        size_t promoted = old_gen->used_in_bytes() - old_gen_used_before;

        if (PrintAdaptiveSizePolicy) {
          tty->print_cr("AdaptiveSizeStart: collection: %d ",
                         heap->total_collections());
        }

        size_t survivor_limit =
          size_policy->max_survivor_size(young_gen->max_size());
        _tenuring_threshold =
          size_policy->compute_survivor_space_size_and_threshold(survived,
                                                                 promoted,
                                                                 _survivor_overflow,
                                                                 _tenuring_threshold,
                                                                 survivor_limit);

        // Calculate optimal free space amounts
        size_policy->compute_generation_free_space(young_gen->used_in_bytes(),
                                                   old_gen->used_in_bytes(),
                                                   perm_gen->used_in_bytes(),
                                                   false  /* full gc*/);
        
        // Resize the old and young generations
        old_gen->resize(size_policy->calculated_old_free_size_in_bytes());
        
        young_gen->resize(size_policy->calculated_eden_size_in_bytes(),
                          size_policy->calculated_survivor_size_in_bytes());

        if (PrintAdaptiveSizePolicy) {
          tty->print_cr("AdaptiveSizeStop: collection: %d ",
                         heap->total_collections());
        }


      }

      assert(young_gen->to_space()->is_empty(), "to space should be empty now");
    }

    COMPILER2_ONLY(DerivedPointerTable::update_pointers());
    
    NOT_PRODUCT(reference_processor()->verify_no_references_recorded());
        
    // Verify all old -> young cards are now precise
    if (VerifyRememberedSets) {
      // Precise verification will give false positives. Until this is fixed,
      // use imprecise verification.
      // CardTableExtension::verify_all_young_refs_precise();
      CardTableExtension::verify_all_young_refs_imprecise();
    }

    if (TraceGen0Time) accumulated_time()->stop();
    
    if (PrintGC) {
      heap->print_heap_change(prev_used);
    }

    heap->update_counters();
  }