void CollectorPolicy::cleared_all_soft_refs() {
  // If near gc overhead limit, continue to clear SoftRefs.  SoftRefs may
  // have been cleared in the last collection but if the gc overhead
  // limit continues to be near, SoftRefs should still be cleared.
  if (size_policy() != NULL) {
    _should_clear_all_soft_refs = size_policy()->gc_overhead_limit_near();
  }
  _all_soft_refs_clear = true;
}
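// A minimal standalone sketch (not HotSpot code; all names are hypothetical) of
// how the two flags above cooperate across a collection cycle: the collector
// consults should_clear_all_soft_refs() when it starts a GC, and calls
// cleared_all_soft_refs() afterwards, so clearing continues for as long as the
// gc overhead limit stays near.
#include <cstdio>

struct ToySoftRefPolicy {
  bool _should_clear_all_soft_refs = false;
  bool _all_soft_refs_clear = false;
  bool _gc_overhead_limit_near = false;  // stands in for size_policy()->gc_overhead_limit_near()

  bool should_clear_all_soft_refs() const { return _should_clear_all_soft_refs; }

  // Mirrors the shape of CollectorPolicy::cleared_all_soft_refs(): keep clearing
  // while the overhead limit remains near, and record that SoftRefs were cleared.
  void cleared_all_soft_refs() {
    _should_clear_all_soft_refs = _gc_overhead_limit_near;
    _all_soft_refs_clear = true;
  }
};

int main() {
  ToySoftRefPolicy p;
  p._gc_overhead_limit_near = true;   // pretend the last GC ran close to the limit
  p.cleared_all_soft_refs();          // a GC just cleared SoftRefs
  std::printf("clear SoftRefs again next GC: %s\n",
              p.should_clear_all_soft_refs() ? "yes" : "no");
  return 0;
}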
// Before delegating the resize to the old generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_old_gen(size_t desired_free_space) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_old_gen_needs(desired_free_space);
  }

  // Delegate the resize to the generation.
  _old_gen->resize(desired_free_space);
}
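// A minimal standalone sketch (invented names, not the HotSpot API) of the
// short-circuit above: if an earlier boundary move already absorbed space from
// eden, the old generation has effectively been resized, so consume that record
// and skip the explicit resize request.
#include <cstddef>

struct ToyBoundaryPolicy {
  size_t bytes_absorbed_from_eden = 0;

  // Returns true when a previous eden absorption already changed the
  // generation's size; mirrors the reset_bytes_absorbed_from_eden() pattern.
  bool absorb_already_satisfied_resize() {
    if (bytes_absorbed_from_eden != 0) {
      bytes_absorbed_from_eden = 0;
      return true;  // the generation changed size already
    }
    return false;
  }
};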
void ASConcurrentMarkSweepPolicy::initialize_gc_policy_counters() {

  assert(size_policy() != NULL, "A size policy is required");
  // initialize the policy counters - 2 collectors, 3 generations
  if (ParNewGeneration::in_use()) {
    _gc_policy_counters = new CMSGCAdaptivePolicyCounters("ParNew:CMS", 2, 3,
      size_policy());
  }
  else {
    _gc_policy_counters = new CMSGCAdaptivePolicyCounters("Copy:CMS", 2, 3,
      size_policy());
  }
}
// Before delegating the resize to the young generation,
// the reserved space for the young and old generations
// may be changed to accommodate the desired resize.
void ParallelScavengeHeap::resize_young_gen(size_t eden_size,
    size_t survivor_size) {
  if (UseAdaptiveGCBoundary) {
    if (size_policy()->bytes_absorbed_from_eden() != 0) {
      size_policy()->reset_bytes_absorbed_from_eden();
      return;  // The generation changed size already.
    }
    gens()->adjust_boundary_for_young_gen_needs(eden_size, survivor_size);
  }

  // Delegate the resize to the generation.
  _young_gen->resize(eden_size, survivor_size);
}
void GCAdaptivePolicyCounters::update_counters_from_policy() {
  if (UsePerfData && (size_policy() != NULL)) {
    update_avg_minor_pause_counter();
    update_avg_minor_interval_counter();
#ifdef NOT_PRODUCT
    update_minor_pause_counter();
#endif
    update_minor_gc_cost_counter();
    update_avg_young_live_counter();

    update_survivor_size_counters();
    update_avg_survived_avg_counters();
    update_avg_survived_dev_counters();
    update_avg_survived_padded_avg_counters();

    update_change_old_gen_for_throughput();
    update_change_young_gen_for_throughput();
    update_decrease_for_footprint();
    update_change_young_gen_for_min_pauses();
    update_change_old_gen_for_maj_pauses();

    update_minor_pause_young_slope_counter();
    update_minor_collection_slope_counter();
    update_major_collection_slope_counter();
  }
}
// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(size_t size, bool is_noref, bool is_tlab) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;

  do {
    result = young_gen()->allocate(size, is_noref, is_tlab);

    // In some cases, the requested object will be too large to easily
    // fit in the young_gen. Rather than force a safepoint and collection
    // for each one, try allocation in old_gen for objects likely to fail
    // allocation in eden.
    if (result == NULL && size >= (young_gen()->eden_space()->capacity_in_words() / 2) && !is_tlab) {
      MutexLocker ml(Heap_lock);
      result = old_gen()->allocate(size, is_noref, is_tlab);
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, is_noref, is_tlab);
      VMThread::execute(&op);
      
      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()), "result not in heap");
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) && (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  } while (result == NULL && !size_policy()->gc_time_limit_exceeded());

  return result;
}
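// A self-contained sketch (hypothetical names, not the HotSpot API) of the retry
// shape used above: keep attempting the allocation, warn every `warn_every`
// retries, and bail out once the policy reports that too much time is being
// spent in GC, at which point the caller throws an out-of-memory error.
#include <cstddef>
#include <cstdio>
#include <functional>

void* allocate_with_retries(const std::function<void*(size_t)>& try_allocate,
                            const std::function<bool()>& gc_time_limit_exceeded,
                            size_t size,
                            unsigned warn_every) {
  void* result = nullptr;
  unsigned loop_count = 0;
  do {
    result = try_allocate(size);  // may schedule a GC internally and still fail
    ++loop_count;
    if (warn_every > 0 && loop_count % warn_every == 0) {
      std::fprintf(stderr, "allocation retried %u times, size=%zu\n", loop_count, size);
    }
  } while (result == nullptr && !gc_time_limit_exceeded());
  return result;  // NULL here means the policy forced a bail-out
}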
//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;
  uint loop_count = 0;

  do {
    {
      MutexLocker ml(Heap_lock);
      result = perm_gen()->allocate_permanent(size);
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size);
      VMThread::execute(&op);
      
      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()), "result not in heap");
        return op.result();
      }
    }
    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) && (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=%d", loop_count, size);
    }
  } while (result == NULL && !size_policy()->gc_time_limit_exceeded());

  return result;
}
 inline void update_major_collection_slope_counter() {
   _major_collection_slope_counter->set_value(
     (jlong)(size_policy()->major_collection_slope() * 1000)
   );
 }
 inline void update_change_old_gen_for_maj_pauses() {
   _change_old_gen_for_maj_pauses_counter->set_value(
     size_policy()->change_old_gen_for_maj_pauses());
 }
 inline void update_decrement_tenuring_threshold_for_survivor_limit() {
   _decrement_tenuring_threshold_for_survivor_limit_counter->set_value(
     size_policy()->decrement_tenuring_threshold_for_survivor_limit());
 }
// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and set it to true only if the gc time
  // limit is being exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size);

  uint loop_count = 0;
  uint gc_count = 0;
  uint gclocker_stalled_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = total_collections();

      result = young_gen()->allocate(size);
      if (result != NULL) {
        return result;
      }

      // If certain conditions hold, try allocating from the old gen.
      result = mem_allocate_old_gen(size);
      if (result != NULL) {
        return result;
      }

      if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
        return NULL;
      }

      // Failed to allocate without a gc.
      if (GCLocker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GCLocker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {
      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(is_in_or_null(op.result()), "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // will be thrown (return NULL, ignoring the contents of op.result()),
        // but clear gc_overhead_limit_exceeded so that the next collection
        // starts with a clean slate (i.e., forgets about previous overhead
        // excesses).  Fill op.result() with a filler object so that the
        // heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();

        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          log_trace(gc)("ParallelScavengeHeap::mem_allocate: return NULL because gc_overhead_limit_exceeded is set");
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      log_warning(gc)("ParallelScavengeHeap::mem_allocate retries %d times", loop_count);
      log_warning(gc)("\tsize=" SIZE_FORMAT, size);
    }
  }

  return result;
}
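// A standalone sketch (std::mutex instead of Heap_lock; all names invented) of
// the protocol described in the comment inside the loop above: the collection
// count is sampled and the allocation retried inside the same critical section,
// so a collection that another thread completed while this thread was blocked
// on the lock is not repeated.
#include <cstddef>
#include <mutex>

struct ToyHeap {
  std::mutex heap_lock;
  unsigned   total_collections = 0;
  size_t     used = 0;
  size_t     capacity = 1024;  // words, arbitrary

  // Stand-in for young_gen()->allocate(); caller must hold heap_lock.
  void* try_allocate(size_t size) {
    if (used + size > capacity) return nullptr;
    used += size;
    return static_cast<void*>(&used);  // dummy non-null pointer
  }

  // Stand-in for the failed-allocation VM operation: collect only if nobody
  // else has collected since gc_count_before was sampled.
  void* collect_and_allocate(size_t size, unsigned gc_count_before) {
    std::lock_guard<std::mutex> ml(heap_lock);
    if (gc_count_before == total_collections) {
      used = 0;  // "collect"
      ++total_collections;
    }
    return try_allocate(size);
  }

  void* mem_allocate(size_t size) {
    if (size > capacity) return nullptr;  // can never be satisfied
    for (;;) {
      unsigned gc_count;
      {
        std::lock_guard<std::mutex> ml(heap_lock);
        gc_count = total_collections;       // read only while holding the lock...
        if (void* p = try_allocate(size)) { // ...and retry in the same locked region
          return p;
        }
      }
      if (void* p = collect_and_allocate(size, gc_count)) {
        return p;
      }
    }
  }
};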
QWidget* MenuItem::rowWidget(CamcopsApp& app) const
{
    QWidget* row = new QWidget();
    QHBoxLayout* rowlayout = new QHBoxLayout();
    row->setLayout(rowlayout);

    if (m_p_task) {
        // --------------------------------------------------------------------
        // Task instance
        // --------------------------------------------------------------------
        // Stretch: http://stackoverflow.com/questions/14561516/qt-qhboxlayout-percentage-size

        bool complete = m_p_task->isComplete();
        const bool& threecols = m_task_shows_taskname;

        // Taskname
        if (m_task_shows_taskname) {
            QLabel* taskname = new LabelWordWrapWide(m_p_task->shortname());
            taskname->setObjectName(complete
                                    ? "task_item_taskname_complete"
                                    : "task_item_taskname_incomplete");
            QSizePolicy spTaskname(QSizePolicy::Preferred,
                                   QSizePolicy::Preferred);
            spTaskname.setHorizontalStretch(STRETCH_3COL_TASKNAME);
            taskname->setSizePolicy(spTaskname);
            rowlayout->addWidget(taskname);
        }
        // Timestamp
        QLabel* timestamp = new LabelWordWrapWide(
            m_p_task->whenCreated().toString(DateTime::SHORT_DATETIME_FORMAT));
        timestamp->setObjectName(complete ? "task_item_timestamp_complete"
                                          : "task_item_timestamp_incomplete");
        QSizePolicy spTimestamp(QSizePolicy::Preferred,
                                QSizePolicy::Preferred);
        spTimestamp.setHorizontalStretch(threecols ? STRETCH_3COL_TIMESTAMP
                                                   : STRETCH_2COL_TIMESTAMP);
        timestamp->setSizePolicy(spTimestamp);
        rowlayout->addWidget(timestamp);
        // Summary
        QLabel* summary = new LabelWordWrapWide(
                    m_p_task->summaryWithCompleteSuffix());
        summary->setObjectName(complete ? "task_item_summary_complete"
                                        : "task_item_summary_incomplete");
        QSizePolicy spSummary(QSizePolicy::Preferred, QSizePolicy::Preferred);
        spSummary.setHorizontalStretch(threecols ? STRETCH_3COL_SUMMARY
                                                 : STRETCH_2COL_SUMMARY);
        summary->setSizePolicy(spSummary);
        rowlayout->addWidget(summary);
    } else {
        // --------------------------------------------------------------------
        // Conventional menu item
        // --------------------------------------------------------------------

        // Icon
        if (!m_label_only) {  // Labels go full-left
            if (!m_icon.isEmpty()) {
                QLabel* icon = UiFunc::iconWidget(m_icon, row);
                rowlayout->addWidget(icon);
            } else if (m_chain) {
                QLabel* icon = UiFunc::iconWidget(
                    UiFunc::iconFilename(UiConst::ICON_CHAIN), row);
                rowlayout->addWidget(icon);
            } else {
                rowlayout->addWidget(UiFunc::blankIcon(row));
            }
        }

        // Title/subtitle
        QVBoxLayout* textlayout = new QVBoxLayout();

        QLabel* title = new LabelWordWrapWide(m_title);
        title->setObjectName("menu_item_title");
        textlayout->addWidget(title);
        if (!m_subtitle.isEmpty()) {
            QLabel* subtitle = new LabelWordWrapWide(m_subtitle);
            subtitle->setObjectName("menu_item_subtitle");
            textlayout->addWidget(subtitle);
        }
        rowlayout->addLayout(textlayout);
        rowlayout->addStretch();

        // Arrow on right
        if (m_arrow_on_right) {
            QLabel* iconLabel = UiFunc::iconWidget(
                UiFunc::iconFilename(UiConst::ICON_HASCHILD),
                nullptr, false);
            rowlayout->addWidget(iconLabel);
        }

        // Background colour, via stylesheets
        if (m_label_only) {
            row->setObjectName("label_only");
        } else if (!m_implemented) {
            row->setObjectName("not_implemented");
        } else if (m_unsupported) {
            row->setObjectName("unsupported");
        } else if (m_not_if_locked && app.locked()) {
            row->setObjectName("locked");
        } else if (m_needs_privilege && !app.privileged()) {
            row->setObjectName("needs_privilege");
        }
        // ... but not for locked/needs privilege, as otherwise we'd need
        // to refresh the whole menu? Well, we could try it.
        // On Linux desktop, it's extremely fast.
    }

    // Size policy
    QSizePolicy size_policy(QSizePolicy::MinimumExpanding,  // horizontal
                            QSizePolicy::Fixed);  // vertical
    row->setSizePolicy(size_policy);

    return row;
}
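// A minimal Qt sketch (requires a Qt build; the widgets and stretch values are
// illustrative, not taken from CamCOPS) of the stretch technique used for the
// task rows above: two labels in a QHBoxLayout divide the width roughly 1:3
// because their size policies carry different horizontal stretch factors.
#include <QApplication>
#include <QHBoxLayout>
#include <QLabel>
#include <QWidget>

int main(int argc, char* argv[]) {
    QApplication app(argc, argv);

    QWidget row;
    QHBoxLayout* layout = new QHBoxLayout(&row);

    QLabel* name = new QLabel("taskname");
    QSizePolicy sp_name(QSizePolicy::Preferred, QSizePolicy::Preferred);
    sp_name.setHorizontalStretch(1);     // ~25% of the row
    name->setSizePolicy(sp_name);
    layout->addWidget(name);

    QLabel* summary = new QLabel("summary text");
    QSizePolicy sp_summary(QSizePolicy::Preferred, QSizePolicy::Preferred);
    sp_summary.setHorizontalStretch(3);  // ~75% of the row
    summary->setSizePolicy(sp_summary);
    layout->addWidget(summary);

    row.show();
    return app.exec();
}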
// Basic allocation policy. Should never be called at a safepoint, or
// from the VM thread.
//
// This method must handle cases where many mem_allocate requests fail
// simultaneously. When that happens, only one VM operation will succeed,
// and the rest will not be executed. For that reason, this method loops
// during failed allocation attempts. If the java heap becomes exhausted,
// we rely on the size_policy object to force a bail out.
HeapWord* ParallelScavengeHeap::mem_allocate(
                                     size_t size,
                                     bool is_noref,
                                     bool is_tlab,
                                     bool* gc_overhead_limit_was_exceeded) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and set it to true only if the gc time
  // limit is being exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = young_gen()->allocate(size, is_tlab);

  uint loop_count = 0;
  uint gc_count = 0;

  while (result == NULL) {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count = Universe::heap()->total_collections();

      result = young_gen()->allocate(size, is_tlab);

      // (1) If the requested object is too large to easily fit in the
      //     young_gen, or
      // (2) If GC is locked out via GCLocker, young gen is full and
      //     the need for a GC already signalled to GCLocker (done
      //     at a safepoint),
      // ... then, rather than force a safepoint and (a potentially futile)
      // collection (attempt) for each allocation, try allocation directly
      // in old_gen. For case (2) above, we may in the future allow
      // TLAB allocation directly in the old gen.
      if (result != NULL) {
        return result;
      }
      if (!is_tlab &&
          size >= (young_gen()->eden_space()->capacity_in_words(Thread::current()) / 2)) {
        result = old_gen()->allocate(size, is_tlab);
        if (result != NULL) {
          return result;
        }
      }
      if (GC_locker::is_active_and_needs_gc()) {
        // GC is locked out. If this is a TLAB allocation,
        // return NULL; the requestor will retry allocation
        // of an individual object at a time.
        if (is_tlab) {
          return NULL;
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Generate a VM operation
      VM_ParallelGCFailedAllocation op(size, is_tlab, gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_or_null(op.result()),
          "result not in heap");

        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }

        // Exit the loop if the gc time limit has been exceeded.
        // The allocation must have failed above ("result" guarding
        // this path is NULL) and the most recent collection has exceeded the
        // gc overhead limit (although enough may have been collected to
        // satisfy the allocation).  Exit the loop so that an out-of-memory
        // will be thrown (return NULL, ignoring the contents of op.result()),
        // but clear gc_overhead_limit_exceeded so that the next collection
        // starts with a clean slate (i.e., forgets about previous overhead
        // excesses).  Fill op.result() with a filler object so that the
        // heap remains parsable.
        const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
        const bool softrefs_clear = collector_policy()->all_soft_refs_clear();
        assert(!limit_exceeded || softrefs_clear, "Should have been cleared");
        if (limit_exceeded && softrefs_clear) {
          *gc_overhead_limit_was_exceeded = true;
          size_policy()->set_gc_overhead_limit_exceeded(false);
          if (PrintGCDetails && Verbose) {
            gclog_or_tty->print_cr("ParallelScavengeHeap::mem_allocate: "
              "return NULL because gc_overhead_limit_exceeded is set");
          }
          if (op.result() != NULL) {
            CollectedHeap::fill_with_object(op.result(), size);
          }
          return NULL;
        }

        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((result == NULL) && (QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::mem_allocate retries %d times \n\t"
              " size=%d %s", loop_count, size, is_tlab ? "(TLAB)" : "");
    }
  }

  return result;
}
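// A standalone sketch (toy types, invented names) of the heuristic explained in
// the comment inside the loop above: a non-TLAB request of at least half of
// eden's capacity is unlikely to fit there, so it is sent straight to the old
// generation rather than forcing a safepoint and a probably futile young GC.
#include <cstddef>

struct ToyGen {
  size_t capacity_in_words;
  size_t used_in_words = 0;

  // Trivial stand-in for generation-local allocation.
  void* allocate(size_t size_in_words) {
    if (used_in_words + size_in_words > capacity_in_words) return nullptr;
    used_in_words += size_in_words;
    return static_cast<void*>(&used_in_words);  // dummy non-null pointer
  }
};

void* allocate_preferring_old_gen_for_huge(ToyGen& young, ToyGen& old_gen,
                                           size_t size_in_words, bool is_tlab) {
  void* result = young.allocate(size_in_words);
  if (result == nullptr && !is_tlab &&
      size_in_words >= young.capacity_in_words / 2) {
    // At least half of eden: a young collection is unlikely to help.
    result = old_gen.allocate(size_in_words);
  }
  return result;
}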
 inline void update_avg_minor_pause_counter() {
   _avg_minor_pause_counter->set_value((jlong)
     (size_policy()->avg_minor_pause()->average() * 1000.0));
 }
 inline void update_decrease_for_footprint() {
   _decrease_for_footprint_counter->set_value(
     size_policy()->decrease_for_footprint());
 }
 inline void update_change_young_gen_for_throughput() {
   _change_young_gen_for_throughput_counter->set_value(
     size_policy()->change_young_gen_for_throughput());
 }
 inline void update_avg_survived_padded_avg_counters() {
   _avg_survived_padded_avg_counter->set_value(
     (jlong)(size_policy()->_avg_survived->padded_average())
   );
 }
 inline void update_avg_survived_dev_counters() {
   _avg_survived_dev_counter->set_value(
     (jlong)(size_policy()->_avg_survived->deviation())
   );
 }
 inline void update_avg_young_live_counter() {
   _avg_young_live_counter->set_value(
     (jlong)(size_policy()->avg_young_live()->average())
   );
 }
 inline void update_minor_gc_cost_counter() {
   _minor_gc_cost_counter->set_value((jlong)
     (size_policy()->minor_gc_cost() * 100.0));
 }
 inline void update_minor_pause_counter() {
   _minor_pause_counter->set_value((jlong)
     (size_policy()->avg_minor_pause()->last_sample() * 1000.0));
 }
 inline void update_eden_size() {
   size_t eden_size_in_bytes = size_policy()->calculated_eden_size_in_bytes();
   _eden_size_counter->set_value(eden_size_in_bytes);
 }
 inline void update_promo_size() {
   _promo_size_counter->set_value(
     size_policy()->calculated_promo_size_in_bytes());
 }
 inline void update_minor_pause_young_slope_counter() {
   _minor_pause_young_slope_counter->set_value(
     (jlong)(size_policy()->minor_pause_young_slope() * 1000)
   );
 }
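// A small sketch of the scaling convention used by the inline updaters above
// (my reading of the code, not an authoritative statement of the units): pause
// averages are doubles in seconds and GC cost is a fraction in [0,1], while the
// perf counters hold jlong values, hence the * 1000.0 and * 100.0 conversions.
#include <cstdint>

inline int64_t seconds_to_counter_value(double seconds) {
  return (int64_t)(seconds * 1000.0);    // e.g. 0.042 s -> 42
}

inline int64_t fraction_to_counter_value(double fraction) {
  return (int64_t)(fraction * 100.0);    // e.g. 0.07 -> 7
}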
PSGCAdaptivePolicyCounters::PSGCAdaptivePolicyCounters(const char* name_arg,
                                      int collectors,
                                      int generations,
                                      PSAdaptiveSizePolicy* size_policy_arg)
  : GCAdaptivePolicyCounters(name_arg,
                             collectors,
                             generations,
                             size_policy_arg) {
  if (UsePerfData) {
    EXCEPTION_MARK;
    ResourceMark rm;

    const char* cname;

    cname = PerfDataManager::counter_name(name_space(), "oldPromoSize");
    _old_promo_size = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Bytes, ps_size_policy()->calculated_promo_size_in_bytes(), CHECK);

    cname = PerfDataManager::counter_name(name_space(), "oldEdenSize");
    _old_eden_size = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Bytes, ps_size_policy()->calculated_eden_size_in_bytes(), CHECK);

    cname = PerfDataManager::counter_name(name_space(), "oldCapacity");
    _old_capacity = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Bytes, (jlong) Arguments::initial_heap_size(), CHECK);

    cname = PerfDataManager::counter_name(name_space(), "boundaryMoved");
    _boundary_moved = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Bytes, (jlong) 0, CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgPromotedAvg");
    _avg_promoted_avg_counter = 
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
        ps_size_policy()->calculated_promo_size_in_bytes(), CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgPromotedDev");
    _avg_promoted_dev_counter = 
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
        (jlong) 0, CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgPromotedPaddedAvg");
    _avg_promoted_padded_avg_counter = 
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
        ps_size_policy()->calculated_promo_size_in_bytes(), CHECK);

    cname = PerfDataManager::counter_name(name_space(),
      "avgPretenuredPaddedAvg");
    _avg_pretenured_padded_avg = 
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Bytes,
        (jlong) 0, CHECK);


    cname = PerfDataManager::counter_name(name_space(), 
      "changeYoungGenForMajPauses");
    _change_young_gen_for_maj_pauses_counter = 
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Events, 
        (jlong)0, CHECK);

    cname = PerfDataManager::counter_name(name_space(), 
      "changeOldGenForMinPauses");
    _change_old_gen_for_min_pauses = 
      PerfDataManager::create_variable(SUN_GC, cname, PerfData::U_Events, 
        (jlong)0, CHECK);


    cname = PerfDataManager::counter_name(name_space(), "avgMajorPauseTime");
    _avg_major_pause = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Ticks, (jlong) ps_size_policy()->_avg_major_pause->average(), CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgMajorIntervalTime");
    _avg_major_interval = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Ticks, (jlong) ps_size_policy()->_avg_major_interval->average(), CHECK);

    cname = PerfDataManager::counter_name(name_space(), "majorGcCost");
    _major_gc_cost_counter = PerfDataManager::create_variable(SUN_GC, cname,
       PerfData::U_Ticks, (jlong) ps_size_policy()->major_gc_cost(), CHECK);

    cname = PerfDataManager::counter_name(name_space(), "liveSpace");
    _live_space = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Bytes, ps_size_policy()->live_space(), CHECK);

    cname = PerfDataManager::counter_name(name_space(), "freeSpace");
    _free_space = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Bytes, ps_size_policy()->free_space(), CHECK);

    cname = PerfDataManager::counter_name(name_space(), "avgBaseFootprint");
    _avg_base_footprint = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Bytes, (jlong) ps_size_policy()->avg_base_footprint()->average(), CHECK);

    cname = PerfDataManager::counter_name(name_space(), "gcTimeLimitExceeded");
    _gc_time_limit_exceeded = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Events, ps_size_policy()->gc_time_limit_exceeded(), CHECK);

    cname = PerfDataManager::counter_name(name_space(), "liveAtLastFullGc");
    _live_at_last_full_gc = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Bytes, ps_size_policy()->live_at_last_full_gc(), CHECK);

    cname = PerfDataManager::counter_name(name_space(), "majorPauseOldSlope");
    _major_pause_old_slope = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_None, (jlong) 0, CHECK);

    cname = PerfDataManager::counter_name(name_space(), "minorPauseOldSlope");
    _minor_pause_old_slope = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_None, (jlong) 0, CHECK);

    cname = PerfDataManager::counter_name(name_space(), "majorPauseYoungSlope");
    _major_pause_young_slope = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_None, (jlong) 0, CHECK);

    cname = PerfDataManager::counter_name(name_space(), "scavengeSkipped");
    _scavenge_skipped = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Bytes, (jlong) 0, CHECK);

    cname = PerfDataManager::counter_name(name_space(), "fullFollowsScavenge");
    _full_follows_scavenge = PerfDataManager::create_variable(SUN_GC, cname,
      PerfData::U_Bytes, (jlong) 0, CHECK);

    _counter_time_stamp.update();
  }

  assert(size_policy()->is_gc_ps_adaptive_size_policy(),
    "Wrong type of size policy");
}
 inline void update_survivor_size_counters() {
   desired_survivor_size()->set_value(
     size_policy()->calculated_survivor_size_in_bytes());
 }
//
// This is the policy loop for allocating in the permanent generation.
// If the initial allocation fails, we create a vm operation which will
// cause a collection.
HeapWord* ParallelScavengeHeap::permanent_mem_allocate(size_t size) {
  assert(!SafepointSynchronize::is_at_safepoint(), "should not be at safepoint");
  assert(Thread::current() != (Thread*)VMThread::vm_thread(), "should not be in vm thread");
  assert(!Heap_lock->owned_by_self(), "this thread should not own the Heap_lock");

  HeapWord* result;

  uint loop_count = 0;
  uint gc_count = 0;
  uint full_gc_count = 0;

  do {
    // We don't want to have multiple collections for a single filled generation.
    // To prevent this, each thread tracks the total_collections() value, and if
    // the count has changed, does not do a new collection.
    //
    // The collection count must be read only while holding the heap lock. VM
    // operations also hold the heap lock during collections. There is a lock
    // contention case where thread A blocks waiting on the Heap_lock, while
    // thread B is holding it doing a collection. When thread A gets the lock,
    // the collection count has already changed. To prevent duplicate collections,
    // the policy MUST attempt allocations during the same period it reads the
    // total_collections() value!
    {
      MutexLocker ml(Heap_lock);
      gc_count      = Universe::heap()->total_collections();
      full_gc_count = Universe::heap()->total_full_collections();

      result = perm_gen()->allocate_permanent(size);

      if (result != NULL) {
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC is allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }
    }

    if (result == NULL) {

      // Exit the loop if the gc time limit has been exceeded.
      // The allocation must have failed above (result must be NULL),
      // and the most recent collection must have exceeded the
      // gc time limit.  Exit the loop so that an out-of-memory
      // will be thrown (returning a NULL will do that), but
      // clear gc_overhead_limit_exceeded so that the next collection
      // will succeed if the application decides to handle the
      // out-of-memory and tries to go on.
      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      if (limit_exceeded) {
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (PrintGCDetails && Verbose) {
          gclog_or_tty->print_cr("ParallelScavengeHeap::permanent_mem_allocate:"
            " return NULL because gc_overhead_limit_exceeded is set");
        }
        assert(result == NULL, "Allocation did not fail");
        return NULL;
      }

      // Generate a VM operation
      VM_ParallelGCFailedPermanentAllocation op(size, gc_count, full_gc_count);
      VMThread::execute(&op);

      // Did the VM operation execute? If so, return the result directly.
      // This prevents us from looping until time out on requests that can
      // not be satisfied.
      if (op.prologue_succeeded()) {
        assert(Universe::heap()->is_in_permanent_or_null(op.result()),
          "result not in heap");
        // If GC was locked out during VM operation then retry allocation
        // and/or stall as necessary.
        if (op.gc_locked()) {
          assert(op.result() == NULL, "must be NULL if gc_locked() is true");
          continue;  // retry and/or stall as necessary
        }
        // If a NULL result is being returned, an out-of-memory
        // will be thrown now.  Clear the gc_overhead_limit_exceeded
        // flag to avoid the following situation.
        //      gc_overhead_limit_exceeded is set during a collection
        //      the collection fails to return enough space and an OOM is thrown
        //      a subsequent GC prematurely throws an out-of-memory because
        //        the gc_overhead_limit_exceeded counts did not start
        //        again from 0.
        if (op.result() == NULL) {
          size_policy()->reset_gc_overhead_limit_count();
        }
        return op.result();
      }
    }

    // The policy object will prevent us from looping forever. If the
    // time spent in gc crosses a threshold, we will bail out.
    loop_count++;
    if ((QueuedAllocationWarningCount > 0) &&
        (loop_count % QueuedAllocationWarningCount == 0)) {
      warning("ParallelScavengeHeap::permanent_mem_allocate retries %d times \n\t"
              " size=%d", loop_count, size);
    }
  } while (result == NULL);

  return result;
}
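// A standalone sketch (invented names and an arbitrary threshold, not the
// HotSpot fields) of the bookkeeping described in the comments above: when an
// allocation is about to fail with an out-of-memory error, the accumulated
// overhead-limit state is reset so that, if the application catches the error
// and keeps running, later collections start counting from a clean slate
// instead of tripping the limit immediately.
struct ToyOverheadPolicy {
  bool gc_overhead_limit_exceeded = false;
  int  overhead_limit_count = 0;  // consecutive collections over the limit

  void record_collection_over_limit() {
    ++overhead_limit_count;
    gc_overhead_limit_exceeded = (overhead_limit_count >= 3);  // threshold is illustrative
  }

  void reset_gc_overhead_limit_count() {
    overhead_limit_count = 0;
    gc_overhead_limit_exceeded = false;
  }
};

// Caller-side shape: returning NULL means an OutOfMemoryError will be thrown,
// so forget the accumulated overhead history first.
inline void* fail_allocation(ToyOverheadPolicy& policy) {
  policy.reset_gc_overhead_limit_count();
  return nullptr;
}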
 inline void update_decrement_tenuring_threshold_for_gc_cost() {
   _decrement_tenuring_threshold_for_gc_cost_counter->set_value(
     size_policy()->decrement_tenuring_threshold_for_gc_cost());
 }
 inline void update_decide_at_full_gc_counter() {
   _decide_at_full_gc_counter->set_value(
     size_policy()->decide_at_full_gc());
 }
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                        bool is_tlab,
                                        bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and set it to true only if the gc time
  // limit is being exceeded, as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // we didn't get to do a GC and we didn't get any memory
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
         assert(result == NULL, "must be NULL if gc_locked() is true");
         continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded
      // this time, return NULL so that an out-of-memory
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the exceeded state does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
                  " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}
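// A self-contained sketch (not the HotSpot allocator; all names invented) of the
// shape used in mem_allocate_work above: a lock-free first attempt that bumps a
// shared top pointer with compare-and-swap (the par_allocate analogue), followed
// by a slow path taken under a lock when the fast path fails.
#include <atomic>
#include <cstddef>
#include <mutex>

class ToyEden {
 public:
  explicit ToyEden(size_t capacity_words) : _top(0), _capacity(capacity_words) {}

  // Lock-free fast path: CAS-bump the top offset, retrying on contention.
  void* par_allocate(size_t words) {
    size_t old_top = _top.load(std::memory_order_relaxed);
    do {
      if (old_top + words > _capacity) return nullptr;  // eden exhausted
    } while (!_top.compare_exchange_weak(old_top, old_top + words,
                                         std::memory_order_relaxed));
    return reinterpret_cast<void*>(old_top + 1);        // dummy non-null "address"
  }

  // Locked slow path stand-in: here it just "collects" by resetting eden and
  // retrying; the real code tries other generations and schedules a GC.
  void* slow_allocate(size_t words) {
    std::lock_guard<std::mutex> ml(_heap_lock);
    _top.store(0, std::memory_order_relaxed);
    return par_allocate(words);
  }

 private:
  std::atomic<size_t> _top;
  size_t              _capacity;
  std::mutex          _heap_lock;
};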