void GenMarkSweep::mark_sweep_phase3(int level) {
  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Adjust the pointers to reflect the new locations
  GCTraceTime tm("phase 3", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace("3");

  // Need new claim bits for the pointer adjustment tracing.
  ClassLoaderDataGraph::clear_claimed_marks();

  // Because the closure below is created statically, we cannot
  // use OopsInGenClosure constructor which takes a generation,
  // as the Universe has not been created when the static constructors
  // are run.
  adjust_pointer_closure.set_orig_generation(gch->get_gen(level));

  gch->gen_process_roots(level,
                         false, // Younger gens are not roots.
                         true,  // activate StrongRootsScope
                         SharedHeap::SO_AllCodeCache,
                         GenCollectedHeap::StrongAndWeakRoots,
                         &adjust_pointer_closure,
                         &adjust_pointer_closure,
                         &adjust_cld_closure);

  gch->gen_process_weak_roots(&adjust_pointer_closure);

  adjust_marks();
  GenAdjustPointersClosure blk;
  gch->generation_iterate(&blk, true);
}
Example #2
/**
 * Should we try allocating in the old generation? True when any of:
 *   1) the requested size exceeds the young generation's capacity,
 *   2) the heap currently needs a GC (the GC locker is active and a GC is pending), or
 *   3) the heap's last incremental GC failed.
 */
bool GenCollectorPolicy::should_try_older_generation_allocation(
        size_t word_size) const {
  GenCollectedHeap* gch = GenCollectedHeap::heap();
  size_t gen0_capacity = gch->get_gen(0)->capacity_before_gc();
  return (word_size > heap_word_size(gen0_capacity))
      || GC_locker::is_active_and_needs_gc()
      || gch->incremental_collection_failed();
}
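For orientation, the same decision can be modelled outside HotSpot. The sketch below is a minimal, self-contained restatement of the predicate above; HeapModel and its fields are hypothetical stand-ins for the GenCollectedHeap and GC_locker queries, not real HotSpot types.

#include <cstddef>

// Hypothetical snapshot of the heap state consulted by the predicate above.
struct HeapModel {
  size_t young_gen_capacity_words;       // get_gen(0)->capacity_before_gc(), in words
  bool   gc_locker_active_and_needs_gc;  // GC_locker::is_active_and_needs_gc()
  bool   incremental_collection_failed;  // gch->incremental_collection_failed()
};

// Try the older generation when the request cannot fit in the young generation,
// or when a GC is already pending, or when the last incremental GC failed.
inline bool should_try_older_generation_allocation(const HeapModel& h,
                                                   size_t word_size) {
  return word_size > h.young_gen_capacity_words
      || h.gc_locker_active_and_needs_gc
      || h.incremental_collection_failed;
}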
void GenMarkSweep::mark_sweep_phase1(int level,
                                     bool clear_all_softrefs) {
  // Recursively traverse all live objects and mark them
  GCTraceTime tm("phase 1", PrintGC && Verbose, true, _gc_timer, _gc_tracer->gc_id());
  trace(" 1");

  GenCollectedHeap* gch = GenCollectedHeap::heap();

  // Because follow_root_closure is created statically, we cannot
  // use OopsInGenClosure constructor which takes a generation,
  // as the Universe has not been created when the static constructors
  // are run.
  follow_root_closure.set_orig_generation(gch->get_gen(level));

  // Need new claim bits before marking starts.
  ClassLoaderDataGraph::clear_claimed_marks();

  gch->gen_process_roots(level,
                         false, // Younger gens are not roots.
                         true,  // activate StrongRootsScope
                         SharedHeap::SO_None,
                         GenCollectedHeap::StrongRootsOnly,
                         &follow_root_closure,
                         &follow_root_closure,
                         &follow_cld_closure);

  // Process reference objects found during marking
  {
    ref_processor()->setup_policy(clear_all_softrefs);
    const ReferenceProcessorStats& stats =
      ref_processor()->process_discovered_references(
        &is_alive, &keep_alive, &follow_stack_closure, NULL, _gc_timer, _gc_tracer->gc_id());
    gc_tracer()->report_gc_reference_stats(stats);
  }

  // This is the point where the entire marking should have completed.
  assert(_marking_stack.is_empty(), "Marking should have completed");

  // Unload classes and purge the SystemDictionary.
  bool purged_class = SystemDictionary::do_unloading(&is_alive);

  // Unload nmethods.
  CodeCache::do_unloading(&is_alive, purged_class);

  // Prune dead klasses from subklass/sibling/implementor lists.
  Klass::clean_weak_klass_links(&is_alive);

  // Delete entries for dead interned strings.
  StringTable::unlink(&is_alive);

  // Clean up unreferenced symbols in symbol table.
  SymbolTable::unlink();

  gc_tracer()->report_object_count_after_gc(&is_alive);
}
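As a reminder of what the closures above accomplish, here is a minimal, self-contained sketch of the underlying marking algorithm: a worklist traversal from the roots. It models the idea only; Obj and the roots vector are hypothetical, not HotSpot types.

#include <vector>

// Hypothetical object model: a mark bit plus outgoing references.
struct Obj {
  bool marked = false;
  std::vector<Obj*> refs;
};

// Mark every object reachable from the roots, using an explicit stack
// (the analogue of _marking_stack drained by follow_stack_closure).
void mark_from_roots(const std::vector<Obj*>& roots) {
  std::vector<Obj*> stack;
  for (Obj* r : roots) {
    if (r != nullptr && !r->marked) { r->marked = true; stack.push_back(r); }
  }
  while (!stack.empty()) {
    Obj* o = stack.back();
    stack.pop_back();
    for (Obj* ref : o->refs) {
      if (ref != nullptr && !ref->marked) { ref->marked = true; stack.push_back(ref); }
    }
  }
}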
HeapWord* GenCollectorPolicy::expand_heap_and_allocate(size_t size,
                                                       bool   is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  HeapWord* result = NULL;
  for (int i = number_of_generations() - 1; i >= 0 && result == NULL; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_tlab)) {
      result = gen->expand_and_allocate(size, is_tlab);
    }
  }
  assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
  return result;
}
Example #5
void CardTableRS::verify() {
  // At present, we only know how to verify the card table RS for
  // generational heaps.
  VerifyCTGenClosure blk(this);
  CollectedHeap* ch = Universe::heap();
  // We will do the perm-gen portion of the card table, too.
  Generation* pg = SharedHeap::heap()->perm_gen();
  HeapWord* pg_boundary = pg->reserved().start();

  if (ch->kind() == CollectedHeap::GenCollectedHeap) {
    GenCollectedHeap::heap()->generation_iterate(&blk, false);
    _ct_bs->verify();

    // If the old gen collections also collect perm, then we are only
    // interested in perm-to-young pointers, not perm-to-old pointers.
    GenCollectedHeap* gch = GenCollectedHeap::heap();
    CollectorPolicy* cp = gch->collector_policy();
    if (cp->is_mark_sweep_policy() || cp->is_concurrent_mark_sweep_policy()) {
      pg_boundary = gch->get_gen(1)->reserved().start();
    }
  }
  VerifyCTSpaceClosure perm_space_blk(this, pg_boundary);
  SharedHeap::heap()->perm_gen()->space_iterate(&perm_space_blk, true);
}
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size,
                                        bool is_tlab,
                                        bool* gc_overhead_limit_was_exceeded) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1, gclocker_stalled_count = 0; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_tlab)) {
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }
    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {
        if (is_tlab) {
          return NULL;  // Caller will retry allocating individual object
        }
        if (!gch->is_maximal_no_gc()) {
          // Try and expand heap to satisfy request
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        if (gclocker_stalled_count > GCLockerRetryAllocationCount) {
          return NULL; // we didn't get to do a GC and we didn't get any memory
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for JNI critical section to be exited
          GC_locker::stall_until_clear();
          gclocker_stalled_count += 1;
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while"
                  " in jni critical section");
          }
          return NULL;
        }
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }

    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      if (op.gc_locked()) {
         assert(result == NULL, "must be NULL if gc_locked() is true");
         continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded this
      // time, return NULL so that an out-of-memory error
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the overhead exceeded does not persist.

      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();

      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        return NULL;
      }
      assert(result == NULL || gch->is_in_reserved(result),
             "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
                  " size=" SIZE_FORMAT " %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}
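The loop above is easier to follow as a small state machine. The sketch below is a compressed, self-contained restatement of its control flow against an abstract interface; every name in it is hypothetical, and the real method above handles many details (handles, Heap_lock, stall counting, logging, warnings) that this outline omits.

#include <cstddef>

// Hypothetical interface standing in for GenCollectedHeap / GC_locker / VMThread.
struct AbstractHeap {
  virtual void* par_allocate_young(size_t size) = 0;                  // lock-free fast path
  virtual void* attempt_allocation(size_t size, bool first_only) = 0; // under the heap lock
  virtual bool  gc_locker_active_and_needs_gc() = 0;
  virtual void* expand_and_allocate(size_t size) = 0;
  virtual void  stall_until_gc_locker_clear() = 0;
  virtual void* collect_for_allocation(size_t size, bool* gc_locked) = 0; // GC VM operation
  virtual ~AbstractHeap() {}
};

void* mem_allocate_sketch(AbstractHeap& heap, size_t size, bool young_only) {
  for (;;) {
    if (void* p = heap.par_allocate_young(size)) return p;             // 1. fast path
    if (void* p = heap.attempt_allocation(size, young_only)) return p; // 2. locked slow path
    if (heap.gc_locker_active_and_needs_gc()) {                        // 3. GC pending but blocked
      if (void* p = heap.expand_and_allocate(size)) return p;          //    try growing instead
      heap.stall_until_gc_locker_clear();                              //    wait, then retry
      continue;
    }
    bool gc_locked = false;
    void* p = heap.collect_for_allocation(size, &gc_locked);           // 4. GC, then allocate
    if (gc_locked) continue;                                           //    lost the race, retry
    return p;                                                          // 5. may be NULL -> OOM
  }
}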
void GenMarkSweep::invoke_at_safepoint(int level, ReferenceProcessor* rp, bool clear_all_softrefs) {
  guarantee(level == 1, "We always collect both old and young.");
  assert(SafepointSynchronize::is_at_safepoint(), "must be at a safepoint");

  GenCollectedHeap* gch = GenCollectedHeap::heap();
#ifdef ASSERT
  if (gch->collector_policy()->should_clear_all_soft_refs()) {
    assert(clear_all_softrefs, "Policy should have been checked earlier");
  }
#endif

  // hook up weak ref data so it can be used during Mark-Sweep
  assert(ref_processor() == NULL, "no stomping");
  assert(rp != NULL, "should be non-NULL");
  _ref_processor = rp;
  rp->setup_policy(clear_all_softrefs);

  GCTraceTime t1(GCCauseString("Full GC", gch->gc_cause()), PrintGC && !PrintGCDetails, true, NULL, _gc_tracer->gc_id());

  gch->trace_heap_before_gc(_gc_tracer);

  // When collecting the permanent generation Method*s may be moving,
  // so we either have to flush all bcp data or convert it into bci.
  CodeCache::gc_prologue();
  Threads::gc_prologue();

  // Increment the invocation count
  _total_invocations++;

  // Capture heap size before collection for printing.
  size_t gch_prev_used = gch->used();

  // Capture used regions for each generation that will be
  // subject to collection, so that card table adjustments can
  // be made intelligently (see clear / invalidate further below).
  gch->save_used_regions(level);

  allocate_stacks();

  mark_sweep_phase1(level, clear_all_softrefs);

  mark_sweep_phase2();

  // Don't add any more derived pointers during phase3
  COMPILER2_PRESENT(assert(DerivedPointerTable::is_active(), "Sanity"));
  COMPILER2_PRESENT(DerivedPointerTable::set_active(false));

  mark_sweep_phase3(level);

  mark_sweep_phase4();

  restore_marks();

  // Set saved marks for allocation profiler (and other things? -- dld)
  // (Should this be in general part?)
  gch->save_marks();

  deallocate_stacks();

  // If compaction completely evacuated all generations younger than this
  // one, then we can clear the card table.  Otherwise, we must invalidate
  // it (consider all cards dirty).  In the future, we might consider doing
  // compaction within generations only, and doing card-table sliding.
  bool all_empty = true;
  for (int i = 0; all_empty && i < level; i++) {
    Generation* g = gch->get_gen(i);
    all_empty = all_empty && g->used() == 0;
  }
  GenRemSet* rs = gch->rem_set();
  Generation* old_gen = gch->get_gen(level);
  // Clear/invalidate below make use of the "prev_used_regions" saved earlier.
  if (all_empty) {
    // We've evacuated all generations below us.
    rs->clear_into_younger(old_gen);
  } else {
    // Invalidate the cards corresponding to the currently used
    // region and clear those corresponding to the evacuated region.
    rs->invalidate_or_clear(old_gen);
  }

  Threads::gc_epilogue();
  CodeCache::gc_epilogue();
  JvmtiExport::gc_epilogue();

  if (PrintGC && !PrintGCDetails) {
    gch->print_heap_change(gch_prev_used);
  }

  // refs processing: clean slate
  _ref_processor = NULL;

  // Update heap occupancy information which is used as
  // input to soft ref clearing policy at the next gc.
  Universe::update_heap_info_at_gc();

  // Update time of last gc for all generations we collected
  // (which currently is all the generations in the heap).
  // We need to use a monotonically non-decreasing time in ms
  // or we will see time-warp warnings and os::javaTimeMillis()
  // does not guarantee monotonicity.
  jlong now = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
  gch->update_time_of_last_gc(now);

  gch->trace_heap_after_gc(_gc_tracer);
}
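The phase calls above follow the classic LISP2-style mark-compact scheme: phase 1 marks, phase 2 computes forwarding addresses, phase 3 adjusts pointers, phase 4 moves objects. As a rough, self-contained illustration of the phase-2 step (ObjHeader and the flat heap vector are hypothetical, not HotSpot data structures):

#include <cstddef>
#include <vector>

// Hypothetical per-object header used only for this illustration.
struct ObjHeader {
  bool   marked   = false;  // set during phase 1 (marking)
  size_t size     = 1;      // object size, in words
  size_t new_addr = 0;      // forwarding address computed in phase 2
};

// Phase 2 analogue: assign each live object its compacted address.
void compute_new_addresses(std::vector<ObjHeader>& heap) {
  size_t free = 0;
  for (ObjHeader& o : heap) {
    if (o.marked) {
      o.new_addr = free;
      free += o.size;
    }
  }
  // Phase 3 would now rewrite every reference field (and the roots) to point
  // at new_addr; phase 4 would then slide each live object to new_addr.
}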
HeapWord* TwoGenerationCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                                  bool   is_large_noref,
                                                                  bool   is_tlab,
                                                                  bool*  notify_ref_lock) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;
  
  // The gc_prologues have not executed yet.  The value
  // for incremental_collection_will_fail() is the remnant
  // of the last collection.
  if (!gch->incremental_collection_will_fail()) {
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_large_noref   /* is_large_noref */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */,
                       notify_ref_lock  /* notify_ref_lock */);
  } else {
    // The incremental_collection_will_fail flag is set if the
    // next incremental collection will not succeed (e.g., the
    // DefNewGeneration didn't think it had space to promote all
    // its objects). However, that last incremental collection
    // continued, allowing all older generations to collect (and
    // perhaps change the state of the flag).
    // 
    // If we reach here, we know that an incremental collection of
    // all generations left us in the state where incremental collections
    // will fail, so we just try allocating the requested space. 
    // If the allocation fails everywhere, force a full collection.
    // We're probably very close to being out of memory, so forcing many
    // collections now probably won't help.
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::satisfy_failed_allocation:"
                    " attempting allocation anywhere before full collection");
    }
    result = gch->attempt_allocation(size, 
                                     is_large_noref, 
                                     is_tlab, 
                                     false /* first_only */);
    if (result != NULL) {
      assert(gch->is_in(result), "result not in heap");
      return result;
    }

    // Allocation request hasn't yet been met; try a full collection.
    gch->do_collection(true             /* full */, 
                       false            /* clear_all_soft_refs */, 
                       size             /* size */, 
                       is_large_noref   /* is_large_noref */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */, 
                       notify_ref_lock  /* notify_ref_lock */);
  }
  
  result = gch->attempt_allocation(size, is_large_noref, is_tlab, false /*first_only*/);
  
  if (result != NULL) {
    assert(gch->is_in(result), "result not in heap");
    return result;
  }
  
  // OK, collection failed, try expansion.
  for (int i = number_of_generations() - 1 ; i>= 0; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_large_noref, is_tlab)) {
      result = gen->expand_and_allocate(size, is_large_noref, is_tlab);
      if (result != NULL) {
        assert(gch->is_in(result), "result not in heap");
        return result;
      }
    }
  }
  
  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1); // Make sure the heap is fully compacted

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_large_noref   /* is_large_noref */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */,
                       notify_ref_lock  /* notify_ref_lock */);
  }

  result = gch->attempt_allocation(size, is_large_noref, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in(result), "result not in heap");
    return result;
  }
  
  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
  return NULL;
}
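Stripped of its logging and asserts, satisfy_failed_allocation escalates through a fixed ladder of remedies. The sketch below restates that ladder against a hypothetical interface; it is an outline of the logic above, not the HotSpot API.

#include <cstddef>

// Hypothetical interface for the heap operations used by the fallback ladder.
struct FailedAllocHeap {
  virtual bool  incremental_collection_will_fail() = 0;
  virtual void  collect(bool full, bool clear_all_soft_refs) = 0;
  virtual void* attempt_allocation(size_t size) = 0;   // any generation
  virtual void* expand_and_allocate(size_t size) = 0;
  virtual ~FailedAllocHeap() {}
};

void* satisfy_failed_allocation_sketch(FailedAllocHeap& h, size_t size) {
  if (!h.incremental_collection_will_fail()) {
    h.collect(false /* full */, false /* clear_all_soft_refs */);  // 1. incremental GC
  } else {
    if (void* p = h.attempt_allocation(size)) return p;            // 2. try anywhere first
    h.collect(true /* full */, false /* clear_all_soft_refs */);   // 3. full GC
  }
  if (void* p = h.attempt_allocation(size)) return p;
  if (void* p = h.expand_and_allocate(size)) return p;             // 4. grow the heap
  // 5. Last resort: clear soft references and fully compact, then try once more.
  h.collect(true /* full */, true /* clear_all_soft_refs */);
  return h.attempt_allocation(size);                               // NULL -> OOM
}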
HeapWord* TwoGenerationCollectorPolicy::mem_allocate_work(size_t size,
                                                          bool is_large_noref,
                                                          bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_large_noref, is_tlab)) {
      result = gen0->par_allocate(size, is_large_noref, is_tlab);
      if (result != NULL) {
        assert(gch->is_in(result), "result not in heap");
        return result;
      }
    }
    int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.  If jvmpi slow allocation
      // is enabled, allocate in later generations (since the
      // first generation is always full).
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, 
                                       is_large_noref, 
                                       is_tlab, 
                                       first_only);
      if (result != NULL) {
        assert(gch->is_in(result), "result not in heap");
        return result;
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }
      
    VM_GenCollectForAllocation op(size,
                                  is_large_noref,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      assert(result == NULL || gch->is_in(result), "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
		  " size=%d %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}
Example #10
/**
 * Allocate a block of the requested size, following the generational
 * allocation policy (retried in a loop):
 *   1. (lock-free) fast-path allocation in the young generation
 *   2. (locked)
 *      1) acquire the global heap lock (Heap_lock)
 *      2) if the requested size > the young generation's capacity, or a GC has
 *         been requested but cannot run yet, or an incremental GC would fail,
 *         try the young generation and then the old generation in turn;
 *         otherwise allocate from the young generation only
 *      3) if a GC has been requested but cannot run yet:
 *         a) if some generation can still expand its capacity, try to expand
 *            and allocate, from the oldest generation down to the youngest
 *         b) release the heap lock and wait for the GC to complete
 *      4) release the heap lock, submit a GC operation request, and wait for
 *         it to be executed or abandoned
 *      5) if the GC was abandoned or blocked by the GC locker, go back to 1
 *      6) if the GC overhead limit was exceeded, return NULL; otherwise return
 *         the allocated block
 *
 * @param size    the requested size, in words
 * @param is_tlab false: allocate directly from the heap's generations
 *                true:  allocate a new thread-local allocation buffer (TLAB)
 * @param gc_overhead_limit_was_exceeded whether the Full GC exceeded the GC overhead limit
 */
HeapWord* GenCollectorPolicy::mem_allocate_work(size_t size, bool is_tlab,
		bool* gc_overhead_limit_was_exceeded) {

  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());

  // Make sure no GC is currently in progress.
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");

  // In general gc_overhead_limit_was_exceeded should be false, so
  // set it to false here and reset it to true only if the gc time
  // limit is being exceeded as checked below.
  *gc_overhead_limit_was_exceeded = false;

  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {
    HandleMark hm; // discard any handles allocated in each iteration

    // The young generation must support lock-free (inline contiguous) allocation.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(), "Otherwise, must do alloc within heap lock");

    // Should allocation in the young generation be attempted first?
    if (gen0->should_allocate(size, is_tlab)) {
      // Fast path: try a lock-free allocation in the young generation.
      result = gen0->par_allocate(size, is_tlab);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }
    }

    unsigned int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }

      // Note that only large objects get a shot at being
      // allocated in later generations.
      // Should allocation be restricted to the young generation only?
      bool first_only = ! should_try_older_generation_allocation(size);

      // Try each generation of the heap in turn.
      result = gch->attempt_allocation(size, is_tlab, first_only);
      if (result != NULL) {
        assert(gch->is_in_reserved(result), "result not in heap");
        return result;
      }

      if (GC_locker::is_active_and_needs_gc()) {  // another thread has already requested a GC
        if (is_tlab) {
          // This request is for a new thread-local allocation buffer (objects would
          // then be allocated from it); return NULL so the caller allocates the
          // individual object directly from a generation instead.
          return NULL;
        }

        if (!gch->is_maximal_no_gc()) {  // some generation can still expand
          // Try each generation again, this time allowing the heap to expand.
          result = expand_heap_and_allocate(size, is_tlab);
          // result could be null if we are out of space
          if (result != NULL) {
            return result;
          }
        }

        // If this thread is not in a jni critical section, we stall
        // the requestor until the critical section has cleared and
        // GC allowed. When the critical section clears, a GC is
        // initiated by the last thread exiting the critical section; so
        // we retry the allocation sequence from the beginning of the loop,
        // rather than causing more, now probably unnecessary, GC attempts.
        JavaThread* jthr = JavaThread::current();
        if (!jthr->in_critical()) {
          MutexUnlocker mul(Heap_lock);
          // Wait for the JNI critical sections to clear and the resulting GC to run.
          GC_locker::stall_until_clear();
          continue;
        } else {
          if (CheckJNICalls) {
            fatal("Possible deadlock due to allocating while in jni critical section");
          }
          return NULL;
        }
      }

      // Allocation failed; decide to trigger a GC.
      gc_count_before = Universe::heap()->total_collections();
    }

    // Trigger a GC: enqueue a GC VM operation on the VMThread's operation queue.
    // The collection itself is carried out by the VMThread (or dedicated GC threads).
    VM_GenCollectForAllocation op(size, is_tlab, gc_count_before);
    VMThread::execute(&op);

    if (op.prologue_succeeded()) {  // the GC operation has completed
      result = op.result();
      // This thread did not get to trigger a GC (another thread may just have
      // triggered one), so retry the allocation.
      if (op.gc_locked()) {
         assert(result == NULL, "must be NULL if gc_locked() is true");
         continue;  // retry and/or stall as necessary
      }

      // Allocation has failed and a collection
      // has been done.  If the gc time limit was exceeded this
      // time, return NULL so that an out-of-memory error
      // will be thrown.  Clear gc_overhead_limit_exceeded
      // so that the overhead exceeded does not persist.

      // Did this GC exceed the configured GC overhead (time) limit?
      const bool limit_exceeded = size_policy()->gc_overhead_limit_exceeded();
      const bool softrefs_clear = all_soft_refs_clear();
      // If the limit was exceeded, soft references must have been cleared.
      assert(!limit_exceeded || softrefs_clear, "Should have been cleared");

      // The GC overhead limit was exceeded.
      if (limit_exceeded && softrefs_clear) {
        *gc_overhead_limit_was_exceeded = true;
        size_policy()->set_gc_overhead_limit_exceeded(false);
        if (op.result() != NULL) {
          CollectedHeap::fill_with_object(op.result(), size);
        }
        // The overhead limit was exceeded: return NULL so the caller throws OutOfMemoryError.
        return NULL;
      }

      // On success, the returned block must lie within the reserved heap.
      assert(result == NULL || gch->is_in_reserved(result), "result not in heap");
      return result;
    }

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) && (try_count % QueuedAllocationWarningCount == 0)) {
          warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t size=%d %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }

  }// for

}