Example #1
HeapWord* TwoGenerationCollectorPolicy::satisfy_failed_allocation(size_t size,
                                                                  bool   is_large_noref,
                                                                  bool   is_tlab,
                                                                  bool*  notify_ref_lock) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();
  // RAII: record allocation failure as the cause of any collections below;
  // the previous cause is restored when this scope exits.
  GCCauseSetter x(gch, GCCause::_allocation_failure);
  HeapWord* result = NULL;
  
  // The gc_prologues have not executed yet.  The value
  // of incremental_collection_will_fail() is the remnant
  // of the last collection.
  if (!gch->incremental_collection_will_fail()) {
    // Do an incremental collection.
    gch->do_collection(false            /* full */,
                       false            /* clear_all_soft_refs */,
                       size             /* size */,
                       is_large_noref   /* is_large_noref */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */,
                       notify_ref_lock  /* notify_ref_lock */);
  } else {
    // The incremental_collection_will_fail flag is set if the
    // next incremental collection will not succeed (e.g., the
    // DefNewGeneration didn't think it had space to promote all
    // its objects). However, that last incremental collection
    // continued, allowing all older generations to collect (and
    // perhaps change the state of the flag).
    // 
    // If we reach here, we know that an incremental collection of
    // all generations left us in the state where incremental collections
    // will fail, so we just try allocating the requested space. 
    // If the allocation fails everywhere, force a full collection.
    // We're probably very close to being out of memory, so forcing many
    // collections now probably won't help.
    if (PrintGC && Verbose) {
      gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::satisfy_failed_allocation:"
                    " attempting allocation anywhere before full collection");
    }
    result = gch->attempt_allocation(size, 
                                     is_large_noref, 
                                     is_tlab, 
                                     false /* first_only */);
    if (result != NULL) {
      assert(gch->is_in(result), "result not in heap");
      return result;
    }

    // Allocation request hasn't yet been met; try a full collection.
    gch->do_collection(true             /* full */, 
                       false            /* clear_all_soft_refs */, 
                       size             /* size */, 
                       is_large_noref   /* is_large_noref */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */, 
                       notify_ref_lock  /* notify_ref_lock */);
  }
  
  // Whichever collection ran above, retry the allocation now that space
  // may have been freed.
  result = gch->attempt_allocation(size, is_large_noref, is_tlab, false /* first_only */);
  
  if (result != NULL) {
    assert(gch->is_in(result), "result not in heap");
    return result;
  }
  
  // OK, collection failed, try expansion.
  for (int i = number_of_generations() - 1; i >= 0; i--) {
    Generation *gen = gch->get_gen(i);
    if (gen->should_allocate(size, is_large_noref, is_tlab)) {
      result = gen->expand_and_allocate(size, is_large_noref, is_tlab);
      if (result != NULL) {
        assert(gch->is_in(result), "result not in heap");
        return result;
      }
    }
  }
  
  // If we reach this point, we're really out of memory. Try every trick
  // we can to reclaim memory. Force collection of soft references. Force
  // a complete compaction of the heap. Any additional methods for finding
  // free memory should be here, especially if they are expensive. If this
  // attempt fails, an OOM exception will be thrown.
  {
    // Make sure the heap is fully compacted: setting
    // MarkSweepAlwaysCompactCount to 1 makes this mark-sweep cycle compact
    // the entire heap rather than leaving dead space in place.
    IntFlagSetting flag_change(MarkSweepAlwaysCompactCount, 1);

    gch->do_collection(true             /* full */,
                       true             /* clear_all_soft_refs */,
                       size             /* size */,
                       is_large_noref   /* is_large_noref */,
                       is_tlab          /* is_tlab */,
                       number_of_generations() - 1 /* max_level */,
                       notify_ref_lock  /* notify_ref_lock */);
  }

  // Final allocation attempt after the maximally compacting collection.
  result = gch->attempt_allocation(size, is_large_noref, is_tlab, false /* first_only */);
  if (result != NULL) {
    assert(gch->is_in(result), "result not in heap");
    return result;
  }
  
  // What else?  We might try synchronous finalization later.  If the total
  // space available is large enough for the allocation, then a more
  // complete compaction phase than we've tried so far might be
  // appropriate.
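  // Give up: with a NULL result the caller will throw OutOfMemoryError.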
  return NULL;
}
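
Both the GCCauseSetter at the top of this function and the IntFlagSetting guarding the final collection follow the same RAII save/set/restore idiom. Below is a minimal sketch of that idiom in standalone C++; FlagSetting, CompactCountFlag, and fully_compacting_collection are illustrative names, not the HotSpot types.

#include <cassert>

// Save/set/restore helper in the style of HotSpot's IntFlagSetting and
// GCCauseSetter: sets a variable for the lifetime of a scope and restores
// the previous value on exit, even on an early return.
template <typename T>
class FlagSetting {
  T* _flag;
  T  _saved;
public:
  FlagSetting(T& flag, T value) : _flag(&flag), _saved(flag) { flag = value; }
  ~FlagSetting() { *_flag = _saved; }
};

int CompactCountFlag = 4;   // hypothetical stand-in for MarkSweepAlwaysCompactCount

void fully_compacting_collection() {
  FlagSetting<int> guard(CompactCountFlag, 1);
  assert(CompactCountFlag == 1);   // a collection run here would compact fully
}                                  // destructor restores the old value

int main() {
  fully_compacting_collection();
  assert(CompactCountFlag == 4);   // restored on scope exit
  return 0;
}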
Example #2
HeapWord* TwoGenerationCollectorPolicy::mem_allocate_work(size_t size,
                                                          bool is_large_noref,
                                                          bool is_tlab) {
  GenCollectedHeap *gch = GenCollectedHeap::heap();

  debug_only(gch->check_for_valid_allocation_state());
  assert(gch->no_gc_in_progress(), "Allocation during gc not allowed");
  HeapWord* result = NULL;

  // Loop until the allocation is satisfied,
  // or unsatisfied after GC.
  for (int try_count = 1; /* return or throw */; try_count += 1) {

    // First allocation attempt is lock-free.
    Generation *gen0 = gch->get_gen(0);
    assert(gen0->supports_inline_contig_alloc(),
      "Otherwise, must do alloc within heap lock");
    if (gen0->should_allocate(size, is_large_noref, is_tlab)) {
      result = gen0->par_allocate(size, is_large_noref, is_tlab);
      if (result != NULL) {
        assert(gch->is_in(result), "result not in heap");
        return result;
      }
    }
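    // The lock-free fast path failed or was refused; fall through to the
    // slow path under the Heap_lock.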
    int gc_count_before;  // read inside the Heap_lock locked region
    {
      MutexLocker ml(Heap_lock);
      if (PrintGC && Verbose) {
        gclog_or_tty->print_cr("TwoGenerationCollectorPolicy::mem_allocate_work:"
                      " attempting locked slow path allocation");
      }
      // Note that only large objects get a shot at being
      // allocated in later generations.  If JVMPI slow allocation
      // is enabled, allocate in later generations (since the
      // first generation is always full).
      bool first_only = ! should_try_older_generation_allocation(size);

      result = gch->attempt_allocation(size, 
                                       is_large_noref, 
                                       is_tlab, 
                                       first_only);
      if (result != NULL) {
        assert(gch->is_in(result), "result not in heap");
        return result;
      }

      // Read the gc count while the heap lock is held.
      gc_count_before = Universe::heap()->total_collections();
    }
      
    // Hand the collection off to the VM thread; the gc count sampled under
    // the lock lets the VM operation detect a racing collection.
    VM_GenCollectForAllocation op(size,
                                  is_large_noref,
                                  is_tlab,
                                  gc_count_before);
    VMThread::execute(&op);
    if (op.prologue_succeeded()) {
      result = op.result();
      assert(result == NULL || gch->is_in(result), "result not in heap");
      return result;
    }
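    // If the prologue did not succeed, another thread likely completed a
    // collection after gc_count_before was read; fall through, warn if
    // needed, and retry the whole allocation.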

    // Give a warning if we seem to be looping forever.
    if ((QueuedAllocationWarningCount > 0) &&
        (try_count % QueuedAllocationWarningCount == 0)) {
          warning("TwoGenerationCollectorPolicy::mem_allocate_work retries %d times \n\t"
		  " size=%d %s", try_count, size, is_tlab ? "(TLAB)" : "");
    }
  }
}
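
The gc_count_before handshake deserves a note: the allocating thread samples the collection counter while holding Heap_lock, releases the lock, and hands the request to the VM thread; the operation's prologue re-checks the counter and declines to run if another collection happened in between, so the requester simply loops. Below is a minimal standalone sketch of that optimistic retry pattern using std::mutex instead of HotSpot's VMThread machinery; all names and the malloc stub are illustrative.

#include <cstddef>
#include <cstdlib>
#include <mutex>

std::mutex heap_lock;
unsigned total_collections = 0;   // bumped by every collection

// Stub for the real generation allocators; here it just delegates to malloc.
void* try_alloc(std::size_t size) { return std::malloc(size); }

// Stand-in for the VM operation: run a collection only if nobody else has
// collected since the caller sampled the counter (the "prologue" check).
bool collect_for_allocation(unsigned gc_count_before) {
  std::lock_guard<std::mutex> guard(heap_lock);
  if (total_collections != gc_count_before) {
    return false;                 // prologue fails: another thread beat us
  }
  ++total_collections;            // ...the collection itself would go here...
  return true;
}

void* allocate(std::size_t size) {
  for (;;) {
    unsigned gc_count_before;
    {
      std::lock_guard<std::mutex> guard(heap_lock);
      if (void* p = try_alloc(size)) return p;   // locked slow-path attempt
      gc_count_before = total_collections;       // sample under the lock
    }
    // Lock released: request a collection keyed to the sampled count.
    if (collect_for_allocation(gc_count_before)) {
      if (void* p = try_alloc(size)) return p;   // retry after our GC
    }
    // Either the counter moved or allocation still failed: loop and retry.
  }
}

int main() {
  std::free(allocate(64));
  return 0;
}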