GCStatInfo::GCStatInfo(int num_pools) {
  // initialize the arrays for memory usage
  _before_gc_usage_array = (MemoryUsage*) NEW_C_HEAP_ARRAY(MemoryUsage, num_pools, mtInternal);
  _after_gc_usage_array  = (MemoryUsage*) NEW_C_HEAP_ARRAY(MemoryUsage, num_pools, mtInternal);
  _usage_array_size = num_pools;
  clear();
}
Example 2
GCStatInfo::GCStatInfo(int num_pools) {
  // initialize the arrays for memory usage
  _before_gc_usage_array = (MemoryUsage*) NEW_C_HEAP_ARRAY(MemoryUsage, num_pools);
  _after_gc_usage_array  = (MemoryUsage*) NEW_C_HEAP_ARRAY(MemoryUsage, num_pools);
  size_t len = num_pools * sizeof(MemoryUsage);
  memset(_before_gc_usage_array, 0, len);
  memset(_after_gc_usage_array, 0, len);
  _usage_array_size = num_pools;
}
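Both variants hand ownership of the two parallel arrays to the GCStatInfo object, so the matching destructor has to release them with the FREE_C_HEAP_ARRAY counterpart. A minimal sketch of that cleanup (the exact macro arity differs between HotSpot releases, as the two constructors above already show):
GCStatInfo::~GCStatInfo() {
  // Release the C-heap arrays allocated in the constructor.
  FREE_C_HEAP_ARRAY(MemoryUsage, _before_gc_usage_array);
  FREE_C_HEAP_ARRAY(MemoryUsage, _after_gc_usage_array);
}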
ArtaObjectPool::ArtaObjectPool(unsigned int size) {
  // FIXME: make sure size is a power of 16
  _buffer_size = size;
  _object_map = NEW_C_HEAP_ARRAY(objectRef, _buffer_size);
  _last_dispensed_id = -1;
  _buffer_full = false;
}
// Public methods that get called within the scope of the closure
void allocate() {
  _list = NEW_C_HEAP_ARRAY(Handle, _count, mtInternal);
  assert(_list != NULL, "Out of memory");  // debug builds fail fast here
  if (_list == NULL) {                     // product builds fall back to an empty list
    _count = 0;
  }
}
// create a C-heap allocated address location map for an nmethod
void JvmtiCodeBlobEvents::build_jvmti_addr_location_map(const CodeBlob* blob,
                                                        jvmtiAddrLocationMap** map_ptr,
                                                        jint* map_length_ptr)
{
  ResourceMark rm;
  jvmtiAddrLocationMap* map = NULL;
  jint map_length = 0;

  // Map code addresses into BCIs in the outermost method corresponding to the blob
  if (!blob->is_native_method()) {
    const DebugMap *info = blob->debuginfo();
    if( info ) {
      int table_length = info->tablesize();
      map = NEW_C_HEAP_ARRAY(jvmtiAddrLocationMap, table_length);
      for( int i=0; i<table_length; i= info->next_idx(i) ) {
        int pc = info->get_relpc(i);
        if( pc == NO_MAPPING ) continue;
        const DebugScope *ds = info->get(pc);
        while( ds->caller() != NULL ) ds = ds->caller();
        assert(map_length < table_length, "sanity range check");
        map[map_length].start_address = blob->code_begins()+pc;
        map[map_length].location      = ds->bci();
        map_length++;
      }
    }
  }
  *map_ptr = map;
  *map_length_ptr = map_length;
}
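The map is C-heap allocated and ownership passes to the caller, which in JVMTI is expected to free it once the event has been posted. A hedged usage sketch (blob here stands for whichever CodeBlob is being reported):
jvmtiAddrLocationMap* map = NULL;
jint map_length = 0;
JvmtiCodeBlobEvents::build_jvmti_addr_location_map(blob, &map, &map_length);
// ... post the CompiledMethodLoad event with (map_length, map) ...
if (map != NULL) {
  FREE_C_HEAP_ARRAY(jvmtiAddrLocationMap, map);  // the caller owns the map
}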
Example 6
inline GenericTaskQueueSet<T, F>::GenericTaskQueueSet(int n) : _n(n) {
  typedef T* GenericTaskQueuePtr;
  _queues = NEW_C_HEAP_ARRAY(GenericTaskQueuePtr, n, F);
  for (int i = 0; i < n; i++) {
    _queues[i] = NULL;
  }
}
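The set starts out with every slot NULL; workers are expected to register their queues afterwards. A sketch of that registration step, assuming the accessors follow the usual HotSpot shape:
template<class T, MEMFLAGS F>
void GenericTaskQueueSet<T, F>::register_queue(uint i, T* q) {
  assert(i < (uint)_n, "index out of range.");
  _queues[i] = q;  // slot was NULL-initialized in the constructor
}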
void GPGC_RemapTargetArray::initialize(uint32_t max_length)
{
  _allocated_length = max_length;
  _array            = NEW_C_HEAP_ARRAY(RemapTarget, _allocated_length);

  reset();
}
void GPGC_PopulationArray::initialize(uint32_t max_length)
{
  _allocated_length = max_length;
  _array            = NEW_C_HEAP_ARRAY(PagePop, _allocated_length);

  reset();
}
Example 9
HeapRegionClaimer::HeapRegionClaimer(uint n_workers) :
    _n_workers(n_workers), _n_regions(G1CollectedHeap::heap()->_hrm._allocated_heapregions_length), _claims(NULL) {
  assert(n_workers > 0, "Need at least one worker.");
  uint* new_claims = NEW_C_HEAP_ARRAY(uint, _n_regions, mtGC);
  memset(new_claims, Unclaimed, sizeof(*_claims) * _n_regions);
  _claims = new_claims;
}
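Workers then claim regions by atomically flipping a slot from Unclaimed to Claimed; only the winner of the race processes the region. A sketch of that step (the Atomic::cmpxchg signature varies across JDK releases):
bool HeapRegionClaimer::claim_region(uint region_index) {
  assert(region_index < _n_regions, "Invalid index.");
  uint old_val = Atomic::cmpxchg(Claimed, &_claims[region_index], Unclaimed);
  return old_val == Unclaimed;  // true only for the thread that won the race
}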
Example 10
void OopMapCacheEntry::allocate_bit_mask() {
  if (mask_size() > small_mask_limit) {
    assert(_bit_mask[0] == 0, "bit mask should be new or just flushed");
    _bit_mask[0] = (intptr_t)
      NEW_C_HEAP_ARRAY(uintptr_t, mask_word_size());
  }
}
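The trick here is that small masks live inline in _bit_mask itself, while large ones are a C-heap array whose address is stashed in _bit_mask[0]; only the latter case needs explicit freeing. A sketch of the companion cleanup:
void OopMapCacheEntry::deallocate_bit_mask() {
  if (mask_size() > small_mask_limit && _bit_mask[0] != 0) {
    FREE_C_HEAP_ARRAY(uintptr_t, _bit_mask[0]);  // free the out-of-line mask
    _bit_mask[0] = 0;
  }
}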
GCTaskThread::GCTaskThread(GCTaskManager* manager,
                           uint which) :
  _manager(manager),
  _new_gc_mode_requested(false),
  _new_gc_mode(true),
  _time_stamps(NULL),
  _time_stamp_index(0),
  _preallocated_page(NoPage)
{
  if (!os::create_thread(this, ttype::gc_thread))
    vm_exit_out_of_memory(0, "Cannot create GC thread. Out of system resources.");

  if (PrintGCTaskTimeStamps) {
    _time_stamps = NEW_C_HEAP_ARRAY(GCTaskTimeStamp, GCTaskTimeStampEntries);

    guarantee(_time_stamps != NULL, "Sanity");
  }

  set_id(which);

  if (UseGenPauselessGC) {
    set_name("GC task thread#%d (GenPauselessGC)", which);
  } else {
    set_name("GC task thread#%d (ParallelGC)", which);
  }
}
/////////////////////////////////////////////////////
//
// The compact hash table writer implementations
//
CompactHashtableWriter::CompactHashtableWriter(int table_type,
                                               int num_entries,
                                               CompactHashtableStats* stats) {
  assert(DumpSharedSpaces, "dump-time only");
  _type = table_type;
  _num_entries = num_entries;
  _num_buckets = number_of_buckets(_num_entries);
  _buckets = NEW_C_HEAP_ARRAY(Entry*, _num_buckets, mtSymbol);
  memset(_buckets, 0, sizeof(Entry*) * _num_buckets);

  /* bucket sizes table */
  _bucket_sizes = NEW_C_HEAP_ARRAY(juint, _num_buckets, mtSymbol);
  memset(_bucket_sizes, 0, sizeof(juint) * _num_buckets);

  stats->hashentry_count = _num_entries;
  // Compact buckets' entries will have only the 4-byte offset, but
  // we don't know how many there will be at this point. So use a
  // conservative estimate here. The size is adjusted later when we
  // write out the buckets.
  stats->hashentry_bytes = _num_entries * 8;
  stats->bucket_count    = _num_buckets;
  stats->bucket_bytes    = (_num_buckets + 1) * (sizeof(juint));
  _stats = stats;

  // See compactHashtable.hpp for table layout
  _required_bytes = sizeof(juint) * 2; // _base_address, written as 2 juints
  _required_bytes += sizeof(juint) +   // num_entries
                    sizeof(juint) +    // num_buckets
                    stats->hashentry_bytes +
                    stats->bucket_bytes;
}
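To make the size estimate concrete: for a hypothetical table of 1000 entries hashed into 256 buckets, the fields above work out as follows (juint being 4 bytes):
// Illustrative arithmetic only (hypothetical entry/bucket counts):
//   hashentry_bytes = 1000 * 8      = 8000   (conservative 8 bytes per entry)
//   bucket_bytes    = (256 + 1) * 4 = 1028   (includes the trailing slot)
//   _required_bytes = 4*2 + 4 + 4 + 8000 + 1028 = 9044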
Example 13
inline BasicHashtable::BasicHashtable(int table_size, int entry_size) {
  // Called on startup, no locking needed
  initialize(table_size, entry_size, 0);
  _buckets = NEW_C_HEAP_ARRAY(HashtableBucket, table_size);
  for (int index = 0; index < _table_size; index++) {
    _buckets[index].clear();
  }
}
Example 14
KlassInfoTable::KlassInfoTable(int size, HeapWord* permgen_bottom) {
  _size = size;
  _permgen_bottom = permgen_bottom;
  _buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, _size);

  for (int index = 0; index < _size; index++) {
    _buckets[index].initialize();
  }
}
Example 15
FreeIdSet::FreeIdSet(uint size, Monitor* mon) :
  _size(size), _mon(mon), _hd(0), _waiters(0), _claimed(0)
{
  guarantee(size != 0, "must be");
  _ids = NEW_C_HEAP_ARRAY(uint, size, mtGC);
  for (uint i = 0; i < size - 1; i++) {
    _ids[i] = i+1;
  }
  _ids[size-1] = end_of_list; // end of list.
}
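Each slot of _ids stores the index of the next free id, so the array doubles as an intrusive free list with _hd as its head: claiming pops the head and releasing pushes it back (the real class additionally blocks on the monitor when the list is empty). A standalone sketch of the core idea, not HotSpot code:
#include <cstdint>
#include <cassert>

struct FreeIdSketch {
  static const uint32_t end_of_list = UINT32_MAX;
  uint32_t* _ids;
  uint32_t  _hd;

  explicit FreeIdSketch(uint32_t size) : _ids(new uint32_t[size]), _hd(0) {
    for (uint32_t i = 0; i < size - 1; i++) _ids[i] = i + 1;  // chain the slots
    _ids[size - 1] = end_of_list;
  }
  ~FreeIdSketch() { delete[] _ids; }

  uint32_t claim() {           // pop the head of the free list
    assert(_hd != end_of_list);
    uint32_t id = _hd;
    _hd = _ids[id];
    return id;
  }
  void release(uint32_t id) {  // push the id back onto the list
    _ids[id] = _hd;
    _hd = id;
  }
};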
Example 16
oldSpace::oldSpace(char *name, int &size) {
  next_space= NULL;

  offset_array = NEW_C_HEAP_ARRAY(u_char, Universe::old_gen.virtual_space.reserved_size()/card_size);
  set_name(name);
  set_bottom((oop*) Universe::old_gen.virtual_space.low());
  set_top((oop*)    Universe::old_gen.virtual_space.low());
  set_end((oop*)    Universe::old_gen.virtual_space.high());
  initialize_threshold();
}
Example 17
void
CardTableModRefBS::
get_LNC_array_for_space(Space* sp,
                        jbyte**& lowest_non_clean,
                        uintptr_t& lowest_non_clean_base_chunk_index,
                        size_t& lowest_non_clean_chunk_size) {

  int       i        = find_covering_region_containing(sp->bottom());
  MemRegion covered  = _covered[i];
  size_t    n_chunks = chunks_to_cover(covered);

  // Only the first thread to obtain the lock will resize the
  // LNC array for the covered region.  Any later expansion can't affect
  // the used_at_save_marks region.
  // (I observed a bug in which the first thread to execute this would
  // resize, and then it would cause "expand_and_allocates" that would
  // increase the number of chunks in the covered region.  Then a second
  // thread would come and execute this, see that the size didn't match,
  // and free and allocate again.  So the first thread would be using a
  // freed "_lowest_non_clean" array.)

  // Do a dirty read here. If we pass the conditional then take the rare
  // event lock and do the read again in case some other thread had already
  // succeeded and done the resize.
  int cur_collection = Universe::heap()->total_collections();
  if (_last_LNC_resizing_collection[i] != cur_collection) {
    MutexLocker x(ParGCRareEvent_lock);
    if (_last_LNC_resizing_collection[i] != cur_collection) {
      if (_lowest_non_clean[i] == NULL ||
          n_chunks != _lowest_non_clean_chunk_size[i]) {

        // Should we delete the old?
        if (_lowest_non_clean[i] != NULL) {
          assert(n_chunks != _lowest_non_clean_chunk_size[i],
                 "logical consequence");
          FREE_C_HEAP_ARRAY(CardPtr, _lowest_non_clean[i]);
          _lowest_non_clean[i] = NULL;
        }
        // Now allocate a new one if necessary.
        if (_lowest_non_clean[i] == NULL) {
          _lowest_non_clean[i]                  = NEW_C_HEAP_ARRAY(CardPtr, n_chunks);
          _lowest_non_clean_chunk_size[i]       = n_chunks;
          _lowest_non_clean_base_chunk_index[i] = addr_to_chunk_index(covered.start());
          for (int j = 0; j < (int)n_chunks; j++)
            _lowest_non_clean[i][j] = NULL;
        }
      }
      _last_LNC_resizing_collection[i] = cur_collection;
    }
  }
  // In any case, now do the initialization.
  lowest_non_clean                  = _lowest_non_clean[i];
  lowest_non_clean_base_chunk_index = _lowest_non_clean_base_chunk_index[i];
  lowest_non_clean_chunk_size       = _lowest_non_clean_chunk_size[i];
}
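The shape of this is the classic dirty-read-then-recheck idiom: test without the lock, and only if that suggests work, take the rare-event lock and test again before acting. Stripped of the card-table specifics, a standalone sketch (std::mutex standing in for ParGCRareEvent_lock, names hypothetical):
#include <mutex>

struct LncResizer {
  std::mutex _rare_event_lock;
  int _last_resizing_collection = -1;

  void ensure_sized(int cur_collection) {
    if (_last_resizing_collection != cur_collection) {    // dirty read, no lock held
      std::lock_guard<std::mutex> x(_rare_event_lock);
      if (_last_resizing_collection != cur_collection) {  // recheck under the lock
        // ... free the old array and allocate the new one here ...
        _last_resizing_collection = cur_collection;
      }
    }
  }
};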
Example 18
G1StringDedupQueue::G1StringDedupQueue() :
  _cursor(0),
  _cancel(false),
  _empty(true),
  _dropped(0) {
  _nqueues = ParallelGCThreads;
  _queues = NEW_C_HEAP_ARRAY(G1StringDedupWorkerQueue, _nqueues, mtGC);
  for (size_t i = 0; i < _nqueues; i++) {
    new (_queues + i) G1StringDedupWorkerQueue(G1StringDedupWorkerQueue::default_segment_size(), _max_cache_size, _max_size);
  }
}
Example 19
KlassInfoTable::KlassInfoTable(int size, HeapWord* ref) {
  _size = 0;
  _ref = ref;
  _buckets = NEW_C_HEAP_ARRAY(KlassInfoBucket, size, mtInternal);
  if (_buckets != NULL) {
    _size = size;
    for (int index = 0; index < _size; index++) {
      _buckets[index].initialize();
    }
  }
}
Example 20
void NotificationQueue::put(oop obj) {
  if (array == NULL) array = NEW_C_HEAP_ARRAY(oop, size);
  if (succ(last) == first) {
    int new_size = size * 2;
    int new_last = 0;
    // allocate new_array
    oop* new_array = NEW_C_HEAP_ARRAY(oop, new_size);
    // copy from array to new_array
    for (int index = first; index != last; index = succ(index))
      new_array[new_last++] = array[index];
    free(array);
    // replace array
    array = new_array;
    size  = new_size;
    first = 0;
    last  = new_last;
  } 
  array[last] = obj;
  last = succ(last);
}
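put() relies on a succ helper and on keeping one slot unused, so that succ(last) == first unambiguously means full rather than empty. The helper is presumably the usual modular increment:
// Presumed shape of the successor helper: advance one slot, wrapping.
int NotificationQueue::succ(int index) {
  return (index + 1) % size;
}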
Example 21
void LoaderConstraintTable::ensure_loader_constraint_capacity(
                                                     LoaderConstraintEntry* p,
                                                     int nfree) {
    if (p->max_loaders() - p->num_loaders() < nfree) {
        int n = nfree + p->num_loaders();
        oop* new_loaders = NEW_C_HEAP_ARRAY(oop, n);
        memcpy(new_loaders, p->loaders(), sizeof(oop) * p->num_loaders());
        p->set_max_loaders(n);
        FREE_C_HEAP_ARRAY(oop, p->loaders());
        p->set_loaders(new_loaders);
    }
}
void CardTableModRefBSForCTRS::initialize() {
  CardTableModRefBS::initialize();
  _lowest_non_clean =
    NEW_C_HEAP_ARRAY(CardArr, _max_covered_regions, mtGC);
  _lowest_non_clean_chunk_size =
    NEW_C_HEAP_ARRAY(size_t, _max_covered_regions, mtGC);
  _lowest_non_clean_base_chunk_index =
    NEW_C_HEAP_ARRAY(uintptr_t, _max_covered_regions, mtGC);
  _last_LNC_resizing_collection =
    NEW_C_HEAP_ARRAY(int, _max_covered_regions, mtGC);
  if (_lowest_non_clean == NULL
      || _lowest_non_clean_chunk_size == NULL
      || _lowest_non_clean_base_chunk_index == NULL
      || _last_LNC_resizing_collection == NULL)
    vm_exit_during_initialization("couldn't allocate an LNC array.");
  for (int i = 0; i < _max_covered_regions; i++) {
    _lowest_non_clean[i] = NULL;
    _lowest_non_clean_chunk_size[i] = 0;
    _last_LNC_resizing_collection[i] = -1;
  }
}
Example 23
// @requires UseG1GC
TEST_VM(FreeRegionList, length) {
  if (!UseG1GC) {
    return;
  }

  FreeRegionList l("test");
  const uint num_regions_in_test = 5;

  // Create a fake heap. It does not need to be valid, as the HeapRegion constructor
  // does not access it.
  MemRegion heap(NULL, num_regions_in_test * HeapRegion::GrainWords);

  // Allocate a fake BOT because the HeapRegion constructor initializes
  // the BOT.
  size_t bot_size = G1BlockOffsetTable::compute_size(heap.word_size());
  HeapWord* bot_data = NEW_C_HEAP_ARRAY(HeapWord, bot_size, mtGC);
  ReservedSpace bot_rs(G1BlockOffsetTable::compute_size(heap.word_size()));
  G1RegionToSpaceMapper* bot_storage =
    G1RegionToSpaceMapper::create_mapper(bot_rs,
                                         bot_rs.size(),
                                         os::vm_page_size(),
                                         HeapRegion::GrainBytes,
                                         BOTConstants::N_bytes,
                                         mtGC);
  G1BlockOffsetTable bot(heap, bot_storage);
  bot_storage->commit_regions(0, num_regions_in_test);

  // Set up memory regions for the heap regions.
  MemRegion mr0(heap.start(), HeapRegion::GrainWords);
  MemRegion mr1(mr0.end(), HeapRegion::GrainWords);
  MemRegion mr2(mr1.end(), HeapRegion::GrainWords);
  MemRegion mr3(mr2.end(), HeapRegion::GrainWords);
  MemRegion mr4(mr3.end(), HeapRegion::GrainWords);

  HeapRegion hr0(0, &bot, mr0);
  HeapRegion hr1(1, &bot, mr1);
  HeapRegion hr2(2, &bot, mr2);
  HeapRegion hr3(3, &bot, mr3);
  HeapRegion hr4(4, &bot, mr4);
  l.add_ordered(&hr1);
  l.add_ordered(&hr0);
  l.add_ordered(&hr3);
  l.add_ordered(&hr4);
  l.add_ordered(&hr2);

  EXPECT_EQ(l.length(), num_regions_in_test) << "Wrong free region list length";
  l.verify_list();

  bot_storage->uncommit_regions(0, num_regions_in_test);
  delete bot_storage;
  FREE_C_HEAP_ARRAY(HeapWord, bot_data);
}
Example 24
void InliningDatabase::allocate_table(unsigned int size) {
  if (TraceInliningDatabase) {
    std->print_cr("InliningDatabase::allocate_table(%d)", size);
  }
  table_size      = size;
  table_size_mask = size - 1;
  table_no        = 0;
  table           = NEW_C_HEAP_ARRAY(InliningDatabaseKey, table_size);
  // clear the table
  for (unsigned int index = 0; index < table_size; index++) {
    table[index].clear();
  }
}
Example 25
void G1RemSet::prepare_for_oops_into_collection_set_do() {
  cleanupHRRS();
  _g1->set_refine_cte_cl_concurrency(false);
  DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
  dcqs.concatenate_logs();

  guarantee( _cards_scanned == NULL, "invariant" );
  _cards_scanned = NEW_C_HEAP_ARRAY(size_t, n_workers(), mtGC);
  for (uint i = 0; i < n_workers(); ++i) {
    _cards_scanned[i] = 0;
  }
  _total_cards_scanned = 0;
}
Example 26
void bootstrap::add(oop obj) {
  if (number_of_oops >= max_number_of_oops) {
    int new_size = max_number_of_oops * 2;
    printf("Expanding boot table to %d\n", new_size);
    oop* new_oop_table = NEW_C_HEAP_ARRAY(oop, new_size);
    for(int index = 0; index < max_number_of_oops; index++)
      new_oop_table[index] = oop_table[index];

    max_number_of_oops = new_size;
    FreeHeap(oop_table);
    oop_table = new_oop_table;
  }
  oop_table[number_of_oops++] = obj;
}
ThreadCodeBuffer::ThreadCodeBuffer(int size_in_bytes, nmethod *nm, address real_pc) {
  _code = NEW_C_HEAP_ARRAY(u_char, size_in_bytes);
  assert(_code != NULL, "out of memory");
  os::unguard_memory((char*) _code, size_in_bytes);
  _size = size_in_bytes;
  _method = nm;
  _real_pc = real_pc;

  debug_only(
    // Initialize area 
    for(int i=0; i<size_in_bytes; i++) {
      _code[i] = 0xEE;  
    }
  )
}
Example 28
symbolTableLink* symbolTable::new_link(symbolOop s, symbolTableLink* n) {
  symbolTableLink* res;
  if (free_list) {
    res = free_list;
    free_list = free_list->next;
  } else {
    const int block_size = 500;
    if (first_free_link == end_block) {
      first_free_link = NEW_C_HEAP_ARRAY(symbolTableLink, block_size);
      end_block = first_free_link + block_size;
    }
    res = first_free_link++;
  } 
  res->symbol = s;
  res->next   = n;
  return res;
}
Example 29
WorkerDataArray<T>::WorkerDataArray(uint length,
                                    const char* title,
                                    bool print_sum,
                                    int log_level,
                                    uint indent_level) :
 _title(title),
 _length(0),
 _print_sum(print_sum),
 _log_level(log_level),
 _indent_level(indent_level),
 _thread_work_items(NULL),
 _enabled(true) {
  assert(length > 0, "Must have some workers to store data for");
  _length = length;
  _data = NEW_C_HEAP_ARRAY(T, _length, mtGC);
  reset();
}
Example 30
TenuredGeneration::TenuredGeneration(ReservedSpace rs,
                                     size_t initial_byte_size, int level,
                                     GenRemSet* remset) :
  OneContigSpaceCardGeneration(rs, initial_byte_size,
                               MinHeapDeltaBytes, level, remset, NULL)
{
  HeapWord* bottom = (HeapWord*) _virtual_space.low();
  HeapWord* end    = (HeapWord*) _virtual_space.high();
  _the_space  = new TenuredSpace(_bts, MemRegion(bottom, end));
  _the_space->reset_saved_mark();
  _shrink_factor = 0;
  _capacity_at_prologue = 0;

  _gc_stats = new GCStats();

  // initialize performance counters

  const char* gen_name = "old";

  // Generation Counters -- generation 1, 1 subspace
  _gen_counters = new GenerationCounters(gen_name, 1, 1, &_virtual_space);

  _gc_counters = new CollectorCounters("MSC", 1);

  _space_counters = new CSpaceCounters(gen_name, 0,
                                       _virtual_space.reserved_size(),
                                       _the_space, _gen_counters);
#ifndef SERIALGC
  if (UseParNewGC && ParallelGCThreads > 0) {
    typedef ParGCAllocBufferWithBOT* ParGCAllocBufferWithBOTPtr;
    _alloc_buffers = NEW_C_HEAP_ARRAY(ParGCAllocBufferWithBOTPtr,
                                      ParallelGCThreads);
    if (_alloc_buffers == NULL)
      vm_exit_during_initialization("Could not allocate alloc_buffers");
    for (uint i = 0; i < ParallelGCThreads; i++) {
      _alloc_buffers[i] =
        new ParGCAllocBufferWithBOT(OldPLABSize, _bts);
      if (_alloc_buffers[i] == NULL)
        vm_exit_during_initialization("Could not allocate alloc_buffers");
    }
  } else {
    _alloc_buffers = NULL;
  }
#endif // SERIALGC
}