Example 1
0
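/* Print verbose per-collection statistics: NOS stats for a minor collection,
   MOS stats otherwise, plus LOS stats when the large object space was collected. */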
void gc_gen_stats_verbose(GC_Gen* gc)
{
  GC_Gen_Stats* stats = gc->stats;
  Boolean is_los_collected = stats->is_los_collected;
  if (collect_is_minor()){
    TRACE2("gc.space", "GC: NOS Collection stats: "
      <<"\nGC: " << (gc_is_gen_mode()?"generational":"nongenerational")
      <<"\nGC: collection algo: " << (minor_is_semispace()?"semi-space":"partial-forward")
      <<"\nGC: num surviving objs: " << stats->nos_surviving_obj_num_minor
      <<"\nGC: size surviving objs: " << verbose_print_size(stats->nos_surviving_obj_size_minor)
      <<"\nGC: surviving ratio: " << (int)(stats->nos_surviving_ratio_minor*100) << "%\n");
  }else{
    TRACE2("gc.space", "GC: MOS Collection stats: "
      <<"\nGC: collection algo: " << (major_is_marksweep()?"mark-sweep":"slide compact")
      <<"\nGC: num surviving objs: "<<stats->nos_mos_suviving_obj_num_major
      <<"\nGC: size surviving objs: "<<verbose_print_size(stats->nos_mos_suviving_obj_size_major)
      <<"\nGC: surviving ratio: "<<(int)(stats->nos_mos_suviving_ratio_major*100)<<"%\n");
  }

  if(is_los_collected) { /* if LOS was collected, also output LOS-related info */
    TRACE2("gc.space", "GC: Lspace Collection stats: "
      <<"\nGC: collection algo: "<<(collect_is_major()?"slide compact":"mark sweep")
      <<"\nGC: num surviving objs: "<<stats->los_suviving_obj_num
      <<"\nGC: size surviving objs: "<<verbose_print_size(stats->los_suviving_obj_size)
      <<"\nGC: surviving ratio: "<<(int)(stats->los_surviving_ratio*100)<<"%\n");
  }

}
Example 2
0
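/* Tear down the calling mutator thread: reset its allocation context, unlink it from
   the global mutator list, return its remset/finref/dirty-set blocks to the shared
   pools, free the Mutator structure and clear the thread-local slot. */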
void mutator_destruct(GC* gc, void *unused_gc_information)
{

  Mutator *mutator = (Mutator *)gc_get_tls();

  alloc_context_reset((Allocator*)mutator);


  lock(gc->mutator_list_lock);     // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv

#ifdef USE_UNIQUE_MARK_SWEEP_GC
  allocactor_destruct_local_chunks((Allocator*)mutator);
#endif
  mutator_register_new_obj_size(mutator);

  volatile Mutator *temp = gc->mutator_list;
  if (temp == mutator) {  /* it is at the head of the list */
    gc->mutator_list = temp->next;
  } else {
    while (temp->next != mutator) {
      temp = temp->next;
      assert(temp);
    }
    temp->next = mutator->next;
  }
  gc->num_mutators--;

  unlock(gc->mutator_list_lock); // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  
  if(gc_is_gen_mode()){ /* put back the remset when a mutator exits */
    pool_put_entry(gc->metadata->mutator_remset_pool, mutator->rem_set);
    mutator->rem_set = NULL;
  }
  
  if(mutator->obj_with_fin){
    pool_put_entry(gc->finref_metadata->obj_with_fin_pool, mutator->obj_with_fin);
    mutator->obj_with_fin = NULL;
  }

  lock(mutator->dirty_set_lock);
  if( mutator->dirty_set != NULL){
    if(vector_block_is_empty(mutator->dirty_set))
      pool_put_entry(gc->metadata->free_set_pool, mutator->dirty_set);
    else{ /* FIXME:: this condition may be relaxed. */
      pool_put_entry(gc->metadata->gc_dirty_set_pool, mutator->dirty_set);
      mutator->dirty_set = NULL;
    }
  }
  unlock(mutator->dirty_set_lock);
  STD_FREE(mutator);
  gc_set_tls(NULL);
  
  return;
}
Example 3
0
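/* Object-level write barrier used for array copy and object clone: in generational
   mode, remember the written object (or its slots) when it has reference fields and
   lives outside NOS, so old-to-young references are not missed. */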
void gc_heap_wrote_object (Managed_Object_Handle p_obj_written )
{

  if( !gc_is_gen_mode() || !object_has_ref_field((Partial_Reveal_Object*)p_obj_written)) 
    return;

  /* for array copy and object clone */
#ifdef USE_REM_SLOTS
  gc_object_write_barrier(p_obj_written); 
#else
  if( p_obj_written >= nos_boundary ) return;

  mutator_rem_obj( p_obj_written );  
#endif 
  return;
}
Example 4
0
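/* Walk a reference-object pool when finref processing is ignored: update each referent
   field (forwarding moved referents and remembering cross-space slots), clear the pool
   entry once handled, and simply clear the referent field of a dead referent instead of
   queueing the reference for enqueueing. */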
static void update_referent_field_ignore_finref(GC *gc, Pool *pool)
{
  Vector_Block *block = pool_get_entry(pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);
        
      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);      
      if(!p_referent){  // referent field has been cleared
        *p_ref = (REF)NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], (Partial_Reveal_Object**)p_referent_field);
          } else {
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
        continue;
      }
      *p_referent_field = (REF)NULL; /* referent is weakly reachable: clear the referent field */
    }
    block = pool_get_entry(pool);
  }
}
Example 5
0
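/* Set up a Mutator structure for the calling thread: allocate and zero it, bind its
   allocation space and GC, grab remset/dirty-set/finref blocks as needed, link it into
   the global mutator list and store it in thread-local storage. */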
void mutator_initialize(GC* gc, void *unused_gc_information) 
{
  /* FIXME:: make sure gc_info is cleared */
  Mutator *mutator = (Mutator *)STD_MALLOC(sizeof(Mutator));
  memset(mutator, 0, sizeof(Mutator));
  mutator->alloc_space = gc_get_nos((GC_Gen*)gc);
  mutator->gc = gc;
    
  if(gc_is_gen_mode()){
    mutator->rem_set = free_set_pool_get_entry(gc->metadata);
    assert(vector_block_is_empty(mutator->rem_set));
  }
  mutator->dirty_set = free_set_pool_get_entry(gc->metadata);
  
  if(!IGNORE_FINREF )
    mutator->obj_with_fin = finref_get_free_block(gc);
  else
    mutator->obj_with_fin = NULL;

#ifdef USE_UNIQUE_MARK_SWEEP_GC
  allocator_init_local_chunks((Allocator*)mutator);
#endif
  
  lock(gc->mutator_list_lock);     // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv

  mutator->next = (Mutator *)gc->mutator_list;
  gc->mutator_list = mutator;
  gc->num_mutators++;
  /*Begin to measure the mutator thread execution time. */
  mutator->time_measurement_start = time_now();
  unlock(gc->mutator_list_lock); // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  
  gc_set_tls(mutator);
  
  return;
}
Example 6
0
/*
 * The reason why we don't use identify_dead_refs() to implement this function is
 * that we will differentiate phanref from weakref in the future.
 */
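/* Walk the phanref pool: forward or remember live referents, clear the pool entry for
   handled references, and clear the referent field of dead (phantom-reachable) referents. */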
static void identify_dead_phanrefs(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *phanref_pool = metadata->phanref_pool;
  
  if(collect_need_update_repset())
    finref_reset_repset(gc);
//  collector_reset_repset(collector);
  pool_iterator_init(phanref_pool);
  Vector_Block *block = pool_iterator_next(phanref_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
      Partial_Reveal_Object *p_obj = read_slot((REF*)p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);

      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);      
      if(!p_referent){  // referent field has been cleared
        *p_ref = NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else { /* collect_move_object() need not be checked here: obj_need_move() already implies it */
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
        continue;
      }
      *p_referent_field = (REF)NULL;
#ifdef ORDER_DEBUG
      if(ref_file == NULL){
        if(order_record){
          ref_file = fopen64("RECORD_REF_LOG.log", "w+");
        } else {
          ref_file = fopen64("REPLAY_REF_LOG.log", "w+");
        }
      }
      assert(ref_file);
      fprintf(ref_file, "GC[%d]: ref (%d, %d) is DEAD!\n", gc->num_collections, p_referent->alloc_tid, p_referent->alloc_count);
      fflush(ref_file);
#endif
      /* Phantom status: for future use
       * if((unsigned int)p_referent & PHANTOM_REF_ENQUEUE_STATUS_MASK){
       *   // enqueued but not explicitly cleared OR pending for enqueueing
       *   *iter = NULL;
       * }
       * resurrect_obj_tree(collector, p_referent_field);
       */
    }
    block = pool_iterator_next(phanref_pool);
  }
//  collector_put_repset(collector);
  if(collect_need_update_repset()){
    finref_put_repset(gc);
    finref_add_repset_from_pool(gc, phanref_pool);
  }
}
Example 7
0
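/* Walk a soft/weak reference pool: referents that are still alive get their referent
   field updated (forwarded address written back, cross-space slots remembered) and the
   pool entry cleared; dead referents have the referent field cleared while the pool
   entry is kept so the reference object can be handed to the VM for enqueueing. */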
static void identify_dead_refs(GC *gc, Pool *pool)
{
  if(collect_need_update_repset())
    finref_reset_repset(gc);

  pool_iterator_init(pool);
  Vector_Block *block = pool_iterator_next(pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);
        
      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
      
      if(!p_referent){
        /* The referent field has been cleared. p_ref is set to NULL here presumably because
           this ref object was already processed through another p_ref, so there is no need
           to keep the same ref object in this p_ref. */
        *p_ref = (REF)NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            /* In gen mode, if the referent stays in NOS, we need to keep p_referent_field in the
               collector remset. This keeps the ref object live even though it is actually only
               weakly reachable in the next gen-mode collection, which simplifies the design;
               otherwise we would have to remember the ref objects in MOS separately and process
               them separately. */
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else { /* collect_move_object() need not be checked here: obj_need_move() already implies it */
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
      }else{
        /* The referent is dead (only weakly reachable): clear the referent field. */
        *p_referent_field = (REF)NULL;
#ifdef ORDER_DEBUG
        if(ref_file == NULL){
          if(order_record){
            ref_file = fopen64("RECORD_REF_LOG.log", "w+");
          } else {
            ref_file = fopen64("REPLAY_REF_LOG.log", "w+");
          }
        }
        assert(ref_file);
        fprintf(ref_file, "GC[%d]: ref (%d, %d) is DEAD!\n", gc->num_collections, p_referent->alloc_tid, p_referent->alloc_count);
        fflush(ref_file);
#endif
        /* For a dead referent, p_ref is not set to NULL: it keeps the ref object, which
           will be passed to the VM for enqueueing. */
      }
    }/* for each ref object */
    
    block = pool_iterator_next(pool);
  }
  
  if(collect_need_update_repset()){
    finref_put_repset(gc);
    finref_add_repset_from_pool(gc, pool);
  }
}
Example 8
0
// Resurrect the obj tree whose root is the obj which p_ref points to
static inline void resurrect_obj_tree(Collector *collector, REF *p_ref)
{
  GC *gc = collector->gc;
  GC_Metadata *metadata = gc->metadata;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  assert(p_obj && gc_obj_is_dead(gc, p_obj));
  
  void *p_ref_or_obj = p_ref;
  Trace_Object_Func trace_object;
  
  /* set trace_object() function */
  if(collect_is_minor()){
    if(gc_is_gen_mode()){
      if(minor_is_forward())
        trace_object = trace_obj_in_gen_fw;
      else if(minor_is_semispace())
        trace_object = trace_obj_in_gen_ss;
      else 
        assert(0);
    }else{
      if(minor_is_forward())
        trace_object = trace_obj_in_nongen_fw;
      else if(minor_is_semispace())
        trace_object = trace_obj_in_nongen_ss;
      else 
        assert(0);
    }
  } else if(collect_is_major_normal() || !gc_has_nos()){
    p_ref_or_obj = p_obj;
    if(gc_has_space_tuner(gc) && (gc->tuner->kind != TRANS_NOTHING)){
      trace_object = trace_obj_in_space_tune_marking;
      unsigned int obj_size = vm_object_size(p_obj);
#ifdef USE_32BITS_HASHCODE
      obj_size += hashcode_is_set(p_obj) ? GC_OBJECT_ALIGNMENT : 0;
#endif
      if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc))){
        collector->non_los_live_obj_size += obj_size;
        collector->segment_live_size[SIZE_TO_SEGMENT_INDEX(obj_size)] += obj_size;
      } else {
        collector->los_live_obj_size += round_up_to_size(obj_size, KB); 
      }
    } else if(!gc_has_nos()){
      trace_object = trace_obj_in_ms_marking;
    } else {
      trace_object = trace_obj_in_normal_marking;
    }
  } else if(collect_is_fallback()){
    if(major_is_marksweep())
      trace_object = trace_obj_in_ms_fallback_marking;
    else
      trace_object = trace_obj_in_fallback_marking;
  } else {
    assert(major_is_marksweep());
    p_ref_or_obj = p_obj;
    if( gc->gc_concurrent_status == GC_CON_NIL )
      trace_object = trace_obj_in_ms_marking;
    else
      trace_object = trace_obj_in_ms_concurrent_mark;
  }
  
  collector->trace_stack = free_task_pool_get_entry(metadata);
  collector_tracestack_push(collector, p_ref_or_obj);
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
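
  /* Drain the mark task pool: trace every ref/obj pushed above, recycling finished task
     blocks into the free pool; a collector failure (resurrection fallback) aborts the loop
     and clears the pool. */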
  
  collector->trace_stack = free_task_pool_get_entry(metadata);
  Vector_Block *task_block = pool_get_entry(metadata->mark_task_pool);
  while(task_block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(task_block);
    while(!vector_block_iterator_end(task_block, iter)){
      void *p_ref_or_obj = (void*)*iter;
      assert(((collect_is_minor()||collect_is_fallback()) && *(Partial_Reveal_Object **)p_ref_or_obj)
              || ((collect_is_major_normal()||major_is_marksweep()||!gc_has_nos()) && p_ref_or_obj));
      trace_object(collector, p_ref_or_obj);
      if(collector->result == FALSE)  break; /* Resurrection fallback happens; force return */
      
      iter = vector_block_iterator_advance(task_block, iter);
    }
    vector_stack_clear(task_block);
    pool_put_entry(metadata->free_task_pool, task_block);
    
    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }
    
    task_block = pool_get_entry(metadata->mark_task_pool);
  }
  
  task_block = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(task_block);
  pool_put_entry(metadata->free_task_pool, task_block);
  collector->trace_stack = NULL;
}
Example 9
0
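/* Read the gc.* VM properties, choose the collection algorithms and heap sizes, and
   configure generational/concurrent options. Returns the constructed GC. */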
GC* gc_parse_options() 
{
  TRACE2("gc.process", "GC: parse options ...\n");

  GC* gc;

  /* GC algorithm decision */
  /* Step 1: */
  char* minor_algo = NULL;
  char* major_algo = NULL;
  char* unique_algo = NULL;

  if (vm_property_is_set("gc.minor_algorithm", VM_PROPERTIES) == 1) {
    minor_algo = vm_properties_get_value("gc.minor_algorithm", VM_PROPERTIES);
  }

  if (vm_property_is_set("gc.major_algorithm", VM_PROPERTIES) == 1) {
    major_algo = vm_properties_get_value("gc.major_algorithm", VM_PROPERTIES);
  }

  if (vm_property_is_set("gc.unique_algorithm", VM_PROPERTIES) == 1) {
    unique_algo = vm_properties_get_value("gc.unique_algorithm", VM_PROPERTIES);
  }

  Boolean has_los = FALSE;
  if (vm_property_is_set("gc.has_los", VM_PROPERTIES) == 1) {
    has_los = vm_property_get_boolean("gc.has_los");
  }

  if(unique_algo){
    if(minor_algo || major_algo){
      LWARN(60, "Generational options cannot be set with unique_algo, ignored.");
    }
    gc = gc_unique_decide_collection_algo(unique_algo, has_los);
    vm_properties_destroy_value(unique_algo);  
  }else{ /* default */
    gc = gc_gen_decide_collection_algo(minor_algo, major_algo, has_los);
    if( minor_algo) vm_properties_destroy_value(minor_algo);
    if( major_algo) vm_properties_destroy_value(major_algo);
  }

  if (vm_property_is_set("gc.gen_mode", VM_PROPERTIES) == 1) {
    Boolean gen_mode = vm_property_get_boolean("gc.gen_mode");
    gc_set_gen_mode(gen_mode);
  }

  /* Step 2: */

  /* NOTE:: this has to stay after above!! */
  if (vm_property_is_set("gc.force_major_collect", VM_PROPERTIES) == 1) {
    FORCE_FULL_COMPACT = vm_property_get_boolean("gc.force_major_collect");
    if(FORCE_FULL_COMPACT){
      gc_set_gen_mode(FALSE);
    }
  }

  /* Step 3: */
  /* NOTE:: this has to stay after above!! */
  gc->generate_barrier = gc_is_gen_mode();
  
  if (vm_property_is_set("gc.generate_barrier", VM_PROPERTIES) == 1) {
    Boolean generate_barrier = vm_property_get_boolean("gc.generate_barrier");
    gc->generate_barrier = (generate_barrier || gc->generate_barrier);
  }
  
/* ///////////////////////////////////////////////////   */
  
  POINTER_SIZE_INT max_heap_size = HEAP_SIZE_DEFAULT;
  POINTER_SIZE_INT min_heap_size = min_heap_size_bytes;
  
  if (vm_property_is_set("gc.mx", VM_PROPERTIES) == 1) {
    max_heap_size = vm_property_get_size("gc.mx");

    if (max_heap_size < min_heap_size){
      max_heap_size = min_heap_size;
      LWARN(61, "Max heap size you set is too small, reset to {0}MB" << max_heap_size/MB);
    }
    if (0 == max_heap_size){
      max_heap_size = HEAP_SIZE_DEFAULT;
      LWARN(62, "Max heap size you set equals to zero, reset to {0}MB" << max_heap_size/MB);
    }
 
    min_heap_size = max_heap_size / 10;
    if (min_heap_size < min_heap_size_bytes){
      min_heap_size = min_heap_size_bytes;
      //printf("Min heap size: too small, reset to %d MB! \n", min_heap_size/MB);
    }
  }

  if (vm_property_is_set("gc.ms", VM_PROPERTIES) == 1) {
    min_heap_size = vm_property_get_size("gc.ms");
    if (min_heap_size < min_heap_size_bytes){
      min_heap_size = min_heap_size_bytes;
      LWARN(63, "Min heap size you set is too small, reset to {0}MB" << min_heap_size/MB);
    } 
  }

  if (min_heap_size > max_heap_size){
    max_heap_size = min_heap_size;
    LWARN(61, "Max heap size is too small, reset to {0}MB" << max_heap_size/MB);
  }

  min_heap_size_bytes = min_heap_size;
  max_heap_size_bytes = max_heap_size;

  if (vm_property_is_set("gc.nos_size", VM_PROPERTIES) == 1) {
    NOS_SIZE = vm_property_get_size("gc.nos_size");
  }

  if (vm_property_is_set("gc.min_nos_size", VM_PROPERTIES) == 1) {
    MIN_NOS_SIZE = vm_property_get_size("gc.min_nos_size");
  }

  if (vm_property_is_set("gc.init_los_size", VM_PROPERTIES) == 1) {
    INIT_LOS_SIZE = vm_property_get_size("gc.init_los_size");
  }  

  if (vm_property_is_set("gc.num_collectors", VM_PROPERTIES) == 1) {
    unsigned int num = vm_property_get_integer("gc.num_collectors");
    NUM_COLLECTORS = (num==0)? NUM_COLLECTORS:num;
  }

  if (vm_property_is_set("gc.num_conclctors", VM_PROPERTIES) == 1) {
    unsigned int num = vm_property_get_integer("gc.num_conclctors");
    NUM_CONCLCTORS = (num==0)? NUM_CONCLCTORS:num;
  }

  // for concurrent GC debug
  if (vm_property_is_set("gc.num_con_markers", VM_PROPERTIES) == 1) {
    unsigned int num = vm_property_get_integer("gc.num_con_markers");
    NUM_CON_MARKERS = (num==0)? NUM_CON_MARKERS:num;
  }

  if (vm_property_is_set("gc.num_con_sweepers", VM_PROPERTIES) == 1) {
    unsigned int num = vm_property_get_integer("gc.num_con_sweepers");
    NUM_CON_SWEEPERS = (num==0)? NUM_CON_SWEEPERS:num;
  }

  if (vm_property_is_set("gc.tospace_size", VM_PROPERTIES) == 1) {
    TOSPACE_SIZE = vm_property_get_size("gc.tospace_size");
  }

  if (vm_property_is_set("gc.mos_reserve_size", VM_PROPERTIES) == 1) {
    MOS_RESERVE_SIZE = vm_property_get_size("gc.mos_reserve_size");
  }

  if (vm_property_is_set("gc.nos_partial_forward", VM_PROPERTIES) == 1) {
    NOS_PARTIAL_FORWARD = vm_property_get_boolean("gc.nos_partial_forward");
  }
    
  if (vm_property_is_set("gc.minor_collectors", VM_PROPERTIES) == 1) {
    MINOR_COLLECTORS = vm_property_get_integer("gc.minor_collectors");
  }

  if (vm_property_is_set("gc.major_collectors", VM_PROPERTIES) == 1) {
    MAJOR_COLLECTORS = vm_property_get_integer("gc.major_collectors");
  }

  if (vm_property_is_set("gc.ignore_finref", VM_PROPERTIES) == 1) {
    IGNORE_FINREF = vm_property_get_boolean("gc.ignore_finref");
  }

  if (vm_property_is_set("gc.verify", VM_PROPERTIES) == 1) {
    char* value = vm_properties_get_value("gc.verify", VM_PROPERTIES);
    GC_VERIFY = strdup(value);
    vm_properties_destroy_value(value);
  }

  if (vm_property_is_set("gc.gen_nongen_switch", VM_PROPERTIES) == 1){
    GEN_NONGEN_SWITCH= vm_property_get_boolean("gc.gen_nongen_switch");
    gc->generate_barrier = TRUE;
  }

  if (vm_property_is_set("gc.heap_iteration", VM_PROPERTIES) == 1) {
    JVMTI_HEAP_ITERATION = vm_property_get_boolean("gc.heap_iteration");
  }

  if (vm_property_is_set("gc.ignore_vtable_tracing", VM_PROPERTIES) == 1) {
    IGNORE_VTABLE_TRACING = vm_property_get_boolean("gc.ignore_vtable_tracing");
  }

  if (vm_property_is_set("gc.use_large_page", VM_PROPERTIES) == 1){
    char* value = vm_properties_get_value("gc.use_large_page", VM_PROPERTIES);
    large_page_hint = strdup(value);
    vm_properties_destroy_value(value);
  }

  if (vm_property_is_set("gc.share_los_boundary", VM_PROPERTIES) == 1){
    share_los_boundary = vm_property_get_boolean("gc.share_los_boundary");
  }

  if (vm_property_is_set("gc.ignore_force_gc", VM_PROPERTIES) == 1){
    IGNORE_FORCE_GC = vm_property_get_boolean("gc.ignore_force_gc");
  }
  
  if (vm_property_is_set("gc.concurrent_gc", VM_PROPERTIES) == 1){
    Boolean use_all_concurrent_phase= vm_property_get_boolean("gc.concurrent_gc");
    if(use_all_concurrent_phase){
#ifndef USE_UNIQUE_MARK_SWEEP_GC
      LDIE(77, "Please define USE_UNIQUE_MARK_SWEEP_GC macro.");
#endif
      gc_specify_con_enum();
      gc_specify_con_mark();
      gc_specify_con_sweep();
      gc->generate_barrier = TRUE;
    }
  }

  if (vm_property_is_set("gc.concurrent_enumeration", VM_PROPERTIES) == 1){
    Boolean USE_CONCURRENT_ENUMERATION = vm_property_get_boolean("gc.concurrent_enumeration");
    if(USE_CONCURRENT_ENUMERATION){
#ifndef USE_UNIQUE_MARK_SWEEP_GC
      LDIE(77, "Please define USE_UNIQUE_MARK_SWEEP_GC macro.");
#endif
      gc_specify_con_enum();
      gc->generate_barrier = TRUE;
    }
  }

  if (vm_property_is_set("gc.concurrent_mark", VM_PROPERTIES) == 1){
    Boolean USE_CONCURRENT_MARK = vm_property_get_boolean("gc.concurrent_mark");
    if(USE_CONCURRENT_MARK){
#ifndef USE_UNIQUE_MARK_SWEEP_GC
      LDIE(77, "Please define USE_UNIQUE_MARK_SWEEP_GC macro.");
#endif
      gc_specify_con_mark();
      gc->generate_barrier = TRUE;
      IGNORE_FINREF = TRUE; /*TODO: finref is unsupported.*/
    }
  }

  if (vm_property_is_set("gc.concurrent_sweep", VM_PROPERTIES) == 1){
    Boolean USE_CONCURRENT_SWEEP= vm_property_get_boolean("gc.concurrent_sweep");
    if(USE_CONCURRENT_SWEEP){
      /*currently, concurrent sweeping only starts after concurrent marking.*/
      assert(gc_is_specify_con_mark());
#ifndef USE_UNIQUE_MARK_SWEEP_GC
      LDIE(77, "Please define USE_UNIQUE_MARK_SWEEP_GC macro.");
#endif
      gc_specify_con_sweep();
      IGNORE_FINREF = TRUE; /*TODO: finref is unsupported.*/
    }
  }
 
  char* concurrent_algo = NULL;
  
  if (vm_property_is_set("gc.concurrent_algorithm", VM_PROPERTIES) == 1) {
    concurrent_algo = vm_properties_get_value("gc.concurrent_algorithm", VM_PROPERTIES);    
    gc_decide_con_algo(concurrent_algo);
  }else if(gc_is_specify_con_gc()){
    gc_set_default_con_algo();
  }

  char* cc_scheduler = NULL;
  if (vm_property_is_set("gc.cc_scheduler", VM_PROPERTIES) == 1) {
    cc_scheduler = vm_properties_get_value("gc.cc_scheduler", VM_PROPERTIES);    
    gc_decide_cc_scheduler_kind(cc_scheduler);
  }else if(gc_is_specify_con_gc()){
    gc_set_default_cc_scheduler_kind();
  }

#if defined(ALLOC_ZEROING) && defined(ALLOC_PREFETCH)
  if(vm_property_is_set("gc.prefetch",VM_PROPERTIES) ==1) {
    PREFETCH_ENABLED = vm_property_get_boolean("gc.prefetch");
  }

  if(vm_property_is_set("gc.prefetch_distance",VM_PROPERTIES)==1) {
    PREFETCH_DISTANCE = vm_property_get_size("gc.prefetch_distance");
    if(!PREFETCH_ENABLED) {
      LWARN(64, "Prefetch distance set with Prefetch disabled!");
    }
  }

  if(vm_property_is_set("gc.prefetch_stride",VM_PROPERTIES)==1) {
    PREFETCH_STRIDE = vm_property_get_size("gc.prefetch_stride");
    if(!PREFETCH_ENABLED) {
      LWARN(65, "Prefetch stride set with Prefetch disabled!");
    }  
  }
  
  if(vm_property_is_set("gc.zeroing_size",VM_PROPERTIES)==1) {
    ZEROING_SIZE = vm_property_get_size("gc.zeroing_size");
  }   
#endif

#ifdef PREFETCH_SUPPORTED
  if(vm_property_is_set("gc.mark_prefetch",VM_PROPERTIES) ==1) {
    mark_prefetch = vm_property_get_boolean("gc.mark_prefetch");
  }  
#endif

  return gc;
}
Example 10
0
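/* JNI entry used by the Java GC helper: reports whether the GC currently runs in
   generational mode. */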
JNIEXPORT jboolean JNICALL Java_org_apache_harmony_drlvm_gc_1gen_GCHelper_getGenMode(JNIEnv *e, jclass c)
{
    return (jboolean)gc_is_gen_mode();
}
Example 11
0
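/* Decide whether the next collection should run in generational or non-generational
   mode, based on measured minor-collection throughput, the MOS survive ratio and the
   amount of free space. Only active when GEN_NONGEN_SWITCH is enabled. */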
void gc_gen_mode_adapt(GC_Gen* gc, int64 pause_time)
{
  if(GEN_NONGEN_SWITCH == FALSE) return;
  
  Blocked_Space* nos = (Blocked_Space*)gc->nos;
  Blocked_Space* mos = (Blocked_Space*)gc->mos;
  Gen_Mode_Adaptor* gen_mode_adaptor = gc->gen_mode_adaptor;

  POINTER_SIZE_INT mos_free_size = blocked_space_free_mem_size(mos);
  POINTER_SIZE_INT nos_free_size = blocked_space_free_mem_size(nos);
  POINTER_SIZE_INT total_free_size = mos_free_size  + nos_free_size;
  
  if(collect_is_major()) {
    assert(!gc_is_gen_mode());
    
    if(gen_mode_adaptor->major_survive_ratio_threshold != 0 && mos->survive_ratio > gen_mode_adaptor->major_survive_ratio_threshold){    
      if(gen_mode_adaptor->major_repeat_count > MAX_MAJOR_REPEAT_COUNT ){
        gc->force_gen_mode = TRUE;
        gc_set_gen_mode(TRUE);
        gc->next_collect_force_major = FALSE;
        return;
      }else{
        gen_mode_adaptor->major_repeat_count++;
      }
    }else{
      gen_mode_adaptor->major_repeat_count = 1;
    }
    
  }else{
    /*compute throughput*/
    if(!collect_last_is_minor((GC*)gc)){
      gen_mode_adaptor->nongen_minor_throughput = 1.0f;
    }
    if(gc->force_gen_mode){
      if(pause_time!=0){
        if(gen_mode_adaptor->gen_minor_throughput != 0)
          gen_mode_adaptor->gen_minor_throughput = (gen_mode_adaptor->gen_minor_throughput + (float) nos_free_size/(float)pause_time)/2.0f;
        else
          gen_mode_adaptor->gen_minor_throughput =(float) nos_free_size/(float)pause_time;
      }
    }else{
      if(pause_time!=0){
        if(gen_mode_adaptor->gen_minor_throughput != 1.0f)
          gen_mode_adaptor->nongen_minor_throughput = (gen_mode_adaptor->nongen_minor_throughput + (float) nos_free_size/(float)pause_time)/2.0f;      
        else
          gen_mode_adaptor->nongen_minor_throughput = (float) nos_free_size/(float)pause_time;
      }
    }

    if(gen_mode_adaptor->nongen_minor_throughput <=  gen_mode_adaptor->gen_minor_throughput ){
      if( !collect_last_is_minor((GC*)gc) ){
        gen_mode_adaptor->major_survive_ratio_threshold = mos->survive_ratio;
      }else if( !gc->force_gen_mode ){
        gc->force_gen_mode = TRUE;
        gen_mode_adaptor->gen_mode_trial_count = MAX_INT32;        
      } 
    }

    if(gc->next_collect_force_major && !gc->force_gen_mode){
        gc->next_collect_force_major = FALSE;
        gc->force_gen_mode = TRUE;
        gen_mode_adaptor->gen_mode_trial_count = 2;
    }else if( collect_last_is_minor((GC*)gc) && gc->force_gen_mode){
       gen_mode_adaptor->gen_mode_trial_count = MAX_INT32;
    }

    if(gc->force_gen_mode && (total_free_size <= ((float)min_nos_size_bytes) * 1.3 )){
        gc->force_gen_mode = FALSE;
        gc_set_gen_mode(FALSE);
        gc->next_collect_force_major = TRUE;
        gen_mode_adaptor->gen_mode_trial_count = 0;
        return;
    }
    
    if( gc->force_gen_mode ){
      assert( gen_mode_adaptor->gen_mode_trial_count >= 0);

      gen_mode_adaptor->gen_mode_trial_count --;
      if( gen_mode_adaptor->gen_mode_trial_count >= 0){
        gc_set_gen_mode(TRUE);
        return;
      }
          
      gc->force_gen_mode = FALSE;
      gc->next_collect_force_major = TRUE;    
      gen_mode_adaptor->gen_mode_trial_count = 0;
    }
  }
  
  gc_set_gen_mode(FALSE);
  return;
}
Example 12
0
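/* After a collection, recompute the NOS/MOS split (unless a fixed NOS_SIZE is set) and
   move nos_boundary accordingly, adjusting the committed size of both spaces. */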
void gc_gen_adapt(GC_Gen* gc, int64 pause_time)
{
  gc_decide_next_collect(gc, pause_time);

  if(NOS_SIZE) return;

  Blocked_Space* nos = (Blocked_Space*)gc->nos;
  Blocked_Space* mos = (Blocked_Space*)gc->mos;
  
  POINTER_SIZE_INT new_nos_size;
  POINTER_SIZE_INT new_mos_size;

  Boolean result = gc_compute_new_space_size(gc, &new_mos_size, &new_nos_size);

  if(!result) return;

  POINTER_SIZE_INT curr_nos_size = space_committed_size((Space*)nos);

  //if( ABS_DIFF(new_nos_size, curr_nos_size) < NOS_COPY_RESERVE_DELTA )
  if( new_nos_size == curr_nos_size ){
    return;
  }else if ( new_nos_size >= curr_nos_size ){
    INFO2("gc.process", "GC: gc_gen space adjustment after GC["<<gc->num_collections<<"] ...");
    POINTER_SIZE_INT adapt_size = new_nos_size - curr_nos_size;
    INFO2("gc.space", "GC: Space Adapt:  mos  --->  nos  ("
      <<verbose_print_size(adapt_size)
      <<" size was transferred from mos to nos)\n"); 
  } else {
    INFO2("gc.process", "GC: gc_gen space adjustment after GC["<<gc->num_collections<<"] ...");
    POINTER_SIZE_INT  adapt_size = curr_nos_size - new_nos_size;
    INFO2("gc.space", "GC: Space Adapt:  nos  --->  mos  ("
      <<verbose_print_size(adapt_size)
      <<" size was transferred from nos to mos)\n"); 
  }

  /* below are the adjustments */
  POINTER_SIZE_INT curr_heap_commit_end;
  if(LOS_ADJUST_BOUNDARY)
    curr_heap_commit_end = (POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_BASE + gc->committed_heap_size;
  else
    curr_heap_commit_end = (POINTER_SIZE_INT)nos->heap_start + nos->committed_heap_size;
  
  void* new_nos_boundary = (void*)(curr_heap_commit_end - new_nos_size);

  nos_boundary = nos_space_adjust((Space*)nos, new_nos_boundary, new_nos_size);
  /* it's possible that nos can't accept the specified boundary; in that case it returns a new one. */
  assert(nos_boundary <= new_nos_boundary); 
  
  new_mos_size -= (POINTER_SIZE_INT)new_nos_boundary - (POINTER_SIZE_INT)nos_boundary;
  blocked_space_adjust(mos, mos->heap_start, new_mos_size);
  
  Block_Header* mos_last_block = (Block_Header*)&mos->blocks[mos->num_managed_blocks-1];
  assert(mos->ceiling_block_idx == mos_last_block->block_idx);
  Block_Header* nos_first_block = (Block_Header*)&nos->blocks[0];
  /* This is redundant because blocked_space_adjust doesn't set the last block's next to NULL:
     mos_last_block->next = nos_first_block; */

  if( gc_is_gen_mode())
    HelperClass_set_NosBoundary(nos_boundary);
  
  return;
}