Example #1
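/*
 * Allocation entry point for the wspace. The fast path allocates from the
 * thread-local chunk; on failure the allocation is retried under the GC
 * enumeration lock, and as a last resort the heap is reclaimed before one
 * final attempt. On success the mutator's new_obj_size is updated.
 */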
/* FIXME: the collection should be separated from the allocation */
void *wspace_alloc(unsigned size, Allocator *allocator)
{
  void *p_obj = NULL;
  /*
  if( get_hyper_free_chunk_list()->head == NULL )
  	INFO2("gc.wspace", "[BEFORE ALLOC]hyper free chunk is EMPTY!!");
  */
  
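  /* If concurrent GC is enabled, give the concurrent-collection scheduler a chance to start a collection first. */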
  if(gc_is_specify_con_gc())
    gc_sched_collection(allocator->gc, GC_CAUSE_CONCURRENT_GC);
  
  /* First, try to allocate the object from the TLB (thread-local chunk) */
  p_obj = wspace_try_alloc(size, allocator);
  if(p_obj){
    ((Mutator*)allocator)->new_obj_size += size;
    /*
    if( get_hyper_free_chunk_list()->head == NULL )
  	INFO2("gc.wspace", "[AFTER FIRST ALLOC]hyper free chunk is EMPTY!!");
    */
    return p_obj;
  }
  
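  /* A collection is already in progress: fail this allocation attempt rather than triggering another one. */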
  if(allocator->gc->in_collection) return NULL;
  
  vm_gc_lock_enum();
  /* After acquiring the lock, check whether another thread has already collected */
  p_obj = wspace_try_alloc(size, allocator);
  if(p_obj){
    vm_gc_unlock_enum();
    ((Mutator*)allocator)->new_obj_size += size;
    /*
    if( get_hyper_free_chunk_list()->head == NULL )
  	INFO2("gc.wspace", "[AFTER SECOND ALLOC]hyper free chunk is EMPTY!!");
    */
    return p_obj;
  }
  
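  /* Both attempts failed, so the space is exhausted: reclaim the heap while holding the enumeration lock, then try one last time. */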
  INFO2("gc.con.info", "[Exhausted Cause] Allocation size is :" << size << " bytes");
  GC *gc = allocator->gc;
  /*
  gc->cause = GC_CAUSE_MOS_IS_FULL;
  if(gc_is_specify_con_gc())
    gc_relaim_heap_con_mode(gc);
  else*/ 
  gc_reclaim_heap(gc, GC_CAUSE_MOS_IS_FULL);
  vm_gc_unlock_enum();

#ifdef SSPACE_CHUNK_INFO
  printf("Failure size: %x\n", size);
#endif

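  /* Final attempt after the collection. */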
  p_obj = wspace_try_alloc(size, allocator);
  /*
  if( get_hyper_free_chunk_list()->head == NULL )
  	INFO2("gc.wspace", "[AFTER COLLECTION ALLOC]hyper free chunk is EMPTY!!");
  */
  if(p_obj) ((Mutator*)allocator)->new_obj_size += size;
  
  return p_obj;
}
Example #2
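/*
 * Resets GC state after a concurrent collection: hands finalizable references
 * back to the VM (or clears the weak-reference pools when finref handling is
 * ignored), resets the concurrent space statistics, clears the concurrent
 * collector roles, and lets the VM reclaim its native objects.
 */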
void gc_reset_after_con_collection(GC* gc)
{
  assert(gc_is_specify_con_gc());
  int64 reset_start = time_now();
  if(!IGNORE_FINREF ){
    INFO2("gc.process", "GC: finref process after collection ...\n");
    gc_put_finref_to_vm(gc);
    gc_reset_finref_metadata(gc);
    gc_activate_finref_threads((GC*)gc);
#ifndef BUILD_IN_REFERENT
  } else {
    gc_clear_weakref_pools(gc);
    gc_clear_finref_repset_pool(gc);
#endif
  }
  reset_start = time_now();
  gc_reset_con_space_stat(gc);
  gc_clear_conclctor_role(gc);  
  vm_reclaim_native_objs();
}
Example #3
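/*
 * Parses the gc.* VM properties and configures the collector: collection
 * algorithms, generational mode, heap and space sizes, collector thread
 * counts, concurrent phases and their scheduler, and prefetch/zeroing tuning.
 * Returns the GC instance created by the algorithm-selection step.
 */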
GC* gc_parse_options() 
{
  TRACE2("gc.process", "GC: parse options ...\n");

  GC* gc;

  /* GC algorithm decision */
  /* Step 1: */
  char* minor_algo = NULL;
  char* major_algo = NULL;
  char* unique_algo = NULL;

  if (vm_property_is_set("gc.minor_algorithm", VM_PROPERTIES) == 1) {
    minor_algo = vm_properties_get_value("gc.minor_algorithm", VM_PROPERTIES);
  }

  if (vm_property_is_set("gc.major_algorithm", VM_PROPERTIES) == 1) {
    major_algo = vm_properties_get_value("gc.major_algorithm", VM_PROPERTIES);
  }

  if (vm_property_is_set("gc.unique_algorithm", VM_PROPERTIES) == 1) {
    unique_algo = vm_properties_get_value("gc.unique_algorithm", VM_PROPERTIES);
  }

  Boolean has_los = FALSE;
  if (vm_property_is_set("gc.has_los", VM_PROPERTIES) == 1) {
    has_los = vm_property_get_boolean("gc.has_los");
  }

  if(unique_algo){
    if(minor_algo || major_algo){
      LWARN(60, "Generational algorithm options cannot be combined with unique_algo; they are ignored.");
    }
    gc = gc_unique_decide_collection_algo(unique_algo, has_los);
    vm_properties_destroy_value(unique_algo);  
  }else{ /* default */
    gc = gc_gen_decide_collection_algo(minor_algo, major_algo, has_los);
    if( minor_algo) vm_properties_destroy_value(minor_algo);
    if( major_algo) vm_properties_destroy_value(major_algo);
  }

  if (vm_property_is_set("gc.gen_mode", VM_PROPERTIES) == 1) {
    Boolean gen_mode = vm_property_get_boolean("gc.gen_mode");
    gc_set_gen_mode(gen_mode);
  }

  /* Step 2: */

  /* NOTE: this must stay after the gen_mode handling above, because forcing major collections overrides it by disabling generational mode */
  if (vm_property_is_set("gc.force_major_collect", VM_PROPERTIES) == 1) {
    FORCE_FULL_COMPACT = vm_property_get_boolean("gc.force_major_collect");
    if(FORCE_FULL_COMPACT){
      gc_set_gen_mode(FALSE);
    }
  }

  /* Step 3: */
  /* NOTE: this must stay after the code above, since the write-barrier default depends on the final generational-mode decision */
  gc->generate_barrier = gc_is_gen_mode();
  
  if (vm_property_is_set("gc.generate_barrier", VM_PROPERTIES) == 1) {
    Boolean generate_barrier = vm_property_get_boolean("gc.generate_barrier");
    gc->generate_barrier = (generate_barrier || gc->generate_barrier);
  }
  
/* ///////////////////////////////////////////////////   */
  
  POINTER_SIZE_INT max_heap_size = HEAP_SIZE_DEFAULT;
  POINTER_SIZE_INT min_heap_size = min_heap_size_bytes;
  
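  /* Heap sizing: gc.mx sets the maximum heap size. If gc.ms is not given, the minimum defaults to one tenth of the maximum, clamped to min_heap_size_bytes. */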
  if (vm_property_is_set("gc.mx", VM_PROPERTIES) == 1) {
    max_heap_size = vm_property_get_size("gc.mx");

    if (max_heap_size < min_heap_size){
      max_heap_size = min_heap_size;
      LWARN(61, "The specified max heap size is too small; it has been reset to {0}MB" << max_heap_size/MB);
    }
    if (0 == max_heap_size){
      max_heap_size = HEAP_SIZE_DEFAULT;
      LWARN(62, "The specified max heap size is zero; it has been reset to {0}MB" << max_heap_size/MB);
    }
 
    min_heap_size = max_heap_size / 10;
    if (min_heap_size < min_heap_size_bytes){
      min_heap_size = min_heap_size_bytes;
      //printf("Min heap size: too small, reset to %d MB! \n", min_heap_size/MB);
    }
  }

  if (vm_property_is_set("gc.ms", VM_PROPERTIES) == 1) {
    min_heap_size = vm_property_get_size("gc.ms");
    if (min_heap_size < min_heap_size_bytes){
      min_heap_size = min_heap_size_bytes;
      LWARN(63, "The specified min heap size is too small; it has been reset to {0}MB" << min_heap_size/MB);
    } 
  }

  if (min_heap_size > max_heap_size){
    max_heap_size = min_heap_size;
    LWARN(61, "Max heap size is smaller than the min heap size; it has been reset to {0}MB" << max_heap_size/MB);
  }

  min_heap_size_bytes = min_heap_size;
  max_heap_size_bytes = max_heap_size;

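  /* Space sizes (nursery, LOS) and collector/concurrent-collector thread counts. */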
  if (vm_property_is_set("gc.nos_size", VM_PROPERTIES) == 1) {
    NOS_SIZE = vm_property_get_size("gc.nos_size");
  }

  if (vm_property_is_set("gc.min_nos_size", VM_PROPERTIES) == 1) {
    MIN_NOS_SIZE = vm_property_get_size("gc.min_nos_size");
  }

  if (vm_property_is_set("gc.init_los_size", VM_PROPERTIES) == 1) {
    INIT_LOS_SIZE = vm_property_get_size("gc.init_los_size");
  }  

  if (vm_property_is_set("gc.num_collectors", VM_PROPERTIES) == 1) {
    unsigned int num = vm_property_get_integer("gc.num_collectors");
    NUM_COLLECTORS = (num==0)? NUM_COLLECTORS:num;
  }

  if (vm_property_is_set("gc.num_conclctors", VM_PROPERTIES) == 1) {
    unsigned int num = vm_property_get_integer("gc.num_conclctors");
    NUM_CONCLCTORS = (num==0)? NUM_CONCLCTORS:num;
  }

  // for concurrent GC debug
  if (vm_property_is_set("gc.num_con_markers", VM_PROPERTIES) == 1) {
    unsigned int num = vm_property_get_integer("gc.num_con_markers");
    NUM_CON_MARKERS = (num==0)? NUM_CON_MARKERS:num;
  }

  if (vm_property_is_set("gc.num_con_sweepers", VM_PROPERTIES) == 1) {
    unsigned int num = vm_property_get_integer("gc.num_con_sweepers");
    NUM_CON_SWEEPERS = (num==0)? NUM_CON_SWEEPERS:num;
  }

  if (vm_property_is_set("gc.tospace_size", VM_PROPERTIES) == 1) {
    TOSPACE_SIZE = vm_property_get_size("gc.tospace_size");
  }

  if (vm_property_is_set("gc.mos_reserve_size", VM_PROPERTIES) == 1) {
    MOS_RESERVE_SIZE = vm_property_get_size("gc.mos_reserve_size");
  }

  if (vm_property_is_set("gc.nos_partial_forward", VM_PROPERTIES) == 1) {
    NOS_PARTIAL_FORWARD = vm_property_get_boolean("gc.nos_partial_forward");
  }
    
  if (vm_property_is_set("gc.minor_collectors", VM_PROPERTIES) == 1) {
    MINOR_COLLECTORS = vm_property_get_integer("gc.minor_collectors");
  }

  if (vm_property_is_set("gc.major_collectors", VM_PROPERTIES) == 1) {
    MAJOR_COLLECTORS = vm_property_get_integer("gc.major_collectors");
  }

  if (vm_property_is_set("gc.ignore_finref", VM_PROPERTIES) == 1) {
    IGNORE_FINREF = vm_property_get_boolean("gc.ignore_finref");
  }

  if (vm_property_is_set("gc.verify", VM_PROPERTIES) == 1) {
    char* value = vm_properties_get_value("gc.verify", VM_PROPERTIES);
    GC_VERIFY = strdup(value);
    vm_properties_destroy_value(value);
  }

  if (vm_property_is_set("gc.gen_nongen_switch", VM_PROPERTIES) == 1){
    GEN_NONGEN_SWITCH= vm_property_get_boolean("gc.gen_nongen_switch");
    gc->generate_barrier = TRUE;
  }

  if (vm_property_is_set("gc.heap_iteration", VM_PROPERTIES) == 1) {
    JVMTI_HEAP_ITERATION = vm_property_get_boolean("gc.heap_iteration");
  }

  if (vm_property_is_set("gc.ignore_vtable_tracing", VM_PROPERTIES) == 1) {
    IGNORE_VTABLE_TRACING = vm_property_get_boolean("gc.ignore_vtable_tracing");
  }

  if (vm_property_is_set("gc.use_large_page", VM_PROPERTIES) == 1){
    char* value = vm_properties_get_value("gc.use_large_page", VM_PROPERTIES);
    large_page_hint = strdup(value);
    vm_properties_destroy_value(value);
  }

  if (vm_property_is_set("gc.share_los_boundary", VM_PROPERTIES) == 1){
    share_los_boundary = vm_property_get_boolean("gc.share_los_boundary");
  }

  if (vm_property_is_set("gc.ignore_force_gc", VM_PROPERTIES) == 1){
    IGNORE_FORCE_GC = vm_property_get_boolean("gc.ignore_force_gc");
  }
  
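  /* Concurrent phase switches: gc.concurrent_gc enables concurrent enumeration, marking and sweeping together; the switches below enable each phase individually. All of them require a build with USE_UNIQUE_MARK_SWEEP_GC defined. */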
  if (vm_property_is_set("gc.concurrent_gc", VM_PROPERTIES) == 1){
    Boolean use_all_concurrent_phase= vm_property_get_boolean("gc.concurrent_gc");
    if(use_all_concurrent_phase){
#ifndef USE_UNIQUE_MARK_SWEEP_GC
      LDIE(77, "Please define USE_UNIQUE_MARK_SWEEP_GC macro.");
#endif
      gc_specify_con_enum();
      gc_specify_con_mark();
      gc_specify_con_sweep();
      gc->generate_barrier = TRUE;
    }
  }

  if (vm_property_is_set("gc.concurrent_enumeration", VM_PROPERTIES) == 1){
    Boolean USE_CONCURRENT_ENUMERATION = vm_property_get_boolean("gc.concurrent_enumeration");
    if(USE_CONCURRENT_ENUMERATION){
#ifndef USE_UNIQUE_MARK_SWEEP_GC
      LDIE(77, "Please define USE_UNIQUE_MARK_SWEEP_GC macro.");
#endif
      gc_specify_con_enum();
      gc->generate_barrier = TRUE;
    }
  }

  if (vm_property_is_set("gc.concurrent_mark", VM_PROPERTIES) == 1){
    Boolean USE_CONCURRENT_MARK = vm_property_get_boolean("gc.concurrent_mark");
    if(USE_CONCURRENT_MARK){
#ifndef USE_UNIQUE_MARK_SWEEP_GC
      LDIE(77, "Please define USE_UNIQUE_MARK_SWEEP_GC macro.");
#endif
      gc_specify_con_mark();
      gc->generate_barrier = TRUE;
      IGNORE_FINREF = TRUE; /*TODO: finref is unsupported.*/
    }
  }

  if (vm_property_is_set("gc.concurrent_sweep", VM_PROPERTIES) == 1){
    Boolean USE_CONCURRENT_SWEEP= vm_property_get_boolean("gc.concurrent_sweep");
    if(USE_CONCURRENT_SWEEP){
      /*currently, concurrent sweeping only starts after concurrent marking.*/
      assert(gc_is_specify_con_mark());
#ifndef USE_UNIQUE_MARK_SWEEP_GC
      LDIE(77, "Please define USE_UNIQUE_MARK_SWEEP_GC macro.");
#endif
      gc_specify_con_sweep();
      IGNORE_FINREF = TRUE; /*TODO: finref is unsupported.*/
    }
  }
 
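  /* Concurrent algorithm and concurrent-collection scheduler selection; defaults are used when concurrent GC is enabled but no explicit value is given. */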
  char* concurrent_algo = NULL;
  
  if (vm_property_is_set("gc.concurrent_algorithm", VM_PROPERTIES) == 1) {
    concurrent_algo = vm_properties_get_value("gc.concurrent_algorithm", VM_PROPERTIES);    
    gc_decide_con_algo(concurrent_algo);
  }else if(gc_is_specify_con_gc()){
    gc_set_default_con_algo();
  }

  char* cc_scheduler = NULL;
  if (vm_property_is_set("gc.cc_scheduler", VM_PROPERTIES) == 1) {
    cc_scheduler = vm_properties_get_value("gc.cc_scheduler", VM_PROPERTIES);    
    gc_decide_cc_scheduler_kind(cc_scheduler);
  }else if(gc_is_specify_con_gc()){
    gc_set_default_cc_scheduler_kind();
  }

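/* Allocation prefetch and zeroing tuning; only compiled in when ALLOC_ZEROING and ALLOC_PREFETCH are defined. */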
#if defined(ALLOC_ZEROING) && defined(ALLOC_PREFETCH)
  if(vm_property_is_set("gc.prefetch",VM_PROPERTIES) ==1) {
    PREFETCH_ENABLED = vm_property_get_boolean("gc.prefetch");
  }

  if(vm_property_is_set("gc.prefetch_distance",VM_PROPERTIES)==1) {
    PREFETCH_DISTANCE = vm_property_get_size("gc.prefetch_distance");
    if(!PREFETCH_ENABLED) {
      LWARN(64, "Prefetch distance set with Prefetch disabled!");
    }
  }

  if(vm_property_is_set("gc.prefetch_stride",VM_PROPERTIES)==1) {
    PREFETCH_STRIDE = vm_property_get_size("gc.prefetch_stride");
    if(!PREFETCH_ENABLED) {
      LWARN(65, "Prefetch stride set with Prefetch disabled!");
    }  
  }
  
  if(vm_property_is_set("gc.zeroing_size",VM_PROPERTIES)==1) {
    ZEROING_SIZE = vm_property_get_size("gc.zeroing_size");
  }   
#endif

#ifdef PREFETCH_SUPPORTED
  if(vm_property_is_set("gc.mark_prefetch",VM_PROPERTIES) ==1) {
    mark_prefetch = vm_property_get_boolean("gc.mark_prefetch");
  }  
#endif

  return gc;
}