Code example #1
static void resurrect_finalizable_objects(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *finalizable_obj_pool = metadata->finalizable_obj_pool;
  
  if(finalizable_obj_pool_is_empty(gc))
    return;
  
  DURING_RESURRECTION = TRUE;
  
  pool_iterator_init(finalizable_obj_pool);
  Vector_Block *block = pool_iterator_next(finalizable_obj_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      
      /* Perhaps obj has been resurrected by previous resurrections */
      if(!gc_obj_is_dead(gc, p_obj)){
        if(collect_is_minor() && obj_need_move(gc, p_obj))
          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
        continue;
      }
      
      resurrect_obj_tree(collector, p_ref);
      if(collector->result == FALSE){
        /* Resurrection fallback happens */
        assert(collect_is_minor());
        return; /* force return */
      }
    }
    
    block = pool_iterator_next(finalizable_obj_pool);
  }
  
  /* In major, fallback and sweep-compact collections we need to record the p_ref of the root dead obj so it can be updated later.
   * Because it is outside the heap, we can't update it during ref fixing.
   * In minor collections the p_ref of the root dead obj is updated automatically while tracing.
   */
  if(collect_need_update_repset())
    finref_add_repset_from_pool(gc, finalizable_obj_pool);
  metadata->pending_finalizers = TRUE;
  
  DURING_RESURRECTION = FALSE;
  
  /* finalizable objs have been added to the finref repset pool or updated by tracing */
}
Code example #2
void gc_gen_stats_verbose(GC_Gen* gc)
{
  GC_Gen_Stats* stats = gc->stats;
  Boolean is_los_collected = stats->is_los_collected;
  if (collect_is_minor()){
    TRACE2("gc.space", "GC: NOS Collection stats: "
      <<"\nGC: " << (gc_is_gen_mode()?"generational":"nongenerational")
      <<"\nGC: collection algo: " << (minor_is_semispace()?"semi-space":"partial-forward")
      <<"\nGC: num surviving objs: " << stats->nos_surviving_obj_num_minor
      <<"\nGC: size surviving objs: " << verbose_print_size(stats->nos_surviving_obj_size_minor)
      <<"\nGC: surviving ratio: " << (int)(stats->nos_surviving_ratio_minor*100) << "%\n");
  }else{
    TRACE2("gc.space", "GC: MOS Collection stats: "
      <<"\nGC: collection algo: " << (major_is_marksweep()?"mark-sweep":"slide compact")
      <<"\nGC: num surviving objs: "<<stats->nos_mos_suviving_obj_num_major
      <<"\nGC: size surviving objs: "<<verbose_print_size(stats->nos_mos_suviving_obj_size_major)
      <<"\nGC: surviving ratio: "<<(int)(stats->nos_mos_suviving_ratio_major*100)<<"%\n");
  }

  if(is_los_collected) { /* if los was collected, output the los-related info */
    TRACE2("gc.space", "GC: Lspace Collection stats: "
      <<"\nGC: collection algo: "<<(collect_is_major()?"slide compact":"mark sweep")
      <<"\nGC: num surviving objs: "<<stats->los_suviving_obj_num
      <<"\nGC: size surviving objs: "<<verbose_print_size(stats->los_suviving_obj_size)
      <<"\nGC: surviving ratio: "<<(int)(stats->los_surviving_ratio*100)<<"%\n");
  }

}
Code example #3
static void identify_dead_softrefs(Collector *collector)
{
  GC *gc = collector->gc;
  if(collect_is_minor()){
    assert(softref_pool_is_empty(gc));
    return;
  }
  
  Pool *softref_pool = gc->finref_metadata->softref_pool;
  identify_dead_refs(gc, softref_pool);
}
Code example #4
void gc_update_finref_repointed_refs(GC *gc, Boolean double_fix)
{
  assert(!collect_is_minor());
  
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *repset_pool = metadata->repset_pool;
  Pool *fallback_ref_pool = metadata->fallback_ref_pool;
  
  nondestructively_fix_finref_pool(gc, repset_pool, TRUE, double_fix);
  if(!pool_is_empty(fallback_ref_pool)){
    assert(collect_is_fallback());
    nondestructively_fix_finref_pool(gc, fallback_ref_pool, FALSE, double_fix);
  }
}
Code example #5
void collector_identify_finref(Collector *collector)
{
  GC *gc = collector->gc;
  
  gc_set_weakref_sets(gc);
  identify_dead_softrefs(collector);
  identify_dead_weakrefs(collector);
  identify_finalizable_objects(collector);
  resurrect_finalizable_objects(collector);
  gc->collect_result = gc_collection_result(gc);
  if(!gc->collect_result){
    assert(collect_is_minor());
    resurrection_fallback_handler(gc);
    return;
  }
  identify_dead_phanrefs(collector);
}
Code example #6
void gc_gen_stats_reset_before_collection(GC_Gen* gc)
{
  GC_Gen_Stats* stats = gc->stats;

  if(collect_is_minor()){
    stats->nos_surviving_obj_num_minor = 0;
    stats->nos_surviving_obj_size_minor = 0;
    stats->los_suviving_obj_num = 0;
    stats->los_suviving_obj_size = 0;
    stats->is_los_collected = false;
  }else{
    stats->nos_mos_suviving_obj_num_major = 0;
    stats->nos_mos_suviving_obj_size_major = 0;
    stats->los_suviving_obj_num = 0;
    stats->los_suviving_obj_size = 0;
    stats->is_los_collected = false;  
  }
}
Code example #7
static void identify_finalizable_objects(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *obj_with_fin_pool = metadata->obj_with_fin_pool;
  
  gc_reset_finalizable_objects(gc);
  pool_iterator_init(obj_with_fin_pool);
  Vector_Block *block = pool_iterator_next(obj_with_fin_pool);
  while(block){
    unsigned int block_has_ref = 0;
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      if(collect_is_fallback())
        fallback_update_fw_ref(p_ref);  // in case this collection is ALGO_MAJOR_FALLBACK
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      if(!p_obj)
        continue;
      if(gc_obj_is_dead(gc, p_obj)){
        gc_add_finalizable_obj(gc, p_obj);
        *p_ref = (REF)NULL;
      } else {
        if(collect_is_minor() && obj_need_move(gc, p_obj)){
          assert(obj_is_fw_in_oi(p_obj));
          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
        }
        ++block_has_ref;
      }
    }
    if(!block_has_ref)
      vector_block_clear(block);
    
    block = pool_iterator_next(obj_with_fin_pool);
  }
  gc_put_finalizable_objects(gc);
  
  if(collect_need_update_repset())
    finref_add_repset_from_pool(gc, obj_with_fin_pool);
}
Code example #8
static void update_referent_field_ignore_finref(GC *gc, Pool *pool)
{
  Vector_Block *block = pool_get_entry(pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);
        
      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);      
      if(!p_referent){  // referent field has been cleared
        *p_ref = (REF)NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], (Partial_Reveal_Object**)p_referent_field);
          } else {
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
        continue;
      }
      *p_referent_field = (REF)NULL; /* referent is weakly reachable: clear the referent field */
    }
    block = pool_get_entry(pool);
  }
}
Code example #9
void gc_gen_stats_update_after_collection(GC_Gen* gc)
{
  Collector** collector = gc->collectors;
  GC_Gen_Stats* gc_gen_stats = gc->stats;
  GC_Gen_Collector_Stats* collector_stats;
  Boolean is_los_collected = gc_gen_stats->is_los_collected;

  if(collect_is_minor()) {

    for (unsigned int i=0; i<gc->num_active_collectors; i++) {
      collector_stats = (GC_Gen_Collector_Stats*)collector[i]->stats;
      gc_gen_stats->nos_surviving_obj_num_minor += collector_stats->nos_obj_num_moved_minor;
      gc_gen_stats->nos_surviving_obj_size_minor += collector_stats->nos_obj_size_moved_minor;
    }

    gc_gen_stats->nos_surviving_ratio_minor = ((float)gc_gen_stats->nos_surviving_obj_size_minor)/gc->nos->committed_heap_size;

  }else{

    for (unsigned int i=0; i < gc->num_active_collectors; i++) {
      collector_stats = (GC_Gen_Collector_Stats*)collector[i]->stats;
      gc_gen_stats->nos_mos_suviving_obj_num_major += collector_stats->nos_mos_obj_num_moved_major;
      gc_gen_stats->nos_mos_suviving_obj_size_major += collector_stats->nos_mos_obj_size_moved_major;

      /* need to accumulate the los-related info if los is collected during a major collection */
      if(is_los_collected) {
        gc_gen_stats->los_suviving_obj_num += collector_stats->los_obj_num_moved_major;
        gc_gen_stats->los_suviving_obj_size += collector_stats->los_obj_size_moved_major;
      }
    }

    gc_gen_stats->nos_mos_suviving_ratio_major = ((float)gc_gen_stats->nos_mos_suviving_obj_size_major)/(gc->nos->committed_heap_size+gc->mos->committed_heap_size);
  }

  if (is_los_collected) {
    gc_gen_stats->los_surviving_ratio = ((float)gc_gen_stats->los_suviving_obj_size)/gc->los->committed_heap_size;
  }
}
Code example #10
/*
 * The reason why we don't use identify_dead_refs() to implement this function is
 * that we will differentiate phanref from weakref in the future.
 */
static void identify_dead_phanrefs(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *phanref_pool = metadata->phanref_pool;
  
  if(collect_need_update_repset())
    finref_reset_repset(gc);
//  collector_reset_repset(collector);
  pool_iterator_init(phanref_pool);
  Vector_Block *block = pool_iterator_next(phanref_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
      Partial_Reveal_Object *p_obj = read_slot((REF*)p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);

      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);      
      if(!p_referent){  // referent field has been cleared
        *p_ref = NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else { // a collect_move_object() check here would be redundant because obj_need_move() already covers it
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
        continue;
      }
      *p_referent_field = (REF)NULL;
#ifdef ORDER_DEBUG
      if(ref_file == NULL){
        if(order_record){
          ref_file = fopen64("RECORD_REF_LOG.log", "w+");
        }else{
          ref_file = fopen64("REPLAY_REF_LOG.log", "w+");
        }
      }
      assert(ref_file);
      fprintf(ref_file, "GC[%d]: ref (%d, %d) is DEAD!\n", gc->num_collections, p_referent->alloc_tid, p_referent->alloc_count);
      fflush(ref_file);
#endif
      /* Phantom status: for future use
       * if((unsigned int)p_referent & PHANTOM_REF_ENQUEUE_STATUS_MASK){
       *   // enqueued but not explicitly cleared OR pending for enqueueing
       *   *iter = NULL;
       * }
       * resurrect_obj_tree(collector, p_referent_field);
       */
    }
    block = pool_iterator_next(phanref_pool);
  }
//  collector_put_repset(collector);
  if(collect_need_update_repset()){
    finref_put_repset(gc);
    finref_add_repset_from_pool(gc, phanref_pool);
  }
}
Code example #11
static void identify_dead_refs(GC *gc, Pool *pool)
{
  if(collect_need_update_repset())
    finref_reset_repset(gc);

  pool_iterator_init(pool);
  Vector_Block *block = pool_iterator_next(pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);
        
      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
      
      if(!p_referent){  
        /* referent field has been cleared. I forgot why we set p_ref to NULL here.
           I guess it's because this ref_obj was already processed via another p_ref, so
           there is no need to keep the same ref_obj in this p_ref. */
        *p_ref = (REF)NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            /* if it's gen mode and the referent stays in NOS, we need to keep p_referent_field in the collector remset.
               This keeps the ref obj live in the next gen-mode collection even though it is actually only weakly reachable.
               This simplifies the design; otherwise, we would need to remember the ref obj in MOS separately and process it separately. */
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else { // a collect_move_object() check here would be redundant because obj_need_move() already covers it
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
      }else{
        /* the referent is dead (weakly reachable), so clear the referent field */
        *p_referent_field = (REF)NULL;
#ifdef ORDER_DEBUG
        if(ref_file == NULL){
          if(order_record){
            ref_file = fopen64("RECORD_REF_LOG.log", "w+");
          }else{
            ref_file = fopen64("REPLAY_REF_LOG.log", "w+");
          }
        }
        assert(ref_file);
        fprintf(ref_file, "GC[%d]: ref (%d, %d) is DEAD!\n", gc->num_collections, p_referent->alloc_tid, p_referent->alloc_count);
        fflush(ref_file);
#endif
        /* for a dead referent, p_ref is not set to NULL; p_ref keeps the ref object, which
           will be passed to the VM for enqueueing. */
      }
    }/* for each ref object */
    
    block = pool_iterator_next(pool);
  }
  
  if(collect_need_update_repset()){
    finref_put_repset(gc);
    finref_add_repset_from_pool(gc, pool);
  }
}
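
Stripped of the forwarding and remset details, the per-entry logic in identify_dead_refs() has three outcomes. The summary below is a hypothetical standalone sketch; the enum and helper names are illustrative only and are not part of the GC code.

typedef enum { REF_SLOT_DROPPED, REF_KEPT_FOR_ENQUEUE } Ref_Outcome;

/* - referent field already cleared   -> drop the slot, nothing to enqueue
 * - referent still reachable         -> fix up the (possibly moved) referent, then drop the slot
 * - referent dead (weakly reachable) -> clear the referent field and keep the slot, so the
 *                                       reference object can later be handed to the VM for enqueueing */
static Ref_Outcome classify_ref_entry(int referent_cleared, int referent_alive)
{
  if(referent_cleared || referent_alive)
    return REF_SLOT_DROPPED;
  return REF_KEPT_FOR_ENQUEUE;
}
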
Code example #12
// Resurrect the obj tree whose root is the obj which p_ref points to
static inline void resurrect_obj_tree(Collector *collector, REF *p_ref)
{
  GC *gc = collector->gc;
  GC_Metadata *metadata = gc->metadata;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  assert(p_obj && gc_obj_is_dead(gc, p_obj));
  
  void *p_ref_or_obj = p_ref;
  Trace_Object_Func trace_object;
  
  /* set trace_object() function */
  if(collect_is_minor()){
    if(gc_is_gen_mode()){
      if(minor_is_forward())
        trace_object = trace_obj_in_gen_fw;
      else if(minor_is_semispace())
        trace_object = trace_obj_in_gen_ss;
      else 
        assert(0);
    }else{
      if(minor_is_forward())
        trace_object = trace_obj_in_nongen_fw;
      else if(minor_is_semispace())
        trace_object = trace_obj_in_nongen_ss;
      else 
        assert(0);
    }
  } else if(collect_is_major_normal() || !gc_has_nos()){
    p_ref_or_obj = p_obj;
    if(gc_has_space_tuner(gc) && (gc->tuner->kind != TRANS_NOTHING)){
      trace_object = trace_obj_in_space_tune_marking;
      unsigned int obj_size = vm_object_size(p_obj);
#ifdef USE_32BITS_HASHCODE
      obj_size += hashcode_is_set(p_obj) ? GC_OBJECT_ALIGNMENT : 0;
#endif
      if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc))){
        collector->non_los_live_obj_size += obj_size;
        collector->segment_live_size[SIZE_TO_SEGMENT_INDEX(obj_size)] += obj_size;
      } else {
        collector->los_live_obj_size += round_up_to_size(obj_size, KB); 
      }
    } else if(!gc_has_nos()){
      trace_object = trace_obj_in_ms_marking;
    } else {
      trace_object = trace_obj_in_normal_marking;
    }
  } else if(collect_is_fallback()){
    if(major_is_marksweep())
      trace_object = trace_obj_in_ms_fallback_marking;
    else
      trace_object = trace_obj_in_fallback_marking;
  } else {
    assert(major_is_marksweep());
    p_ref_or_obj = p_obj;
    if(gc->gc_concurrent_status == GC_CON_NIL)
      trace_object = trace_obj_in_ms_marking;
    else
      trace_object = trace_obj_in_ms_concurrent_mark;
  }
  
  collector->trace_stack = free_task_pool_get_entry(metadata);
  collector_tracestack_push(collector, p_ref_or_obj);
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  collector->trace_stack = free_task_pool_get_entry(metadata);
  Vector_Block *task_block = pool_get_entry(metadata->mark_task_pool);
  while(task_block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(task_block);
    while(!vector_block_iterator_end(task_block, iter)){
      void *p_ref_or_obj = (void*)*iter;
      assert(((collect_is_minor()||collect_is_fallback()) && *(Partial_Reveal_Object **)p_ref_or_obj)
              || ((collect_is_major_normal()||major_is_marksweep()||!gc_has_nos()) && p_ref_or_obj));
      trace_object(collector, p_ref_or_obj);
      if(collector->result == FALSE)  break; /* Resurrection fallback happens; force return */
      
      iter = vector_block_iterator_advance(task_block, iter);
    }
    vector_stack_clear(task_block);
    pool_put_entry(metadata->free_task_pool, task_block);
    
    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }
    
    task_block = pool_get_entry(metadata->mark_task_pool);
  }
  
  task_block = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(task_block);
  pool_put_entry(metadata->free_task_pool, task_block);
  collector->trace_stack = NULL;
}
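
The second half of resurrect_obj_tree() is a standard work-list trace: seed the task pool with the root, then drain pending entries while trace_object() pushes newly reached objects back in. Below is a minimal single-threaded sketch of that pattern; the names are illustrative and not the GC's real API.

#define WORK_CAP 64

typedef struct { void *slots[WORK_CAP]; unsigned int top; } Work_Stack;

static void work_push(Work_Stack *s, void *p){ if(s->top < WORK_CAP) s->slots[s->top++] = p; }
static void *work_pop(Work_Stack *s){ return s->top ? s->slots[--s->top] : NULL; }

/* Seed the work list with the dead root, then repeatedly pop an entry and trace it;
 * trace_one() is expected to mark/forward the object and push any newly reached children. */
static void resurrect_tree_sketch(Work_Stack *work, void *root,
                                  void (*trace_one)(Work_Stack*, void*))
{
  work_push(work, root);
  for(void *obj = work_pop(work); obj; obj = work_pop(work))
    trace_one(work, obj);
}

The real code shares Vector_Blocks through metadata->mark_task_pool so several collectors can drain the same work list in parallel; the sketch collapses that to a single stack.
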
Code example #13
/* The tuning size computed before marking is not precise; we only estimate the probable direction of space tuning.
 * If this function decides to set TRANS_NOTHING, we just call the normal marking function.
 * Otherwise, we call the marking function for space tuning. */
void gc_compute_space_tune_size_before_marking(GC* gc)
{
  if(collect_is_minor())  return;
  
  gc_decide_space_tune(gc);
  
  Space_Tuner* tuner = gc->tuner;
  assert((tuner->speed_los != 0) && (tuner->speed_mos != 0));
  if((!tuner->need_tune) && (!tuner->force_tune)) return;
  
  Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
  Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
  Space* lspace = (Space*)gc_get_los((GC_Gen*)gc);

  POINTER_SIZE_INT los_expect_surviving_sz = (POINTER_SIZE_INT)((float)(lspace->last_surviving_size + lspace->last_alloced_size) * lspace->survive_ratio);
  POINTER_SIZE_INT los_expect_free_sz = ((lspace->committed_heap_size > los_expect_surviving_sz) ? 
                                                            (lspace->committed_heap_size - los_expect_surviving_sz) : 0);
  
  POINTER_SIZE_INT mos_expect_survive_sz = (POINTER_SIZE_INT)((float)(mspace->period_surviving_size + mspace->accumu_alloced_size) * mspace->survive_ratio);
  float mos_expect_threshold_ratio = mspace_get_expected_threshold_ratio((Mspace*)mspace);
  POINTER_SIZE_INT mos_expect_threshold = (POINTER_SIZE_INT)((mspace->committed_heap_size + fspace->committed_heap_size) * mos_expect_threshold_ratio);
  POINTER_SIZE_INT mos_expect_free_sz = ((mos_expect_threshold > mos_expect_survive_sz)?
                                                            (mos_expect_threshold - mos_expect_survive_sz) : 0);

  POINTER_SIZE_INT non_los_expect_surviving_sz = (POINTER_SIZE_INT)(mos_expect_survive_sz + fspace->last_alloced_size * fspace->survive_ratio);
  POINTER_SIZE_INT non_los_committed_size = mspace->committed_heap_size + fspace->committed_heap_size;
  POINTER_SIZE_INT non_los_expect_free_sz = (non_los_committed_size > non_los_expect_surviving_sz) ? (non_los_committed_size - non_los_expect_surviving_sz):(0) ;

#ifdef SPACE_TUNE_BY_MAJOR_SPEED
  /*Fixme: tuner->speed_los here should be computed by sliding compact LOS, to be implemented!*/
  POINTER_SIZE_INT total_expect_free_sz = los_expect_free_sz + mos_expect_free_sz;
  float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los  + tuner->speed_mos);
  POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_expect_free_sz * new_los_ratio);
#else
  POINTER_SIZE_INT total_expect_free_sz = los_expect_free_sz + non_los_expect_free_sz;
  float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los  + tuner->speed_nos);
  POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_expect_free_sz * new_los_ratio);
#endif


  /*LOS_Extend:*/
  if((new_free_los_sz > los_expect_free_sz) )
  { 
    tuner->kind = TRANS_FROM_MOS_TO_LOS;
    tuner->tuning_size = new_free_los_sz - los_expect_free_sz;
  }
  /*LOS_Shrink:*/
  else if(new_free_los_sz < los_expect_free_sz)
  {
    tuner->kind = TRANS_FROM_LOS_TO_MOS;
    tuner->tuning_size = los_expect_free_sz - new_free_los_sz;
  }
  /*Nothing*/
  else
  {    
    tuner->tuning_size = 0;
  }

  /* If force tune is not set and the tuning size is too small, the tuner will not take effect. */
  if( (!tuner->force_tune) && (tuner->tuning_size < tuner->min_tuning_size) ){
    tuner->kind = TRANS_NOTHING;
    tuner->tuning_size = 0;
  }

  /* If los or non-los is already at its smallest size, there is no need to tune anymore,
   * but we still give "force tune" a chance to extend the whole heap size below.
   */
  if(((lspace->committed_heap_size <= min_los_size_bytes) && (tuner->kind == TRANS_FROM_LOS_TO_MOS)) ||
      ((fspace->committed_heap_size + mspace->committed_heap_size <= min_none_los_size_bytes) && (tuner->kind == TRANS_FROM_MOS_TO_LOS))){
    assert((lspace->committed_heap_size == min_los_size_bytes) || (fspace->committed_heap_size + mspace->committed_heap_size == min_none_los_size_bytes));
    tuner->kind = TRANS_NOTHING;
    tuner->tuning_size = 0;
  }

  /* If the strategy above doesn't decide to extend los, but the current GC was triggered by los, force an extension here. */
  if(tuner->force_tune){
    if(tuner->kind != TRANS_FROM_MOS_TO_LOS){
      tuner->kind = TRANS_FROM_MOS_TO_LOS;
      tuner->tuning_size = 0;
    }
  }

  return;
}