Example #1
0
/* Scan MOS and LOS for objects left unreachable by the collection;
 * only meaningful for the generational configurations, and only after a GC. */
void verifier_scan_unreachable_objects(Heap_Verifier* heap_verifier)
{
#if !defined(USE_UNIQUE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
  if(heap_verifier->is_before_gc) return;
  GC_Gen* gc     = (GC_Gen*)heap_verifier->gc;
  Space* mspace  = gc_get_mos(gc);
  Space* lspace  = gc_get_los(gc);
  
  verifier_scan_mos_unreachable_objects(mspace, heap_verifier);
  verifier_scan_los_unreachable_objects(lspace, heap_verifier);
#else
  return;
#endif
}
Example #2
0
/* Calculate the allocation speed and the wasted memory of each space respectively,
  * then decide whether to execute a space tuning based on that information. */
static void gc_decide_space_tune(GC* gc)
{
  Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
  Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);  
  Space* lspace = (Space*)gc_get_los((GC_Gen*)gc);  
  Space_Tuner* tuner = gc->tuner;

  tuner->speed_los = lspace->accumu_alloced_size;
  tuner->speed_los = (tuner->speed_los + tuner->last_speed_los) >> 1;
  /* The possible survivors of objects newly allocated in NOS should be counted into the MOS speed */
  tuner->speed_mos = mspace->accumu_alloced_size + (uint64)((float)fspace->last_alloced_size * fspace->survive_ratio);
  tuner->speed_mos = (tuner->speed_mos + tuner->last_speed_mos) >> 1;
  tuner->speed_nos = fspace->accumu_alloced_size;
  tuner->speed_nos = (tuner->speed_nos + tuner->last_speed_nos) >> 1;
  
  /* Compute statistics for wasted memory */
  uint64 curr_used_los = lspace->last_surviving_size + lspace->last_alloced_size;
  uint64 curr_wast_los = 0;
  if(gc->cause != GC_CAUSE_LOS_IS_FULL) curr_wast_los = lspace->committed_heap_size - curr_used_los;
  tuner->wast_los += (POINTER_SIZE_INT)curr_wast_los;
  
  uint64 curr_used_mos = mspace->period_surviving_size + mspace->accumu_alloced_size + (uint64)(fspace->last_alloced_size * fspace->survive_ratio);
  float expected_mos_ratio = mspace_get_expected_threshold_ratio((Mspace*)mspace);
  uint64 expected_mos = (uint64)((mspace->committed_heap_size + fspace->committed_heap_size) * expected_mos_ratio);
  uint64 curr_wast_mos = 0;
  if(expected_mos > curr_used_mos) curr_wast_mos = expected_mos - curr_used_mos;
  tuner->wast_mos += curr_wast_mos;

  tuner->current_dw = ABS_DIFF(tuner->wast_mos, tuner->wast_los);

  /* For statistics: ds used in the heuristic */
  tuner->current_ds = (unsigned int)((float)fspace->committed_heap_size * fspace->survive_ratio);
  /* Fixme: the threshold should be computed by a heuristic; tslow and the total recycled heap size should be gathered as statistics. */
  tuner->threshold_waste = tuner->current_ds;
  if(tuner->threshold_waste > 8 * MB) tuner->threshold_waste = 8 * MB;
  tuner->min_tuning_size = tuner->current_ds;
  if(tuner->min_tuning_size > 4 * MB) tuner->min_tuning_size = 4 * MB;  

  if(tuner->speed_los == 0) tuner->speed_los = 16;
  if(tuner->speed_mos == 0) tuner->speed_mos = 16;

  /* No need to tune if dw does not reach the threshold. */
  if(tuner->current_dw > tuner->threshold_waste)  tuner->need_tune = 1;
  /* If LOS is full, we should tune at least "tuner->least_tuning_size" bytes */
  if(gc->cause == GC_CAUSE_LOS_IS_FULL) tuner->force_tune = 1;

  return;
}
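The speed fields above are smoothed by averaging the current accumulated allocation size with the previous cycle's value via a right shift, and the MOS/LOS waste imbalance (ABS_DIFF) is then compared against a capped threshold. The standalone sketch below illustrates just that smoothing and threshold check; the names are hypothetical and it is not the GC's own helper code.

/* Standalone sketch (hypothetical names): the two-sample speed smoothing
 * and waste-imbalance check used by gc_decide_space_tune above. */
#include <stdint.h>

typedef struct {
  uint64_t speed_los, last_speed_los;
  uint64_t speed_mos, last_speed_mos;
  uint64_t wast_los, wast_mos;
} Sketch_Tuner;

static int sketch_need_tune(Sketch_Tuner* t, uint64_t threshold_waste)
{
  /* Average the current speed with the previous one: (a + b) >> 1. */
  t->speed_los = (t->speed_los + t->last_speed_los) >> 1;
  t->speed_mos = (t->speed_mos + t->last_speed_mos) >> 1;

  /* |wast_mos - wast_los|, the quantity ABS_DIFF computes above. */
  uint64_t dw = (t->wast_mos > t->wast_los) ? (t->wast_mos - t->wast_los)
                                            : (t->wast_los - t->wast_mos);

  /* Tune only when the imbalance exceeds the (capped) threshold. */
  return dw > threshold_waste;
}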
Example #3
0
/* Scan all objects in NOS, MOS and LOS (generational configurations only). */
void verifier_scan_all_objects(Heap_Verifier* heap_verifier)
{
#if !defined(USE_UNIQUE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
  GC_Gen* gc     = (GC_Gen*)heap_verifier->gc;
  Space* fspace  = gc_get_nos(gc);
  Space* mspace  = gc_get_mos(gc);
  Space* lspace  = gc_get_los(gc);
  
  verifier_scan_nos_mos_objects(fspace, heap_verifier);
  verifier_scan_nos_mos_objects(mspace, heap_verifier);
  verifier_scan_los_objects(lspace, heap_verifier);
#else
  assert(0);
#endif
}
Example #4
0
void* lspace_alloc(unsigned size, Allocator *allocator)
{
    unsigned int try_count = 0;
    void* p_result = NULL;
    POINTER_SIZE_INT alloc_size = ALIGN_UP_TO_KILO(size);
    Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)allocator->gc);
    Free_Area_Pool* pool = lspace->free_pool;
    
    while( try_count < 2 ){
        if((p_result = lspace_try_alloc(lspace, alloc_size)) != NULL)
          return p_result;

        /* Failed: no adequate area was found in any free list, so run a GC first, then try again. */
        if(try_count == 0){
            vm_gc_lock_enum();
            /* Check again whether there is space for the object: another mutator
               thread may have triggered a GC while we were waiting for the gc lock. */
            if((p_result = lspace_try_alloc(lspace, alloc_size)) != NULL){
              vm_gc_unlock_enum();
              return p_result;            
            }
            lspace->failure_size = round_up_to_size(alloc_size, KB);

            gc_reclaim_heap(allocator->gc, GC_CAUSE_LOS_IS_FULL);

            if(lspace->success_ptr){
              p_result = lspace->success_ptr;
              lspace->success_ptr = NULL;
              vm_gc_unlock_enum();
              return p_result;
            }
            vm_gc_unlock_enum();
            try_count ++;
        }else{
            try_count ++;
        }
    }
    return NULL;
}
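lspace_alloc follows a check / lock / re-check / collect pattern: a lock-free attempt, a second attempt under the GC lock (another mutator may have triggered a collection while we waited), then a GC flagged GC_CAUSE_LOS_IS_FULL and a final look at success_ptr. Below is a generic sketch of that shape only; a pthread mutex and two declared stand-ins replace the VM's own lock and GC entry points, so it is an illustration, not the allocator's real control flow.

/* Generic sketch of the check / lock / re-check / collect pattern above.
 * try_alloc_once() and run_gc() are hypothetical stand-ins, declared here
 * only so the sketch is a complete translation unit. */
#include <stddef.h>
#include <pthread.h>

extern void* try_alloc_once(size_t size);   /* assumed: one lock-free attempt */
extern void  run_gc(void);                  /* assumed: triggers a collection */

static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;

void* alloc_with_one_gc_retry(size_t size)
{
  void* p = try_alloc_once(size);           /* fast path, no lock */
  if (p) return p;

  pthread_mutex_lock(&gc_lock);
  p = try_alloc_once(size);                 /* another thread may have GCed
                                               while we waited for the lock */
  if (!p) {
    run_gc();                               /* collect, then one last try */
    p = try_alloc_once(size);
  }
  pthread_mutex_unlock(&gc_lock);
  return p;                                 /* NULL means the caller sees OOM */
}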
Example #5
0
// Resurrect the object tree whose root is the object that p_ref points to
static inline void resurrect_obj_tree(Collector *collector, REF *p_ref)
{
  GC *gc = collector->gc;
  GC_Metadata *metadata = gc->metadata;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  assert(p_obj && gc_obj_is_dead(gc, p_obj));
  
  void *p_ref_or_obj = p_ref;
  Trace_Object_Func trace_object;
  
  /* set trace_object() function */
  if(collect_is_minor()){
    if(gc_is_gen_mode()){
      if(minor_is_forward())
        trace_object = trace_obj_in_gen_fw;
      else if(minor_is_semispace())
        trace_object = trace_obj_in_gen_ss;
      else 
        assert(0);
    }else{
      if(minor_is_forward())
        trace_object = trace_obj_in_nongen_fw;
      else if(minor_is_semispace())
        trace_object = trace_obj_in_nongen_ss;
      else 
        assert(0);
    }
  } else if(collect_is_major_normal() || !gc_has_nos()){
    p_ref_or_obj = p_obj;
    if(gc_has_space_tuner(gc) && (gc->tuner->kind != TRANS_NOTHING)){
      trace_object = trace_obj_in_space_tune_marking;
      unsigned int obj_size = vm_object_size(p_obj);
#ifdef USE_32BITS_HASHCODE
      obj_size += hashcode_is_set(p_obj) ? GC_OBJECT_ALIGNMENT : 0;
#endif
      if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc))){
        collector->non_los_live_obj_size += obj_size;
        collector->segment_live_size[SIZE_TO_SEGMENT_INDEX(obj_size)] += obj_size;
      } else {
        collector->los_live_obj_size += round_up_to_size(obj_size, KB); 
      }
    } else if(!gc_has_nos()){
      trace_object = trace_obj_in_ms_marking;
    } else {
      trace_object = trace_obj_in_normal_marking;
    }
  } else if(collect_is_fallback()){
    if(major_is_marksweep())
      trace_object = trace_obj_in_ms_fallback_marking;
    else
      trace_object = trace_obj_in_fallback_marking;
  } else {
    assert(major_is_marksweep());
    p_ref_or_obj = p_obj;
    if( gc->gc_concurrent_status == GC_CON_NIL )
      trace_object = trace_obj_in_ms_marking;
    else
      trace_object = trace_obj_in_ms_concurrent_mark;
  }
  
  collector->trace_stack = free_task_pool_get_entry(metadata);
  collector_tracestack_push(collector, p_ref_or_obj);
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  collector->trace_stack = free_task_pool_get_entry(metadata);
  Vector_Block *task_block = pool_get_entry(metadata->mark_task_pool);
  while(task_block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(task_block);
    while(!vector_block_iterator_end(task_block, iter)){
      void *p_ref_or_obj = (void*)*iter;
      assert(((collect_is_minor()||collect_is_fallback()) && *(Partial_Reveal_Object **)p_ref_or_obj)
              || ((collect_is_major_normal()||major_is_marksweep()||!gc_has_nos()) && p_ref_or_obj));
      trace_object(collector, p_ref_or_obj);
      if(collector->result == FALSE)  break; /* Resurrection fallback happens; force return */
      
      iter = vector_block_iterator_advance(task_block, iter);
    }
    vector_stack_clear(task_block);
    pool_put_entry(metadata->free_task_pool, task_block);
    
    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }
    
    task_block = pool_get_entry(metadata->mark_task_pool);
  }
  
  task_block = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(task_block);
  pool_put_entry(metadata->free_task_pool, task_block);
  collector->trace_stack = NULL;
}
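The tail of resurrect_obj_tree is a worklist drain: the root entry is pushed onto a trace stack, the stack is published to the shared mark_task_pool, and blocks are then popped and traced until the pool is empty, each trace possibly pushing more work. The single-threaded sketch below (illustrative names, plain array stack) shows the same shape without the pooled vector blocks or the fallback/abort handling.

/* Single-threaded sketch of the worklist-drain loop used above: trace() may
 * push more entries, and draining continues until the stack is empty. */
#include <stddef.h>

#define SKETCH_STACK_CAP 1024
static void*  sketch_stack[SKETCH_STACK_CAP];
static size_t sketch_top = 0;

static void sketch_push(void* entry)
{
  if (sketch_top < SKETCH_STACK_CAP)
    sketch_stack[sketch_top++] = entry;
}

static void sketch_drain(void (*trace)(void* entry))
{
  while (sketch_top > 0) {
    void* entry = sketch_stack[--sketch_top];
    trace(entry);   /* the tracer may call sketch_push() for referenced objects */
  }
}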
Example #6
0
void slide_compact_mspace(Collector* collector)
{
    GC* gc = collector->gc;
    Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
    Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc);

    unsigned int num_active_collectors = gc->num_active_collectors;

    /* Pass 1: **************************************************
      * mark all live objects in the heap, and save all the slots that
      * have references that are going to be repointed.
      */

    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: marking...");

    unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);

    if(collect_is_fallback())
        mark_scan_heap_for_fallback(collector);
    else if(gc->tuner->kind != TRANS_NOTHING)
        mark_scan_heap_for_space_tune(collector);
    else
        mark_scan_heap(collector);
    old_num = atomic_inc32(&num_marking_collectors);

    /* last collector's world here */
    if( ++old_num == num_active_collectors ) {

        if(!IGNORE_FINREF )
            collector_identify_finref(collector);
#ifndef BUILD_IN_REFERENT
        else {
            gc_set_weakref_sets(gc);
            gc_update_weakref_ignore_finref(gc);
        }
#endif
        gc_identify_dead_weak_roots(gc);

        if( gc->tuner->kind != TRANS_NOTHING ) gc_compute_space_tune_size_after_marking(gc);
        //assert(!(gc->tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
        /* prepare for next phase */
        gc_init_block_for_collectors(gc, mspace);

#ifdef USE_32BITS_HASHCODE
        if(collect_is_fallback())
            fallback_clear_fwd_obj_oi_init(collector);
#endif

        last_block_for_dest = NULL;
        /* let other collectors go */
        num_marking_collectors++;
    }
    while(num_marking_collectors != num_active_collectors + 1);

    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass1 and start pass2: relocating mos&nos...");

    /* Pass 2: **************************************************
       assign target addresses for all to-be-moved objects */

    atomic_cas32( &num_repointing_collectors, 0, num_active_collectors+1);

#ifdef USE_32BITS_HASHCODE
    if(collect_is_fallback())
        fallback_clear_fwd_obj_oi(collector);
#endif
    mspace_compute_object_target(collector, mspace);

    old_num = atomic_inc32(&num_repointing_collectors);
    /*last collector's world here*/
    if( ++old_num == num_active_collectors ) {
        if(lspace->move_object) {
            TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: relocating los ...");
            lspace_compute_object_target(collector, lspace);
        }
        gc->collect_result = gc_collection_result(gc);
        if(!gc->collect_result) {
            num_repointing_collectors++;
            return;
        }
        gc_reset_block_for_collectors(gc, mspace);
        gc_init_block_for_fix_repointed_refs(gc, mspace);
        num_repointing_collectors++;
    }
    while(num_repointing_collectors != num_active_collectors + 1);
    if(!gc->collect_result) return;
    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass2 and start pass3: repointing...");

    /* Pass 3: **************************************************
      *update all references whose objects are to be moved
      */
    old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1);
    mspace_fix_repointed_refs(collector, mspace);
    old_num = atomic_inc32(&num_fixing_collectors);
    /*last collector's world here */
    if( ++old_num == num_active_collectors ) {
        lspace_fix_repointed_refs(collector, lspace);
        gc_fix_rootset(collector, FALSE);
        gc_init_block_for_sliding_compact(gc, mspace);
        /* LOS_Shrink: this operation moves objects in LOS, and should be part of Pass 4.
          * lspace_sliding_compact is not tied to LOS shrinking; we can slide-compact LOS independently,
          * so we check the flag lspace->move_object here, not tuner->kind == TRANS_FROM_LOS_TO_MOS.
          */
        if(lspace->move_object)  lspace_sliding_compact(collector, lspace);
        /* The temporary blocks storing interim information are copied to their real destinations,
          * and the space for those blocks, allocated in gc_space_tuner_init_fake_blocks_for_los_shrink, is freed.
          */
        last_block_for_dest = (Block_Header *)round_down_to_size((POINTER_SIZE_INT)last_block_for_dest->base, GC_BLOCK_SIZE_BYTES);
        if(gc->tuner->kind == TRANS_FROM_LOS_TO_MOS) gc_space_tuner_release_fake_blocks_for_los_shrink(gc);
        num_fixing_collectors++;
    }
    while(num_fixing_collectors != num_active_collectors + 1);

    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass3 and start pass4: moving...");

    /* Pass 4: **************************************************
       move objects                                             */

    atomic_cas32( &num_moving_collectors, 0, num_active_collectors);

    mspace_sliding_compact(collector, mspace);

    atomic_inc32(&num_moving_collectors);
    while(num_moving_collectors != num_active_collectors);

    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass4 and start pass 5: restoring obj_info...");

    /* Pass 5: **************************************************
       restore obj_info                                         */

    atomic_cas32( &num_restoring_collectors, 0, num_active_collectors+1);

    collector_restore_obj_info(collector);
#ifdef USE_32BITS_HASHCODE
    collector_attach_hashcode(collector);
#endif

    old_num = atomic_inc32(&num_restoring_collectors);

    if( ++old_num == num_active_collectors ) {
        if(gc->tuner->kind != TRANS_NOTHING)
            mspace_update_info_after_space_tuning(mspace);
        num_restoring_collectors++;
    }
    while(num_restoring_collectors != num_active_collectors + 1);

    /* Dealing with out of memory in mspace */
    void* mspace_border = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
    if( mspace_border > nos_boundary) {
        atomic_cas32( &num_extending_collectors, 0, num_active_collectors);

        mspace_extend_compact(collector);

        atomic_inc32(&num_extending_collectors);
        while(num_extending_collectors != num_active_collectors);
    }

    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass5 and done.");

    return;
}
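The pass boundaries in slide_compact_mspace are counter barriers: the counter is CAS-reset to 0 from the sentinel left by the previous cycle, every collector increments it after finishing its parallel share, the last one to arrive performs the serial bookkeeping and bumps the counter once more to num_active_collectors + 1, and everyone spins on that sentinel. The sketch below expresses that idiom with C++11 atomics (the real code uses atomic_cas32/atomic_inc32) and assumes the counter starts at the previous cycle's sentinel; it is a reading of the pattern, not the collector's code.

// Sketch of the "last collector runs the serial step" barrier used between passes.
#include <atomic>

static std::atomic<unsigned> phase_count;

void phase_barrier(unsigned num_active, void (*parallel_work)(), void (*serial_work)())
{
    unsigned sentinel = num_active + 1;                 // value left by the previous phase
    phase_count.compare_exchange_strong(sentinel, 0);   // first arrival resets the counter

    parallel_work();                                    // every collector's share of the pass

    unsigned old_num = phase_count.fetch_add(1);        // returns the value before the add
    if (old_num + 1 == num_active) {
        serial_work();                                  // exactly one thread runs this
        phase_count.fetch_add(1);                       // release the spinning collectors
    }
    while (phase_count.load() != num_active + 1) ;      // wait for the serial step to finish
}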
Example #7
0
/* The tuning size computed before marking is not precise; we only estimate the probable direction of space tuning.
  * If this function decides to set TRANS_NOTHING, we just call the normal marking function;
  * otherwise we call the marking function for space tuning. */
void gc_compute_space_tune_size_before_marking(GC* gc)
{
  if(collect_is_minor())  return;
  
  gc_decide_space_tune(gc);
  
  Space_Tuner* tuner = gc->tuner;
  assert((tuner->speed_los != 0) && (tuner->speed_mos != 0));
  if((!tuner->need_tune) && (!tuner->force_tune)) return;
  
  Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
  Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
  Space* lspace = (Space*)gc_get_los((GC_Gen*)gc);

  POINTER_SIZE_INT los_expect_surviving_sz = (POINTER_SIZE_INT)((float)(lspace->last_surviving_size + lspace->last_alloced_size) * lspace->survive_ratio);
  POINTER_SIZE_INT los_expect_free_sz = ((lspace->committed_heap_size > los_expect_surviving_sz) ? 
                                                            (lspace->committed_heap_size - los_expect_surviving_sz) : 0);
  
  POINTER_SIZE_INT mos_expect_survive_sz = (POINTER_SIZE_INT)((float)(mspace->period_surviving_size + mspace->accumu_alloced_size) * mspace->survive_ratio);
  float mos_expect_threshold_ratio = mspace_get_expected_threshold_ratio((Mspace*)mspace);
  POINTER_SIZE_INT mos_expect_threshold = (POINTER_SIZE_INT)((mspace->committed_heap_size + fspace->committed_heap_size) * mos_expect_threshold_ratio);
  POINTER_SIZE_INT mos_expect_free_sz = ((mos_expect_threshold > mos_expect_survive_sz)?
                                                            (mos_expect_threshold - mos_expect_survive_sz) : 0);

  POINTER_SIZE_INT non_los_expect_surviving_sz = (POINTER_SIZE_INT)(mos_expect_survive_sz + fspace->last_alloced_size * fspace->survive_ratio);
  POINTER_SIZE_INT non_los_committed_size = mspace->committed_heap_size + fspace->committed_heap_size;
  POINTER_SIZE_INT non_los_expect_free_sz = (non_los_committed_size > non_los_expect_surviving_sz) ? (non_los_committed_size - non_los_expect_surviving_sz):(0) ;

#ifdef SPACE_TUNE_BY_MAJOR_SPEED
  /* Fixme: tuner->speed_los here should be computed assuming LOS is slide-compacted; to be implemented! */
  POINTER_SIZE_INT total_expect_free_sz = los_expect_free_sz + mos_expect_free_sz;
  float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los  + tuner->speed_mos);
  POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_expect_free_sz * new_los_ratio);
#else
  POINTER_SIZE_INT total_expect_free_sz = los_expect_free_sz + non_los_expect_free_sz;
  float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los  + tuner->speed_nos);
  POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_expect_free_sz * new_los_ratio);
#endif


  /*LOS_Extend:*/
  if((new_free_los_sz > los_expect_free_sz) )
  { 
    tuner->kind = TRANS_FROM_MOS_TO_LOS;
    tuner->tuning_size = new_free_los_sz - los_expect_free_sz;
  }
  /*LOS_Shrink:*/
  else if(new_free_los_sz < los_expect_free_sz)
  {
    tuner->kind = TRANS_FROM_LOS_TO_MOS;
    tuner->tuning_size = los_expect_free_sz - new_free_los_sz;
  }
  /*Nothing*/
  else
  {    
    tuner->tuning_size = 0;
  }

  /* If not forced to tune and the tuning size is too small, the tuner will not take effect. */
  if( (!tuner->force_tune) && (tuner->tuning_size < tuner->min_tuning_size) ){
    tuner->kind = TRANS_NOTHING;
    tuner->tuning_size = 0;
  }

  /* If LOS or non-LOS is already at its minimum size, there is no need to tune any further.
   * But we give "force tune" a chance to extend the whole heap size below.
   */
  if(((lspace->committed_heap_size <= min_los_size_bytes) && (tuner->kind == TRANS_FROM_LOS_TO_MOS)) ||
      ((fspace->committed_heap_size + mspace->committed_heap_size <= min_none_los_size_bytes) && (tuner->kind == TRANS_FROM_MOS_TO_LOS))){
    assert((lspace->committed_heap_size == min_los_size_bytes) || (fspace->committed_heap_size + mspace->committed_heap_size == min_none_los_size_bytes));
    tuner->kind = TRANS_NOTHING;
    tuner->tuning_size = 0;
  }

  /* If the strategy above did not decide to extend LOS, but the current GC was caused by LOS, force an extension here. */
  if(tuner->force_tune){
    if(tuner->kind != TRANS_FROM_MOS_TO_LOS){
      tuner->kind = TRANS_FROM_MOS_TO_LOS;
      tuner->tuning_size = 0;
    }
  }

  return;
}
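A worked example of the proportional split above (SPACE_TUNE_BY_MAJOR_SPEED path, illustrative numbers only): with los_expect_free_sz = 10 MB, mos_expect_free_sz = 30 MB, speed_los = 2 MB and speed_mos = 6 MB per collection, new_los_ratio = 2 / (2 + 6) = 0.25 and new_free_los_sz = 0.25 * 40 MB = 10 MB, so tuning_size is 0 and no transfer happens. If speed_los were 4 MB instead, the ratio would be 0.4, new_free_los_sz would be 16 MB, and the tuner would request TRANS_FROM_MOS_TO_LOS with tuning_size = 6 MB, subject to the min_tuning_size and minimum-space checks at the end of the function.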
Example #8
0
/* This is the function that really decides tuning_size, because we know the total size of live objects after "mark_scan_heap_for_space_tune". */
void gc_compute_space_tune_size_after_marking(GC *gc)
{
  Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
  Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
  Lspace *lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
  Space_Tuner* tuner = gc->tuner;

  POINTER_SIZE_INT max_tuning_size = 0;  
  POINTER_SIZE_INT non_los_size = mspace->committed_heap_size + fspace->committed_heap_size;
  if(LOS_ADJUST_BOUNDARY) 
    gc_compute_live_object_size_after_marking(gc, non_los_size);
  else {
    unsigned int collector_num = gc->num_active_collectors;
    POINTER_SIZE_INT reserve_size = collector_num <<(GC_BLOCK_SHIFT_COUNT+2);
    los_live_obj_size = (POINTER_SIZE_INT) lspace->last_surviving_size + reserve_size;
    non_los_live_obj_size = ((POINTER_SIZE_INT)(mspace->free_block_idx-mspace->first_block_idx)<<GC_BLOCK_SHIFT_COUNT)+reserve_size;
    non_los_live_obj_size = round_up_to_size(non_los_live_obj_size, SPACE_ALLOC_UNIT); 
  }
  check_tuning_size(gc);
  
  /* We should ensure that the non_los area is no smaller than min_none_los_size_bytes */
  POINTER_SIZE_INT max_tune_for_min_non_los = 0;
  if(non_los_size > min_none_los_size_bytes)
    max_tune_for_min_non_los = non_los_size - min_none_los_size_bytes;
  POINTER_SIZE_INT max_tune_for_min_los = 0;
  //debug_adjust
  assert(lspace->committed_heap_size >= min_los_size_bytes);
  max_tune_for_min_los = lspace->committed_heap_size - min_los_size_bytes;

  /*Not force tune, LOS_Extend:*/
  if(tuner->kind == TRANS_FROM_MOS_TO_LOS)
  {
    if (gc->committed_heap_size > lspace->committed_heap_size + non_los_live_obj_size){
      max_tuning_size = gc->committed_heap_size - lspace->committed_heap_size - non_los_live_obj_size;
      if(max_tuning_size > max_tune_for_min_non_los)
        max_tuning_size = max_tune_for_min_non_los;
      if( tuner->tuning_size > max_tuning_size)
        tuner->tuning_size = max_tuning_size;
      /*Round down so as not to break max_tuning_size*/
      if(LOS_ADJUST_BOUNDARY)
        tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
      else
        tuner->tuning_size = round_down_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);

      /* If the tuning size is zero, we should reset kind to NOTHING, in case gc_init_block_for_collectors relinks the block list. */
      if(tuner->tuning_size == 0)  tuner->kind = TRANS_NOTHING;
    }else{ 
      tuner->tuning_size = 0;
      tuner->kind = TRANS_NOTHING;
    }
  }
  /*Not force tune, LOS Shrink*/
  else
  {    
    if(lspace->committed_heap_size > los_live_obj_size){
      max_tuning_size = lspace->committed_heap_size - los_live_obj_size;
      if(max_tuning_size > max_tune_for_min_los)
        max_tuning_size = max_tune_for_min_los;
      if(tuner->tuning_size > max_tuning_size) 
        tuner->tuning_size = max_tuning_size;
      /*Round down so as not to break max_tuning_size*/

      if (LOS_ADJUST_BOUNDARY)
        tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
      else
        tuner->tuning_size = round_down_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);

      if(tuner->tuning_size == 0)  tuner->kind = TRANS_NOTHING;
    }else{
      /* this is possible because of the reservation in gc_compute_live_object_size_after_marking*/        
      tuner->tuning_size = 0;
      tuner->kind = TRANS_NOTHING;
    }
  }

  /* If the tuning strategy gives a bigger tuning_size than the failure size, we just follow the strategy and do not force. */
  Boolean doforce = TRUE;
  POINTER_SIZE_INT failure_size = lspace_get_failure_size((Lspace*)lspace);  
  if( (tuner->kind == TRANS_FROM_MOS_TO_LOS) && (!tuner->reverse) && (tuner->tuning_size > failure_size) )
    doforce = FALSE;
  if( (tuner->force_tune) && (doforce) )
    compute_space_tune_size_for_force_tune(gc, max_tune_for_min_non_los);

  return;
  
}
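The rounding above keeps the clamp intact: tuner->tuning_size is rounded down so it cannot exceed max_tuning_size after alignment, while the force-tune path below rounds the requested size up so an aligned transfer still covers the failed allocation. Assuming GC_BLOCK_SIZE_BYTES and SPACE_ALLOC_UNIT are powers of two, the helpers likely reduce to the usual masking idiom; the following is a hypothetical re-implementation, not the GC's source.

/* Hypothetical sketch of round_down_to_size / round_up_to_size, assuming the
 * alignment unit is a power of two. */
#include <stdint.h>

static inline uintptr_t sketch_round_down_to_size(uintptr_t size, uintptr_t unit)
{
  return size & ~(unit - 1);              /* clear the low bits */
}

static inline uintptr_t sketch_round_up_to_size(uintptr_t size, uintptr_t unit)
{
  return (size + unit - 1) & ~(unit - 1); /* bump to the next boundary, then clear */
}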
Example #9
0
static void check_tuning_size(GC* gc)
{
  Space_Tuner* tuner = gc->tuner;
  Lspace *lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
  Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
  Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);

  POINTER_SIZE_INT los_free_sz =  ((lspace->committed_heap_size > los_live_obj_size) ? 
                                                   (lspace->committed_heap_size - los_live_obj_size) : 0);

#ifdef SPACE_TUNE_BY_MAJOR_SPEED
  float mos_expect_threshold_ratio = mspace_get_expected_threshold_ratio((Mspace*)mspace);
  POINTER_SIZE_INT mos_expect_threshold = (POINTER_SIZE_INT)((mspace->committed_heap_size + fspace->committed_heap_size) * mos_expect_threshold_ratio);
  POINTER_SIZE_INT mos_free_sz = ((mos_expect_threshold > non_los_live_obj_size)?
                                                            (mos_expect_threshold - non_los_live_obj_size) : 0);
  POINTER_SIZE_INT total_free_sz = los_free_sz + mos_free_sz;
  float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los  + tuner->speed_mos);
  POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_free_sz * new_los_ratio);
#else
  POINTER_SIZE_INT non_los_committed_size = mspace->committed_heap_size + fspace->committed_heap_size;
  POINTER_SIZE_INT non_los_free_sz = ((non_los_committed_size > non_los_live_obj_size)?
                                                                (non_los_committed_size - non_los_live_obj_size):0);
  POINTER_SIZE_INT total_free_sz = los_free_sz + non_los_free_sz;
  float new_los_ratio = (float)tuner->speed_los / (float)(tuner->speed_los  + tuner->speed_nos);
  POINTER_SIZE_INT new_free_los_sz = (POINTER_SIZE_INT)((float)total_free_sz * new_los_ratio);
#endif

  /*LOS_Extend:*/
  if((new_free_los_sz > los_free_sz) )
  { 
    tuner->kind = TRANS_FROM_MOS_TO_LOS;
    tuner->tuning_size = new_free_los_sz - los_free_sz;
  }
  /*LOS_Shrink:*/
  else if(new_free_los_sz < los_free_sz)
  {
    tuner->kind = TRANS_FROM_LOS_TO_MOS;
    tuner->tuning_size = los_free_sz - new_free_los_sz;
  }
  /*Nothing*/
  else
  {
    tuner->tuning_size = 0;
    /*This is necessary, because the original value of kind might not be NOTHING. */
    tuner->kind = TRANS_NOTHING;
  }

  /* If not forced to tune and the tuning size is too small, the tuner will not take effect. */
  if( (!tuner->force_tune) && (tuner->tuning_size < tuner->min_tuning_size) ){
    tuner->kind = TRANS_NOTHING;
    tuner->tuning_size = 0;
  }

  /* If LOS or non-LOS is already at its minimum size, there is no need to tune any further.
   * But we give "force tune" a chance to extend the whole heap size below.
   */
  if(((lspace->committed_heap_size <= min_los_size_bytes) && (tuner->kind == TRANS_FROM_LOS_TO_MOS)) ||
      ((fspace->committed_heap_size + mspace->committed_heap_size <= min_none_los_size_bytes) && (tuner->kind == TRANS_FROM_MOS_TO_LOS))){
    assert((lspace->committed_heap_size == min_los_size_bytes) || (fspace->committed_heap_size + mspace->committed_heap_size == min_none_los_size_bytes));
    tuner->kind = TRANS_NOTHING;
    tuner->tuning_size = 0;
  }
  
  if(tuner->force_tune){
    if(tuner->kind != TRANS_FROM_MOS_TO_LOS){
      tuner->kind = TRANS_FROM_MOS_TO_LOS;
      tuner->reverse = 1;
    }
  }
  
  return;  
}
Example #10
0
/* If this GC is caused by a LOS allocation failure, we set the "force_tune" flag.
  * Attention 1: the space tuning strategy will extend or shrink LOS according to the wasted memory size and the allocation speed.
  * If the strategy decides to shrink, or the extended size is not large enough to hold the failed object, we set the "doforce" flag in
  * function "gc_compute_space_tune_size_after_marking". Only if "force_tune" and "doforce" are both true do we decide the
  * size of the extension in this function.
  * Attention 2: the total heap size might be extended in this function. */
static void compute_space_tune_size_for_force_tune(GC *gc, POINTER_SIZE_INT max_tune_for_min_non_los)
{
  Space_Tuner* tuner = gc->tuner;
  Lspace *lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
  Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);  
  POINTER_SIZE_INT max_tuning_size = 0;
  POINTER_SIZE_INT failure_size = lspace->failure_size;
  POINTER_SIZE_INT lspace_free_size = ( (lspace->committed_heap_size > los_live_obj_size) ? (lspace->committed_heap_size - los_live_obj_size) : (0) );
  //debug_adjust
  assert(!(lspace_free_size % KB));
  assert(!(failure_size % KB));

  if(lspace_free_size >= failure_size){
    tuner->tuning_size = 0;
    tuner->kind = TRANS_NOTHING;
  }else{
    tuner->tuning_size = failure_size - lspace_free_size;
    
    /* We should ensure that the tuning size is no more than the free space of the non_los area */
    if( gc->committed_heap_size > lspace->committed_heap_size + non_los_live_obj_size )
      max_tuning_size = gc->committed_heap_size - lspace->committed_heap_size - non_los_live_obj_size;

    if(max_tuning_size > max_tune_for_min_non_los)
      max_tuning_size = max_tune_for_min_non_los;

    /*Round up to satisfy LOS alloc demand.*/
    if(LOS_ADJUST_BOUNDARY)  {
      tuner->tuning_size = round_up_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
      max_tuning_size = round_down_to_size(max_tuning_size, GC_BLOCK_SIZE_BYTES);
    }else {
      tuner->tuning_size = round_up_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);
      max_tuning_size = round_down_to_size(max_tuning_size, SPACE_ALLOC_UNIT);
    }

    /* If the tuning size is too large, we do nothing and wait for the JVM to report OOM */
    /* Fixme: if the heap size has not reached its maximum (mx), we can extend the whole heap size */
    if(tuner->tuning_size > max_tuning_size){
      tuner->tuning_size = round_up_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);
      max_tuning_size = round_down_to_size(max_tuning_size, SPACE_ALLOC_UNIT);
        //debug_adjust
      assert(max_heap_size_bytes >= gc->committed_heap_size);
      POINTER_SIZE_INT extend_heap_size = 0;
      POINTER_SIZE_INT potential_max_tuning_size = max_tuning_size + max_heap_size_bytes - gc->committed_heap_size;
      potential_max_tuning_size -= LOS_HEAD_RESERVE_FOR_HEAP_BASE;

      //debug_adjust
      assert(!(potential_max_tuning_size % SPACE_ALLOC_UNIT));
      if(tuner->tuning_size > potential_max_tuning_size){
        tuner->tuning_size = 0;
        tuner->kind = TRANS_NOTHING;
      }else{
        /* We have tuner->tuning_size > max_tuning_size from the check above. */
        extend_heap_size = tuner->tuning_size - max_tuning_size;    
        blocked_space_extend(fspace, (unsigned int)extend_heap_size);
        gc->committed_heap_size += extend_heap_size;
        tuner->kind = TRANS_FROM_MOS_TO_LOS;
      }
    }
    else
    {
      tuner->kind = TRANS_FROM_MOS_TO_LOS;
    }
  }

  return;
}
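A worked example of the force-tune sizing above (illustrative numbers only): if failure_size is 6 MB and lspace_free_size is 2 MB, the tuner asks for tuning_size = 4 MB. If the non-LOS area can only yield max_tuning_size = 1 MB but max_heap_size_bytes leaves, say, 8 MB of uncommitted headroom, potential_max_tuning_size is roughly 9 MB (minus LOS_HEAD_RESERVE_FOR_HEAP_BASE), so NOS is extended by tuning_size - max_tuning_size = 3 MB and the MOS-to-LOS transfer proceeds. If the request exceeded even that headroom, the tuner would give up (TRANS_NOTHING) and leave the allocation to fail.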
void move_compact_mspace(Collector* collector) 
{
  GC* gc = collector->gc;
  Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
  Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
  Blocked_Space* nos = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
  
  unsigned int num_active_collectors = gc->num_active_collectors;
  Boolean is_fallback = collect_is_fallback();
  
  /* Pass 1: **************************************************
     mark all live objects in the heap, and save all the slots that
     have references that are going to be repointed */

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: mark live objects in heap ...");

  unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);
  
  if(!is_fallback)
       mark_scan_heap(collector);  
  else
       mark_scan_heap_for_fallback(collector);

  old_num = atomic_inc32(&num_marking_collectors);
  if( ++old_num == num_active_collectors ){
    /* last collector's world here */
    /* prepare for next phase */
    gc_init_block_for_collectors(gc, mspace); 
    
    if(!IGNORE_FINREF )
      collector_identify_finref(collector);
#ifndef BUILD_IN_REFERENT
    else {
      gc_set_weakref_sets(gc);
      gc_update_weakref_ignore_finref(gc);
    }
#endif
    gc_identify_dead_weak_roots(gc);

#ifdef USE_32BITS_HASHCODE
    if((!LOS_ADJUST_BOUNDARY) && (is_fallback))
      fallback_clear_fwd_obj_oi_init(collector);
#endif
    debug_num_compact_blocks = 0;
    /* let other collectors go */
    num_marking_collectors++; 
  }
  while(num_marking_collectors != num_active_collectors + 1);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass1");

  /* Pass 2: **************************************************
     move object and set the forwarding offset table */

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: move object and set the forwarding offset table ...");

  atomic_cas32( &num_moving_collectors, 0, num_active_collectors+1);
#ifdef USE_32BITS_HASHCODE
  if(is_fallback)
    fallback_clear_fwd_obj_oi(collector);
#endif

  mspace_move_objects(collector, mspace);   
  
  old_num = atomic_inc32(&num_moving_collectors);
  if( ++old_num == num_active_collectors ){
    /* single thread world */
    if(lspace->move_object) 
      lspace_compute_object_target(collector, lspace);    
    
    gc->collect_result = gc_collection_result(gc);
    if(!gc->collect_result){
      num_moving_collectors++; 
      return;
    }
    
    if(verify_live_heap){
      assert( debug_num_compact_blocks == mspace->num_managed_blocks + nos->num_managed_blocks );	
      debug_num_compact_blocks = 0;
    }

    gc_reset_block_for_collectors(gc, mspace);
    blocked_space_block_iterator_init((Blocked_Space*)mspace);
    num_moving_collectors++; 
  }
  while(num_moving_collectors != num_active_collectors + 1);
  if(!gc->collect_result) return;
  
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass2");

  /* Pass 3: **************************************************
     update all references whose pointed objects were moved */  

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass3: update all references ...");

  old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1);

  mspace_fix_repointed_refs(collector, mspace);

  old_num = atomic_inc32(&num_fixing_collectors);
  if( ++old_num == num_active_collectors ){
    /* last collector's world here */
    lspace_fix_repointed_refs(collector, lspace);   
    gc_fix_rootset(collector, FALSE);
    if(lspace->move_object)  lspace_sliding_compact(collector, lspace);    

    num_fixing_collectors++; 
  }
  while(num_fixing_collectors != num_active_collectors + 1);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass3");

  /* Pass 4: **************************************************
     restore obj_info. Actually only LOS needs it. Since obj_info is recorded against the new address,
     the restoration doesn't need to specify a space. */

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass4: restore obj_info ...");

  atomic_cas32( &num_restoring_collectors, 0, num_active_collectors);
  
  collector_restore_obj_info(collector);

  atomic_inc32(&num_restoring_collectors);

  while(num_restoring_collectors != num_active_collectors);

  /* Dealing with out of memory in mspace */
  if(mspace->free_block_idx > nos->first_block_idx){
    atomic_cas32( &num_extending_collectors, 0, num_active_collectors);
    mspace_extend_compact(collector);
    atomic_inc32(&num_extending_collectors);
    while(num_extending_collectors != num_active_collectors);
  }

 
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass4");

  /* Leftover: **************************************************
   */
  if( (POINTER_SIZE_INT)collector->thread_handle != 0 ){
    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]  finished");
    return;
  }
  
  TRACE2("gc.process", "GC: collector[0]  finished");
  return;
}