Example #1
/* The accumulated live-object sizes are available only after the special marking function for space tuning has been run. */
static void gc_compute_live_object_size_after_marking(GC* gc, POINTER_SIZE_INT non_los_size)
{
  non_los_live_obj_size = 0;
  los_live_obj_size = 0;
  
  POINTER_SIZE_INT segment_live_size[NORMAL_SIZE_SEGMENT_NUM];
  memset(segment_live_size, 0, sizeof(POINTER_SIZE_INT) * NORMAL_SIZE_SEGMENT_NUM);

  unsigned int collector_num = gc->num_active_collectors;
  for(unsigned int i = collector_num; i--;){
    Collector *collector = gc->collectors[i];
    non_los_live_obj_size += collector->non_los_live_obj_size;
    los_live_obj_size += collector->los_live_obj_size;
    for(unsigned int j = 0; j < NORMAL_SIZE_SEGMENT_NUM; j++) {
      segment_live_size[j] += collector->segment_live_size[j];
    }
    memset(collector->segment_live_size, 0, sizeof(POINTER_SIZE_INT) * NORMAL_SIZE_SEGMENT_NUM);
  }
  
  //POINTER_SIZE_INT additional_non_los_size = ((collector_num * 2) << GC_BLOCK_SHIFT_COUNT) + (non_los_live_obj_size >> GC_BLOCK_SHIFT_COUNT) * (GC_LOS_OBJ_SIZE_THRESHOLD/4);
  double additional_non_los_size = 0;
  for(unsigned int i = 0; i < NORMAL_SIZE_SEGMENT_NUM; i++) {
    additional_non_los_size += (double)segment_live_size[i] * SEGMENT_INDEX_TO_SIZE(i) / non_los_live_obj_size;
  }
  additional_non_los_size *= 1.2; // leave 20% slack for distributions worse than the average case
  POINTER_SIZE_INT non_los_live_block = non_los_live_obj_size / (GC_BLOCK_BODY_SIZE_BYTES-(POINTER_SIZE_INT)additional_non_los_size);
  non_los_live_block += collector_num << 2;
  non_los_live_obj_size = (non_los_live_block << GC_BLOCK_SHIFT_COUNT);
  if(non_los_live_obj_size > non_los_size)
    non_los_live_obj_size = non_los_size;

  los_live_obj_size += ((collector_num << 2) << GC_BLOCK_SHIFT_COUNT);
  los_live_obj_size = round_up_to_size(los_live_obj_size, GC_BLOCK_SIZE_BYTES);

}
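The block-count estimate above divides the live non-LOS size by the usable block body minus a size-weighted average object size: each block tail wastes, on average, roughly one object's worth of space, and larger objects waste more. The standalone sketch below reproduces that arithmetic with illustrative constants and inputs; nothing in it is taken from the Harmony headers.

#include <stdio.h>

/* Assumed, illustrative constants. */
#define BLOCK_BODY_SIZE   (32 * 1024 - 128)   /* usable bytes per block */
#define SEGMENT_NUM       4
static const size_t seg_obj_size[SEGMENT_NUM] = {32, 128, 512, 2048};

int main(void)
{
  /* Per-segment live bytes, as the collectors would have accumulated them. */
  size_t seg_live[SEGMENT_NUM] = {1 << 20, 4 << 20, 2 << 20, 1 << 20};
  size_t live = 0;
  for (int i = 0; i < SEGMENT_NUM; i++) live += seg_live[i];

  /* Size-weighted average object size: big objects dominate tail waste. */
  double avg_obj_size = 0;
  for (int i = 0; i < SEGMENT_NUM; i++)
    avg_obj_size += (double)seg_live[i] * seg_obj_size[i] / live;
  avg_obj_size *= 1.2;  /* the same 20% safety factor used above */

  size_t blocks = live / (BLOCK_BODY_SIZE - (size_t)avg_obj_size);
  printf("live=%zu bytes -> ~%zu blocks\n", live, blocks);
  return 0;
}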
Example #2
void verifier_metadata_initialize(Heap_Verifier* heap_verifier)
{
  Heap_Verifier_Metadata* heap_verifier_metadata = (Heap_Verifier_Metadata* )STD_MALLOC(sizeof(Heap_Verifier_Metadata));
  assert(heap_verifier_metadata);
  memset(heap_verifier_metadata, 0, sizeof(Heap_Verifier_Metadata));
  
  unsigned int seg_size = GC_VERIFIER_METADATA_SIZE_BYTES + GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES;
  void* metadata = STD_MALLOC(seg_size);
  assert(metadata);
  memset(metadata, 0, seg_size);
  heap_verifier_metadata->segments[0] = metadata;
  metadata = (void*)round_up_to_size((POINTER_SIZE_INT)metadata, GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES);
  heap_verifier_metadata->num_alloc_segs = 1;
  
  unsigned int i = 0;
  unsigned int num_blocks = GC_VERIFIER_METADATA_SIZE_BYTES/GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES;
  for(i=0; i<num_blocks; i++){
    Vector_Block* block = (Vector_Block*)((POINTER_SIZE_INT)metadata + i*GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES);
    vector_block_init(block, GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES);
  }
  
  unsigned num_tasks = num_blocks>>1;
  heap_verifier_metadata->free_task_pool = sync_pool_create();
  for(i=0; i<num_tasks; i++){
    Vector_Block *block = (Vector_Block*)((POINTER_SIZE_INT)metadata + i*GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES);
    vector_stack_init((Vector_Block*)block);
    pool_put_entry(heap_verifier_metadata->free_task_pool, (void*)block); 
  }
  
  heap_verifier_metadata->free_set_pool = sync_pool_create();
  for(; i<num_blocks; i++){
    POINTER_SIZE_INT block = (POINTER_SIZE_INT)metadata + i*GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES;    
    pool_put_entry(heap_verifier_metadata->free_set_pool, (void*)block); 
  }

  heap_verifier_metadata->mark_task_pool = sync_pool_create();
  heap_verifier_metadata->root_set_pool = sync_pool_create();
  heap_verifier_metadata->objects_pool_before_gc = sync_pool_create();
  heap_verifier_metadata->objects_pool_after_gc = sync_pool_create();
  heap_verifier_metadata->resurrect_objects_pool_before_gc = sync_pool_create();
  heap_verifier_metadata->resurrect_objects_pool_after_gc = sync_pool_create();
  heap_verifier_metadata->new_objects_pool = sync_pool_create();
  heap_verifier_metadata->hashcode_pool_before_gc = sync_pool_create();
  heap_verifier_metadata->hashcode_pool_after_gc = sync_pool_create();
  heap_verifier_metadata->obj_with_fin_pool = sync_pool_create();
  heap_verifier_metadata->finalizable_obj_pool = sync_pool_create();

  verifier_metadata = heap_verifier_metadata;
  heap_verifier->heap_verifier_metadata = heap_verifier_metadata;
  return;
}
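Note how the initializer over-allocates each segment by one GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES and then rounds the base pointer up, so every Vector_Block starts on a block-size boundary while the raw pointer stays in segments[] for later freeing. A minimal sketch of that alignment trick, with stand-in sizes and no Harmony types:

#include <stdlib.h>
#include <string.h>
#include <stdint.h>
#include <assert.h>

#define BLOCK_SIZE 4096u  /* assumed block granularity (power of two) */

static uintptr_t round_up(uintptr_t x, uintptr_t unit)
{ return (x + unit - 1) & ~(unit - 1); }

int main(void)
{
  size_t payload = 16 * BLOCK_SIZE;
  /* Over-allocate by one block so the aligned base still covers `payload`. */
  void *raw = malloc(payload + BLOCK_SIZE);
  assert(raw);
  memset(raw, 0, payload + BLOCK_SIZE);

  char *base = (char*)round_up((uintptr_t)raw, BLOCK_SIZE);
  for (size_t i = 0; i < payload / BLOCK_SIZE; i++)
    assert(((uintptr_t)(base + i * BLOCK_SIZE) % BLOCK_SIZE) == 0);

  free(raw);  /* free the raw pointer, not the aligned one */
  return 0;
}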
Example #3
Vector_Block* gc_verifier_metadata_extend(Pool* pool, Boolean is_set_pool)
{
  /* TODO: add a slot in the pool that points back to verifier_metadata, so the global variable verifier_metadata is no longer needed. */
  lock(verifier_metadata->alloc_lock);
  Vector_Block* block = pool_get_entry(pool);
  if( block ){
    unlock(verifier_metadata->alloc_lock);
    return block;
  }
  
  unsigned int num_alloced = verifier_metadata->num_alloc_segs;
  if(num_alloced == METADATA_SEGMENT_NUM){
    printf("Ran out of GC verifier metadata; please configure more segments!\n");
    exit(1);
  }
  unsigned int seg_size =  GC_VERIFIER_METADATA_EXTEND_SIZE_BYTES + GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES;
  void *new_segment = STD_MALLOC(seg_size);
  assert(new_segment);
  memset(new_segment, 0, seg_size);
  verifier_metadata->segments[num_alloced] = new_segment;
  new_segment = (void*)round_up_to_size((POINTER_SIZE_INT)new_segment, GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES);
  verifier_metadata->num_alloc_segs = num_alloced + 1;
  
  unsigned int num_blocks =  GC_VERIFIER_METADATA_EXTEND_SIZE_BYTES/GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES;
  
  unsigned int i=0;
  for(i=0; i<num_blocks; i++){
    Vector_Block* block = (Vector_Block*)((POINTER_SIZE_INT)new_segment + i*GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES);
    vector_block_init(block, GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES);
    assert(vector_block_is_empty(block));
  }
  
  if(is_set_pool){
    for(i=0; i<num_blocks; i++){
      POINTER_SIZE_INT block = (POINTER_SIZE_INT)new_segment + i*GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES;    
      pool_put_entry(pool, (void*)block); 
    }
  }else{
    for(i=0; i<num_blocks; i++){
      Vector_Block *block = (Vector_Block *)((POINTER_SIZE_INT)new_segment + i*GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES);
      vector_stack_init(block);
      pool_put_entry(pool, (void*)block);
    }
  }

  block = pool_get_entry(pool);
  unlock(verifier_metadata->alloc_lock);
  return block;
}
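The shape of this function is a classic "double-checked extension": take the allocation lock, retry the pool (another thread may have extended it while we waited), and only then allocate a new segment. A self-contained toy version of the same idiom, using a plain mutex and a trivial free list instead of the Harmony sync pool:

#include <pthread.h>
#include <stdlib.h>

typedef struct Node { struct Node *next; } Node;

static Node *free_list = NULL;
static pthread_mutex_t alloc_lock = PTHREAD_MUTEX_INITIALIZER;

static Node *pool_get(void)  /* called only with alloc_lock held */
{
  Node *n = free_list;
  if (n) free_list = n->next;
  return n;
}

static void pool_extend(unsigned n)  /* carve n nodes from one chunk */
{
  Node *chunk = malloc(n * sizeof(Node));
  if (!chunk) return;
  for (unsigned i = 0; i < n; i++) { chunk[i].next = free_list; free_list = &chunk[i]; }
}

static Node *pool_get_or_extend(void)
{
  pthread_mutex_lock(&alloc_lock);
  Node *n = pool_get();              /* re-check: another thread may have extended */
  if (!n) { pool_extend(64); n = pool_get(); }
  pthread_mutex_unlock(&alloc_lock);
  return n;
}

int main(void)
{
  return pool_get_or_extend() ? 0 : 1;
}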
Example #4
void* lspace_alloc(unsigned size, Allocator *allocator)
{
    unsigned int try_count = 0;
    void* p_result = NULL;
    POINTER_SIZE_INT alloc_size = ALIGN_UP_TO_KILO(size);
    Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)allocator->gc);
    
    while( try_count < 2 ){
        if((p_result = lspace_try_alloc(lspace, alloc_size)))
          return p_result;

        /* Failed: no adequate area was found in any list, so run a GC first and then try again. */
        if(try_count == 0){
            vm_gc_lock_enum();
            /* Check again whether there is space for the object: another mutator
               thread may have triggered a GC while we waited for the GC lock. */
            if((p_result = lspace_try_alloc(lspace, alloc_size))){
              vm_gc_unlock_enum();
              return p_result;            
            }
            lspace->failure_size = round_up_to_size(alloc_size, KB);

            gc_reclaim_heap(allocator->gc, GC_CAUSE_LOS_IS_FULL);

            if(lspace->success_ptr){
              p_result = lspace->success_ptr;
              lspace->success_ptr = NULL;
              vm_gc_unlock_enum();
              return p_result;
            }
            vm_gc_unlock_enum();
            try_count++;
        }else{
            try_count++;
        }
    }
    return NULL;
}
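lspace_alloc() is an instance of a common allocator slow path: optimistic try, then take the GC lock, re-check (another mutator may already have collected while we waited), collect, and try once more. The toy program below mirrors only that control flow; the helpers are stand-ins for lspace_try_alloc / vm_gc_lock_enum / gc_reclaim_heap, not the real API:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t gc_lock = PTHREAD_MUTEX_INITIALIZER;
static int heap_has_room = 0;   /* pretend the heap starts out full */

static void *try_alloc(size_t size) { return heap_has_room ? malloc(size) : NULL; }
static void collect_heap(void)      { heap_has_room = 1; /* pretend GC freed space */ }

static void *alloc_with_one_gc_retry(size_t size)
{
  void *p = try_alloc(size);      /* optimistic fast path */
  if (p) return p;

  pthread_mutex_lock(&gc_lock);
  p = try_alloc(size);            /* re-check: a GC may have run while we waited */
  if (!p) {
    collect_heap();               /* reclaim, then one more attempt */
    p = try_alloc(size);
  }
  pthread_mutex_unlock(&gc_lock);
  return p;                       /* NULL here would mean OOM */
}

int main(void)
{
  void *p = alloc_with_one_gc_retry(64);
  printf("alloc %s\n", p ? "succeeded after GC" : "failed");
  free(p);
  return 0;
}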
Example #5
// Resurrect the object tree whose root is the object that p_ref points to
static inline void resurrect_obj_tree(Collector *collector, REF *p_ref)
{
  GC *gc = collector->gc;
  GC_Metadata *metadata = gc->metadata;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  assert(p_obj && gc_obj_is_dead(gc, p_obj));
  
  void *p_ref_or_obj = p_ref;
  Trace_Object_Func trace_object;
  
  /* set trace_object() function */
  if(collect_is_minor()){
    if(gc_is_gen_mode()){
      if(minor_is_forward())
        trace_object = trace_obj_in_gen_fw;
      else if(minor_is_semispace())
        trace_object = trace_obj_in_gen_ss;
      else 
        assert(0);
    }else{
      if(minor_is_forward())
        trace_object = trace_obj_in_nongen_fw;
      else if(minor_is_semispace())
        trace_object = trace_obj_in_nongen_ss;
      else 
        assert(0);
    }
  } else if(collect_is_major_normal() || !gc_has_nos()){
    p_ref_or_obj = p_obj;
    if(gc_has_space_tuner(gc) && (gc->tuner->kind != TRANS_NOTHING)){
      trace_object = trace_obj_in_space_tune_marking;
      unsigned int obj_size = vm_object_size(p_obj);
#ifdef USE_32BITS_HASHCODE
      obj_size += hashcode_is_set(p_obj) ? GC_OBJECT_ALIGNMENT : 0;
#endif
      if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc))){
        collector->non_los_live_obj_size += obj_size;
        collector->segment_live_size[SIZE_TO_SEGMENT_INDEX(obj_size)] += obj_size;
      } else {
        collector->los_live_obj_size += round_up_to_size(obj_size, KB); 
      }
    } else if(!gc_has_nos()){
      trace_object = trace_obj_in_ms_marking;
    } else {
      trace_object = trace_obj_in_normal_marking;
    }
  } else if(collect_is_fallback()){
    if(major_is_marksweep())
      trace_object = trace_obj_in_ms_fallback_marking;
    else
      trace_object = trace_obj_in_fallback_marking;
  } else {
    assert(major_is_marksweep());
    p_ref_or_obj = p_obj;
    if( gc->gc_concurrent_status == GC_CON_NIL )
      trace_object = trace_obj_in_ms_marking;
    else
      trace_object = trace_obj_in_ms_concurrent_mark;
  }
  
  collector->trace_stack = free_task_pool_get_entry(metadata);
  collector_tracestack_push(collector, p_ref_or_obj);
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  collector->trace_stack = free_task_pool_get_entry(metadata);
  Vector_Block *task_block = pool_get_entry(metadata->mark_task_pool);
  while(task_block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(task_block);
    while(!vector_block_iterator_end(task_block, iter)){
      void *p_ref_or_obj = (void*)*iter;
      assert(((collect_is_minor()||collect_is_fallback()) && *(Partial_Reveal_Object **)p_ref_or_obj)
              || ((collect_is_major_normal()||major_is_marksweep()||!gc_has_nos()) && p_ref_or_obj));
      trace_object(collector, p_ref_or_obj);
      if(collector->result == FALSE)  break; /* Resurrection fallback happens; force return */
      
      iter = vector_block_iterator_advance(task_block, iter);
    }
    vector_stack_clear(task_block);
    pool_put_entry(metadata->free_task_pool, task_block);
    
    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }
    
    task_block = pool_get_entry(metadata->mark_task_pool);
  }
  
  task_block = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(task_block);
  pool_put_entry(metadata->free_task_pool, task_block);
  collector->trace_stack = NULL;
}
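The long if/else ladder above selects a Trace_Object_Func exactly once, so the hot tracing loop pays one indirect call per object instead of re-testing the collection kind on every iteration. A minimal sketch of that dispatch style (the trace functions here are hypothetical, not the trace_obj_in_* family):

#include <stdio.h>

typedef struct Object Object;                  /* opaque; used via pointer only */
typedef void (*Trace_Object_Func)(Object *obj);

static void trace_minor(Object *obj) { (void)obj; puts("minor trace"); }
static void trace_major(Object *obj) { (void)obj; puts("major trace"); }

static void trace_all(Object **objs, int n, int is_minor)
{
  /* Select once, outside the loop... */
  Trace_Object_Func trace_object = is_minor ? trace_minor : trace_major;
  /* ...then dispatch per object with no further mode checks. */
  for (int i = 0; i < n; i++)
    trace_object(objs[i]);
}

int main(void)
{
  Object *objs[3] = {NULL, NULL, NULL};
  trace_all(objs, 3, 1);
  return 0;
}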
Example #6
void lspace_reset_for_slide(Lspace* lspace)
{
    GC* gc = lspace->gc;
    Space_Tuner* tuner = gc->tuner;
    POINTER_SIZE_INT trans_size = tuner->tuning_size;
    POINTER_SIZE_INT new_fa_size = 0;
    assert(!(trans_size%GC_BLOCK_SIZE_BYTES));
    Mspace * mos=(Mspace*)((GC_Gen*)gc)->mos;
    Fspace *nos = (Fspace*)((GC_Gen*)gc)->nos;

    /* Reset the pool first because its info is useless now. */
    free_area_pool_reset(lspace->free_pool);

    /* Lspace collection in a major collection must move objects. */
    assert(lspace->move_object);

    switch(tuner->kind){
      case TRANS_FROM_MOS_TO_LOS:{
        //debug_minor_sweep
        if(LOS_ADJUST_BOUNDARY ) {
          Block* mos_first_block = ((Blocked_Space*)((GC_Gen*)gc)->mos)->blocks;
          lspace->heap_end = (void*)mos_first_block;
          assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
        }else{
          vm_commit_mem(lspace->heap_end, trans_size);
          lspace->heap_end = (void*)((POINTER_SIZE_INT)lspace->heap_end + trans_size);
          //fixme: need to add decommit in NOS
          if(trans_size < nos->committed_heap_size) {
            nos->free_block_idx = nos->first_block_idx;
            blocked_space_shrink((Blocked_Space*)nos, trans_size);
          } else {
            POINTER_SIZE_INT mos_free_size = blocked_space_free_mem_size((Blocked_Space*)mos);
            void *decommit_base = (void*)((POINTER_SIZE_INT)nos->heap_end - trans_size);
            vm_decommit_mem(decommit_base, trans_size);
            unsigned int reduced_mos_size = trans_size - nos->committed_heap_size;
            unsigned int nos_size = mos_free_size - reduced_mos_size;
            if(nos_size < GC_BLOCK_SIZE_BYTES) nos_size = GC_BLOCK_SIZE_BYTES;
            nos_size = round_up_to_size(nos_size, GC_BLOCK_SIZE_BYTES);
            mos->num_managed_blocks -= (mos_free_size >> GC_BLOCK_SHIFT_COUNT);
            mos->num_used_blocks = mos->free_block_idx - mos->first_block_idx;
            mos->num_total_blocks = mos->num_managed_blocks;
            mos->ceiling_block_idx -= (mos_free_size >> GC_BLOCK_SHIFT_COUNT);
            assert(mos->num_used_blocks <= mos->num_managed_blocks);
            void *start_address = (void*)&(mos->blocks[mos->num_managed_blocks]);
            assert(start_address < decommit_base);
            mos->heap_end = start_address;
            mos->committed_heap_size = (POINTER_SIZE_INT)start_address - (POINTER_SIZE_INT)mos->heap_start;
            nos_boundary = nos->heap_start = start_address;
            nos->heap_end = decommit_base;
            nos->committed_heap_size = nos->reserved_heap_size = (POINTER_SIZE_INT)decommit_base - (POINTER_SIZE_INT)start_address;
            nos->num_total_blocks = nos->num_managed_blocks = nos_size >> GC_BLOCK_SHIFT_COUNT;
            nos->free_block_idx = nos->first_block_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, start_address);
            nos->ceiling_block_idx = nos->first_block_idx + nos->num_managed_blocks - 1;
            nos->num_used_blocks = 0;
            space_init_blocks((Blocked_Space*)nos);
          }
        }
        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size;
        Free_Area* fa = free_area_new(lspace->scompact_fa_start,  new_fa_size);
        if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
        lspace->committed_heap_size += trans_size;
        
        break;
      }
      case TRANS_FROM_LOS_TO_MOS:{
        assert(lspace->move_object);
        if(LOS_ADJUST_BOUNDARY ){
          Block* mos_first_block = ((Blocked_Space*)((GC_Gen*)gc)->mos)->blocks;
          assert( (POINTER_SIZE_INT)lspace->heap_end - trans_size == (POINTER_SIZE_INT)mos_first_block );
          lspace->heap_end = (void*)mos_first_block;
        }else{
          void *p = (void*)((POINTER_SIZE_INT)lspace->heap_end - trans_size);
          vm_decommit_mem(p, trans_size);
          lspace->heap_end = p;
          //fixme: need to add decommit in NOS
          blocked_space_extend((Blocked_Space*)((GC_Gen*) gc)->nos, trans_size);
        }
        lspace->committed_heap_size -= trans_size;
        /* LOS_Shrink: we don't have to scan the lspace to rebuild the free pool when slide-compacting the LOS. */
        assert((POINTER_SIZE_INT)lspace->scompact_fa_end > (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size);
        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start - tuner->tuning_size;
        Free_Area* fa = free_area_new(lspace->scompact_fa_start,  new_fa_size);
        if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);

        break;
      }
      default:{
        assert(lspace->move_object);
        assert(tuner->kind == TRANS_NOTHING);
        assert(!tuner->tuning_size);
        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start;
        if(new_fa_size == 0) break;
        Free_Area* fa = free_area_new(lspace->scompact_fa_start,  new_fa_size);
        if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
        break;
      }
    }

//    lspace->accumu_alloced_size = 0;    
//    lspace->last_alloced_size = 0;        
    lspace->period_surviving_size = (POINTER_SIZE_INT)lspace->scompact_fa_start - (POINTER_SIZE_INT)lspace->heap_start;
    lspace->last_surviving_size = lspace->period_surviving_size;
    lspace->survive_ratio = (float)lspace->accumu_alloced_size / (float)lspace->committed_heap_size;

    los_boundary = lspace->heap_end;
}
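All three switch cases end the same way: rebuild one free area from the sliding-compaction tail, sized as the old tail plus the space taken from MOS (extend), minus the space returned to MOS (shrink), or unchanged, and keep it only if it can still hold a LOS object. A toy version of that arithmetic with an assumed threshold:

#include <stdio.h>
#include <stdint.h>

#define OBJ_SIZE_THRESHOLD (4 * 1024)  /* assumed LOS object threshold */

/* The free tail is [fa_start, fa_end) adjusted by the tuning delta; areas below
   the LOS object threshold are not worth registering in the free pool. */
static size_t free_area_after_slide(uintptr_t fa_start, uintptr_t fa_end,
                                    intptr_t tuning_delta)
{
  size_t new_fa_size = (size_t)(fa_end - fa_start + tuning_delta);
  return (new_fa_size >= OBJ_SIZE_THRESHOLD) ? new_fa_size : 0;
}

int main(void)
{
  uintptr_t fa_start = 0x100000, fa_end = 0x180000;
  printf("extend: %zu usable bytes\n", free_area_after_slide(fa_start, fa_end, +0x20000));
  printf("shrink: %zu usable bytes\n", free_area_after_slide(fa_start, fa_end, -0x20000));
  return 0;
}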
Example #7
/* This is the function that actually decides tuning_size: after "mark_scan_heap_for_space_tune" the total size of the live objects is known. */
void gc_compute_space_tune_size_after_marking(GC *gc)
{
  Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
  Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
  Lspace *lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
  Space_Tuner* tuner = gc->tuner;

  POINTER_SIZE_INT max_tuning_size = 0;  
  POINTER_SIZE_INT non_los_size = mspace->committed_heap_size + fspace->committed_heap_size;
  if(LOS_ADJUST_BOUNDARY) 
    gc_compute_live_object_size_after_marking(gc, non_los_size);
  else {
    unsigned int collector_num = gc->num_active_collectors;
    POINTER_SIZE_INT reserve_size = collector_num <<(GC_BLOCK_SHIFT_COUNT+2);
    los_live_obj_size = (POINTER_SIZE_INT) lspace->last_surviving_size + reserve_size;
    non_los_live_obj_size = ((POINTER_SIZE_INT)(mspace->free_block_idx-mspace->first_block_idx)<<GC_BLOCK_SHIFT_COUNT)+reserve_size;
    non_los_live_obj_size = round_up_to_size(non_los_live_obj_size, SPACE_ALLOC_UNIT); 
  }
  check_tuning_size(gc);
  
  /* Ensure that the non-LOS area is no smaller than min_none_los_size_bytes. */
  POINTER_SIZE_INT max_tune_for_min_non_los = 0;
  if(non_los_size > min_none_los_size_bytes)
    max_tune_for_min_non_los = non_los_size - min_none_los_size_bytes;
  POINTER_SIZE_INT max_tune_for_min_los = 0;
  //debug_adjust
  assert(lspace->committed_heap_size >= min_los_size_bytes);
  max_tune_for_min_los = lspace->committed_heap_size - min_los_size_bytes;

  /* Not a forced tune; LOS extend: */
  if(tuner->kind == TRANS_FROM_MOS_TO_LOS)
  {
    if (gc->committed_heap_size > lspace->committed_heap_size + non_los_live_obj_size){
      max_tuning_size = gc->committed_heap_size - lspace->committed_heap_size - non_los_live_obj_size;
      if(max_tuning_size > max_tune_for_min_non_los)
        max_tuning_size = max_tune_for_min_non_los;
      if( tuner->tuning_size > max_tuning_size)
        tuner->tuning_size = max_tuning_size;
      /*Round down so as not to break max_tuning_size*/
      if(LOS_ADJUST_BOUNDARY)
        tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
      else
        tuner->tuning_size = round_down_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);

       /* If the tuning size is zero, reset kind to TRANS_NOTHING so that gc_init_block_for_collectors does not relink the block list. */
      if(tuner->tuning_size == 0)  tuner->kind = TRANS_NOTHING;
    }else{ 
      tuner->tuning_size = 0;
      tuner->kind = TRANS_NOTHING;
    }
  }
  /* Not a forced tune; LOS shrink: */
  else
  {    
    if(lspace->committed_heap_size > los_live_obj_size){
      max_tuning_size = lspace->committed_heap_size - los_live_obj_size;
      if(max_tuning_size > max_tune_for_min_los)
        max_tuning_size = max_tune_for_min_los;
      if(tuner->tuning_size > max_tuning_size) 
        tuner->tuning_size = max_tuning_size;
      /*Round down so as not to break max_tuning_size*/

      if (LOS_ADJUST_BOUNDARY)
        tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
      else
        tuner->tuning_size = round_down_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);

      if(tuner->tuning_size == 0)  tuner->kind = TRANS_NOTHING;
    }else{
      /* This is possible because of the reservation in gc_compute_live_object_size_after_marking. */
      tuner->tuning_size = 0;
      tuner->kind = TRANS_NOTHING;
    }
  }

  /* If the tuning strategy gives a bigger tuning_size than the failure size, just follow the strategy and do not force the tune. */
  Boolean doforce = TRUE;
  POINTER_SIZE_INT failure_size = lspace_get_failure_size(lspace);
  if( (tuner->kind == TRANS_FROM_MOS_TO_LOS) && (!tuner->reverse) && (tuner->tuning_size > failure_size) )
    doforce = FALSE;
  if( (tuner->force_tune) && (doforce) )
    compute_space_tune_size_for_force_tune(gc, max_tune_for_min_non_los);

  return;
}
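Both branches apply the same clamp: the requested tuning_size may not exceed what the shrinking side can give up once its minimum size is reserved, and the result is rounded down to the allocation granularity so the bound is never broken. A toy version of that clamp with an assumed unit:

#include <stdio.h>

#define ALLOC_UNIT (64 * 1024ul)  /* assumed allocation granularity (power of two) */

static unsigned long clamp_tuning(unsigned long requested,
                                  unsigned long spare,       /* free bytes on the shrinking side */
                                  unsigned long min_reserve) /* that side's minimum size */
{
  unsigned long max_tune = (spare > min_reserve) ? spare - min_reserve : 0;
  if (requested > max_tune) requested = max_tune;
  return requested & ~(ALLOC_UNIT - 1);  /* round down; never exceed max_tune */
}

int main(void)
{
  /* Request more than the spare space allows: the clamp wins. */
  printf("%lu\n", clamp_tuning(10 * ALLOC_UNIT + 123, 8 * ALLOC_UNIT, 2 * ALLOC_UNIT));
  return 0;
}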
Example #8
/* If this GC is caused by a LOS allocation failure, the "force_tune" flag is set.
 * Attention 1: The space tuning strategy extends or shrinks LOS according to the wasted memory size and the allocation speed.
 * If the strategy decides to shrink, or the planned extension is not large enough to hold the failed object, the "doforce" flag
 * stays set in gc_compute_space_tune_size_after_marking(). Only when "force_tune" and "doforce" are both true does this function
 * decide the size of the extension.
 * Attention 2: The total heap size may be extended in this function. */
static void compute_space_tune_size_for_force_tune(GC *gc, POINTER_SIZE_INT max_tune_for_min_non_los)
{
  Space_Tuner* tuner = gc->tuner;
  Lspace *lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
  Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);  
  POINTER_SIZE_INT max_tuning_size = 0;
  POINTER_SIZE_INT failure_size = lspace->failure_size;
  POINTER_SIZE_INT lspace_free_size = ( (lspace->committed_heap_size > los_live_obj_size) ? (lspace->committed_heap_size - los_live_obj_size) : (0) );
  //debug_adjust
  assert(!(lspace_free_size % KB));
  assert(!(failure_size % KB));

  if(lspace_free_size >= failure_size){
    tuner->tuning_size = 0;
    tuner->kind = TRANS_NOTHING;
  }else{
    tuner->tuning_size = failure_size - lspace_free_size;
    
    /* Ensure the tuning size is no more than the free space of the non-LOS area. */
    if( gc->committed_heap_size > lspace->committed_heap_size + non_los_live_obj_size )
      max_tuning_size = gc->committed_heap_size - lspace->committed_heap_size - non_los_live_obj_size;

    if(max_tuning_size > max_tune_for_min_non_los)
      max_tuning_size = max_tune_for_min_non_los;

    /*Round up to satisfy LOS alloc demand.*/
    if(LOS_ADJUST_BOUNDARY)  {
      tuner->tuning_size = round_up_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
      max_tuning_size = round_down_to_size(max_tuning_size, GC_BLOCK_SIZE_BYTES);
    }else {
      tuner->tuning_size = round_up_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);
      max_tuning_size = round_down_to_size(max_tuning_size, SPACE_ALLOC_UNIT);
    }

    /* If the tuning size is too large, do nothing and let the JVM run into OOM. */
    /* Fixme: if the heap size is below mx, we can extend the whole heap size. */
    if(tuner->tuning_size > max_tuning_size){
      tuner->tuning_size = round_up_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);
      max_tuning_size = round_down_to_size(max_tuning_size, SPACE_ALLOC_UNIT);
        //debug_adjust
      assert(max_heap_size_bytes >= gc->committed_heap_size);
      POINTER_SIZE_INT extend_heap_size = 0;
      POINTER_SIZE_INT potential_max_tuning_size = max_tuning_size + max_heap_size_bytes - gc->committed_heap_size;
      potential_max_tuning_size -= LOS_HEAD_RESERVE_FOR_HEAP_BASE;

      //debug_adjust
      assert(!(potential_max_tuning_size % SPACE_ALLOC_UNIT));
      if(tuner->tuning_size > potential_max_tuning_size){
        tuner->tuning_size = 0;
        tuner->kind = TRANS_NOTHING;
      }else{
        /* We established above that tuner->tuning_size > max_tuning_size. */
        extend_heap_size = tuner->tuning_size - max_tuning_size;    
        blocked_space_extend(fspace, (unsigned int)extend_heap_size);
        gc->committed_heap_size += extend_heap_size;
        tuner->kind = TRANS_FROM_MOS_TO_LOS;
      }
    }
    else
    {
      tuner->kind = TRANS_FROM_MOS_TO_LOS;
    }
  }

  return;
}
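The force-tune decision reduces to a few lines of arithmetic: grow LOS by exactly what the failed allocation still needs (rounded up), and if that exceeds what the non-LOS side can spare, make up the difference by extending the committed heap, bounded by the maximum heap size. A toy walk-through with assumed numbers:

#include <stdio.h>

#define ALLOC_UNIT (64 * 1024ul)  /* assumed allocation granularity */

static unsigned long round_up(unsigned long x)   { return (x + ALLOC_UNIT - 1) & ~(ALLOC_UNIT - 1); }
static unsigned long round_down(unsigned long x) { return x & ~(ALLOC_UNIT - 1); }

int main(void)
{
  unsigned long failure = 9 * ALLOC_UNIT + 100;        /* failed LOS request */
  unsigned long los_free = 2 * ALLOC_UNIT;             /* free space left in LOS */
  unsigned long max_tune = round_down(5 * ALLOC_UNIT); /* spare non-LOS space */
  unsigned long heap_headroom = 16 * ALLOC_UNIT;       /* max_heap - committed */

  unsigned long tuning = round_up(failure - los_free);
  if (tuning <= max_tune)
    printf("shift %lu bytes from MOS to LOS\n", tuning);
  else if (tuning <= max_tune + heap_headroom)
    printf("shift %lu bytes and extend the heap by %lu bytes\n",
           max_tune, tuning - max_tune);
  else
    printf("tuning too large; do nothing and wait for OOM\n");
  return 0;
}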