Example #1
/* If this GC is caused by a LOS allocation failure, we set the "force_tune" flag.
 * Attention 1: The space tuning strategy will extend or shrink LOS according to the wasted memory size and allocation speed.
 * If the strategy decides to shrink, or the extended size is not large enough to hold the failed object, we set the "doforce" flag in
 * function "gc_compute_space_tune_size_after_marking". Only if "force_tune" and "doforce" are both true do we decide the
 * size of the extension in this function.
 * Attention 2: The total heap size might be extended in this function. */
static void compute_space_tune_size_for_force_tune(GC *gc, POINTER_SIZE_INT max_tune_for_min_non_los)
{
  Space_Tuner* tuner = gc->tuner;
  Lspace *lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
  Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);  
  POINTER_SIZE_INT max_tuning_size = 0;
  POINTER_SIZE_INT failure_size = lspace->failure_size;
  POINTER_SIZE_INT lspace_free_size = ( (lspace->committed_heap_size > los_live_obj_size) ? (lspace->committed_heap_size - los_live_obj_size) : (0) );
  //debug_adjust
  assert(!(lspace_free_size % KB));
  assert(!(failure_size % KB));

  if(lspace_free_size >= failure_size){
    tuner->tuning_size = 0;
    tuner->kind = TRANS_NOTHING;
  }else{
    tuner->tuning_size = failure_size - lspace_free_size;
    
    /*We should ensure that the tuning size is no more than the free space of the non-LOS area.*/
    if( gc->committed_heap_size > lspace->committed_heap_size + non_los_live_obj_size )
      max_tuning_size = gc->committed_heap_size - lspace->committed_heap_size - non_los_live_obj_size;

    if(max_tuning_size > max_tune_for_min_non_los)
      max_tuning_size = max_tune_for_min_non_los;

    /*Round up to satisfy LOS alloc demand.*/
    if(LOS_ADJUST_BOUNDARY)  {
      tuner->tuning_size = round_up_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
      max_tuning_size = round_down_to_size(max_tuning_size, GC_BLOCK_SIZE_BYTES);
    }else {
      tuner->tuning_size = round_up_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);
      max_tuning_size = round_down_to_size(max_tuning_size, SPACE_ALLOC_UNIT);
    }

    /*If the tuning size is too large, we do nothing and wait for the JVM to throw OOM.*/
    /*Fixme: if the heap size has not reached its maximum (mx), we can extend the whole heap size.*/
    if(tuner->tuning_size > max_tuning_size){
      tuner->tuning_size = round_up_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);
      max_tuning_size = round_down_to_size(max_tuning_size, SPACE_ALLOC_UNIT);
        //debug_adjust
      assert(max_heap_size_bytes >= gc->committed_heap_size);
      POINTER_SIZE_INT extend_heap_size = 0;
      POINTER_SIZE_INT potential_max_tuning_size = max_tuning_size + max_heap_size_bytes - gc->committed_heap_size;
      potential_max_tuning_size -= LOS_HEAD_RESERVE_FOR_HEAP_BASE;

      //debug_adjust
      assert(!(potential_max_tuning_size % SPACE_ALLOC_UNIT));
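      /* Worked example (hypothetical numbers): with max_tuning_size = 2 MB,
       * max_heap_size_bytes = 512 MB and committed_heap_size = 500 MB,
       * potential_max_tuning_size = 2 MB + 12 MB = 14 MB (minus the LOS head
       * reserve). A 10 MB tuning_size then fits, and the heap is extended
       * below by tuning_size - max_tuning_size = 8 MB. */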
      if(tuner->tuning_size > potential_max_tuning_size){
        tuner->tuning_size = 0;
        tuner->kind = TRANS_NOTHING;
      }else{
        /*From the check above, tuner->tuning_size > max_tuning_size here.*/
        extend_heap_size = tuner->tuning_size - max_tuning_size;    
        blocked_space_extend(fspace, (unsigned int)extend_heap_size);
        gc->committed_heap_size += extend_heap_size;
        tuner->kind = TRANS_FROM_MOS_TO_LOS;
      }
    }
    else
    {
      tuner->kind = TRANS_FROM_MOS_TO_LOS;
    }
  }

  return;
}
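
The rounding calls above rely on helpers that align a size up or down to a multiple of the allocation unit. Below is a minimal standalone sketch of that arithmetic, assuming the unit (GC_BLOCK_SIZE_BYTES or SPACE_ALLOC_UNIT) is a power of two so alignment reduces to bit masking; the mask-based bodies are an illustrative assumption, not necessarily the original implementation.

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-ins for round_up_to_size/round_down_to_size, assuming
 * the alignment unit is a power of two so alignment is a bit-mask. */
static size_t round_up_to_size(size_t size, size_t unit)
{
  assert(unit && !(unit & (unit - 1)));  /* unit must be a power of two */
  return (size + unit - 1) & ~(unit - 1);
}

static size_t round_down_to_size(size_t size, size_t unit)
{
  assert(unit && !(unit & (unit - 1)));
  return size & ~(unit - 1);
}

int main(void)
{
  /* A 40 KB tuning size with a 32 KB block unit. */
  printf("%zu\n", round_up_to_size(40 * 1024, 32 * 1024));    /* 65536 */
  printf("%zu\n", round_down_to_size(40 * 1024, 32 * 1024));  /* 32768 */
  return 0;
}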
Example #2
void lspace_reset_for_slide(Lspace* lspace)
{
    GC* gc = lspace->gc;
    Space_Tuner* tuner = gc->tuner;
    POINTER_SIZE_INT trans_size = tuner->tuning_size;
    POINTER_SIZE_INT new_fa_size = 0;
    assert(!(trans_size%GC_BLOCK_SIZE_BYTES));
    Mspace *mos = (Mspace*)((GC_Gen*)gc)->mos;
    Fspace *nos = (Fspace*)((GC_Gen*)gc)->nos;

    /* Reset the pool first because its info is useless now. */
    free_area_pool_reset(lspace->free_pool);

    /* Lspace collection in a major collection must move objects. */
    assert(lspace->move_object);

    switch(tuner->kind){
      case TRANS_FROM_MOS_TO_LOS:{
        //debug_minor_sweep
        if(LOS_ADJUST_BOUNDARY) {
          Block* mos_first_block = ((Blocked_Space*)((GC_Gen*)gc)->mos)->blocks;
          lspace->heap_end = (void*)mos_first_block;
          assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
        }else{
          vm_commit_mem(lspace->heap_end, trans_size);
          lspace->heap_end = (void*)((POINTER_SIZE_INT)lspace->heap_end + trans_size);
          //fixme: need to add decommit in NOS
          if(trans_size < nos->committed_heap_size) {
            /* NOS alone can absorb the transfer: shrink NOS by trans_size. */
            nos->free_block_idx = nos->first_block_idx;
            blocked_space_shrink((Blocked_Space*)nos, trans_size);
          } else {
            /* NOS is smaller than trans_size: decommit trans_size at the old
             * NOS end, charge the remainder to MOS's free tail, and rebuild
             * NOS from the blocks MOS gives up. */
            POINTER_SIZE_INT mos_free_size = blocked_space_free_mem_size((Blocked_Space*)mos);
            void *decommit_base = (void*)((POINTER_SIZE_INT)nos->heap_end - trans_size);
            vm_decommit_mem(decommit_base, trans_size);
            unsigned int reduced_mos_size = trans_size - nos->committed_heap_size;
            unsigned int nos_size = mos_free_size - reduced_mos_size;
            if(nos_size < GC_BLOCK_SIZE_BYTES) nos_size = GC_BLOCK_SIZE_BYTES;
            nos_size = round_up_to_size(nos_size, GC_BLOCK_SIZE_BYTES);
            mos->num_managed_blocks -= (mos_free_size >> GC_BLOCK_SHIFT_COUNT);
            mos->num_used_blocks = mos->free_block_idx - mos->first_block_idx;
            mos->num_total_blocks = mos->num_managed_blocks;
            mos->ceiling_block_idx -= (mos_free_size >> GC_BLOCK_SHIFT_COUNT);
            assert(mos->num_used_blocks <= mos->num_managed_blocks);
            void *start_address = (void*)&(mos->blocks[mos->num_managed_blocks]);
            assert(start_address < decommit_base);
            mos->heap_end = start_address;
            mos->committed_heap_size = (POINTER_SIZE_INT)start_address - (POINTER_SIZE_INT)mos->heap_start;
            nos_boundary = nos->heap_start = start_address;
            nos->heap_end = decommit_base;
            nos->committed_heap_size = nos->reserved_heap_size = (POINTER_SIZE_INT)decommit_base - (POINTER_SIZE_INT)start_address;
            nos->num_total_blocks = nos->num_managed_blocks = nos_size >> GC_BLOCK_SHIFT_COUNT;
            nos->free_block_idx = nos->first_block_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, start_address);
            nos->ceiling_block_idx = nos->first_block_idx + nos->num_managed_blocks - 1;
            nos->num_used_blocks = 0;
            space_init_blocks((Blocked_Space*)nos);
          }
        }
        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size;
        Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
        if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
        lspace->committed_heap_size += trans_size;
        
        break;
      }
      case TRANS_FROM_LOS_TO_MOS:{
        assert(lspace->move_object);
        if(LOS_ADJUST_BOUNDARY){
          Block* mos_first_block = ((Blocked_Space*)((GC_Gen*)gc)->mos)->blocks;
          assert((POINTER_SIZE_INT)lspace->heap_end - trans_size == (POINTER_SIZE_INT)mos_first_block);
          lspace->heap_end = (void*)mos_first_block;
        }else{
          void *p = (void*)((POINTER_SIZE_INT)lspace->heap_end - trans_size);
          vm_decommit_mem(p, trans_size);
          lspace->heap_end = p;
          //fixme: need to add decommit in NOS
          blocked_space_extend((Blocked_Space*)((GC_Gen*)gc)->nos, trans_size);
        }
        lspace->committed_heap_size -= trans_size;
        /* LOS_Shrink: we don't have to scan lspace to build the free pool when slide-compacting LOS. */
        assert((POINTER_SIZE_INT)lspace->scompact_fa_end > (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size);
        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start - tuner->tuning_size;
        Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
        if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);

        break;
      }
      default:{
        assert(lspace->move_object);
        assert(tuner->kind == TRANS_NOTHING);
        assert(!tuner->tuning_size);
        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start;
        if(new_fa_size == 0) break;
        Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
        if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
        break;
      }
    }

//    lspace->accumu_alloced_size = 0;    
//    lspace->last_alloced_size = 0;        
    lspace->period_surviving_size = (POINTER_SIZE_INT)lspace->scompact_fa_start - (POINTER_SIZE_INT)lspace->heap_start;
    lspace->last_surviving_size = lspace->period_surviving_size;
    lspace->survive_ratio = (float)lspace->accumu_alloced_size / (float)lspace->committed_heap_size;

    los_boundary = lspace->heap_end;
}
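
The else branch under TRANS_FROM_MOS_TO_LOS carries the subtle arithmetic: the decommitted tail consumes all of NOS plus part of MOS's free space, and whatever MOS free space remains becomes the new NOS. Here is a standalone sketch with hypothetical, block-aligned sizes that checks the accounting balances; the variable names mirror the listing but the program is illustrative only.

#include <assert.h>
#include <stdio.h>

#define MB (1024u * 1024u)

int main(void)
{
  /* Hypothetical, block-aligned sizes. */
  unsigned int nos_committed = 4 * MB;   /* old NOS committed size         */
  unsigned int trans_size    = 6 * MB;   /* bytes LOS claims (tuning_size) */
  unsigned int mos_free_size = 10 * MB;  /* free space at the MOS tail     */

  assert(trans_size >= nos_committed);   /* the else-branch condition */

  /* The decommitted tail eats all of NOS plus part of MOS's free space... */
  unsigned int reduced_mos_size = trans_size - nos_committed;
  /* ...and the MOS free space left over becomes the new NOS. */
  unsigned int new_nos_size = mos_free_size - reduced_mos_size;

  printf("new NOS size: %u MB\n", new_nos_size / MB);  /* 8 MB */
  /* MOS gives up mos_free_size, the old NOS disappears, and the new NOS
   * plus the trans_size handed to LOS account for every byte. */
  assert(mos_free_size + nos_committed == new_nos_size + trans_size);
  return 0;
}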
Example #3
void gc_gen_adapt(GC_Gen* gc, int64 pause_time)
{
  gc_decide_next_collect(gc, pause_time);

  if(NOS_SIZE) return;

  POINTER_SIZE_INT new_nos_size;
  POINTER_SIZE_INT new_mos_size;

  Boolean result = gc_compute_new_space_size(gc, &new_mos_size, &new_nos_size);

  if(!result) return;

  Blocked_Space* nos = (Blocked_Space*)gc->nos;
  Blocked_Space* mos = (Blocked_Space*)gc->mos;
  
  POINTER_SIZE_INT curr_nos_size = space_committed_size((Space*)nos);

  //if( ABS_DIFF(new_nos_size, curr_nos_size) < NOS_COPY_RESERVE_DELTA )
  if( new_nos_size == curr_nos_size ){
    return;
  }else if ( new_nos_size >= curr_nos_size ){
    INFO2("gc.process", "GC: gc_gen space adjustment after GC["<<gc->num_collections<<"] ...\n");
    POINTER_SIZE_INT adapt_size = new_nos_size - curr_nos_size;
    INFO2("gc.space", "GC: Space Adapt:  mos  --->  nos  ("
      <<verbose_print_size(adapt_size)
      <<" size was transferred from mos to nos)\n"); 
  } else {
    INFO2("gc.process", "GC: gc_gen space adjustment after GC["<<gc->num_collections<<"] ...\n");
    POINTER_SIZE_INT  adapt_size = curr_nos_size - new_nos_size;
    INFO2("gc.space", "GC: Space Adapt:  nos  --->  mos  ("
      <<verbose_print_size(adapt_size)
      <<" size was transferred from nos to mos)\n"); 
  }
  
  POINTER_SIZE_INT used_mos_size = blocked_space_used_mem_size((Blocked_Space*)mos);
  POINTER_SIZE_INT free_mos_size = blocked_space_free_mem_size((Blocked_Space*)mos);

  POINTER_SIZE_INT new_free_mos_size = new_mos_size -  used_mos_size;
  
  POINTER_SIZE_INT curr_mos_end = (POINTER_SIZE_INT)&mos->blocks[mos->free_block_idx - mos->first_block_idx];
  POINTER_SIZE_INT mos_border = (POINTER_SIZE_INT)mos->heap_end;
  if(curr_mos_end + new_free_mos_size > mos_border){
    /* We can't let MOS cross the border. */
    new_free_mos_size = mos_border - curr_mos_end;
  }

  if(new_nos_size < curr_nos_size){
    /* Let's shrink NOS. */
    assert(new_free_mos_size > free_mos_size);
    blocked_space_shrink((Blocked_Space*)nos, curr_nos_size - new_nos_size);
    blocked_space_extend((Blocked_Space*)mos, new_free_mos_size - free_mos_size);
  }else if(new_nos_size > curr_nos_size){
    /* Let's grow NOS. */
    assert(new_free_mos_size < free_mos_size);
    blocked_space_shrink((Blocked_Space*)mos, free_mos_size - new_free_mos_size);
    blocked_space_extend((Blocked_Space*)nos, new_nos_size - curr_nos_size);
  }

  Block_Header* mos_last_block = (Block_Header*)&mos->blocks[mos->num_managed_blocks-1];
  Block_Header* nos_first_block = (Block_Header*)&nos->blocks[0];
  mos_last_block->next = nos_first_block;
  
  return;
}
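
Before any block bookkeeping, gc_gen_adapt reduces to a direction-and-amount decision between NOS and MOS. A minimal sketch of just that decision, using plain size_t inputs instead of the GC structures (the enum and function names are illustrative, not part of the original API):

#include <stddef.h>
#include <stdio.h>

/* Which way space moves, mirroring the two INFO2 branches above. */
typedef enum { ADAPT_NONE, MOS_TO_NOS, NOS_TO_MOS } Adapt_Dir;

static Adapt_Dir adapt_direction(size_t new_nos, size_t curr_nos, size_t *amount)
{
  if(new_nos == curr_nos){ *amount = 0; return ADAPT_NONE; }
  if(new_nos > curr_nos){ *amount = new_nos - curr_nos; return MOS_TO_NOS; }
  *amount = curr_nos - new_nos;
  return NOS_TO_MOS;
}

int main(void)
{
  size_t amount;
  /* e.g. the adaptor wants NOS to grow from 16 MB to 24 MB */
  Adapt_Dir dir = adapt_direction((size_t)24 << 20, (size_t)16 << 20, &amount);
  printf("dir=%d amount=%zu MB\n", (int)dir, amount >> 20);  /* dir=1 amount=8 MB */
  return 0;
}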