Example #1
/* Malloc and initialize fake blocks for LOS_Shrink. */
void gc_space_tuner_init_fake_blocks_for_los_shrink(GC* gc)
{
  Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
  Space_Tuner* tuner = gc->tuner;
  Block_Header* mos_first_block = (Block_Header*)&mspace->blocks[0];
  unsigned int trans_blocks = (unsigned int)(tuner->tuning_size >> GC_BLOCK_SHIFT_COUNT);
  tuner->interim_blocks = (Block_Header*)STD_MALLOC(trans_blocks * sizeof(Block_Header));
  assert(tuner->interim_blocks);  /* fail fast if the fake-block array can't be allocated */
  Block_Header* los_trans_fake_blocks = tuner->interim_blocks;
  memset(los_trans_fake_blocks, 0, trans_blocks * sizeof(Block_Header));
  void* trans_base = (void*)((POINTER_SIZE_INT)mos_first_block - tuner->tuning_size);
  unsigned int start_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, trans_base);
  Block_Header* last_block = los_trans_fake_blocks;

  for(unsigned int i = 0; i < trans_blocks; i++){
      Block_Header* curr_block = &los_trans_fake_blocks[i];
      curr_block->block_idx = start_idx + i;
      curr_block->base = (void*)((POINTER_SIZE_INT)trans_base + i * GC_BLOCK_SIZE_BYTES + GC_BLOCK_HEADER_SIZE_BYTES);
      curr_block->free = curr_block->base;
      curr_block->new_free = curr_block->free;
      curr_block->ceiling = (void*)((POINTER_SIZE_INT)curr_block->base + GC_BLOCK_BODY_SIZE_BYTES);
      curr_block->status = BLOCK_COMPACTED;
#ifdef USE_32BITS_HASHCODE
      curr_block->hashcode_buf = hashcode_buf_create();
#endif
      /* Chain the fake blocks; the i == 0 self-link is overwritten on the next pass. */
      last_block->next = curr_block;
      last_block = curr_block;
  }
  /* Splice the fake chain in front of the real MOS block list. */
  last_block->next = mos_first_block;
}
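
The function above builds a throwaway chain of Block_Header records describing the address range that MOS cedes to LOS, then splices that chain into the real block list by pointing the last fake block at mos_first_block. Below is a minimal, self-contained sketch of the same chaining pattern; block_t, BLOCK_SIZE, and build_fake_chain are simplified stand-ins invented for illustration, not the Harmony GC API.

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#define BLOCK_SIZE 4096u  /* hypothetical block granularity */

/* Simplified stand-in for Block_Header: an index, a payload base pointer,
   and the chain link. */
typedef struct block {
    unsigned int  idx;
    void*         base;
    struct block* next;
} block_t;

/* Build a chain of n fake blocks covering [range_base, range_base + n*BLOCK_SIZE)
   and splice it in front of an existing first real block. Returns the head. */
static block_t* build_fake_chain(void* range_base, unsigned int n,
                                 unsigned int start_idx, block_t* first_real)
{
    block_t* fakes = (block_t*)malloc(n * sizeof(block_t));
    assert(fakes && n > 0);
    memset(fakes, 0, n * sizeof(block_t));

    block_t* last = &fakes[0];
    for(unsigned int i = 0; i < n; i++){
        block_t* curr = &fakes[i];
        curr->idx  = start_idx + i;
        curr->base = (char*)range_base + i * BLOCK_SIZE;
        last->next = curr;  /* the i == 0 self-link is overwritten next pass */
        last = curr;
    }
    last->next = first_real;  /* splice the fake chain into the real list */
    return fakes;
}

As in the original loop, the first iteration briefly self-links fakes[0]; that link is overwritten on the next pass, so the resulting chain is fakes[0] -> ... -> fakes[n-1] -> first_real.
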
Example #2
void lspace_reset_for_slide(Lspace* lspace)
{
    GC* gc = lspace->gc;
    Space_Tuner* tuner = gc->tuner;
    POINTER_SIZE_INT trans_size = tuner->tuning_size;
    POINTER_SIZE_INT new_fa_size = 0;
    assert(!(trans_size % GC_BLOCK_SIZE_BYTES));
    Mspace* mos = (Mspace*)((GC_Gen*)gc)->mos;
    Fspace* nos = (Fspace*)((GC_Gen*)gc)->nos;

    /* Reset the pool first because its info is useless now. */
    free_area_pool_reset(lspace->free_pool);

    /* Lspace collection in a major collection must move objects. */
    assert(lspace->move_object);

    switch(tuner->kind){
      case TRANS_FROM_MOS_TO_LOS:{
        //debug_minor_sweep
        if(LOS_ADJUST_BOUNDARY) {
          Block* mos_first_block = ((Blocked_Space*)((GC_Gen*)gc)->mos)->blocks;
          lspace->heap_end = (void*)mos_first_block;
          assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
        }else{
          vm_commit_mem(lspace->heap_end, trans_size);
          lspace->heap_end = (void*)((POINTER_SIZE_INT)lspace->heap_end + trans_size);
          //fixme: need to add decommit in NOS
          if(trans_size < nos->committed_heap_size) {
            nos->free_block_idx = nos->first_block_idx;
            blocked_space_shrink((Blocked_Space*)nos, trans_size);
          } else {
            POINTER_SIZE_INT mos_free_size = blocked_space_free_mem_size((Blocked_Space*)mos);
            void* decommit_base = (void*)((POINTER_SIZE_INT)nos->heap_end - trans_size);
            vm_decommit_mem(decommit_base, trans_size);
            /* Keep these in POINTER_SIZE_INT: unsigned int would truncate on 64-bit. */
            POINTER_SIZE_INT reduced_mos_size = trans_size - nos->committed_heap_size;
            POINTER_SIZE_INT nos_size = mos_free_size - reduced_mos_size;
            if(nos_size < GC_BLOCK_SIZE_BYTES) nos_size = GC_BLOCK_SIZE_BYTES;
            nos_size = round_up_to_size(nos_size, GC_BLOCK_SIZE_BYTES);
            mos->num_managed_blocks -= (mos_free_size >> GC_BLOCK_SHIFT_COUNT);
            mos->num_used_blocks = mos->free_block_idx - mos->first_block_idx;
            mos->num_total_blocks = mos->num_managed_blocks;
            mos->ceiling_block_idx -= (mos_free_size >> GC_BLOCK_SHIFT_COUNT);
            assert(mos->num_used_blocks <= mos->num_managed_blocks);
            void* start_address = (void*)&(mos->blocks[mos->num_managed_blocks]);
            assert(start_address < decommit_base);
            mos->heap_end = start_address;
            mos->committed_heap_size = (POINTER_SIZE_INT)start_address - (POINTER_SIZE_INT)mos->heap_start;
            nos_boundary = nos->heap_start = start_address;
            nos->heap_end = decommit_base;
            nos->committed_heap_size = nos->reserved_heap_size = (POINTER_SIZE_INT)decommit_base - (POINTER_SIZE_INT)start_address;
            nos->num_total_blocks = nos->num_managed_blocks = nos_size >> GC_BLOCK_SHIFT_COUNT;
            nos->free_block_idx = nos->first_block_idx = GC_BLOCK_INDEX_FROM(gc->heap_start, start_address);
            nos->ceiling_block_idx = nos->first_block_idx + nos->num_managed_blocks - 1;
            nos->num_used_blocks = 0;
            space_init_blocks((Blocked_Space*)nos);
          }
        }
        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size;
        Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
        if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
        lspace->committed_heap_size += trans_size;
        
        break;
      }
      case TRANS_FROM_LOS_TO_MOS:{
        assert(lspace->move_object);
        if(LOS_ADJUST_BOUNDARY){
          Block* mos_first_block = ((Blocked_Space*)((GC_Gen*)gc)->mos)->blocks;
          assert((POINTER_SIZE_INT)lspace->heap_end - trans_size == (POINTER_SIZE_INT)mos_first_block);
          lspace->heap_end = (void*)mos_first_block;
        }else{
          void* p = (void*)((POINTER_SIZE_INT)lspace->heap_end - trans_size);
          vm_decommit_mem(p, trans_size);
          lspace->heap_end = p;
          //fixme: need to add decommit in NOS
          blocked_space_extend((Blocked_Space*)((GC_Gen*)gc)->nos, trans_size);
        }
        lspace->committed_heap_size -= trans_size;
        /* LOS_Shrink: we don't need to scan the lspace to rebuild the free pool when slide-compacting LOS. */
        assert((POINTER_SIZE_INT)lspace->scompact_fa_end > (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size);
        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start - tuner->tuning_size;
        Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
        if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);

        break;
      }
      default:{
        assert(lspace->move_object);
        assert(tuner->kind == TRANS_NOTHING);
        assert(!tuner->tuning_size);
        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start;
        if(new_fa_size == 0) break;
        Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
        if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
        break;
      }
    }

//    lspace->accumu_alloced_size = 0;    
//    lspace->last_alloced_size = 0;        
    /* Survivors were slid to the bottom of the space: [heap_start, scompact_fa_start). */
    lspace->period_surviving_size = (POINTER_SIZE_INT)lspace->scompact_fa_start - (POINTER_SIZE_INT)lspace->heap_start;
    lspace->last_surviving_size = lspace->period_surviving_size;
    lspace->survive_ratio = (float)lspace->accumu_alloced_size / (float)lspace->committed_heap_size;

    los_boundary = lspace->heap_end;
}
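
Both tuner cases above reduce to the same bookkeeping: the LOS end moves by trans_size, and the free area left over after sliding compaction grows or shrinks by the same amount (the +/- tuner->tuning_size terms in new_fa_size). The following is a minimal sketch of that size accounting under a made-up BLOCK_SIZE, with hypothetical names (shift_boundary, los_end, fa_start, fa_end); it illustrates only the arithmetic, not the real vm_commit_mem/vm_decommit_mem work or the block-list surgery.

#include <assert.h>
#include <stdint.h>

#define BLOCK_SIZE 4096u  /* hypothetical stand-in for GC_BLOCK_SIZE_BYTES */

/* Boundary-shift bookkeeping: trans > 0 grows LOS (TRANS_FROM_MOS_TO_LOS),
   trans < 0 shrinks it (TRANS_FROM_LOS_TO_MOS). Returns the new free-area
   size left after sliding compaction. */
static uintptr_t shift_boundary(uintptr_t* los_end, uintptr_t fa_start,
                                uintptr_t fa_end, intptr_t trans)
{
    assert(trans % (intptr_t)BLOCK_SIZE == 0); /* transfers are block-aligned */
    *los_end = (uintptr_t)((intptr_t)*los_end + trans);
    /* The free area changes by exactly the transferred amount. */
    return (uintptr_t)((intptr_t)(fa_end - fa_start) + trans);
}

int main(void)
{
    uintptr_t los_end = 16 * BLOCK_SIZE;
    /* Grow LOS by two blocks: both the LOS end and the free area move up. */
    uintptr_t fa = shift_boundary(&los_end, 4 * BLOCK_SIZE, 8 * BLOCK_SIZE,
                                  2 * (intptr_t)BLOCK_SIZE);
    assert(los_end == 18 * BLOCK_SIZE);
    assert(fa == 6 * BLOCK_SIZE);
    return 0;
}
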