Example #1
static void determine_min_nos_size(GC_Gen *gc, POINTER_SIZE_INT min_heap_size)
{
  min_nos_size_bytes *=  gc->_num_processors;
  
  POINTER_SIZE_INT min_nos_size_threshold = min_heap_size>>5;
  if(min_nos_size_bytes  > min_nos_size_threshold)
    min_nos_size_bytes = round_down_to_size(min_nos_size_threshold, SPACE_ALLOC_UNIT);
  
  if(MIN_NOS_SIZE) min_nos_size_bytes = MIN_NOS_SIZE;
}
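A minimal, self-contained sketch (not part of the GC source; the constants, round_down_to_size and the local variables are hypothetical stand-ins for the real globals) of the sizing rule above: scale the per-processor minimum by the processor count, cap it at 1/32 of the minimum heap rounded down to the allocation unit, and let an explicit MIN_NOS_SIZE override the result.

#include <stdio.h>
#include <stddef.h>

#define SPACE_ALLOC_UNIT (64 * 1024)        /* assumed allocation granularity */
#define MIN_NOS_SIZE     0                  /* 0 means "no explicit override" */

static size_t round_down_to_size(size_t value, size_t unit) { return value - value % unit; }

int main(void)
{
    size_t min_nos_size_bytes = 2 * 1024 * 1024;    /* per-processor base, hypothetical */
    unsigned num_processors   = 8;
    size_t min_heap_size      = 256 * 1024 * 1024;

    min_nos_size_bytes *= num_processors;                    /* 16 MB */
    size_t min_nos_size_threshold = min_heap_size >> 5;      /* 8 MB: 1/32 of the min heap */
    if (min_nos_size_bytes > min_nos_size_threshold)
        min_nos_size_bytes = round_down_to_size(min_nos_size_threshold, SPACE_ALLOC_UNIT);
    if (MIN_NOS_SIZE) min_nos_size_bytes = MIN_NOS_SIZE;

    printf("min NOS size = %zu bytes\n", min_nos_size_bytes);  /* 8388608 */
    return 0;
}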
Example #2
static Block_Header *get_next_dest_block(Mspace *mspace)
{
    Block_Header *cur_dest_block;

    if(next_block_for_dest) {
        cur_dest_block = (Block_Header*)next_block_for_dest;
        while(cur_dest_block->status == BLOCK_DEST) {
            cur_dest_block = cur_dest_block->next;
            if(!cur_dest_block) break;
        }
        next_block_for_dest = cur_dest_block;
    } else {
        cur_dest_block = set_next_block_for_dest(mspace);
    }

    unsigned int total_dest_counter = 0;
    /*For LOS_Shrink: last_dest_block might point to a fake block*/
    Block_Header *last_dest_block =
        (Block_Header *)round_down_to_size((POINTER_SIZE_INT)(last_block_for_dest->base), GC_BLOCK_SIZE_BYTES);
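    /* Scan from the cursor up to the last candidate block: skip blocks already claimed as
     * destinations (BLOCK_DEST) and hand out the first block that no longer has other
     * source blocks waiting to compact into it (dest_counter == 0, or its only pending
     * source is the block itself). */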
    for(; cur_dest_block <= last_dest_block; cur_dest_block = cur_dest_block->next) {
        if(!cur_dest_block)  return NULL;
        if(cur_dest_block->status == BLOCK_DEST) {
            continue;
        }
        if(cur_dest_block->dest_counter == 0 && cur_dest_block->src) {
            cur_dest_block->status = BLOCK_DEST;
            return cur_dest_block;
        } else if(cur_dest_block->dest_counter == 1 && GC_BLOCK_HEADER(cur_dest_block->src) == cur_dest_block) {
            return cur_dest_block;
        } else if(cur_dest_block->dest_counter == 0 && !cur_dest_block->src) {
            cur_dest_block->status = BLOCK_DEST;
        } else {
            total_dest_counter += cur_dest_block->dest_counter;
        }
    }

    if(total_dest_counter)
        return DEST_NOT_EMPTY;

    return NULL;
}
Example #3
void slide_compact_mspace(Collector* collector)
{
    GC* gc = collector->gc;
    Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
    Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc);

    unsigned int num_active_collectors = gc->num_active_collectors;

    /* Pass 1: **************************************************
      *mark all live objects in the heap, and save all the slots
      *holding references that are going to be repointed.
      */

    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: marking...");
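    /* Most passes below share one rendezvous idiom: atomic_cas32 resets the pass counter
     * from its previous release value, each collector increments it after its parallel
     * work, the last collector performs the serial step and bumps the counter past
     * num_active_collectors, and everyone spins until that value (see the sketch after
     * this function). */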

    unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);

    if(collect_is_fallback())
        mark_scan_heap_for_fallback(collector);
    else if(gc->tuner->kind != TRANS_NOTHING)
        mark_scan_heap_for_space_tune(collector);
    else
        mark_scan_heap(collector);
    old_num = atomic_inc32(&num_marking_collectors);

    /* last collector's world here */
    if( ++old_num == num_active_collectors ) {

        if(!IGNORE_FINREF )
            collector_identify_finref(collector);
#ifndef BUILD_IN_REFERENT
        else {
            gc_set_weakref_sets(gc);
            gc_update_weakref_ignore_finref(gc);
        }
#endif
        gc_identify_dead_weak_roots(gc);

        if( gc->tuner->kind != TRANS_NOTHING ) gc_compute_space_tune_size_after_marking(gc);
        //assert(!(gc->tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
        /* prepare for next phase */
        gc_init_block_for_collectors(gc, mspace);

#ifdef USE_32BITS_HASHCODE
        if(collect_is_fallback())
            fallback_clear_fwd_obj_oi_init(collector);
#endif

        last_block_for_dest = NULL;
        /* let other collectors go */
        num_marking_collectors++;
    }
    while(num_marking_collectors != num_active_collectors + 1);

    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]:  finish pass1 and start pass2: relocating mos&nos...");

    /* Pass 2: **************************************************
       assign target addresses for all to-be-moved objects */

    atomic_cas32( &num_repointing_collectors, 0, num_active_collectors+1);

#ifdef USE_32BITS_HASHCODE
    if(collect_is_fallback())
        fallback_clear_fwd_obj_oi(collector);
#endif
    mspace_compute_object_target(collector, mspace);

    old_num = atomic_inc32(&num_repointing_collectors);
    /*last collector's world here*/
    if( ++old_num == num_active_collectors ) {
        if(lspace->move_object) {
            TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: relocating los ...");
            lspace_compute_object_target(collector, lspace);
        }
        gc->collect_result = gc_collection_result(gc);
        if(!gc->collect_result) {
            num_repointing_collectors++;
            return;
        }
        gc_reset_block_for_collectors(gc, mspace);
        gc_init_block_for_fix_repointed_refs(gc, mspace);
        num_repointing_collectors++;
    }
    while(num_repointing_collectors != num_active_collectors + 1);
    if(!gc->collect_result) return;
    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass2 and start pass3: repointing...");

    /* Pass 3: **************************************************
      *update all references whose objects are to be moved
      */
    old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1);
    mspace_fix_repointed_refs(collector, mspace);
    old_num = atomic_inc32(&num_fixing_collectors);
    /*last collector's world here */
    if( ++old_num == num_active_collectors ) {
        lspace_fix_repointed_refs(collector, lspace);
        gc_fix_rootset(collector, FALSE);
        gc_init_block_for_sliding_compact(gc, mspace);
        /*LOS_Shrink: This operation moves objects in LOS and should logically be part of Pass 4.
          *lspace_sliding_compact is not tied to LOS shrink; we can slide-compact LOS on its own,
          *so we check the flag lspace->move_object here, not tuner->kind == TRANS_FROM_LOS_TO_MOS.
          */
        if(lspace->move_object)  lspace_sliding_compact(collector, lspace);
        /*The temp blocks storing interim information are copied to the real places they belong,
          *and their space, allocated in gc_space_tuner_init_fake_blocks_for_los_shrink, is freed.
          */
        last_block_for_dest = (Block_Header *)round_down_to_size((POINTER_SIZE_INT)last_block_for_dest->base, GC_BLOCK_SIZE_BYTES);
        if(gc->tuner->kind == TRANS_FROM_LOS_TO_MOS) gc_space_tuner_release_fake_blocks_for_los_shrink(gc);
        num_fixing_collectors++;
    }
    while(num_fixing_collectors != num_active_collectors + 1);

    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass3 and start pass4: moving...");

    /* Pass 4: **************************************************
       move objects                                             */

    atomic_cas32( &num_moving_collectors, 0, num_active_collectors);

    mspace_sliding_compact(collector, mspace);

    atomic_inc32(&num_moving_collectors);
    while(num_moving_collectors != num_active_collectors);

    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass4 and start pass 5: restoring obj_info...");

    /* Pass 5: **************************************************
       restore obj_info                                         */

    atomic_cas32( &num_restoring_collectors, 0, num_active_collectors+1);

    collector_restore_obj_info(collector);
#ifdef USE_32BITS_HASHCODE
    collector_attach_hashcode(collector);
#endif

    old_num = atomic_inc32(&num_restoring_collectors);

    if( ++old_num == num_active_collectors ) {
        if(gc->tuner->kind != TRANS_NOTHING)
            mspace_update_info_after_space_tuning(mspace);
        num_restoring_collectors++;
    }
    while(num_restoring_collectors != num_active_collectors + 1);

    /* Dealing with out of memory in mspace */
    void* mspace_border = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
    if( mspace_border > nos_boundary) {
        atomic_cas32( &num_extending_collectors, 0, num_active_collectors);

        mspace_extend_compact(collector);

        atomic_inc32(&num_extending_collectors);
        while(num_extending_collectors != num_active_collectors);
    }

    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass5 and done.");

    return;
}
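The same rendezvous pattern guards the passes above. Below is a minimal sketch, not the VM's code, with C11 atomics standing in for atomic_cas32/atomic_inc32 and the single-threaded phase work abstracted into a callback.

#include <stdatomic.h>

static atomic_uint pass_counter;   /* plays the role of num_marking_collectors etc. */

/* Each collector calls this around its parallel work for a pass. */
static void pass_rendezvous(unsigned num_active, void (*serial_step)(void))
{
    /* Reset the counter from the previous pass's release value (num_active + 1);
     * only the first arrival succeeds, which is all that is required. */
    unsigned prev_release = num_active + 1;
    atomic_compare_exchange_strong(&pass_counter, &prev_release, 0);

    /* ... this collector's parallel work for the pass would run here ... */

    unsigned old_num = atomic_fetch_add(&pass_counter, 1);   /* value before the add */
    if (old_num + 1 == num_active) {                          /* "last collector's world" */
        if (serial_step) serial_step();                       /* single-threaded phase work */
        atomic_fetch_add(&pass_counter, 1);                   /* release the other collectors */
    }
    while (atomic_load(&pass_counter) != num_active + 1)
        ;                                                     /* spin until released */
}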
Example #4
void lspace_reset_for_slide(Lspace* lspace)
{
    GC* gc = lspace->gc;
    Space_Tuner* tuner = gc->tuner;
    POINTER_SIZE_INT trans_size = tuner->tuning_size;
    POINTER_SIZE_INT new_fa_size = 0;
    assert(!(trans_size%GC_BLOCK_SIZE_BYTES));
    Mspace * mos=(Mspace*)((GC_Gen*)gc)->mos;
    Fspace *nos = (Fspace*)((GC_Gen*)gc)->nos;

    /* Reset the pool first because its info is useless now. */
    free_area_pool_reset(lspace->free_pool);

    /*Lspace collection in a major collection must move objects*/
    assert(lspace->move_object);
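    /* Three cases follow: LOS grows and the blocked spaces (MOS/NOS) give up the room
       (TRANS_FROM_MOS_TO_LOS), LOS shrinks and returns the room (TRANS_FROM_LOS_TO_MOS),
       or no tuning at all (TRANS_NOTHING). */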

    switch(tuner->kind){
      case TRANS_FROM_MOS_TO_LOS:{
        //debug_minor_sweep
        if(LOS_ADJUST_BOUNDARY ) {
          Block* mos_first_block = ((Blocked_Space*)((GC_Gen*)gc)->mos)->blocks;
          lspace->heap_end = (void*)mos_first_block;
          assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
        }else{
          vm_commit_mem(lspace->heap_end, trans_size);
          lspace->heap_end= (void*)((POINTER_SIZE_INT)lspace->heap_end + trans_size);   
          //fixme: need to add decommit in NOS
          if(trans_size < nos->committed_heap_size) {
            nos->free_block_idx=nos->first_block_idx;
            blocked_space_shrink((Blocked_Space*)nos, trans_size);
          } else {
            POINTER_SIZE_INT mos_free_size= blocked_space_free_mem_size((Blocked_Space*)mos);
            void *decommit_base=(void*)((POINTER_SIZE_INT)nos->heap_end-trans_size);
            vm_decommit_mem(decommit_base,trans_size);
            unsigned int reduced_mos_size = trans_size - nos->committed_heap_size;
            unsigned int size=round_down_to_size(mos_free_size-reduced_mos_size,SPACE_ALLOC_UNIT);
            unsigned int nos_size= mos_free_size - reduced_mos_size ;
            if(nos_size<GC_BLOCK_SIZE_BYTES)  nos_size=GC_BLOCK_SIZE_BYTES;
            nos_size=round_up_to_size(nos_size, GC_BLOCK_SIZE_BYTES);
            mos->num_managed_blocks -= (( mos_free_size )>>GC_BLOCK_SHIFT_COUNT);
            mos->num_used_blocks = mos->free_block_idx-mos->first_block_idx;
            mos->num_total_blocks=mos->num_managed_blocks;
            mos->ceiling_block_idx -= (( mos_free_size )>>GC_BLOCK_SHIFT_COUNT);
            assert(mos->num_used_blocks<=mos->num_managed_blocks);
            void *start_address=(void*)&(mos->blocks[mos->num_managed_blocks]);
            assert(start_address< decommit_base);
            mos->heap_end = start_address;
            mos->committed_heap_size = (POINTER_SIZE_INT) start_address - (POINTER_SIZE_INT) mos->heap_start;
            nos_boundary = nos->heap_start = start_address;
            nos->heap_end = decommit_base;
            nos->committed_heap_size = nos->reserved_heap_size = (POINTER_SIZE_INT)decommit_base- (POINTER_SIZE_INT) start_address;   
            nos->num_total_blocks = nos->num_managed_blocks = nos_size>>GC_BLOCK_SHIFT_COUNT;
            nos->free_block_idx=nos->first_block_idx=GC_BLOCK_INDEX_FROM(gc->heap_start,start_address);
            nos->ceiling_block_idx=nos->first_block_idx+nos->num_managed_blocks-1;
            nos->num_used_blocks = 0;
            space_init_blocks((Blocked_Space*)nos);
          }
        }
        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size;
        Free_Area* fa = free_area_new(lspace->scompact_fa_start,  new_fa_size);
        if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
        lspace->committed_heap_size += trans_size;
        
        break;
      }
      case TRANS_FROM_LOS_TO_MOS:{
        assert(lspace->move_object);
        if(LOS_ADJUST_BOUNDARY ){
          Block* mos_first_block = ((Blocked_Space*)((GC_Gen*)gc)->mos)->blocks;
          assert( (POINTER_SIZE_INT)lspace->heap_end - trans_size == (POINTER_SIZE_INT)mos_first_block );
              lspace->heap_end = (void*)mos_first_block;
        }else{
          void *p=(void*)((POINTER_SIZE_INT)lspace->heap_end - trans_size);
          vm_decommit_mem(p, trans_size);
          lspace->heap_end=p;
          //fixme: need to add decommit in NOS
          blocked_space_extend((Blocked_Space*)((GC_Gen*) gc)->nos, trans_size);
        }
        lspace->committed_heap_size -= trans_size;
        /*LOS_Shrink: We don't have to scan lspace to build the free pool when slide-compacting LOS*/
        assert((POINTER_SIZE_INT)lspace->scompact_fa_end > (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size);
        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start - tuner->tuning_size;
        Free_Area* fa = free_area_new(lspace->scompact_fa_start,  new_fa_size);
        if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);

        break;
      }
      default:{
        assert(lspace->move_object);
        assert(tuner->kind == TRANS_NOTHING);
        assert(!tuner->tuning_size);
        new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start;
        if(new_fa_size == 0) break;
        Free_Area* fa = free_area_new(lspace->scompact_fa_start,  new_fa_size);
        if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
        break;
      }
    }

//    lspace->accumu_alloced_size = 0;    
//    lspace->last_alloced_size = 0;        
    lspace->period_surviving_size = (POINTER_SIZE_INT)lspace->scompact_fa_start - (POINTER_SIZE_INT)lspace->heap_start;
    lspace->last_surviving_size = lspace->period_surviving_size;
    lspace->survive_ratio = (float)lspace->accumu_alloced_size / (float)lspace->committed_heap_size;

    los_boundary = lspace->heap_end;
}
Example #5
/* This is the function that really decides tuning_size, because the total size of live objects is known after "mark_scan_heap_for_space_tune". */
void gc_compute_space_tune_size_after_marking(GC *gc)
{
  Blocked_Space* mspace = (Blocked_Space*)gc_get_mos((GC_Gen*)gc);
  Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);
  Lspace *lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
  Space_Tuner* tuner = gc->tuner;

  POINTER_SIZE_INT max_tuning_size = 0;  
  POINTER_SIZE_INT non_los_size = mspace->committed_heap_size + fspace->committed_heap_size;
  if(LOS_ADJUST_BOUNDARY) 
    gc_compute_live_object_size_after_marking(gc, non_los_size);
  else {
    unsigned int collector_num = gc->num_active_collectors;
    POINTER_SIZE_INT reserve_size = collector_num <<(GC_BLOCK_SHIFT_COUNT+2);
    los_live_obj_size = (POINTER_SIZE_INT) lspace->last_surviving_size + reserve_size;
    non_los_live_obj_size = ((POINTER_SIZE_INT)(mspace->free_block_idx-mspace->first_block_idx)<<GC_BLOCK_SHIFT_COUNT)+reserve_size;
    non_los_live_obj_size = round_up_to_size(non_los_live_obj_size, SPACE_ALLOC_UNIT); 
  }
  check_tuning_size(gc);
  
  /*We should ensure that the non_los area is no smaller than min_none_los_size_bytes*/
  POINTER_SIZE_INT max_tune_for_min_non_los = 0;
  if(non_los_size > min_none_los_size_bytes)
    max_tune_for_min_non_los = non_los_size - min_none_los_size_bytes;
  POINTER_SIZE_INT max_tune_for_min_los = 0;
  //debug_adjust
  assert(lspace->committed_heap_size >= min_los_size_bytes);
  max_tune_for_min_los = lspace->committed_heap_size - min_los_size_bytes;

  /*Not force tune, LOS_Extend:*/
  if(tuner->kind == TRANS_FROM_MOS_TO_LOS)
  {
    if (gc->committed_heap_size > lspace->committed_heap_size + non_los_live_obj_size){
      max_tuning_size = gc->committed_heap_size - lspace->committed_heap_size - non_los_live_obj_size;
      if(max_tuning_size > max_tune_for_min_non_los)
        max_tuning_size = max_tune_for_min_non_los;
      if( tuner->tuning_size > max_tuning_size)
        tuner->tuning_size = max_tuning_size;
      /*Round down so as not to break max_tuning_size*/
      if(LOS_ADJUST_BOUNDARY)
        tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
      else
        tuner->tuning_size = round_down_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);

      /*If the tuning size is zero, reset kind to TRANS_NOTHING so that gc_init_block_for_collectors does not relink the block list.*/
      if(tuner->tuning_size == 0)  tuner->kind = TRANS_NOTHING;
    }else{ 
      tuner->tuning_size = 0;
      tuner->kind = TRANS_NOTHING;
    }
  }
  /*Not force tune, LOS Shrink*/
  else
  {    
    if(lspace->committed_heap_size > los_live_obj_size){
      max_tuning_size = lspace->committed_heap_size - los_live_obj_size;
      if(max_tuning_size > max_tune_for_min_los)
        max_tuning_size = max_tune_for_min_los;
      if(tuner->tuning_size > max_tuning_size) 
        tuner->tuning_size = max_tuning_size;
      /*Round down so as not to break max_tuning_size*/

      if (LOS_ADJUST_BOUNDARY)
        tuner->tuning_size = round_down_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
      else
        tuner->tuning_size = round_down_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);

      if(tuner->tuning_size == 0)  tuner->kind = TRANS_NOTHING;
    }else{
      /* this is possible because of the reservation in gc_compute_live_object_size_after_marking*/        
      tuner->tuning_size = 0;
      tuner->kind = TRANS_NOTHING;
    }
  }

  /*If the tuning strategy gives a larger tuning_size than the failure size, we just follow the strategy and do not force the tune.*/
  Boolean doforce = TRUE;
  POINTER_SIZE_INT failure_size = lspace_get_failure_size((Lspace*)lspace);  
  if( (tuner->kind == TRANS_FROM_MOS_TO_LOS) && (!tuner->reverse) && (tuner->tuning_size > failure_size) )
    doforce = FALSE;
  if( (tuner->force_tune) && (doforce) )
    compute_space_tune_size_for_force_tune(gc, max_tune_for_min_non_los);

  return;
  
}
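A hedged numeric walk-through of the LOS-extend clamping above (all sizes are made up; GC_BLOCK_SIZE_BYTES and round_down_to_size are stand-ins for the real definitions): the requested tuning_size is capped by the free space left over after the live non-LOS objects, capped again so the non-LOS area keeps its minimum size, then rounded down to the block size.

#include <stdio.h>
#include <stddef.h>

#define MB ((size_t)1024 * 1024)
#define GC_BLOCK_SIZE_BYTES (32 * (size_t)1024)   /* assumed block size */

static size_t round_down_to_size(size_t value, size_t unit) { return value - value % unit; }

int main(void)
{
    size_t committed_heap = 512 * MB;
    size_t los_committed  =  64 * MB;
    size_t non_los_live   = 300 * MB;             /* live objects outside LOS */
    size_t min_non_los    = 128 * MB;
    size_t tuning_size    = 200 * MB;             /* what the tuner initially asked for */

    size_t non_los_size            = committed_heap - los_committed;                 /* 448 MB */
    size_t max_tune_for_min_nonlos = non_los_size > min_non_los ? non_los_size - min_non_los : 0;
    size_t max_tuning_size         = committed_heap - los_committed - non_los_live;  /* 148 MB */
    if (max_tuning_size > max_tune_for_min_nonlos) max_tuning_size = max_tune_for_min_nonlos;
    if (tuning_size > max_tuning_size) tuning_size = max_tuning_size;
    tuning_size = round_down_to_size(tuning_size, GC_BLOCK_SIZE_BYTES);

    printf("tuning_size = %zu MB\n", tuning_size / MB);   /* 148 */
    return 0;
}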
Example #6
/* If this GC is caused by a LOS allocation failure, we set the "force_tune" flag.
  * Attention 1: The space tuning strategy extends or shrinks LOS according to the wasted memory size and the allocation speed.
  * If the strategy decides to shrink, or the size it extends is not large enough to hold the failed object, the "doforce" flag
  * remains set in "gc_compute_space_tune_size_after_marking". Only when "force_tune" and "doforce" are both true does this
  * function decide the size of the extension.
  * Attention 2: The total heap size might be extended in this function. */
static void compute_space_tune_size_for_force_tune(GC *gc, POINTER_SIZE_INT max_tune_for_min_non_los)
{
  Space_Tuner* tuner = gc->tuner;
  Lspace *lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
  Blocked_Space* fspace = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);  
  POINTER_SIZE_INT max_tuning_size = 0;
  POINTER_SIZE_INT failure_size = lspace->failure_size;
  POINTER_SIZE_INT lspace_free_size = ( (lspace->committed_heap_size > los_live_obj_size) ? (lspace->committed_heap_size - los_live_obj_size) : (0) );
  //debug_adjust
  assert(!(lspace_free_size % KB));
  assert(!(failure_size % KB));

  if(lspace_free_size >= failure_size){
    tuner->tuning_size = 0;
    tuner->kind = TRANS_NOTHING;
  }else{
    tuner->tuning_size = failure_size -lspace_free_size;
    
    /*We should ensure that the tuning size is no more than the free space of the non_los area*/
    if( gc->committed_heap_size > lspace->committed_heap_size + non_los_live_obj_size )
      max_tuning_size = gc->committed_heap_size - lspace->committed_heap_size - non_los_live_obj_size;

    if(max_tuning_size > max_tune_for_min_non_los)
      max_tuning_size = max_tune_for_min_non_los;

    /*Round up to satisfy LOS alloc demand.*/
    if(LOS_ADJUST_BOUNDARY)  {
      tuner->tuning_size = round_up_to_size(tuner->tuning_size, GC_BLOCK_SIZE_BYTES);
      max_tuning_size = round_down_to_size(max_tuning_size, GC_BLOCK_SIZE_BYTES);
    }else {
      tuner->tuning_size = round_up_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);
      max_tuning_size = round_down_to_size(max_tuning_size, SPACE_ALLOC_UNIT);
    }

    /*If the tuning size is too large, we do nothing and wait for the JVM to throw OOM*/
    /*Fixme: if the heap size is not mx, we can extend the whole heap size*/
    if(tuner->tuning_size > max_tuning_size){
      tuner->tuning_size = round_up_to_size(tuner->tuning_size, SPACE_ALLOC_UNIT);
      max_tuning_size = round_down_to_size(max_tuning_size, SPACE_ALLOC_UNIT);
        //debug_adjust
      assert(max_heap_size_bytes >= gc->committed_heap_size);
      POINTER_SIZE_INT extend_heap_size = 0;
      POINTER_SIZE_INT potential_max_tuning_size = max_tuning_size + max_heap_size_bytes - gc->committed_heap_size;
      potential_max_tuning_size -= LOS_HEAD_RESERVE_FOR_HEAP_BASE;

      //debug_adjust
      assert(!(potential_max_tuning_size % SPACE_ALLOC_UNIT));
      if(tuner->tuning_size > potential_max_tuning_size){
        tuner->tuning_size = 0;
        tuner->kind = TRANS_NOTHING;
      }else{
        /*We have tuner->tuning_size > max_tuning_size up there.*/
        extend_heap_size = tuner->tuning_size - max_tuning_size;    
        blocked_space_extend(fspace, (unsigned int)extend_heap_size);
        gc->committed_heap_size += extend_heap_size;
        tuner->kind = TRANS_FROM_MOS_TO_LOS;
      }
    }
    else
    {
      tuner->kind = TRANS_FROM_MOS_TO_LOS;
    }
  }

  return;
}

/* FIXME:: This algorithm assumes NOS is a full forwarding space.
   A semispace GC's NOS has a survivor_area and needs careful rethinking,
   but so far this algorithm is a good approximation. */
Boolean gc_compute_new_space_size(GC_Gen* gc, POINTER_SIZE_INT* mos_size, POINTER_SIZE_INT* nos_size)
{
  Blocked_Space* nos = (Blocked_Space*)gc->nos;
  Blocked_Space* mos = (Blocked_Space*)gc->mos;
  Space* los = gc->los;  
  
  POINTER_SIZE_INT new_nos_size;
  POINTER_SIZE_INT new_mos_size;

  POINTER_SIZE_INT curr_nos_size = space_committed_size((Space*)nos);
  // POINTER_SIZE_INT used_nos_size = nos_used_space_size((Space*)nos);
  POINTER_SIZE_INT used_mos_size = mos_used_space_size((Space*)mos);

  POINTER_SIZE_INT total_size; /* total_size is the size of the non-LOS spaces */

#ifdef STATIC_NOS_MAPPING
    total_size = max_heap_size_bytes - space_committed_size(los);
#else
    POINTER_SIZE_INT curr_heap_commit_end;
   
    if(LOS_ADJUST_BOUNDARY) {
      curr_heap_commit_end=(POINTER_SIZE_INT)gc->heap_start + LOS_HEAD_RESERVE_FOR_HEAP_BASE + gc->committed_heap_size;
      assert(curr_heap_commit_end > (POINTER_SIZE_INT)mos->heap_start);
      total_size = curr_heap_commit_end - (POINTER_SIZE_INT)mos->heap_start;
    }else {/*LOS_ADJUST_BOUNDARY else */
      curr_heap_commit_end =  (nos->committed_heap_size)? (POINTER_SIZE_INT) nos->heap_start + nos->committed_heap_size: 
               (POINTER_SIZE_INT) mos->heap_start+mos->committed_heap_size;
      total_size = curr_heap_commit_end - (POINTER_SIZE_INT) mos->heap_start;
    }
#endif
  assert(total_size >= used_mos_size);
  POINTER_SIZE_INT total_free = total_size - used_mos_size;
  /*If total free is smaller than one block, there is no room for us to adjust*/
  if(total_free < GC_BLOCK_SIZE_BYTES)  return FALSE;

  POINTER_SIZE_INT nos_reserve_size;
  if( MOS_RESERVE_SIZE == 0){
    /*Reserve some MOS space to avoid a fallback situation,
     *but we need to ensure NOS has at least one block.
     *We use the formula:
     *NOS_SIZE + NOS_SIZE * anti_fall_back_ratio + NOS_SIZE * survive_ratio = TOTAL_FREE*/
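    /*Solving for NOS_SIZE gives NOS_SIZE = TOTAL_FREE / (1 + anti_fall_back_ratio + survive_ratio),
     *which the else branch below computes directly; the if branch first caps the MOS reserve at
     *DEFAULT_MOS_RESERVE_SIZE and then splits only the remainder: (TOTAL_FREE - reserve) / (1 + survive_ratio).
     *Hypothetical numbers: TOTAL_FREE = 120 MB, ratio = 0.25, survive_ratio = 0.25 => NOS_SIZE = 120/1.5 = 80 MB.*/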
    POINTER_SIZE_INT anti_fallback_size_in_mos;
    float ratio_of_anti_fallback_size_to_nos = 0.25f;
    anti_fallback_size_in_mos = (POINTER_SIZE_INT)(((float)total_free * ratio_of_anti_fallback_size_to_nos)/(1.0f + ratio_of_anti_fallback_size_to_nos + nos->survive_ratio));
    if(anti_fallback_size_in_mos > DEFAULT_MOS_RESERVE_SIZE ){
      /*If the computed anti_fallback_size_in_mos is too large, we reset it back to DEFAULT_MOS_RESERVE_SIZE .*/
      anti_fallback_size_in_mos = DEFAULT_MOS_RESERVE_SIZE ;
      /*Here, anti_fallback_size_in_mos must be smaller than TOTAL_FREE*/
      nos_reserve_size = (POINTER_SIZE_INT)(((float)(total_free - anti_fallback_size_in_mos))/(1.0f + nos->survive_ratio)); 
    }else{
      nos_reserve_size = (POINTER_SIZE_INT)(((float)total_free)/(1.0f + ratio_of_anti_fallback_size_to_nos + nos->survive_ratio));
    }
    
  }else{
    nos_reserve_size = total_free - MOS_RESERVE_SIZE;
  }
  
  /*NOS should not be zero. If there is only one block in non-LOS, i.e. the earlier check left
    *total_free == GC_BLOCK_SIZE_BYTES, then the computed nos_reserve_size is between zero
    *and GC_BLOCK_SIZE_BYTES; in this case we assign that block to NOS.*/
  if(nos_reserve_size <= GC_BLOCK_SIZE_BYTES)  nos_reserve_size = GC_BLOCK_SIZE_BYTES;

#ifdef STATIC_NOS_MAPPING
  if(nos_reserve_size > nos->reserved_heap_size) nos_reserve_size = nos->reserved_heap_size;
#endif  

  new_nos_size = round_down_to_size((POINTER_SIZE_INT)nos_reserve_size, GC_BLOCK_SIZE_BYTES); 

  if(gc->force_gen_mode){
    new_nos_size = min_nos_size_bytes;
  }
 
  new_mos_size = total_size - new_nos_size;
#ifdef STATIC_NOS_MAPPING
  if(new_mos_size > mos->reserved_heap_size) new_mos_size = mos->reserved_heap_size;
#endif

  *nos_size = new_nos_size;
  *mos_size = new_mos_size;
  return TRUE;
}