/* Adaptively switch the collector between generational and non-generational
 * minor-collection modes, based on the measured throughput (nos free bytes
 * per unit of pause time) of each mode and the mos survive ratio.
 * Called after every collection with that collection's pause time.
 * Side effects: mutates gc->force_gen_mode, gc->next_collect_force_major,
 * the gen_mode_adaptor counters/throughputs, and the global gen-mode flag
 * via gc_set_gen_mode().
 */
void gc_gen_mode_adapt(GC_Gen* gc, int64 pause_time)
{
  /* Global switch: mode adaptation can be disabled entirely. */
  if(GEN_NONGEN_SWITCH == FALSE) return;

  Blocked_Space* nos = (Blocked_Space*)gc->nos;
  Blocked_Space* mos = (Blocked_Space*)gc->mos;
  Gen_Mode_Adaptor* gen_mode_adaptor = gc->gen_mode_adaptor;

  POINTER_SIZE_INT mos_free_size = blocked_space_free_mem_size(mos);
  POINTER_SIZE_INT nos_free_size = blocked_space_free_mem_size(nos);
  POINTER_SIZE_INT total_free_size = mos_free_size + nos_free_size;

  if(collect_is_major()) {
    /* Major collections only happen in non-generational mode. */
    assert(!gc_is_gen_mode());
    /* If the mos survive ratio has stayed above the threshold for more than
       MAX_MAJOR_REPEAT_COUNT consecutive majors, force a trial of
       generational mode and skip the pending forced major. */
    if(gen_mode_adaptor->major_survive_ratio_threshold != 0 && mos->survive_ratio > gen_mode_adaptor->major_survive_ratio_threshold){
      if(gen_mode_adaptor->major_repeat_count > MAX_MAJOR_REPEAT_COUNT ){
        gc->force_gen_mode = TRUE;
        gc_set_gen_mode(TRUE);
        gc->next_collect_force_major = FALSE;
        return;
      }else{
        gen_mode_adaptor->major_repeat_count++;
      }
    }else{
      gen_mode_adaptor->major_repeat_count = 1;
    }
  }else{
    /*compute throughput*/
    /* A non-minor previous collection invalidates the nongen sample. */
    if(!collect_last_is_minor((GC*)gc)){
      gen_mode_adaptor->nongen_minor_throughput = 1.0f;
    }
    if(gc->force_gen_mode){
      if(pause_time!=0){
        /* Exponential-style average of generational minor throughput;
           first sample is taken as-is. */
        if(gen_mode_adaptor->gen_minor_throughput != 0)
          gen_mode_adaptor->gen_minor_throughput = (gen_mode_adaptor->gen_minor_throughput + (float) nos_free_size/(float)pause_time)/2.0f;
        else
          gen_mode_adaptor->gen_minor_throughput =(float) nos_free_size/(float)pause_time;
      }
    }else{
      if(pause_time!=0){
        /* NOTE(review): this condition tests gen_minor_throughput but the
           branch updates nongen_minor_throughput; by symmetry with the
           branch above it may have been intended to test
           nongen_minor_throughput (reset to 1.0f just above) — confirm
           against the adaptor's initialization before changing. */
        if(gen_mode_adaptor->gen_minor_throughput != 1.0f)
          gen_mode_adaptor->nongen_minor_throughput = (gen_mode_adaptor->nongen_minor_throughput + (float) nos_free_size/(float)pause_time)/2.0f;
        else
          gen_mode_adaptor->nongen_minor_throughput = (float) nos_free_size/(float)pause_time;
      }
    }

    /* Prefer whichever mode has shown the higher minor throughput. */
    if(gen_mode_adaptor->nongen_minor_throughput <= gen_mode_adaptor->gen_minor_throughput ){
      if( !collect_last_is_minor((GC*)gc) ){
        /* Just came out of a major: remember its survive ratio as the
           trigger threshold for future gen-mode trials. */
        gen_mode_adaptor->major_survive_ratio_threshold = mos->survive_ratio;
      }else if( !gc->force_gen_mode ){
        /* Gen mode is winning: switch to it with an unbounded trial. */
        gc->force_gen_mode = TRUE;
        gen_mode_adaptor->gen_mode_trial_count = MAX_INT32;
      }
    }

    if(gc->next_collect_force_major && !gc->force_gen_mode){
      /* A major was requested; give gen mode a short two-minor trial first. */
      gc->next_collect_force_major = FALSE;
      gc->force_gen_mode = TRUE;
      gen_mode_adaptor->gen_mode_trial_count = 2;
    }else if( collect_last_is_minor((GC*)gc) && gc->force_gen_mode){
      /* Gen-mode minors are still running: keep the trial going. */
      gen_mode_adaptor->gen_mode_trial_count = MAX_INT32;
    }

    /* Bail out of gen mode when free space drops near the minimum nos size
       (1.3x safety factor), and force a major next to reclaim space. */
    if(gc->force_gen_mode && (total_free_size <= ((float)min_nos_size_bytes) * 1.3 )){
      gc->force_gen_mode = FALSE;
      gc_set_gen_mode(FALSE);
      gc->next_collect_force_major = TRUE;
      gen_mode_adaptor->gen_mode_trial_count = 0;
      return;
    }

    if( gc->force_gen_mode ){
      assert( gen_mode_adaptor->gen_mode_trial_count >= 0);
      gen_mode_adaptor->gen_mode_trial_count --;
      if( gen_mode_adaptor->gen_mode_trial_count >= 0){
        gc_set_gen_mode(TRUE);
        return;
      }
      /* Trial budget exhausted: revert to non-generational mode and
         schedule a forced major. */
      gc->force_gen_mode = FALSE;
      gc->next_collect_force_major = TRUE;
      gen_mode_adaptor->gen_mode_trial_count = 0;
    }
  }
  gc_set_gen_mode(FALSE);
  return;
}
/* Rebuild the large-object space (lspace) state after a slide-compact
 * collection, applying the space tuner's pending size transfer between
 * LOS and MOS/NOS.  tuner->tuning_size bytes are moved according to
 * tuner->kind, the lspace free pool is rebuilt from the area left above
 * the compaction frontier (scompact_fa_*), and the survive statistics are
 * refreshed.  Side effects: commits/decommits heap memory, rewrites
 * mos/nos boundary fields, and updates the globals nos_boundary and
 * los_boundary.
 */
void lspace_reset_for_slide(Lspace* lspace)
{
  GC* gc = lspace->gc;
  Space_Tuner* tuner = gc->tuner;
  POINTER_SIZE_INT trans_size = tuner->tuning_size;
  POINTER_SIZE_INT new_fa_size = 0;
  /* Transfers are always a whole number of GC blocks. */
  assert(!(trans_size%GC_BLOCK_SIZE_BYTES));
  Mspace * mos=(Mspace*)((GC_Gen*)gc)->mos;
  Fspace *nos = (Fspace*)((GC_Gen*)gc)->nos;

  /* Reset the pool first because its info is useless now. */
  free_area_pool_reset(lspace->free_pool);

  /*Lspace collection in major collection must move object*/
  assert(lspace->move_object);

  switch(tuner->kind){
    case TRANS_FROM_MOS_TO_LOS:{
      /* LOS grows by trans_size at the expense of MOS/NOS. */
      //debug_minor_sweep
      if(LOS_ADJUST_BOUNDARY ) {
        /* Boundary-adjust mode: LOS simply extends up to the first mos
           block; the tuner has already moved that boundary. */
        Block* mos_first_block = ((Blocked_Space*)((GC_Gen*)gc)->mos)->blocks;
        lspace->heap_end = (void*)mos_first_block;
        assert(!(tuner->tuning_size % GC_BLOCK_SIZE_BYTES));
      }else{
        /* Commit fresh memory above the old LOS end. */
        vm_commit_mem(lspace->heap_end, trans_size);
        lspace->heap_end= (void*)((POINTER_SIZE_INT)lspace->heap_end + trans_size);
        //fixme: need to add decommit in NOS
        if(trans_size < nos->committed_heap_size) {
          /* NOS alone can pay for the transfer: shrink it. */
          nos->free_block_idx=nos->first_block_idx;
          blocked_space_shrink((Blocked_Space*)nos, trans_size);
        } else {
          /* NOS is not big enough: take the remainder from MOS free space
             and rebuild both spaces' block bookkeeping from scratch. */
          POINTER_SIZE_INT mos_free_size= blocked_space_free_mem_size((Blocked_Space*)mos);
          void *decommit_base=(void*)((POINTER_SIZE_INT)nos->heap_end-trans_size);
          vm_decommit_mem(decommit_base,trans_size);
          /* Portion of the transfer that must come out of MOS (non-negative
             here because trans_size >= nos->committed_heap_size). */
          unsigned int reduced_mos_size = trans_size - nos->committed_heap_size;
          /* NOTE(review): 'size' is computed but never used below. */
          unsigned int size=round_down_to_size(mos_free_size-reduced_mos_size,SPACE_ALLOC_UNIT);
          /* New NOS size = what is left of MOS free space, clamped to at
             least one block and rounded up to block granularity. */
          unsigned int nos_size= mos_free_size - reduced_mos_size ;
          if(nos_size<GC_BLOCK_SIZE_BYTES) nos_size=GC_BLOCK_SIZE_BYTES;
          nos_size=round_up_to_size(nos_size, GC_BLOCK_SIZE_BYTES);
          /* MOS gives up all of its free blocks. */
          mos->num_managed_blocks -= (( mos_free_size )>>GC_BLOCK_SHIFT_COUNT);
          mos->num_used_blocks = mos->free_block_idx-mos->first_block_idx;
          mos->num_total_blocks=mos->num_managed_blocks;
          mos->ceiling_block_idx -= (( mos_free_size )>>GC_BLOCK_SHIFT_COUNT);
          assert(mos->num_used_blocks<=mos->num_managed_blocks);
          /* New MOS/NOS boundary sits right after the last managed
             mos block. */
          void *start_address=(void*)&(mos->blocks[mos->num_managed_blocks]);
          assert(start_address< decommit_base);
          mos->heap_end = start_address;
          mos->committed_heap_size = (POINTER_SIZE_INT) start_address - (POINTER_SIZE_INT) mos->heap_start;
          /* Relocate NOS to [start_address, decommit_base) and reinitialize
             its block headers. */
          nos_boundary = nos->heap_start = start_address;
          nos->heap_end = decommit_base;
          nos->committed_heap_size = nos->reserved_heap_size = (POINTER_SIZE_INT)decommit_base- (POINTER_SIZE_INT) start_address;
          nos->num_total_blocks = nos->num_managed_blocks = nos_size>>GC_BLOCK_SHIFT_COUNT;
          nos->free_block_idx=nos->first_block_idx=GC_BLOCK_INDEX_FROM(gc->heap_start,start_address);
          nos->ceiling_block_idx=nos->first_block_idx+nos->num_managed_blocks-1;
          nos->num_used_blocks = 0;
          space_init_blocks((Blocked_Space*)nos);
        }
      }
      /* Free area above the compaction frontier, enlarged by the transfer. */
      new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size;
      Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
      /* Areas below the LOS object-size threshold are not pooled. */
      if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
      lspace->committed_heap_size += trans_size;
      break;
    }
    case TRANS_FROM_LOS_TO_MOS:{
      /* LOS shrinks by trans_size, which is handed to MOS/NOS. */
      assert(lspace->move_object);
      if(LOS_ADJUST_BOUNDARY ){
        Block* mos_first_block = ((Blocked_Space*)((GC_Gen*)gc)->mos)->blocks;
        assert( (POINTER_SIZE_INT)lspace->heap_end - trans_size == (POINTER_SIZE_INT)mos_first_block );
        lspace->heap_end = (void*)mos_first_block;
      }else{
        /* Decommit the surrendered tail of LOS and grow NOS instead. */
        void *p=(void*)((POINTER_SIZE_INT)lspace->heap_end - trans_size);
        vm_decommit_mem(p, trans_size);
        lspace->heap_end=p;
        //fixme: need to add decommit in NOS
        blocked_space_extend((Blocked_Space*)((GC_Gen*) gc)->nos, trans_size);
      }
      lspace->committed_heap_size -= trans_size;
      /*LOS_Shrink: We don't have to scan lspace to build free pool when slide compact LOS*/
      assert((POINTER_SIZE_INT)lspace->scompact_fa_end > (POINTER_SIZE_INT)lspace->scompact_fa_start + tuner->tuning_size);
      new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start - tuner->tuning_size;
      Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
      if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
      break;
    }
    default:{
      /* No size transfer: just rebuild the free pool from the area above
         the compaction frontier. */
      assert(lspace->move_object);
      assert(tuner->kind == TRANS_NOTHING);
      assert(!tuner->tuning_size);
      new_fa_size = (POINTER_SIZE_INT)lspace->scompact_fa_end - (POINTER_SIZE_INT)lspace->scompact_fa_start;
      if(new_fa_size == 0) break;
      Free_Area* fa = free_area_new(lspace->scompact_fa_start, new_fa_size);
      if(new_fa_size >= GC_LOS_OBJ_SIZE_THRESHOLD) free_pool_add_area(lspace->free_pool, fa);
      break;
    }
  }

  // lspace->accumu_alloced_size = 0;
  // lspace->last_alloced_size = 0;
  /* Everything below the compaction frontier survived this collection. */
  lspace->period_surviving_size = (POINTER_SIZE_INT)lspace->scompact_fa_start - (POINTER_SIZE_INT)lspace->heap_start;
  lspace->last_surviving_size = lspace->period_surviving_size;
  lspace->survive_ratio = (float)lspace->accumu_alloced_size / (float)lspace->committed_heap_size;

  los_boundary = lspace->heap_end;
}
/* Re-balance the committed sizes of NOS and MOS after a collection.
 * Computes the desired split via gc_compute_new_space_size(), then moves
 * committed blocks between the two spaces (shrink one, extend the other)
 * and re-links the mos block chain to the (possibly relocated) first nos
 * block.  No-op when NOS_SIZE is user-pinned, when the size computation
 * declines, or when the target equals the current nos size.
 */
void gc_gen_adapt(GC_Gen* gc, int64 pause_time)
{
  gc_decide_next_collect(gc, pause_time);

  /* A user-specified nos size disables adaptation. */
  if(NOS_SIZE) return;

  POINTER_SIZE_INT new_nos_size;
  POINTER_SIZE_INT new_mos_size;
  if(!gc_compute_new_space_size(gc, &new_mos_size, &new_nos_size)) return;

  Blocked_Space* nos = (Blocked_Space*)gc->nos;
  Blocked_Space* mos = (Blocked_Space*)gc->mos;

  POINTER_SIZE_INT cur_nos_size = space_committed_size((Space*)nos);
  //if( ABS_DIFF(new_nos_size, cur_nos_size) < NOS_COPY_RESERVE_DELTA )
  if(new_nos_size == cur_nos_size) return;

  INFO2("gc.process", "GC: gc_gen space adjustment after GC["<<gc->num_collections<<"] ...\n");
  if(new_nos_size > cur_nos_size){
    POINTER_SIZE_INT delta = new_nos_size - cur_nos_size;
    INFO2("gc.space", "GC: Space Adapt: mos ---> nos (" <<verbose_print_size(delta) <<" size was transferred from mos to nos)\n");
  }else{
    POINTER_SIZE_INT delta = cur_nos_size - new_nos_size;
    INFO2("gc.space", "GC: Space Adapt: nos ---> mos (" <<verbose_print_size(delta) <<" size was transferred from nos to mos)\n");
  }

  POINTER_SIZE_INT mos_used = blocked_space_used_mem_size((Blocked_Space*)mos);
  POINTER_SIZE_INT mos_free = blocked_space_free_mem_size((Blocked_Space*)mos);
  POINTER_SIZE_INT target_mos_free = new_mos_size - mos_used;

  /* Cap mos growth so its allocated end never crosses its heap border. */
  POINTER_SIZE_INT mos_alloc_end = (POINTER_SIZE_INT)&mos->blocks[mos->free_block_idx - mos->first_block_idx];
  POINTER_SIZE_INT mos_border = (POINTER_SIZE_INT)mos->heap_end;
  if(mos_alloc_end + target_mos_free > mos_border){
    /* we can't let mos cross border */
    target_mos_free = mos_border - mos_alloc_end;
  }

  if(new_nos_size < cur_nos_size){
    /* lets shrink nos */
    assert(target_mos_free > mos_free);
    blocked_space_shrink((Blocked_Space*)nos, cur_nos_size - new_nos_size);
    blocked_space_extend((Blocked_Space*)mos, target_mos_free - mos_free);
  }else if(new_nos_size > cur_nos_size){
    /* lets grow nos */
    assert(target_mos_free < mos_free);
    blocked_space_shrink((Blocked_Space*)mos, mos_free - target_mos_free);
    blocked_space_extend((Blocked_Space*)nos, new_nos_size - cur_nos_size);
  }

  /* Stitch the block chain back together across the new boundary. */
  Block_Header* tail_of_mos = (Block_Header*)&mos->blocks[mos->num_managed_blocks-1];
  tail_of_mos->next = (Block_Header*)&nos->blocks[0];
  return;
}