void mos_swap_obj_header_info(Mspace* mspace, Sspace* sspace) { #ifdef ORDER_GC_DEBUG printf("[GC DEBUG]: swap nos (sspace) in major collection\n"); #endif unsigned int i; unsigned int ceiling_idx = (unsigned int)( ((POINTER_SIZE_INT)sspace->heap_end - (POINTER_SIZE_INT)sspace->heap_start) >> GC_BLOCK_SHIFT_COUNT); Block* sspace_blocks = (Block*)sspace->blocks; // unsigned int sspace_first_idx = sspace->first_block_idx; int mark_type = collect_is_fallback() ? MARK_IN_VT_FORWARD : MARK_IN_VT; for(i = 0; i < ceiling_idx; i++){ Block_Header* block = (Block_Header*)&(sspace_blocks[i]); swap_obj_header_info_in_block(block, mark_type); } #ifdef ORDER_GC_DEBUG printf("[GC DEBUG]: swap mos (mspace) in major collection\n"); #endif Block *mspace_blocks = mspace->blocks; for(i = 0; i < mspace->num_managed_blocks; i++){ Block_Header* block = (Block_Header*)&(mspace_blocks[i]); swap_obj_header_info_in_block(block, mark_type); } }
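/* Illustrative sketch (not part of the collector): the ceiling_idx computation above derives the
 * number of blocks in a space from its address range, assuming the space size is block-aligned and
 * a block is (1 << GC_BLOCK_SHIFT_COUNT) bytes. The helper below restates that arithmetic with a
 * hypothetical 32KB block size (shift of 15) purely for illustration; the real value is whatever
 * GC_BLOCK_SHIFT_COUNT is configured to in this build.
 */
#if 0  /* example only, never compiled */
static unsigned int example_num_blocks_in_range(void* start, void* end)
{
  const unsigned int example_block_shift = 15;   /* hypothetical: 32KB blocks */
  POINTER_SIZE_INT range = (POINTER_SIZE_INT)end - (POINTER_SIZE_INT)start;
  /* e.g. a 32MB space -> 32MB / 32KB = 1024 blocks */
  return (unsigned int)(range >> example_block_shift);
}
#endif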
void mspace_collection(Mspace* mspace)
{
  mspace->num_collections++;

  GC* gc = mspace->gc;
  Transform_Kind kind = gc->tuner->kind;

  /* init the pool before starting multiple collectors */
  pool_iterator_init(gc->metadata->gc_rootset_pool);

  //For_LOS_extend
  if(LOS_ADJUST_BOUNDARY){
    if(gc->tuner->kind != TRANS_NOTHING){
      major_set_compact_slide();
    }else if(collect_is_fallback()){
      major_set_compact_slide();
    }else{
      major_set_compact_move();
    }
  }else{
    gc->tuner->kind = TRANS_NOTHING;
  }

  if(major_is_compact_slide()){
#ifdef ORDER_GC_DEBUG
    printf("[GC DEBUG]: slide compact algo start ... \n");
#endif
    TRACE2("gc.process", "GC: slide compact algo start ... \n");
    collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);
    TRACE2("gc.process", "\nGC: end of slide compact algo ... \n");
#ifdef ORDER_GC_DEBUG
    printf("[GC DEBUG]: end of slide compact algo ... \n");
#endif
  }else if(major_is_compact_move()){
#ifdef ORDER_GC_DEBUG
    printf("[GC DEBUG]: move compact algo start ... \n");
#endif
    TRACE2("gc.process", "GC: move compact algo start ... \n");
    collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
    TRACE2("gc.process", "\nGC: end of move compact algo ... \n");
#ifdef ORDER_GC_DEBUG
    printf("[GC DEBUG]: end of move compact algo ... \n");
#endif
  }else{
    LDIE(75, "GC: The specified major collection algorithm doesn't exist!");
  }

  if((!LOS_ADJUST_BOUNDARY) && (kind != TRANS_NOTHING)){
    gc->tuner->kind = kind;
    gc_compute_space_tune_size_after_marking(gc);
  }

  return;
}
static inline void fallback_update_fw_ref(REF *p_ref) { assert(collect_is_fallback()); Partial_Reveal_Object *p_obj = read_slot(p_ref); if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){ assert(!obj_is_marked_in_vt(p_obj)); assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj))); p_obj = obj_get_fw_in_oi(p_obj); assert(p_obj); write_slot(p_ref, p_obj); } }
void gc_update_finref_repointed_refs(GC *gc, Boolean double_fix) { assert(!collect_is_minor()); Finref_Metadata *metadata = gc->finref_metadata; Pool *repset_pool = metadata->repset_pool; Pool *fallback_ref_pool = metadata->fallback_ref_pool; nondestructively_fix_finref_pool(gc, repset_pool, TRUE, double_fix); if(!pool_is_empty(fallback_ref_pool)){ assert(collect_is_fallback()); nondestructively_fix_finref_pool(gc, fallback_ref_pool, FALSE, double_fix); } }
static void identify_finalizable_objects(Collector *collector) { GC *gc = collector->gc; Finref_Metadata *metadata = gc->finref_metadata; Pool *obj_with_fin_pool = metadata->obj_with_fin_pool; gc_reset_finalizable_objects(gc); pool_iterator_init(obj_with_fin_pool); Vector_Block *block = pool_iterator_next(obj_with_fin_pool); while(block){ unsigned int block_has_ref = 0; POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ REF *p_ref = (REF*)iter; if(collect_is_fallback()) fallback_update_fw_ref(p_ref); // in case that this collection is ALGO_MAJOR_FALLBACK Partial_Reveal_Object *p_obj = read_slot(p_ref); if(!p_obj) continue; if(gc_obj_is_dead(gc, p_obj)){ gc_add_finalizable_obj(gc, p_obj); *p_ref = (REF)NULL; } else { if(collect_is_minor() && obj_need_move(gc, p_obj)){ assert(obj_is_fw_in_oi(p_obj)); write_slot(p_ref, obj_get_fw_in_oi(p_obj)); } ++block_has_ref; } } if(!block_has_ref) vector_block_clear(block); block = pool_iterator_next(obj_with_fin_pool); } gc_put_finalizable_objects(gc); if(collect_need_update_repset()) finref_add_repset_from_pool(gc, obj_with_fin_pool); }
static void update_referent_field_ignore_finref(GC *gc, Pool *pool) { Vector_Block *block = pool_get_entry(pool); while(block){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ REF *p_ref = (REF*)iter; Partial_Reveal_Object *p_obj = read_slot(p_ref); assert(p_obj); REF *p_referent_field = obj_get_referent_field(p_obj); if(collect_is_fallback()) fallback_update_fw_ref(p_referent_field); Partial_Reveal_Object *p_referent = read_slot(p_referent_field); if(!p_referent){ // referent field has been cleared *p_ref = (REF)NULL; continue; } if(!gc_obj_is_dead(gc, p_referent)){ // referent is alive if(obj_need_move(gc, p_referent)) if(collect_is_minor()){ assert(obj_is_fw_in_oi(p_referent)); Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent); write_slot(p_referent_field, p_new_referent); if(gc_is_gen_mode()) if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj)) collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); } else { finref_repset_add_entry(gc, p_referent_field); } *p_ref = (REF)NULL; continue; } *p_referent_field = (REF)NULL; /* referent is weakly reachable: clear the referent field */ } block = pool_get_entry(pool); } }
/* * The reason why we don't use identify_dead_refs() to implement this function is * that we will differentiate phanref from weakref in the future. */ static void identify_dead_phanrefs(Collector *collector) { GC *gc = collector->gc; Finref_Metadata *metadata = gc->finref_metadata; Pool *phanref_pool = metadata->phanref_pool; if(collect_need_update_repset()) finref_reset_repset(gc); // collector_reset_repset(collector); pool_iterator_init(phanref_pool); Vector_Block *block = pool_iterator_next(phanref_pool); while(block){ POINTER_SIZE_INT *iter = vector_block_iterator_init(block); for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){ Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter; Partial_Reveal_Object *p_obj = read_slot((REF*)p_ref); assert(p_obj); REF *p_referent_field = obj_get_referent_field(p_obj); if(collect_is_fallback()) fallback_update_fw_ref(p_referent_field); Partial_Reveal_Object *p_referent = read_slot(p_referent_field); if(!p_referent){ // referent field has been cleared *p_ref = NULL; continue; } if(!gc_obj_is_dead(gc, p_referent)){ // referent is alive if(obj_need_move(gc, p_referent)){ if(collect_is_minor()){ assert(obj_is_fw_in_oi(p_referent)); Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent); write_slot(p_referent_field, p_new_referent); if(gc_is_gen_mode()) if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj)) collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); } else{ // if(collect_move_object()){ this check is redundant because obj_need_move checks finref_repset_add_entry(gc, p_referent_field); } } *p_ref = (REF)NULL; continue; } *p_referent_field = (REF)NULL; #ifdef ORDER_DEBUG if(ref_file == NULL){ if(order_record){ ref_file = fopen64("RECORD_REF_LOG.log", "w+"); } else{ ref_file = fopen64("REPLAY_REF_LOG.log", "w+"); } } assert(ref_file); fprintf(ref_file, "GC[%d]: ref (%d, %d) is DEAD!\n", gc->num_collections, p_referent->alloc_tid, p_referent->alloc_count); fflush(ref_file); #endif /* Phantom status: for future use * if((unsigned int)p_referent & PHANTOM_REF_ENQUEUE_STATUS_MASK){ * // enqueued but not explicitly cleared OR pending for enqueueing * *iter = NULL; * } * resurrect_obj_tree(collector, p_referent_field); */ } block = pool_iterator_next(phanref_pool); } // collector_put_repset(collector); if(collect_need_update_repset()){ finref_put_repset(gc); finref_add_repset_from_pool(gc, phanref_pool); } }
static void identify_dead_refs(GC *gc, Pool *pool)
{
  if(collect_need_update_repset()) finref_reset_repset(gc);

  pool_iterator_init(pool);
  Vector_Block *block = pool_iterator_next(pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);

      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
      if(!p_referent){
        /* referent field has been cleared. I forgot why we set p_ref to NULL here.
           I guess it's because this ref_obj was already processed through another p_ref,
           so there is no need to keep the same ref_obj in this p_ref. */
        *p_ref = (REF)NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            /* if it's gen mode, and the referent stays in NOS, we need to keep p_referent_field
               in the collector remset. This keeps the ref obj live in the next gen-mode collection
               even if it is actually only weakly reachable, which simplifies the design. Otherwise,
               we would need to remember the ref obj in MOS separately and process it separately. */
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field);
          } else{
            // if(collect_move_object()){ -- this condition is redundant because obj_need_move already checks it
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
      }else{
        /* else, the referent is dead (weakly reachable), clear the referent field */
        *p_referent_field = (REF)NULL;
#ifdef ORDER_DEBUG
        if(ref_file == NULL){
          if(order_record){
            ref_file = fopen64("RECORD_REF_LOG.log", "w+");
          }
          else{
            ref_file = fopen64("REPLAY_REF_LOG.log", "w+");
          }
        }
        assert(ref_file);
        fprintf(ref_file, "GC[%d]: ref (%d, %d) is DEAD!\n", gc->num_collections, p_referent->alloc_tid, p_referent->alloc_count);
        fflush(ref_file);
#endif
        /* for a dead referent, p_ref is not set to NULL. p_ref keeps the ref object,
           which will be moved to the VM for enqueueing. */
      }
    }/* for each ref object */

    block = pool_iterator_next(pool);
  }

  if(collect_need_update_repset()){
    finref_put_repset(gc);
    finref_add_repset_from_pool(gc, pool);
  }
}
// Resurrect the obj tree whose root is the obj which p_ref points to static inline void resurrect_obj_tree(Collector *collector, REF *p_ref) { GC *gc = collector->gc; GC_Metadata *metadata = gc->metadata; Partial_Reveal_Object *p_obj = read_slot(p_ref); assert(p_obj && gc_obj_is_dead(gc, p_obj)); void *p_ref_or_obj = p_ref; Trace_Object_Func trace_object; /* set trace_object() function */ if(collect_is_minor()){ if(gc_is_gen_mode()){ if(minor_is_forward()) trace_object = trace_obj_in_gen_fw; else if(minor_is_semispace()) trace_object = trace_obj_in_gen_ss; else assert(0); }else{ if(minor_is_forward()) trace_object = trace_obj_in_nongen_fw; else if(minor_is_semispace()) trace_object = trace_obj_in_nongen_ss; else assert(0); } } else if(collect_is_major_normal() || !gc_has_nos()){ p_ref_or_obj = p_obj; if(gc_has_space_tuner(gc) && (gc->tuner->kind != TRANS_NOTHING)){ trace_object = trace_obj_in_space_tune_marking; unsigned int obj_size = vm_object_size(p_obj); #ifdef USE_32BITS_HASHCODE obj_size += hashcode_is_set(p_obj) ? GC_OBJECT_ALIGNMENT : 0; #endif if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc))){ collector->non_los_live_obj_size += obj_size; collector->segment_live_size[SIZE_TO_SEGMENT_INDEX(obj_size)] += obj_size; } else { collector->los_live_obj_size += round_up_to_size(obj_size, KB); } } else if(!gc_has_nos()){ trace_object = trace_obj_in_ms_marking; } else { trace_object = trace_obj_in_normal_marking; } } else if(collect_is_fallback()){ if(major_is_marksweep()) trace_object = trace_obj_in_ms_fallback_marking; else trace_object = trace_obj_in_fallback_marking; } else { assert(major_is_marksweep()); p_ref_or_obj = p_obj; if( gc->gc_concurrent_status == GC_CON_NIL ) trace_object = trace_obj_in_ms_marking; else trace_object = trace_obj_in_ms_concurrent_mark; } collector->trace_stack = free_task_pool_get_entry(metadata); collector_tracestack_push(collector, p_ref_or_obj); pool_put_entry(metadata->mark_task_pool, collector->trace_stack); collector->trace_stack = free_task_pool_get_entry(metadata); Vector_Block *task_block = pool_get_entry(metadata->mark_task_pool); while(task_block){ POINTER_SIZE_INT *iter = vector_block_iterator_init(task_block); while(!vector_block_iterator_end(task_block, iter)){ void *p_ref_or_obj = (void*)*iter; assert(((collect_is_minor()||collect_is_fallback()) && *(Partial_Reveal_Object **)p_ref_or_obj) || ((collect_is_major_normal()||major_is_marksweep()||!gc_has_nos()) && p_ref_or_obj)); trace_object(collector, p_ref_or_obj); if(collector->result == FALSE) break; /* Resurrection fallback happens; force return */ iter = vector_block_iterator_advance(task_block, iter); } vector_stack_clear(task_block); pool_put_entry(metadata->free_task_pool, task_block); if(collector->result == FALSE){ gc_task_pool_clear(metadata->mark_task_pool); break; /* force return */ } task_block = pool_get_entry(metadata->mark_task_pool); } task_block = (Vector_Block*)collector->trace_stack; vector_stack_clear(task_block); pool_put_entry(metadata->free_task_pool, task_block); collector->trace_stack = NULL; }
void slide_compact_mspace(Collector* collector) { GC* gc = collector->gc; Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc); Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc); unsigned int num_active_collectors = gc->num_active_collectors; /* Pass 1: ************************************************** *mark all live objects in heap, and save all the slots that *have references that are going to be repointed. */ TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: marking..."); unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1); if(collect_is_fallback()) mark_scan_heap_for_fallback(collector); else if(gc->tuner->kind != TRANS_NOTHING) mark_scan_heap_for_space_tune(collector); else mark_scan_heap(collector); old_num = atomic_inc32(&num_marking_collectors); /* last collector's world here */ if( ++old_num == num_active_collectors ) { if(!IGNORE_FINREF ) collector_identify_finref(collector); #ifndef BUILD_IN_REFERENT else { gc_set_weakref_sets(gc); gc_update_weakref_ignore_finref(gc); } #endif gc_identify_dead_weak_roots(gc); if( gc->tuner->kind != TRANS_NOTHING ) gc_compute_space_tune_size_after_marking(gc); //assert(!(gc->tuner->tuning_size % GC_BLOCK_SIZE_BYTES)); /* prepare for next phase */ gc_init_block_for_collectors(gc, mspace); #ifdef USE_32BITS_HASHCODE if(collect_is_fallback()) fallback_clear_fwd_obj_oi_init(collector); #endif last_block_for_dest = NULL; /* let other collectors go */ num_marking_collectors++; } while(num_marking_collectors != num_active_collectors + 1); TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass1 and start pass2: relocating mos&nos..."); /* Pass 2: ************************************************** assign target addresses for all to-be-moved objects */ atomic_cas32( &num_repointing_collectors, 0, num_active_collectors+1); #ifdef USE_32BITS_HASHCODE if(collect_is_fallback()) fallback_clear_fwd_obj_oi(collector); #endif mspace_compute_object_target(collector, mspace); old_num = atomic_inc32(&num_repointing_collectors); /*last collector's world here*/ if( ++old_num == num_active_collectors ) { if(lspace->move_object) { TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: relocating los ..."); lspace_compute_object_target(collector, lspace); } gc->collect_result = gc_collection_result(gc); if(!gc->collect_result) { num_repointing_collectors++; return; } gc_reset_block_for_collectors(gc, mspace); gc_init_block_for_fix_repointed_refs(gc, mspace); num_repointing_collectors++; } while(num_repointing_collectors != num_active_collectors + 1); if(!gc->collect_result) return; TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass2 and start pass3: repointing..."); /* Pass 3: ************************************************** *update all references whose objects are to be moved */ old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1); mspace_fix_repointed_refs(collector, mspace); old_num = atomic_inc32(&num_fixing_collectors); /*last collector's world here */ if( ++old_num == num_active_collectors ) { lspace_fix_repointed_refs(collector, lspace); gc_fix_rootset(collector, FALSE); gc_init_block_for_sliding_compact(gc, mspace); /*LOS_Shrink: This operation moves objects in LOS, and should be part of Pass 4 *lspace_sliding_compact is not binded with los shrink, we could slide compact los individually. 
   *So we use a flag lspace->move_object here, not tuner->kind == TRANS_FROM_LOS_TO_MOS. */
    if(lspace->move_object) lspace_sliding_compact(collector, lspace);
    /*The temp blocks for storing interim information are copied to the real places they should be.
     *And the space of the blocks, allocated in gc_space_tuner_init_fake_blocks_for_los_shrink, is freed. */
    last_block_for_dest = (Block_Header *)round_down_to_size((POINTER_SIZE_INT)last_block_for_dest->base, GC_BLOCK_SIZE_BYTES);
    if(gc->tuner->kind == TRANS_FROM_LOS_TO_MOS) gc_space_tuner_release_fake_blocks_for_los_shrink(gc);
    num_fixing_collectors++;
  }
  while(num_fixing_collectors != num_active_collectors + 1);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass3 and start pass4: moving...");

  /* Pass 4: **************************************************
     move objects */

  atomic_cas32( &num_moving_collectors, 0, num_active_collectors);

  mspace_sliding_compact(collector, mspace);

  atomic_inc32(&num_moving_collectors);
  while(num_moving_collectors != num_active_collectors);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass4 and start pass 5: restoring obj_info...");

  /* Pass 5: **************************************************
     restore obj_info */

  atomic_cas32( &num_restoring_collectors, 0, num_active_collectors+1);

  collector_restore_obj_info(collector);
#ifdef USE_32BITS_HASHCODE
  collector_attach_hashcode(collector);
#endif

  old_num = atomic_inc32(&num_restoring_collectors);

  if( ++old_num == num_active_collectors ) {
    if(gc->tuner->kind != TRANS_NOTHING)
      mspace_update_info_after_space_tuning(mspace);
    num_restoring_collectors++;
  }
  while(num_restoring_collectors != num_active_collectors + 1);

  /* Dealing with out of memory in mspace */
  void* mspace_border = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
  if( mspace_border > nos_boundary) {
    atomic_cas32( &num_extending_collectors, 0, num_active_collectors);

    mspace_extend_compact(collector);

    atomic_inc32(&num_extending_collectors);
    while(num_extending_collectors != num_active_collectors);
  }

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass5 and done.");

  return;
}
static void mspace_move_objects(Collector* collector, Mspace* mspace) { Block_Header* curr_block = collector->cur_compact_block; Block_Header* dest_block = collector->cur_target_block; Block_Header *local_last_dest = dest_block; void* dest_sector_addr = dest_block->base; Boolean is_fallback = collect_is_fallback(); #ifdef USE_32BITS_HASHCODE Hashcode_Buf* old_hashcode_buf = NULL; Hashcode_Buf* new_hashcode_buf = hashcode_buf_create(); hashcode_buf_init(new_hashcode_buf); #endif #ifdef GC_GEN_STATS GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats; #endif unsigned int debug_num_live_obj = 0; while( curr_block ){ if(verify_live_heap){ atomic_inc32(&debug_num_compact_blocks); debug_num_live_obj = 0; } void* start_pos; Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &start_pos); if( !p_obj ){ #ifdef USE_32BITS_HASHCODE hashcode_buf_clear(curr_block->hashcode_buf); #endif assert(!verify_live_heap ||debug_num_live_obj == curr_block->num_live_objs); curr_block = mspace_get_next_compact_block(collector, mspace); continue; } int curr_sector = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj); void* src_sector_addr = p_obj; while( p_obj ){ debug_num_live_obj++; assert( obj_is_marked_in_vt(p_obj)); /* we don't check if it's set, since only non-forwarded objs from last NOS partial-forward collection need it. */ obj_clear_dual_bits_in_oi(p_obj); #ifdef GC_GEN_STATS gc_gen_collector_update_moved_nos_mos_obj_stats_major(stats, vm_object_size(p_obj)); #endif #ifdef USE_32BITS_HASHCODE move_compact_process_hashcode(p_obj, curr_block->hashcode_buf, new_hashcode_buf); #endif POINTER_SIZE_INT curr_sector_size = (POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)src_sector_addr; /* check if dest block is not enough to hold this sector. If yes, grab next one */ POINTER_SIZE_INT block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block); if( ((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) > block_end ){ dest_block->new_free = dest_sector_addr; #ifdef USE_32BITS_HASHCODE block_swap_hashcode_buf(dest_block, &new_hashcode_buf, &old_hashcode_buf); #endif dest_block = mspace_get_next_target_block(collector, mspace); if(dest_block == NULL){ #ifdef USE_32BITS_HASHCODE hashcode_buf_rollback_new_entry(old_hashcode_buf); #endif collector->result = FALSE; return; } #ifdef USE_32BITS_HASHCODE hashcode_buf_transfer_new_entry(old_hashcode_buf, new_hashcode_buf); #endif if((!local_last_dest) || (dest_block->block_idx > local_last_dest->block_idx)) local_last_dest = dest_block; block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block); dest_sector_addr = dest_block->base; } assert(((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) <= block_end ); Partial_Reveal_Object *last_obj_end = (Partial_Reveal_Object *)start_pos; /* check if next live object is out of current sector. If not, loop back to continue within this sector. FIXME:: we should add a condition for block check (?) */ p_obj = block_get_next_marked_object(curr_block, &start_pos); if ((p_obj != NULL) && (OBJECT_INDEX_TO_OFFSET_TABLE(p_obj) == curr_sector)) { if(last_obj_end != p_obj) obj_set_vt_to_next_obj(last_obj_end, p_obj); continue; } /* current sector is done, let's move it. */ POINTER_SIZE_INT sector_distance = (POINTER_SIZE_INT)src_sector_addr - (POINTER_SIZE_INT)dest_sector_addr; assert((sector_distance % GC_OBJECT_ALIGNMENT) == 0); /* if sector_distance is zero, we don't do anything. But since block offset table is never cleaned, we have to set 0 to it. 
*/ curr_block->table[curr_sector] = sector_distance; if(sector_distance != 0) memmove(dest_sector_addr, src_sector_addr, curr_sector_size); #ifdef USE_32BITS_HASHCODE hashcode_buf_refresh_new_entry(new_hashcode_buf, sector_distance); #endif dest_sector_addr = (void*)((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size); src_sector_addr = p_obj; curr_sector = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj); } #ifdef USE_32BITS_HASHCODE hashcode_buf_clear(curr_block->hashcode_buf); #endif assert(!verify_live_heap ||debug_num_live_obj == curr_block->num_live_objs); curr_block = mspace_get_next_compact_block(collector, mspace); } dest_block->new_free = dest_sector_addr; collector->cur_target_block = local_last_dest; #ifdef USE_32BITS_HASHCODE old_hashcode_buf = block_set_hashcode_buf(dest_block, new_hashcode_buf); hashcode_buf_destory(old_hashcode_buf); #endif return; }
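/* Illustrative sketch (not part of the collector): mspace_move_objects() stores, for every sector,
 * the distance (src - dest) in the block's offset table, so a later repointing pass can recover a
 * moved object's new address by subtracting that stored distance from its old address. The helper
 * below is a hypothetical restatement of that lookup; OBJECT_INDEX_TO_OFFSET_TABLE and the table
 * layout are used as in the code above, but this is a sketch, not the repointing routine the GC
 * actually uses.
 */
#if 0  /* example only, never compiled */
static Partial_Reveal_Object* example_new_address_after_move(Block_Header* block, Partial_Reveal_Object* p_old)
{
  int sector = OBJECT_INDEX_TO_OFFSET_TABLE(p_old);
  POINTER_SIZE_INT distance = (POINTER_SIZE_INT)block->table[sector]; /* src - dest, 0 if the sector didn't move */
  return (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_old - distance);
}
#endif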
void move_compact_mspace(Collector* collector) { GC* gc = collector->gc; Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc); Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc); Blocked_Space* nos = (Blocked_Space*)gc_get_nos((GC_Gen*)gc); unsigned int num_active_collectors = gc->num_active_collectors; Boolean is_fallback = collect_is_fallback(); /* Pass 1: ************************************************** mark all live objects in heap, and save all the slots that have references that are going to be repointed */ TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: mark live objects in heap ..."); unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1); if(!is_fallback) mark_scan_heap(collector); else mark_scan_heap_for_fallback(collector); old_num = atomic_inc32(&num_marking_collectors); if( ++old_num == num_active_collectors ){ /* last collector's world here */ /* prepare for next phase */ gc_init_block_for_collectors(gc, mspace); if(!IGNORE_FINREF ) collector_identify_finref(collector); #ifndef BUILD_IN_REFERENT else { gc_set_weakref_sets(gc); gc_update_weakref_ignore_finref(gc); } #endif gc_identify_dead_weak_roots(gc); #ifdef USE_32BITS_HASHCODE if((!LOS_ADJUST_BOUNDARY) && (is_fallback)) fallback_clear_fwd_obj_oi_init(collector); #endif debug_num_compact_blocks = 0; /* let other collectors go */ num_marking_collectors++; } while(num_marking_collectors != num_active_collectors + 1); TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass1"); /* Pass 2: ************************************************** move object and set the forwarding offset table */ TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: move object and set the forwarding offset table ..."); atomic_cas32( &num_moving_collectors, 0, num_active_collectors+1); #ifdef USE_32BITS_HASHCODE if(is_fallback) fallback_clear_fwd_obj_oi(collector); #endif mspace_move_objects(collector, mspace); old_num = atomic_inc32(&num_moving_collectors); if( ++old_num == num_active_collectors ){ /* single thread world */ if(lspace->move_object) lspace_compute_object_target(collector, lspace); gc->collect_result = gc_collection_result(gc); if(!gc->collect_result){ num_moving_collectors++; return; } if(verify_live_heap){ assert( debug_num_compact_blocks == mspace->num_managed_blocks + nos->num_managed_blocks ); debug_num_compact_blocks = 0; } gc_reset_block_for_collectors(gc, mspace); blocked_space_block_iterator_init((Blocked_Space*)mspace); num_moving_collectors++; } while(num_moving_collectors != num_active_collectors + 1); if(!gc->collect_result) return; TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass2"); /* Pass 3: ************************************************** update all references whose pointed objects were moved */ TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass3: update all references ..."); old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1); mspace_fix_repointed_refs(collector, mspace); old_num = atomic_inc32(&num_fixing_collectors); if( ++old_num == num_active_collectors ){ /* last collector's world here */ lspace_fix_repointed_refs(collector, lspace); gc_fix_rootset(collector, FALSE); if(lspace->move_object) lspace_sliding_compact(collector, lspace); num_fixing_collectors++; } while(num_fixing_collectors != num_active_collectors + 1); 
TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass3"); /* Pass 4: ************************************************** restore obj_info . Actually only LOS needs it. Since oi is recorded for new address, so the restoration doesn't need to to specify space. */ TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass4: restore obj_info ..."); atomic_cas32( &num_restoring_collectors, 0, num_active_collectors); collector_restore_obj_info(collector); atomic_inc32(&num_restoring_collectors); while(num_restoring_collectors != num_active_collectors); /* Dealing with out of memory in mspace */ if(mspace->free_block_idx > nos->first_block_idx){ atomic_cas32( &num_extending_collectors, 0, num_active_collectors); mspace_extend_compact(collector); atomic_inc32(&num_extending_collectors); while(num_extending_collectors != num_active_collectors); } TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass4"); /* Leftover: ************************************************** */ if( (POINTER_SIZE_INT)collector->thread_handle != 0 ){ TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"] finished"); return; } TRACE2("gc.process", "GC: collector[0] finished"); return; }
static void gc_decide_next_collect(GC_Gen* gc, int64 pause_time)
{
  Space* nos = (Space*)gc->nos;
  Space* mos = (Space*)gc->mos;

  float survive_ratio = 0.2f;

  if( MOS_RESERVE_SIZE != 0) DEFAULT_MOS_RESERVE_SIZE = MOS_RESERVE_SIZE;

  POINTER_SIZE_INT mos_free_size = mos_free_space_size(mos);
  /* For the free-size computation, semispace may leave some nos space used.
     But we use a simple approximation here. That is, we just use the total nos size as the nos
     free size. This is important. We can't use the real nos_free_space_size(), because the whole
     algorithm here in gc_decide_next_collect() assumes the total free size is reduced after every
     minor collection, and can only be increased after a major collection. Otherwise the algorithm
     is invalid. If we used nos_free_space_size(), we might get an increased total free size after
     a minor collection. */
  POINTER_SIZE_INT nos_free_size = space_committed_size(nos);

  POINTER_SIZE_INT total_free_size = mos_free_size + nos_free_size;
  if(collect_is_major()) gc->force_gen_mode = FALSE;
  if(!gc->force_gen_mode){
    /*Major collection:*/
    if(collect_is_major()){
      mos->time_collections += pause_time;
      Tslow = (float)pause_time;
      SMax = total_free_size;
      /*If fallback happens, and nos_boundary reaches the heap ceiling, then we force a major collection.*/
      if( nos_free_size == 0)
        gc->next_collect_force_major = TRUE;
      else gc->next_collect_force_major = FALSE;

      /*If this major is caused by LOS, or the collection kind is ALGO_MAJOR_EXTEND, the survive ratio is not updated.*/
      extern Boolean mos_extended;
      if((gc->cause != GC_CAUSE_LOS_IS_FULL) && !mos_extended ){
        survive_ratio = (float)mos->period_surviving_size/(float)mos->committed_heap_size;
        mos->survive_ratio = survive_ratio;
      }
      /* Why do I set it FALSE here? Because here is the only place where it's used. */
      mos_extended = FALSE;

      /*If there is no minor collection at all, we must give the mos expected threshold a reasonable value.*/
      if((gc->tuner->kind != TRANS_NOTHING) && (nos->num_collections == 0))
        mspace_set_expected_threshold_ratio((Mspace *)mos, 0.5f);

      /*If this major is caused by fallback compaction, we must give nos->survive_ratio
       *a conservative and reasonable number to avoid the next fallback.
       *In fallback compaction, the survive_ratio of mos must be 1.*/
      if(collect_is_fallback()) nos->survive_ratio = 1;
    }
    /*Minor collection:*/
    else {
      /*Give a hint to mini_free_ratio.
       */
      if(nos->num_collections == 1){
        /*Fixme: This is only set for tuning the first warehouse!*/
        Tslow = pause_time / gc->survive_ratio;
        SMax = (POINTER_SIZE_INT)((float)(gc->committed_heap_size - gc->los->committed_heap_size) * ( 1 - gc->survive_ratio ));
        last_total_free_size = gc->committed_heap_size - gc->los->committed_heap_size;
      }

      nos->time_collections += pause_time;
      POINTER_SIZE_INT free_size_threshold;

      POINTER_SIZE_INT minor_surviving_size = last_total_free_size - total_free_size;

      /*If the first GC is caused by LOS, mos->last_alloced_size should be smaller than this minor_surviving_size,
       *because the last_total_free_size is not accurate.*/
      if(nos->num_collections != 1){
        assert(minor_surviving_size == mos->last_alloced_size);
      }

      float k = Tslow * nos->num_collections/nos->time_collections;
      float m = ((float)minor_surviving_size)*1.0f/((float)(SMax - DEFAULT_MOS_RESERVE_SIZE ));
      float free_ratio_threshold = mini_free_ratio(k, m);

      if(SMax > DEFAULT_MOS_RESERVE_SIZE )
        free_size_threshold = (POINTER_SIZE_INT)(free_ratio_threshold * (SMax - DEFAULT_MOS_RESERVE_SIZE ) + DEFAULT_MOS_RESERVE_SIZE );
      else
        free_size_threshold = (POINTER_SIZE_INT)(free_ratio_threshold * SMax);

      /* FIXME: if the total free size is less than the threshold, the time point might be too late!
       * It might be worth testing whether the backup solution is better for specjbb. */
      // if ((mos_free_size + nos_free_size + minor_surviving_size) < free_size_threshold) gc->next_collect_force_major = TRUE;
      if ((mos_free_size + nos_free_size) < free_size_threshold) gc->next_collect_force_major = TRUE;

      survive_ratio = (float)minor_surviving_size/(float)space_committed_size((Space*)nos);
      nos->survive_ratio = survive_ratio;

      /*For LOS_Adaptive*/
      POINTER_SIZE_INT mos_committed_size = space_committed_size((Space*)mos);
      POINTER_SIZE_INT nos_committed_size = space_committed_size((Space*)nos);
      if(mos_committed_size + nos_committed_size > free_size_threshold){
        POINTER_SIZE_INT mos_size_threshold;
        mos_size_threshold = mos_committed_size + nos_committed_size - free_size_threshold;
        float mos_size_threshold_ratio = (float)mos_size_threshold / (mos_committed_size + nos_committed_size);
        mspace_set_expected_threshold_ratio((Mspace *)mos, mos_size_threshold_ratio);
      }
    }

    gc->survive_ratio = (gc->survive_ratio + survive_ratio)/2.0f;

    last_total_free_size = total_free_size;
  }

  gc_gen_mode_adapt(gc,pause_time);

  return;
}
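/* Illustrative sketch (not part of the collector): the minor-collection branch above converts the
 * ratio returned by mini_free_ratio() into an absolute byte threshold by scaling the tunable part
 * of the free space (SMax minus the MOS reserve) and adding the reserve back. The numbers below
 * are made up purely to show the arithmetic.
 */
#if 0  /* example only, never compiled */
static POINTER_SIZE_INT example_free_size_threshold(float free_ratio_threshold,   /* e.g. 0.25f from mini_free_ratio() */
                                                    POINTER_SIZE_INT s_max,       /* e.g. 256MB of free space after major GC */
                                                    POINTER_SIZE_INT mos_reserve) /* e.g. 16MB reserved for MOS */
{
  if(s_max > mos_reserve)
    /* 0.25 * (256MB - 16MB) + 16MB = 76MB: if a minor collection leaves less free space than this,
       the next collection is forced to be a major one */
    return (POINTER_SIZE_INT)(free_ratio_threshold * (s_max - mos_reserve) + mos_reserve);
  return (POINTER_SIZE_INT)(free_ratio_threshold * s_max);
}
#endif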