/* Prepare the mature space (MOS) block iterator for a sliding-compact pass.
 * Resets the static destination-block bookkeeping shared by the compacting
 * collectors, then positions the block iterator according to any pending
 * space tuning between LOS and MOS. */
static inline void gc_init_block_for_sliding_compact(GC *gc, Mspace *mspace)
{
  /* Reset the shared static state used to hand out destination blocks. */
  next_block_for_dest = NULL;
  current_dest_block.block = NULL;
  current_dest_block.lock = FREE_LOCK;

  Space_Tuner *tuner = gc->tuner;
  POINTER_SIZE_INT shift = tuner->tuning_size;

  switch(tuner->kind){
    case TRANS_NOTHING:
      /* Space is not being tuned: iterate from mspace->heap_start as usual. */
      blocked_space_block_iterator_init((Blocked_Space*)mspace);
      break;
    case TRANS_FROM_MOS_TO_LOS:
      /* LOS extend: the block list now begins past the donated area, so
       * compaction starts from the new (higher) start of mspace. */
      mspace->block_iterator = (Block_Header*)((POINTER_SIZE_INT)mspace->blocks + shift);
      break;
    default:
      /* LOS shrink: compact toward the new (lower) start of mspace. Note this
       * differs from gc_init_block_for_fix_repointed_refs, which keeps the
       * old start — here we want the objects slid down into the new area. */
      mspace->block_iterator = (Block_Header*)((POINTER_SIZE_INT)mspace->blocks - shift);
      break;
  }
}
/* Prepare the mature space (MOS) block iterator for the reference-fixing pass.
 * Unlike the sliding-compact setup, an LOS shrink fixes refs starting from the
 * OLD start of mspace; only an LOS extend shifts the starting block. */
static inline void gc_init_block_for_fix_repointed_refs(GC* gc, Mspace* mspace)
{
  Space_Tuner* tuner = gc->tuner;

  if((tuner->kind != TRANS_NOTHING) && (tuner->kind != TRANS_FROM_LOS_TO_MOS)){
    /* LOS extend (TRANS_FROM_MOS_TO_LOS): the block list begins past the
     * donated area, so start fixing from the new start of mspace. */
    mspace->block_iterator = (Block_Header*)((POINTER_SIZE_INT)mspace->blocks + tuner->tuning_size);
    return;
  }

  /* No tuning, or LOS shrink: fix repointed refs from the old mspace start. */
  blocked_space_block_iterator_init((Blocked_Space*)mspace);
}
/* Per-collector entry point for a move-compact collection of the mature space.
 *
 * Every active collector thread runs this function concurrently. Each pass is
 * separated by a spin-wait barrier built on a shared counter:
 *   - atomic_cas32(&counter, 0, num_active_collectors+1) resets the counter
 *     for the NEXT cycle (only succeeds once, when it still holds the final
 *     value from the previous cycle);
 *   - atomic_inc32 presumably returns the pre-increment value, so
 *     `++old_num == num_active_collectors` identifies the LAST collector to
 *     arrive — that thread gets a single-threaded window to do phase-global
 *     work, then releases the others by bumping the counter once more;
 *   - the `while(counter != num_active_collectors + 1);` spin holds everyone
 *     until the last collector has finished its single-threaded work.
 */
void move_compact_mspace(Collector* collector)
{
  GC* gc = collector->gc;
  Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
  Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
  Blocked_Space* nos = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);

  unsigned int num_active_collectors = gc->num_active_collectors;
  Boolean is_fallback = collect_is_fallback();

  /* Pass 1: **************************************************
     mark all live objects in heap, and save all the slots that
     have references  that are going to be repointed */

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: mark live objects in heap ...");

  /* Reset the pass-1 barrier counter (only the first arriver's CAS wins). */
  unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);

  /* A fallback collection must use the fallback marker: forwarding state
     left over from the failed minor collection has to be handled. */
  if(!is_fallback)
    mark_scan_heap(collector);
  else
    mark_scan_heap_for_fallback(collector);

  old_num = atomic_inc32(&num_marking_collectors);
  if( ++old_num == num_active_collectors ){
    /* last collector's world here */
    /* prepare for next phase */
    gc_init_block_for_collectors(gc, mspace);

    if(!IGNORE_FINREF )
      collector_identify_finref(collector);
#ifndef BUILD_IN_REFERENT
    else {
      gc_set_weakref_sets(gc);
      gc_update_weakref_ignore_finref(gc);
    }
#endif
    gc_identify_dead_weak_roots(gc);

#ifdef USE_32BITS_HASHCODE
    if((!LOS_ADJUST_BOUNDARY) && (is_fallback))
      fallback_clear_fwd_obj_oi_init(collector);
#endif

    debug_num_compact_blocks = 0;
    /* let other collectors go */
    num_marking_collectors++;
  }
  /* Spin until the last collector releases the barrier. */
  while(num_marking_collectors != num_active_collectors + 1);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass1");

  /* Pass 2: **************************************************
     move object and set the forwarding offset table */

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: move object and set the forwarding offset table ...");

  /* Reset the pass-2 barrier counter. */
  atomic_cas32( &num_moving_collectors, 0, num_active_collectors+1);

#ifdef USE_32BITS_HASHCODE
  if(is_fallback)
    fallback_clear_fwd_obj_oi(collector);
#endif

  mspace_move_objects(collector, mspace);

  old_num = atomic_inc32(&num_moving_collectors);
  if( ++old_num == num_active_collectors ){
    /* single thread world */
    if(lspace->move_object)
      lspace_compute_object_target(collector, lspace);

    gc->collect_result = gc_collection_result(gc);
    if(!gc->collect_result){
      /* Compaction failed (e.g. out of space): release the barrier so the
         other collectors can observe collect_result and bail out below. */
      num_moving_collectors++;
      return;
    }

    if(verify_live_heap){
      assert( debug_num_compact_blocks == mspace->num_managed_blocks + nos->num_managed_blocks );
      debug_num_compact_blocks = 0;
    }

    gc_reset_block_for_collectors(gc, mspace);
    blocked_space_block_iterator_init((Blocked_Space*)mspace);
    num_moving_collectors++;
  }
  while(num_moving_collectors != num_active_collectors + 1);

  /* All collectors abandon the collection when pass 2 failed. */
  if(!gc->collect_result) return;

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass2");

  /* Pass 3: **************************************************
     update all references whose pointed objects were moved */

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass3: update all references ...");

  /* Reset the pass-3 barrier counter. */
  old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1);

  mspace_fix_repointed_refs(collector, mspace);

  old_num = atomic_inc32(&num_fixing_collectors);
  if( ++old_num == num_active_collectors ){
    /* last collector's world here */
    lspace_fix_repointed_refs(collector, lspace);
    gc_fix_rootset(collector, FALSE);
    /* LOS is slid only after all refs are fixed, since sliding overwrites
       the old object copies the fixing phase still reads. */
    if(lspace->move_object)
      lspace_sliding_compact(collector, lspace);

    num_fixing_collectors++;
  }
  while(num_fixing_collectors != num_active_collectors + 1);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass3");

  /* Pass 4: **************************************************
     restore obj_info . Actually only LOS needs it.   Since oi is recorded
     for new address, so the restoration doesn't need to to specify space. */

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass4: restore obj_info ...");

  /* NOTE(review): pass-4 barriers count to num_active_collectors (no
     single-threaded window), unlike passes 1-3 which count to num+1. */
  atomic_cas32( &num_restoring_collectors, 0, num_active_collectors);

  collector_restore_obj_info(collector);

  atomic_inc32(&num_restoring_collectors);

  while(num_restoring_collectors != num_active_collectors);

  /* Dealing with out of memory in mspace */
  if(mspace->free_block_idx > nos->first_block_idx){
    /* MOS compaction overflowed past the nursery boundary: all collectors
       synchronize again and extend/compact the mature space. */
    atomic_cas32( &num_extending_collectors, 0, num_active_collectors);

    mspace_extend_compact(collector);

    atomic_inc32(&num_extending_collectors);

    while(num_extending_collectors != num_active_collectors);
  }

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass4");

  /* Leftover: **************************************************
   */
  /* Only collector 0 (thread_handle == 0) falls through; the rest are done. */
  if( (POINTER_SIZE_INT)collector->thread_handle != 0 ){
    TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"] finished");
    return;
  }

  TRACE2("gc.process", "GC: collector[0] finished");

  return;
}