void slide_compact_mspace(Collector* collector)
{
  GC* gc = collector->gc;
  Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
  Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc);

  unsigned int num_active_collectors = gc->num_active_collectors;

  /* Pass 1: **************************************************
   * mark all live objects in the heap, and save all the slots
   * that hold references which are going to be repointed
   */
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: marking...");

  unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);

  if(collect_is_fallback())
    mark_scan_heap_for_fallback(collector);
  else if(gc->tuner->kind != TRANS_NOTHING)
    mark_scan_heap_for_space_tune(collector);
  else
    mark_scan_heap(collector);

  old_num = atomic_inc32(&num_marking_collectors);

  /* last collector's world here */
  if( ++old_num == num_active_collectors ){
    if(!IGNORE_FINREF )
      collector_identify_finref(collector);
#ifndef BUILD_IN_REFERENT
    else {
      gc_set_weakref_sets(gc);
      gc_update_weakref_ignore_finref(gc);
    }
#endif
    gc_identify_dead_weak_roots(gc);

    if( gc->tuner->kind != TRANS_NOTHING )
      gc_compute_space_tune_size_after_marking(gc);
    //assert(!(gc->tuner->tuning_size % GC_BLOCK_SIZE_BYTES));

    /* prepare for next phase */
    gc_init_block_for_collectors(gc, mspace);

#ifdef USE_32BITS_HASHCODE
    if(collect_is_fallback())
      fallback_clear_fwd_obj_oi_init(collector);
#endif

    last_block_for_dest = NULL;

    /* let other collectors go */
    num_marking_collectors++;
  }
  while(num_marking_collectors != num_active_collectors + 1);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass1 and start pass2: relocating mos&nos...");

  /* Pass 2: **************************************************
   * assign target addresses for all to-be-moved objects
   */
  atomic_cas32( &num_repointing_collectors, 0, num_active_collectors+1);

#ifdef USE_32BITS_HASHCODE
  if(collect_is_fallback())
    fallback_clear_fwd_obj_oi(collector);
#endif

  mspace_compute_object_target(collector, mspace);

  old_num = atomic_inc32(&num_repointing_collectors);

  /* last collector's world here */
  if( ++old_num == num_active_collectors ){
    if(lspace->move_object) {
      TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: relocating los ...");
      lspace_compute_object_target(collector, lspace);
    }

    gc->collect_result = gc_collection_result(gc);
    if(!gc->collect_result){
      num_repointing_collectors++;
      return;
    }

    gc_reset_block_for_collectors(gc, mspace);
    gc_init_block_for_fix_repointed_refs(gc, mspace);
    num_repointing_collectors++;
  }
  while(num_repointing_collectors != num_active_collectors + 1);
  if(!gc->collect_result) return;

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass2 and start pass3: repointing...");

  /* Pass 3: **************************************************
   * update all references whose objects are to be moved
   */
  old_num = atomic_cas32( &num_fixing_collectors, 0, num_active_collectors+1);

  mspace_fix_repointed_refs(collector, mspace);

  old_num = atomic_inc32(&num_fixing_collectors);

  /* last collector's world here */
  if( ++old_num == num_active_collectors ){
    lspace_fix_repointed_refs(collector, lspace);
    gc_fix_rootset(collector, FALSE);
    gc_init_block_for_sliding_compact(gc, mspace);

    /* LOS_Shrink: this operation moves objects in LOS and should logically be
     * part of Pass 4. But lspace_sliding_compact is not bound to LOS shrink;
     * LOS can be slide-compacted on its own, so the check here is the flag
     * lspace->move_object, not tuner->kind == TRANS_FROM_LOS_TO_MOS. */
    if(lspace->move_object)
      lspace_sliding_compact(collector, lspace);

    /* The temp blocks storing interim information are copied to the places
     * where they belong, and their space, allocated in
     * gc_space_tuner_init_fake_blocks_for_los_shrink, is freed. */
    last_block_for_dest = (Block_Header *)round_down_to_size((POINTER_SIZE_INT)last_block_for_dest->base, GC_BLOCK_SIZE_BYTES);
    if(gc->tuner->kind == TRANS_FROM_LOS_TO_MOS)
      gc_space_tuner_release_fake_blocks_for_los_shrink(gc);

    num_fixing_collectors++;
  }
  while(num_fixing_collectors != num_active_collectors + 1);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass3 and start pass4: moving...");
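  /* Note: the barriers above reset their counters to num_active_collectors+1
   * so that the last collector to arrive gets a single-threaded section before
   * releasing the others. Pass 4 below has no single-threaded tail, so its
   * barrier only counts up to num_active_collectors. */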
  /* Pass 4: **************************************************
   * move objects
   */
  atomic_cas32( &num_moving_collectors, 0, num_active_collectors);

  mspace_sliding_compact(collector, mspace);

  atomic_inc32(&num_moving_collectors);
  while(num_moving_collectors != num_active_collectors);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass4 and start pass 5: restoring obj_info...");

  /* Pass 5: **************************************************
   * restore obj_info
   */
  atomic_cas32( &num_restoring_collectors, 0, num_active_collectors+1);

  collector_restore_obj_info(collector);
#ifdef USE_32BITS_HASHCODE
  collector_attach_hashcode(collector);
#endif

  old_num = atomic_inc32(&num_restoring_collectors);

  /* last collector's world here */
  if( ++old_num == num_active_collectors ){
    if(gc->tuner->kind != TRANS_NOTHING)
      mspace_update_info_after_space_tuning(mspace);
    num_restoring_collectors++;
  }
  while(num_restoring_collectors != num_active_collectors + 1);

  /* Dealing with out of memory in mspace */
  void* mspace_border = &mspace->blocks[mspace->free_block_idx - mspace->first_block_idx];
  if( mspace_border > nos_boundary){
    atomic_cas32( &num_extending_collectors, 0, num_active_collectors);

    mspace_extend_compact(collector);

    atomic_inc32(&num_extending_collectors);
    while(num_extending_collectors != num_active_collectors);
  }

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass5 and done.");

  return;
}
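/* Both compactors in this file coordinate their passes with the same SPMD
 * barrier handshake built from atomic_cas32/atomic_inc32. Below is a minimal
 * sketch of that pattern in isolation, assuming (as the usage above implies)
 * that atomic_cas32(mem, swap, cmp) stores swap when *mem == cmp, and that
 * atomic_inc32 returns the pre-increment value. The demo_* names are
 * illustrative only, not part of this collector. */
static volatile unsigned int demo_phase_counter;

static void demo_phase_barrier(unsigned int num_active_collectors,
                               void (*parallel_work)(void),
                               void (*single_threaded_work)(void))
{
  /* The previous phase left the counter at num_active_collectors+1, so only
   * the first collector to arrive succeeds in resetting it to 0. */
  atomic_cas32(&demo_phase_counter, 0, num_active_collectors+1);

  parallel_work();

  unsigned int old_num = atomic_inc32(&demo_phase_counter);
  if( ++old_num == num_active_collectors ){
    /* last collector's world: all others have finished parallel_work and
     * are spinning in the loop below */
    single_threaded_work();
    /* bump the counter to num_active_collectors+1 to let the others go */
    demo_phase_counter++;
  }
  while(demo_phase_counter != num_active_collectors + 1);
}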
void move_compact_mspace(Collector* collector)
{
  GC* gc = collector->gc;
  Mspace* mspace = (Mspace*)gc_get_mos((GC_Gen*)gc);
  Lspace* lspace = (Lspace*)gc_get_los((GC_Gen*)gc);
  Blocked_Space* nos = (Blocked_Space*)gc_get_nos((GC_Gen*)gc);

  unsigned int num_active_collectors = gc->num_active_collectors;
  Boolean is_fallback = collect_is_fallback();

  /* Pass 1: **************************************************
   * mark all live objects in the heap, and save all the slots
   * that hold references which are going to be repointed
   */
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass1: mark live objects in heap ...");

  unsigned int old_num = atomic_cas32( &num_marking_collectors, 0, num_active_collectors+1);

  if(!is_fallback)
    mark_scan_heap(collector);
  else
    mark_scan_heap_for_fallback(collector);

  old_num = atomic_inc32(&num_marking_collectors);

  if( ++old_num == num_active_collectors ){
    /* last collector's world here */

    /* prepare for next phase */
    gc_init_block_for_collectors(gc, mspace);

    if(!IGNORE_FINREF )
      collector_identify_finref(collector);
#ifndef BUILD_IN_REFERENT
    else {
      gc_set_weakref_sets(gc);
      gc_update_weakref_ignore_finref(gc);
    }
#endif
    gc_identify_dead_weak_roots(gc);

#ifdef USE_32BITS_HASHCODE
    if((!LOS_ADJUST_BOUNDARY) && (is_fallback))
      fallback_clear_fwd_obj_oi_init(collector);
#endif

    debug_num_compact_blocks = 0;

    /* let other collectors go */
    num_marking_collectors++;
  }
  while(num_marking_collectors != num_active_collectors + 1);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass1");

  /* Pass 2: **************************************************
   * move objects and set the forwarding offset table
   */
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass2: move object and set the forwarding offset table ...");

  atomic_cas32( &num_moving_collectors, 0, num_active_collectors+1);

#ifdef USE_32BITS_HASHCODE
  if(is_fallback)
    fallback_clear_fwd_obj_oi(collector);
#endif

  mspace_move_objects(collector, mspace);

  old_num = atomic_inc32(&num_moving_collectors);

  if( ++old_num == num_active_collectors ){
    /* single-threaded world here */
    if(lspace->move_object)
      lspace_compute_object_target(collector, lspace);

    gc->collect_result = gc_collection_result(gc);
    if(!gc->collect_result){
      num_moving_collectors++;
      return;
    }

    if(verify_live_heap){
      assert( debug_num_compact_blocks == mspace->num_managed_blocks + nos->num_managed_blocks );
      debug_num_compact_blocks = 0;
    }

    gc_reset_block_for_collectors(gc, mspace);
    blocked_space_block_iterator_init((Blocked_Space*)mspace);
    num_moving_collectors++;
  }
  while(num_moving_collectors != num_active_collectors + 1);
  if(!gc->collect_result) return;

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass2");
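  /* Unlike slide_compact_mspace, the objects have already been moved in
   * pass 2, so the repointing pass below resolves each reference's new
   * address through the forwarding offset table rather than a target
   * address computed before the move. */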
TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass3"); /* Pass 4: ************************************************** restore obj_info . Actually only LOS needs it. Since oi is recorded for new address, so the restoration doesn't need to to specify space. */ TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: pass4: restore obj_info ..."); atomic_cas32( &num_restoring_collectors, 0, num_active_collectors); collector_restore_obj_info(collector); atomic_inc32(&num_restoring_collectors); while(num_restoring_collectors != num_active_collectors); /* Dealing with out of memory in mspace */ if(mspace->free_block_idx > nos->first_block_idx){ atomic_cas32( &num_extending_collectors, 0, num_active_collectors); mspace_extend_compact(collector); atomic_inc32(&num_extending_collectors); while(num_extending_collectors != num_active_collectors); } TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish pass4"); /* Leftover: ************************************************** */ if( (POINTER_SIZE_INT)collector->thread_handle != 0 ){ TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"] finished"); return; } TRACE2("gc.process", "GC: collector[0] finished"); return; }