void verifier_trace_rootsets(Heap_Verifier* heap_verifier, Pool* root_set_pool)
{
  Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
  GC_Verifier* gc_verifier = heap_verifier->gc_verifier;
  gc_verifier->objects_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
  gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool);
  gc_verifier->hashcode_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);

  pool_iterator_init(root_set_pool);
  Vector_Block* root_set = pool_iterator_next(root_set_pool);

  /* first step: copy all root objects to trace tasks. */
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set, iter)){
      REF* p_ref = (REF*)*iter;
      iter = vector_block_iterator_advance(root_set, iter);

      if(!heap_verifier->need_verify_rootset || !heap_verifier->is_before_gc){
        if(!verify_rootset_slot(p_ref, heap_verifier)){
          gc_verifier->is_verification_passed = FALSE;
          assert(0);
          continue;
        }
      }
      Partial_Reveal_Object* p_obj = read_slot(p_ref);
      assert(p_obj != NULL);
      verifier_tracestack_push(p_obj, gc_verifier->trace_stack);
    }
    root_set = pool_iterator_next(root_set_pool);
  }
  /* put back the last trace_stack task */
  pool_put_entry(verifier_metadata->mark_task_pool, gc_verifier->trace_stack);

  /* second step: iterate over the trace tasks and forward objects */
  gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool);

  Vector_Block* trace_task = pool_get_entry(verifier_metadata->mark_task_pool);
  while(trace_task){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
    while(!vector_block_iterator_end(trace_task, iter)){
      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
      iter = vector_block_iterator_advance(trace_task, iter);
      trace_object(heap_verifier, p_obj);
    }
    vector_stack_clear(trace_task);
    pool_put_entry(verifier_metadata->free_task_pool, trace_task);
    trace_task = pool_get_entry(verifier_metadata->mark_task_pool);
  }

  vector_stack_clear(gc_verifier->trace_stack);
  pool_put_entry(verifier_metadata->free_task_pool, gc_verifier->trace_stack);
  gc_verifier->trace_stack = NULL;
}
void verifier_trace_objsets(Heap_Verifier* heap_verifier, Pool* obj_set_pool)
{
  Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
  GC_Verifier* gc_verifier = heap_verifier->gc_verifier;
  gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool);

  pool_iterator_init(obj_set_pool);
  Vector_Block* obj_set = pool_iterator_next(obj_set_pool);

  /* first step: copy all root objects to trace tasks. */
  while(obj_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(obj_set);
    while(!vector_block_iterator_end(obj_set, iter)){
      Partial_Reveal_Object* p_obj = read_slot((REF*)iter);
      iter = vector_block_iterator_advance(obj_set, iter);
      /* p_obj can be NULL: when a GC happens, entries in the finalizable-object list may be cleared. */
      //assert(p_obj != NULL);
      if(p_obj == NULL) continue;
      if(heap_verifier->gc_is_gen_mode && heap_verifier->is_before_gc && !obj_belongs_to_nos(p_obj)) continue;
      verifier_tracestack_push(p_obj, gc_verifier->trace_stack);
    }
    obj_set = pool_iterator_next(obj_set_pool);
  }
  /* put back the last trace_stack task */
  pool_put_entry(verifier_metadata->mark_task_pool, gc_verifier->trace_stack);

  /* second step: iterate over the trace tasks and forward objects */
  gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool);

  Vector_Block* trace_task = pool_get_entry(verifier_metadata->mark_task_pool);
  while(trace_task){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
    while(!vector_block_iterator_end(trace_task, iter)){
      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)*iter;
      iter = vector_block_iterator_advance(trace_task, iter);
      trace_object(heap_verifier, p_obj);
    }
    vector_stack_clear(trace_task);
    pool_put_entry(verifier_metadata->free_task_pool, trace_task);
    trace_task = pool_get_entry(verifier_metadata->mark_task_pool);
  }

  vector_stack_clear(gc_verifier->trace_stack);
  pool_put_entry(verifier_metadata->free_task_pool, gc_verifier->trace_stack);
  gc_verifier->trace_stack = NULL;
}
void verifier_clear_pool(Pool* working_pool, Pool* free_pool, Boolean is_vector_stack)
{
  Vector_Block* working_block = pool_get_entry(working_pool);
  while(working_block){
    if(is_vector_stack) vector_stack_clear(working_block);
    else vector_block_clear(working_block);
    pool_put_entry(free_pool, working_block);
    working_block = pool_get_entry(working_pool);
  }
}
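/* A minimal usage sketch, assuming a caller that owns a Heap_Verifier_Metadata: the helper below is
 * hypothetical (not part of the verifier), and objects_set_pool is a made-up pool name. It only
 * illustrates how the is_vector_stack flag selects between vector_stack_clear() and
 * vector_block_clear() when blocks are recycled back to their free pools. */
static void verifier_metadata_recycle_pools(Heap_Verifier_Metadata* verifier_metadata)
{
  /* task blocks were used as stacks, so reset them as stacks */
  verifier_clear_pool(verifier_metadata->mark_task_pool, verifier_metadata->free_task_pool, TRUE);
  /* set blocks were plain vector blocks, so reset them as blocks (objects_set_pool is hypothetical) */
  verifier_clear_pool(verifier_metadata->objects_set_pool, verifier_metadata->free_set_pool, FALSE);
}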
// Resurrect the object tree rooted at the object that p_ref points to
static inline void resurrect_obj_tree(Collector *collector, REF *p_ref)
{
  GC *gc = collector->gc;
  GC_Metadata *metadata = gc->metadata;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  assert(p_obj && gc_obj_is_dead(gc, p_obj));

  void *p_ref_or_obj = p_ref;
  Trace_Object_Func trace_object;

  /* set the trace_object() function */
  if(collect_is_minor()){
    if(gc_is_gen_mode()){
      if(minor_is_forward())
        trace_object = trace_obj_in_gen_fw;
      else if(minor_is_semispace())
        trace_object = trace_obj_in_gen_ss;
      else
        assert(0);
    }else{
      if(minor_is_forward())
        trace_object = trace_obj_in_nongen_fw;
      else if(minor_is_semispace())
        trace_object = trace_obj_in_nongen_ss;
      else
        assert(0);
    }
  } else if(collect_is_major_normal() || !gc_has_nos()){
    p_ref_or_obj = p_obj;
    if(gc_has_space_tuner(gc) && (gc->tuner->kind != TRANS_NOTHING)){
      trace_object = trace_obj_in_space_tune_marking;
      unsigned int obj_size = vm_object_size(p_obj);
#ifdef USE_32BITS_HASHCODE
      obj_size += hashcode_is_set(p_obj) ? GC_OBJECT_ALIGNMENT : 0;
#endif
      if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc))){
        collector->non_los_live_obj_size += obj_size;
        collector->segment_live_size[SIZE_TO_SEGMENT_INDEX(obj_size)] += obj_size;
      } else {
        collector->los_live_obj_size += round_up_to_size(obj_size, KB);
      }
    } else if(!gc_has_nos()){
      trace_object = trace_obj_in_ms_marking;
    } else {
      trace_object = trace_obj_in_normal_marking;
    }
  } else if(collect_is_fallback()){
    if(major_is_marksweep())
      trace_object = trace_obj_in_ms_fallback_marking;
    else
      trace_object = trace_obj_in_fallback_marking;
  } else {
    assert(major_is_marksweep());
    p_ref_or_obj = p_obj;
    if( gc->gc_concurrent_status == GC_CON_NIL )
      trace_object = trace_obj_in_ms_marking;
    else
      trace_object = trace_obj_in_ms_concurrent_mark;
  }

  collector->trace_stack = free_task_pool_get_entry(metadata);
  collector_tracestack_push(collector, p_ref_or_obj);
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);

  collector->trace_stack = free_task_pool_get_entry(metadata);
  Vector_Block *task_block = pool_get_entry(metadata->mark_task_pool);
  while(task_block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(task_block);
    while(!vector_block_iterator_end(task_block, iter)){
      void *p_ref_or_obj = (void*)*iter;
      assert(((collect_is_minor()||collect_is_fallback()) && *(Partial_Reveal_Object **)p_ref_or_obj)
             || ((collect_is_major_normal()||major_is_marksweep()||!gc_has_nos()) && p_ref_or_obj));
      trace_object(collector, p_ref_or_obj);
      if(collector->result == FALSE) break; /* resurrection fallback happened; force return */

      iter = vector_block_iterator_advance(task_block, iter);
    }
    vector_stack_clear(task_block);
    pool_put_entry(metadata->free_task_pool, task_block);

    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }

    task_block = pool_get_entry(metadata->mark_task_pool);
  }

  task_block = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(task_block);
  pool_put_entry(metadata->free_task_pool, task_block);
  collector->trace_stack = NULL;
}
void wspace_final_mark_scan_mostly_concurrent(Conclctor* marker)
{
  GC *gc = marker->gc;
  GC_Metadata *metadata = gc->metadata;
  unsigned int num_dirtyset_slot = 0;

  marker->trace_stack = free_task_pool_get_entry(metadata);
  Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to mark tasks. */
  while(root_set){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set, iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(root_set, iter);
      assert(p_obj != NULL);
      assert(address_belongs_to_gc_heap(p_obj, gc));
      if(obj_mark_gray_in_table(p_obj))
        collector_tracestack_push((Collector*)marker, p_obj);
    }
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */
  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
  marker->trace_stack = free_task_pool_get_entry(metadata);

  /* second step: mark the dirty-set pool */
  Vector_Block* dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
  while(dirty_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(dirty_set);
    while(!vector_block_iterator_end(dirty_set, iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(dirty_set, iter);
      assert(p_obj != NULL); //FIXME: restrict condition?

      obj_clear_dirty_in_table(p_obj);
      obj_clear_mark_in_table(p_obj, marker);

      if(obj_mark_gray_in_table(p_obj))
        collector_tracestack_push((Collector*)marker, p_obj);

      num_dirtyset_slot++;
    }
    vector_block_clear(dirty_set);
    pool_put_entry(metadata->free_set_pool, dirty_set);
    dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
  }
  /* put back the last trace_stack task */
  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);

  /* third step: iterate over the mark tasks and scan objects */
  marker->trace_stack = free_task_pool_get_entry(metadata);

  Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
  while(mark_task){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
    while(!vector_block_iterator_end(mark_task, iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
      iter = vector_block_iterator_advance(mark_task, iter);
      trace_object(marker, p_obj);
    }
    /* finished one task: put it back to the pool and grab another */
    vector_stack_clear(mark_task);
    pool_put_entry(metadata->free_task_pool, mark_task);
    mark_task = pool_get_entry(metadata->mark_task_pool);
  }

  /* put back the last mark stack to the free pool */
  mark_task = (Vector_Block*)marker->trace_stack;
  vector_stack_clear(mark_task);
  pool_put_entry(metadata->free_task_pool, mark_task);
  marker->trace_stack = NULL;

  //marker->time_mark += time_mark;
  marker->num_dirty_slots_traced = num_dirtyset_slot;
  //INFO2("gc.marker", "[final marker] processed dirty slot="<<num_dirtyset_slot);
  return;
}
void wspace_mark_scan_mostly_concurrent(Conclctor* marker)
{
  GC *gc = marker->gc;
  GC_Metadata *metadata = gc->metadata;
  unsigned int num_dirtyset_slot = 0;

  marker->trace_stack = free_task_pool_get_entry(metadata);

  /* first step: copy all root objects to mark tasks. */
  Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);
  while(root_set){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set, iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(root_set, iter);
      assert(p_obj != NULL);
      assert(address_belongs_to_gc_heap(p_obj, gc));
      if(obj_mark_gray_in_table(p_obj))
        collector_tracestack_push((Collector*)marker, p_obj);
    }
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */
  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
  marker->trace_stack = free_task_pool_get_entry(metadata);

  /* The following code has these concerns:
     1. current_thread_id should be unique.
     2. Mostly-concurrent marking does not need to add new markers dynamically.
     3. When the heap is exhausted, final marking will enumerate the root set,
        so it must happen after the actions above. */
  unsigned int current_thread_id = atomic_inc32(&num_active_markers);
  if((current_thread_id + 1) == gc->num_active_markers)
    state_transformation(gc, GC_CON_START_MARKERS, GC_CON_TRACING);
  while(gc->gc_concurrent_status == GC_CON_START_MARKERS);

retry:
  /* second step: mark the dirty-set pool */
  Vector_Block* dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
  while(dirty_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(dirty_set);
    while(!vector_block_iterator_end(dirty_set, iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(dirty_set, iter);
      assert(p_obj != NULL); //FIXME: restrict condition?

      obj_clear_dirty_in_table(p_obj);
      obj_clear_mark_in_table(p_obj, marker);

      if(obj_mark_gray_in_table(p_obj))
        collector_tracestack_push((Collector*)marker, p_obj);

      num_dirtyset_slot++;
    }
    vector_block_clear(dirty_set);
    pool_put_entry(metadata->free_set_pool, dirty_set);
    dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
  }
  /* put back the last trace_stack task */
  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);

  /* third step: iterate over the mark tasks and scan objects */
  marker->trace_stack = free_task_pool_get_entry(metadata);

  Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
  while(mark_task){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
    while(!vector_block_iterator_end(mark_task, iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
      iter = vector_block_iterator_advance(mark_task, iter);
      trace_object(marker, p_obj);
    }
    /* finished one task: put it back to the pool and grab another */
    vector_stack_clear(mark_task);
    pool_put_entry(metadata->free_task_pool, mark_task);
    mark_task = pool_get_entry(metadata->mark_task_pool);
  }

  /*
  if(current_thread_id == 0){
    gc_prepare_dirty_set(marker->gc);
  }*/

  gc_copy_local_dirty_set_to_global(gc);

  /* conditions to terminate marking:
     1. All threads have finished their current job.
     2. The flag to terminate concurrent marking is set. */
  atomic_dec32(&num_active_markers);
  while(num_active_markers != 0 || !concurrent_mark_need_terminating_mc(gc)) {
    if(!pool_is_empty(metadata->mark_task_pool) || !pool_is_empty(metadata->gc_dirty_set_pool)) {
      atomic_inc32(&num_active_markers);
      goto retry;
    } else if(current_thread_id >= mostly_con_long_marker_num) {
      break;
    }
    apr_sleep(15000);
  }

  /*
  while(num_active_markers != 0 || !concurrent_mark_need_terminating_mc(gc)){
    if(!pool_is_empty(metadata->mark_task_pool) || !pool_is_empty(metadata->gc_dirty_set_pool)){
      atomic_inc32(&num_active_markers);
      goto retry;
    }
  }*/

  /* put back the last mark stack to the free pool */
  mark_task = (Vector_Block*)marker->trace_stack;
  vector_stack_clear(mark_task);
  pool_put_entry(metadata->free_task_pool, mark_task);
  marker->trace_stack = NULL;
  marker->num_dirty_slots_traced = num_dirtyset_slot;

  /*
  if(num_dirtyset_slot != 0) {
    lock(info_lock);
    INFO2("gc.marker", "marker ["<< current_thread_id <<"] processed dirty slot="<<num_dirtyset_slot);
    unlock(info_lock);
  }*/
  return;
}
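/* Illustrative sketch only (not DRLVM code): the termination protocol used above by the markers,
 * and below by mark_scan_pool/collector_trace_rootsets, follows a common idiom: a worker that
 * runs out of tasks decrements a shared count of active workers, then keeps watching the shared
 * pool; if work reappears it re-registers itself and retries, and it only exits once every worker
 * is inactive and the pool stays empty. The self-contained C11 version below uses hypothetical
 * stand-ins (task_pop/process/queue_is_empty) for pool_get_entry/pool_put_entry on mark_task_pool. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct Task Task;
extern Task *task_pop(void);        /* hypothetical: grab one task, or NULL if the pool is empty */
extern void  process(Task *t);      /* hypothetical: processing may push new tasks into the pool */
extern bool  queue_is_empty(void);  /* hypothetical: check whether the shared pool is empty */

static atomic_uint num_active_workers;

static void worker_drain_and_terminate(void)
{
retry:
  for(Task *t = task_pop(); t != NULL; t = task_pop())
    process(t);                     /* may generate more tasks for any worker to pick up */

  /* Out of work: step out, but rejoin if another worker publishes new tasks. */
  atomic_fetch_sub(&num_active_workers, 1);
  while(atomic_load(&num_active_workers) != 0){
    if(!queue_is_empty()){
      atomic_fetch_add(&num_active_workers, 1);
      goto retry;
    }
  }
  /* All workers inactive and the pool stayed empty: safe to terminate. */
}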
void mark_scan_pool(Collector* collector)
{
  GC* gc = collector->gc;
  GC_Metadata* metadata = gc->metadata;
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

  /* one collector resets num_finished_collectors to 0. This is necessary for the barrier later. */
  unsigned int num_active_collectors = gc->num_active_collectors;
  atomic_cas32(&num_finished_collectors, 0, num_active_collectors);

  collector->trace_stack = free_task_pool_get_entry(metadata);

  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to mark tasks.
     FIXME:: this could be done sequentially before coming here, to eliminate the atomic ops */
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set, iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(root_set, iter);

      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      /* a root ref can't be NULL (a remset may have NULL ref entries, but this function is only for ALGO_MAJOR) */
      assert(p_obj != NULL);
      /* we have to mark the object before putting it into the mark task, because two slots may
         contain the same object: it would then be scanned twice and its ref slots recorded twice.
         The problem shows up after a ref slot is updated the first time with the new position;
         on the second visit the value in the ref slot is no longer the old position that is
         expected. This could be worked around if we wanted. */
      if(obj_mark_in_vt(p_obj)){
        collector_tracestack_push(collector, p_obj);
#ifdef GC_GEN_STATS
        gc_gen_collector_update_rootset_ref_num(stats);
        gc_gen_collector_update_marked_obj_stats_major(stats);
#endif
      }
    }
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);

  /* second step: iterate over the mark tasks and scan objects */
  /* get a task buf for the mark stack */
  collector->trace_stack = free_task_pool_get_entry(metadata);

retry:
  Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
  while(mark_task){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(mark_task);
    while(!vector_block_iterator_end(mark_task, iter)){
      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(mark_task, iter);

      /* FIXME:: we should not let mark_task become empty while working; others may want to
         steal it. Degenerate my stack into mark_task, and grab another mark_task. */
      trace_object(collector, p_obj);
    }
    /* finished one task: put it back to the pool and grab another */
    vector_stack_clear(mark_task);
    pool_put_entry(metadata->free_task_pool, mark_task);
    mark_task = pool_get_entry(metadata->mark_task_pool);
  }

  /* termination detection. This is also a barrier.
     NOTE:: we could simply spin waiting for num_finished_collectors, because each newly
     generated task is sure to be processed by its generating collector eventually.
     So the code below is only a load-balance optimization. */
  atomic_inc32(&num_finished_collectors);
  while(num_finished_collectors != num_active_collectors){
    if(!pool_is_empty(metadata->mark_task_pool)){
      atomic_dec32(&num_finished_collectors);
      goto retry;
    }
  }

  /* put back the last mark stack to the free pool */
  mark_task = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(mark_task);
  pool_put_entry(metadata->free_task_pool, mark_task);
  collector->trace_stack = NULL;

  return;
}
void wspace_mark_scan_concurrent(Conclctor* marker)
{
  //marker->time_measurement_start = time_now();
  GC *gc = marker->gc;
  GC_Metadata *metadata = gc->metadata;

  unsigned int current_thread_id = atomic_inc32(&num_active_markers);
  marker->trace_stack = free_task_pool_get_entry(metadata);

  Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to mark tasks. */
  while(root_set){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set, iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(root_set, iter);
      assert(p_obj != NULL);
      assert(address_belongs_to_gc_heap(p_obj, gc));
      //if(obj_mark_gray_in_table(p_obj, &root_set_obj_size))
      if(obj_mark_gray_in_table(p_obj))
        collector_tracestack_push((Collector*)marker, p_obj);
    }
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */
  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
  marker->trace_stack = free_task_pool_get_entry(metadata);

  state_transformation(gc, GC_CON_START_MARKERS, GC_CON_TRACING);

retry:
  gc_copy_local_dirty_set_to_global(marker->gc);

  /* second step: mark the dirty-object snapshot pool */
  Vector_Block* dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
  while(dirty_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(dirty_set);
    while(!vector_block_iterator_end(dirty_set, iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(dirty_set, iter);

      if(p_obj == NULL) { //FIXME: restrict?
        RAISE_ERROR;
      }
      marker->num_dirty_slots_traced++;
      if(obj_mark_gray_in_table(p_obj))
        collector_tracestack_push((Collector*)marker, p_obj);
    }
    vector_block_clear(dirty_set);
    pool_put_entry(metadata->free_set_pool, dirty_set);
    dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
  }
  /* put back the last trace_stack task */
  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);

  /* third step: iterate over the mark tasks and scan objects */
  /* get a task buf for the mark stack */
  marker->trace_stack = free_task_pool_get_entry(metadata);

  Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
  while(mark_task){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
    while(!vector_block_iterator_end(mark_task, iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
      iter = vector_block_iterator_advance(mark_task, iter);
      trace_object(marker, p_obj);
    }
    /* finished one task: put it back to the pool and grab another */
    vector_stack_clear(mark_task);
    pool_put_entry(metadata->free_task_pool, mark_task);
    mark_task = pool_get_entry(metadata->mark_task_pool);
  }

  /* termination conditions:
     1. All threads have finished their current job.
     2. Local snapshot vectors are empty.
     3. The global snapshot pool is empty. */
  atomic_dec32(&num_active_markers);
  while(num_active_markers != 0 || !concurrent_mark_need_terminating_otf(gc)){
    if(!pool_is_empty(metadata->mark_task_pool) || !concurrent_mark_need_terminating_otf(gc)){
      atomic_inc32(&num_active_markers);
      goto retry;
    }
    apr_sleep(15000);
  }

  state_transformation(gc, GC_CON_TRACING, GC_CON_TRACE_DONE);

  /* put back the last mark stack to the free pool */
  mark_task = (Vector_Block*)marker->trace_stack;
  vector_stack_clear(mark_task);
  pool_put_entry(metadata->free_task_pool, mark_task);
  marker->trace_stack = NULL;

  assert(pool_is_empty(metadata->gc_dirty_set_pool));
  //INFO2("gc.con.info", "<stage 5>first marker finishes its job");
  return;
}
static FORCE_INLINE void forward_object(Collector* collector, REF *p_ref)
{
  GC* gc = collector->gc;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);

  if(obj_belongs_to_tospace(p_obj)) return;

  if(!obj_belongs_to_nos(p_obj)){
    if(obj_mark_in_oi(p_obj)){
#ifdef GC_GEN_STATS
      if(gc_profile){
        GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
        gc_gen_collector_update_marked_nonnos_obj_stats_minor(stats);
      }
#endif
      scan_object(collector, p_obj);
    }
    return;
  }

  Partial_Reveal_Object* p_target_obj = NULL;

  /* Fastpath: the object has already been forwarded, so just update the ref slot */
  if(obj_is_fw_in_oi(p_obj)) {
    p_target_obj = obj_get_fw_in_oi(p_obj);
    assert(p_target_obj);
    write_slot(p_ref, p_target_obj);
    return;
  }

  /* following is the logic for forwarding */
  p_target_obj = collector_forward_object(collector, p_obj);

  /* If p_target_obj is NULL, the object was forwarded by another thread.
     We could implement collector_forward_object() so that the forwarding pointer is set in the
     atomic instruction, which requires rolling back the mos-allocated space. That is easy for
     thread-local block allocation cancellation. */
  if( p_target_obj == NULL ){
    if(collector->result == FALSE ){
      /* failed to forward; let's get back to the controller. */
      vector_stack_clear(collector->trace_stack);
      return;
    }

    p_target_obj = obj_get_fw_in_oi(p_obj);
    assert(p_target_obj);
    write_slot(p_ref, p_target_obj);
    return;
  }

  /* otherwise, we successfully forwarded */
#ifdef GC_GEN_STATS
  if(gc_profile){
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
    gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
    gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
  }
#endif

  write_slot(p_ref, p_target_obj);
  scan_object(collector, p_target_obj);
  return;
}
static void collector_trace_rootsets(Collector* collector)
{
  GC* gc = collector->gc;
  GC_Metadata* metadata = gc->metadata;
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

  unsigned int num_active_collectors = gc->num_active_collectors;
  atomic_cas32(&num_finished_collectors, 0, num_active_collectors);

  Space* space = collector->collect_space;
  collector->trace_stack = free_task_pool_get_entry(metadata);

  /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last-cycle collectors */
  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to trace tasks. */
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: copy root objects to trace stack ...");
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set, iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(root_set, iter);

      assert(*p_ref);  /* a root ref can't be NULL, but a remset entry can be */

      collector_tracestack_push(collector, p_ref);

#ifdef GC_GEN_STATS
      gc_gen_collector_update_rootset_ref_num(stats);
#endif
    }
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);

  /* second step: iterate over the trace tasks and forward objects */
  collector->trace_stack = free_task_pool_get_entry(metadata);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish copying root objects to trace stack.");
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: trace and forward objects ...");

retry:
  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);
  while(trace_task){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
    while(!vector_block_iterator_end(trace_task, iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(trace_task, iter);
#ifdef PREFETCH_SUPPORTED
      /* do prefetch */
      if( mark_prefetch ) {
        if(!vector_block_iterator_end(trace_task, iter)) {
          REF *pref = (REF*)*iter;
          PREFETCH( read_slot(pref));
        }
      }
#endif
      trace_object(collector, p_ref);

      if(collector->result == FALSE) break; /* force return */
    }
    vector_stack_clear(trace_task);
    pool_put_entry(metadata->free_task_pool, trace_task);

    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }

    trace_task = pool_get_entry(metadata->mark_task_pool);
  }

  /* A collector comes here when it sees an empty mark_task_pool.
     The last collector will ensure all the tasks are finished. */
  atomic_inc32(&num_finished_collectors);
  while(num_finished_collectors != num_active_collectors){
    if( pool_is_empty(metadata->mark_task_pool)) continue;
    /* we can't grab the task here, because of a race condition: if we grabbed the task,
       the pool would become empty, and other threads could reach this barrier and pass it. */
    atomic_dec32(&num_finished_collectors);
    goto retry;
  }

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish tracing and forwarding objects.");

  /* now we are done, but each collector has a private stack that is empty */
  trace_task = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(trace_task);
  pool_put_entry(metadata->free_task_pool, trace_task);
  collector->trace_stack = NULL;

  return;
}
static FORCE_INLINE void forward_object(Collector *collector, REF *p_ref)
{
  Space* space = collector->collect_space;
  GC* gc = collector->gc;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);

  /* p_obj can also be in tospace, because this p_ref may be a redundant entry in a mutator
     remset. We don't remember p_ref again, since it was remembered the first time it was met.
     FIXME:: the obj_belongs_to_tospace() situation should never be true if we remembered
     objects rather than slots. Currently, mutators remember objects and collectors remember
     slots. Although collectors remember slots, we are sure there is no chance of a repeated
     p_ref, because an object is scanned only when it is marked or forwarded atomically, hence
     only one collector has the chance to scan it. */
  if(!obj_belongs_to_nos(p_obj) || obj_belongs_to_tospace(p_obj)) return;

  Partial_Reveal_Object* p_target_obj = NULL;
  Boolean to_rem_slot = FALSE;

  /* Fastpath: the object has already been forwarded, so just update the ref slot */
  if(obj_is_fw_in_oi(p_obj)){
    p_target_obj = obj_get_fw_in_oi(p_obj);
    write_slot(p_ref, p_target_obj);

    /* check if the target obj stays in NOS while p_ref is from MOS. If so, rem p_ref. */
    if(obj_belongs_to_tospace(p_target_obj))
      if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
        collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref);

    return;
  }

  /* following is the logic for forwarding */
  p_target_obj = collector_forward_object(collector, p_obj);

  /* If p_target_obj is NULL, the object was forwarded by another thread.
     Note the race condition here: it might have been forwarded by another thread that has not
     yet set the forwarding pointer, so we need to spin to get the forwarding pointer.
     We could implement collector_forward_object() so that the forwarding pointer is set in the
     atomic instruction, which requires rolling back the mos-allocated space. That is easy for
     thread-local block allocation cancellation. */
  if( p_target_obj == NULL ){
    if(collector->result == FALSE ){
      /* failed to forward; let's get back to the controller. */
      vector_stack_clear(collector->trace_stack);
      return;
    }
    /* forwarded already */
    p_target_obj = obj_get_fw_in_oi(p_obj);

  }else{
    /* otherwise, we successfully forwarded */
#ifdef GC_GEN_STATS
    if(gc_profile){
      GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
      gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
      gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
    }
#endif
    scan_object(collector, p_target_obj);
  }

  assert(p_target_obj);
  write_slot(p_ref, p_target_obj);

  /* check if the target obj stays in NOS while p_ref is from MOS. If so, rem p_ref. */
  if(obj_belongs_to_tospace(p_target_obj)){
    if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
      collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref);
  }

  return;
}
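/* Illustrative sketch only (not the DRLVM implementation): the comments above suggest an
 * alternative collector_forward_object() in which the forwarding pointer is installed by the
 * same atomic instruction that claims the object, so a loser of the race can immediately read
 * the winner's pointer and roll back its own speculative copy. A minimal C11 version of that
 * idea, with a hypothetical object layout and FORWARD_BIT encoding: */
#include <stdatomic.h>
#include <stdint.h>

#define FORWARD_BIT ((uintptr_t)0x1)

typedef struct Obj {
  _Atomic uintptr_t header;  /* vtable/class pointer, or (forwarded address | FORWARD_BIT) */
} Obj;

/* Returns the unique forwarded copy of obj: new_copy if this thread wins the race,
 * otherwise the copy installed by the winner. The loser should roll back new_copy,
 * which is cheap with thread-local block allocation. */
static Obj* forward_with_cas(Obj* obj, Obj* new_copy)
{
  uintptr_t old = atomic_load(&obj->header);
  for(;;){
    if(old & FORWARD_BIT)                          /* already forwarded by another thread */
      return (Obj*)(old & ~FORWARD_BIT);
    uintptr_t fwd = (uintptr_t)new_copy | FORWARD_BIT;
    if(atomic_compare_exchange_weak(&obj->header, &old, fwd))
      return new_copy;                             /* we won: forwarding pointer is now visible */
    /* CAS failed: 'old' now holds the current header value; re-check and retry. */
  }
}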
static void collector_trace_rootsets(Collector* collector)
{
  GC* gc = collector->gc;
  GC_Metadata* metadata = gc->metadata;
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

  unsigned int num_active_collectors = gc->num_active_collectors;
  atomic_cas32(&num_finished_collectors, 0, num_active_collectors);

  Space* space = collector->collect_space;
  collector->trace_stack = free_task_pool_get_entry(metadata);

  /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last-cycle collectors */
  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to trace tasks. */
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: copy root objects to trace stack ......");
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set, iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(root_set, iter);

      if(!*p_ref) continue;  /* a root ref can't be NULL, but a remset entry can be */
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
#ifdef GC_GEN_STATS
      gc_gen_collector_update_rootset_ref_num(stats);
#endif
      if(obj_belongs_to_nos(p_obj)){
        collector_tracestack_push(collector, p_ref);
      }
    }
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);

  /* second step: iterate over the trace tasks and forward objects */
  collector->trace_stack = free_task_pool_get_entry(metadata);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish copying root objects to trace stack.");
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: trace and forward objects ......");

retry:
  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);
  while(trace_task){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
    while(!vector_block_iterator_end(trace_task, iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(trace_task, iter);
      assert(*p_ref);  /* a task can't be NULL; it was checked before being put into the task stack */
#ifdef PREFETCH_SUPPORTED
      /* do prefetch */
      if( mark_prefetch ) {
        if(!vector_block_iterator_end(trace_task, iter)) {
          REF *pref = (REF*)*iter;
          PREFETCH( read_slot(pref));
        }
      }
#endif
      /* in the sequential version we only traced the same object once, but we were using a
         local hashset for that, which couldn't catch repetition across multiple collectors.
         This is subject to more study. */

      /* FIXME:: we should not let root_set become empty while working; others may want to
         steal it. Degenerate my stack into root_set, and grab another stack. */

      /* a task has to belong to the collected space; it was checked before being put into the stack */
      trace_object(collector, p_ref);

      if(collector->result == FALSE) break; /* force return */
    }
    vector_stack_clear(trace_task);
    pool_put_entry(metadata->free_task_pool, trace_task);

    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }

    trace_task = pool_get_entry(metadata->mark_task_pool);
  }

  atomic_inc32(&num_finished_collectors);
  while(num_finished_collectors != num_active_collectors){
    if( pool_is_empty(metadata->mark_task_pool)) continue;
    /* we can't grab the task here, because of a race condition: if we grabbed the task,
       the pool would become empty, and other threads could reach this barrier and pass it. */
    atomic_dec32(&num_finished_collectors);
    goto retry;
  }

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish tracing and forwarding objects.");

  /* now we are done, but each collector has a private stack that is empty */
  trace_task = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(trace_task);
  pool_put_entry(metadata->free_task_pool, trace_task);
  collector->trace_stack = NULL;

  return;
}