static FORCE_INLINE void forward_object(Collector* collector, REF *p_ref)
{
  GC* gc = collector->gc;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);

  if(obj_belongs_to_tospace(p_obj)) return;

  if(!obj_belongs_to_nos(p_obj)){
    if(obj_mark_in_oi(p_obj)){
#ifdef GC_GEN_STATS
      if(gc_profile){
        GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
        gc_gen_collector_update_marked_nonnos_obj_stats_minor(stats);
      }
#endif
      scan_object(collector, p_obj);
    }
    return;
  }

  Partial_Reveal_Object* p_target_obj = NULL;

  /* Fastpath: the object has already been forwarded; just update the ref slot. */
  if(obj_is_fw_in_oi(p_obj)){
    p_target_obj = obj_get_fw_in_oi(p_obj);
    assert(p_target_obj);
    write_slot(p_ref, p_target_obj);
    return;
  }

  /* Slowpath: try to forward the object ourselves. */
  p_target_obj = collector_forward_object(collector, p_obj);

  /* If p_target_obj is NULL, the object has been forwarded by another thread.
     collector_forward_object() could be implemented so that the forwarding
     pointer is installed by the atomic instruction itself; the thread that
     loses the race then has to roll back the space it allocated in MOS,
     which is easy to do with thread-local block allocation cancellation. */
  if(p_target_obj == NULL){
    if(collector->result == FALSE){
      /* Forwarding failed; clear the trace stack and return to the controller. */
      vector_stack_clear(collector->trace_stack);
      return;
    }

    p_target_obj = obj_get_fw_in_oi(p_obj);
    assert(p_target_obj);
    write_slot(p_ref, p_target_obj);
    return;
  }

  /* Otherwise, we forwarded the object successfully. */
#ifdef GC_GEN_STATS
  if(gc_profile){
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
    gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
    gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
  }
#endif

  write_slot(p_ref, p_target_obj);
  scan_object(collector, p_target_obj);
  return;
}
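The comment above suggests an alternative in which collector_forward_object() publishes the forwarding pointer with the atomic instruction itself, rolling back the thread-local allocation when the race is lost. Below is a minimal, self-contained sketch of that one-CAS scheme; every name in it (SketchObj, FORWARD_BIT, alloc_in_tospace, unalloc_last) is hypothetical and purely illustrative, not a DRLVM API.

#include <stdatomic.h>
#include <stdint.h>
#include <string.h>

#define FORWARD_BIT ((uintptr_t)0x1) /* low header bit marks "forwarded" */

typedef struct SketchObj {
  _Atomic uintptr_t header;  /* vt pointer, or (copy | FORWARD_BIT) */
  size_t payload_size;
  char payload[];
} SketchObj;

/* Hypothetical thread-local bump-pointer allocation in the target space. */
extern SketchObj *alloc_in_tospace(size_t bytes);
/* Hypothetical cancellation of the most recent thread-local allocation. */
extern void unalloc_last(SketchObj *obj, size_t bytes);

/* Returns the new copy on success, or NULL if another thread won the race;
   the caller can then read the forwarding pointer straight out of the header. */
static SketchObj *sketch_forward(SketchObj *p_obj)
{
  uintptr_t vt = atomic_load(&p_obj->header);
  if(vt & FORWARD_BIT)
    return NULL; /* already forwarded by another collector thread */

  size_t bytes = sizeof(SketchObj) + p_obj->payload_size;
  SketchObj *copy = alloc_in_tospace(bytes);

  /* Copy speculatively, before we know whether we win the race. */
  atomic_init(&copy->header, vt);
  copy->payload_size = p_obj->payload_size;
  memcpy(copy->payload, p_obj->payload, p_obj->payload_size);

  /* One CAS both claims the object and publishes its new address. */
  if(!atomic_compare_exchange_strong(&p_obj->header, &vt,
                                     (uintptr_t)copy | FORWARD_BIT)){
    unalloc_last(copy, bytes); /* lost the race: roll back the allocation */
    return NULL;
  }
  return copy;
}

Because a single compare-and-swap both claims the object and publishes its new address, the "marked forwarded but pointer not yet stored" window that the generational version below has to spin on never exists; the cost is the speculative copy that a losing thread throws away.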
static FORCE_INLINE void forward_object(Collector *collector, REF *p_ref)
{
  Space* space = collector->collect_space;
  GC* gc = collector->gc;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);

  /* p_obj can be in tospace as well, because p_ref may be a redundant entry
     in the mutator remset; we don't remember p_ref again since it was
     remembered the first time it was met.
     FIXME:: obj_belongs_to_tospace() could never be true if we remembered
     objects rather than slots. Currently mutators remember objects while
     collectors remember slots. Even though collectors remember slots, a
     repeated p_ref cannot occur on their side, because an object is scanned
     only after it has been marked or forwarded atomically, so exactly one
     collector gets the chance to scan it. */
  if(!obj_belongs_to_nos(p_obj) || obj_belongs_to_tospace(p_obj)) return;

  Partial_Reveal_Object* p_target_obj = NULL;

  /* Fastpath: the object has already been forwarded; just update the ref slot. */
  if(obj_is_fw_in_oi(p_obj)){
    p_target_obj = obj_get_fw_in_oi(p_obj);
    write_slot(p_ref, p_target_obj);

    /* If the target object stays in NOS and p_ref lives in MOS, remember p_ref. */
    if(obj_belongs_to_tospace(p_target_obj))
      if(!addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
        collector_remset_add_entry(collector, (Partial_Reveal_Object**)p_ref);

    return;
  }

  /* Slowpath: try to forward the object ourselves. */
  p_target_obj = collector_forward_object(collector, p_obj);

  /* If p_target_obj is NULL, the object has been forwarded by another thread.
     Note the race here: the object may already be marked as forwarded while
     the other thread has not stored the forwarding pointer yet, so we have
     to spin until the pointer becomes available. Alternatively,
     collector_forward_object() could install the forwarding pointer with the
     atomic instruction itself, which requires rolling back the space
     allocated in MOS; that is easy with thread-local block allocation
     cancellation. */
  if(p_target_obj == NULL){
    if(collector->result == FALSE){
      /* Forwarding failed; clear the trace stack and return to the controller. */
      vector_stack_clear(collector->trace_stack);
      return;
    }
    /* Already forwarded by another thread. */
    p_target_obj = obj_get_fw_in_oi(p_obj);
  }else{
    /* Otherwise, we forwarded the object successfully. */
#ifdef GC_GEN_STATS
    if(gc_profile){
      GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
      gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
      gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
    }
#endif
    scan_object(collector, p_target_obj);
  }

  assert(p_target_obj);
  write_slot(p_ref, p_target_obj);

  /* If the target object stays in NOS and p_ref lives in MOS, remember p_ref. */
  if(obj_belongs_to_tospace(p_target_obj)){
    if(!addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
      collector_remset_add_entry(collector, (Partial_Reveal_Object**)p_ref);
  }
  return;
}
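The spin that the comment mentions could look like the following sketch, under an assumed two-step publish scheme: the winning thread first CASes the header from the vt value to FORWARD_BIT alone ("claimed, pointer pending"), copies the object, and only then stores (copy | FORWARD_BIT). The names carry over from the earlier sketch and are hypothetical, not DRLVM APIs.

#include <stdatomic.h>
#include <stdint.h>
#include <sched.h>

#define FORWARD_BIT ((uintptr_t)0x1) /* same assumed header layout as above */

typedef struct SketchObj SketchObj;

/* Called only after FORWARD_BIT has been observed: waits for the window
   between "claimed" and "pointer stored" to close, then returns the copy. */
static SketchObj *sketch_spin_get_fw(_Atomic uintptr_t *header)
{
  uintptr_t h = atomic_load(header);
  while((h & ~FORWARD_BIT) == 0){ /* claimed, but pointer not stored yet */
    sched_yield();                /* or a cpu pause/relax hint */
    h = atomic_load(header);
  }
  return (SketchObj *)(h & ~FORWARD_BIT);
}

The trade-off against the one-CAS variant sketched earlier: here nothing is copied on a lost race, since the winner copies only after claiming the object, but every loser pays for the spin loop while the winner finishes the copy and stores the pointer.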