/* Called only in non-minor collections. pointer_addr_in_pool indicates whether each
 * pool entry is the address of a reference slot (p_ref) or holds the object reference
 * itself (p_obj), in which case the pool slot is used as the reference slot.
 */
static void nondestructively_fix_finref_pool(GC *gc, Pool *pool, Boolean pointer_addr_in_pool, Boolean double_fix)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  REF *p_ref;
  Partial_Reveal_Object *p_obj;

  /* NOTE:: this iteration is nondestructive to the root sets. */
  pool_iterator_init(pool);
  Vector_Block *repset = pool_iterator_next(pool);
  while(repset){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(repset);
    for(; !vector_block_iterator_end(repset, iter); iter = vector_block_iterator_advance(repset, iter)){
      if(pointer_addr_in_pool)
        p_ref = (REF*)*iter;
      else
        p_ref = (REF*)iter;
      p_obj = read_slot(p_ref);

      if(collect_is_compact_move()){ /* covers both unique move-compact and major move-compact */
        move_compaction_update_ref(gc, p_ref);
      } else if(collect_is_ms_compact()){
        if(obj_is_fw_in_oi(p_obj))
          moving_mark_sweep_update_ref(gc, p_ref, double_fix);
      } else { /* major slide compact */
        assert(obj_is_marked_in_vt(p_obj) && obj_is_fw_in_oi(p_obj));
        write_slot(p_ref, obj_get_fw_in_oi(p_obj));
      }
    }
    repset = pool_iterator_next(pool);
  }
}
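/* The two entry conventions above are easy to conflate. A minimal sketch with
 * simplified types and hypothetical names (not the pool API used above): when the
 * flag is TRUE each pool word *is the address of* a reference slot living elsewhere;
 * when FALSE the pool word itself is the slot, so its own address is taken as p_ref.
 */
static void sketch_fix_entries(void **entries, unsigned int count, int addr_in_pool)
{
  for(unsigned int i = 0; i < count; i++){
    void **p_ref = addr_in_pool ? (void**)entries[i]  /* entry holds a slot address */
                                : &entries[i];        /* entry itself is the slot   */
    /* ...read *p_ref, compute the object's new location, write it back... */
    (void)p_ref;
  }
}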
static inline void fallback_update_fw_ref(REF *p_ref)
{
  assert(collect_is_fallback());

  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
    /* Objects forwarded by the aborted minor collection carry a forwarding
       pointer in their obj-info but are not marked in vt. */
    assert(!obj_is_marked_in_vt(p_obj));
    assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj)));
    p_obj = obj_get_fw_in_oi(p_obj);
    assert(p_obj);
    write_slot(p_ref, p_obj);
  }
}
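/* obj_is_fw_in_oi/obj_get_fw_in_oi test and strip a forward tag in the obj-info
 * header word. A minimal sketch of that kind of encoding, assuming a low-order tag
 * bit (SKETCH_FW_BIT and the layout here are illustrative, not the real header format):
 */
#include <stdint.h>

#define SKETCH_FW_BIT ((uintptr_t)0x1)

static int sketch_is_forwarded(uintptr_t obj_info)
{ return (obj_info & SKETCH_FW_BIT) != 0; }

static void *sketch_get_forwarding(uintptr_t obj_info)
{ return (void*)(obj_info & ~SKETCH_FW_BIT); } /* strip the tag to recover the address */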
void lspace_compute_object_target(Collector* collector, Lspace* lspace)
{
  void *dest_addr = lspace->heap_start;
  unsigned int iterate_index = 0;
  Partial_Reveal_Object *p_obj = lspace_get_first_marked_object(lspace, &iterate_index);

  assert(!collector->rem_set);
  collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
#ifdef USE_32BITS_HASHCODE
  collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata);
#endif

#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats *stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

  while( p_obj ){
    assert(obj_is_marked_in_vt(p_obj));
    unsigned int obj_size = vm_object_size(p_obj);
#ifdef GC_GEN_STATS
    gc_gen_collector_update_moved_los_obj_stats_major(stats, vm_object_size(p_obj));
#endif
    assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)lspace->heap_end);
#ifdef USE_32BITS_HASHCODE
    obj_size += hashcode_is_attached(p_obj) ? GC_OBJECT_ALIGNMENT : 0;
    Obj_Info_Type obj_info = slide_compact_process_hashcode(p_obj, dest_addr, &obj_size, collector, NULL, NULL);
#else
    Obj_Info_Type obj_info = get_obj_info_raw(p_obj);
#endif
    if( obj_info != 0 ){
      collector_remset_add_entry(collector, (Partial_Reveal_Object**)dest_addr);
      collector_remset_add_entry(collector, (Partial_Reveal_Object**)(POINTER_SIZE_INT)obj_info);
    }

    obj_set_fw_in_oi(p_obj, dest_addr);
    dest_addr = (void*)ALIGN_UP_TO_KILO((POINTER_SIZE_INT)dest_addr + obj_size);
    p_obj = lspace_get_next_marked_object(lspace, &iterate_index);
  }

  pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set);
  collector->rem_set = NULL;
#ifdef USE_32BITS_HASHCODE
  pool_put_entry(collector->gc->metadata->collector_hashcode_pool, collector->hashcode_set);
  collector->hashcode_set = NULL;
#endif

  lspace->scompact_fa_start = dest_addr;
  lspace->scompact_fa_end = lspace->heap_end;
  return;
}
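/* ALIGN_UP_TO_KILO rounds the free pointer up to the next 1KB boundary, so every
 * surviving large object starts kilobyte-aligned. A sketch assuming the usual
 * power-of-two mask formula (the real macro's definition may differ):
 */
#include <stdint.h>

static inline uintptr_t sketch_align_up_to_kilo(uintptr_t addr)
{
  return (addr + 1023) & ~(uintptr_t)1023;  /* e.g. 0x2401 -> 0x2800; 0x2800 stays 0x2800 */
}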
static void mspace_sliding_compact(Collector* collector, Mspace* mspace)
{
  void *start_pos;

  while(Partial_Reveal_Object *p_obj = get_next_first_src_obj(mspace)){
    Block_Header *src_block = GC_BLOCK_HEADER(p_obj);
    assert(src_block->dest_counter);

    Partial_Reveal_Object *p_target_obj = obj_get_fw_in_oi(p_obj);
    Block_Header *dest_block = GC_BLOCK_HEADER(p_target_obj);

    /* We don't set start_pos to p_obj, because the memmove of this object may
     * overlap its source; after the move we could no longer read the correct
     * vt and obj_info from the old address. */
#ifdef USE_32BITS_HASHCODE
    start_pos = obj_end_extend(p_obj);
#else
    start_pos = obj_end(p_obj);
#endif

    do {
      assert(obj_is_marked_in_vt(p_obj));
#ifdef USE_32BITS_HASHCODE
      obj_clear_dual_bits_in_vt(p_obj);
#else
      obj_unmark_in_vt(p_obj);
#endif

      unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj);
      if(p_obj != p_target_obj){
        assert((((POINTER_SIZE_INT)p_target_obj) % GC_OBJECT_ALIGNMENT) == 0);
        memmove(p_target_obj, p_obj, obj_size);
      }
      set_obj_info(p_target_obj, 0);

      p_obj = block_get_next_marked_obj_after_prefetch(src_block, &start_pos);
      if(!p_obj) break;
      p_target_obj = obj_get_fw_in_oi(p_obj);
    } while(GC_BLOCK_HEADER(p_target_obj) == dest_block);

    atomic_dec32(&src_block->dest_counter);
  }
}
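/* src_block->dest_counter is set during target computation to the number of distinct
 * destination blocks that receive objects from that source block; each pass above
 * evacuates the run of objects bound for one destination, then decrements the counter
 * once. A block whose counter reaches zero is fully evacuated. A minimal sketch of
 * the counting side using C11 atomics (types and names hypothetical; the real code
 * uses the VM's atomic_dec32):
 */
#include <stdatomic.h>

typedef struct SketchBlock {
  atomic_uint dest_counter;  /* destination blocks still expecting objects from this block */
} SketchBlock;

/* Finished evacuating the objects bound for one destination block. Returns nonzero
 * if this was the last outstanding destination, i.e. the block is now empty. */
static int sketch_finish_one_dest(SketchBlock *src)
{
  return atomic_fetch_sub(&src->dest_counter, 1) == 1;  /* fetch_sub returns the old value */
}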
void lspace_sliding_compact(Collector* collector, Lspace* lspace)
{
  unsigned int iterate_index = 0;
  POINTER_SIZE_INT last_one = (POINTER_SIZE_INT)lspace->heap_start;
  Partial_Reveal_Object *p_obj = lspace_get_first_marked_object(lspace, &iterate_index);

  if(!LOS_ADJUST_BOUNDARY)
    lspace->last_surviving_size = 0;
  if(!p_obj) return;

  while( p_obj ){
    assert(obj_is_marked_in_vt(p_obj));
#ifdef USE_32BITS_HASHCODE
    obj_clear_dual_bits_in_vt(p_obj);
#else
    obj_unmark_in_vt(p_obj);
#endif

    unsigned int obj_size = vm_object_size(p_obj);
#ifdef USE_32BITS_HASHCODE
    obj_size += obj_is_sethash_in_vt(p_obj) ? GC_OBJECT_ALIGNMENT : 0;
#endif
    Partial_Reveal_Object *p_target_obj = obj_get_fw_in_oi(p_obj);
    POINTER_SIZE_INT target_obj_end = (POINTER_SIZE_INT)p_target_obj + obj_size;
    last_one = target_obj_end;

    if( p_obj != p_target_obj )
      memmove(p_target_obj, p_obj, obj_size);

    set_obj_info(p_target_obj, 0);
    p_obj = lspace_get_next_marked_object(lspace, &iterate_index);
  }

  if(!LOS_ADJUST_BOUNDARY)
    lspace->last_surviving_size = ALIGN_UP_TO_KILO(last_one) - (POINTER_SIZE_INT)lspace->heap_start;
  return;
}
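/* In a sliding compaction targets never lie above their sources, so a copy may
 * overlap its own source region; memmove (unlike memcpy) is specified to handle
 * exactly that. A self-contained illustration of an overlapping slide:
 */
#include <string.h>
#include <assert.h>

static void sketch_overlapping_slide(void)
{
  char heap[8] = { 0, 0, 'a', 'b', 'c', 'd', 0, 0 };
  memmove(&heap[0], &heap[2], 4);  /* slide 4 live bytes down by 2; regions overlap */
  assert(memcmp(&heap[0], "abcd", 4) == 0);
}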
static void mspace_compute_object_target(Collector* collector, Mspace* mspace)
{
  Block_Header *curr_block = collector->cur_compact_block;
  Block_Header *dest_block = collector->cur_target_block;
  Block_Header *local_last_dest = dest_block;
  void *dest_addr = dest_block->base;
  Block_Header *last_src;

#ifdef USE_32BITS_HASHCODE
  Hashcode_Buf *old_hashcode_buf = NULL;
  Hashcode_Buf *new_hashcode_buf = hashcode_buf_create();
  hashcode_buf_init(new_hashcode_buf);
#endif

  assert(!collector->rem_set);
  collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
#ifdef USE_32BITS_HASHCODE
  collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata);
#endif

#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats *stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

  while( curr_block ){
    void *start_pos;
    Partial_Reveal_Object *first_obj = block_get_first_marked_obj_prefetch_next(curr_block, &start_pos);
    if(first_obj){
      ++curr_block->dest_counter;
      if(!dest_block->src)
        dest_block->src = first_obj;
      else
        last_src->next_src = first_obj;
      last_src = curr_block;
    }

    Partial_Reveal_Object *p_obj = first_obj;
    while( p_obj ){
      assert(obj_is_marked_in_vt(p_obj));

      unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj);
#ifdef GC_GEN_STATS
      gc_gen_collector_update_moved_nos_mos_obj_stats_major(stats, obj_size);
#endif

      Obj_Info_Type obj_info = get_obj_info(p_obj);

      unsigned int obj_size_precompute = obj_size;
#ifdef USE_32BITS_HASHCODE
      precompute_hashcode_extend_size(p_obj, dest_addr, &obj_size_precompute);
#endif

      if( ((POINTER_SIZE_INT)dest_addr + obj_size_precompute) > (POINTER_SIZE_INT)GC_BLOCK_END(dest_block) ){
#ifdef USE_32BITS_HASHCODE
        block_swap_hashcode_buf(dest_block, &new_hashcode_buf, &old_hashcode_buf);
#endif
        dest_block->new_free = dest_addr;
        dest_block = mspace_get_next_target_block(collector, mspace);
        if(dest_block == NULL){
          collector->result = FALSE;
          return;
        }
        if((!local_last_dest) || (dest_block->block_idx > local_last_dest->block_idx))
          local_last_dest = dest_block;
        dest_addr = dest_block->base;
        dest_block->src = p_obj;
        last_src = curr_block;
        if(p_obj != first_obj)
          ++curr_block->dest_counter;
      }
      assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)GC_BLOCK_END(dest_block));

#ifdef USE_32BITS_HASHCODE
      obj_info = slide_compact_process_hashcode(p_obj, dest_addr, &obj_size, collector, curr_block->hashcode_buf, new_hashcode_buf);
#endif

      if( obj_info != 0 ){
        collector_remset_add_entry(collector, (Partial_Reveal_Object**)dest_addr);
        collector_remset_add_entry(collector, (Partial_Reveal_Object**)(POINTER_SIZE_INT)obj_info);
      }

      obj_set_fw_in_oi(p_obj, dest_addr);

      /* FIXME: should use alloc to handle alignment requirement */
      dest_addr = (void*)((POINTER_SIZE_INT)dest_addr + obj_size);
      p_obj = block_get_next_marked_obj_prefetch_next(curr_block, &start_pos);
    }

#ifdef USE_32BITS_HASHCODE
    hashcode_buf_clear(curr_block->hashcode_buf);
#endif
    curr_block = mspace_get_next_compact_block(collector, mspace);
  }

#ifdef USE_32BITS_HASHCODE
  pool_put_entry(collector->gc->metadata->collector_hashcode_pool, collector->hashcode_set);
  collector->hashcode_set = NULL;
#endif
  pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set);
  collector->rem_set = NULL;

  dest_block->new_free = dest_addr;

  Block_Header *cur_last_dest = (Block_Header*)last_block_for_dest;
  collector->cur_target_block = local_last_dest;
  while((local_last_dest) && ((!cur_last_dest) || (local_last_dest->block_idx > cur_last_dest->block_idx))){
    atomic_casptr((volatile void**)&last_block_for_dest, local_last_dest, cur_last_dest);
    cur_last_dest = (Block_Header*)last_block_for_dest;
  }

#ifdef USE_32BITS_HASHCODE
  old_hashcode_buf = block_set_hashcode_buf(dest_block, new_hashcode_buf);
  hashcode_buf_destory(old_hashcode_buf);
#endif
  return;
}
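/* The loop above publishes this collector's highest destination block into the
 * global last_block_for_dest with a classic lock-free "advance the maximum" CAS
 * retry. A self-contained sketch of the same pattern using C11 atomics (the real
 * code uses the VM's atomic_casptr on a Block_Header pointer):
 */
#include <stdatomic.h>
#include <stdint.h>

static _Atomic uintptr_t sketch_global_max;

static void sketch_publish_max(uintptr_t mine)
{
  uintptr_t seen = atomic_load(&sketch_global_max);
  while(mine > seen){
    /* On failure, seen is reloaded with the current global value and we retry;
       we stop as soon as the global value is already >= ours. */
    if(atomic_compare_exchange_weak(&sketch_global_max, &seen, mine))
      break;
  }
}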
static void mspace_move_objects(Collector* collector, Mspace* mspace)
{
  Block_Header *curr_block = collector->cur_compact_block;
  Block_Header *dest_block = collector->cur_target_block;
  Block_Header *local_last_dest = dest_block;
  void *dest_sector_addr = dest_block->base;
  Boolean is_fallback = collect_is_fallback();

#ifdef USE_32BITS_HASHCODE
  Hashcode_Buf *old_hashcode_buf = NULL;
  Hashcode_Buf *new_hashcode_buf = hashcode_buf_create();
  hashcode_buf_init(new_hashcode_buf);
#endif

#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats *stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

  unsigned int debug_num_live_obj = 0;

  while( curr_block ){
    if(verify_live_heap){
      atomic_inc32(&debug_num_compact_blocks);
      debug_num_live_obj = 0;
    }

    void *start_pos;
    Partial_Reveal_Object *p_obj = block_get_first_marked_object(curr_block, &start_pos);

    if( !p_obj ){
#ifdef USE_32BITS_HASHCODE
      hashcode_buf_clear(curr_block->hashcode_buf);
#endif
      assert(!verify_live_heap || debug_num_live_obj == curr_block->num_live_objs);
      curr_block = mspace_get_next_compact_block(collector, mspace);
      continue;
    }

    int curr_sector = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj);
    void *src_sector_addr = p_obj;

    while( p_obj ){
      debug_num_live_obj++;
      assert(obj_is_marked_in_vt(p_obj));
      /* we don't check whether the bits are set, since only non-forwarded objects
         from the last NOS partial-forward collection need the clearing. */
      obj_clear_dual_bits_in_oi(p_obj);
#ifdef GC_GEN_STATS
      gc_gen_collector_update_moved_nos_mos_obj_stats_major(stats, vm_object_size(p_obj));
#endif
#ifdef USE_32BITS_HASHCODE
      move_compact_process_hashcode(p_obj, curr_block->hashcode_buf, new_hashcode_buf);
#endif

      POINTER_SIZE_INT curr_sector_size = (POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)src_sector_addr;

      /* check whether the dest block has enough room for this sector; if not, grab the next one */
      POINTER_SIZE_INT block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block);
      if( ((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) > block_end ){
        dest_block->new_free = dest_sector_addr;
#ifdef USE_32BITS_HASHCODE
        block_swap_hashcode_buf(dest_block, &new_hashcode_buf, &old_hashcode_buf);
#endif
        dest_block = mspace_get_next_target_block(collector, mspace);
        if(dest_block == NULL){
#ifdef USE_32BITS_HASHCODE
          hashcode_buf_rollback_new_entry(old_hashcode_buf);
#endif
          collector->result = FALSE;
          return;
        }
#ifdef USE_32BITS_HASHCODE
        hashcode_buf_transfer_new_entry(old_hashcode_buf, new_hashcode_buf);
#endif
        if((!local_last_dest) || (dest_block->block_idx > local_last_dest->block_idx))
          local_last_dest = dest_block;
        block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block);
        dest_sector_addr = dest_block->base;
      }
      assert(((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) <= block_end);

      Partial_Reveal_Object *last_obj_end = (Partial_Reveal_Object*)start_pos;
      /* check whether the next live object is outside the current sector. If it is
         still inside, loop back to extend the sector.
         FIXME:: should we also add a condition for a block-boundary check? */
      p_obj = block_get_next_marked_object(curr_block, &start_pos);
      if((p_obj != NULL) && (OBJECT_INDEX_TO_OFFSET_TABLE(p_obj) == curr_sector)){
        if(last_obj_end != p_obj)
          obj_set_vt_to_next_obj(last_obj_end, p_obj);
        continue;
      }

      /* current sector is done, let's move it. */
      POINTER_SIZE_INT sector_distance = (POINTER_SIZE_INT)src_sector_addr - (POINTER_SIZE_INT)dest_sector_addr;
      assert((sector_distance % GC_OBJECT_ALIGNMENT) == 0);
      /* if sector_distance is zero, we don't move anything. But since the block
         offset table is never cleaned, we still have to record the zero distance. */
      curr_block->table[curr_sector] = sector_distance;

      if(sector_distance != 0)
        memmove(dest_sector_addr, src_sector_addr, curr_sector_size);
#ifdef USE_32BITS_HASHCODE
      hashcode_buf_refresh_new_entry(new_hashcode_buf, sector_distance);
#endif

      dest_sector_addr = (void*)((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size);
      src_sector_addr = p_obj;
      curr_sector = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj);
    }
#ifdef USE_32BITS_HASHCODE
    hashcode_buf_clear(curr_block->hashcode_buf);
#endif
    assert(!verify_live_heap || debug_num_live_obj == curr_block->num_live_objs);
    curr_block = mspace_get_next_compact_block(collector, mspace);
  }

  dest_block->new_free = dest_sector_addr;
  collector->cur_target_block = local_last_dest;

#ifdef USE_32BITS_HASHCODE
  old_hashcode_buf = block_set_hashcode_buf(dest_block, new_hashcode_buf);
  hashcode_buf_destory(old_hashcode_buf);
#endif
  return;
}
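/* The per-sector distances recorded in curr_block->table are what a later
 * ref-fixing pass (e.g. move_compaction_update_ref above) can consume: a stale
 * reference is repaired by subtracting the recorded distance of its sector.
 * A minimal sketch with simplified types; the sector granularity, table layout,
 * and names here are hypothetical stand-ins for OBJECT_INDEX_TO_OFFSET_TABLE:
 */
#include <stdint.h>

#define SKETCH_SECTOR_SIZE 256  /* illustrative; the real sector size differs */

static void *sketch_update_ref(void *block_base, uintptr_t *table, void *stale_ref)
{
  uintptr_t offset = (uintptr_t)stale_ref - (uintptr_t)block_base;
  uintptr_t sector = offset / SKETCH_SECTOR_SIZE;        /* sector index of the old address */
  return (void*)((uintptr_t)stale_ref - table[sector]);  /* slide back by the recorded distance */
}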