/* Compute the slide-compaction target address for every marked object in the
 * large-object space (Lspace).
 *
 * Walks the marked objects in address order, assigns each one a forwarding
 * address packed downward from lspace->heap_start (KB-aligned), and records
 * the object's original obj-info word in the collector's remember set when it
 * is nonzero, since installing the forwarding pointer overwrites that word.
 * On exit, [scompact_fa_start, scompact_fa_end) is the free tail of the space
 * left after all survivors are compacted.
 *
 * Pre-condition: collector->rem_set is NULL (asserted); a fresh buffer is
 * taken from the metadata free-set pool and returned to the remset pool at
 * the end.
 */
void lspace_compute_object_target(Collector* collector, Lspace* lspace)
{
  void* dest_addr = lspace->heap_start;
  unsigned int iterate_index = 0;
  Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &iterate_index);

  /* This collector must not already own a remset buffer; grab a fresh one. */
  assert(!collector->rem_set);
  collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
#ifdef USE_32BITS_HASHCODE
  /* Separate buffer for objects whose identity hashcode must be preserved. */
  collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata);
#endif
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

  while( p_obj ){
    assert( obj_is_marked_in_vt(p_obj));
    unsigned int obj_size = vm_object_size(p_obj);
#ifdef GC_GEN_STATS
    gc_gen_collector_update_moved_los_obj_stats_major(stats, vm_object_size(p_obj));
#endif
    /* Survivors are packed from heap_start, so they must always fit. */
    assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)lspace->heap_end);
#ifdef USE_32BITS_HASHCODE
    /* An attached hashcode occupies one extra alignment slot behind the object. */
    obj_size += hashcode_is_attached(p_obj)? GC_OBJECT_ALIGNMENT : 0 ;
    Obj_Info_Type obj_info = slide_compact_process_hashcode(p_obj, dest_addr, &obj_size, collector, null, null);
#else
    Obj_Info_Type obj_info = get_obj_info_raw(p_obj);
#endif

    /* The forwarding pointer clobbers the obj-info word, so a nonzero value
       must be saved (as a dest_addr/obj_info pair) for restoration later. */
    if( obj_info != 0 ) {
      collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
      collector_remset_add_entry(collector, (Partial_Reveal_Object **)(POINTER_SIZE_INT)obj_info);
    }

    /* Install the forwarding address, then advance the KB-aligned cursor. */
    obj_set_fw_in_oi(p_obj, dest_addr);
    dest_addr = (void *)ALIGN_UP_TO_KILO(((POINTER_SIZE_INT) dest_addr + obj_size));
    p_obj = lspace_get_next_marked_object(lspace, &iterate_index);
  }

  /* Hand the filled remset buffer back to the shared pool. */
  pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set);
  collector->rem_set = NULL;
#ifdef USE_32BITS_HASHCODE
  pool_put_entry(collector->gc->metadata->collector_hashcode_pool, collector->hashcode_set);
  collector->hashcode_set = NULL;
#endif

  /* Everything from dest_addr up to heap_end is free after compaction. */
  lspace->scompact_fa_start = dest_addr;
  lspace->scompact_fa_end= lspace->heap_end;
  return;
}
/* Return the next live object at or after the cursor in the large-object
 * space, skipping over free areas, or NULL when the space is exhausted.
 *
 * next_area_start is an in/out cursor (passed by reference): on return it
 * points just past the returned object, KB-aligned, ready for the next call.
 * A free area is recognized by a zero first word and is skipped using its
 * recorded Free_Area size.
 */
inline Partial_Reveal_Object* lspace_get_next_object( Space* lspace, POINTER_SIZE_INT* & next_area_start)
{
  POINTER_SIZE_INT space_end = (POINTER_SIZE_INT)lspace->heap_end;

  /* Hop over consecutive free areas: each begins with a zero word and
     carries its own size. */
  while( ((POINTER_SIZE_INT)next_area_start < space_end) && !*next_area_start ){
    POINTER_SIZE_INT area_size = ((Free_Area*)next_area_start)->size;
    next_area_start = (POINTER_SIZE_INT*)((POINTER_SIZE_INT)next_area_start + area_size);
  }

  /* Cursor ran past the end of the space: no more objects. */
  if( (POINTER_SIZE_INT)next_area_start >= space_end )
    return NULL;

  Partial_Reveal_Object* cur_obj = (Partial_Reveal_Object*)next_area_start;

  unsigned int hash_extend_size = 0;
#ifdef USE_32BITS_HASHCODE
  /* An attached identity hashcode adds one alignment slot to the footprint. */
  hash_extend_size = hashcode_is_attached(cur_obj) ? GC_OBJECT_ALIGNMENT : 0;
#endif

  POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size(cur_obj) + hash_extend_size);
  assert(obj_size);

  /* Advance the cursor just past this object for the next call. */
  next_area_start = (POINTER_SIZE_INT*)((POINTER_SIZE_INT)next_area_start + obj_size);
  return cur_obj;
}
/* Sweep the large-object space after a mark phase.
 *
 * Walks the marked (live) objects in address order, clears each one's mark
 * bits, accumulates survivor-size statistics, and rebuilds the free-area
 * pool from the KB-aligned gaps between consecutive live objects, including
 * the final gap up to heap_end. Post-condition (asserted): no marked object
 * remains in the space.
 */
void lspace_sweep(Lspace* lspace)
{
  TRACE2("gc.process", "GC: lspace sweep algo start ...\n");

#ifdef GC_GEN_STATS
  GC_Gen_Stats* stats = ((GC_Gen*)lspace->gc)->stats;
  gc_gen_stats_set_los_collected_flag((GC_Gen*)lspace->gc, true);
#endif
  unsigned int mark_bit_idx = 0;
  POINTER_SIZE_INT cur_size = 0;
  void *cur_area_start, *cur_area_end;

  free_area_pool_reset(lspace->free_pool);

  /* p_prev_obj starts at heap_start so the gap before the first live object
     (if any) is also turned into a free area. */
  Partial_Reveal_Object* p_prev_obj = (Partial_Reveal_Object *)lspace->heap_start;
  Partial_Reveal_Object* p_next_obj = lspace_get_first_marked_object_by_oi(lspace, &mark_bit_idx);
  if(p_next_obj){
//    obj_unmark_in_vt(p_next_obj);
    /* Fixme: This might not be necessary, for there is a bit clearing
       operation in forward_object->obj_mark_in_oi */
    obj_clear_dual_bits_in_oi(p_next_obj);
    /* For_statistic: sum up the size of survived large objects, useful to
       decide LOS extension. */
    unsigned int obj_size = vm_object_size(p_next_obj);
#ifdef USE_32BITS_HASHCODE
    obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0;
#endif
    lspace->last_surviving_size += ALIGN_UP_TO_KILO(obj_size);
#ifdef GC_GEN_STATS
    stats->los_suviving_obj_num++;
    stats->los_suviving_obj_size += obj_size;
#endif
  }

  /* Candidate free area: from just past the previous object (rounded up to
     KB) to the start of the next live object (rounded down to KB). */
  cur_area_start = (void*)ALIGN_UP_TO_KILO(p_prev_obj);
  cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj);
  unsigned int hash_extend_size = 0;

  Free_Area* cur_area = NULL;
  while(cur_area_end){
    cur_area = NULL;
    cur_size = (POINTER_SIZE_INT)cur_area_end - (POINTER_SIZE_INT)cur_area_start;
    if(cur_size){
      //debug
      assert(cur_size >= KB);
      cur_area = free_area_new(cur_area_start, cur_size);
      if( cur_area ) free_pool_add_area(lspace->free_pool, cur_area);
    }
    /* successfully create an area */

    /* Advance to the next live object; unmark it and count it as survivor. */
    p_prev_obj = p_next_obj;
    p_next_obj = lspace_get_next_marked_object_by_oi(lspace, &mark_bit_idx);
    if(p_next_obj){
//      obj_unmark_in_vt(p_next_obj);
      obj_clear_dual_bits_in_oi(p_next_obj);
      /* For_statistic: sum up the size of survived large objects, useful to
         decide LOS extension. */
      unsigned int obj_size = vm_object_size(p_next_obj);
#ifdef USE_32BITS_HASHCODE
      obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0;
#endif
      lspace->last_surviving_size += ALIGN_UP_TO_KILO(obj_size);
#ifdef GC_GEN_STATS
      stats->los_suviving_obj_num++;
      stats->los_suviving_obj_size += obj_size;
#endif
    }

#ifdef USE_32BITS_HASHCODE
    /* Account for an attached hashcode when stepping past p_prev_obj. */
    hash_extend_size = (hashcode_is_attached((Partial_Reveal_Object*)p_prev_obj))?GC_OBJECT_ALIGNMENT:0;
#endif
    cur_area_start = (void*)ALIGN_UP_TO_KILO((POINTER_SIZE_INT)p_prev_obj + vm_object_size(p_prev_obj) + hash_extend_size);
    cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj);
  }

  /* cur_area_end == NULL: the tail of the space after the last live object
     becomes the final free area. */
  cur_area_end = (void*)ALIGN_DOWN_TO_KILO(lspace->heap_end);
  cur_size = (POINTER_SIZE_INT)cur_area_end - (POINTER_SIZE_INT)cur_area_start;
  if(cur_size){
    //debug
    assert(cur_size >= KB);
    cur_area = free_area_new(cur_area_start, cur_size);
    if( cur_area ) free_pool_add_area(lspace->free_pool, cur_area);
  }

  /* Sanity check: the sweep must have unmarked every object. */
  mark_bit_idx = 0;
  assert(!lspace_get_first_marked_object(lspace, &mark_bit_idx));

  TRACE2("gc.process", "GC: end of lspace sweep algo ...\n");
  return;
}