Example #1
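
/*
 * Sweep the large object space (lspace): walk the marked (live) large objects
 * in address order, clear their mark bits, accumulate surviving-size
 * statistics, and register every kilobyte-aligned gap between consecutive
 * live objects as a Free_Area in the space's free pool.
 */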
void lspace_sweep(Lspace* lspace)
{
  TRACE2("gc.process", "GC: lspace sweep algo start ...\n");

#ifdef GC_GEN_STATS
  GC_Gen_Stats* stats = ((GC_Gen*)lspace->gc)->stats;
  gc_gen_stats_set_los_collected_flag((GC_Gen*)lspace->gc, true);
#endif
  unsigned int mark_bit_idx = 0;
  POINTER_SIZE_INT cur_size = 0;
  void *cur_area_start, *cur_area_end;

  free_area_pool_reset(lspace->free_pool);

  Partial_Reveal_Object* p_prev_obj = (Partial_Reveal_Object *)lspace->heap_start;
  Partial_Reveal_Object* p_next_obj = lspace_get_first_marked_object_by_oi(lspace, &mark_bit_idx);
  if(p_next_obj){
//    obj_unmark_in_vt(p_next_obj);
    /* FIXME: this might be unnecessary, since there is a bit-clearing
       operation in forward_object->obj_mark_in_oi. */
    obj_clear_dual_bits_in_oi(p_next_obj);
    /* For statistics: sum up the sizes of surviving large objects; useful for deciding LOS extension. */
    unsigned int obj_size = vm_object_size(p_next_obj);
#ifdef USE_32BITS_HASHCODE
    obj_size += hashcode_is_attached(p_next_obj) ? GC_OBJECT_ALIGNMENT : 0;
#endif
    lspace->last_surviving_size += ALIGN_UP_TO_KILO(obj_size);
#ifdef GC_GEN_STATS
    stats->los_suviving_obj_num++;
    stats->los_suviving_obj_size += obj_size;
#endif
  }

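  /* ALIGN_UP_TO_KILO / ALIGN_DOWN_TO_KILO round an address to a kilobyte
   * boundary. A minimal sketch of plausible definitions, assuming KB is a
   * power-of-two constant (illustrative assumptions, not the project's
   * actual macros):
   *
   *   #define ALIGN_UP_TO_KILO(addr)   ((((POINTER_SIZE_INT)(addr)) + KB - 1) & ~(POINTER_SIZE_INT)(KB - 1))
   *   #define ALIGN_DOWN_TO_KILO(addr) (((POINTER_SIZE_INT)(addr)) & ~(POINTER_SIZE_INT)(KB - 1))
   */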
  cur_area_start = (void*)ALIGN_UP_TO_KILO(p_prev_obj);
  cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj);
  unsigned int hash_extend_size = 0;

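  /* Main sweep loop: each iteration turns the gap between the previous live
     object's aligned end and the next live object's aligned start into a
     free area, then advances to the next marked object. */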
  Free_Area* cur_area = NULL;
  while(cur_area_end){
    cur_area = NULL;
    cur_size = (POINTER_SIZE_INT)cur_area_end - (POINTER_SIZE_INT)cur_area_start;
      
    if(cur_size){
      assert(cur_size >= KB); /* kilobyte-aligned gaps are at least 1KB */
      cur_area = free_area_new(cur_area_start, cur_size);
      /* free_area_new() can fail; only successfully created areas go into the pool */
      if( cur_area ) free_pool_add_area(lspace->free_pool, cur_area);
    }

    p_prev_obj = p_next_obj;
    p_next_obj = lspace_get_next_marked_object_by_oi(lspace, &mark_bit_idx);
    if(p_next_obj){
//      obj_unmark_in_vt(p_next_obj);
      obj_clear_dual_bits_in_oi(p_next_obj);
      /* For statistics: sum up the sizes of surviving large objects; useful for deciding LOS extension. */
      unsigned int obj_size = vm_object_size(p_next_obj);
#ifdef USE_32BITS_HASHCODE
      obj_size += hashcode_is_attached(p_next_obj) ? GC_OBJECT_ALIGNMENT : 0;
#endif
      lspace->last_surviving_size += ALIGN_UP_TO_KILO(obj_size);
#ifdef GC_GEN_STATS
      stats->los_suviving_obj_num++;
      stats->los_suviving_obj_size += obj_size;
#endif
    }

#ifdef USE_32BITS_HASHCODE
    hash_extend_size = hashcode_is_attached(p_prev_obj) ? GC_OBJECT_ALIGNMENT : 0;
#endif
    cur_area_start = (void*)ALIGN_UP_TO_KILO((POINTER_SIZE_INT)p_prev_obj + vm_object_size(p_prev_obj) + hash_extend_size);
    cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj);
    
  }

  /* p_next_obj was NULL (so cur_area_end == NULL): the tail of the space,
     from the last live object up to heap_end, forms the final free area. */
  cur_area_end = (void*)ALIGN_DOWN_TO_KILO(lspace->heap_end);
  cur_size = (POINTER_SIZE_INT)cur_area_end - (POINTER_SIZE_INT)cur_area_start;
  if(cur_size){
    assert(cur_size >= KB); /* kilobyte-aligned gaps are at least 1KB */
    cur_area = free_area_new(cur_area_start, cur_size);
    if( cur_area ) free_pool_add_area(lspace->free_pool, cur_area);
  }

  mark_bit_idx = 0;
  assert(!lspace_get_first_marked_object(lspace, &mark_bit_idx));

  TRACE2("gc.process", "GC: end of lspace sweep algo ...\n");
  return;
}
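
/*
 * Move-compact phase for the mature space (mspace): live objects are grouped
 * into "sectors" (runs of consecutive live objects whose addresses map to the
 * same offset-table slot). Each completed sector is slid as a whole into the
 * current destination block with memmove, and its displacement is recorded in
 * the block's offset table so that references can be fixed up in a later pass.
 */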
static void mspace_move_objects(Collector* collector, Mspace* mspace) 
{
  Block_Header* curr_block = collector->cur_compact_block;
  Block_Header* dest_block = collector->cur_target_block;
  Block_Header *local_last_dest = dest_block;

  void* dest_sector_addr = dest_block->base;
  Boolean is_fallback = collect_is_fallback();
  
#ifdef USE_32BITS_HASHCODE
  Hashcode_Buf* old_hashcode_buf = NULL;
  Hashcode_Buf* new_hashcode_buf = hashcode_buf_create();
  hashcode_buf_init(new_hashcode_buf);
#endif  

#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

  unsigned int debug_num_live_obj = 0;

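  /* Walk the compact blocks assigned to this collector. */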
  while( curr_block ){

    if(verify_live_heap){ 
      atomic_inc32(&debug_num_compact_blocks);
      debug_num_live_obj = 0;
    }
    
    void* start_pos;
    Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &start_pos);

    if( !p_obj ){
#ifdef USE_32BITS_HASHCODE
      hashcode_buf_clear(curr_block->hashcode_buf);
#endif
      assert(!verify_live_heap || debug_num_live_obj == curr_block->num_live_objs);
      curr_block = mspace_get_next_compact_block(collector, mspace);
      continue;    
    }
    
    int curr_sector = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj);
    void* src_sector_addr = p_obj;
          
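    /* Scan this block's live objects: runs that share an offset-table slot are
       treated as one sector (gaps inside a sector are bridged with
       obj_set_vt_to_next_obj); once the next live object maps to a different
       slot, the completed sector is moved below. */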
    while( p_obj ){
      debug_num_live_obj++;
      assert( obj_is_marked_in_vt(p_obj));
      /* We don't check whether the bits are set: only non-forwarded objects
         left over from the last NOS partial-forward collection need clearing. */
      obj_clear_dual_bits_in_oi(p_obj); 

#ifdef GC_GEN_STATS
      gc_gen_collector_update_moved_nos_mos_obj_stats_major(stats, vm_object_size(p_obj));
#endif

#ifdef USE_32BITS_HASHCODE
      move_compact_process_hashcode(p_obj, curr_block->hashcode_buf, new_hashcode_buf);
#endif 
      
      POINTER_SIZE_INT curr_sector_size = (POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)src_sector_addr;

      /* Check whether the destination block has room for this sector; if not, grab the next target block. */
      POINTER_SIZE_INT block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block);
      if( ((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) > block_end ){
        dest_block->new_free = dest_sector_addr; 
#ifdef USE_32BITS_HASHCODE
        block_swap_hashcode_buf(dest_block, &new_hashcode_buf, &old_hashcode_buf);
#endif        
        dest_block = mspace_get_next_target_block(collector, mspace);
        if(dest_block == NULL){ 
#ifdef USE_32BITS_HASHCODE
          hashcode_buf_rollback_new_entry(old_hashcode_buf);
#endif
          collector->result = FALSE; 
          return; 
        }
#ifdef USE_32BITS_HASHCODE
        hashcode_buf_transfer_new_entry(old_hashcode_buf, new_hashcode_buf);
#endif 
        if((!local_last_dest) || (dest_block->block_idx > local_last_dest->block_idx))
          local_last_dest = dest_block;
        block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block);
        dest_sector_addr = dest_block->base;
      }
        
      assert(((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) <= block_end );

      Partial_Reveal_Object *last_obj_end = (Partial_Reveal_Object *)start_pos;
      /* Check whether the next live object falls outside the current sector. If it
         is still inside, loop back and keep extending this sector. FIXME: we may
         also need a block-boundary check here. */
      p_obj =  block_get_next_marked_object(curr_block, &start_pos);
      if ((p_obj != NULL) && (OBJECT_INDEX_TO_OFFSET_TABLE(p_obj) == curr_sector)) {
        if(last_obj_end != p_obj) obj_set_vt_to_next_obj(last_obj_end, p_obj);
        continue;
      }

      /* current sector is done, let's move it. */
      POINTER_SIZE_INT sector_distance = (POINTER_SIZE_INT)src_sector_addr - (POINTER_SIZE_INT)dest_sector_addr;
      assert((sector_distance % GC_OBJECT_ALIGNMENT) == 0);
      /* If sector_distance is zero there is nothing to move, but since the block
         offset table is never cleared we still have to record the 0. */
      curr_block->table[curr_sector] = sector_distance;

      if(sector_distance != 0) 
        memmove(dest_sector_addr, src_sector_addr, curr_sector_size);

#ifdef USE_32BITS_HASHCODE
      hashcode_buf_refresh_new_entry(new_hashcode_buf, sector_distance);
#endif

      dest_sector_addr = (void*)((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size);
      src_sector_addr = p_obj;
      curr_sector = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj);
    }
#ifdef USE_32BITS_HASHCODE
    hashcode_buf_clear(curr_block->hashcode_buf);
#endif
    assert(!verify_live_heap || debug_num_live_obj == curr_block->num_live_objs);
    curr_block = mspace_get_next_compact_block(collector, mspace);
  }
    
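  /* Record the final fill pointer of the last destination block and remember
     the highest-indexed destination block this collector reached. */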
  dest_block->new_free = dest_sector_addr;
  collector->cur_target_block = local_last_dest;
 
#ifdef USE_32BITS_HASHCODE
  old_hashcode_buf = block_set_hashcode_buf(dest_block, new_hashcode_buf);
  hashcode_buf_destory(old_hashcode_buf);
#endif
  return;
}