/* Example 1 */
/* Forwarding-address pass of the parallel sliding compaction: walk every
 * marked object in this collector's share of compact (source) blocks and
 * pre-compute the address it will occupy after compaction, storing that
 * address as a forwarding pointer in the object header.  No object data is
 * moved here; the copy happens in a later pass.
 *
 * Bookkeeping maintained for the later move pass:
 *   - curr_block->dest_counter: number of target blocks that will receive
 *     objects from this source block.
 *   - dest_block->src / last_src->next_src: chains the source-object runs
 *     that feed each target block.
 *   - Non-zero obj_info words that would be clobbered by the forwarding
 *     pointer are saved (dest address + original value) into
 *     collector->rem_set, and hashcode state into collector->hashcode_set
 *     when USE_32BITS_HASHCODE is on.
 *
 * On target-block exhaustion, sets collector->result = FALSE and returns
 * early so the caller can fall back to another collection policy. */
static void mspace_compute_object_target(Collector* collector, Mspace* mspace)
{
    Block_Header *curr_block = collector->cur_compact_block;
    Block_Header *dest_block = collector->cur_target_block;
    /* Highest-indexed target block this collector touched; published to the
       global last_block_for_dest via CAS at the end of the function. */
    Block_Header *local_last_dest = dest_block;
    void *dest_addr = dest_block->base;
    /* Most recent source block feeding dest_block (tail of the src chain).
       NOTE(review): read at "last_src->next_src = first_obj" before any
       assignment in the case where dest_block->src is already non-NULL on
       entry -- presumably the caller guarantees a fresh target block
       (src == NULL) at that point; confirm, otherwise this is an
       uninitialized read. */
    Block_Header *last_src;

#ifdef USE_32BITS_HASHCODE
    /* Buffers for carrying object hashcodes across the move (32-bit objects
       have no spare header bits to store the hash inline). */
    Hashcode_Buf* old_hashcode_buf = NULL;
    Hashcode_Buf* new_hashcode_buf = hashcode_buf_create();
    hashcode_buf_init(new_hashcode_buf);
#endif

    /* Grab fresh per-collector sets for saved obj_info (and hashcode) entries. */
    assert(!collector->rem_set);
    collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
#ifdef USE_32BITS_HASHCODE
    collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata);
#endif

#ifdef GC_GEN_STATS
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

    /* Outer loop: one iteration per source block assigned to this collector. */
    while( curr_block ) {
        void* start_pos;
        /* first_obj = first marked object; start_pos is set to its end,
           so (start_pos - p_obj) below is the object's size. */
        Partial_Reveal_Object *first_obj = block_get_first_marked_obj_prefetch_next(curr_block, &start_pos);
        if(first_obj) {
            /* This source block will feed at least one target block;
               link its first live object into dest_block's source chain. */
            ++curr_block->dest_counter;
            if(!dest_block->src)
                dest_block->src = first_obj;
            else
                last_src->next_src = first_obj;
            last_src = curr_block;
        }
        Partial_Reveal_Object* p_obj = first_obj;

        /* Inner loop: assign a target address to each marked object. */
        while( p_obj ) {
            assert( obj_is_marked_in_vt(p_obj));

            unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj);


#ifdef GC_GEN_STATS
            gc_gen_collector_update_moved_nos_mos_obj_stats_major(stats, obj_size);
#endif

            Obj_Info_Type obj_info = get_obj_info(p_obj);

            /* Size the object will have at its destination; may grow if a
               hashcode slot must be appended (32-bit hashcode mode). */
            unsigned int obj_size_precompute = obj_size;

#ifdef USE_32BITS_HASHCODE
            precompute_hashcode_extend_size(p_obj, dest_addr, &obj_size_precompute);
#endif
            /* Target block full: seal it and advance to the next one. */
            if( ((POINTER_SIZE_INT)dest_addr + obj_size_precompute) > (POINTER_SIZE_INT)GC_BLOCK_END(dest_block)) {
#ifdef USE_32BITS_HASHCODE
                block_swap_hashcode_buf(dest_block, &new_hashcode_buf, &old_hashcode_buf);
#endif
                /* Record the final fill level of the sealed target block. */
                dest_block->new_free = dest_addr;
                dest_block = mspace_get_next_target_block(collector, mspace);
                if(dest_block == NULL) {
                    /* Out of target blocks: report failure so the caller can
                       fall back.  NOTE(review): the rem_set/hashcode_set
                       entries acquired above are not returned to the pool on
                       this path -- presumably the fallback path reclaims
                       them; confirm. */
                    collector->result = FALSE;
                    return;
                }
                if((!local_last_dest) || (dest_block->block_idx > local_last_dest->block_idx))
                    local_last_dest = dest_block;
                dest_addr = dest_block->base;
                /* The new target block starts receiving from the current
                   object onward; re-chain and, if this source block now
                   feeds one more target block, bump its dest_counter. */
                dest_block->src = p_obj;
                last_src = curr_block;
                if(p_obj != first_obj)
                    ++curr_block->dest_counter;
            }
            assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)GC_BLOCK_END(dest_block));

#ifdef USE_32BITS_HASHCODE
            /* May rewrite obj_info and enlarge obj_size to make room for an
               attached hashcode at the destination. */
            obj_info = slide_compact_process_hashcode(p_obj, dest_addr, &obj_size, collector,curr_block->hashcode_buf, new_hashcode_buf);
#endif

            /* The forwarding pointer overwrites the obj_info header word;
               save (dest slot, original value) pairs so it can be restored
               after the move. */
            if( obj_info != 0 ) {
                collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
                collector_remset_add_entry(collector, (Partial_Reveal_Object **)(POINTER_SIZE_INT)obj_info);
            }

            /* Publish the computed target address in the object header. */
            obj_set_fw_in_oi(p_obj, dest_addr);

            /* FIXME: should use alloc to handle alignment requirement */
            dest_addr = (void *)((POINTER_SIZE_INT) dest_addr + obj_size);
            p_obj = block_get_next_marked_obj_prefetch_next(curr_block, &start_pos);
        }
#ifdef USE_32BITS_HASHCODE
        hashcode_buf_clear(curr_block->hashcode_buf);
#endif
        curr_block = mspace_get_next_compact_block(collector, mspace);

    }

    /* Done: return the (unfinished) saved-entry sets to their pools. */
#ifdef USE_32BITS_HASHCODE
    pool_put_entry(collector->gc->metadata->collector_hashcode_pool, collector->hashcode_set);
    collector->hashcode_set = NULL;
#endif
    pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set);
    collector->rem_set = NULL;
    dest_block->new_free = dest_addr;

    /* Lock-free publication: advance the shared last_block_for_dest to this
       collector's highest target block, retrying the CAS until either it
       succeeds or another collector has published a higher block index. */
    Block_Header *cur_last_dest = (Block_Header *)last_block_for_dest;
    collector->cur_target_block = local_last_dest;
    while((local_last_dest)&&((!cur_last_dest) || (local_last_dest->block_idx > cur_last_dest->block_idx))) {
        atomic_casptr((volatile void **)&last_block_for_dest, local_last_dest, cur_last_dest);
        cur_last_dest = (Block_Header *)last_block_for_dest;
    }

#ifdef USE_32BITS_HASHCODE
    /* Attach the accumulated hashcode buffer to the last target block and
       free the one it previously held. */
    old_hashcode_buf = block_set_hashcode_buf(dest_block, new_hashcode_buf);
    hashcode_buf_destory(old_hashcode_buf);
#endif
    return;
}
/* Move pass of the compaction: slide live objects toward the start of the
 * space, a "sector" at a time.  A sector is a maximal run of consecutive
 * marked objects that share one offset-table slot; the whole run is moved
 * with a single memmove and its slide distance is recorded in
 * curr_block->table[sector] so references can later be fixed by
 * subtracting the per-sector offset.
 *
 * Also: clears dual mark bits in each object header, maintains per-block
 * hashcode buffers (USE_32BITS_HASHCODE), tracks live-object counts for
 * heap verification, and sets collector->result = FALSE on target-block
 * exhaustion. */
static void mspace_move_objects(Collector* collector, Mspace* mspace) 
{
  Block_Header* curr_block = collector->cur_compact_block;
  Block_Header* dest_block = collector->cur_target_block;
  /* Highest-indexed target block this collector used; becomes the
     collector's cur_target_block at the end. */
  Block_Header *local_last_dest = dest_block;

  void* dest_sector_addr = dest_block->base;
  /* NOTE(review): is_fallback is computed but not referenced anywhere in
     this function body as shown -- possibly dead, or used by code compiled
     under a configuration not visible here; confirm before removing. */
  Boolean is_fallback = collect_is_fallback();
  
#ifdef USE_32BITS_HASHCODE
  /* Buffers carrying object hashcodes across the move (32-bit objects keep
     hashes out-of-line). */
  Hashcode_Buf* old_hashcode_buf = NULL;
  Hashcode_Buf* new_hashcode_buf = hashcode_buf_create();
  hashcode_buf_init(new_hashcode_buf);
#endif  

#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

  /* Per-source-block live-object count, checked against the block's
     recorded num_live_objs when heap verification is enabled. */
  unsigned int debug_num_live_obj = 0;

  /* Outer loop: one iteration per source block assigned to this collector. */
  while( curr_block ){

    if(verify_live_heap){ 
      atomic_inc32(&debug_num_compact_blocks);
      debug_num_live_obj = 0;
    }
    
    void* start_pos;
    /* First marked object; start_pos is set to its end. */
    Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &start_pos);

    if( !p_obj ){
      /* Block has no live objects: nothing to move, go to the next one. */
 #ifdef USE_32BITS_HASHCODE      
      hashcode_buf_clear(curr_block->hashcode_buf);
 #endif
      assert(!verify_live_heap ||debug_num_live_obj == curr_block->num_live_objs);
      curr_block = mspace_get_next_compact_block(collector, mspace);
      continue;    
    }
    
    /* Start a new sector at the first live object. */
    int curr_sector = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj);
    void* src_sector_addr = p_obj;
          
    while( p_obj ){
      debug_num_live_obj++;
      assert( obj_is_marked_in_vt(p_obj));
      /* we don't check if it's set, since only non-forwarded objs from last NOS partial-forward collection need it. */
      obj_clear_dual_bits_in_oi(p_obj); 

#ifdef GC_GEN_STATS
      gc_gen_collector_update_moved_nos_mos_obj_stats_major(stats, vm_object_size(p_obj));
#endif

#ifdef USE_32BITS_HASHCODE
      move_compact_process_hashcode(p_obj, curr_block->hashcode_buf, new_hashcode_buf);
#endif 
      
      /* Size of the sector accumulated so far (start_pos = end of the
         current object, src_sector_addr = start of the sector). */
      POINTER_SIZE_INT curr_sector_size = (POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)src_sector_addr;

      /* check if dest block is not enough to hold this sector. If yes, grab next one */      
      POINTER_SIZE_INT block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block);
      if( ((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) > block_end ){
        /* Seal the full target block (record its fill level) first. */
        dest_block->new_free = dest_sector_addr; 
#ifdef USE_32BITS_HASHCODE
        block_swap_hashcode_buf(dest_block, &new_hashcode_buf, &old_hashcode_buf);
#endif        
        dest_block = mspace_get_next_target_block(collector, mspace);
        if(dest_block == NULL){ 
          /* Out of target blocks: undo hashcode staging and report failure
             so the caller can fall back. */
#ifdef USE_32BITS_HASHCODE
          hashcode_buf_rollback_new_entry(old_hashcode_buf);
#endif
          collector->result = FALSE; 
          return; 
        }
#ifdef USE_32BITS_HASHCODE
        /* Carry entries staged for the in-flight sector over to the buffer
           of the new target block. */
        hashcode_buf_transfer_new_entry(old_hashcode_buf, new_hashcode_buf);
#endif 
        if((!local_last_dest) || (dest_block->block_idx > local_last_dest->block_idx))
          local_last_dest = dest_block;
        block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block);
        dest_sector_addr = dest_block->base;
      }
        
      assert(((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) <= block_end );

      Partial_Reveal_Object *last_obj_end = (Partial_Reveal_Object *)start_pos;
      /* check if next live object is out of current sector. If not, loop back to continue within this sector. FIXME:: we should add a condition for block check (?) */      
      p_obj =  block_get_next_marked_object(curr_block, &start_pos);
      if ((p_obj != NULL) && (OBJECT_INDEX_TO_OFFSET_TABLE(p_obj) == curr_sector)) {
        /* Dead gap inside the sector: bridge it by pointing the previous
           object's end at the next live object, then keep accumulating. */
      	if(last_obj_end != p_obj) obj_set_vt_to_next_obj(last_obj_end, p_obj);
        continue;
      }

      /* current sector is done, let's move it. */
      POINTER_SIZE_INT sector_distance = (POINTER_SIZE_INT)src_sector_addr - (POINTER_SIZE_INT)dest_sector_addr;
      assert((sector_distance % GC_OBJECT_ALIGNMENT) == 0);
      /* if sector_distance is zero, we don't do anything. But since block offset table is never cleaned, we have to set 0 to it. */
      curr_block->table[curr_sector] = sector_distance;

      /* memmove (not memcpy): source and destination ranges can overlap
         when the slide distance is smaller than the sector size. */
      if(sector_distance != 0) 
        memmove(dest_sector_addr, src_sector_addr, curr_sector_size);

#ifdef USE_32BITS_HASHCODE
      /* Re-key the staged hashcode entries by the distance just applied. */
      hashcode_buf_refresh_new_entry(new_hashcode_buf, sector_distance);
#endif

      /* Advance the destination cursor and begin the next sector at the
         next live object (p_obj may be NULL, ending the inner loop). */
      dest_sector_addr = (void*)((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size);
      src_sector_addr = p_obj;
      curr_sector  = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj);
    }
#ifdef USE_32BITS_HASHCODE      
    hashcode_buf_clear(curr_block->hashcode_buf);
 #endif    
    assert(!verify_live_heap ||debug_num_live_obj == curr_block->num_live_objs);
    curr_block = mspace_get_next_compact_block(collector, mspace);
  }
    
  /* Record the final fill level of the last target block and remember it
     as this collector's current target. */
  dest_block->new_free = dest_sector_addr;
  collector->cur_target_block = local_last_dest;
 
#ifdef USE_32BITS_HASHCODE
  /* Attach the accumulated hashcode buffer to the last target block and
     free the buffer it previously held. */
  old_hashcode_buf = block_set_hashcode_buf(dest_block, new_hashcode_buf);
  hashcode_buf_destory(old_hashcode_buf);
#endif
  return;
}