Example #1
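/*
 * Under current_dest_block.lock, hands the calling collector the next
 * "first source object" to evacuate into the current destination block of
 * a parallel move-compact pass. If the current dest block has no source
 * objects left, it advances to the next dest block; DEST_NOT_EMPTY
 * apparently means the candidate block still holds unevacuated live data,
 * so the caller spins until other collectors drain it.
 */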
static inline Partial_Reveal_Object *get_next_first_src_obj(Mspace *mspace)
{
    Partial_Reveal_Object *first_src_obj;

    while(TRUE) {
        lock(current_dest_block.lock);
        Block_Header *next_dest_block = (Block_Header *)current_dest_block.block;

        if (!next_dest_block || !(first_src_obj = next_dest_block->src)) {
            next_dest_block = get_next_dest_block(mspace);
            if(!next_dest_block) {
                unlock(current_dest_block.lock);
                return NULL;
            } else if(next_dest_block == DEST_NOT_EMPTY) {
                unlock(current_dest_block.lock);
                /* spin until other collectors drain the candidate dest block */
                while(check_dest_block(mspace)==DEST_NOT_EMPTY);
                continue;
            }
            first_src_obj = next_dest_block->src;
            if(next_dest_block->status == BLOCK_DEST) {
                assert(!next_dest_block->dest_counter);
                current_dest_block.block = next_dest_block;
            }
        }

        /* keep the chained next src object only if it still forwards into this dest block */
        Partial_Reveal_Object *next_src_obj = GC_BLOCK_HEADER(first_src_obj)->next_src;
        if(next_src_obj && GC_BLOCK_HEADER(ref_to_obj_ptr((REF)get_obj_info_raw(next_src_obj))) != next_dest_block) {
            next_src_obj = NULL;
        }
        next_dest_block->src = next_src_obj;
        unlock(current_dest_block.lock);
        return first_src_obj;
    }
}
Example #2
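/*
 * Allocates a "super object" (size > SUPER_OBJ_THRESHOLD) by dedicating a
 * whole abnormal chunk to it, or a hyper chunk above HYPER_OBJ_THRESHOLD.
 * The first mark-table word of the chunk carries the object's color/alloc
 * bits; the object is marked live at allocation time while allocation-time
 * marking (i.e. concurrent marking) is active.
 */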
static void *wspace_alloc_super_obj(Wspace *wspace, unsigned size, Allocator *allocator)
{
  assert(size > SUPER_OBJ_THRESHOLD);

  unsigned int chunk_size = SUPER_SIZE_ROUNDUP(size);
  assert(chunk_size > SUPER_OBJ_THRESHOLD);
  assert(!(chunk_size & CHUNK_GRANULARITY_LOW_MASK));
  
  Chunk_Header *chunk;
  if(chunk_size <= HYPER_OBJ_THRESHOLD)
    chunk = (Chunk_Header*)wspace_get_abnormal_free_chunk(wspace, chunk_size);
  else
    chunk = (Chunk_Header*)wspace_get_hyper_free_chunk(wspace, chunk_size, FALSE);
  
  if(!chunk) return NULL;
  abnormal_chunk_init(chunk, chunk_size, size);
  
  mutator_post_signal((Mutator*)allocator, HSIG_MUTATOR_ENTER_ALLOC_MARK);
  if(is_obj_alloced_live()){
    /* allocation-time marking is on (concurrent marking in progress): mark the new object live */
    chunk->table[0] |= cur_mark_black_color;
  }
  mutator_post_signal((Mutator*)allocator, HSIG_MUTATOR_SAFE);
  
  chunk->table[0] |= cur_alloc_color;
  set_super_obj_mask(chunk->base);
  assert(chunk->status == CHUNK_ABNORMAL);
  chunk->status = CHUNK_ABNORMAL | CHUNK_USED;
  wspace_reg_used_chunk(wspace, chunk);
  assert(get_obj_info_raw((Partial_Reveal_Object*)chunk->base) & SUPER_OBJ_MASK);
  
  ((Mutator*)allocator)->new_obj_occupied_size += chunk_size;
  return chunk->base;
}
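SUPER_SIZE_ROUNDUP and CHUNK_GRANULARITY_LOW_MASK are used above but not defined in this excerpt. Below is a minimal sketch of the round-up-to-granularity idiom the two asserts imply; the granularity value and the lowercase names are assumptions for illustration, not the codebase's definitions:

#include <assert.h>

#define CHUNK_GRANULARITY 128  /* assumed granularity, for illustration only */
#define CHUNK_GRANULARITY_LOW_MASK ((unsigned)(CHUNK_GRANULARITY - 1))

/* Round size up to the next multiple of a power-of-two granularity. */
static unsigned super_size_roundup(unsigned size)
{
  return (size + CHUNK_GRANULARITY_LOW_MASK) & ~CHUNK_GRANULARITY_LOW_MASK;
}

int main(void)
{
  assert(super_size_roundup(1) == 128);
  assert(super_size_roundup(128) == 128);
  assert(super_size_roundup(129) == 256);
  /* the rounded size has no low bits set, matching the assert in
     wspace_alloc_super_obj */
  assert(!(super_size_roundup(1000) & CHUNK_GRANULARITY_LOW_MASK));
  return 0;
}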
Example #3
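/*
 * Write-barrier slow path: remember an object at most once. The CAS loop
 * ensures exactly one thread flips OBJ_REM_BIT and adds the object to its
 * thread-local remembered set; losing threads return without adding a
 * duplicate entry.
 */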
static void mutator_rem_obj(Managed_Object_Handle p_obj_written)
{
  if( obj_is_remembered((Partial_Reveal_Object*)p_obj_written))
    return;

  Partial_Reveal_Object* p_obj = (Partial_Reveal_Object*)p_obj_written;
  Obj_Info_Type info = get_obj_info_raw(p_obj);
  Obj_Info_Type new_info = info | OBJ_REM_BIT;
  while ( info != new_info) {
    Obj_Info_Type temp =
      atomic_casptrsz((volatile POINTER_SIZE_INT*)get_obj_info_addr(p_obj), new_info, info);
    if (temp == info) break; /* CAS succeeded: this thread set OBJ_REM_BIT */
    info = get_obj_info_raw(p_obj);
    new_info = info | OBJ_REM_BIT;
  }
  if(info == new_info) return; /* bit already set: remembered by another thread */
    
  Mutator *mutator = (Mutator *)gc_get_tls();            
  mutator_remset_add_entry(mutator, (REF*)p_obj);
  return;
}
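The same remember-at-most-once idiom, reduced to portable C11 atomics; a sketch assuming the info word behaves like a plain machine word. try_remember and REM_BIT are illustrative names, not APIs of this codebase:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define REM_BIT ((uintptr_t)0x1)

/* Returns true iff the calling thread is the one that set the bit,
 * i.e. the one that must add the object to its remembered set. */
static bool try_remember(_Atomic uintptr_t *info_word)
{
  uintptr_t info = atomic_load(info_word);
  while (!(info & REM_BIT)) {
    /* compare_exchange_weak reloads `info` on failure, so the loop
       re-tests the bit against the freshest value. */
    if (atomic_compare_exchange_weak(info_word, &info, info | REM_BIT))
      return true;
  }
  return false; /* another thread remembered it first */
}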
Example #4
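/*
 * Planning pass of sliding compaction for the large-object space: visits
 * marked objects in address order, assigns each a kilobyte-aligned target
 * address, and stores it as a forwarding pointer in the object info word.
 * Nonzero info words are saved, paired with the target address, into the
 * collector's remembered set, presumably so they can be restored after the
 * objects are actually moved.
 */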
void lspace_compute_object_target(Collector* collector, Lspace* lspace)
{
  void* dest_addr = lspace->heap_start;
  unsigned int iterate_index = 0;
  Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &iterate_index);

  assert(!collector->rem_set);
  collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
#ifdef USE_32BITS_HASHCODE  
  collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata);
#endif
  
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif
  while( p_obj ){
    assert( obj_is_marked_in_vt(p_obj));
    unsigned int obj_size = vm_object_size(p_obj);
#ifdef GC_GEN_STATS
    gc_gen_collector_update_moved_los_obj_stats_major(stats, vm_object_size(p_obj));
#endif
    assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)lspace->heap_end);
#ifdef USE_32BITS_HASHCODE 
    obj_size += hashcode_is_attached(p_obj)? GC_OBJECT_ALIGNMENT : 0 ;
    Obj_Info_Type obj_info = slide_compact_process_hashcode(p_obj, dest_addr, &obj_size, collector, NULL, NULL);
#else
    Obj_Info_Type obj_info = get_obj_info_raw(p_obj);
#endif

    /* a nonzero info word will be clobbered by the forwarding pointer below;
       save it, paired with the object's new address, for later restoration */
    if( obj_info != 0 ) {
      collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
      collector_remset_add_entry(collector, (Partial_Reveal_Object **)(POINTER_SIZE_INT)obj_info);
    }
      
    obj_set_fw_in_oi(p_obj, dest_addr);
    dest_addr = (void *)ALIGN_UP_TO_KILO(((POINTER_SIZE_INT) dest_addr + obj_size));
    p_obj = lspace_get_next_marked_object(lspace, &iterate_index);
  }

  pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set);
  collector->rem_set = NULL;
#ifdef USE_32BITS_HASHCODE 
  pool_put_entry(collector->gc->metadata->collector_hashcode_pool, collector->hashcode_set);
  collector->hashcode_set = NULL;
#endif
  
  lspace->scompact_fa_start = dest_addr;
  lspace->scompact_fa_end = lspace->heap_end;
  return;
}
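obj_set_fw_in_oi overwrites each object's info word with its forwarding pointer, and the paired entries pushed into the remembered set above are what make that reversible once the objects have been slid. Below is a self-contained sketch of the save/overwrite/restore round trip; ObjHeader, RemsetPair, and the one-word header layout are illustrative assumptions:

#include <assert.h>
#include <stdint.h>
#include <string.h>

/* Illustrative stand-ins for the object header and a saved remset pair. */
typedef struct { uintptr_t obj_info; /* object payload would follow */ } ObjHeader;
typedef struct { ObjHeader *new_addr; uintptr_t saved_info; } RemsetPair;

int main(void)
{
  ObjHeader obj  = { 0xBEEF };                /* nonzero info word: must be saved */
  ObjHeader dest = { 0 };                     /* the computed target location     */
  RemsetPair pair = { &dest, obj.obj_info };  /* the two remset entries           */

  obj.obj_info = (uintptr_t)&dest;            /* obj_set_fw_in_oi analogue        */
  memcpy(&dest, &obj, sizeof obj);            /* the later sliding move           */
  pair.new_addr->obj_info = pair.saved_info;  /* restore phase after the move     */

  assert(dest.obj_info == 0xBEEF);
  return 0;
}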
Example #5
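/*
 * Compacts live small objects from chunk src into chunk dest (equal slot
 * sizes): pop each allocated slot from src, allocate a slot in dest, copy
 * the object, and leave a forwarding pointer in the old copy. When dest
 * fills up it is registered as used and *dest_ptr is cleared so the caller
 * supplies a fresh dest; src->alloc_num tracks what remains in src.
 */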
static inline void move_obj_between_chunks(Wspace *wspace, Chunk_Header **dest_ptr, Chunk_Header *src)
{
  Chunk_Header *dest = *dest_ptr;
  assert(dest->slot_size == src->slot_size);
  
  unsigned int slot_size = dest->slot_size;
  unsigned int alloc_num = src->alloc_num;
  assert(alloc_num);
  
#ifdef USE_32BITS_HASHCODE
  Hashcode_Buf*  old_hashcode_buf = src->hashcode_buf;
  Hashcode_Buf* new_hashcode_buf = dest->hashcode_buf;
#endif

  while(alloc_num && dest){
    Partial_Reveal_Object *p_obj = next_alloc_slot_in_chunk(src);
    Partial_Reveal_Object *target = (Partial_Reveal_Object *)alloc_in_chunk(dest);

    assert(p_obj && target);
    memcpy(target, p_obj, slot_size);

#ifdef USE_32BITS_HASHCODE
    if(hashcode_is_set(p_obj)){
      int hashcode;
      if(new_hashcode_buf == NULL) {
        new_hashcode_buf = hashcode_buf_create();
        hashcode_buf_init(new_hashcode_buf);
        dest->hashcode_buf = new_hashcode_buf;
      }
      if(hashcode_is_buffered(p_obj)){
        /* already buffered objects */
        hashcode = hashcode_buf_lookup(p_obj, old_hashcode_buf);
        hashcode_buf_update(target, hashcode, new_hashcode_buf);
      }else{
        /* objects that need buffering */
        hashcode = hashcode_gen(p_obj);
        hashcode_buf_update(target, hashcode, new_hashcode_buf);
        Obj_Info_Type oi = get_obj_info_raw(target);
        set_obj_info(target, oi | HASHCODE_BUFFERED_BIT);
      }
    }
#endif

#ifdef SSPACE_VERIFY
    wspace_modify_mark_in_compact(target, p_obj, slot_size);
#endif
    obj_set_fw_in_oi(p_obj, target);
    --alloc_num;

    /* dest is full: register it and ask the caller for a fresh one. This is
       checked only after the hashcode handling above, which may still
       dereference dest to install a fresh hashcode buffer. */
    if(dest->slot_index == MAX_SLOT_INDEX){
      dest->status = CHUNK_USED | CHUNK_NORMAL;
      wspace_reg_used_chunk(wspace,dest);
      dest = NULL;
    }
  }

#ifdef USE_32BITS_HASHCODE
  if(alloc_num == 0) {
    if(old_hashcode_buf) hashcode_buf_destory(old_hashcode_buf);
    src->hashcode_buf = NULL;
  }
#endif

  /* dest might be set to NULL, so we use *dest_ptr here */
  assert((*dest_ptr)->alloc_num <= (*dest_ptr)->slot_num);
  src->alloc_num = alloc_num;
  if(!dest){
    assert((*dest_ptr)->alloc_num == (*dest_ptr)->slot_num);
    *dest_ptr = NULL;
    clear_free_slot_in_table(src->table, src->slot_index);
  }
}