Code example #1
void allocator_object_write_barrier(Partial_Reveal_Object* p_object, Collector* allocator) 
{
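  /* Nothing to remember when the written object itself lives in NOS:
     only slots outside NOS that point into NOS need remembered-set entries. */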
  if( addr_belongs_to_nos(p_object)) return;

  REF* p_slot; 
  /* scan array object */
  if (object_is_array((Partial_Reveal_Object*)p_object)) {
    Partial_Reveal_Object* array = p_object;
    assert(!obj_is_primitive_array(array));

    I_32 array_length = vector_get_length((Vector_Handle) array);
    for (int i = 0; i < array_length; i++) {
      p_slot = (REF *)vector_get_element_address_ref((Vector_Handle)array, i);
      if( read_slot(p_slot) != NULL && addr_belongs_to_nos(read_slot(p_slot))){
        collector_remset_add_entry(allocator, (Partial_Reveal_Object**)p_slot);
      }
    }   
    return;
  }

  /* scan non-array object */
  Partial_Reveal_Object* p_obj =  (Partial_Reveal_Object*)p_object;   
  unsigned int num_refs = object_ref_field_num(p_obj);
  int *ref_iterator = object_ref_iterator_init(p_obj);
            
  for(unsigned int i=0; i<num_refs; i++){
    p_slot = object_ref_iterator_get(ref_iterator+i, p_obj);        
    if( addr_belongs_to_nos(read_slot(p_slot))){
      collector_remset_add_entry(allocator, (Partial_Reveal_Object**)p_slot);
    }
  }

  return;
}
Code example #2
File: lspace_alloc_collect.cpp  Project: dacut/juliet
void lspace_compute_object_target(Collector* collector, Lspace* lspace)
{
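  /* Compute the post-compaction target address of every marked object in the
     large object space and install it as the forwarding pointer in the object header. */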
  void* dest_addr = lspace->heap_start;
  unsigned int iterate_index = 0;
  Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &iterate_index);
  	
  assert(!collector->rem_set);
  collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
#ifdef USE_32BITS_HASHCODE  
  collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata);
#endif
  
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif
  while( p_obj ){
   
    assert( obj_is_marked_in_vt(p_obj));
    unsigned int obj_size = vm_object_size(p_obj);
#ifdef GC_GEN_STATS
    gc_gen_collector_update_moved_los_obj_stats_major(stats, vm_object_size(p_obj));
#endif
    assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)lspace->heap_end);
#ifdef USE_32BITS_HASHCODE 
    obj_size += hashcode_is_attached(p_obj)? GC_OBJECT_ALIGNMENT : 0 ;
    Obj_Info_Type obj_info = slide_compact_process_hashcode(p_obj, dest_addr, &obj_size, collector, NULL, NULL);
#else
    Obj_Info_Type obj_info = get_obj_info_raw(p_obj);
#endif

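    /* A non-zero obj_info means the header carries extra state (e.g. hash or lock bits).
       Record the (target address, obj_info) pair so the header can be restored after the
       move, since obj_set_fw_in_oi() below overwrites it with the forwarding pointer. */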
    if( obj_info != 0 ) {
      collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
      collector_remset_add_entry(collector, (Partial_Reveal_Object **)(POINTER_SIZE_INT)obj_info);
    }
      
    obj_set_fw_in_oi(p_obj, dest_addr);
    dest_addr = (void *)ALIGN_UP_TO_KILO(((POINTER_SIZE_INT) dest_addr + obj_size));
    p_obj = lspace_get_next_marked_object(lspace, &iterate_index);
  }

  pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set);
  collector->rem_set = NULL;
#ifdef USE_32BITS_HASHCODE 
  pool_put_entry(collector->gc->metadata->collector_hashcode_pool, collector->hashcode_set);
  collector->hashcode_set = NULL;
#endif
  
  lspace->scompact_fa_start = dest_addr;
  lspace->scompact_fa_end = lspace->heap_end;
  return;
}
Code example #3
static void update_referent_field_ignore_finref(GC *gc, Pool *pool)
{
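  /* Update the referent field of each reference object in the pool: forward it if the
     referent was moved, clear it if the referent is dead. Finalizable-reference (finref)
     enqueue handling is deliberately not done here. */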
  Vector_Block *block = pool_get_entry(pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);
        
      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);      
      if(!p_referent){  // referent field has been cleared
        *p_ref = (REF)NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else {
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
        continue;
      }
      *p_referent_field = (REF)NULL; /* referent is weakly reachable: clear the referent field */
    }
    block = pool_get_entry(pool);
  }
}
Code example #4
/*
 * The reason why we don't use identify_dead_refs() to implement this function is
 * that we will differentiate phanref from weakref in the future.
 */
static void identify_dead_phanrefs(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *phanref_pool = metadata->phanref_pool;
  
  if(collect_need_update_repset())
    finref_reset_repset(gc);
//  collector_reset_repset(collector);
  pool_iterator_init(phanref_pool);
  Vector_Block *block = pool_iterator_next(phanref_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
      Partial_Reveal_Object *p_obj = read_slot((REF*)p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);

      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);      
      if(!p_referent){  // referent field has been cleared
        *p_ref = NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else { // a collect_move_object() check would be redundant here: obj_need_move() already implies it
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
        continue;
      }
      *p_referent_field = (REF)NULL;
#ifdef ORDER_DEBUG
      if(ref_file == NULL){
        if(order_record){
          ref_file = fopen64("RECORD_REF_LOG.log", "w+");
        } else {
          ref_file = fopen64("REPLAY_REF_LOG.log", "w+");
        }
      }
      assert(ref_file);
      fprintf(ref_file, "GC[%d]: ref (%d, %d) is DEAD!\n", gc->num_collections, p_referent->alloc_tid, p_referent->alloc_count);
      fflush(ref_file);
#endif
      /* Phantom status: for future use
       * if((unsigned int)p_referent & PHANTOM_REF_ENQUEUE_STATUS_MASK){
       *   // enqueued but not explicitly cleared OR pending for enqueueing
       *   *iter = NULL;
       * }
       * resurrect_obj_tree(collector, p_referent_field);
       */
    }
    block = pool_iterator_next(phanref_pool);
  }
//  collector_put_repset(collector);
  if(collect_need_update_repset()){
    finref_put_repset(gc);
    finref_add_repset_from_pool(gc, phanref_pool);
  }
}
Code example #5
static void identify_dead_refs(GC *gc, Pool *pool)
{
  if(collect_need_update_repset())
    finref_reset_repset(gc);

  pool_iterator_init(pool);
  Vector_Block *block = pool_iterator_next(pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);
        
      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
      
      if(!p_referent){  
        /* The referent field has already been cleared. I forgot why we set p_ref to NULL here;
           I guess it's because this ref_obj was already processed through another p_ref, so
           there is no need to keep the same ref_obj in this p_ref. */
        *p_ref = (REF)NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            /* In gen mode, if the referent stays in NOS, we need to keep p_referent_field in the collector remset.
               This keeps the ref obj live in the next gen-mode collection even though it is actually only weakly
               reachable, but it simplifies the design. Otherwise we would need to remember refobjs in MOS separately
               and process them separately. */
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else { // a collect_move_object() check would be redundant here: obj_need_move() already implies it
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
      } else {
        /* the referent is dead (weakly reachable): clear the referent field */
        *p_referent_field = (REF)NULL;
#ifdef ORDER_DEBUG
        if(ref_file == NULL){
          if(order_record){
            ref_file = fopen64("RECORD_REF_LOG.log", "w+");
          } else {
            ref_file = fopen64("REPLAY_REF_LOG.log", "w+");
          }
        }
        assert(ref_file);
        fprintf(ref_file, "GC[%d]: ref (%d, %d) is DEAD!\n", gc->num_collections, p_referent->alloc_tid, p_referent->alloc_count);
        fflush(ref_file);
#endif
        /* For a dead referent, p_ref is not set to NULL: p_ref keeps the ref object, which
           will be moved to the VM for enqueueing. */
      }
    }/* for each ref object */
    
    block = pool_iterator_next(pool);
  }
  
  if(collect_need_update_repset()){
    finref_put_repset(gc);
    finref_add_repset_from_pool(gc, pool);
  }
}
Code example #6
File: mspace_slide_compact.cpp  Project: dacut/juliet
static void mspace_compute_object_target(Collector* collector, Mspace* mspace)
{
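    /* Slide-compact pass over MOS: for each marked object in the collector's compact blocks,
       compute a target address in the current target block, chain source blocks to their
       destination blocks, and install the target address as the forwarding pointer. */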
    Block_Header *curr_block = collector->cur_compact_block;
    Block_Header *dest_block = collector->cur_target_block;
    Block_Header *local_last_dest = dest_block;
    void *dest_addr = dest_block->base;
    Block_Header *last_src;

#ifdef USE_32BITS_HASHCODE
    Hashcode_Buf* old_hashcode_buf = NULL;
    Hashcode_Buf* new_hashcode_buf = hashcode_buf_create();
    hashcode_buf_init(new_hashcode_buf);
#endif

    assert(!collector->rem_set);
    collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
#ifdef USE_32BITS_HASHCODE
    collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata);
#endif

#ifdef GC_GEN_STATS
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

    while( curr_block ) {
        void* start_pos;
        Partial_Reveal_Object *first_obj = block_get_first_marked_obj_prefetch_next(curr_block, &start_pos);
        if(first_obj) {
            ++curr_block->dest_counter;
            if(!dest_block->src)
                dest_block->src = first_obj;
            else
                last_src->next_src = first_obj;
            last_src = curr_block;
        }
        Partial_Reveal_Object* p_obj = first_obj;

        while( p_obj ) {
            assert( obj_is_marked_in_vt(p_obj));

            unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj);


#ifdef GC_GEN_STATS
            gc_gen_collector_update_moved_nos_mos_obj_stats_major(stats, obj_size);
#endif

            Obj_Info_Type obj_info = get_obj_info(p_obj);

            unsigned int obj_size_precompute = obj_size;

#ifdef USE_32BITS_HASHCODE
            precompute_hashcode_extend_size(p_obj, dest_addr, &obj_size_precompute);
#endif
            if( ((POINTER_SIZE_INT)dest_addr + obj_size_precompute) > (POINTER_SIZE_INT)GC_BLOCK_END(dest_block)) {
#ifdef USE_32BITS_HASHCODE
                block_swap_hashcode_buf(dest_block, &new_hashcode_buf, &old_hashcode_buf);
#endif
                dest_block->new_free = dest_addr;
                dest_block = mspace_get_next_target_block(collector, mspace);
                if(dest_block == NULL) {
                    collector->result = FALSE;
                    return;
                }
                if((!local_last_dest) || (dest_block->block_idx > local_last_dest->block_idx))
                    local_last_dest = dest_block;
                dest_addr = dest_block->base;
                dest_block->src = p_obj;
                last_src = curr_block;
                if(p_obj != first_obj)
                    ++curr_block->dest_counter;
            }
            assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)GC_BLOCK_END(dest_block));

#ifdef USE_32BITS_HASHCODE
            obj_info = slide_compact_process_hashcode(p_obj, dest_addr, &obj_size, collector, curr_block->hashcode_buf, new_hashcode_buf);
#endif

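            /* A non-zero obj_info (hash/lock state) is saved together with its target address
               so the header can be restored after the object is moved. */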
            if( obj_info != 0 ) {
                collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
                collector_remset_add_entry(collector, (Partial_Reveal_Object **)(POINTER_SIZE_INT)obj_info);
            }

            obj_set_fw_in_oi(p_obj, dest_addr);

            /* FIXME: should use alloc to handle alignment requirement */
            dest_addr = (void *)((POINTER_SIZE_INT) dest_addr + obj_size);
            p_obj = block_get_next_marked_obj_prefetch_next(curr_block, &start_pos);
        }
#ifdef USE_32BITS_HASHCODE
        hashcode_buf_clear(curr_block->hashcode_buf);
#endif
        curr_block = mspace_get_next_compact_block(collector, mspace);

    }

#ifdef USE_32BITS_HASHCODE
    pool_put_entry(collector->gc->metadata->collector_hashcode_pool, collector->hashcode_set);
    collector->hashcode_set = NULL;
#endif
    pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set);
    collector->rem_set = NULL;
    dest_block->new_free = dest_addr;

    Block_Header *cur_last_dest = (Block_Header *)last_block_for_dest;
    collector->cur_target_block = local_last_dest;
    while((local_last_dest)&&((!cur_last_dest) || (local_last_dest->block_idx > cur_last_dest->block_idx))) {
        atomic_casptr((volatile void **)&last_block_for_dest, local_last_dest, cur_last_dest);
        cur_last_dest = (Block_Header *)last_block_for_dest;
    }

#ifdef USE_32BITS_HASHCODE
    old_hashcode_buf = block_set_hashcode_buf(dest_block, new_hashcode_buf);
    hashcode_buf_destory(old_hashcode_buf);
#endif
    return;
}
Code example #7
static FORCE_INLINE void forward_object(Collector *collector, REF *p_ref) 
{
  Space* space = collector->collect_space; 
  GC* gc = collector->gc;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);

  /* p_obj can also be in tospace, because this p_ref may be a redundant entry in the mutator remset.
     We don't remember p_ref again; it was remembered the first time it was met.
     FIXME:: the obj_belongs_to_tospace() situation should never arise if we
     remembered objects rather than slots. Currently, the mutator remembers objects and
     the collector remembers slots. Even though collectors remember slots, we are sure
     there is no chance of a repetitive p_ref, because an object is scanned only
     when it is marked or forwarded atomically, hence only one collector gets the chance
     to do the scanning. */
  if(!obj_belongs_to_nos(p_obj) || obj_belongs_to_tospace(p_obj)) return; 

  Partial_Reveal_Object* p_target_obj = NULL;
  Boolean to_rem_slot = FALSE;

  /* Fastpath: object has already been forwarded, update the ref slot */
  if(obj_is_fw_in_oi(p_obj)){
    p_target_obj = obj_get_fw_in_oi(p_obj);
    write_slot(p_ref, p_target_obj);

    /* check if the target obj stays in NOS, and p_ref from MOS. If yes, rem p_ref. */
    if(obj_belongs_to_tospace(p_target_obj))
      if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
        collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref); 

    return; 
  }  
    
  /* following is the logic for forwarding */  
  p_target_obj = collector_forward_object(collector, p_obj);
  
  /* If p_target_obj is NULL, the object has been forwarded by another thread.
      Note: there is a race condition here; it might have been forwarded by another thread
      that has not yet set the forwarding pointer, so we need to spin here to get it.
      We could implement collector_forward_object() so that the forwarding pointer
      is set in the atomic instruction, which requires rolling back the mos_alloced
      space. That is easy for thread-local block allocation cancellation. */
  if( p_target_obj == NULL ){
    if(collector->result == FALSE ){
      /* failed to forward, let's get back to controller. */
      vector_stack_clear(collector->trace_stack);
      return;
    }
    /* forwarded already */
    p_target_obj = obj_get_fw_in_oi(p_obj);
  
  }else{  /* otherwise, we successfully forwarded */

#ifdef GC_GEN_STATS
  if(gc_profile){
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
    gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
    gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
  }
#endif

    scan_object(collector, p_target_obj);
  }
  
  assert(p_target_obj);
  write_slot(p_ref, p_target_obj);
  
  /* check if the target obj stays in NOS, and p_ref from MOS. If yes, rem p_ref. */
  if(obj_belongs_to_tospace(p_target_obj)){
    if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
      collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref); 
  }
   
  return;
}