Example #1
static inline void fallback_update_fw_ref(REF *p_ref)
{
  assert(collect_is_fallback());
  
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
    assert(!obj_is_marked_in_vt(p_obj));
    assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj)));
    p_obj = obj_get_fw_in_oi(p_obj);
    assert(p_obj);
    write_slot(p_ref, p_obj);
  }
}
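
A minimal, self-contained model of the forwarding idiom this function relies on. The encoding here (low bit of an object-info word marks "forwarded", the remaining bits hold the new address) is an assumption for illustration; DRLVM's real obj_is_fw_in_oi()/obj_get_fw_in_oi() may encode it differently.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

typedef struct Obj { uintptr_t oi; int payload; } Obj;   /* toy object header */
#define FW_BIT ((uintptr_t)0x1)

static int  obj_is_fw(Obj *o)           { return (o->oi & FW_BIT) != 0; }
static Obj *obj_get_fw(Obj *o)          { return (Obj*)(o->oi & ~FW_BIT); }
static void obj_set_fw(Obj *o, Obj *t)  { o->oi = (uintptr_t)t | FW_BIT; }

int main(void) {
  Obj old_copy = {0, 42}, new_copy = {0, 42};
  Obj *slot = &old_copy;              /* a ref slot pointing at the old copy */
  obj_set_fw(&old_copy, &new_copy);   /* the collector forwards the object   */
  if (obj_is_fw(slot))                /* the update step from the example    */
    slot = obj_get_fw(slot);
  assert(slot == &new_copy);
  printf("slot now points at the new copy: %d\n", slot->payload);
  return 0;
}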
Example #2
/* Only called in non-minor collections. If pointer_addr_in_pool is TRUE, each pool entry holds the
   address of a ref slot (a p_ref or p_obj stored elsewhere); otherwise the pool entries are the slots themselves. */
static void nondestructively_fix_finref_pool(GC *gc, Pool *pool, Boolean pointer_addr_in_pool, Boolean double_fix)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  REF *p_ref;
  Partial_Reveal_Object *p_obj;
  
  /* NOTE:: this is nondestructive to the root sets. */
  pool_iterator_init(pool);
  Vector_Block *repset = pool_iterator_next(pool);
  while(repset){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(repset);
    for(; !vector_block_iterator_end(repset,iter); iter = vector_block_iterator_advance(repset,iter)){
      if(pointer_addr_in_pool)
        p_ref = (REF*)*iter;
      else
        p_ref = (REF*)iter;
      p_obj = read_slot(p_ref);
      
      if(collect_is_compact_move()){ /* include both unique move-compact and major move-compact */
        move_compaction_update_ref(gc, p_ref);
      } else if(collect_is_ms_compact()){
        if(obj_is_fw_in_oi(p_obj))
          moving_mark_sweep_update_ref(gc, p_ref, double_fix);
      } else { /* major slide compact */
        assert((obj_is_marked_in_vt(p_obj) && obj_is_fw_in_oi(p_obj)));
        write_slot(p_ref , obj_get_fw_in_oi(p_obj));
      }
    }
    repset = pool_iterator_next(pool);
  }
}
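
The pointer_addr_in_pool branch above is the crux of this function. A tiny stand-alone model of the two pool layouts (my own types, not DRLVM's Pool/Vector_Block): with addresses in the pool the slot is *iter, otherwise the pool entry itself is the slot.

#include <stdint.h>
#include <stdio.h>

typedef uintptr_t REF;   /* toy stand-in for DRLVM's REF */

int main(void) {
  int obj = 7;
  REF slot_elsewhere = (REF)&obj;                      /* a ref slot outside the pool */
  uintptr_t pool_of_addrs[1] = { (uintptr_t)&slot_elsewhere };
  uintptr_t pool_of_slots[1] = { (REF)&obj };          /* the slot lives in the pool  */

  for (int addr_in_pool = 0; addr_in_pool <= 1; addr_in_pool++) {
    uintptr_t *iter = addr_in_pool ? pool_of_addrs : pool_of_slots;
    REF *p_ref = addr_in_pool ? (REF*)*iter : (REF*)iter;
    printf("object value reached through the slot: %d\n", *(int*)*p_ref);
  }
  return 0;
}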
Example #3
/* Finalizable objects fall back to the obj_with_fin pool when resurrection fallback happens */
static void finalizable_objs_fallback(GC *gc)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *finalizable_obj_pool = metadata->finalizable_obj_pool;
  Pool *obj_with_fin_pool = metadata->obj_with_fin_pool;
  Vector_Block *obj_with_fin_block = finref_get_free_block(gc);
    
  Vector_Block *block = pool_get_entry(finalizable_obj_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      /* Perhaps obj has been resurrected by previous resurrections. If the fin-obj was resurrected, we need to put it back into the obj_with_fin pool.
         For minor collection, the resurrected obj was forwarded, so we need to use the new copy.*/
      if(!gc_obj_is_dead(gc, p_obj) && obj_belongs_to_nos(p_obj)){
        /* Even in NOS, not all live objects are forwarded, due to the partial-forward algorithm */ 
        if(!NOS_PARTIAL_FORWARD || fspace_obj_to_be_forwarded(p_obj)){
          write_slot(p_ref , obj_get_fw_in_oi(p_obj));
          p_obj = read_slot(p_ref);
        }
      }
      /* gc_add_finalizer may return a newly allocated free block if obj_with_fin_block is full */
      obj_with_fin_block = gc_add_finalizer(gc, obj_with_fin_block, p_obj);
    }
    block = pool_get_entry(finalizable_obj_pool);
  }
  
  pool_put_entry(obj_with_fin_pool, obj_with_fin_block);
  metadata->pending_finalizers = FALSE;
}
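
The comment on gc_add_finalizer() describes an "append, spill to a fresh block when full" pattern. A sketch of that pattern with my own stand-in types (Block/add_entry are not the DRLVM API):

#include <stdio.h>
#include <stdlib.h>

#define BLOCK_CAP 4
typedef struct Block { void *entries[BLOCK_CAP]; int used; struct Block *next; } Block;

static Block *block_new(void) { return (Block*)calloc(1, sizeof(Block)); }

/* Returns the block the caller should keep appending to: the same one, or a
   freshly allocated one chained behind the full block. */
static Block *add_entry(Block *blk, void *obj) {
  if (blk->used == BLOCK_CAP) {
    Block *fresh = block_new();
    blk->next = fresh;
    blk = fresh;
  }
  blk->entries[blk->used++] = obj;
  return blk;
}

int main(void) {
  Block *head = block_new(), *cur = head;
  int objs[10];
  for (int i = 0; i < 10; i++)
    cur = add_entry(cur, &objs[i]);        /* caller must keep the returned block */
  int blocks = 0;
  for (Block *b = head; b; b = b->next) blocks++;
  printf("10 entries spilled across %d blocks\n", blocks);
  return 0;
}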
Example #4
static void mspace_sliding_compact(Collector* collector, Mspace* mspace)
{
    void *start_pos;

    while(Partial_Reveal_Object *p_obj = get_next_first_src_obj(mspace)) {
        Block_Header *src_block = GC_BLOCK_HEADER(p_obj);
        assert(src_block->dest_counter);

        Partial_Reveal_Object *p_target_obj = obj_get_fw_in_oi(p_obj);
        Block_Header *dest_block = GC_BLOCK_HEADER(p_target_obj);

        /* We record the object's end in start_pos before moving, rather than deriving it from
         * p_obj afterwards, because the memmove of this obj may overlap itself; after such a
         * move we could no longer read the correct vt and obj_info from the old location.
         */
#ifdef USE_32BITS_HASHCODE
        start_pos = obj_end_extend(p_obj);
#else
        start_pos = obj_end(p_obj);
#endif

        do {
            assert(obj_is_marked_in_vt(p_obj));
#ifdef USE_32BITS_HASHCODE
            obj_clear_dual_bits_in_vt(p_obj);
#else
            obj_unmark_in_vt(p_obj);
#endif

            unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj);
            if(p_obj != p_target_obj) {
                assert((((POINTER_SIZE_INT)p_target_obj) % GC_OBJECT_ALIGNMENT) == 0);
                memmove(p_target_obj, p_obj, obj_size);
            }
            set_obj_info(p_target_obj, 0);

            p_obj = block_get_next_marked_obj_after_prefetch(src_block, &start_pos);
            if(!p_obj)
                break;
            p_target_obj = obj_get_fw_in_oi(p_obj);

        } while(GC_BLOCK_HEADER(p_target_obj) == dest_block);

        atomic_dec32(&src_block->dest_counter);
    }

}
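
Why start_pos must be captured before the memmove: when the source and target ranges of a sliding object overlap, the move clobbers the words at the old location, so the header (vt, obj_info) can no longer be read through p_obj. A self-contained demonstration with a fake four-word "object":

#include <stdio.h>
#include <string.h>

int main(void) {
  long heap[8] = { 0, 0, /* the object: */ 111 /* "vt" */, 222, 333, 444, 0, 0 };
  long *p_obj    = &heap[2];
  long *p_target = &heap[0];               /* slide left by 2 words: ranges overlap */

  long size = 4 * sizeof(long);            /* read everything needed BEFORE moving  */
  memmove(p_target, p_obj, size);          /* the overlapping move itself is safe...*/

  /* ...but the old location now holds moved data, not the original header */
  printf("header word read from the old location: %ld (was 111)\n", p_obj[0]);
  printf("header word of the new copy: %ld\n", p_target[0]);
  return 0;
}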
Example #5
/* In two cases mark-sweep needs to fix repointed refs:
 * 1. ms with compaction
 * 2. ms as a mos collection algorithm
 */
static inline void moving_mark_sweep_update_ref(GC *gc, REF *p_ref, Boolean double_fix)
{
  /* There are only two kinds of p_ref being added into finref_repset_pool:
   * 1. p_ref is in a vector block from one finref pool;
   * 2. p_ref is a referent field.
   * So if p_ref belongs to heap, it must be a referent field pointer.
 * Resurrected objects, except the tree root, need not be recorded in finref_repset_pool.
   */
  if(address_belongs_to_gc_heap((void*)p_ref, gc)){
    unsigned int offset = get_gc_referent_offset();
    Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_ref - offset);
    if(obj_is_fw_in_oi(p_old_ref)){
      Partial_Reveal_Object *p_new_ref = obj_get_fw_in_oi(p_old_ref);
      /* Only major collection in MS Gen GC might need double_fix.
       * Double fixing happens when both forwarding and compaction happen.
       */
      if(double_fix && obj_is_fw_in_oi(p_new_ref)){
        assert(major_is_marksweep());
        p_new_ref = obj_get_fw_in_oi(p_new_ref);
        assert(address_belongs_to_gc_heap(p_new_ref, gc));
      }
      p_ref = (REF*)((POINTER_SIZE_INT)p_new_ref + offset);
    }
  }
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  /* assert(obj_need_move(gc, p_obj));
   * This assertion is commented out because it in turn asserts !obj_is_dead(gc, p_obj).
   * When gc_fix_rootset is invoked, the mark bit and alloc bit have already been flipped in Mark-Sweep,
   * so this assertion would fail.
   * But p_obj here must certainly be one that needs moving.
   */
  p_obj = obj_get_fw_in_oi(p_obj);
  /* Only major collection in MS Gen GC might need double_fix.
   * Double fixing happens when both forwarding and compaction happen.
   */
  if(double_fix && obj_is_fw_in_oi(p_obj)){
    assert(major_is_marksweep());
    p_obj = obj_get_fw_in_oi(p_obj);
    assert(address_belongs_to_gc_heap(p_obj, gc));
  }
  write_slot(p_ref, p_obj);
}
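
The offset arithmetic above in a self-contained form: a referent-field pointer is an interior pointer, so subtracting the field offset yields the start of the enclosing reference object, and adding the offset to the forwarded copy yields the field's new address. The struct layout is illustrative only (cf. get_gc_referent_offset()).

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

typedef struct RefObj { void *vt; void *referent; } RefObj;

int main(void) {
  RefObj old_copy = {0}, new_copy = {0};
  size_t offset = offsetof(RefObj, referent);

  void **p_ref = &old_copy.referent;              /* interior pointer into the old copy */
  RefObj *p_old = (RefObj*)((uintptr_t)p_ref - offset);
  assert(p_old == &old_copy);                     /* recovered the object start         */

  /* after old_copy has been "forwarded" to new_copy: */
  p_ref = (void**)((uintptr_t)&new_copy + offset);
  assert(p_ref == &new_copy.referent);            /* same field, new copy               */
  return 0;
}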
Example #6
static void resurrect_finalizable_objects(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *finalizable_obj_pool = metadata->finalizable_obj_pool;
  
  if(finalizable_obj_pool_is_empty(gc))
    return;
  
  DURING_RESURRECTION = TRUE;
  
  pool_iterator_init(finalizable_obj_pool);
  Vector_Block *block = pool_iterator_next(finalizable_obj_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      
      /* Perhaps obj has been resurrected by previous resurrections */
      if(!gc_obj_is_dead(gc, p_obj)){
        if(collect_is_minor() && obj_need_move(gc, p_obj))
          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
        continue;
      }
      
      resurrect_obj_tree(collector, p_ref);
      if(collector->result == FALSE){
        /* Resurrection fallback happens */
        assert(collect_is_minor());
        return; /* force return */
      }
    }
    
    block = pool_iterator_next(finalizable_obj_pool);
  }
  
  /* In major & fallback & sweep-compact collections we need to record the p_ref of the root dead obj so it can be updated later.
   * Because it is outside the heap, we can't update it during ref fixing.
   * In minor collections the p_ref of the root dead obj is updated automatically while tracing.
   */
  if(collect_need_update_repset())
    finref_add_repset_from_pool(gc, finalizable_obj_pool);
  metadata->pending_finalizers = TRUE;
  
  DURING_RESURRECTION = FALSE;
  
  /* finalizable objs have been added to the finref repset pool or updated by tracing */
}
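
How the forced return is likely consumed: the caller inspects the collector's result flag and retries the collection in fallback mode, where finalizable_objs_fallback() (Example #3) pushes the finalizable objects back into the obj_with_fin pool. This control-flow sketch uses my own stand-in names, not DRLVM's collection driver.

#include <stdio.h>

static int resurrect_all(int forwarding_fails) {
  if (forwarding_fails)
    return 0;                 /* cf. collector->result == FALSE: force return */
  return 1;
}

int main(void) {
  if (!resurrect_all(1)) {
    /* resurrection fallback: rerun as a fallback collection; finalizable
       objects go back to the obj_with_fin pool (Example #3) */
    printf("resurrection fallback triggered\n");
  }
  return 0;
}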
Example #7
void lspace_sliding_compact(Collector* collector, Lspace* lspace)
{
  unsigned int iterate_index = 0;
  Partial_Reveal_Object* p_obj; 
  POINTER_SIZE_INT last_one = (POINTER_SIZE_INT)lspace->heap_start;
  
  p_obj = lspace_get_first_marked_object(lspace, &iterate_index);
  if(!LOS_ADJUST_BOUNDARY)
    lspace->last_surviving_size=0;
  
  if(!p_obj) return;
  

  while( p_obj ){
    assert( obj_is_marked_in_vt(p_obj));
#ifdef USE_32BITS_HASHCODE
    obj_clear_dual_bits_in_vt(p_obj); 
#else
    obj_unmark_in_vt(p_obj);
#endif
    
    unsigned int obj_size = vm_object_size(p_obj);
#ifdef USE_32BITS_HASHCODE 
    obj_size += (obj_is_sethash_in_vt(p_obj))?GC_OBJECT_ALIGNMENT:0;    
#endif
    Partial_Reveal_Object *p_target_obj = obj_get_fw_in_oi(p_obj);
    POINTER_SIZE_INT target_obj_end = (POINTER_SIZE_INT)p_target_obj + obj_size;
    last_one = target_obj_end;
    if( p_obj != p_target_obj){
      memmove(p_target_obj, p_obj, obj_size);
    }
    set_obj_info(p_target_obj, 0);
    p_obj = lspace_get_next_marked_object(lspace, &iterate_index);  
  }

  if(!LOS_ADJUST_BOUNDARY)
    lspace->last_surviving_size = ALIGN_UP_TO_KILO(last_one) - (POINTER_SIZE_INT)lspace->heap_start;

  return;
}
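
A note on the USE_32BITS_HASHCODE size adjustment above: once an object's identity hash has been materialized, one extra aligned word rides past the normal object end and must be moved together with the object. A trivial stand-alone model of that size computation (the constant and predicate are assumptions for illustration):

#include <stdio.h>

#define GC_OBJECT_ALIGNMENT 8   /* assumed value, for illustration only */

static unsigned moved_size(unsigned vm_object_size, int hash_is_set) {
  /* cf. obj_size += obj_is_sethash_in_vt(p_obj) ? GC_OBJECT_ALIGNMENT : 0 */
  return vm_object_size + (hash_is_set ? GC_OBJECT_ALIGNMENT : 0);
}

int main(void) {
  printf("plain object:  %u bytes to move\n", moved_size(24, 0));
  printf("hashed object: %u bytes to move\n", moved_size(24, 1));
  return 0;
}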
Example #8
static void identify_finalizable_objects(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *obj_with_fin_pool = metadata->obj_with_fin_pool;
  
  gc_reset_finalizable_objects(gc);
  pool_iterator_init(obj_with_fin_pool);
  Vector_Block *block = pool_iterator_next(obj_with_fin_pool);
  while(block){
    unsigned int block_has_ref = 0;
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      if(collect_is_fallback())
        fallback_update_fw_ref(p_ref);  // in case this collection is ALGO_MAJOR_FALLBACK
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      if(!p_obj)
        continue;
      if(gc_obj_is_dead(gc, p_obj)){
        gc_add_finalizable_obj(gc, p_obj);
        *p_ref = (REF)NULL;
      } else {
        if(collect_is_minor() && obj_need_move(gc, p_obj)){
          assert(obj_is_fw_in_oi(p_obj));
          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
        }
        ++block_has_ref;
      }
    }
    if(!block_has_ref)
      vector_block_clear(block);
    
    block = pool_iterator_next(obj_with_fin_pool);
  }
  gc_put_finalizable_objects(gc);
  
  if(collect_need_update_repset())
    finref_add_repset_from_pool(gc, obj_with_fin_pool);
}
Example #9
static void update_referent_field_ignore_finref(GC *gc, Pool *pool)
{
  Vector_Block *block = pool_get_entry(pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);
        
      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);      
      if(!p_referent){  // referent field has been cleared
        *p_ref = (REF)NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent))
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else {
            finref_repset_add_entry(gc, p_referent_field);
          }
        *p_ref = (REF)NULL;
        continue;
      }
      *p_referent_field = (REF)NULL; /* referent is weakly reachable: clear the referent field */
    }
    block = pool_get_entry(pool);
  }
}
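
Note the iteration style: this function drains the pool with pool_get_entry(), unlike the nondestructive pool_iterator_init()/pool_iterator_next() walk of Example #2. A tiny model of the contrast with my own stack-like type (not DRLVM's Pool):

#include <stdio.h>

#define CAP 4
typedef struct Pool { int blocks[CAP]; int top; } Pool;

static int pool_get_entry(Pool *p) { return p->top ? p->blocks[--p->top] : 0; } /* pops  */
static int pool_peek(Pool *p, int i) { return i < p->top ? p->blocks[i] : 0; }  /* reads */

int main(void) {
  Pool p = { {10, 20, 30}, 3 };
  for (int i = 0; pool_peek(&p, i); i++)        /* nondestructive walk  */
    printf("peek %d\n", pool_peek(&p, i));
  int b;
  while ((b = pool_get_entry(&p)))              /* destructive drain    */
    printf("pop %d\n", b);
  printf("pool now holds %d blocks\n", p.top);  /* 0: the pool is empty */
  return 0;
}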
Example #10
/* Move compaction needs special treatment when updating referent fields */
static inline void move_compaction_update_ref(GC *gc, REF *p_ref)
{
  /* There are only two kinds of p_ref being added into finref_repset_pool:
   * 1. p_ref is in a vector block from one finref pool;
   * 2. p_ref is a referent field.
   * So if p_ref belongs to heap, it must be a referent field pointer.
 * Resurrected objects, except the tree root, need not be recorded in finref_repset_pool.
   */
  if(address_belongs_to_gc_heap(p_ref, gc) && (p_ref >= los_boundary)){
    unsigned int offset = get_gc_referent_offset();
    Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_ref - offset);
    Partial_Reveal_Object *p_new_ref = obj_get_fw_in_table(p_old_ref);
    p_ref = (REF*)((POINTER_SIZE_INT)p_new_ref + offset);
  }
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  assert(space_of_addr(gc, p_obj)->move_object);
  
  if(p_obj < los_boundary)
    p_obj = obj_get_fw_in_oi(p_obj);
  else
    p_obj = obj_get_fw_in_table(p_obj);

  write_slot(p_ref, p_obj);
}
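
The los_boundary split above selects between two forwarding schemes: below the boundary the new address sits in the object header (as in Example #1's sketch), above it the new address comes from a side table, presumably because bulk move-compaction can overwrite headers before all refs are fixed (my reading, not a statement from the source). A toy side table with linear lookup; DRLVM's obj_get_fw_in_table() is certainly a smarter structure:

#include <assert.h>
#include <stddef.h>

#define TABLE_CAP 16
static void *table_old[TABLE_CAP], *table_new[TABLE_CAP];
static int table_used;

static void table_record(void *old_addr, void *new_addr) {
  table_old[table_used] = old_addr;
  table_new[table_used] = new_addr;
  table_used++;
}

static void *table_lookup(void *old_addr) {
  for (int i = 0; i < table_used; i++)
    if (table_old[i] == old_addr) return table_new[i];
  return NULL;
}

int main(void) {
  int big_old, big_new;                       /* stand-ins for two object locations */
  table_record(&big_old, &big_new);
  assert(table_lookup(&big_old) == &big_new); /* cf. obj_get_fw_in_table(p_obj)     */
  return 0;
}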
Example #11
/*
 * The reason why we don't use identify_dead_refs() to implement this function is
 * that we will differentiate phanref from weakref in the future.
 */
static void identify_dead_phanrefs(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *phanref_pool = metadata->phanref_pool;
  
  if(collect_need_update_repset())
    finref_reset_repset(gc);
//  collector_reset_repset(collector);
  pool_iterator_init(phanref_pool);
  Vector_Block *block = pool_iterator_next(phanref_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
      Partial_Reveal_Object *p_obj = read_slot((REF*)p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);

      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);      
      if(!p_referent){  // referent field has been cleared
        *p_ref = NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else { // a collect_move_object() check would be redundant here because obj_need_move() already performs it
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
        continue;
      }
      *p_referent_field = (REF)NULL;
#ifdef ORDER_DEBUG
      if(ref_file == NULL){
        if(order_record){
          ref_file = fopen64("RECORD_REF_LOG.log", "w+");
        } else {
          ref_file = fopen64("REPLAY_REF_LOG.log", "w+");
        }
      }
      assert(ref_file);
      fprintf(ref_file, "GC[%d]: ref (%d, %d) is DEAD!\n", gc->num_collections, p_referent->alloc_tid, p_referent->alloc_count);
      fflush(ref_file);
#endif
      /* Phantom status: for future use
       * if((unsigned int)p_referent & PHANTOM_REF_ENQUEUE_STATUS_MASK){
       *   // enqueued but not explicitly cleared OR pending for enqueueing
       *   *iter = NULL;
       * }
       * resurrect_obj_tree(collector, p_referent_field);
       */
    }
    block = pool_iterator_next(phanref_pool);
  }
//  collector_put_repset(collector);
  if(collect_need_update_repset()){
    finref_put_repset(gc);
    finref_add_repset_from_pool(gc, phanref_pool);
  }
}
Example #12
static void identify_dead_refs(GC *gc, Pool *pool)
{
  if(collect_need_update_repset())
    finref_reset_repset(gc);

  pool_iterator_init(pool);
  Vector_Block *block = pool_iterator_next(pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);
        
      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
      
      if(!p_referent){  
        /* referent field has been cleared. I forgot why we set p_ref to NULL here. 
           I guess it's because this ref_obj was already processed through another p_ref, so
           there is no need to keep the same ref_obj in this p_ref. */
        *p_ref = (REF)NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            /* In gen mode, if the referent stays in NOS, we need to keep p_referent_field in the collector remset.
               This keeps the ref obj live in the next gen-mode collection even though it may actually be only weakly reachable. 
               This simplifies the design; otherwise we would need to remember the ref objs in MOS separately and process them separately. */
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else { // a collect_move_object() check would be redundant here because obj_need_move() already performs it
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
      }else{
        /* the referent is dead (weakly reachable): clear the referent field */
        *p_referent_field = (REF)NULL;
#ifdef ORDER_DEBUG
        if(ref_file == NULL){
          if(order_record){
            ref_file = fopen64("RECORD_REF_LOG.log", "w+");
          } else {
            ref_file = fopen64("REPLAY_REF_LOG.log", "w+");
          }
        }
        assert(ref_file);
        fprintf(ref_file, "GC[%d]: ref (%d, %d) is DEAD!\n", gc->num_collections, p_referent->alloc_tid, p_referent->alloc_count);
        fflush(ref_file);
#endif
        /* for a dead referent, p_ref is not set to NULL: p_ref keeps the ref object, which
           will be passed to the VM for enqueueing. */
      }
    }/* for each ref object */
    
    block = pool_iterator_next(pool);
  }
  
  if(collect_need_update_repset()){
    finref_put_repset(gc);
    finref_add_repset_from_pool(gc, pool);
  }
}
Example #13
static FORCE_INLINE void scan_object(Heap_Verifier* heap_verifier, Partial_Reveal_Object *p_obj) 
{
  GC_Verifier* gc_verifier = heap_verifier->gc_verifier;

#if !defined(USE_UNIQUE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
  if(gc_verifier->is_before_fallback_collection) {
    if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
      assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj)));
      p_obj = obj_get_fw_in_oi(p_obj);
      assert(p_obj);
    }
  }
#endif
  
  if(!obj_mark_in_vt(p_obj)) return;

  if( !major_is_marksweep() && p_obj >= los_boundary ){
    Block_Header* block = GC_BLOCK_HEADER(p_obj);
    if( heap_verifier->is_before_gc)  block->num_live_objs++;
    /* we can't set block->num_live_objs = 0 if !is_before_gc, because some blocks may be freed and hence not
        visited after GC. So we reset it in the GC space reset functions instead. */
  }

  verify_object_header(p_obj, heap_verifier); 
  verifier_update_verify_info(p_obj, heap_verifier);

   /*FIXME: */
  if (!object_has_ref_field(p_obj)) return;
    
  REF* p_ref;

  if (object_is_array(p_obj)) {  
  
    Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj;
    unsigned int array_length = array->array_len; 
    p_ref = (REF*)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));

    for (unsigned int i = 0; i < array_length; i++) {
      scan_slot(heap_verifier, p_ref+i);
    }   

  }else{ 
    
    unsigned int num_refs = object_ref_field_num(p_obj);
    int* ref_iterator = object_ref_iterator_init(p_obj);
 
    for(unsigned int i=0; i<num_refs; i++){  
      p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);  
      scan_slot(heap_verifier, p_ref);
    }

#ifndef BUILD_IN_REFERENT
    WeakReferenceType type = special_reference_type(p_obj);
    if(type == SOFT_REFERENCE && verifier_collect_is_minor(gc_verifier)){
      p_ref = obj_get_referent_field(p_obj);
      scan_slot(heap_verifier, p_ref);
    } 
#endif  
  }
  return;
}
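
The non-array branch walks reference fields through an iterator supplied by the class. A self-contained model of that pattern, with the ref fields described as an array of byte offsets; this mirrors object_ref_iterator_init()/object_ref_iterator_get() only in spirit, and DRLVM's actual encoding differs.

#include <stddef.h>
#include <stdio.h>

typedef struct Node { void *vt; int len; struct Node *left, *right; } Node;

/* "class info": byte offsets of the reference fields inside Node */
static const size_t node_ref_offsets[] = { offsetof(Node, left), offsetof(Node, right) };

int main(void) {
  Node n = { NULL, 0, &n, NULL };       /* left points at itself, right is null */
  size_t num_refs = sizeof(node_ref_offsets) / sizeof(node_ref_offsets[0]);
  for (size_t i = 0; i < num_refs; i++) {
    void **p_ref = (void**)((char*)&n + node_ref_offsets[i]);
    printf("ref field %zu holds %p\n", i, *p_ref);   /* scan_slot() would act here */
  }
  return 0;
}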
Example #14
static FORCE_INLINE void forward_object(Collector* collector, REF *p_ref) 
{
  GC* gc = collector->gc;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);

  if(obj_belongs_to_tospace(p_obj)) return;
    
  if(!obj_belongs_to_nos(p_obj)){
    if(obj_mark_in_oi(p_obj)){
#ifdef GC_GEN_STATS
      if(gc_profile){
        GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
        gc_gen_collector_update_marked_nonnos_obj_stats_minor(stats);
      }
#endif
      scan_object(collector, p_obj);
    }
    return;
  }

  Partial_Reveal_Object* p_target_obj = NULL;
  /* Fastpath: object has already been forwarded, update the ref slot */
  if(obj_is_fw_in_oi(p_obj)) {
    p_target_obj = obj_get_fw_in_oi(p_obj);
    assert(p_target_obj);
    write_slot(p_ref, p_target_obj);
    return;
  }

  /* following is the logic for forwarding */  
  p_target_obj = collector_forward_object(collector, p_obj);
  
  /* if p_target_obj is NULL, the object has been forwarded by another thread. 
      We can implement collector_forward_object() so that the forwarding pointer 
      is set in the atomic instruction, which requires rolling back the mos_alloced
      space. That is easy for thread-local block allocation cancellation. */
  if( p_target_obj == NULL ){
    if(collector->result == FALSE ){
      /* failed to forward, let's get back to controller. */
      vector_stack_clear(collector->trace_stack);
      return;
    }

    p_target_obj = obj_get_fw_in_oi(p_obj);
    assert(p_target_obj);
    write_slot(p_ref, p_target_obj);
    return;
  }
  /* otherwise, we successfully forwarded */

#ifdef GC_GEN_STATS
  if(gc_profile){
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
    gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
    gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
  }
#endif
  write_slot(p_ref, p_target_obj);

  scan_object(collector, p_target_obj); 
  return;
}
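
The reason only one collector ever scans a given object: marking/forwarding is claimed atomically on the object-info word, and losers take the "already forwarded" path. A minimal sketch of that claim protocol in C11 atomics, reusing the assumed FW-bit encoding from Example #1's sketch:

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#define FW_BIT ((uintptr_t)0x1)

/* Try to install a forwarding pointer; returns 1 iff this thread won the race. */
static int try_forward(_Atomic uintptr_t *oi, uintptr_t new_addr) {
  uintptr_t old = atomic_load(oi);
  if (old & FW_BIT) return 0;                       /* already claimed by another   */
  return atomic_compare_exchange_strong(oi, &old, new_addr | FW_BIT);
}

int main(void) {
  _Atomic uintptr_t oi = 0;
  uintptr_t copy_a = 0x1000, copy_b = 0x2000;       /* two competing target copies  */
  assert(try_forward(&oi, copy_a) == 1);            /* first claim succeeds         */
  assert(try_forward(&oi, copy_b) == 0);            /* second claim loses           */
  assert((atomic_load(&oi) & ~FW_BIT) == copy_a);   /* winner's copy is the one kept */
  return 0;
}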
Example #15
static FORCE_INLINE void forward_object(Collector *collector, REF *p_ref) 
{
  Space* space = collector->collect_space; 
  GC* gc = collector->gc;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);

  /* p_obj can also be in tospace, because this p_ref may be a redundant entry in the mutator remset. 
     We don't remember p_ref again because it was remembered the first time it was met. 
     FIXME:: the situation obj_belongs_to_tospace() should never be true if we
     remembered objects rather than slots. Currently, mutators remember objects, and
     collectors remember slots. Although collectors remember slots, we are sure 
     there is no chance of a repeated p_ref, because an object is scanned only
     when it is marked or forwarded atomically, hence only one collector has the chance
     to do the scanning. */   
  if(!obj_belongs_to_nos(p_obj) || obj_belongs_to_tospace(p_obj)) return; 

  Partial_Reveal_Object* p_target_obj = NULL;
  Boolean to_rem_slot = FALSE;

  /* Fastpath: object has already been forwarded, update the ref slot */
  if(obj_is_fw_in_oi(p_obj)){
    p_target_obj = obj_get_fw_in_oi(p_obj);
    write_slot(p_ref, p_target_obj);

    /* check if the target obj stays in NOS, and p_ref from MOS. If yes, rem p_ref. */
    if(obj_belongs_to_tospace(p_target_obj))
      if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
        collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref); 

    return; 
  }  
    
  /* following is the logic for forwarding */  
  p_target_obj = collector_forward_object(collector, p_obj);
  
  /* if p_target_obj is NULL, the object has been forwarded by another thread. 
      Note: there is a race condition here: it might have been forwarded by another thread
      that has not yet set the forwarding pointer. We need to spin here until the forwarding pointer is set. 
      We can implement collector_forward_object() so that the forwarding pointer 
      is set in the atomic instruction, which requires rolling back the mos_alloced
      space. That is easy for thread-local block allocation cancellation. */
  if( p_target_obj == NULL ){
    if(collector->result == FALSE ){
      /* failed to forward, let's get back to controller. */
      vector_stack_clear(collector->trace_stack);
      return;
    }
    /* forwarded already*/
    p_target_obj = obj_get_fw_in_oi(p_obj);
  
  }else{  /* otherwise, we successfully forwarded */

#ifdef GC_GEN_STATS
  if(gc_profile){
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
    gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
    gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
  }
#endif

    scan_object(collector, p_target_obj);
  }
  
  assert(p_target_obj);
  write_slot(p_ref, p_target_obj);
  
  /* check if the target obj stays in NOS, and p_ref from MOS. If yes, rem p_ref. */
  if(obj_belongs_to_tospace(p_target_obj)){
    if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
      collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref); 
  }
   
  return;
}
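
The race this version's comment calls out: a loser may see the object claimed before the winner has published the forwarding pointer, so it must spin until the pointer becomes visible. A sketch under the same assumed FW-bit encoding as above; in this single-threaded demo the pointer is published before the spin, so the loop exits at once.

#include <assert.h>
#include <stdatomic.h>
#include <stdint.h>

#define FW_BIT ((uintptr_t)0x1)

static uintptr_t spin_get_fw(_Atomic uintptr_t *oi) {
  uintptr_t v;
  do {
    v = atomic_load_explicit(oi, memory_order_acquire);
  } while (!(v & FW_BIT));                /* wait until the winner publishes */
  return v & ~FW_BIT;
}

int main(void) {
  _Atomic uintptr_t oi = 0;
  atomic_store_explicit(&oi, 0x3000 | FW_BIT, memory_order_release);  /* winner publishes */
  assert(spin_get_fw(&oi) == 0x3000);                                 /* loser retrieves  */
  return 0;
}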