Example #1
void lspace_compute_object_target(Collector* collector, Lspace* lspace)
{
  void* dest_addr = lspace->heap_start;
  unsigned int iterate_index = 0;
  Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &iterate_index);
  	
  assert(!collector->rem_set);
  collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
#ifdef USE_32BITS_HASHCODE  
  collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata);
#endif
  
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif
  while( p_obj ){
   
    assert( obj_is_marked_in_vt(p_obj));
    unsigned int obj_size = vm_object_size(p_obj);
#ifdef GC_GEN_STATS
  gc_gen_collector_update_moved_los_obj_stats_major(stats, vm_object_size(p_obj));
#endif
    assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)lspace->heap_end);
#ifdef USE_32BITS_HASHCODE 
    obj_size += hashcode_is_attached(p_obj)? GC_OBJECT_ALIGNMENT : 0 ;
    Obj_Info_Type obj_info = slide_compact_process_hashcode(p_obj, dest_addr, &obj_size, collector, NULL, NULL);
#else
    Obj_Info_Type obj_info = get_obj_info_raw(p_obj);
#endif
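    /* A non-zero obj_info (hash or lock bits) is about to be overwritten by the forwarding
       pointer below, so remember the (new address, original obj_info) pair in the collector
       remset; the saved info is restored into the moved object after compaction. */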

    if( obj_info != 0 ) {
      collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
      collector_remset_add_entry(collector, (Partial_Reveal_Object **)(POINTER_SIZE_INT)obj_info);
    }
      
    obj_set_fw_in_oi(p_obj, dest_addr);
    dest_addr = (void *)ALIGN_UP_TO_KILO(((POINTER_SIZE_INT) dest_addr + obj_size));
    p_obj = lspace_get_next_marked_object(lspace, &iterate_index);
  }

  pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set);
  collector->rem_set = NULL;
#ifdef USE_32BITS_HASHCODE 
  pool_put_entry(collector->gc->metadata->collector_hashcode_pool, collector->hashcode_set);
  collector->hashcode_set = NULL;
#endif
  
  lspace->scompact_fa_start = dest_addr;
  lspace->scompact_fa_end = lspace->heap_end;
  return;
}
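
For reference, the kilobyte-alignment helpers used throughout these examples behave roughly as sketched below. This is a hedged sketch, assuming KB is 1024 and POINTER_SIZE_INT is an unsigned integer wide enough to hold a pointer; the real macros are defined in the GC headers.

/* Minimal sketch of the alignment helpers assumed above (illustrative, not the project's definitions). */
#define ALIGN_UP_TO_KILO(addr)   ((((POINTER_SIZE_INT)(addr)) + (KB - 1)) & ~((POINTER_SIZE_INT)(KB - 1)))
#define ALIGN_DOWN_TO_KILO(addr) (((POINTER_SIZE_INT)(addr)) & ~((POINTER_SIZE_INT)(KB - 1)))
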
Example #2
inline Partial_Reveal_Object* lspace_get_next_object( Space* lspace, POINTER_SIZE_INT* & next_area_start){
  POINTER_SIZE_INT* ret_obj = NULL;
  
  while(((POINTER_SIZE_INT)next_area_start < (POINTER_SIZE_INT)lspace->heap_end) && !*next_area_start){
    next_area_start = (POINTER_SIZE_INT*)((POINTER_SIZE_INT)next_area_start + ((Free_Area*)next_area_start)->size);
  }
  if((POINTER_SIZE_INT)next_area_start < (POINTER_SIZE_INT)lspace->heap_end){
    ret_obj = next_area_start;
    unsigned int hash_extend_size = 0;
#ifdef USE_32BITS_HASHCODE
    hash_extend_size = hashcode_is_attached((Partial_Reveal_Object*)next_area_start) ? GC_OBJECT_ALIGNMENT : 0;
#endif
    POINTER_SIZE_INT obj_size = ALIGN_UP_TO_KILO(vm_object_size((Partial_Reveal_Object*)next_area_start) + hash_extend_size);
    assert(obj_size);
    next_area_start = (POINTER_SIZE_INT*)((POINTER_SIZE_INT)next_area_start + obj_size);
    return (Partial_Reveal_Object*)ret_obj;
  }else{
    return NULL;
  } 
}
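
As a usage sketch, a caller can walk every live object in the large object space with this iterator. The helper below is illustrative only (lspace_visit_live_objects is not part of the original sources); it assumes iteration starts at heap_start and that free areas keep a zero first word, exactly as the loop above relies on.

/* Hedged sketch: visit every live object in the lspace. */
static void lspace_visit_live_objects(Space* lspace, void (*visit)(Partial_Reveal_Object*))
{
  POINTER_SIZE_INT* next_area_start = (POINTER_SIZE_INT*)lspace->heap_start;
  Partial_Reveal_Object* p_obj = lspace_get_next_object(lspace, next_area_start);
  while(p_obj){
    visit(p_obj);
    p_obj = lspace_get_next_object(lspace, next_area_start);
  }
}
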
Example #3
void lspace_sliding_compact(Collector* collector, Lspace* lspace)
{
  unsigned int iterate_index = 0;
  Partial_Reveal_Object* p_obj; 
  POINTER_SIZE_INT last_one=(POINTER_SIZE_INT) lspace->heap_start;
  
  
  p_obj = lspace_get_first_marked_object(lspace, &iterate_index);
  if(!LOS_ADJUST_BOUNDARY)
    lspace->last_surviving_size=0;
  
  if(!p_obj) return;
  

  while( p_obj ){
    assert( obj_is_marked_in_vt(p_obj));
#ifdef USE_32BITS_HASHCODE
    obj_clear_dual_bits_in_vt(p_obj); 
#else
    obj_unmark_in_vt(p_obj);
#endif
    
    unsigned int obj_size = vm_object_size(p_obj);
#ifdef USE_32BITS_HASHCODE 
    obj_size += (obj_is_sethash_in_vt(p_obj))?GC_OBJECT_ALIGNMENT:0;    
#endif
    Partial_Reveal_Object *p_target_obj = obj_get_fw_in_oi(p_obj);
    POINTER_SIZE_INT target_obj_end = (POINTER_SIZE_INT)p_target_obj + obj_size;
    last_one = target_obj_end;
    if( p_obj != p_target_obj){
      memmove(p_target_obj, p_obj, obj_size);
    }
    set_obj_info(p_target_obj, 0);
    p_obj = lspace_get_next_marked_object(lspace, &iterate_index);  
  }

  if(!LOS_ADJUST_BOUNDARY)
    lspace->last_surviving_size = ALIGN_UP_TO_KILO(last_one) - (POINTER_SIZE_INT)lspace->heap_start;

  return;
}
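
The two functions above form a two-pass sliding compaction of the large object space: lspace_compute_object_target() installs a forwarding pointer in each marked object's header, and lspace_sliding_compact() later moves the objects and clears their headers. The driver below is a hedged sketch of that ordering, not the original controller code; in the real collector a reference-fixing pass runs between the two passes, repointing every slot that refers into the lspace.

/* Hypothetical driver illustrating the required pass order. */
static void lspace_slide_compact_space(Collector* collector, Lspace* lspace)
{
  /* Pass 1: assign each marked object its destination address (stored as a forwarding pointer). */
  lspace_compute_object_target(collector, lspace);

  /* ... a reference-fixing pass runs here in the real collector ... */

  /* Pass 2: memmove each object to its destination and clear its object info. */
  lspace_sliding_compact(collector, lspace);
}
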
Example #4
// Resurrect the obj tree whose root is the obj which p_ref points to
static inline void resurrect_obj_tree(Collector *collector, REF *p_ref)
{
  GC *gc = collector->gc;
  GC_Metadata *metadata = gc->metadata;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  assert(p_obj && gc_obj_is_dead(gc, p_obj));
  
  void *p_ref_or_obj = p_ref;
  Trace_Object_Func trace_object;
  
  /* set trace_object() function */
  if(collect_is_minor()){
    if(gc_is_gen_mode()){
      if(minor_is_forward())
        trace_object = trace_obj_in_gen_fw;
      else if(minor_is_semispace())
        trace_object = trace_obj_in_gen_ss;
      else 
        assert(0);
    }else{
      if(minor_is_forward())
        trace_object = trace_obj_in_nongen_fw;
      else if(minor_is_semispace())
        trace_object = trace_obj_in_nongen_ss;
      else 
        assert(0);
    }
  } else if(collect_is_major_normal() || !gc_has_nos()){
    p_ref_or_obj = p_obj;
    if(gc_has_space_tuner(gc) && (gc->tuner->kind != TRANS_NOTHING)){
      trace_object = trace_obj_in_space_tune_marking;
      unsigned int obj_size = vm_object_size(p_obj);
#ifdef USE_32BITS_HASHCODE
      obj_size += hashcode_is_set(p_obj) ? GC_OBJECT_ALIGNMENT : 0;
#endif
      if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc))){
        collector->non_los_live_obj_size += obj_size;
        collector->segment_live_size[SIZE_TO_SEGMENT_INDEX(obj_size)] += obj_size;
      } else {
        collector->los_live_obj_size += round_up_to_size(obj_size, KB); 
      }
    } else if(!gc_has_nos()){
      trace_object = trace_obj_in_ms_marking;
    } else {
      trace_object = trace_obj_in_normal_marking;
    }
  } else if(collect_is_fallback()){
    if(major_is_marksweep())
      trace_object = trace_obj_in_ms_fallback_marking;
    else
      trace_object = trace_obj_in_fallback_marking;
  } else {
    assert(major_is_marksweep());
    p_ref_or_obj = p_obj;
    if( gc->gc_concurrent_status == GC_CON_NIL )
      trace_object = trace_obj_in_ms_marking;
    else
      trace_object = trace_obj_in_ms_concurrent_mark;
  }
  
  collector->trace_stack = free_task_pool_get_entry(metadata);
  collector_tracestack_push(collector, p_ref_or_obj);
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  collector->trace_stack = free_task_pool_get_entry(metadata);
  Vector_Block *task_block = pool_get_entry(metadata->mark_task_pool);
  while(task_block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(task_block);
    while(!vector_block_iterator_end(task_block, iter)){
      void *p_ref_or_obj = (void*)*iter;
      assert(((collect_is_minor()||collect_is_fallback()) && *(Partial_Reveal_Object **)p_ref_or_obj)
              || ((collect_is_major_normal()||major_is_marksweep()||!gc_has_nos()) && p_ref_or_obj));
      trace_object(collector, p_ref_or_obj);
      if(collector->result == FALSE)  break; /* Resurrection fallback happens; force return */
      
      iter = vector_block_iterator_advance(task_block, iter);
    }
    vector_stack_clear(task_block);
    pool_put_entry(metadata->free_task_pool, task_block);
    
    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }
    
    task_block = pool_get_entry(metadata->mark_task_pool);
  }
  
  task_block = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(task_block);
  pool_put_entry(metadata->free_task_pool, task_block);
  collector->trace_stack = NULL;
}
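
A hedged sketch of one way this function could be driven: walk a block of reference slots (for example, finalizable-object entries) and resurrect the object tree under every referent that is still dead. The helper name and the loop are illustrative, not the original resurrection driver; only the iterator and slot helpers it uses appear in the code above.

/* Hypothetical caller: resurrect every dead referent recorded in a vector block. */
static void resurrect_dead_entries(Collector* collector, Vector_Block* block)
{
  GC* gc = collector->gc;
  POINTER_SIZE_INT* iter = vector_block_iterator_init(block);
  while(!vector_block_iterator_end(block, iter)){
    REF* p_ref = (REF*)iter;
    Partial_Reveal_Object* p_obj = read_slot(p_ref);
    if(p_obj && gc_obj_is_dead(gc, p_obj))
      resurrect_obj_tree(collector, p_ref);
    iter = vector_block_iterator_advance(block, iter);
  }
}
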
Example #5
void lspace_sweep(Lspace* lspace)
{
  TRACE2("gc.process", "GC: lspace sweep algo start ...\n");

#ifdef GC_GEN_STATS
  GC_Gen_Stats* stats = ((GC_Gen*)lspace->gc)->stats;
  gc_gen_stats_set_los_collected_flag((GC_Gen*)lspace->gc, true);
#endif
  unsigned int mark_bit_idx = 0;
  POINTER_SIZE_INT cur_size = 0;
  void *cur_area_start, *cur_area_end;

  free_area_pool_reset(lspace->free_pool);

  Partial_Reveal_Object* p_prev_obj = (Partial_Reveal_Object *)lspace->heap_start;
  Partial_Reveal_Object* p_next_obj = lspace_get_first_marked_object_by_oi(lspace, &mark_bit_idx);
  if(p_next_obj){
//    obj_unmark_in_vt(p_next_obj);
    /*Fixme: This might not be necessary, for there is a bit clearing operation in forward_object->obj_mark_in_oi*/
    obj_clear_dual_bits_in_oi(p_next_obj);
    /* For statistics: sum up the size of surviving large objects, useful for deciding LOS extension. */
    unsigned int obj_size = vm_object_size(p_next_obj);
#ifdef USE_32BITS_HASHCODE
    obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0;
#endif
    lspace->last_surviving_size += ALIGN_UP_TO_KILO(obj_size);    
#ifdef GC_GEN_STATS
    stats->los_suviving_obj_num++;
    stats->los_suviving_obj_size += obj_size;
#endif
  }

  cur_area_start = (void*)ALIGN_UP_TO_KILO(p_prev_obj);
  cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj);
  unsigned int hash_extend_size = 0;

  Free_Area* cur_area = NULL;
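  /* Each free area is the gap between the kilo-aligned end of the previous surviving object
     (or the space start) and the kilo-aligned start of the next marked object. Worked example
     with illustrative addresses: a previous object ending at 0x5432 and a next marked object
     starting at 0x7890 give cur_area_start = 0x5800 and cur_area_end = 0x7800, i.e. a
     0x2000-byte (8 KB) free area. A gap that rounds down to zero produces no area; since both
     ends are kilo-aligned, any non-zero gap is at least 1 KB (hence the assert below). */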
  while(cur_area_end){
    cur_area = NULL;
    cur_size = (POINTER_SIZE_INT)cur_area_end - (POINTER_SIZE_INT)cur_area_start;
      
    if(cur_size){
      //debug
      assert(cur_size >= KB);
      cur_area = free_area_new(cur_area_start, cur_size);
      if( cur_area ) free_pool_add_area(lspace->free_pool, cur_area);
    }
    /* successfully create an area */

    p_prev_obj = p_next_obj;
    p_next_obj = lspace_get_next_marked_object_by_oi(lspace, &mark_bit_idx);
    if(p_next_obj){
//      obj_unmark_in_vt(p_next_obj);
      obj_clear_dual_bits_in_oi(p_next_obj);
      /* For statistics: sum up the size of surviving large objects, useful for deciding LOS extension. */
      unsigned int obj_size = vm_object_size(p_next_obj);
#ifdef USE_32BITS_HASHCODE
      obj_size += (hashcode_is_attached(p_next_obj))?GC_OBJECT_ALIGNMENT:0;
#endif
      lspace->last_surviving_size += ALIGN_UP_TO_KILO(obj_size);
#ifdef GC_GEN_STATS
      stats->los_suviving_obj_num++;
      stats->los_suviving_obj_size += obj_size;
#endif
    }

#ifdef USE_32BITS_HASHCODE
    hash_extend_size  = (hashcode_is_attached((Partial_Reveal_Object*)p_prev_obj))?GC_OBJECT_ALIGNMENT:0;
#endif
    cur_area_start = (void*)ALIGN_UP_TO_KILO((POINTER_SIZE_INT)p_prev_obj + vm_object_size(p_prev_obj) + hash_extend_size);
    cur_area_end = (void*)ALIGN_DOWN_TO_KILO(p_next_obj);
    
  }

  /* cur_area_end == NULL */
  cur_area_end = (void*)ALIGN_DOWN_TO_KILO(lspace->heap_end);
  cur_size = (POINTER_SIZE_INT)cur_area_end - (POINTER_SIZE_INT)cur_area_start;
  if(cur_size){
    //debug
    assert(cur_size >= KB);
    cur_area = free_area_new(cur_area_start, cur_size);
    if( cur_area ) free_pool_add_area(lspace->free_pool, cur_area);
  }  

  mark_bit_idx = 0;
  assert(!lspace_get_first_marked_object(lspace, &mark_bit_idx));

  TRACE2("gc.process", "GC: end of lspace sweep algo ...\n");
  return;
}
Example #6
static void mspace_move_objects(Collector* collector, Mspace* mspace)
{
  Block_Header* curr_block = collector->cur_compact_block;
  Block_Header* dest_block = collector->cur_target_block;
  Block_Header *local_last_dest = dest_block;

  void* dest_sector_addr = dest_block->base;
  Boolean is_fallback = collect_is_fallback();
  
#ifdef USE_32BITS_HASHCODE
  Hashcode_Buf* old_hashcode_buf = NULL;
  Hashcode_Buf* new_hashcode_buf = hashcode_buf_create();
  hashcode_buf_init(new_hashcode_buf);
#endif  

#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

  unsigned int debug_num_live_obj = 0;

  while( curr_block ){

    if(verify_live_heap){ 
      atomic_inc32(&debug_num_compact_blocks);
      debug_num_live_obj = 0;
    }
    
    void* start_pos;
    Partial_Reveal_Object* p_obj = block_get_first_marked_object(curr_block, &start_pos);

    if( !p_obj ){
#ifdef USE_32BITS_HASHCODE
      hashcode_buf_clear(curr_block->hashcode_buf);
#endif
      assert(!verify_live_heap || debug_num_live_obj == curr_block->num_live_objs);
      curr_block = mspace_get_next_compact_block(collector, mspace);
      continue;    
    }
    
    int curr_sector = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj);
    void* src_sector_addr = p_obj;
          
    while( p_obj ){
      debug_num_live_obj++;
      assert( obj_is_marked_in_vt(p_obj));
      /* we don't check if it's set, since only non-forwarded objs from last NOS partial-forward collection need it. */
      obj_clear_dual_bits_in_oi(p_obj); 

#ifdef GC_GEN_STATS
      gc_gen_collector_update_moved_nos_mos_obj_stats_major(stats, vm_object_size(p_obj));
#endif

#ifdef USE_32BITS_HASHCODE
      move_compact_process_hashcode(p_obj, curr_block->hashcode_buf, new_hashcode_buf);
#endif 
      
      POINTER_SIZE_INT curr_sector_size = (POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)src_sector_addr;

      /* Check whether the dest block has enough room left for this sector; if not, grab the next one. */
      POINTER_SIZE_INT block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block);
      if( ((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) > block_end ){
        dest_block->new_free = dest_sector_addr; 
#ifdef USE_32BITS_HASHCODE
        block_swap_hashcode_buf(dest_block, &new_hashcode_buf, &old_hashcode_buf);
#endif        
        dest_block = mspace_get_next_target_block(collector, mspace);
        if(dest_block == NULL){ 
#ifdef USE_32BITS_HASHCODE
          hashcode_buf_rollback_new_entry(old_hashcode_buf);
#endif
          collector->result = FALSE; 
          return; 
        }
#ifdef USE_32BITS_HASHCODE
        hashcode_buf_transfer_new_entry(old_hashcode_buf, new_hashcode_buf);
#endif 
        if((!local_last_dest) || (dest_block->block_idx > local_last_dest->block_idx))
          local_last_dest = dest_block;
        block_end = (POINTER_SIZE_INT)GC_BLOCK_END(dest_block);
        dest_sector_addr = dest_block->base;
      }
        
      assert(((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size) <= block_end );

      Partial_Reveal_Object *last_obj_end = (Partial_Reveal_Object *)start_pos;
      /* check if next live object is out of current sector. If not, loop back to continue within this sector. FIXME:: we should add a condition for block check (?) */      
      p_obj =  block_get_next_marked_object(curr_block, &start_pos);
      if ((p_obj != NULL) && (OBJECT_INDEX_TO_OFFSET_TABLE(p_obj) == curr_sector)) {
        if(last_obj_end != p_obj) obj_set_vt_to_next_obj(last_obj_end, p_obj);
        continue;
      }

      /* current sector is done, let's move it. */
      POINTER_SIZE_INT sector_distance = (POINTER_SIZE_INT)src_sector_addr - (POINTER_SIZE_INT)dest_sector_addr;
      assert((sector_distance % GC_OBJECT_ALIGNMENT) == 0);
      /* If sector_distance is zero we don't move anything, but since the block offset table is never cleared, we still have to store the 0 into it. */
      curr_block->table[curr_sector] = sector_distance;

      if(sector_distance != 0) 
        memmove(dest_sector_addr, src_sector_addr, curr_sector_size);

#ifdef USE_32BITS_HASHCODE
      hashcode_buf_refresh_new_entry(new_hashcode_buf, sector_distance);
#endif

      dest_sector_addr = (void*)((POINTER_SIZE_INT)dest_sector_addr + curr_sector_size);
      src_sector_addr = p_obj;
      curr_sector  = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj);
    }
#ifdef USE_32BITS_HASHCODE
    hashcode_buf_clear(curr_block->hashcode_buf);
#endif
    assert(!verify_live_heap || debug_num_live_obj == curr_block->num_live_objs);
    curr_block = mspace_get_next_compact_block(collector, mspace);
  }
    
  dest_block->new_free = dest_sector_addr;
  collector->cur_target_block = local_last_dest;
 
#ifdef USE_32BITS_HASHCODE
  old_hashcode_buf = block_set_hashcode_buf(dest_block, new_hashcode_buf);
  hashcode_buf_destory(old_hashcode_buf);
#endif
  return;
}
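
The per-sector distances recorded in curr_block->table[] are what later reference fixing uses to compute an object's new address: the stored value is src_sector_addr - dest_sector_addr for the sector the object belonged to. The helper below is a hedged sketch of that lookup (the name offset_table_get_new_addr is illustrative, not the original function).

/* Hypothetical sketch: recover the post-compaction address of an object from its block's offset table. */
static inline Partial_Reveal_Object* offset_table_get_new_addr(Block_Header* block, Partial_Reveal_Object* p_obj)
{
  unsigned int sector = OBJECT_INDEX_TO_OFFSET_TABLE(p_obj);
  return (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_obj - block->table[sector]);
}
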
Example #7
static FORCE_INLINE void forward_object(Collector* collector, REF *p_ref) 
{
  GC* gc = collector->gc;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);

  if(obj_belongs_to_tospace(p_obj)) return;
    
  if(!obj_belongs_to_nos(p_obj)){
    if(obj_mark_in_oi(p_obj)){
#ifdef GC_GEN_STATS
      if(gc_profile){
        GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
        gc_gen_collector_update_marked_nonnos_obj_stats_minor(stats);
      }
#endif
      scan_object(collector, p_obj);
    }
    return;
  }

  Partial_Reveal_Object* p_target_obj = NULL;
  /* Fastpath: object has already been forwarded, update the ref slot */
  if(obj_is_fw_in_oi(p_obj)) {
    p_target_obj = obj_get_fw_in_oi(p_obj);
    assert(p_target_obj);
    write_slot(p_ref, p_target_obj);
    return;
  }

  /* following is the logic for forwarding */  
  p_target_obj = collector_forward_object(collector, p_obj);
  
  /* If p_target_obj is NULL, the object has been forwarded by another thread.
      We could implement collector_forward_object() so that the forwarding pointer
      is set in the atomic instruction, which requires rolling back the mos_alloced
      space. That is easy for thread-local block allocation cancellation. */
  if( p_target_obj == NULL ){
    if(collector->result == FALSE ){
      /* failed to forward, let's get back to controller. */
      vector_stack_clear(collector->trace_stack);
      return;
    }

    p_target_obj = obj_get_fw_in_oi(p_obj);
    assert(p_target_obj);
    write_slot(p_ref, p_target_obj);
    return;
  }
  /* otherwise, we successfully forwarded */

#ifdef GC_GEN_STATS
  if(gc_profile){
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
    gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
    gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
  }
#endif
  write_slot(p_ref, p_target_obj);

  scan_object(collector, p_target_obj); 
  return;
}
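
For context, here is a hedged sketch of the kind of loop that drives forward_object(): scan_object() pushes the copied object's reference slots onto collector->trace_stack, and the collector keeps forwarding until the stack drains. The helper name and the vector_stack_is_empty()/vector_stack_pop() accessors are assumptions for illustration; the real trace loop also exchanges full blocks with the shared task pools.

/* Hypothetical trace-stack drain loop (illustrative only). */
static void trace_until_empty(Collector* collector)
{
  while(!vector_stack_is_empty((Vector_Block*)collector->trace_stack)){
    REF* p_ref = (REF*)vector_stack_pop((Vector_Block*)collector->trace_stack);
    forward_object(collector, p_ref);
  }
}
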
Example #8
static FORCE_INLINE void forward_object(Collector *collector, REF *p_ref) 
{
  Space* space = collector->collect_space; 
  GC* gc = collector->gc;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);

  /* p_obj can also be in tospace because this p_ref is a redundant entry in the mutator
     remset. We don't remember p_ref again because it was remembered the first time it was met.
     FIXME:: obj_belongs_to_tospace() should never be true if we remembered objects rather
     than slots. Currently, the mutator remembers objects and the collector remembers slots.
     Even though collectors remember slots, there is no chance of a repeated p_ref, because
     an object is scanned only when it is marked or forwarded atomically, hence only one
     collector has the chance to do the scanning. */
  if(!obj_belongs_to_nos(p_obj) || obj_belongs_to_tospace(p_obj)) return; 

  Partial_Reveal_Object* p_target_obj = NULL;
  Boolean to_rem_slot = FALSE;

  /* Fastpath: object has already been forwarded, update the ref slot */
  if(obj_is_fw_in_oi(p_obj)){
    p_target_obj = obj_get_fw_in_oi(p_obj);
    write_slot(p_ref, p_target_obj);

    /* Check whether the target object stays in NOS (tospace) while p_ref lives in the heap outside NOS (e.g. MOS); if so, remember p_ref. */
    if(obj_belongs_to_tospace(p_target_obj))
      if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
        collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref); 

    return; 
  }  
    
  /* following is the logic for forwarding */  
  p_target_obj = collector_forward_object(collector, p_obj);
  
  /* If p_target_obj is NULL, the object has been forwarded by another thread.
      Note: there is a race condition here; it might have been forwarded by another
      thread that has not yet set the forwarding pointer, so we need to spin to get
      the forwarding pointer. We could implement collector_forward_object() so that
      the forwarding pointer is set in the atomic instruction, which requires rolling
      back the mos_alloced space. That is easy for thread-local block allocation
      cancellation. */
  if( p_target_obj == NULL ){
    if(collector->result == FALSE ){
      /* failed to forward, let's get back to controller. */
      vector_stack_clear(collector->trace_stack);
      return;
    }
    /* forwarded already*/
    p_target_obj = obj_get_fw_in_oi(p_obj);
  
  }else{  /* otherwise, we successfully forwarded */

#ifdef GC_GEN_STATS
  if(gc_profile){
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
    gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
    gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
  }
#endif

    scan_object(collector, p_target_obj);
  }
  
  assert(p_target_obj);
  write_slot(p_ref, p_target_obj);
  
  /* Check whether the target object stays in NOS (tospace) while p_ref lives in the heap outside NOS (e.g. MOS); if so, remember p_ref. */
  if(obj_belongs_to_tospace(p_target_obj)){
    if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
      collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref); 
  }
   
  return;
}
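
Both forward_object() variants rely on read_slot()/write_slot() to load and store references. The sketch below shows one plausible shape for these accessors, under the assumption that REF may be a compressed, heap-base-relative reference; COMPRESS_REFERENCE, HEAP_BASE and the exact encoding are assumptions for illustration, not the project's actual definitions.

/* Hedged sketch of the slot accessors assumed by these examples. */
FORCE_INLINE Partial_Reveal_Object* read_slot(REF* p_ref)
{
#ifdef COMPRESS_REFERENCE
  REF ref = *p_ref;                 /* assumed: 32-bit offset from HEAP_BASE, 0 means NULL */
  return ref ? (Partial_Reveal_Object*)(HEAP_BASE + ref) : NULL;
#else
  return (Partial_Reveal_Object*)*p_ref;
#endif
}

FORCE_INLINE void write_slot(REF* p_ref, Partial_Reveal_Object* p_obj)
{
#ifdef COMPRESS_REFERENCE
  *p_ref = p_obj ? (REF)((POINTER_SIZE_INT)p_obj - HEAP_BASE) : (REF)0;
#else
  *p_ref = (REF)p_obj;
#endif
}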