/* Scan one slot: if it references a live, not-yet-marked object, mark the
   object and push it on the collector's trace stack. */
static FORCE_INLINE void scan_slot(Collector* collector, REF *p_ref)
{
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  if( p_obj == NULL) return;

  if(obj_mark_in_vt(p_obj)){
    collector_tracestack_push(collector, p_obj);
#ifdef GC_GEN_STATS
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
    gc_gen_collector_update_marked_obj_stats_major(stats);
#endif
  }
  
  return;
}
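/* Scan one object's reference fields: notify the VM the object is alive,
   trace its java.lang.Class via the vtable if enabled, then scan every
   reference slot (array elements or object ref fields). */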
static FORCE_INLINE void scan_object(Collector* collector, Partial_Reveal_Object *p_obj)
{
  vm_notify_obj_alive( (void *)p_obj);
  
  assert((((POINTER_SIZE_INT)p_obj) % GC_OBJECT_ALIGNMENT) == 0);
  
  /* Trace the object's java.lang.Class lazily: the first time this vtable is
     seen, mark it and push its jlC object if not already marked. */
  Partial_Reveal_VTable *vtable = decode_vt(obj_get_vt(p_obj));
  if(TRACE_JLC_VIA_VTABLE){
    if(vtable->vtmark == VT_UNMARKED) {
      vtable->vtmark = VT_MARKED;
      if(obj_mark_in_vt(vtable->jlC))
        collector_tracestack_push(collector, vtable->jlC);
    }
  }
  
  if( !object_has_ref_field(p_obj) ) return;
  
  REF *p_ref;

  if (object_is_array(p_obj)) {   /* scan array object */
  
    Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj;
    unsigned int array_length = array->array_len;
  
    p_ref = (REF *)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));

    for (unsigned int i = 0; i < array_length; i++) {
      scan_slot(collector, p_ref+i);
    }   

  }else{ /* scan non-array object */
    
    unsigned int num_refs = object_ref_field_num(p_obj);    
    int* ref_iterator = object_ref_iterator_init(p_obj);
    
    for(unsigned int i=0; i<num_refs; i++){  
      p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);  
      scan_slot(collector, p_ref);
    }    

#ifndef BUILD_IN_REFERENT
    scan_weak_reference(collector, p_obj, scan_slot);
#endif
  }
  
  return;
}
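/* A second scan_object variant, used by the heap verifier: it follows
   forwarding pointers before a fallback collection, counts live objects per
   block, verifies the object header, and then scans reference slots much
   like the collector version above. */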
static FORCE_INLINE void scan_object(Heap_Verifier* heap_verifier, Partial_Reveal_Object *p_obj) 
{
  GC_Verifier* gc_verifier = heap_verifier->gc_verifier;

  /* Before a fallback collection, an object in NOS may already have been
     forwarded; if so, verify its forwarding copy instead. */
#if !defined(USE_UNIQUE_MARK_SWEEP_GC) && !defined(USE_UNIQUE_MOVE_COMPACT_GC)
  if(gc_verifier->is_before_fallback_collection) {
    if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
      assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj)));
      p_obj = obj_get_fw_in_oi(p_obj);
      assert(p_obj);
    }
  }
#endif
  
  if(!obj_mark_in_vt(p_obj)) return;

  if( !major_is_marksweep() && p_obj >= los_boundary ){
    Block_Header* block = GC_BLOCK_HEADER(p_obj);
    if( heap_verifier->is_before_gc)  block->num_live_objs++;
    /* we can't set block->num_live_objs = 0 when !is_before_gc, because some
       blocks may be freed and hence never visited after the GC; instead it is
       reset in the GC space reset functions. */
  }

  verify_object_header(p_obj, heap_verifier); 
  verifier_update_verify_info(p_obj, heap_verifier);

   /*FIXME: */
  if (!object_has_ref_field(p_obj)) return;
    
  REF* p_ref;

  if (object_is_array(p_obj)) {  
  
    Partial_Reveal_Array* array = (Partial_Reveal_Array*)p_obj;
    unsigned int array_length = array->array_len; 
    p_ref = (REF*)((POINTER_SIZE_INT)array + (int)array_first_element_offset(array));

    for (unsigned int i = 0; i < array_length; i++) {
      scan_slot(heap_verifier, p_ref+i);
    }   

  }else{ 
    
    unsigned int num_refs = object_ref_field_num(p_obj);
    int* ref_iterator = object_ref_iterator_init(p_obj);
 
    for(unsigned int i=0; i<num_refs; i++){  
      p_ref = object_ref_iterator_get(ref_iterator+i, p_obj);  
      scan_slot(heap_verifier, p_ref);
    }

#ifndef BUILD_IN_REFERENT
    WeakReferenceType type = special_reference_type(p_obj);
    if(type == SOFT_REFERENCE && verifier_collect_is_minor(gc_verifier)){
      p_ref = obj_get_referent_field(p_obj);
      scan_slot(heap_verifier, p_ref);
    }
#endif  
  }
  return;
}
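/* Normal-marking entry point: mark the object in its vtable, then trace
   everything reachable from it. */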
void trace_obj_in_normal_marking(Collector *collector, void *p_obj)
{
  obj_mark_in_vt((Partial_Reveal_Object*)p_obj);
  trace_object(collector, (Partial_Reveal_Object *)p_obj);
}
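/* Parallel mark-scan: each collector first drains its share of the root set
   into mark tasks, then repeatedly grabs tasks from the shared pool and
   traces objects until all collectors agree the pool is empty. */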
void mark_scan_pool(Collector* collector)
{
  GC* gc = collector->gc;
  GC_Metadata* metadata = gc->metadata;
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

  /* One collector resets num_finished_collectors to 0; this is required for
     the termination barrier below. */
  unsigned int num_active_collectors = gc->num_active_collectors;
  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
   
  collector->trace_stack = free_task_pool_get_entry(metadata);

  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects into mark tasks.
      FIXME:: this could be done sequentially before coming here, to eliminate the atomic ops */
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(root_set,iter);

      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      /* a root ref can't be NULL (a remset may have NULL ref entries, but
         this function is only used for ALGO_MAJOR) */
      assert(p_obj!=NULL);
      /* we have to mark the object before putting it into the mark task,
         because two slots may contain the same object; it would then be
         scanned twice and its ref slots recorded twice. The problem shows up
         when a ref slot is first updated with the object's new position, but
         the second scan still finds the old position in the slot. This could
         be worked around if we wanted to. */
      if(obj_mark_in_vt(p_obj)){
        collector_tracestack_push(collector, p_obj);
#ifdef GC_GEN_STATS
        gc_gen_collector_update_rootset_ref_num(stats);
        gc_gen_collector_update_marked_obj_stats_major(stats);
#endif
      }

    } 
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  /* second step: iterate over the mark tasks and scan objects */
  /* get a task buf for the mark stack */
  collector->trace_stack = free_task_pool_get_entry(metadata);

retry:
  Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
  
  while(mark_task){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(mark_task);
    while(!vector_block_iterator_end(mark_task,iter)){
      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(mark_task,iter);

      /* FIXME:: we should not let mark_task run empty while we are working;
         other collectors may want to steal from it. Degenerate my stack into
         mark_task and grab another mark_task. */
      trace_object(collector, p_obj);
    } 
    /* this task is drained: return it to the free pool and grab another */
   vector_stack_clear(mark_task);
   pool_put_entry(metadata->free_task_pool, mark_task);
   mark_task = pool_get_entry(metadata->mark_task_pool);      
  }
  
  /* termination detection; this also acts as a barrier.
     NOTE:: we could simply spin on num_finished_collectors, because every
     newly generated task is guaranteed to be processed eventually by the
     collector that generated it; the rebalancing below is only a
     load-balancing optimization. */
  atomic_inc32(&num_finished_collectors);
  while(num_finished_collectors != num_active_collectors){
    if( !pool_is_empty(metadata->mark_task_pool)){
      atomic_dec32(&num_finished_collectors);
      goto retry;  
    }
  }
     
  /* put back the last mark stack to the free pool */
  mark_task = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(mark_task);
  pool_put_entry(metadata->free_task_pool, mark_task);   
  collector->trace_stack = NULL;
  
  return;
}
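The termination detection at the end of mark_scan_pool() is a reusable pattern for any parallel drain loop over a shared task pool. The sketch below restates just that pattern with C11 atomics; it is a minimal illustration, not part of the GC code above, and the names worker_drain_loop, drain_tasks, task_pool_is_empty, and NUM_WORKERS are hypothetical stand-ins.

/* Minimal sketch of the termination-barrier pattern from mark_scan_pool(),
   rewritten with C11 atomics. All names here are hypothetical. */
#include <stdatomic.h>
#include <stdbool.h>

#define NUM_WORKERS 4u                 /* hypothetical fixed worker count */

static atomic_uint num_finished;       /* workers currently at the barrier */

extern void drain_tasks(void);         /* hypothetical: process tasks until the pool looks empty */
extern bool task_pool_is_empty(void);  /* hypothetical: check the shared pool */

static void worker_drain_loop(void)
{
  bool done = false;
  while(!done){
    drain_tasks();

    /* Announce completion, then spin at the barrier. If new tasks appear
       while other workers are still running, withdraw and help drain. */
    atomic_fetch_add(&num_finished, 1u);
    done = true;
    while(atomic_load(&num_finished) != NUM_WORKERS){
      if(!task_pool_is_empty()){
        atomic_fetch_sub(&num_finished, 1u);
        done = false;
        break;
      }
    }
  }
}

As in the original, simply spinning would be correct on its own; re-entering the drain loop when the pool is non-empty only improves load balance.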