Example No. 1
static FORCE_INLINE void scan_slot(Collector *collector, REF *p_ref) 
{
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  if( p_obj == NULL) return;
    
  /* the slot can be in tspace or fspace, we don't care. In gen mode,
     we care only if the reference in the slot is pointing to nos */
  if (obj_belongs_to_nos(p_obj))
    collector_tracestack_push(collector, p_ref); 

  return;
}
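Most of the GC examples in this list pair read_slot() with write_slot(). A minimal sketch of what the pair is assumed to do here, taking REF as a plain pointer-sized slot type (the real collector may store compressed references, in which case these helpers would also encode/decode them):

static inline Partial_Reveal_Object *read_slot(REF *p_ref)
{
  /* decode the slot contents into a raw object pointer */
  return (Partial_Reveal_Object*)*p_ref;
}

static inline void write_slot(REF *p_ref, Partial_Reveal_Object *p_obj)
{
  /* store the object pointer back into the slot */
  *p_ref = (REF)p_obj;
}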
Example No. 2
uint8_t onewire_readbyte()
{
    uint8_t value = 0;
    uint8_t bit;
    for(bit = 0; bit < 8; ++bit)
        {
            // Values come in LSB first
            value |= (read_slot() << bit);
        }

    return value;
}
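The comment above notes that the bits arrive LSB first. For comparison, a hedged sketch of the complementary byte write, assuming a hypothetical onewire_writebit() primitive that drives one bus time slot (it is not part of the example above):

#include <stdint.h>

extern void onewire_writebit(uint8_t bit);   /* hypothetical bus primitive */

void onewire_writebyte(uint8_t value)
{
    uint8_t bit;
    for(bit = 0; bit < 8; ++bit)
        {
            // Shift the byte out LSB first, one bit per time slot
            onewire_writebit((value >> bit) & 0x01);
        }
}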
Example No. 3
static inline void fallback_update_fw_ref(REF *p_ref)
{
  assert(collect_is_fallback());
  
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  if(obj_belongs_to_nos(p_obj) && obj_is_fw_in_oi(p_obj)){
    assert(!obj_is_marked_in_vt(p_obj));
    assert(obj_get_vt(p_obj) == obj_get_vt(obj_get_fw_in_oi(p_obj)));
    p_obj = obj_get_fw_in_oi(p_obj);
    assert(p_obj);
    write_slot(p_ref, p_obj);
  }
}
Example No. 4
static void update_referent_field_ignore_finref(GC *gc, Pool *pool)
{
  Vector_Block *block = pool_get_entry(pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);
        
      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);      
      if(!p_referent){  // referent field has been cleared
        *p_ref = (REF)NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent))
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else {
            finref_repset_add_entry(gc, p_referent_field);
          }
        *p_ref = (REF)NULL;
        continue;
      }
      *p_referent_field = (REF)NULL; /* referent is weakly reachable: clear the referent field */
    }
    block = pool_get_entry(pool);
  }
}
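The loop above follows the pool/vector-block iteration idiom shared by most of the GC examples in this list. A stripped-down skeleton of that idiom, with names taken from the examples and the per-slot work left as a placeholder; note that pool_get_entry() consumes blocks, while examples that must preserve the pool use pool_iterator_init()/pool_iterator_next() instead:

static void visit_all_slots(Pool *pool)
{
  Vector_Block *block = pool_get_entry(pool);   /* pop one block at a time */
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;                  /* each entry is treated as a REF slot */
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      /* ... process p_obj here ... */
    }
    block = pool_get_entry(pool);               /* continue until the pool is drained */
  }
}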
Example No. 5
static FORCE_INLINE void scan_slot(Collector* collector, REF *p_ref)
{
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  if( p_obj == NULL) return;

  if(obj_mark_in_vt(p_obj)){
    collector_tracestack_push(collector, p_obj);
#ifdef GC_GEN_STATS
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
    gc_gen_collector_update_marked_obj_stats_major(stats);
#endif
  }
  
  return;
}
Example No. 6
static void resurrect_finalizable_objects(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *finalizable_obj_pool = metadata->finalizable_obj_pool;
  
  if(finalizable_obj_pool_is_empty(gc))
    return;
  
  DURING_RESURRECTION = TRUE;
  
  pool_iterator_init(finalizable_obj_pool);
  Vector_Block *block = pool_iterator_next(finalizable_obj_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      
      /* Perhaps obj has been resurrected by previous resurrections */
      if(!gc_obj_is_dead(gc, p_obj)){
        if(collect_is_minor() && obj_need_move(gc, p_obj))
          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
        continue;
      }
      
      resurrect_obj_tree(collector, p_ref);
      if(collector->result == FALSE){
        /* Resurrection fallback happens */
        assert(collect_is_minor());
        return; /* force return */
      }
    }
    
    block = pool_iterator_next(finalizable_obj_pool);
  }
  
  /* In major & fallback & sweep-compact collections we need to record p_ref of the root dead obj to update it later.
   * Because it is outside heap, we can't update it in ref fixing.
   * In minor collection p_ref of the root dead obj is automatically updated while tracing.
   */
  if(collect_need_update_repset())
    finref_add_repset_from_pool(gc, finalizable_obj_pool);
  metadata->pending_finalizers = TRUE;
  
  DURING_RESURRECTION = FALSE;
  
  /* finalizable objs have been added to the finref repset pool or updated by tracing */
}
Example No. 7
static void gc_object_write_barrier(Managed_Object_Handle p_object) 
{
  
  if( addr_belongs_to_nos(p_object)) return;

  Mutator *mutator = (Mutator *)gc_get_tls();
  
  REF* p_slot; 
  /* scan array object */
  if (object_is_array((Partial_Reveal_Object*)p_object)) {
    Partial_Reveal_Object* array = (Partial_Reveal_Object*)p_object;
    assert(!obj_is_primitive_array(array));
    
    I_32 array_length = vector_get_length((Vector_Handle) array);
    for (int i = 0; i < array_length; i++) {
      p_slot = (REF*)vector_get_element_address_ref((Vector_Handle) array, i);
      if( read_slot(p_slot) != NULL && addr_belongs_to_nos(read_slot(p_slot))){
        mutator_remset_add_entry(mutator, p_slot);
      }
    }   
    return;
  }

  /* scan non-array object */
  Partial_Reveal_Object* p_obj =  (Partial_Reveal_Object*)p_object;   
  unsigned int num_refs = object_ref_field_num(p_obj);
  int *ref_iterator = object_ref_iterator_init(p_obj);
            
  for(unsigned int i=0; i<num_refs; i++){
    p_slot = object_ref_iterator_get(ref_iterator+i, p_obj);        
    if( addr_belongs_to_nos(read_slot(p_slot))){
      mutator_remset_add_entry(mutator, p_slot);
    }
  }

  return;
}
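A hedged illustration of when a VM might invoke a barrier like this: after bulk-updating an object's reference fields (for example when cloning an object), the whole object is re-scanned so that any new references into NOS are remembered. vm_clone_object_fields() and copy_object_fields() below are hypothetical names, not part of the example:

void vm_clone_object_fields(Managed_Object_Handle dst, Managed_Object_Handle src)
{
  copy_object_fields(dst, src);        /* hypothetical field-by-field copy */
  if(gc_is_gen_mode())
    gc_object_write_barrier(dst);      /* remember dst's references into NOS */
}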
Example No. 8
static inline void
add_or_change_slot_parent(node_t *node, node_t *parent, vtree_pthread_data_t *vtree_data)
{
	vtree_slot_t *slot;

	slot = get_slot_from_rec_new(node, vtree_data);

	if (slot)
		slot->parent = change_node_parent(slot->parent, parent);
	else {
		slot = read_slot(node, vtree_data);
		add_slot(node, slot->child[0], slot->child[1],
		         change_node_parent(slot->parent, parent),
		         vtree_data);
	}
}
Example No. 9
static void finref_add_repset_from_pool(GC *gc, Pool *pool)
{
  finref_reset_repset(gc);
  pool_iterator_init(pool);
  Vector_Block *block = pool_iterator_next(pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      if(*p_ref && obj_need_move(gc, p_obj))
        finref_repset_add_entry(gc, p_ref);
    }
    block = pool_iterator_next(pool);
  }
  finref_put_repset(gc);
}
Example No. 10
void verifier_trace_objsets(Heap_Verifier* heap_verifier, Pool* obj_set_pool)
{
  Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
  GC_Verifier* gc_verifier = heap_verifier->gc_verifier;
  gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool);
  pool_iterator_init(obj_set_pool);
  Vector_Block* obj_set = pool_iterator_next(obj_set_pool);
  /* first step: copy all root objects to trace tasks. */ 
  while(obj_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(obj_set);
    while(!vector_block_iterator_end(obj_set,iter)){
      Partial_Reveal_Object* p_obj = read_slot((REF*)iter);
      iter = vector_block_iterator_advance(obj_set,iter);
      /* p_obj can be NULL: when a GC has happened, the obj in the finalizable objs list may have been cleared. */
      //assert(p_obj != NULL);  
      if(p_obj == NULL) continue;
      if(heap_verifier->gc_is_gen_mode && heap_verifier->is_before_gc && !obj_belongs_to_nos(p_obj)) continue;
      verifier_tracestack_push(p_obj, gc_verifier->trace_stack);
    } 
    obj_set = pool_iterator_next(obj_set_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(verifier_metadata->mark_task_pool, gc_verifier->trace_stack);
  
  /* second step: iterate over the trace tasks and forward objects */
  gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool);

  Vector_Block* trace_task = pool_get_entry(verifier_metadata->mark_task_pool);

  while(trace_task){    
    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
    while(!vector_block_iterator_end(trace_task,iter)){
      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object* )*iter;
      iter = vector_block_iterator_advance(trace_task,iter);
      trace_object(heap_verifier, p_obj); 
    }
    vector_stack_clear(trace_task);
    pool_put_entry(verifier_metadata->free_task_pool, trace_task);
    trace_task = pool_get_entry(verifier_metadata->mark_task_pool);
  }
  vector_stack_clear(gc_verifier->trace_stack);
  pool_put_entry(verifier_metadata->free_task_pool, gc_verifier->trace_stack);
  gc_verifier->trace_stack = NULL;

}
Example No. 11
static inline void put_dead_weak_refs_to_vm(GC *gc, Pool *ref_pool)
{
  Pool *free_pool = gc->finref_metadata->free_pool;
  
  Vector_Block *block = pool_get_entry(ref_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    while(!vector_block_iterator_end(block, iter)){
      Managed_Object_Handle p_obj = (Managed_Object_Handle)read_slot((REF*)iter);
      if(p_obj)
        vm_enqueue_reference(p_obj);
      iter = vector_block_iterator_advance(block, iter);
    }
    vector_block_clear(block);
    pool_put_entry(free_pool, block);
    block = pool_get_entry(ref_pool);
  }
}
Example No. 12
int main(int argc, char **argv)
{
	if (argc < 3 || 
			(strcmp(argv[1], "read") && strcmp(argv[1], "write"))) {
		printf("Usage: %s read|write mailslot\n", argv[0]);
		return 1;
	}

	if (!strcmp(argv[1], "read")) {
		return read_slot(argv[2]);
	}

	if (!strcmp(argv[1], "write")) {
		return write_slot(argv[2]);
	}

	return 0;
}
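In this example read_slot()/write_slot() have nothing to do with the GC helpers above: they read from and write to a Windows mailslot. A hedged sketch of what the read side might look like (the real read_slot() in this program may differ):

#include <windows.h>
#include <stdio.h>

static int read_slot(const char *name)
{
	char path[256];
	char buf[512];
	DWORD bytes_read;
	HANDLE h;

	/* the reader creates the mailslot and blocks until a message arrives */
	snprintf(path, sizeof(path), "\\\\.\\mailslot\\%s", name);
	h = CreateMailslotA(path, 0, MAILSLOT_WAIT_FOREVER, NULL);
	if (h == INVALID_HANDLE_VALUE)
		return 1;

	if (ReadFile(h, buf, sizeof(buf) - 1, &bytes_read, NULL)) {
		buf[bytes_read] = '\0';
		printf("%s\n", buf);
	}
	CloseHandle(h);
	return 0;
}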
Example No. 13
static void put_finalizable_obj_to_vm(GC *gc)
{
  Pool *finalizable_obj_pool = gc->finref_metadata->finalizable_obj_pool;
  Pool *free_pool = gc->finref_metadata->free_pool;
  
  Vector_Block *block = pool_get_entry(finalizable_obj_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    while(!vector_block_iterator_end(block, iter)){
      assert(*iter);
      Managed_Object_Handle p_obj = (Managed_Object_Handle)read_slot((REF*)iter);
      vm_finalize_object(p_obj);
      iter = vector_block_iterator_advance(block, iter);
    }
    vector_block_clear(block);
    pool_put_entry(free_pool, block);
    block = pool_get_entry(finalizable_obj_pool);
  }
}
Example No. 14
/* In two cases mark-sweep needs fixing repointed refs:
 * 1. ms with compaction
 * 2. ms as a mos collection algorithm
 */
static inline void moving_mark_sweep_update_ref(GC *gc, REF *p_ref, Boolean double_fix)
{
  /* There are only two kinds of p_ref being added into finref_repset_pool:
   * 1. p_ref is in a vector block from one finref pool;
   * 2. p_ref is a referent field.
   * So if p_ref belongs to heap, it must be a referent field pointer.
   * Resurrected objects other than the tree root need not be recorded in finref_repset_pool.
   */
  if(address_belongs_to_gc_heap((void*)p_ref, gc)){
    unsigned int offset = get_gc_referent_offset();
    Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_ref - offset);
    if(obj_is_fw_in_oi(p_old_ref)){
      Partial_Reveal_Object *p_new_ref = obj_get_fw_in_oi(p_old_ref);
      /* Only major collection in MS Gen GC might need double_fix.
       * Double fixing happens when both forwarding and compaction happen.
       */
      if(double_fix && obj_is_fw_in_oi(p_new_ref)){
        assert(major_is_marksweep());
        p_new_ref = obj_get_fw_in_oi(p_new_ref);
        assert(address_belongs_to_gc_heap(p_new_ref, gc));
      }
      p_ref = (REF*)((POINTER_SIZE_INT)p_new_ref + offset);
    }
  }
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  /* assert(obj_need_move(gc, p_obj));
   * This assertion is commented out because it assert(!obj_is_dead(gc, p_obj)).
   * When gc_fix_rootset is invoked, mark bit and alloc bit have been flipped in Mark-Sweep,
   * so this assertion will fail.
   * But p_obj here certainly must be one that needs moving.
   */
  p_obj = obj_get_fw_in_oi(p_obj);
  /* Only major collection in MS Gen GC might need double_fix.
   * Double fixing happens when both forwarding and compaction happen.
   */
  if(double_fix && obj_is_fw_in_oi(p_obj)){
    assert(major_is_marksweep());
    p_obj = obj_get_fw_in_oi(p_obj);
    assert(address_belongs_to_gc_heap(p_obj, gc));
  }
  write_slot(p_ref, p_obj);
}
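The comments above rely on the fact that a referent-field pointer is simply the owning reference object's address plus a fixed offset. A small sketch of that arithmetic, assuming the owning object has already been forwarded so that obj_get_fw_in_oi() yields its new location:

static inline REF *referent_field_after_move(REF *p_referent_field)
{
  unsigned int offset = get_gc_referent_offset();
  /* recover the owning reference object from its referent-field address */
  Partial_Reveal_Object *p_owner = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_referent_field - offset);
  /* re-derive the field address inside the forwarded copy */
  Partial_Reveal_Object *p_new_owner = obj_get_fw_in_oi(p_owner);
  return (REF*)((POINTER_SIZE_INT)p_new_owner + offset);
}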
Example No. 15
static void trace_object(Collector *collector, REF *p_ref)
{ 
  forward_object(collector, p_ref);

  Vector_Block* trace_stack = (Vector_Block*)collector->trace_stack;
  while( !vector_stack_is_empty(trace_stack)){
    p_ref = (REF *)vector_stack_pop(trace_stack); 
#ifdef PREFETCH_SUPPORTED
    /* DO PREFETCH */
   if(mark_prefetch) {
     if(!vector_stack_is_empty(trace_stack)) {
        REF *pref = (REF*)vector_stack_read(trace_stack, 0);
        PREFETCH( read_slot(pref) );
     }
   }
#endif    
    forward_object(collector, p_ref);
    trace_stack = (Vector_Block*)collector->trace_stack;
  }
  return; 
}
Example No. 16
static void identify_finalizable_objects(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *obj_with_fin_pool = metadata->obj_with_fin_pool;
  
  gc_reset_finalizable_objects(gc);
  pool_iterator_init(obj_with_fin_pool);
  Vector_Block *block = pool_iterator_next(obj_with_fin_pool);
  while(block){
    unsigned int block_has_ref = 0;
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      if(collect_is_fallback())
        fallback_update_fw_ref(p_ref);  // in case that this collection is ALGO_MAJOR_FALLBACK
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      if(!p_obj)
        continue;
      if(gc_obj_is_dead(gc, p_obj)){
        gc_add_finalizable_obj(gc, p_obj);
        *p_ref = (REF)NULL;
      } else {
        if(collect_is_minor() && obj_need_move(gc, p_obj)){
          assert(obj_is_fw_in_oi(p_obj));
          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
        }
        ++block_has_ref;
      }
    }
    if(!block_has_ref)
      vector_block_clear(block);
    
    block = pool_iterator_next(obj_with_fin_pool);
  }
  gc_put_finalizable_objects(gc);
  
  if(collect_need_update_repset())
    finref_add_repset_from_pool(gc, obj_with_fin_pool);
}
Example No. 17
/*
 * rot_right
 *
 *      A              B
 *     / \            / \
 *    B   C   ===>   D   A
 *   / \                / \
 *  D   E              E   C
 *
 */
static void
rot_right(node_t *root, node_t *nodeA, vtree_slot_t *slotA, vtree_pthread_data_t *vtree_data)
{
	node_t *nodeB, *nodeE, *parent;
	vtree_slot_t *slotB;
	int slotB_old;

	nodeB = slotA->child[0];
	if ((slotB = get_slot_from_rec_new(nodeB, vtree_data)) == NULL) {
		slotB_old = 1;
		slotB = read_slot(nodeB, vtree_data);
	} else
		slotB_old = 0;
	if ((nodeE = slotB->child[1]) != NULL)
		add_or_change_slot_parent(nodeE, nodeA, vtree_data);
	parent = get_node_parent(slotA->parent);
	if (!parent)
		add_or_change_slot(root, nodeB, NULL, NULL, vtree_data);
	else
		add_or_change_slot_child(parent, nodeA, nodeB, vtree_data);

	if (get_slot_from_rec_new(nodeA, vtree_data) == NULL) {
		add_slot(nodeA, nodeE, slotA->child[1],
		         change_node_parent(slotA->parent, nodeB),
		         vtree_data);
	} else {
		slotA->child[0] = nodeE;
		slotA->parent = change_node_parent(slotA->parent, nodeB);
	}
	if (slotB_old) {
		add_slot(nodeB, slotB->child[0], nodeA,
		         change_node_parent(slotB->parent, parent),
		         vtree_data);
	} else {
		slotB->child[1] = nodeA;
		slotB->parent = change_node_parent(slotB->parent, parent);
	}
}
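For contrast with the versioned-slot bookkeeping above, the same right rotation on a plain binary tree is sketched below. It assumes a simplified node_t with ordinary child[2] and parent pointers, i.e. without the color bits that get_node_parent()/change_node_parent() pack into the parent field:

static void rot_right_plain(node_t **rootp, node_t *nodeA)
{
	node_t *nodeB = nodeA->child[0];
	node_t *nodeE = nodeB->child[1];

	/* move E (B's right subtree) under A */
	nodeA->child[0] = nodeE;
	if (nodeE)
		nodeE->parent = nodeA;

	/* splice B into A's old position */
	nodeB->parent = nodeA->parent;
	if (!nodeA->parent)
		*rootp = nodeB;
	else if (nodeA->parent->child[0] == nodeA)
		nodeA->parent->child[0] = nodeB;
	else
		nodeA->parent->child[1] = nodeB;

	/* hang A as B's right child */
	nodeB->child[1] = nodeA;
	nodeA->parent = nodeB;
}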
Example No. 18
/*
 * rot_left
 *
 *    A                  C
 *   / \                / \
 *  B   C     ===>     A   E
 *     / \            / \
 *    D   E          B   D
 *
 */
static void
rot_left(node_t *root, node_t *nodeA, vtree_slot_t *slotA, vtree_pthread_data_t *vtree_data)
{
	node_t *nodeC, *nodeD, *parent;
	vtree_slot_t *slotC;
	int slotC_old;

	nodeC = slotA->child[1];
	if ((slotC = get_slot_from_rec_new(nodeC, vtree_data)) == NULL) {
		slotC_old = 1;
		slotC = read_slot(nodeC, vtree_data);
	} else
		slotC_old = 0;
	if ((nodeD = slotC->child[0]) != NULL)
		add_or_change_slot_parent(nodeD, nodeA, vtree_data);
	parent = get_node_parent(slotA->parent);
	if (!parent)
		add_or_change_slot(root, nodeC, NULL, NULL, vtree_data);
	else
		add_or_change_slot_child(parent, nodeA, nodeC, vtree_data);

	if (get_slot_from_rec_new(nodeA, vtree_data) == NULL) {
		add_slot(nodeA, slotA->child[0], nodeD,
		         change_node_parent(slotA->parent, nodeC),
		         vtree_data);
	} else {
		slotA->child[1] = nodeD;
		slotA->parent = change_node_parent(slotA->parent, nodeC);
	}
	if (slotC_old) {
		add_slot(nodeC, nodeA, slotC->child[1],
		         change_node_parent(slotC->parent, parent),
		         vtree_data);
	} else {
		slotC->child[0] = nodeA;
		slotC->parent = change_node_parent(slotC->parent, parent);
	}
}
Example No. 19
static inline void
add_or_change_slot_child(node_t *node, node_t *old_child, node_t *new_child,
                         vtree_pthread_data_t *vtree_data)
{
	vtree_slot_t *slot;

	slot = get_slot_from_rec_new(node, vtree_data);

	if (slot) {
		if (slot->child[0] == old_child)
			slot->child[0] = new_child;
		else
			slot->child[1] = new_child;
	}else {
		slot = read_slot(node, vtree_data);
		if (slot->child[0] == old_child)
			add_slot(node, new_child, slot->child[1], slot->parent,
			         vtree_data);
		else
			add_slot(node, slot->child[0], new_child, slot->parent,
			         vtree_data);
	}
}
Example No. 20
/* Move compaction needs special treatment when updating the referent field */
static inline void move_compaction_update_ref(GC *gc, REF *p_ref)
{
  /* There are only two kinds of p_ref being added into finref_repset_pool:
   * 1. p_ref is in a vector block from one finref pool;
   * 2. p_ref is a referent field.
   * So if p_ref belongs to heap, it must be a referent field pointer.
   * Resurrected objects other than the tree root need not be recorded in finref_repset_pool.
   */
  if(address_belongs_to_gc_heap(p_ref, gc) && (p_ref >= los_boundary)){
    unsigned int offset = get_gc_referent_offset();
    Partial_Reveal_Object *p_old_ref = (Partial_Reveal_Object*)((POINTER_SIZE_INT)p_ref - offset);
    Partial_Reveal_Object *p_new_ref = obj_get_fw_in_table(p_old_ref);
    p_ref = (REF*)((POINTER_SIZE_INT)p_new_ref + offset);
  }
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  assert(space_of_addr(gc, p_obj)->move_object);
  
  if(p_obj < los_boundary)
    p_obj = obj_get_fw_in_oi(p_obj);
  else
    p_obj = obj_get_fw_in_table(p_obj);

  write_slot(p_ref, p_obj);
}
Example No. 21
static void dead_weak_refs_fallback(GC *gc, Pool *ref_pool)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *free_pool = metadata->free_pool;
  Pool *fallback_ref_pool = metadata->fallback_ref_pool;
  
  Vector_Block *fallback_ref_block = finref_get_free_block(gc);
  Vector_Block *block = pool_get_entry(ref_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    while(!vector_block_iterator_end(block, iter)){
      Partial_Reveal_Object *p_obj = read_slot((REF*)iter);
      /* finref_add_fallback_ref may hand back a new free block if fallback_ref_block is full */
      if(p_obj)
        fallback_ref_block = finref_add_fallback_ref(gc, fallback_ref_block, p_obj);
      iter = vector_block_iterator_advance(block, iter);
    }
    vector_block_clear(block);
    pool_put_entry(free_pool, block);
    block = pool_get_entry(ref_pool);
  }
  
  pool_put_entry(fallback_ref_pool, fallback_ref_block);
}
Example No. 22
static int
rbtree_check(node_t *node, node_t *parent, int parent_color,
             vtree_pthread_data_t *vtree_data)
{
	int ln, rn;
	int color;
	vtree_slot_t *slot;

	if (node == NULL)
		return 0;
	slot = read_slot(node, vtree_data);
	assert(parent == get_node_parent(slot->parent));
	color = get_node_color(slot->parent);
	assert(color == RB_BLACK || parent_color == RB_BLACK);

	assert(slot->child[0] == NULL || slot->child[0]->value < node->value);
	assert(slot->child[1] == NULL || slot->child[1]->value > node->value);
	ln = rbtree_check(slot->child[0], node, color, vtree_data);
	rn = rbtree_check(slot->child[1], node, color, vtree_data);
	assert(ln == rn);
	if (color == RB_BLACK)
		return 1 + ln;
	return ln;
}
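A hedged usage sketch for the checker above: validate an entire tree from its root. root_node and vtree_data come from the caller; RB_BLACK is the color constant used in the example, and passing it as parent_color lets the root itself be either color:

static void rbtree_validate(node_t *root_node, vtree_pthread_data_t *vtree_data)
{
	/* rbtree_check() returns the black-height and asserts the ordering,
	   parent, and red-red invariants along the way */
	(void)rbtree_check(root_node, NULL, RB_BLACK, vtree_data);
}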
Example No. 23
static void collector_trace_rootsets(Collector* collector)
{
  GC* gc = collector->gc;
  GC_Metadata* metadata = gc->metadata;
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif
  
  unsigned int num_active_collectors = gc->num_active_collectors;
  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);

  Space* space = collector->collect_space;
  collector->trace_stack = free_task_pool_get_entry(metadata);

  /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */  
  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to trace tasks. */ 

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: copy root objects to trace stack ...");
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(root_set, iter);

      assert(*p_ref);  /* root ref can't be NULL, but remset can be */

      collector_tracestack_push(collector, p_ref);

#ifdef GC_GEN_STATS    
      gc_gen_collector_update_rootset_ref_num(stats);
#endif
    } 
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  /* second step: iterate over the trace tasks and forward objects */
  collector->trace_stack = free_task_pool_get_entry(metadata);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish copying root objects to trace stack.");

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: trace and forward objects ...");

retry:
  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);

  while(trace_task){    
    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
    while(!vector_block_iterator_end(trace_task,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(trace_task, iter);
#ifdef PREFETCH_SUPPORTED
      /* DO PREFETCH */
      if( mark_prefetch ) {    
        if(!vector_block_iterator_end(trace_task, iter)) {
      	  REF *pref= (REF*) *iter;
      	  PREFETCH( read_slot(pref));
        }	
      }
#endif      
      trace_object(collector, p_ref);
      
      if(collector->result == FALSE)  break; /* force return */
 
    }
    vector_stack_clear(trace_task);
    pool_put_entry(metadata->free_task_pool, trace_task);

    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }
    
    trace_task = pool_get_entry(metadata->mark_task_pool);
  }
  
  /* A collector comes here when seeing an empty mark_task_pool. The last collector will ensure 
     all the tasks are finished.*/
     
  atomic_inc32(&num_finished_collectors);
  while(num_finished_collectors != num_active_collectors){
    if( pool_is_empty(metadata->mark_task_pool)) continue;
    /* we can't grab the task here, because of a race condition. If we grab the task, 
       and the pool is empty, other threads may fall to this barrier and then pass. */
    atomic_dec32(&num_finished_collectors);
    goto retry; 
  }

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish tracing and forwarding objects.");

  /* now we are done, but each collector has a private stack that is empty */  
  trace_task = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(trace_task);
  pool_put_entry(metadata->free_task_pool, trace_task);   
  collector->trace_stack = NULL;
  
  return;
}
Example No. 24
static FORCE_INLINE void forward_object(Collector* collector, REF *p_ref) 
{
  GC* gc = collector->gc;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);

  if(obj_belongs_to_tospace(p_obj)) return;
    
  if(!obj_belongs_to_nos(p_obj)){
    if(obj_mark_in_oi(p_obj)){
#ifdef GC_GEN_STATS
      if(gc_profile){
        GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
        gc_gen_collector_update_marked_nonnos_obj_stats_minor(stats);
      }
#endif
      scan_object(collector, p_obj);
    }
    return;
  }

  Partial_Reveal_Object* p_target_obj = NULL;
  /* Fastpath: object has already been forwarded, update the ref slot */
  if(obj_is_fw_in_oi(p_obj)) {
    p_target_obj = obj_get_fw_in_oi(p_obj);
    assert(p_target_obj);
    write_slot(p_ref, p_target_obj);
    return;
  }

  /* following is the logic for forwarding */  
  p_target_obj = collector_forward_object(collector, p_obj);
  
  /* if p_target_obj is NULL, it has been forwarded by another thread. 
      We could implement collector_forward_object() so that the forwarding pointer 
      is set in the atomic instruction, which requires rolling back the mos_alloced
      space. That is easy for thread-local block allocation cancellation. */
  if( p_target_obj == NULL ){
    if(collector->result == FALSE ){
      /* failed to forward, let's get back to controller. */
      vector_stack_clear(collector->trace_stack);
      return;
    }

    p_target_obj = obj_get_fw_in_oi(p_obj);
    assert(p_target_obj);
    write_slot(p_ref, p_target_obj);
    return;
  }
  /* otherwise, we successfully forwarded */

#ifdef GC_GEN_STATS
  if(gc_profile){
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
    gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
    gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
  }
#endif
  write_slot(p_ref, p_target_obj);

  scan_object(collector, p_target_obj); 
  return;
}
Example No. 25
/*
 * The reason why we don't use identify_dead_refs() to implement this function is
 * that we will differentiate phanref from weakref in the future.
 */
static void identify_dead_phanrefs(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *phanref_pool = metadata->phanref_pool;
  
  if(collect_need_update_repset())
    finref_reset_repset(gc);
//  collector_reset_repset(collector);
  pool_iterator_init(phanref_pool);
  Vector_Block *block = pool_iterator_next(phanref_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
      Partial_Reveal_Object *p_obj = read_slot((REF*)p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);

      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);      
      if(!p_referent){  // referent field has been cleared
        *p_ref = NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else{ // if(collect_move_object()){ this check is redundant because obj_need_move checks
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
        continue;
      }
      *p_referent_field = (REF)NULL;
#ifdef ORDER_DEBUG
               if(ref_file == NULL){
                   if(order_record){
                       ref_file = fopen64("RECORD_REF_LOG.log", "w+");
                   }
		 else{
		     ref_file = fopen64("REPLAY_REF_LOG.log", "w+");
		 }
               }
               assert(ref_file);
               fprintf(ref_file, "GC[%d]: ref (%d, %d) is DEAD!\n", gc->num_collections, p_referent->alloc_tid, p_referent->alloc_count);
               fflush(ref_file);
#endif
      /* Phantom status: for future use
       * if((unsigned int)p_referent & PHANTOM_REF_ENQUEUE_STATUS_MASK){
       *   // enqueued but not explicitly cleared OR pending for enqueueing
       *   *iter = NULL;
       * }
       * resurrect_obj_tree(collector, p_referent_field);
       */
    }
    block = pool_iterator_next(phanref_pool);
  }
//  collector_put_repset(collector);
  if(collect_need_update_repset()){
    finref_put_repset(gc);
    finref_add_repset_from_pool(gc, phanref_pool);
  }
}
Example No. 26
static void identify_dead_refs(GC *gc, Pool *pool)
{
  if(collect_need_update_repset())
    finref_reset_repset(gc);

  pool_iterator_init(pool);
  Vector_Block *block = pool_iterator_next(pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);
        
      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
      
      if(!p_referent){  
        /* referent field has been cleared. I forgot why we set p_ref to NULL here. 
           I guess it's because this ref_obj was already processed through another p_ref, so
           there is no need to keep the same ref_obj in this p_ref. */
        *p_ref = (REF)NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            /* if it's gen mode and the referent stays in NOS, we need to keep p_referent_field in the collector remset.
               This keeps the ref obj live even though it is actually only weakly reachable in the next gen-mode collection.
               This simplifies the design. Otherwise, we would need to remember the refobjs in MOS separately and process them separately. */
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else{ // if(collect_move_object()){ the condition is redundant because obj_need_move already checks 
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
      }else{
	      /* else, the referent is dead (weakly reachable), clear the referent field */
	      *p_referent_field = (REF)NULL; 
#ifdef ORDER_DEBUG
               if(ref_file == NULL){
                   if(order_record){
                       ref_file = fopen64("RECORD_REF_LOG.log", "w+");
                   }
		 else{
		     ref_file = fopen64("REPLAY_REF_LOG.log", "w+");
		 }
               }
               assert(ref_file);
               fprintf(ref_file, "GC[%d]: ref (%d, %d) is DEAD!\n", gc->num_collections, p_referent->alloc_tid, p_referent->alloc_count);
               fflush(ref_file);
#endif
	      /* for dead referent, p_ref is not set NULL. p_ref keeps the ref object, which
	         will be moved to VM for enqueueing. */
      }
    }/* for each ref object */
    
    block = pool_iterator_next(pool);
  }
  
  if(collect_need_update_repset()){
    finref_put_repset(gc);
    finref_add_repset_from_pool(gc, pool);
  }
}
Example No. 27
static FORCE_INLINE void forward_object(Collector *collector, REF *p_ref) 
{
  Space* space = collector->collect_space; 
  GC* gc = collector->gc;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);

  /* p_obj can also be in tospace because this p_ref is a redundant one in the mutator remset. 
     We don't rem p_ref because it was remembered the first time it was met. 
     FIXME:: the situation obj_belongs_to_tospace() should never be true if we
     remembered objects rather than slots. Currently, mutators remember objects, and
     collectors remember slots. Although collectors remember slots, we are sure 
     there is no chance of a repetitive p_ref, because an object is scanned only
     when it is marked or forwarded atomically, hence only one collector has the chance
     to do the scanning. */   
  if(!obj_belongs_to_nos(p_obj) || obj_belongs_to_tospace(p_obj)) return; 

  Partial_Reveal_Object* p_target_obj = NULL;
  Boolean to_rem_slot = FALSE;

  /* Fastpath: object has already been forwarded, update the ref slot */
  if(obj_is_fw_in_oi(p_obj)){
    p_target_obj = obj_get_fw_in_oi(p_obj);
    write_slot(p_ref, p_target_obj);

    /* check if the target obj stays in NOS, and p_ref from MOS. If yes, rem p_ref. */
    if(obj_belongs_to_tospace(p_target_obj))
      if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
        collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref); 

    return; 
  }  
    
  /* following is the logic for forwarding */  
  p_target_obj = collector_forward_object(collector, p_obj);
  
  /* if p_target_obj is NULL, it has been forwarded by another thread. 
      Note: there is a race condition here; it might have been forwarded by another thread
      that has not set the forwarding pointer yet. We would need to spin here to get the forwarding pointer. 
      We could implement collector_forward_object() so that the forwarding pointer 
      is set in the atomic instruction, which requires rolling back the mos_alloced
      space. That is easy for thread-local block allocation cancellation. */
  if( p_target_obj == NULL ){
    if(collector->result == FALSE ){
      /* failed to forward, let's get back to controller. */
      vector_stack_clear(collector->trace_stack);
      return;
    }
    /* forwarded already*/
    p_target_obj = obj_get_fw_in_oi(p_obj);
  
  }else{  /* otherwise, we successfully forwarded */

#ifdef GC_GEN_STATS
  if(gc_profile){
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
    gc_gen_collector_update_marked_nos_obj_stats_minor(stats);
    gc_gen_collector_update_moved_nos_obj_stats_minor(stats, vm_object_size(p_obj));
  }
#endif

    scan_object(collector, p_target_obj);
  }
  
  assert(p_target_obj);
  write_slot(p_ref, p_target_obj);
  
  /* check if the target obj stays in NOS, and p_ref from MOS. If yes, rem p_ref. */
  if(obj_belongs_to_tospace(p_target_obj)){
    if( !addr_belongs_to_nos(p_ref) && address_belongs_to_gc_heap(p_ref, gc))
      collector_remset_add_entry(collector, ( Partial_Reveal_Object**) p_ref); 
  }
   
  return;
}
Example No. 28
// Resurrect the obj tree whose root is the obj which p_ref points to
static inline void resurrect_obj_tree(Collector *collector, REF *p_ref)
{
  GC *gc = collector->gc;
  GC_Metadata *metadata = gc->metadata;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  assert(p_obj && gc_obj_is_dead(gc, p_obj));
  
  void *p_ref_or_obj = p_ref;
  Trace_Object_Func trace_object;
  
  /* set trace_object() function */
  if(collect_is_minor()){
    if(gc_is_gen_mode()){
      if(minor_is_forward())
        trace_object = trace_obj_in_gen_fw;
      else if(minor_is_semispace())
        trace_object = trace_obj_in_gen_ss;
      else 
        assert(0);
    }else{
      if(minor_is_forward())
        trace_object = trace_obj_in_nongen_fw;
      else if(minor_is_semispace())
        trace_object = trace_obj_in_nongen_ss;
      else 
        assert(0);
    }
  } else if(collect_is_major_normal() || !gc_has_nos()){
    p_ref_or_obj = p_obj;
    if(gc_has_space_tuner(gc) && (gc->tuner->kind != TRANS_NOTHING)){
      trace_object = trace_obj_in_space_tune_marking;
      unsigned int obj_size = vm_object_size(p_obj);
#ifdef USE_32BITS_HASHCODE
      obj_size += hashcode_is_set(p_obj) ? GC_OBJECT_ALIGNMENT : 0;
#endif
      if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc))){
        collector->non_los_live_obj_size += obj_size;
        collector->segment_live_size[SIZE_TO_SEGMENT_INDEX(obj_size)] += obj_size;
      } else {
        collector->los_live_obj_size += round_up_to_size(obj_size, KB); 
      }
    } else if(!gc_has_nos()){
      trace_object = trace_obj_in_ms_marking;
    } else {
      trace_object = trace_obj_in_normal_marking;
    }
  } else if(collect_is_fallback()){
    if(major_is_marksweep())
      trace_object = trace_obj_in_ms_fallback_marking;
    else
      trace_object = trace_obj_in_fallback_marking;
  } else {
    assert(major_is_marksweep());
    p_ref_or_obj = p_obj;
   if( gc->gc_concurrent_status == GC_CON_NIL ) 
      trace_object = trace_obj_in_ms_marking;
    else
      trace_object = trace_obj_in_ms_concurrent_mark;
  }
  
  collector->trace_stack = free_task_pool_get_entry(metadata);
  collector_tracestack_push(collector, p_ref_or_obj);
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  collector->trace_stack = free_task_pool_get_entry(metadata);
  Vector_Block *task_block = pool_get_entry(metadata->mark_task_pool);
  while(task_block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(task_block);
    while(!vector_block_iterator_end(task_block, iter)){
      void *p_ref_or_obj = (void*)*iter;
      assert(((collect_is_minor()||collect_is_fallback()) && *(Partial_Reveal_Object **)p_ref_or_obj)
              || ((collect_is_major_normal()||major_is_marksweep()||!gc_has_nos()) && p_ref_or_obj));
      trace_object(collector, p_ref_or_obj);
      if(collector->result == FALSE)  break; /* Resurrection fallback happens; force return */
      
      iter = vector_block_iterator_advance(task_block, iter);
    }
    vector_stack_clear(task_block);
    pool_put_entry(metadata->free_task_pool, task_block);
    
    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }
    
    task_block = pool_get_entry(metadata->mark_task_pool);
  }
  
  task_block = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(task_block);
  pool_put_entry(metadata->free_task_pool, task_block);
  collector->trace_stack = NULL;
}
Example No. 29
rp_frame *
frame_read (char *str, rp_screen *screen)
{
    Window w = 0L;
    rp_window *win;
    rp_frame *f;
    char *tmp, *d;
    int s_width = -1;
    int s_height = -1;

    /* Create a blank frame. */
    f = xmalloc (sizeof (rp_frame));
    init_frame(f);

    PRINT_DEBUG(("parsing '%s'\n", str));

    d = xstrdup(str);
    tmp = strtok_ws (d);

    /* Verify it starts with '(frame ' */
    if (strcmp(tmp, "(frame"))
    {
        PRINT_DEBUG(("Doesn't start with '(frame '\n"));
        free (d);
        free (f);
        return NULL;
    }
    /* NOTE: there is no check to make sure each field was filled in. */
    tmp = strtok_ws(NULL);
    while (tmp)
    {
        if (!strcmp(tmp, ":number"))
            read_slot(f->number);
        else if (!strcmp(tmp, ":x"))
            read_slot(f->x);
        else if (!strcmp(tmp, ":y"))
            read_slot(f->y);
        else if (!strcmp(tmp, ":width"))
            read_slot(f->width);
        else if (!strcmp(tmp, ":height"))
            read_slot(f->height);
        else if (!strcmp(tmp, ":screenw"))
            read_slot(s_width);
        else if (!strcmp(tmp, ":screenh"))
            read_slot(s_height);
        else if (!strcmp(tmp, ":window"))
            read_slot(w);
        else if (!strcmp(tmp, ":last-access"))
            read_slot(f->last_access);
        else if (!strcmp(tmp, ":dedicated")) {
            /* f->dedicated is unsigned, so read into local variable. */
            long dedicated;

            read_slot(dedicated);
            if (dedicated <= 0)
                f->dedicated = 0;
            else
                f->dedicated = 1;
        }
        else if (!strcmp(tmp, ")"))
            break;
        else
            PRINT_ERROR(("Unknown slot %s\n", tmp));
        /* Read the next token. */
        tmp = strtok_ws(NULL);
    }
    if (tmp)
        PRINT_ERROR(("Frame has trailing garbage\n"));
    free (d);

    /* adjust x, y, width and height to a possible screen size change */
    if (s_width > 0)
    {
        f->x = (f->x*screen->width)/s_width;
        f->width = (f->width*screen->width)/s_width;
    }
    if (s_height > 0)
    {
        f->y = (f->y*screen->height)/s_height;
        f->height = (f->height*screen->height)/s_height;
    }

    /* Perform some integrity checks on what we got and fix any
       problems. */
    if (f->number <= 0)
        f->number = 0;
    if (f->x <= 0)
        f->x = 0;
    if (f->y <= 0)
        f->y = 0;
    if (f->width <= defaults.window_border_width*2)
        f->width = defaults.window_border_width*2 + 1;
    if (f->height <= defaults.window_border_width*2)
        f->height = defaults.window_border_width*2 + 1;
    if (f->last_access < 0)
        f->last_access = 0;

    /* Find the window with the X11 window ID. */
    win = find_window_in_list (w, &rp_mapped_window);
    if (win)
        f->win_number = win->number;
    else
        f->win_number = EMPTY;

    return f;
}
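Unlike the GC examples, read_slot here is clearly a macro rather than a function: it is handed plain lvalues such as f->number and s_width, and it consumes the next token of the string being parsed. A plausible definition, stated as an assumption since the real one lives elsewhere in the same source, would be along these lines:

/* reads the next whitespace-delimited token (via the caller's tmp variable)
   and stores its numeric value into the given lvalue */
#define read_slot(x) do { tmp = strtok_ws (NULL); (x) = strtol (tmp, NULL, 10); } while (0)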
Example No. 30
void mark_scan_pool(Collector* collector)
{
  GC* gc = collector->gc;
  GC_Metadata* metadata = gc->metadata;
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

  /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
  unsigned int num_active_collectors = gc->num_active_collectors;
  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
   
  collector->trace_stack = free_task_pool_get_entry(metadata);

  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to mark tasks. 
      FIXME:: can be done sequentially before coming here to eliminate atomic ops */ 
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(root_set,iter);

      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      /* root ref can't be NULL (remset may have NULL ref entries, but this function is only for ALGO_MAJOR) */
      assert(p_obj!=NULL);
      /* we have to mark the object before putting it into the mark task, because
         it is possible for two slots to contain the same object. It would then be
         scanned twice and its ref slots recorded twice. The problem occurs when a
         ref slot is updated the first time with the new position, so that the second
         time the value in the ref slot is no longer the old position as expected.
         This can be worked around if we want. 
      */
      if(obj_mark_in_vt(p_obj)){
        collector_tracestack_push(collector, p_obj);
#ifdef GC_GEN_STATS
        gc_gen_collector_update_rootset_ref_num(stats);
        gc_gen_collector_update_marked_obj_stats_major(stats);
#endif
      }

    } 
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  /* second step: iterate over the mark tasks and scan objects */
  /* get a task buf for the mark stack */
  collector->trace_stack = free_task_pool_get_entry(metadata);

retry:
  Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
  
  while(mark_task){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(mark_task);
    while(!vector_block_iterator_end(mark_task,iter)){
      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(mark_task,iter);

      /* FIXME:: we should not let mark_task become empty while working; others may want to steal from it. 
         Degenerate my stack into mark_task, and grab another mark_task. */
      trace_object(collector, p_obj);
    } 
    /* run out one task, put back to the pool and grab another task */
   vector_stack_clear(mark_task);
   pool_put_entry(metadata->free_task_pool, mark_task);
   mark_task = pool_get_entry(metadata->mark_task_pool);      
  }
  
  /* termination detection. This is also a barrier.
     NOTE:: We can simply spin waiting for num_finished_collectors, because each 
     generated new task would surely be processed by its generating collector eventually. 
     So code below is only for load balance optimization. */
  atomic_inc32(&num_finished_collectors);
  while(num_finished_collectors != num_active_collectors){
    if( !pool_is_empty(metadata->mark_task_pool)){
      atomic_dec32(&num_finished_collectors);
      goto retry;  
    }
  }
     
  /* put back the last mark stack to the free pool */
  mark_task = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(mark_task);
  pool_put_entry(metadata->free_task_pool, mark_task);   
  collector->trace_stack = NULL;
  
  return;
}