Code Example #1
/* copy source pool to dest pool, ignoring NULL slots */
void verifier_copy_pool(Pool* dest_pool, Pool* source_pool)
{
  Pool* temp_pool = sync_pool_create();
  
  Vector_Block* dest_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
  pool_iterator_init(source_pool);
  while(Vector_Block *source_set = pool_iterator_next(source_pool)){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(source_set);
    while( !vector_block_iterator_end(source_set, iter)){
      assert(!vector_block_is_full(dest_set));
      if(*iter)  vector_block_add_entry(dest_set, *iter);
      iter = vector_block_iterator_advance(source_set, iter);
    }
    pool_put_entry(temp_pool, dest_set);
    dest_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
  }
  
  dest_set = NULL;
  pool_iterator_init(temp_pool);
  while(dest_set = pool_iterator_next(temp_pool)){
    pool_put_entry(dest_pool, dest_set);
  }
  
  sync_pool_destruct(temp_pool);
  return;
}
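
Note: the staging through temp_pool above is presumably there because pool_put_entry/pool_get_entry behave LIFO, so a single copy pass reverses block order (compare verifier_copy_pool_reverse_order in Code Example #12, which deliberately copies in one pass). A minimal, self-contained model of that assumption, with std::stack standing in for a sync pool, shows why two passes restore the original order:

#include <cassert>
#include <stack>
#include <vector>

int main()
{
  std::vector<int> source = {1, 2, 3};            // blocks in source_pool order
  std::stack<int> temp_pool, dest_pool;

  for (int block : source) temp_pool.push(block); // first pass: order reversed
  while (!temp_pool.empty()) {                    // second pass: order restored
    dest_pool.push(temp_pool.top());
    temp_pool.pop();
  }

  std::vector<int> dest;
  while (!dest_pool.empty()) { dest.push_back(dest_pool.top()); dest_pool.pop(); }
  assert((dest == std::vector<int>{1, 2, 3}));    // original order preserved
  return 0;
}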
Code Example #2
void verifier_trace_rootsets(Heap_Verifier* heap_verifier, Pool* root_set_pool)
{
  Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
  GC_Verifier* gc_verifier = heap_verifier->gc_verifier;
  gc_verifier->objects_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
  gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool);
  gc_verifier->hashcode_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
  pool_iterator_init(root_set_pool);
  Vector_Block* root_set = pool_iterator_next(root_set_pool);
  
  /* first step: copy all root objects to trace tasks. */ 
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      REF* p_ref = (REF* )*iter;
      iter = vector_block_iterator_advance(root_set,iter);

      if(!heap_verifier->need_verify_rootset || !heap_verifier->is_before_gc){
        if(!verify_rootset_slot(p_ref, heap_verifier)){
          gc_verifier->is_verification_passed = FALSE;
          assert(0);
          continue;
        }
      }

      Partial_Reveal_Object* p_obj = read_slot(p_ref);
      assert(p_obj != NULL);  

      verifier_tracestack_push(p_obj, gc_verifier->trace_stack);
    } 
    root_set = pool_iterator_next(root_set_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(verifier_metadata->mark_task_pool, gc_verifier->trace_stack);
  
  /* second step: iterate over the trace tasks and trace the objects */
  gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool);

  Vector_Block* trace_task = pool_get_entry(verifier_metadata->mark_task_pool);

  while(trace_task){    
    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
    while(!vector_block_iterator_end(trace_task,iter)){
      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object* )*iter;
      iter = vector_block_iterator_advance(trace_task,iter);
      trace_object(heap_verifier, p_obj); 
    }
    vector_stack_clear(trace_task);
    pool_put_entry(verifier_metadata->free_task_pool, trace_task);
    trace_task = pool_get_entry(verifier_metadata->mark_task_pool);
  }
  vector_stack_clear(gc_verifier->trace_stack);
  pool_put_entry(verifier_metadata->free_task_pool, gc_verifier->trace_stack);
  gc_verifier->trace_stack = NULL;

}
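
The function above follows the two-phase worklist pattern that recurs throughout these examples: phase one packs entries into fixed-capacity blocks and hands full blocks to a shared pool; phase two repeatedly grabs a block and processes every entry in it. A hedged, self-contained sketch with standard containers standing in for Pool/Vector_Block (an assumption, not the verifier's types):

#include <cstddef>
#include <deque>
#include <vector>

struct Obj { std::vector<Obj*> fields; bool visited = false; };

static const std::size_t BLOCK_SIZE = 4;     // vector block capacity stand-in

void trace_all(const std::vector<Obj*>& roots)
{
  std::deque<std::vector<Obj*>> task_pool;   // stands in for mark_task_pool
  std::vector<Obj*> task;                    // stands in for trace_stack

  for (Obj* r : roots) {                     // phase 1: pack roots into blocks
    task.push_back(r);
    if (task.size() == BLOCK_SIZE) { task_pool.push_back(task); task.clear(); }
  }
  if (!task.empty()) task_pool.push_back(task);   // put back the last task

  while (!task_pool.empty()) {               // phase 2: drain the task pool
    std::vector<Obj*> t = task_pool.front();
    task_pool.pop_front();
    for (Obj* o : t) {
      if (!o || o->visited) continue;
      o->visited = true;                     // stands in for trace_object()
      task_pool.push_back(o->fields);        // children become a new task
    }
  }
}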
Code Example #3
void mutator_destruct(GC* gc, void *unused_gc_information)
{

  Mutator *mutator = (Mutator *)gc_get_tls();

  alloc_context_reset((Allocator*)mutator);


  lock(gc->mutator_list_lock);     // vvvvvvvvvvvvvvvvvvvvvvvvvvvvvv

#ifdef USE_UNIQUE_MARK_SWEEP_GC
  allocactor_destruct_local_chunks((Allocator*)mutator);
#endif
  mutator_register_new_obj_size(mutator);

  volatile Mutator *temp = gc->mutator_list;
  if (temp == mutator) {  /* it is at the head of the list */
    gc->mutator_list = temp->next;
  } else {
    while (temp->next != mutator) {
      temp = temp->next;
      assert(temp);
    }
    temp->next = mutator->next;
  }
  gc->num_mutators--;

  unlock(gc->mutator_list_lock); // ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  
  if(gc_is_gen_mode()){ /* put back the remset when a mutator exits */
    pool_put_entry(gc->metadata->mutator_remset_pool, mutator->rem_set);
    mutator->rem_set = NULL;
  }
  
  if(mutator->obj_with_fin){
    pool_put_entry(gc->finref_metadata->obj_with_fin_pool, mutator->obj_with_fin);
    mutator->obj_with_fin = NULL;
  }

  lock(mutator->dirty_set_lock);
  if( mutator->dirty_set != NULL){
    if(vector_block_is_empty(mutator->dirty_set))
      pool_put_entry(gc->metadata->free_set_pool, mutator->dirty_set);
    else{ /* FIXME:: this condition may be released. */
      pool_put_entry(gc->metadata->gc_dirty_set_pool, mutator->dirty_set);
      mutator->dirty_set = NULL;
    }
  }
  unlock(mutator->dirty_set_lock);
  STD_FREE(mutator);
  gc_set_tls(NULL);
  
  return;
}
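
The unlink step in the middle of mutator_destruct is a standard singly-linked-list removal performed under the list lock. A minimal stand-alone sketch of just that step (illustrative names, not the GC's types):

#include <cassert>

struct Node { Node* next; };

void list_remove(Node** head, Node* target)
{
  if (*head == target) {         // target is at the head of the list
    *head = target->next;
    return;
  }
  Node* cur = *head;
  while (cur->next != target) {  // walk to the predecessor of target
    cur = cur->next;
    assert(cur);                 // mirrors the assert in mutator_destruct
  }
  cur->next = target->next;      // splice target out
}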
Code Example #4
File: lspace_alloc_collect.cpp  Project: dacut/juliet
void lspace_compute_object_target(Collector* collector, Lspace* lspace)
{
  void* dest_addr = lspace->heap_start;
  unsigned int iterate_index = 0;
  Partial_Reveal_Object* p_obj = lspace_get_first_marked_object(lspace, &iterate_index);
  	
  assert(!collector->rem_set);
  collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
#ifdef USE_32BITS_HASHCODE  
  collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata);
#endif
  
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif
  while( p_obj ){
   
    assert( obj_is_marked_in_vt(p_obj));
    unsigned int obj_size = vm_object_size(p_obj);
#ifdef GC_GEN_STATS
  gc_gen_collector_update_moved_los_obj_stats_major(stats, vm_object_size(p_obj));
#endif
    assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)lspace->heap_end);
#ifdef USE_32BITS_HASHCODE 
    obj_size += hashcode_is_attached(p_obj)? GC_OBJECT_ALIGNMENT : 0 ;
    Obj_Info_Type obj_info = slide_compact_process_hashcode(p_obj, dest_addr, &obj_size, collector, NULL, NULL);
#else
    Obj_Info_Type obj_info = get_obj_info_raw(p_obj);
#endif

    if( obj_info != 0 ) {
      collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
      collector_remset_add_entry(collector, (Partial_Reveal_Object **)(POINTER_SIZE_INT)obj_info);
    }
      
    obj_set_fw_in_oi(p_obj, dest_addr);
    dest_addr = (void *)ALIGN_UP_TO_KILO(((POINTER_SIZE_INT) dest_addr + obj_size));
    p_obj = lspace_get_next_marked_object(lspace, &iterate_index);
  }

  pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set);
  collector->rem_set = NULL;
#ifdef USE_32BITS_HASHCODE 
  pool_put_entry(collector->gc->metadata->collector_hashcode_pool, collector->hashcode_set);
  collector->hashcode_set = NULL;
#endif
  
  lspace->scompact_fa_start = dest_addr;
  lspace->scompact_fa_end = lspace->heap_end;
  return;
}
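
dest_addr above is bumped to a kilobyte boundary after each large object. ALIGN_UP_TO_KILO is presumably the usual power-of-two round-up; a self-contained sketch of that arithmetic (an assumption, not the macro's actual definition):

#include <cassert>
#include <cstdint>

static const uintptr_t KB = 1024;

inline uintptr_t align_up_to_kilo(uintptr_t addr)
{
  return (addr + KB - 1) & ~(KB - 1);   // round up to the next 1KB boundary
}

int main()
{
  assert(align_up_to_kilo(1)    == 1024);
  assert(align_up_to_kilo(1024) == 1024);  // already aligned: unchanged
  assert(align_up_to_kilo(1025) == 2048);
  return 0;
}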
Code Example #5
void verifier_metadata_initialize(Heap_Verifier* heap_verifier)
{
  Heap_Verifier_Metadata* heap_verifier_metadata = (Heap_Verifier_Metadata* )STD_MALLOC(sizeof(Heap_Verifier_Metadata));
  assert(heap_verifier_metadata);
  memset(heap_verifier_metadata, 0, sizeof(Heap_Verifier_Metadata));
  
  unsigned int seg_size = GC_VERIFIER_METADATA_SIZE_BYTES + GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES;
  void* metadata = STD_MALLOC(seg_size);
  assert(metadata);
  memset(metadata, 0, seg_size);
  heap_verifier_metadata->segments[0] = metadata;
  metadata = (void*)round_up_to_size((POINTER_SIZE_INT)metadata, GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES);
  heap_verifier_metadata->num_alloc_segs = 1;
  
  unsigned int i = 0;
  unsigned int num_blocks = GC_VERIFIER_METADATA_SIZE_BYTES/GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES;
  for(i=0; i<num_blocks; i++){
    Vector_Block* block = (Vector_Block*)((POINTER_SIZE_INT)metadata + i*GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES);
    vector_block_init(block, GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES);
  }
  
  unsigned num_tasks = num_blocks>>1;
  heap_verifier_metadata->free_task_pool = sync_pool_create();
  for(i=0; i<num_tasks; i++){
    Vector_Block *block = (Vector_Block*)((POINTER_SIZE_INT)metadata + i*GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES);
    vector_stack_init((Vector_Block*)block);
    pool_put_entry(heap_verifier_metadata->free_task_pool, (void*)block); 
  }
  
  heap_verifier_metadata->free_set_pool = sync_pool_create();
  for(; i<num_blocks; i++){
    POINTER_SIZE_INT block = (POINTER_SIZE_INT)metadata + i*GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES;    
    pool_put_entry(heap_verifier_metadata->free_set_pool, (void*)block); 
  }

  heap_verifier_metadata->mark_task_pool = sync_pool_create();
  heap_verifier_metadata->root_set_pool = sync_pool_create();
  heap_verifier_metadata->objects_pool_before_gc = sync_pool_create();
  heap_verifier_metadata->objects_pool_after_gc = sync_pool_create();
  heap_verifier_metadata->resurrect_objects_pool_before_gc = sync_pool_create();
  heap_verifier_metadata->resurrect_objects_pool_after_gc = sync_pool_create();
  heap_verifier_metadata->new_objects_pool = sync_pool_create();
  heap_verifier_metadata->hashcode_pool_before_gc = sync_pool_create();
  heap_verifier_metadata->hashcode_pool_after_gc = sync_pool_create();
  heap_verifier_metadata->obj_with_fin_pool = sync_pool_create();
  heap_verifier_metadata->finalizable_obj_pool = sync_pool_create();

  verifier_metadata = heap_verifier_metadata;
  heap_verifier->heap_verifier_metadata = heap_verifier_metadata;
  return;
}
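
The initialization above over-allocates each segment by one block (seg_size = metadata area + one block) and then rounds the base up with round_up_to_size, so every carved block is block-aligned while the usable area still fits inside the raw allocation. A self-contained model of that trick (sizes are illustrative):

#include <cassert>
#include <cstdint>
#include <cstdlib>

static const uintptr_t BLOCK = 32 * 1024;            // block size stand-in
static const uintptr_t AREA  = 4 * BLOCK;            // usable area stand-in

int main()
{
  void* raw = std::malloc(AREA + BLOCK);             // one block of slack
  uintptr_t base = ((uintptr_t)raw + BLOCK - 1) & ~(BLOCK - 1);
  assert(base % BLOCK == 0);                         // aligned base
  assert(base + AREA <= (uintptr_t)raw + AREA + BLOCK); // still inside raw
  std::free(raw);
  return 0;
}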
Code Example #6
Vector_Block* gc_verifier_metadata_extend(Pool* pool, Boolean is_set_pool)
{
  /* add a slot in the pool pointing back to verifier_metadata, so we would not need the global var verifier_metadata */
  lock(verifier_metadata->alloc_lock);
  Vector_Block* block = pool_get_entry(pool);
  if( block ){
    unlock(verifier_metadata->alloc_lock);
    return block;
  }
  
  unsigned int num_alloced = verifier_metadata->num_alloc_segs;
  if(num_alloced == METADATA_SEGMENT_NUM){
    printf("Run out GC metadata, please give it more segments!\n");
    exit(0);
  }
  unsigned int seg_size =  GC_VERIFIER_METADATA_EXTEND_SIZE_BYTES + GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES;
  void *new_segment = STD_MALLOC(seg_size);
  assert(new_segment);
  memset(new_segment, 0, seg_size);
  verifier_metadata->segments[num_alloced] = new_segment;
  new_segment = (void*)round_up_to_size((POINTER_SIZE_INT)new_segment, GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES);
  verifier_metadata->num_alloc_segs = num_alloced + 1;
  
  unsigned int num_blocks =  GC_VERIFIER_METADATA_EXTEND_SIZE_BYTES/GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES;
  
  unsigned int i=0;
  for(i=0; i<num_blocks; i++){
    Vector_Block* block = (Vector_Block*)((POINTER_SIZE_INT)new_segment + i*GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES);
    vector_block_init(block, GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES);
    assert(vector_block_is_empty(block));
  }
  
  if(is_set_pool){
    for(i=0; i<num_blocks; i++){
      POINTER_SIZE_INT block = (POINTER_SIZE_INT)new_segment + i*GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES;    
      pool_put_entry(pool, (void*)block); 
    }
  }else{
    for(i=0; i<num_blocks; i++){
      Vector_Block *block = (Vector_Block *)((POINTER_SIZE_INT)new_segment + i*GC_VERIFIER_METADATA_BLOCK_SIZE_BYTES);
      vector_stack_init(block);
      pool_put_entry(pool, (void*)block);
    }
  }

  block = pool_get_entry(pool);
  unlock(verifier_metadata->alloc_lock);
  return block;
}
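
Note the retry of pool_get_entry after taking alloc_lock: a competing thread may have extended the pool while this one waited, so only one thread ever allocates a new segment. A sketch of the same double-check pattern with standard types (illustrative stand-ins, not the GC API):

#include <deque>
#include <mutex>

std::mutex alloc_lock;
std::deque<int> pool;           // stands in for the free block pool

int pool_get_or_extend()
{
  std::lock_guard<std::mutex> guard(alloc_lock);
  if (!pool.empty()) {          // another thread may have extended already
    int block = pool.front(); pool.pop_front();
    return block;
  }
  for (int i = 0; i < 8; i++)   // allocate and publish a new segment
    pool.push_back(i);
  int block = pool.front(); pool.pop_front();
  return block;
}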
Code Example #7
void put_all_fin_on_exit(GC *gc)
{
  Pool *obj_with_fin_pool = gc->finref_metadata->obj_with_fin_pool;
  Pool *free_pool = gc->finref_metadata->free_pool;
  
  /* Because we are manipulating obj_with_fin_pool, the GC lock must be held in case a GC happens */
  vm_gc_lock_enum();
  /* FIXME: holding the gc lock is not enough; there may be mutators that are allocating objects with finalizers.
   * This could be fixed as follows:
   * in fspace_alloc() and lspace_alloc(), hold the gc lock through
   * allocating memory and adding the objects with finalizers to the pool
   */
  lock(gc->mutator_list_lock);
  gc_set_obj_with_fin(gc);
  unlock(gc->mutator_list_lock);
  
  Vector_Block *block = pool_get_entry(obj_with_fin_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    while(!vector_block_iterator_end(block, iter)){
      Managed_Object_Handle p_obj = (Managed_Object_Handle)read_slot((REF*)iter);
      if(p_obj)
        vm_finalize_object(p_obj);
      iter = vector_block_iterator_advance(block, iter);
    }
    vector_block_clear(block);
    pool_put_entry(free_pool, block);
    block = pool_get_entry(obj_with_fin_pool);
  }
  
  vm_gc_unlock_enum();
}
Code Example #8
/* Finalizable objs fall back to objs with fin when resurrection fallback happens */
static void finalizable_objs_fallback(GC *gc)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *finalizable_obj_pool = metadata->finalizable_obj_pool;
  Pool *obj_with_fin_pool = metadata->obj_with_fin_pool;
  Vector_Block *obj_with_fin_block = finref_get_free_block(gc);
    
  Vector_Block *block = pool_get_entry(finalizable_obj_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      /* Perhaps the obj has been resurrected by previous resurrections. If the fin-obj was resurrected, we need to put it back to the obj_with_fin pool.
         For minor collection, the resurrected obj was forwarded, so we need to use the new copy. */
      if(!gc_obj_is_dead(gc, p_obj) && obj_belongs_to_nos(p_obj)){
        /* Even in NOS, not all live objects are forwarded due to the partial-forward algorithm */
        if(!NOS_PARTIAL_FORWARD || fspace_obj_to_be_forwarded(p_obj)){
          write_slot(p_ref , obj_get_fw_in_oi(p_obj));
          p_obj = read_slot(p_ref);
        }
      }
      /* Perhaps obj_with_fin_block has been allocated with a new free block if it is full */
      obj_with_fin_block = gc_add_finalizer(gc, obj_with_fin_block, p_obj);
    }
    block = pool_get_entry(finalizable_obj_pool);
  }
  
  pool_put_entry(obj_with_fin_pool, obj_with_fin_block);
  metadata->pending_finalizers = FALSE;
}
Code Example #9
void verifier_trace_objsets(Heap_Verifier* heap_verifier, Pool* obj_set_pool)
{
  Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
  GC_Verifier* gc_verifier = heap_verifier->gc_verifier;
  gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool);
  pool_iterator_init(obj_set_pool);
  Vector_Block* obj_set = pool_iterator_next(obj_set_pool);
  /* first step: copy all root objects to trace tasks. */ 
  while(obj_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(obj_set);
    while(!vector_block_iterator_end(obj_set,iter)){
      Partial_Reveal_Object* p_obj = read_slot((REF*)iter);
      iter = vector_block_iterator_advance(obj_set,iter);
      /* p_obj can be NULL: when a GC has happened, entries in the finalizable objs list may have been cleared. */
      //assert(p_obj != NULL);  
      if(p_obj == NULL) continue;
      if(heap_verifier->gc_is_gen_mode && heap_verifier->is_before_gc && !obj_belongs_to_nos(p_obj)) continue;
      verifier_tracestack_push(p_obj, gc_verifier->trace_stack);
    } 
    obj_set = pool_iterator_next(obj_set_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(verifier_metadata->mark_task_pool, gc_verifier->trace_stack);
  
  /* second step: iterate over the trace tasks and trace the objects */
  gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool);

  Vector_Block* trace_task = pool_get_entry(verifier_metadata->mark_task_pool);

  while(trace_task){    
    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
    while(!vector_block_iterator_end(trace_task,iter)){
      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object* )*iter;
      iter = vector_block_iterator_advance(trace_task,iter);
      trace_object(heap_verifier, p_obj); 
    }
    vector_stack_clear(trace_task);
    pool_put_entry(verifier_metadata->free_task_pool, trace_task);
    trace_task = pool_get_entry(verifier_metadata->mark_task_pool);
  }
  vector_stack_clear(gc_verifier->trace_stack);
  pool_put_entry(verifier_metadata->free_task_pool, gc_verifier->trace_stack);
  gc_verifier->trace_stack = NULL;

}
Code Example #10
void verifier_clear_pool(Pool* working_pool, Pool* free_pool, Boolean is_vector_stack)
{
  Vector_Block* working_block = pool_get_entry(working_pool);
  while(working_block){
    if(is_vector_stack) vector_stack_clear(working_block);
    else vector_block_clear(working_block);
    pool_put_entry(free_pool, working_block);
    working_block = pool_get_entry(working_pool);
  }
}
Code Example #11
static void dead_weak_refs_fallback(GC *gc, Pool *ref_pool)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *free_pool = metadata->free_pool;
  Pool *fallback_ref_pool = metadata->fallback_ref_pool;
  
  Vector_Block *fallback_ref_block = finref_get_free_block(gc);
  Vector_Block *block = pool_get_entry(ref_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    while(!vector_block_iterator_end(block, iter)){
      Partial_Reveal_Object *p_obj = read_slot((REF*)iter);
      /* Perhaps fallback_ref_block has been allocated with a new free block if it is full */
      if(p_obj)
        fallback_ref_block = finref_add_fallback_ref(gc, fallback_ref_block, p_obj);
      iter = vector_block_iterator_advance(block, iter);
    }
    vector_block_clear(block);
    pool_put_entry(free_pool, block);
    block = pool_get_entry(ref_pool);
  }
  
  pool_put_entry(fallback_ref_pool, fallback_ref_block);
}
Code Example #12
void verifier_copy_pool_reverse_order(Pool* dest_pool, Pool* source_pool)
{
  pool_iterator_init(source_pool);
  Vector_Block* dest_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
  
  while(Vector_Block *source_set = pool_iterator_next(source_pool)){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(source_set);
    while( !vector_block_iterator_end(source_set, iter)){
      assert(!vector_block_is_full(dest_set));
      vector_block_add_entry(dest_set, *iter);
      iter = vector_block_iterator_advance(source_set, iter);
    }
    pool_put_entry(dest_pool, dest_set);
    dest_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
  }
  return;
}
Code Example #13
static inline void put_dead_weak_refs_to_vm(GC *gc, Pool *ref_pool)
{
  Pool *free_pool = gc->finref_metadata->free_pool;
  
  Vector_Block *block = pool_get_entry(ref_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    while(!vector_block_iterator_end(block, iter)){
      Managed_Object_Handle p_obj = (Managed_Object_Handle)read_slot((REF*)iter);
      if(p_obj)
        vm_enqueue_reference(p_obj);
      iter = vector_block_iterator_advance(block, iter);
    }
    vector_block_clear(block);
    pool_put_entry(free_pool, block);
    block = pool_get_entry(ref_pool);
  }
}
Code Example #14
static void put_finalizable_obj_to_vm(GC *gc)
{
  Pool *finalizable_obj_pool = gc->finref_metadata->finalizable_obj_pool;
  Pool *free_pool = gc->finref_metadata->free_pool;
  
  Vector_Block *block = pool_get_entry(finalizable_obj_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    while(!vector_block_iterator_end(block, iter)){
      assert(*iter);
      Managed_Object_Handle p_obj = (Managed_Object_Handle)read_slot((REF*)iter);
      vm_finalize_object(p_obj);
      iter = vector_block_iterator_advance(block, iter);
    }
    vector_block_clear(block);
    pool_put_entry(free_pool, block);
    block = pool_get_entry(finalizable_obj_pool);
  }
}
Code Example #15
/* One assumption: pfc_pool is not empty */
static Boolean pfc_pool_roughly_sort(Pool *pfc_pool, Chunk_Header **least_free_chunk, Chunk_Header **most_free_chunk)
{
  Chunk_Header *bucket_head[PFC_SORT_NUM];  /* Sorted chunk buckets' heads */
  Chunk_Header *bucket_tail[PFC_SORT_NUM];  /* Sorted chunk buckets' tails */
  unsigned int slot_num;
  unsigned int chunk_num = 0;
  unsigned int slot_alloc_num = 0;
  
  /* Init buckets' heads and tails */
  memset(bucket_head, 0, sizeof(Chunk_Header*) * PFC_SORT_NUM);
  memset(bucket_tail, 0, sizeof(Chunk_Header*) * PFC_SORT_NUM);
  
  /* Roughly sort chunks in pfc_pool */
  pool_iterator_init(pfc_pool);
  Chunk_Header *chunk = (Chunk_Header*)pool_iterator_next(pfc_pool);
  if(chunk) slot_num = chunk->slot_num;
  while(chunk){
    ++chunk_num;
    assert(chunk->alloc_num);
    slot_alloc_num += chunk->alloc_num;
    Chunk_Header *next_chunk = chunk->next;
    unsigned int bucket_index = (chunk->alloc_num*PFC_SORT_NUM-1) / slot_num;
    assert(bucket_index < PFC_SORT_NUM);
    sorted_chunk_bucket_add_entry(&bucket_head[bucket_index], &bucket_tail[bucket_index], chunk);
    chunk = next_chunk;
  }
  
  /* Empty the pfc pool because some chunks in this pool will be free after compaction */
  pool_empty(pfc_pool);
  
  /* If we can't get a free chunk after compaction, there is no need to compact.
   * This condition includes the case where the chunk num in the pfc pool is 1, in which case there is also no need to compact
   */
  if(slot_num*(chunk_num-1) <= slot_alloc_num){
    for(unsigned int i = 0; i < PFC_SORT_NUM; i++){
      Chunk_Header *chunk = bucket_head[i];
      while(chunk){
        Chunk_Header *next_chunk = chunk->next;
        pool_put_entry(pfc_pool, chunk);
        chunk = next_chunk;
      }
    }
    return FALSE;
  }
  
  /* Link the sorted chunk buckets into one single ordered bidirectional list */
  Chunk_Header *head = NULL;
  Chunk_Header *tail = NULL;
  for(unsigned int i = PFC_SORT_NUM; i--;){
    assert((head && tail) || (!head && !tail));
    assert((bucket_head[i] && bucket_tail[i]) || (!bucket_head[i] && !bucket_tail[i]));
    if(!bucket_head[i]) continue;
    if(!tail){
      head = bucket_head[i];
      tail = bucket_tail[i];
    } else {
      tail->next = bucket_head[i];
      bucket_head[i]->prev = tail;
      tail = bucket_tail[i];
    }
  }
  
  assert(head && tail);
  *least_free_chunk = head;
  *most_free_chunk = tail;
  
  return TRUE;
}
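
The rough sort places each chunk by occupancy: bucket_index = (alloc_num*PFC_SORT_NUM - 1) / slot_num, which grows with alloc_num and stays below PFC_SORT_NUM whenever alloc_num <= slot_num. A worked check with sample numbers (the sample values are assumptions, not the real constants):

#include <cassert>

int main()
{
  const unsigned PFC_SORT_NUM = 8;   // number of buckets (stand-in value)
  const unsigned slot_num = 64;      // slots per chunk (stand-in value)
  assert((1  * PFC_SORT_NUM - 1) / slot_num == 0); // nearly empty chunk
  assert((32 * PFC_SORT_NUM - 1) / slot_num == 3); // half-full chunk
  assert((64 * PFC_SORT_NUM - 1) / slot_num == 7); // completely full chunk
  return 0;
}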
Code Example #16
// Resurrect the obj tree whose root is the obj which p_ref points to
static inline void resurrect_obj_tree(Collector *collector, REF *p_ref)
{
  GC *gc = collector->gc;
  GC_Metadata *metadata = gc->metadata;
  Partial_Reveal_Object *p_obj = read_slot(p_ref);
  assert(p_obj && gc_obj_is_dead(gc, p_obj));
  
  void *p_ref_or_obj = p_ref;
  Trace_Object_Func trace_object;
  
  /* set trace_object() function */
  if(collect_is_minor()){
    if(gc_is_gen_mode()){
      if(minor_is_forward())
        trace_object = trace_obj_in_gen_fw;
      else if(minor_is_semispace())
        trace_object = trace_obj_in_gen_ss;
      else 
        assert(0);
    }else{
      if(minor_is_forward())
        trace_object = trace_obj_in_nongen_fw;
      else if(minor_is_semispace())
        trace_object = trace_obj_in_nongen_ss;
      else 
        assert(0);
    }
  } else if(collect_is_major_normal() || !gc_has_nos()){
    p_ref_or_obj = p_obj;
    if(gc_has_space_tuner(gc) && (gc->tuner->kind != TRANS_NOTHING)){
      trace_object = trace_obj_in_space_tune_marking;
      unsigned int obj_size = vm_object_size(p_obj);
#ifdef USE_32BITS_HASHCODE
      obj_size += hashcode_is_set(p_obj) ? GC_OBJECT_ALIGNMENT : 0;
#endif
      if(!obj_belongs_to_space(p_obj, gc_get_los((GC_Gen*)gc))){
        collector->non_los_live_obj_size += obj_size;
        collector->segment_live_size[SIZE_TO_SEGMENT_INDEX(obj_size)] += obj_size;
      } else {
        collector->los_live_obj_size += round_up_to_size(obj_size, KB); 
      }
    } else if(!gc_has_nos()){
      trace_object = trace_obj_in_ms_marking;
    } else {
      trace_object = trace_obj_in_normal_marking;
    }
  } else if(collect_is_fallback()){
    if(major_is_marksweep())
      trace_object = trace_obj_in_ms_fallback_marking;
    else
      trace_object = trace_obj_in_fallback_marking;
  } else {
    assert(major_is_marksweep());
    p_ref_or_obj = p_obj;
    if( gc->gc_concurrent_status == GC_CON_NIL )
      trace_object = trace_obj_in_ms_marking;
    else
      trace_object = trace_obj_in_ms_concurrent_mark;
  }
  
  collector->trace_stack = free_task_pool_get_entry(metadata);
  collector_tracestack_push(collector, p_ref_or_obj);
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  collector->trace_stack = free_task_pool_get_entry(metadata);
  Vector_Block *task_block = pool_get_entry(metadata->mark_task_pool);
  while(task_block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(task_block);
    while(!vector_block_iterator_end(task_block, iter)){
      void *p_ref_or_obj = (void*)*iter;
      assert(((collect_is_minor()||collect_is_fallback()) && *(Partial_Reveal_Object **)p_ref_or_obj)
              || ((collect_is_major_normal()||major_is_marksweep()||!gc_has_nos()) && p_ref_or_obj));
      trace_object(collector, p_ref_or_obj);
      if(collector->result == FALSE)  break; /* Resurrection fallback happens; force return */
      
      iter = vector_block_iterator_advance(task_block, iter);
    }
    vector_stack_clear(task_block);
    pool_put_entry(metadata->free_task_pool, task_block);
    
    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }
    
    task_block = pool_get_entry(metadata->mark_task_pool);
  }
  
  task_block = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(task_block);
  pool_put_entry(metadata->free_task_pool, task_block);
  collector->trace_stack = NULL;
}
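
resurrect_obj_tree selects trace_object once, up front, and the drain loop then calls it without re-testing the collection kind. A condensed sketch of that dispatch shape (the enum and trace routines below are illustrative stand-ins, not the GC's):

typedef void (*Trace_Object_Func)(void* ref_or_obj);

enum Collect_Kind { MINOR, MAJOR, FALLBACK };

static void trace_minor(void*)    { /* forward young objects */ }
static void trace_major(void*)    { /* mark/compact the whole heap */ }
static void trace_fallback(void*) { /* fallback marking */ }

Trace_Object_Func select_trace(Collect_Kind kind)
{
  switch (kind) {                  // decided once per collection
    case MINOR: return trace_minor;
    case MAJOR: return trace_major;
    default:    return trace_fallback;
  }
}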
Code Example #17
void wspace_final_mark_scan_mostly_concurrent(Conclctor* marker)
{
  
  GC *gc = marker->gc;
  GC_Metadata *metadata = gc->metadata;

  unsigned int num_dirtyset_slot = 0;
  
  marker->trace_stack = free_task_pool_get_entry(metadata);
  Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);
  
  /* first step: copy all root objects to mark tasks.*/
  while(root_set){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(root_set,iter);
      
      assert(p_obj!=NULL);
      assert(address_belongs_to_gc_heap(p_obj, gc));
      if(obj_mark_gray_in_table(p_obj))
        collector_tracestack_push((Collector*)marker, p_obj);
    }
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */
  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
  marker->trace_stack = free_task_pool_get_entry(metadata);


  /*second step: mark dirty pool*/
  Vector_Block* dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);

  while(dirty_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(dirty_set);
    while(!vector_block_iterator_end(dirty_set,iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(dirty_set,iter);

      assert(p_obj!=NULL); //FIXME: restrict condition?
      
      obj_clear_dirty_in_table(p_obj);
      obj_clear_mark_in_table(p_obj, marker);

      if(obj_mark_gray_in_table(p_obj))
        collector_tracestack_push((Collector*)marker, p_obj);

      num_dirtyset_slot ++;
    } 
    vector_block_clear(dirty_set);
    pool_put_entry(metadata->free_set_pool, dirty_set);
    dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
  }
   /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);  

  /* third step: iterate over the mark tasks and scan objects */
  marker->trace_stack = free_task_pool_get_entry(metadata);

  Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
  
  while(mark_task){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
    while(!vector_block_iterator_end(mark_task,iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
      iter = vector_block_iterator_advance(mark_task,iter);
      trace_object(marker, p_obj);      
    }
    /* run out one task, put back to the pool and grab another task */
    vector_stack_clear(mark_task);
    pool_put_entry(metadata->free_task_pool, mark_task);
    mark_task = pool_get_entry(metadata->mark_task_pool);
  }

  /* put back the last mark stack to the free pool */
  mark_task = (Vector_Block*)marker->trace_stack;
  vector_stack_clear(mark_task);
  pool_put_entry(metadata->free_task_pool, mark_task);
  marker->trace_stack = NULL;

  //marker->time_mark += time_mark;
  marker->num_dirty_slots_traced = num_dirtyset_slot;
  //INFO2("gc.marker", "[final marker] processed dirty slot="<<num_dirtyset_slot);
  
  return;
}
Code Example #18
void wspace_mark_scan_mostly_concurrent(Conclctor* marker)
{
  GC *gc = marker->gc;
  GC_Metadata *metadata = gc->metadata;
  
  unsigned int num_dirtyset_slot = 0;

  marker->trace_stack = free_task_pool_get_entry(metadata);
  
  /* first step: copy all root objects to mark tasks.*/
  Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);

  while(root_set){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(root_set,iter);
      
      assert(p_obj!=NULL);
      assert(address_belongs_to_gc_heap(p_obj, gc));
      if(obj_mark_gray_in_table(p_obj))
        collector_tracestack_push((Collector*)marker, p_obj);
    }
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }

  /* put back the last trace_stack task */
  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
  marker->trace_stack = free_task_pool_get_entry(metadata);

  /* the following code has these concerns:
      1. current_thread_id should be unique;
      2. mostly concurrent marking does not need to add new markers dynamically;
      3. when the heap is exhausted, final marking will enumerate the rootset, so it should happen after the above actions
  */
  unsigned int current_thread_id = atomic_inc32(&num_active_markers);

  if((current_thread_id+1) == gc->num_active_markers )
    state_transformation( gc, GC_CON_START_MARKERS, GC_CON_TRACING);
  
  while( gc->gc_concurrent_status == GC_CON_START_MARKERS );

retry:

  /* second step: mark dirty pool */
  Vector_Block* dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);

  while(dirty_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(dirty_set);
    while(!vector_block_iterator_end(dirty_set,iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(dirty_set,iter);

      assert(p_obj!=NULL); //FIXME: restrict condition?
      
      obj_clear_dirty_in_table(p_obj);
      obj_clear_mark_in_table(p_obj, marker);

      if(obj_mark_gray_in_table(p_obj))
        collector_tracestack_push((Collector*)marker, p_obj);

      num_dirtyset_slot ++;
    } 
    vector_block_clear(dirty_set);
    pool_put_entry(metadata->free_set_pool, dirty_set);
    dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
  }

   /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);  

  /* third step: iterate over the mark tasks and scan objects */
   marker->trace_stack = free_task_pool_get_entry(metadata);

  
  Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
  
  while(mark_task){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
    while(!vector_block_iterator_end(mark_task,iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
      iter = vector_block_iterator_advance(mark_task,iter);
      trace_object(marker, p_obj);      
    }
    /* run out one task, put back to the pool and grab another task */
    vector_stack_clear(mark_task);
    pool_put_entry(metadata->free_task_pool, mark_task);
    mark_task = pool_get_entry(metadata->mark_task_pool);
  }

  /*
  if(current_thread_id == 0){
    gc_prepare_dirty_set(marker->gc);
  }*/

  gc_copy_local_dirty_set_to_global(gc);
  
  /* conditions to terminate marking:
       1. all threads have finished the current job;
       2. the flag is set to terminate concurrent marking
   */
  atomic_dec32(&num_active_markers);
  while(num_active_markers != 0 || !concurrent_mark_need_terminating_mc(gc) ) {
    if(!pool_is_empty(metadata->mark_task_pool) || !pool_is_empty(metadata->gc_dirty_set_pool)) {
      atomic_inc32(&num_active_markers);
      goto retry;
    } else if( current_thread_id >= mostly_con_long_marker_num ) {
      break;
    }
    apr_sleep(15000);
  }

  /*
  while(num_active_markers != 0 || !concurrent_mark_need_terminating_mc(gc)){
    if(!pool_is_empty(metadata->mark_task_pool) || !pool_is_empty(metadata->gc_dirty_set_pool)){
      atomic_inc32(&num_active_markers);
      goto retry;
    }
  }*/
  
  /* put back the last mark stack to the free pool */
  mark_task = (Vector_Block*)marker->trace_stack;
  vector_stack_clear(mark_task);
  pool_put_entry(metadata->free_task_pool, mark_task);
  marker->trace_stack = NULL;
  marker->num_dirty_slots_traced = num_dirtyset_slot;

  /*
  if(num_dirtyset_slot!=0) {
  	lock(info_lock);
  	INFO2("gc.marker", "marker ["<< current_thread_id <<"] processed dirty slot="<<num_dirtyset_slot);
	unlock(info_lock);
  }*/
  return;
}
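
The termination protocol above is worth isolating: a marker decrements the active count to declare itself idle, but must re-increment and loop back to retry if work reappears before every marker is idle; otherwise entries left in the task or dirty pools could be lost. A self-contained sketch with std::atomic standing in for the GC's counters (an assumption, not the real primitives):

#include <atomic>

std::atomic<int> num_active(0);              // stands in for num_active_markers

// Returns true when every marker is idle and no work remains; returns false
// when new work appeared, in which case the caller must loop back (retry).
bool try_terminate(bool (*work_available)())
{
  num_active.fetch_sub(1);                   // declare this marker idle
  while (num_active.load() != 0) {
    if (work_available()) {                  // tasks or dirty entries showed up
      num_active.fetch_add(1);               // re-activate before retrying
      return false;
    }
  }
  return true;                               // all markers idle: safe to stop
}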
Code Example #19
static void collector_trace_rootsets(Collector* collector)
{
  GC* gc = collector->gc;
  GC_Metadata* metadata = gc->metadata;
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif
  
  unsigned int num_active_collectors = gc->num_active_collectors;
  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);

  Space* space = collector->collect_space;
  collector->trace_stack = free_task_pool_get_entry(metadata);

  /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */  
  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to trace tasks. */ 

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: copy root objects to trace stack ...");
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(root_set, iter);

      assert(*p_ref);  /* a root ref can't be NULL, but remset entries can be */

      collector_tracestack_push(collector, p_ref);

#ifdef GC_GEN_STATS    
      gc_gen_collector_update_rootset_ref_num(stats);
#endif
    } 
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  /* second step: iterate over the trace tasks and forward objects */
  collector->trace_stack = free_task_pool_get_entry(metadata);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish copying root objects to trace stack.");

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: trace and forward objects ...");

retry:
  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);

  while(trace_task){    
    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
    while(!vector_block_iterator_end(trace_task,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(trace_task, iter);
#ifdef PREFETCH_SUPPORTED
      /* DO PREFETCH */
      if( mark_prefetch ) {
        if(!vector_block_iterator_end(trace_task, iter)) {
          REF *pref = (REF*) *iter;
          PREFETCH( read_slot(pref));
        }
      }
#endif
      trace_object(collector, p_ref);
      
      if(collector->result == FALSE)  break; /* force return */
 
    }
    vector_stack_clear(trace_task);
    pool_put_entry(metadata->free_task_pool, trace_task);

    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }
    
    trace_task = pool_get_entry(metadata->mark_task_pool);
  }
  
  /* A collector comes here when seeing an empty mark_task_pool. The last collector will ensure 
     all the tasks are finished.*/
     
  atomic_inc32(&num_finished_collectors);
  while(num_finished_collectors != num_active_collectors){
    if( pool_is_empty(metadata->mark_task_pool)) continue;
    /* we can't grab the task here, because of a race condition. If we grab the task, 
       and the pool is empty, other threads may fall to this barrier and then pass. */
    atomic_dec32(&num_finished_collectors);
    goto retry; 
  }

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish tracing and forwarding objects.");

  /* now we are done, but each collector has a private stack that is empty */  
  trace_task = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(trace_task);
  pool_put_entry(metadata->free_task_pool, trace_task);   
  collector->trace_stack = NULL;
  
  return;
}
Code Example #20
void wspace_mark_scan_concurrent(Conclctor* marker)
{
  //marker->time_measurement_start = time_now();
  GC *gc = marker->gc;
  GC_Metadata *metadata = gc->metadata;
  
  /* each marker claims a unique thread id by atomically incrementing the active-marker count */
  unsigned int current_thread_id = atomic_inc32(&num_active_markers);
  marker->trace_stack = free_task_pool_get_entry(metadata);
  Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to mark tasks.*/
  while(root_set){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(root_set,iter);
      
      assert(p_obj!=NULL);
      assert(address_belongs_to_gc_heap(p_obj, gc));
      //if(obj_mark_gray_in_table(p_obj, &root_set_obj_size))
      if(obj_mark_gray_in_table(p_obj))
        collector_tracestack_push((Collector*)marker, p_obj);
    }
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */
  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
  
  marker->trace_stack = free_task_pool_get_entry(metadata);

  state_transformation( gc, GC_CON_START_MARKERS, GC_CON_TRACING);
retry:
  
  gc_copy_local_dirty_set_to_global(marker->gc);
  /*second step: mark dirty object snapshot pool*/
  Vector_Block* dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);

  while(dirty_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(dirty_set);
    while(!vector_block_iterator_end(dirty_set,iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(dirty_set,iter);

      if(p_obj==NULL) { //FIXME: restrict?
        RAISE_ERROR;
      }
      marker->num_dirty_slots_traced++;
      if(obj_mark_gray_in_table(p_obj))
        collector_tracestack_push((Collector*)marker, p_obj);
    } 
    vector_block_clear(dirty_set);
    pool_put_entry(metadata->free_set_pool, dirty_set);
    dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
  }

    /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);  

  /* third step: iterate over the mark tasks and scan objects */
  /* get a task buf for the mark stack */
  marker->trace_stack = free_task_pool_get_entry(metadata);

  
  Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
  
  while(mark_task){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
    while(!vector_block_iterator_end(mark_task,iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
      iter = vector_block_iterator_advance(mark_task,iter);
      trace_object(marker, p_obj);      
    }
    /* run out one task, put back to the pool and grab another task */
    vector_stack_clear(mark_task);
    pool_put_entry(metadata->free_task_pool, mark_task);
    mark_task = pool_get_entry(metadata->mark_task_pool);
  }
  
  /* termination conditions:
       1. all threads have finished the current job;
       2. local snapshot vectors are empty;
       3. the global snapshot pool is empty
   */
  atomic_dec32(&num_active_markers);
  while(num_active_markers != 0 || !concurrent_mark_need_terminating_otf(gc)){
    if(!pool_is_empty(metadata->mark_task_pool) || !concurrent_mark_need_terminating_otf(gc)){
      atomic_inc32(&num_active_markers);
      goto retry;
    }
    apr_sleep(15000);
  }

  state_transformation( gc, GC_CON_TRACING, GC_CON_TRACE_DONE );
  /* put back the last mark stack to the free pool */
  mark_task = (Vector_Block*)marker->trace_stack;
  vector_stack_clear(mark_task);
  pool_put_entry(metadata->free_task_pool, mark_task);
  marker->trace_stack = NULL;
  assert(pool_is_empty(metadata->gc_dirty_set_pool));

    //INFO2("gc.con.info", "<stage 5>first marker finishes its job");

  return;
}
Code Example #21
static void collector_trace_rootsets(Collector* collector)
{
  GC* gc = collector->gc;
  GC_Metadata* metadata = gc->metadata;
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif
  
  unsigned int num_active_collectors = gc->num_active_collectors;
  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);

  Space* space = collector->collect_space;
  collector->trace_stack = free_task_pool_get_entry(metadata);

  /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */  
  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to trace tasks. */ 

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: copy root objects to trace stack ......");
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(root_set,iter);
      
      if(!*p_ref) continue;  /* a root ref can't be NULL, but remset entries can be */
      Partial_Reveal_Object *p_obj = read_slot(p_ref);

#ifdef GC_GEN_STATS
      gc_gen_collector_update_rootset_ref_num(stats);
#endif

      if(obj_belongs_to_nos(p_obj)){
        collector_tracestack_push(collector, p_ref);
      }
    } 
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  /* second step: iterate over the trace tasks and forward objects */
  collector->trace_stack = free_task_pool_get_entry(metadata);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish copying root objects to trace stack.");

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: trace and forward objects ......");

retry:
  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);

  while(trace_task){    
    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
    while(!vector_block_iterator_end(trace_task,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(trace_task,iter);
      assert(*p_ref); /* a task can't be NULL, it was checked before put into the task stack */
#ifdef PREFETCH_SUPPORTED
      /* DO PREFETCH */
      if( mark_prefetch ) {
        if(!vector_block_iterator_end(trace_task, iter)) {
          REF *pref = (REF*) *iter;
          PREFETCH( read_slot(pref));
        }
      }
#endif
      /* in the sequential version, we only trace the same object once, but we were using a local hashset for that,
         which couldn't catch repetitions across multiple collectors. This is subject to more study. */

      /* FIXME:: we should not let root_set become empty while working; others may want to steal it.
         Degenerate my stack into root_set, and grab another stack */
   
      /* a task has to belong to collected space, it was checked before put into the stack */
      trace_object(collector, p_ref);
      if(collector->result == FALSE)  break; /* force return */
    }
    vector_stack_clear(trace_task);
    pool_put_entry(metadata->free_task_pool, trace_task);
    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }

    trace_task = pool_get_entry(metadata->mark_task_pool);
  }
  
  atomic_inc32(&num_finished_collectors);
  while(num_finished_collectors != num_active_collectors){
    if( pool_is_empty(metadata->mark_task_pool)) continue;
    /* we can't grab the task here, because of a race condition. If we grab the task, 
       and the pool is empty, other threads may fall to this barrier and then pass. */
    atomic_dec32(&num_finished_collectors);
    goto retry;      
  }
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish tracing and forwarding objects.");

  /* now we are done, but each collector has a private stack that is empty */  
  trace_task = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(trace_task);
  pool_put_entry(metadata->free_task_pool, trace_task);   
  collector->trace_stack = NULL;
  
  return;
}
Code Example #22
void mark_scan_pool(Collector* collector)
{
  GC* gc = collector->gc;
  GC_Metadata* metadata = gc->metadata;
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

  /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
  unsigned int num_active_collectors = gc->num_active_collectors;
  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
   
  collector->trace_stack = free_task_pool_get_entry(metadata);

  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to mark tasks. 
      FIXME:: can be done sequentially before coming here to eliminate atomic ops */ 
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(root_set,iter);

      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      /* root ref can't be NULL (remset may have NULL ref entries, but this function is only for ALGO_MAJOR) */
      assert(p_obj!=NULL);
      /* we have to mark the object before putting it into the mark task, because
         it is possible to have two slots containing the same object. They would
         be scanned twice and their ref slots would be recorded twice. The problem
         occurs when the ref slot has already been updated the first time with the new
         position, while the second update still expects the value in the ref slot
         to be the old position. This can be worked around if we want.
      */
      if(obj_mark_in_vt(p_obj)){
        collector_tracestack_push(collector, p_obj);
#ifdef GC_GEN_STATS
        gc_gen_collector_update_rootset_ref_num(stats);
        gc_gen_collector_update_marked_obj_stats_major(stats);
#endif
      }

    } 
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  /* second step: iterate over the mark tasks and scan objects */
  /* get a task buf for the mark stack */
  collector->trace_stack = free_task_pool_get_entry(metadata);

retry:
  Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
  
  while(mark_task){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(mark_task);
    while(!vector_block_iterator_end(mark_task,iter)){
      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(mark_task,iter);

      /* FIXME:: we should not let mark_task become empty while working; others may want to steal it.
         Degenerate my stack into mark_task, and grab another mark_task */
      trace_object(collector, p_obj);
    } 
    /* run out one task, put back to the pool and grab another task */
    vector_stack_clear(mark_task);
    pool_put_entry(metadata->free_task_pool, mark_task);
    mark_task = pool_get_entry(metadata->mark_task_pool);
  }
  
  /* termination detection. This is also a barrier.
     NOTE:: We can simply spin waiting for num_finished_collectors, because each 
     generated new task would surely be processed by its generating collector eventually. 
     So code below is only for load balance optimization. */
  atomic_inc32(&num_finished_collectors);
  while(num_finished_collectors != num_active_collectors){
    if( !pool_is_empty(metadata->mark_task_pool)){
      atomic_dec32(&num_finished_collectors);
      goto retry;  
    }
  }
     
  /* put back the last mark stack to the free pool */
  mark_task = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(mark_task);
  pool_put_entry(metadata->free_task_pool, mark_task);   
  collector->trace_stack = NULL;
  
  return;
}
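
The long comment in the first loop is the key invariant: test-and-set the mark at discovery time, not at scan time, so an object reachable from two slots enters the mark task at most once. A minimal stand-alone illustration of that rule (assumed types, not the GC's vtable marking):

#include <vector>

struct Obj { bool marked = false; std::vector<Obj*> refs; };

// Push an object onto the mark stack only if this call won the mark;
// a second slot holding the same object finds it already marked.
void mark_and_push(Obj* obj, std::vector<Obj*>& mark_stack)
{
  if (obj && !obj->marked) {       // test-and-mark at discovery time
    obj->marked = true;
    mark_stack.push_back(obj);     // pushed at most once per object
  }
}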
Code Example #23
File: mspace_slide_compact.cpp  Project: dacut/juliet
static void mspace_compute_object_target(Collector* collector, Mspace* mspace)
{
    Block_Header *curr_block = collector->cur_compact_block;
    Block_Header *dest_block = collector->cur_target_block;
    Block_Header *local_last_dest = dest_block;
    void *dest_addr = dest_block->base;
    Block_Header *last_src;

#ifdef USE_32BITS_HASHCODE
    Hashcode_Buf* old_hashcode_buf = NULL;
    Hashcode_Buf* new_hashcode_buf = hashcode_buf_create();
    hashcode_buf_init(new_hashcode_buf);
#endif

    assert(!collector->rem_set);
    collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
#ifdef USE_32BITS_HASHCODE
    collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata);
#endif

#ifdef GC_GEN_STATS
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

    while( curr_block ) {
        void* start_pos;
        Partial_Reveal_Object *first_obj = block_get_first_marked_obj_prefetch_next(curr_block, &start_pos);
        if(first_obj) {
            ++curr_block->dest_counter;
            if(!dest_block->src)
                dest_block->src = first_obj;
            else
                last_src->next_src = first_obj;
            last_src = curr_block;
        }
        Partial_Reveal_Object* p_obj = first_obj;

        while( p_obj ) {
            assert( obj_is_marked_in_vt(p_obj));

            unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj);


#ifdef GC_GEN_STATS
            gc_gen_collector_update_moved_nos_mos_obj_stats_major(stats, obj_size);
#endif

            Obj_Info_Type obj_info = get_obj_info(p_obj);

            unsigned int obj_size_precompute = obj_size;

#ifdef USE_32BITS_HASHCODE
            precompute_hashcode_extend_size(p_obj, dest_addr, &obj_size_precompute);
#endif
            if( ((POINTER_SIZE_INT)dest_addr + obj_size_precompute) > (POINTER_SIZE_INT)GC_BLOCK_END(dest_block)) {
#ifdef USE_32BITS_HASHCODE
                block_swap_hashcode_buf(dest_block, &new_hashcode_buf, &old_hashcode_buf);
#endif
                dest_block->new_free = dest_addr;
                dest_block = mspace_get_next_target_block(collector, mspace);
                if(dest_block == NULL) {
                    collector->result = FALSE;
                    return;
                }
                if((!local_last_dest) || (dest_block->block_idx > local_last_dest->block_idx))
                    local_last_dest = dest_block;
                dest_addr = dest_block->base;
                dest_block->src = p_obj;
                last_src = curr_block;
                if(p_obj != first_obj)
                    ++curr_block->dest_counter;
            }
            assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)GC_BLOCK_END(dest_block));

#ifdef USE_32BITS_HASHCODE
            obj_info = slide_compact_process_hashcode(p_obj, dest_addr, &obj_size, collector,curr_block->hashcode_buf, new_hashcode_buf);
#endif

            if( obj_info != 0 ) {
                collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
                collector_remset_add_entry(collector, (Partial_Reveal_Object **)(POINTER_SIZE_INT)obj_info);
            }

            obj_set_fw_in_oi(p_obj, dest_addr);

            /* FIXME: should use alloc to handle alignment requirement */
            dest_addr = (void *)((POINTER_SIZE_INT) dest_addr + obj_size);
            p_obj = block_get_next_marked_obj_prefetch_next(curr_block, &start_pos);
        }
#ifdef USE_32BITS_HASHCODE
        hashcode_buf_clear(curr_block->hashcode_buf);
#endif
        curr_block = mspace_get_next_compact_block(collector, mspace);

    }

#ifdef USE_32BITS_HASHCODE
    pool_put_entry(collector->gc->metadata->collector_hashcode_pool, collector->hashcode_set);
    collector->hashcode_set = NULL;
#endif
    pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set);
    collector->rem_set = NULL;
    dest_block->new_free = dest_addr;

    Block_Header *cur_last_dest = (Block_Header *)last_block_for_dest;
    collector->cur_target_block = local_last_dest;
    while((local_last_dest)&&((!cur_last_dest) || (local_last_dest->block_idx > cur_last_dest->block_idx))) {
        atomic_casptr((volatile void **)&last_block_for_dest, local_last_dest, cur_last_dest);
        cur_last_dest = (Block_Header *)last_block_for_dest;
    }

#ifdef USE_32BITS_HASHCODE
    old_hashcode_buf = block_set_hashcode_buf(dest_block, new_hashcode_buf);
    hashcode_buf_destory(old_hashcode_buf);
#endif
    return;
}
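
Stripped of blocks and hashcodes, the core of the compaction pass above is a running cursor: each live object's forwarding address is the current dest_addr, which is then bumped by the object's size. A toy model with plain integers (an assumption for illustration; it ignores block boundaries and alignment):

#include <cassert>
#include <vector>

int main()
{
  std::vector<unsigned> live_sizes = {24, 40, 16};  // live objects in heap order
  unsigned dest = 0;                                // stands in for dest_block->base
  std::vector<unsigned> forward;                    // forwarding addresses
  for (unsigned size : live_sizes) {
    forward.push_back(dest);                        // like obj_set_fw_in_oi()
    dest += size;                                   // slide the next object after
  }
  assert(forward[0] == 0 && forward[1] == 24 && forward[2] == 64);
  return 0;
}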