Example No. 1
static void _ensure_pool_is_empty_gracefully_handles_cleaned_up_pool( void )
{
	pool_t pool;
	pool_init( &pool, 4, 1, allocator_default( ) );
	pool_cleanup( &pool );
	TEST_REQUIRE( pool_is_empty( &pool ) != 0 );
}
Example No. 2
static void _ensure_pool_is_empty_returns_zero_on_populated_pool( void )
{
	pool_t pool;
	pool_init( &pool, 4, 1, allocator_default( ) );
	TEST_REQUIRE( pool_is_empty( &pool ) == 0 );
	pool_cleanup( &pool );
}
Example No. 3
/* Deal with resurrection fallback */
static void resurrection_fallback_handler(GC *gc)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  
  /* Repset pool should be empty, because we don't add anything to this pool in Minor Collection. */
  assert(pool_is_empty(metadata->repset_pool));
  
  finalizable_objs_fallback(gc);
  dead_refs_fallback(gc);
  
  assert(pool_is_empty(metadata->finalizable_obj_pool));
  assert(pool_is_empty(metadata->softref_pool));
  assert(pool_is_empty(metadata->weakref_pool));
  assert(pool_is_empty(metadata->phanref_pool));
  
  assert(metadata->finalizable_obj_set == NULL);
  assert(metadata->repset == NULL);
}
Example No. 4
static void _ensure_pool_init_with_zero_element_count_constructs_valid_empty_pool( void )
{
	allocator_counted_t alloc;
	pool_t pool;

	allocator_counted_init_default( &alloc );
	pool_init( &pool, 4, 0, allocator_counted_get( &alloc ) );
	TEST_REQUIRE( pool_is_empty( &pool ) );
	pool_cleanup( &pool );
	TEST_REQUIRE( allocator_counted_get_current_count( &alloc ) == 0 );
}
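A side note on the pattern above: the counting allocator from Example No. 4 also makes a convenient leak check for pools that actually hold elements. The sketch below is illustrative only, not part of the original suite; it reuses the identifiers shown in Examples No. 1 and No. 4 (pool_init, pool_cleanup, pool_is_empty, allocator_counted_*) and assumes nothing about the pool API beyond what those tests already assert. The element count of 8 and the helper name are made up for the example.

/* Sketch only: init with a non-zero element count, clean up, and verify via the
   counting allocator that every allocation was released. The final assertion mirrors
   Example No. 1: a cleaned-up pool reports empty. */
static void _sketch_pool_cleanup_releases_all_memory( void )
{
	allocator_counted_t alloc;
	pool_t pool;

	allocator_counted_init_default( &alloc );
	pool_init( &pool, 4, 8, allocator_counted_get( &alloc ) );
	pool_cleanup( &pool );
	TEST_REQUIRE( allocator_counted_get_current_count( &alloc ) == 0 );	/* nothing leaked */
	TEST_REQUIRE( pool_is_empty( &pool ) != 0 );	/* cleaned-up pool reports empty */
}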
Example No. 5
static void put_dead_refs_to_vm(GC *gc)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  
  if(softref_pool_is_empty(gc)
      && weakref_pool_is_empty(gc)
      && phanref_pool_is_empty(gc)
      && pool_is_empty(metadata->fallback_ref_pool)){
    gc_clear_weakref_pools(gc);
    return;
  }
  
  put_dead_weak_refs_to_vm(gc, metadata->softref_pool);
  put_dead_weak_refs_to_vm(gc, metadata->weakref_pool);
  put_dead_weak_refs_to_vm(gc, metadata->phanref_pool);
  
  /* This is a major collection after resurrection fallback */
  if(!pool_is_empty(metadata->fallback_ref_pool)){
    put_dead_weak_refs_to_vm(gc, metadata->fallback_ref_pool);
  }
  
  metadata->pending_weakrefs = TRUE;
}
Example No. 6
void gc_update_finref_repointed_refs(GC *gc, Boolean double_fix)
{
  assert(!collect_is_minor());
  
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *repset_pool = metadata->repset_pool;
  Pool *fallback_ref_pool = metadata->fallback_ref_pool;
  
  nondestructively_fix_finref_pool(gc, repset_pool, TRUE, double_fix);
  if(!pool_is_empty(fallback_ref_pool)){
    assert(collect_is_fallback());
    nondestructively_fix_finref_pool(gc, fallback_ref_pool, FALSE, double_fix);
  }
}
Example No. 7
/* Record softrefs and weakrefs whose referents are dead
 * so that we can update their addr and put them to VM.
 * In fallback collection these refs will not be considered for enqueueing again,
 * since their referent fields have been cleared by identify_dead_refs().
 */
static void dead_refs_fallback(GC *gc)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  
  if(!softref_pool_is_empty(gc) || !weakref_pool_is_empty(gc))
    metadata->pending_weakrefs = TRUE;
  
  /* We only use fallback_ref_pool in resurrection fallback so it must be empty */
  assert(pool_is_empty(metadata->fallback_ref_pool));
  
  dead_weak_refs_fallback(gc, metadata->softref_pool);
  dead_weak_refs_fallback(gc, metadata->weakref_pool);
  
  gc_clear_weakref_pools(gc);
}
Example No. 8
static Boolean dirty_set_is_empty(GC *gc) 
{
  lock(gc->mutator_list_lock);
  Mutator *mutator = gc->mutator_list;
  while (mutator) {
    Vector_Block* local_dirty_set = mutator->dirty_set;
    if(!vector_block_is_empty(local_dirty_set)){
      unlock(gc->mutator_list_lock); 
      return FALSE;
    }
    mutator = mutator->next;
  }
  GC_Metadata *metadata = gc->metadata;
  Boolean is_empty = pool_is_empty(metadata->gc_dirty_set_pool);
  unlock(gc->mutator_list_lock); /* unlock only after the global check, so no new mutator can be created before the global dirty set is examined */
  return is_empty;
}
Example No. 9
void wspace_compact(Collector *collector, Wspace *wspace)
{
  Chunk_Header *least_free_chunk, *most_free_chunk;
  Pool *pfc_pool = wspace_grab_next_pfc_pool(wspace);
  
  for(; pfc_pool; pfc_pool = wspace_grab_next_pfc_pool(wspace)){
    if(pool_is_empty(pfc_pool)) continue;
    Boolean pfc_pool_need_compact = pfc_pool_roughly_sort(pfc_pool, &least_free_chunk, &most_free_chunk);
    if(!pfc_pool_need_compact) continue;
    
    Chunk_Header *dest = get_least_free_chunk(&least_free_chunk, &most_free_chunk);
    Chunk_Header *src = get_most_free_chunk(&least_free_chunk, &most_free_chunk);
    Boolean src_is_new = TRUE;
    while(dest && src){
      if(src_is_new)
        src->slot_index = 0;
      //chunk_depad_last_index_word(src);
      move_obj_between_chunks(wspace, &dest, src);
      if(!dest)
        dest = get_least_free_chunk(&least_free_chunk, &most_free_chunk);
      if(!src->alloc_num){
        collector_add_free_chunk(collector, (Free_Chunk*)src);
        src = get_most_free_chunk(&least_free_chunk, &most_free_chunk);
        src_is_new = TRUE;
      } else {
        src_is_new = FALSE;
      }
    }
    
    /* Rebuild the pfc_pool */
    if(dest)
      wspace_put_pfc(wspace, dest);
    if(src){
      //chunk_pad_last_index_word(src, cur_alloc_mask);
      pfc_reset_slot_index(src);
      wspace_put_pfc(wspace, src);
    }
  }
}
Example No. 10
/* Concurrent Sweep:
   The mark bit and alloc bit are exchanged before entering this function.
   This function clears the mark bit and merges free chunks concurrently.
 */
void wspace_sweep_concurrent(Conclctor* sweeper)
{
  GC *gc = sweeper->gc;
  
  Wspace *wspace = gc_get_wspace(gc);

  sweeper->live_obj_size = 0;
  sweeper->live_obj_num = 0;

  Pool* used_chunk_pool = wspace->used_chunk_pool;

  Chunk_Header_Basic* chunk_to_sweep;
  
  /*1. Grab chunks from used list, sweep the chunk and push back to PFC backup list & free list.*/
  chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
  while(chunk_to_sweep != NULL){
    wspace_sweep_chunk_con(wspace, sweeper, chunk_to_sweep);
    chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
  }

  /*2. Grab chunks from PFC list, sweep the chunk and push back to PFC backup list & free list.*/
  Pool* pfc_pool = wspace_grab_next_pfc_pool(wspace);
  while(pfc_pool != NULL){
    if(!pool_is_empty(pfc_pool)){
      /*sweep the chunks in pfc_pool. push back to pfc backup list*/
      chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
      while(chunk_to_sweep != NULL){
        assert(chunk_to_sweep->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
        chunk_to_sweep->status = CHUNK_NORMAL | CHUNK_USED;
        wspace_sweep_chunk_con(wspace, sweeper, chunk_to_sweep);
        chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
      }
    }
    /*grab more pfc pools*/
    pfc_pool = wspace_grab_next_pfc_pool(wspace);
  }

}
Example No. 11
void wspace_mark_scan_mostly_concurrent(Conclctor* marker)
{
  GC *gc = marker->gc;
  GC_Metadata *metadata = gc->metadata;
  
  unsigned int num_dirtyset_slot = 0;

  marker->trace_stack = free_task_pool_get_entry(metadata);
  
  /* first step: copy all root objects to mark tasks.*/
  Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);

  while(root_set){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(root_set,iter);
      
      assert(p_obj!=NULL);
      assert(address_belongs_to_gc_heap(p_obj, gc));
      if(obj_mark_gray_in_table(p_obj))
        collector_tracestack_push((Collector*)marker, p_obj);
    }
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }

  /* put back the last trace_stack task */
  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
  marker->trace_stack = free_task_pool_get_entry(metadata);

  /* The following code has these concerns:
      1. current_thread_id should be unique;
      2. mostly-concurrent marking does not need to add new markers dynamically;
      3. when the heap is exhausted, final marking will enumerate the root set, which must happen after the actions above.
  */
  unsigned int current_thread_id = atomic_inc32(&num_active_markers);

  if((current_thread_id+1) == gc->num_active_markers )
    state_transformation( gc, GC_CON_START_MARKERS, GC_CON_TRACING);
  
  while( gc->gc_concurrent_status == GC_CON_START_MARKERS );

retry:

  /*second step: mark dirty pool*/
  Vector_Block* dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);

  while(dirty_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(dirty_set);
    while(!vector_block_iterator_end(dirty_set,iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(dirty_set,iter);

      assert(p_obj!=NULL); //FIXME: restrict condition?
      
      obj_clear_dirty_in_table(p_obj);
      obj_clear_mark_in_table(p_obj, marker);

      if(obj_mark_gray_in_table(p_obj))
        collector_tracestack_push((Collector*)marker, p_obj);

      num_dirtyset_slot ++;
    } 
    vector_block_clear(dirty_set);
    pool_put_entry(metadata->free_set_pool, dirty_set);
    dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
  }

   /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);  

  /* third step: iterate over the mark tasks and scan objects */
   marker->trace_stack = free_task_pool_get_entry(metadata);

  
  Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
  
  while(mark_task){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
    while(!vector_block_iterator_end(mark_task,iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
      iter = vector_block_iterator_advance(mark_task,iter);
      trace_object(marker, p_obj);      
    }
    /* run out one task, put back to the pool and grab another task */
    vector_stack_clear(mark_task);
    pool_put_entry(metadata->free_task_pool, mark_task);
    mark_task = pool_get_entry(metadata->mark_task_pool);
  }

  /*
  if(current_thread_id == 0){
    gc_prepare_dirty_set(marker->gc);
  }*/

  gc_copy_local_dirty_set_to_global(gc);
  
  /* conditions to terminate marking:
       1. All threads have finished their current job.
       2. The flag to terminate concurrent marking is set.
   */
  atomic_dec32(&num_active_markers);
  while(num_active_markers != 0 || !concurrent_mark_need_terminating_mc(gc) ) {
      if(!pool_is_empty(metadata->mark_task_pool) || !pool_is_empty(metadata->gc_dirty_set_pool)) {
        atomic_inc32(&num_active_markers);
        goto retry;
      } else if( current_thread_id >= mostly_con_long_marker_num ) {
        break;
      }
      apr_sleep(15000);
  }

  /*
  while(num_active_markers != 0 || !concurrent_mark_need_terminating_mc(gc)){
    if(!pool_is_empty(metadata->mark_task_pool) || !pool_is_empty(metadata->gc_dirty_set_pool)){
      atomic_inc32(&num_active_markers);
      goto retry;
    }
  }*/
  
  /* put back the last mark stack to the free pool */
  mark_task = (Vector_Block*)marker->trace_stack;
  vector_stack_clear(mark_task);
  pool_put_entry(metadata->free_task_pool, mark_task);
  marker->trace_stack = NULL;
  marker->num_dirty_slots_traced = num_dirtyset_slot;

  /*
  if(num_dirtyset_slot!=0) {
  	lock(info_lock);
  	INFO2("gc.marker", "marker ["<< current_thread_id <<"] processed dirty slot="<<num_dirtyset_slot);
	unlock(info_lock);
  }*/
  return;
}
Example No. 12
//final work should be done by the last sweeper
void wspace_last_sweeper_work( Conclctor *last_sweeper ) {

  GC *gc = last_sweeper->gc;
  Wspace *wspace = gc_get_wspace(gc);
  Chunk_Header_Basic* chunk_to_sweep;
  Pool* used_chunk_pool = wspace->used_chunk_pool;

  /* all sweepers but this one have finished their jobs */
  state_transformation( gc, GC_CON_SWEEPING, GC_CON_SWEEP_DONE );
	
  /*3. Check the local chunk of mutator*/
  gc_sweep_mutator_local_chunks(wspace->gc);
  
    /*4. Sweep global alloc normal chunks again*/
    gc_set_sweep_global_normal_chunk();
    gc_wait_mutator_signal(wspace->gc, HSIG_MUTATOR_SAFE);
    wspace_init_pfc_pool_iterator(wspace);
    Pool* pfc_pool = wspace_grab_next_pfc_pool(wspace);
    while(pfc_pool != NULL){
      if(!pool_is_empty(pfc_pool)){
        chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
        while(chunk_to_sweep != NULL){
          assert(chunk_to_sweep->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
          chunk_to_sweep->status = CHUNK_NORMAL | CHUNK_USED;
          wspace_sweep_chunk_con(wspace, last_sweeper, chunk_to_sweep);
          chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
        }
      }
      /*grab more pfc pools*/
      pfc_pool = wspace_grab_next_pfc_pool(wspace);
    }

    /*5. Check the used list again.*/
    chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
    while(chunk_to_sweep != NULL){
      wspace_sweep_chunk_con(wspace, last_sweeper, chunk_to_sweep);
      chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
    }

    /*6. Switch the PFC backup list to PFC list.*/
    wspace_exchange_pfc_pool(wspace);
    
    gc_unset_sweep_global_normal_chunk();

    /*7. Put back live abnormal chunks and unreusable normal chunks*/
    Chunk_Header* used_abnormal_chunk = wspace_get_live_abnormal_chunk(wspace);
    while(used_abnormal_chunk){      
      used_abnormal_chunk->status = CHUNK_USED | CHUNK_ABNORMAL;
      wspace_reg_used_chunk(wspace,used_abnormal_chunk);
      used_abnormal_chunk = wspace_get_live_abnormal_chunk(wspace);
    }
    pool_empty(wspace->live_abnormal_chunk_pool);

    Chunk_Header* unreusable_normal_chunk = wspace_get_unreusable_normal_chunk(wspace);
    while(unreusable_normal_chunk){  
      unreusable_normal_chunk->status = CHUNK_USED | CHUNK_NORMAL;
      wspace_reg_used_chunk(wspace,unreusable_normal_chunk);
      unreusable_normal_chunk = wspace_get_unreusable_normal_chunk(wspace);
    }
    pool_empty(wspace->unreusable_normal_chunk_pool);

    /*8. Merge free chunks from sweepers*/
   Free_Chunk_List *free_list_from_sweeper = wspace_collect_free_chunks_from_sweepers(gc);
   wspace_merge_free_list(wspace, free_list_from_sweeper);
     
  /* last sweeper will transform the state to before_finish */
  state_transformation( gc, GC_CON_SWEEP_DONE, GC_CON_BEFORE_FINISH );
}
Example No. 13
static void* working_thread(void* ctx)
{
   struct task_dispatcher_t* td = (struct task_dispatcher_t*)ctx;
   LOGI("Task dispatcher working thread started");

   pthread_mutex_lock(&td->event_lock);
   while (1)
   {
      if (pool_is_empty(td->tasks))
      {
         LOGD("Waiting for event");
         pthread_cond_wait(&td->event, &td->event_lock);
      }
      else
      {
         struct task_t* task = pool_get_next(td->tasks, NULL);

         struct timespec activate_time = {0};
         timestamp_to_timespec(&task->activate_time, &activate_time);

         timestamp_t current = {0};
         timestamp_set(&current);

         LOGD("Waiting for task [%ld:%d -> %ld:%d]", current.value.tv_sec, current.value.tv_usec, task->activate_time.value.tv_sec, task->activate_time.value.tv_usec);
         pthread_cond_timedwait(&td->event, &td->event_lock, &activate_time);
      }

      if (!td->running)
      {
         LOGI("Stopping working thread");
         break;
      }

      if (!pool_is_empty(td->tasks))
      {
         struct task_t* task = pool_get_next(td->tasks, NULL);

         timestamp_t current = {0};
         timestamp_set(&current);

         if (timestamp_diff(&current, &task->activate_time) >= 0)
         {
            pool_remove(td->tasks, task);
            LOGD("Removed task %p [queue empty %d]", task, pool_is_empty(td->tasks));

            if (task->interval > 0)
            {
               task_dispatcher_enqueue_task(td, task->callback, task->ctx, task->interval, task->interval);
            }

            pthread_mutex_unlock(&td->event_lock);

            LOGD("Running task");
            task->callback(td, task, &task->enqueue_time, &current, task->ctx);

            pthread_mutex_lock(&td->event_lock);
         }
      }
   }
   pthread_mutex_unlock(&td->event_lock);

   LOGI("Task dispatcher working thread stopped");
   return NULL;
}
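For context, a sketch of how a periodic task might be scheduled against the dispatcher above. The callback parameter order (dispatcher, task, enqueue time, fire time, ctx) and the enqueue argument order (dispatcher, callback, ctx, delay, interval) are inferred from the calls inside working_thread and are not verified against the real header; my_tick and tick_count are hypothetical names added for illustration.

/* Hypothetical periodic callback, shaped after the call
   task->callback(td, task, &task->enqueue_time, &current, task->ctx) above. */
static void my_tick(struct task_dispatcher_t* td, struct task_t* task,
                    timestamp_t* enqueued, timestamp_t* fired, void* ctx)
{
   int* tick_count = (int*)ctx;
   (*tick_count)++;
   LOGD("Tick %d", *tick_count);
   /* No rescheduling needed here: working_thread re-enqueues the task
      itself whenever task->interval > 0. */
}

/* Hypothetical usage, assuming an already-running dispatcher 'td':
      static int ticks = 0;
      task_dispatcher_enqueue_task(td, my_tick, &ticks, interval, interval);
   mirroring the (interval, interval) pair that working_thread passes when it
   re-enqueues a periodic task. */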
Example No. 14
void wspace_mark_scan_concurrent(Conclctor* marker)
{
  //marker->time_measurement_start = time_now();
  GC *gc = marker->gc;
  GC_Metadata *metadata = gc->metadata;
  
  /* claim a unique marker id by atomically incrementing the active-marker count; this is needed for the termination check later */
  unsigned int current_thread_id = atomic_inc32(&num_active_markers);
  marker->trace_stack = free_task_pool_get_entry(metadata);
  Vector_Block *root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to mark tasks.*/
  while(root_set){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(root_set,iter);
      
      assert(p_obj!=NULL);
      assert(address_belongs_to_gc_heap(p_obj, gc));
      //if(obj_mark_gray_in_table(p_obj, &root_set_obj_size))
      if(obj_mark_gray_in_table(p_obj))
        collector_tracestack_push((Collector*)marker, p_obj);
    }
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */
  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);
  
  marker->trace_stack = free_task_pool_get_entry(metadata);

  state_transformation( gc, GC_CON_START_MARKERS, GC_CON_TRACING);
retry:
  
  gc_copy_local_dirty_set_to_global(marker->gc);
  /*second step: mark dirty object snapshot pool*/
  Vector_Block* dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);

  while(dirty_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(dirty_set);
    while(!vector_block_iterator_end(dirty_set,iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(dirty_set,iter);

      if(p_obj==NULL) { //FIXME: restrict?
        RAISE_ERROR;
      }
      marker->num_dirty_slots_traced++;
      if(obj_mark_gray_in_table(p_obj))
        collector_tracestack_push((Collector*)marker, p_obj);
    } 
    vector_block_clear(dirty_set);
    pool_put_entry(metadata->free_set_pool, dirty_set);
    dirty_set = pool_get_entry(metadata->gc_dirty_set_pool);
  }

    /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, marker->trace_stack);  

  /* third step: iterate over the mark tasks and scan objects */
  /* get a task buf for the mark stack */
  marker->trace_stack = free_task_pool_get_entry(metadata);

  
  Vector_Block *mark_task = pool_get_entry(metadata->mark_task_pool);
  
  while(mark_task){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(mark_task);
    while(!vector_block_iterator_end(mark_task,iter)){
      Partial_Reveal_Object *p_obj = (Partial_Reveal_Object*)*iter;
      iter = vector_block_iterator_advance(mark_task,iter);
      trace_object(marker, p_obj);      
    }
    /* run out one task, put back to the pool and grab another task */
    vector_stack_clear(mark_task);
    pool_put_entry(metadata->free_task_pool, mark_task);
    mark_task = pool_get_entry(metadata->mark_task_pool);
  }
  
  /* termination conditions:
       1. All threads have finished their current job.
       2. Local snapshot vectors are empty.
       3. The global snapshot pool is empty.
   */
  atomic_dec32(&num_active_markers);
  while(num_active_markers != 0 || !concurrent_mark_need_terminating_otf(gc)){
    if(!pool_is_empty(metadata->mark_task_pool) || !concurrent_mark_need_terminating_otf(gc)){
      atomic_inc32(&num_active_markers);
      goto retry;
    }
    apr_sleep(15000);
  }

  state_transformation( gc, GC_CON_TRACING, GC_CON_TRACE_DONE );
  /* put back the last mark stack to the free pool */
  mark_task = (Vector_Block*)marker->trace_stack;
  vector_stack_clear(mark_task);
  pool_put_entry(metadata->free_task_pool, mark_task);
  marker->trace_stack = NULL;
  assert(pool_is_empty(metadata->gc_dirty_set_pool));

    //INFO2("gc.con.info", "<stage 5>first marker finishes its job");

  return;
}
Example No. 15
static void collector_trace_rootsets(Collector* collector)
{
  GC* gc = collector->gc;
  GC_Metadata* metadata = gc->metadata;
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif
  
  unsigned int num_active_collectors = gc->num_active_collectors;
  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);

  Space* space = collector->collect_space;
  collector->trace_stack = free_task_pool_get_entry(metadata);

  /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */  
  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to trace tasks. */ 

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: copy root objects to trace stack ...");
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(root_set, iter);

      assert(*p_ref);  /* root ref can't be NULL, but remset can be */

      collector_tracestack_push(collector, p_ref);

#ifdef GC_GEN_STATS    
      gc_gen_collector_update_rootset_ref_num(stats);
#endif
    } 
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  /* second step: iterate over the trace tasks and forward objects */
  collector->trace_stack = free_task_pool_get_entry(metadata);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish copying root objects to trace stack.");

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: trace and forward objects ...");

retry:
  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);

  while(trace_task){    
    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
    while(!vector_block_iterator_end(trace_task,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(trace_task, iter);
#ifdef PREFETCH_SUPPORTED
      /* DO PREFETCH */
      if( mark_prefetch ) {    
        if(!vector_block_iterator_end(trace_task, iter)) {
          REF *pref = (REF*) *iter;
          PREFETCH( read_slot(pref) );
        }
      }
#endif      
      trace_object(collector, p_ref);
      
      if(collector->result == FALSE)  break; /* force return */
 
    }
    vector_stack_clear(trace_task);
    pool_put_entry(metadata->free_task_pool, trace_task);

    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }
    
    trace_task = pool_get_entry(metadata->mark_task_pool);
  }
  
  /* A collector comes here when seeing an empty mark_task_pool. The last collector will ensure 
     all the tasks are finished.*/
     
  atomic_inc32(&num_finished_collectors);
  while(num_finished_collectors != num_active_collectors){
    if( pool_is_empty(metadata->mark_task_pool)) continue;
    /* We can't grab a task here because of a race condition: if we grabbed a task while
       the pool looked empty, other threads could reach this barrier and pass it while work remains. */
    atomic_dec32(&num_finished_collectors);
    goto retry; 
  }

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish tracing and forwarding objects.");

  /* now we are done, but each collector has a private stack that is empty */  
  trace_task = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(trace_task);
  pool_put_entry(metadata->free_task_pool, trace_task);   
  collector->trace_stack = NULL;
  
  return;
}
Example No. 16
static void collector_trace_rootsets(Collector* collector)
{
  GC* gc = collector->gc;
  GC_Metadata* metadata = gc->metadata;
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif
  
  unsigned int num_active_collectors = gc->num_active_collectors;
  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);

  Space* space = collector->collect_space;
  collector->trace_stack = free_task_pool_get_entry(metadata);

  /* find root slots saved by 1. active mutators, 2. exited mutators, 3. last cycle collectors */  
  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to trace tasks. */ 

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: copy root objects to trace stack ......");
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(root_set,iter);
      
      if(!*p_ref) continue;  /* root ref can't be NULL, but remset can be */
      Partial_Reveal_Object *p_obj = read_slot(p_ref);

#ifdef GC_GEN_STATS
      gc_gen_collector_update_rootset_ref_num(stats);
#endif

      if(obj_belongs_to_nos(p_obj)){
        collector_tracestack_push(collector, p_ref);
      }
    } 
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  /* second step: iterate over the trace tasks and forward objects */
  collector->trace_stack = free_task_pool_get_entry(metadata);

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish copying root objects to trace stack.");

  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: trace and forward objects ......");

retry:
  Vector_Block* trace_task = pool_get_entry(metadata->mark_task_pool);

  while(trace_task){    
    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
    while(!vector_block_iterator_end(trace_task,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(trace_task,iter);
      assert(*p_ref); /* a task can't be NULL, it was checked before put into the task stack */
#ifdef PREFETCH_SUPPORTED      
      /* DO PREFETCH */  
      if( mark_prefetch ) {    
        if(!vector_block_iterator_end(trace_task, iter)) {
          REF *pref = (REF*) *iter;
          PREFETCH( read_slot(pref) );
        }
      }
#endif            
      /* In the sequential version we trace each object only once, using a local hashset for that,
         but a local hashset cannot catch repetition across multiple collectors. This needs more study. */

      /* FIXME:: we should not let root_set become empty while we are working; others may want to
         steal from it. Degenerate my stack into root_set and grab another stack. */

      /* a task has to belong to the collected space; this was checked before it was put into the stack */
      trace_object(collector, p_ref);
      if(collector->result == FALSE)  break; /* force return */
    }
    vector_stack_clear(trace_task);
    pool_put_entry(metadata->free_task_pool, trace_task);
    if(collector->result == FALSE){
      gc_task_pool_clear(metadata->mark_task_pool);
      break; /* force return */
    }

    trace_task = pool_get_entry(metadata->mark_task_pool);
  }
  
  atomic_inc32(&num_finished_collectors);
  while(num_finished_collectors != num_active_collectors){
    if( pool_is_empty(metadata->mark_task_pool)) continue;
    /* We can't grab a task here because of a race condition: if we grabbed a task while
       the pool looked empty, other threads could reach this barrier and pass it while work remains. */
    atomic_dec32(&num_finished_collectors);
    goto retry;      
  }
  TRACE2("gc.process", "GC: collector["<<((POINTER_SIZE_INT)collector->thread_handle)<<"]: finish tracing and forwarding objects.");

  /* now we are done, but each collector has a private stack that is empty */  
  trace_task = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(trace_task);
  pool_put_entry(metadata->free_task_pool, trace_task);   
  collector->trace_stack = NULL;
  
  return;
}
Example No. 17
void mark_scan_pool(Collector* collector)
{
  GC* gc = collector->gc;
  GC_Metadata* metadata = gc->metadata;
#ifdef GC_GEN_STATS
  GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

  /* reset the num_finished_collectors to be 0 by one collector. This is necessary for the barrier later. */
  unsigned int num_active_collectors = gc->num_active_collectors;
  atomic_cas32( &num_finished_collectors, 0, num_active_collectors);
   
  collector->trace_stack = free_task_pool_get_entry(metadata);

  Vector_Block* root_set = pool_iterator_next(metadata->gc_rootset_pool);

  /* first step: copy all root objects to mark tasks. 
      FIXME:: can be done sequentially before coming here to eliminate atomic ops */ 
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      REF *p_ref = (REF *)*iter;
      iter = vector_block_iterator_advance(root_set,iter);

      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      /* root ref can't be NULL (remset may have NULL ref entries, but this function is only for ALGO_MAJOR) */
      assert(p_obj!=NULL);
      /* We have to mark the object before putting it into the mark task, because two
         slots may contain the same object. The object would then be scanned twice and
         its ref slots recorded twice. The problem shows up when a ref slot is updated
         the first time with the new position, while the second time the value in the
         ref slot is still the old position rather than the expected new one.
         This could be worked around if we wanted to.
      */
      if(obj_mark_in_vt(p_obj)){
        collector_tracestack_push(collector, p_obj);
#ifdef GC_GEN_STATS
        gc_gen_collector_update_rootset_ref_num(stats);
        gc_gen_collector_update_marked_obj_stats_major(stats);
#endif
      }

    } 
    root_set = pool_iterator_next(metadata->gc_rootset_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(metadata->mark_task_pool, collector->trace_stack);
  
  /* second step: iterate over the mark tasks and scan objects */
  /* get a task buf for the mark stack */
  collector->trace_stack = free_task_pool_get_entry(metadata);

retry:
  Vector_Block* mark_task = pool_get_entry(metadata->mark_task_pool);
  
  while(mark_task){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(mark_task);
    while(!vector_block_iterator_end(mark_task,iter)){
      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object *)*iter;
      iter = vector_block_iterator_advance(mark_task,iter);

      /* FIXME:: we should not let mark_task become empty while we are working; others may want to
         steal from it. Degenerate my stack into mark_task and grab another mark_task. */
      trace_object(collector, p_obj);
    } 
    /* run out one task, put back to the pool and grab another task */
   vector_stack_clear(mark_task);
   pool_put_entry(metadata->free_task_pool, mark_task);
   mark_task = pool_get_entry(metadata->mark_task_pool);      
  }
  
  /* Termination detection. This is also a barrier.
     NOTE:: We could simply spin waiting for num_finished_collectors, because each newly
     generated task will eventually be processed by the collector that generated it.
     So the code below is only a load-balance optimization. */
  atomic_inc32(&num_finished_collectors);
  while(num_finished_collectors != num_active_collectors){
    if( !pool_is_empty(metadata->mark_task_pool)){
      atomic_dec32(&num_finished_collectors);
      goto retry;  
    }
  }
     
  /* put back the last mark stack to the free pool */
  mark_task = (Vector_Block*)collector->trace_stack;
  vector_stack_clear(mark_task);
  pool_put_entry(metadata->free_task_pool, mark_task);   
  collector->trace_stack = NULL;
  
  return;
}
Example No. 18
static void _ensure_pool_is_empty_gracefully_handles_null_pool( void )
{
	TEST_REQUIRE( pool_is_empty( 0 ) != 0 );
}
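Taken together, Examples No. 1, No. 4 and No. 18 suggest that pool_is_empty() reports "empty" for a zero-element pool, a cleaned-up pool and a NULL handle alike. The sketch below simply folds those three already-asserted cases into one test; it is illustrative only and uses no identifiers beyond the ones shown in those examples.

/* Sketch only: the three "empty" cases asserted separately in
   Examples No. 1, No. 4 and No. 18, combined into one test. */
static void _sketch_pool_is_empty_covers_degenerate_pools( void )
{
	pool_t pool;

	pool_init( &pool, 4, 0, allocator_default( ) );
	TEST_REQUIRE( pool_is_empty( &pool ) );		/* zero-element pool (Example No. 4) */

	pool_cleanup( &pool );
	TEST_REQUIRE( pool_is_empty( &pool ) != 0 );	/* cleaned-up pool (Example No. 1) */

	TEST_REQUIRE( pool_is_empty( 0 ) != 0 );	/* NULL pool (Example No. 18) */
}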