Example #1
//final remerge is done in a STW manner; this reduces locking when merging the global free list
void gc_merge_free_list_global(GC *gc) {
  Wspace *wspace = gc_get_wspace(gc);
  int64 start_merge = time_now();
  
  Free_Chunk_List *global_free_list = gc_collect_global_free_chunk_list(wspace, gc);
  wspace_merge_free_list(wspace, global_free_list);
  wspace_reset_free_list_chunks(global_free_list);
  
  //put each chunk from the collected list back to the wspace free chunk lists
  Free_Chunk *chunk = global_free_list->head;
  while(chunk) {
    global_free_list->head = chunk->next;
    if(global_free_list->head)
      global_free_list->head->prev = NULL;
    wspace_put_free_chunk(wspace, chunk);
    chunk = global_free_list->head;
  }
  //INFO2("gc.merge", "[merge global] time=" << (time_now()-start_merge) << " us" );
  
}
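The loop above drains the collected global list from its head and hands each detached chunk back to the wspace. Below is a minimal, self-contained sketch of that drain pattern; node_t, list_t and consume_node are hypothetical stand-ins for Free_Chunk, Free_Chunk_List and wspace_put_free_chunk, not the DRLVM types.

#include <stddef.h>
#include <stdio.h>

typedef struct node {
  struct node *next;
  struct node *prev;
} node_t;

typedef struct {
  node_t *head;
} list_t;

/* stand-in for wspace_put_free_chunk: consume one detached node */
static void consume_node(node_t *n) { printf("consumed %p\n", (void*)n); }

static void drain_list(list_t *list) {
  node_t *n = list->head;
  while (n) {
    list->head = n->next;        /* unlink the current head */
    if (list->head)
      list->head->prev = NULL;   /* the new head has no predecessor */
    consume_node(n);             /* hand the detached node over */
    n = list->head;              /* continue with the next head */
  }
}

int main(void) {
  node_t a, b, c;
  a.prev = NULL; a.next = &b;
  b.prev = &a;   b.next = &c;
  c.prev = &b;   c.next = NULL;
  list_t list = { &a };
  drain_list(&list);             /* consumes a, b, c in order */
  return 0;
}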
Example #2
/*Concurrent Sweep:
   The mark bits and alloc bits are exchanged before entering this function.
   This function clears the mark bits and merges the free chunks concurrently.
*/
void wspace_sweep_concurrent(Conclctor* sweeper)
{
  GC *gc = sweeper->gc;
  
  Wspace *wspace = gc_get_wspace(gc);

  sweeper->live_obj_size = 0;
  sweeper->live_obj_num = 0;

  Pool* used_chunk_pool = wspace->used_chunk_pool;

  Chunk_Header_Basic* chunk_to_sweep;
  
  /*1. Grab chunks from used list, sweep the chunk and push back to PFC backup list & free list.*/
  chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
  while(chunk_to_sweep != NULL){
    wspace_sweep_chunk_con(wspace, sweeper, chunk_to_sweep);
    chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
  }

  /*2. Grab chunks from PFC list, sweep the chunk and push back to PFC backup list & free list.*/
  Pool* pfc_pool = wspace_grab_next_pfc_pool(wspace);
  while(pfc_pool != NULL){
    if(!pool_is_empty(pfc_pool)){
      /*sweep the chunks in pfc_pool. push back to pfc backup list*/
      chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
      while(chunk_to_sweep != NULL){
        assert(chunk_to_sweep->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
        chunk_to_sweep->status = CHUNK_NORMAL | CHUNK_USED;
        wspace_sweep_chunk_con(wspace, sweeper, chunk_to_sweep);
        chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
      }
    }
    /*grab more pfc pools*/
    pfc_pool = wspace_grab_next_pfc_pool(wspace);
  }

}
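Both phases above follow the same shape: repeatedly take one chunk from a shared pool and sweep it until the pool hands back NULL. The sketch below shows that drain loop with a mutex-protected pool and a pthread worker; pool_t, pool_get and sweep_one are illustrative assumptions, not the Harmony Pool API that the code accesses through chunk_pool_get_chunk.

#include <pthread.h>
#include <stddef.h>

typedef struct work { struct work *next; } work_t;

typedef struct {
  pthread_mutex_t lock;   /* protects head; the real pool may well be lock-free */
  work_t *head;
} pool_t;

/* pop one work item, or NULL when the pool is empty */
static work_t *pool_get(pool_t *p) {
  pthread_mutex_lock(&p->lock);
  work_t *w = p->head;
  if (w) p->head = w->next;
  pthread_mutex_unlock(&p->lock);
  return w;
}

/* stand-in for wspace_sweep_chunk_con */
static void sweep_one(work_t *w) { (void)w; }

/* each sweeper thread runs this drain loop until the shared pool is empty */
static void *sweeper_thread(void *arg) {
  pool_t *pool = (pool_t *)arg;
  work_t *w = pool_get(pool);
  while (w != NULL) {
    sweep_one(w);
    w = pool_get(pool);
  }
  return NULL;
}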
Example #3
static void allocator_sweep_local_chunks(Allocator *allocator)
{
  Wspace *wspace = gc_get_wspace(allocator->gc);
  Size_Segment **size_segs = wspace->size_segments;
  Chunk_Header ***local_chunks = allocator->local_chunks;
  
  for(unsigned int i = SIZE_SEGMENT_NUM; i--;){
    if(!size_segs[i]->local_alloc){
      assert(!local_chunks[i]);
      continue;
    }
    Chunk_Header **chunks = local_chunks[i];
    assert(chunks);
    for(unsigned int j = size_segs[i]->chunk_num; j--;){
      if(chunks[j]){
        unsigned int slot_num = chunks[j]->slot_num;
        POINTER_SIZE_INT *table = chunks[j]->table;
        
        unsigned int index_word_num = (slot_num + SLOT_NUM_PER_WORD_IN_TABLE - 1) / SLOT_NUM_PER_WORD_IN_TABLE;
        for(unsigned int word_idx = 0; word_idx < index_word_num; ++word_idx){
          /*atomic sweep: clear the mark bits of this table word, keeping only the alloc bits*/
          POINTER_SIZE_INT old_word = table[word_idx];
          POINTER_SIZE_INT new_word = old_word & cur_alloc_mask;
          while(old_word != new_word){
            POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**) &table[word_idx],(void*) new_word,(void*) old_word);
            if(temp == old_word){
              break;
            }
            old_word = table[word_idx];
            new_word = old_word & cur_alloc_mask;
          }
          }
        }
      }
    }
  }
}
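The innermost loop is a compare-and-swap retry: each table word is rewritten to keep only its allocation bits, and the store is retried until the CAS succeeds or there is nothing left to clear. A sketch of the same pattern with C11 atomics follows; ALLOC_MASK and the bit layout are assumptions chosen for illustration, not the actual cur_alloc_mask encoding.

#include <stdatomic.h>
#include <stdint.h>

/* hypothetical mask: the bits to keep (alloc bits); everything else is cleared */
#define ALLOC_MASK ((uintptr_t)0x5555555555555555ULL)

static void sweep_word(_Atomic uintptr_t *word) {
  uintptr_t old_word = atomic_load(word);
  for (;;) {
    uintptr_t new_word = old_word & ALLOC_MASK;   /* drop mark bits, keep alloc bits */
    if (old_word == new_word)
      return;                                     /* nothing to clear */
    /* on failure old_word is reloaded with the current value and we retry */
    if (atomic_compare_exchange_weak(word, &old_word, new_word))
      return;
  }
}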
Example #4
static void *wspace_try_alloc(unsigned size, Allocator *allocator)
{
  Wspace *wspace = gc_get_wspace(allocator->gc);
  void *p_obj = NULL;
  
  if(size <= SUPER_OBJ_THRESHOLD)
    p_obj = wspace_alloc_normal_obj(wspace, size, allocator);
  else
    p_obj = wspace_alloc_super_obj(wspace, size, allocator);

#ifdef SSPACE_ALLOC_INFO
  if(p_obj) wspace_alloc_info(size);
#endif
#ifdef SSPACE_VERIFY
  if(p_obj) wspace_verify_alloc(p_obj, size);
#endif

#ifdef WSPACE_CONCURRENT_GC_STATS
  if(p_obj && gc_con_is_in_marking()) ((Partial_Reveal_Object*)p_obj)->obj_info |= NEW_OBJ_MASK;
#endif

 
  return p_obj;
}
Example #5
//the final work is done by the last sweeper to finish
void wspace_last_sweeper_work( Conclctor *last_sweeper ) {

  GC *gc = last_sweeper->gc;
  Wspace *wspace = gc_get_wspace(gc);
  Chunk_Header_Basic* chunk_to_sweep;
  Pool* used_chunk_pool = wspace->used_chunk_pool;

  /* all other sweepers have finished their jobs */
  state_transformation( gc, GC_CON_SWEEPING, GC_CON_SWEEP_DONE );

  /*3. Check the local chunks of mutators*/
  gc_sweep_mutator_local_chunks(wspace->gc);

  /*4. Sweep global alloc normal chunks again*/
  gc_set_sweep_global_normal_chunk();
  gc_wait_mutator_signal(wspace->gc, HSIG_MUTATOR_SAFE);
  wspace_init_pfc_pool_iterator(wspace);
  Pool* pfc_pool = wspace_grab_next_pfc_pool(wspace);
  while(pfc_pool != NULL){
    if(!pool_is_empty(pfc_pool)){
      chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
      while(chunk_to_sweep != NULL){
        assert(chunk_to_sweep->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
        chunk_to_sweep->status = CHUNK_NORMAL | CHUNK_USED;
        wspace_sweep_chunk_con(wspace, last_sweeper, chunk_to_sweep);
        chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
      }
    }
    /*grab more pfc pools*/
    pfc_pool = wspace_grab_next_pfc_pool(wspace);
  }

  /*5. Check the used list again.*/
  chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
  while(chunk_to_sweep != NULL){
    wspace_sweep_chunk_con(wspace, last_sweeper, chunk_to_sweep);
    chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
  }

  /*6. Switch the PFC backup list to the PFC list.*/
  wspace_exchange_pfc_pool(wspace);

  gc_unset_sweep_global_normal_chunk();

  /*7. Put back live abnormal chunks and unreusable normal chunks*/
  Chunk_Header* used_abnormal_chunk = wspace_get_live_abnormal_chunk(wspace);
  while(used_abnormal_chunk){
    used_abnormal_chunk->status = CHUNK_USED | CHUNK_ABNORMAL;
    wspace_reg_used_chunk(wspace,used_abnormal_chunk);
    used_abnormal_chunk = wspace_get_live_abnormal_chunk(wspace);
  }
  pool_empty(wspace->live_abnormal_chunk_pool);

  Chunk_Header* unreusable_normal_chunk = wspace_get_unreusable_normal_chunk(wspace);
  while(unreusable_normal_chunk){
    unreusable_normal_chunk->status = CHUNK_USED | CHUNK_NORMAL;
    wspace_reg_used_chunk(wspace,unreusable_normal_chunk);
    unreusable_normal_chunk = wspace_get_unreusable_normal_chunk(wspace);
  }
  pool_empty(wspace->unreusable_normal_chunk_pool);

  /*8. Merge free chunks from sweepers*/
  Free_Chunk_List *free_list_from_sweeper = wspace_collect_free_chunks_from_sweepers(gc);
  wspace_merge_free_list(wspace, free_list_from_sweeper);

  /* the last sweeper transforms the state to before_finish */
  state_transformation( gc, GC_CON_SWEEP_DONE, GC_CON_BEFORE_FINISH );
}
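wspace_last_sweeper_work is only run by the sweeper that finishes last. A common way to detect the last finisher is an atomic countdown where the thread that brings the counter to zero performs the final phase; the sketch below illustrates that pattern under assumed names and is not how DRLVM actually coordinates its sweepers.

#include <stdatomic.h>

/* set to the number of sweeper threads before sweeping starts */
static atomic_int active_sweepers;

static void final_phase(void) {
  /* e.g. re-sweep mutator local chunks, flip PFC pools, merge free lists */
}

static void sweeper_finish(void) {
  /* fetch_sub returns the value before the decrement; 1 means this thread
     was the last active sweeper, so it runs the final phase alone */
  if (atomic_fetch_sub(&active_sweepers, 1) == 1)
    final_phase();
}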
Example #6
/* Allocate a small object without finalizer in wspace, without grabbing a new free chunk */
void *wspace_thread_local_alloc(unsigned size, Allocator *allocator)
{
  if(size > LARGE_OBJ_THRESHOLD) return NULL;
  
  Wspace *wspace = gc_get_wspace(allocator->gc);
  
  /* Flexible alloc mechanism:
  Size_Segment *size_seg = wspace_get_size_seg(wspace, size);
  unsigned int seg_index = size_seg->seg_index;
  */
  unsigned int seg_index = (size-GC_OBJECT_ALIGNMENT) / MEDIUM_OBJ_THRESHOLD;
  assert(seg_index <= 2);
  Size_Segment *size_seg = wspace->size_segments[seg_index];
  assert(size_seg->local_alloc);
  
  size = (unsigned int)NORMAL_SIZE_ROUNDUP(size, size_seg);
  unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_seg);
  
  Chunk_Header **chunks = allocator->local_chunks[seg_index];
  Chunk_Header *chunk = chunks[index];  
  if(!chunk){
    mutator_post_signal((Mutator*) allocator,HSIG_DISABLE_SWEEP_LOCAL_CHUNKS);
    
    chunk = wspace_get_pfc(wspace, seg_index, index);
    //if(!chunk) chunk = wspace_steal_pfc(wspace, seg_index, index);
    if(!chunk){
      mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE);
      return NULL;
    }
    chunk->status |= CHUNK_IN_USE;
    chunks[index] = chunk;
    
    mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE);
  }
  
  mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_ENTER_ALLOC_MARK);
  void *p_obj = alloc_in_chunk(chunks[index]);
  mutator_post_signal((Mutator*) allocator,HSIG_MUTATOR_SAFE);

  if(chunk->slot_index == MAX_SLOT_INDEX){
    chunk->status = CHUNK_USED | CHUNK_NORMAL;
    /*register to used chunk list.*/
    wspace_reg_used_chunk(wspace,chunk);
    chunks[index] = NULL;
    chunk = NULL;
  }
  
  assert(!chunk || chunk->slot_index <= chunk->alloc_num);
  assert(!chunk || chunk->slot_index < chunk->slot_num);
  assert(p_obj);

#ifdef SSPACE_ALLOC_INFO
  wspace_alloc_info(size);
#endif
#ifdef SSPACE_VERIFY
  wspace_verify_alloc(p_obj, size);
#endif
  if(p_obj) {
    ((Mutator*)allocator)->new_obj_occupied_size += size;
  }
  return p_obj;
}
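The fast path above allocates from a chunk cached per thread in allocator->local_chunks and only falls back to the shared PFC pool when no usable local chunk exists. The sketch below shows that two-level scheme in simplified form; local_chunk_t, get_shared_chunk and the bump-style slot cursor are assumptions for illustration and do not mirror the real Chunk_Header slot table.

#include <stddef.h>

typedef struct {
  char  *base;        /* start of the chunk payload */
  size_t slot_size;   /* fixed object size served by this chunk */
  size_t slot_index;  /* next free slot */
  size_t slot_num;    /* total number of slots */
} local_chunk_t;

/* stand-in for wspace_get_pfc: pull a partially-free chunk from a shared pool */
static local_chunk_t *get_shared_chunk(size_t slot_size) {
  (void)slot_size;
  return NULL;        /* stub: a real pool would hand out a chunk here */
}

static void *local_alloc(local_chunk_t **cached, size_t slot_size) {
  local_chunk_t *chunk = *cached;
  if (!chunk || chunk->slot_index == chunk->slot_num) {
    chunk = get_shared_chunk(slot_size);   /* fall back to the shared pool */
    if (!chunk)
      return NULL;                         /* caller must take the slow path */
    *cached = chunk;
  }
  /* bump the slot cursor inside the thread-local chunk: no locking needed */
  void *p = chunk->base + chunk->slot_index * chunk->slot_size;
  chunk->slot_index++;
  return p;
}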