Example #1
static void *wspace_alloc_super_obj(Wspace *wspace, unsigned size, Allocator *allocator)
{
  assert(size > SUPER_OBJ_THRESHOLD);

  unsigned int chunk_size = SUPER_SIZE_ROUNDUP(size);
  assert(chunk_size > SUPER_OBJ_THRESHOLD);
  assert(!(chunk_size & CHUNK_GRANULARITY_LOW_MASK));
  
  Chunk_Header *chunk;
  if(chunk_size <= HYPER_OBJ_THRESHOLD)
    chunk = (Chunk_Header*)wspace_get_abnormal_free_chunk(wspace, chunk_size);
  else
    chunk = (Chunk_Header*)wspace_get_hyper_free_chunk(wspace, chunk_size, FALSE);
  
  if(!chunk) return NULL;
  abnormal_chunk_init(chunk, chunk_size, size);
  
  mutator_post_signal((Mutator*)allocator, HSIG_MUTATOR_ENTER_ALLOC_MARK);
  if(is_obj_alloced_live()){
    chunk->table[0] |= cur_mark_black_color; // just for debugging: mark the new object
  }
  mutator_post_signal((Mutator*)allocator, HSIG_MUTATOR_SAFE);
  
  chunk->table[0] |= cur_alloc_color;
  set_super_obj_mask(chunk->base);
  assert(chunk->status == CHUNK_ABNORMAL);
  chunk->status = CHUNK_ABNORMAL | CHUNK_USED;
  wspace_reg_used_chunk(wspace, chunk);
  assert(get_obj_info_raw((Partial_Reveal_Object*)chunk->base) & SUPER_OBJ_MASK);
  
  ((Mutator*)allocator)->new_obj_occupied_size += chunk_size;
  return chunk->base;
}
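
For intuition, a minimal sketch of what the rounding macro could look like, assuming CHUNK_GRANULARITY is a power of two and CHUNK_GRANULARITY_LOW_MASK equals CHUNK_GRANULARITY - 1 (both assumptions; the macro in the source may differ):

/* Hypothetical definition, consistent with the asserts above: round the
   request up to chunk granularity so the masked low bits come out zero. */
#define SUPER_SIZE_ROUNDUP(size) \
  (((size) + CHUNK_GRANULARITY - 1) & ~(CHUNK_GRANULARITY - 1))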
Example #2
// final work should be done by the last sweeper
void wspace_last_sweeper_work(Conclctor *last_sweeper)
{

  GC *gc = last_sweeper->gc;
  Wspace *wspace = gc_get_wspace(gc);
  Chunk_Header_Basic* chunk_to_sweep;
  Pool* used_chunk_pool = wspace->used_chunk_pool;

  /* All the other sweepers have finished their jobs; this is the last one */
  state_transformation( gc, GC_CON_SWEEPING, GC_CON_SWEEP_DONE );

  /* 3. Check the mutators' local chunks */
  gc_sweep_mutator_local_chunks(wspace->gc);
  
  /* 4. Sweep global alloc normal chunks again */
  gc_set_sweep_global_normal_chunk();
  gc_wait_mutator_signal(wspace->gc, HSIG_MUTATOR_SAFE);
  wspace_init_pfc_pool_iterator(wspace);
  Pool* pfc_pool = wspace_grab_next_pfc_pool(wspace);
  while(pfc_pool != NULL){
    if(!pool_is_empty(pfc_pool)){
      chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
      while(chunk_to_sweep != NULL){
        assert(chunk_to_sweep->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
        chunk_to_sweep->status = CHUNK_NORMAL | CHUNK_USED;
        wspace_sweep_chunk_con(wspace, last_sweeper, chunk_to_sweep);
        chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
      }
    }
    /* grab more pfc pools */
    pfc_pool = wspace_grab_next_pfc_pool(wspace);
  }

  /* 5. Check the used list again */
  chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
  while(chunk_to_sweep != NULL){
    wspace_sweep_chunk_con(wspace, last_sweeper, chunk_to_sweep);
    chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
  }

  /* 6. Switch the PFC backup list to the PFC list */
  wspace_exchange_pfc_pool(wspace);

  gc_unset_sweep_global_normal_chunk();

  /* 7. Put back live abnormal chunks and unreusable normal chunks */
  Chunk_Header* used_abnormal_chunk = wspace_get_live_abnormal_chunk(wspace);
  while(used_abnormal_chunk){
    used_abnormal_chunk->status = CHUNK_USED | CHUNK_ABNORMAL;
    wspace_reg_used_chunk(wspace, used_abnormal_chunk);
    used_abnormal_chunk = wspace_get_live_abnormal_chunk(wspace);
  }
  pool_empty(wspace->live_abnormal_chunk_pool);

  Chunk_Header* unreusable_normal_chunk = wspace_get_unreusable_normal_chunk(wspace);
  while(unreusable_normal_chunk){
    unreusable_normal_chunk->status = CHUNK_USED | CHUNK_NORMAL;
    wspace_reg_used_chunk(wspace, unreusable_normal_chunk);
    unreusable_normal_chunk = wspace_get_unreusable_normal_chunk(wspace);
  }
  pool_empty(wspace->unreusable_normal_chunk_pool);

  /* 8. Merge free chunks from sweepers */
  Free_Chunk_List *free_list_from_sweeper = wspace_collect_free_chunks_from_sweepers(gc);
  wspace_merge_free_list(wspace, free_list_from_sweeper);
     
  /* the last sweeper transitions the collector state to GC_CON_BEFORE_FINISH */
  state_transformation( gc, GC_CON_SWEEP_DONE, GC_CON_BEFORE_FINISH );
}
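
The function above runs only in the sweeper that finishes last. A minimal sketch of one way the "last sweeper" could be detected, assuming APR atomics and a hypothetical shared counter num_active_sweepers (both assumptions, not taken from the source):

#include <apr_atomic.h>

static volatile apr_uint32_t num_active_sweepers; /* hypothetical counter */

void sweeper_finish(Conclctor *sweeper)
{
  /* apr_atomic_dec32 returns zero when the counter drops to zero,
     so exactly one sweeper runs the final-phase work */
  if(apr_atomic_dec32(&num_active_sweepers) == 0)
    wspace_last_sweeper_work(sweeper);
}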
Example #3
/* Allocate a small object without finalizer in wspace, without grabbing a new free chunk */
void *wspace_thread_local_alloc(unsigned size, Allocator *allocator)
{
  if(size > LARGE_OBJ_THRESHOLD) return NULL;
  
  Wspace *wspace = gc_get_wspace(allocator->gc);
  
  /* Flexible alloc mechanism:
  Size_Segment *size_seg = wspace_get_size_seg(wspace, size);
  unsigned int seg_index = size_seg->seg_index;
  */
  unsigned int seg_index = (size - GC_OBJECT_ALIGNMENT) / MEDIUM_OBJ_THRESHOLD;
  assert(seg_index <= 2);
  Size_Segment *size_seg = wspace->size_segments[seg_index];
  assert(size_seg->local_alloc);
  
  size = (unsigned int)NORMAL_SIZE_ROUNDUP(size, size_seg);
  unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_seg);
  
  Chunk_Header **chunks = allocator->local_chunks[seg_index];
  Chunk_Header *chunk = chunks[index];  
  if(!chunk){
    mutator_post_signal((Mutator*)allocator, HSIG_DISABLE_SWEEP_LOCAL_CHUNKS);

    chunk = wspace_get_pfc(wspace, seg_index, index);
    //if(!chunk) chunk = wspace_steal_pfc(wspace, seg_index, index);
    if(!chunk){
      mutator_post_signal((Mutator*)allocator, HSIG_MUTATOR_SAFE);
      return NULL;
    }
    chunk->status |= CHUNK_IN_USE;
    chunks[index] = chunk;

    mutator_post_signal((Mutator*)allocator, HSIG_MUTATOR_SAFE);
  }

  mutator_post_signal((Mutator*)allocator, HSIG_MUTATOR_ENTER_ALLOC_MARK);
  void *p_obj = alloc_in_chunk(chunks[index]);
  mutator_post_signal((Mutator*)allocator, HSIG_MUTATOR_SAFE);

  if(chunk->slot_index == MAX_SLOT_INDEX){
    chunk->status = CHUNK_USED | CHUNK_NORMAL;
    /*register to used chunk list.*/
    wspace_reg_used_chunk(wspace,chunk);
    chunks[index] = NULL;
    chunk = NULL;
  }
  
  assert(!chunk || chunk->slot_index <= chunk->alloc_num);
  assert(!chunk || chunk->slot_index < chunk->slot_num);
  assert(p_obj);

#ifdef SSPACE_ALLOC_INFO
  wspace_alloc_info(size);
#endif
#ifdef SSPACE_VERIFY
  wspace_verify_alloc(p_obj, size);
#endif
  if(p_obj) {
    ((Mutator*)allocator)->new_obj_occupied_size += size;
  }
  return p_obj;
}
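
To make the seg_index arithmetic above concrete, assume GC_OBJECT_ALIGNMENT = 8 and MEDIUM_OBJ_THRESHOLD = 128 (illustrative values only, not taken from the source): a 64-byte request maps to segment (64 - 8) / 128 = 0, a 200-byte request to (200 - 8) / 128 = 1, and a 384-byte request to (384 - 8) / 128 = 2, which is consistent with the assert(seg_index <= 2) in the code.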
Example #4
static void *wspace_alloc_normal_obj(Wspace *wspace, unsigned size, Allocator *allocator)
{
  Size_Segment *size_seg = wspace_get_size_seg(wspace, size);
  unsigned int seg_index = size_seg->seg_index;
  
  size = (unsigned int)NORMAL_SIZE_ROUNDUP(size, size_seg);
  unsigned int index = NORMAL_SIZE_TO_INDEX(size, size_seg);
  
  Chunk_Header *chunk = NULL;
  void *p_obj = NULL;
  
  if(size_seg->local_alloc){
    Chunk_Header **chunks = allocator->local_chunks[seg_index];
    chunk = chunks[index];
    if(!chunk){
      mutator_post_signal((Mutator*)allocator, HSIG_DISABLE_SWEEP_LOCAL_CHUNKS);
      chunk = wspace_get_pfc(wspace, seg_index, index);
      if(!chunk){
        chunk = (Chunk_Header*)wspace_get_normal_free_chunk(wspace);
        if(chunk) normal_chunk_init(chunk, size);
      }
      //if(!chunk) chunk = wspace_steal_pfc(wspace, seg_index, index);
      if(!chunk){
        mutator_post_signal((Mutator*)allocator, HSIG_MUTATOR_SAFE);
        //INFO2("gc.wspace", "[Local Alloc Failed] alloc obj with size " << size << " bytes");
        return NULL;
      }
      chunk->status |= CHUNK_IN_USE;
      chunks[index] = chunk;
      mutator_post_signal((Mutator*)allocator, HSIG_MUTATOR_SAFE);
    }

    mutator_post_signal((Mutator*)allocator, HSIG_MUTATOR_ENTER_ALLOC_MARK);
    p_obj = alloc_in_chunk(chunks[index]);
    mutator_post_signal((Mutator*)allocator, HSIG_MUTATOR_SAFE);
    
    if(chunk->slot_index == MAX_SLOT_INDEX){
      chunk->status = CHUNK_USED | CHUNK_NORMAL;
      /*register to used chunk list.*/
      wspace_reg_used_chunk(wspace,chunk);
      chunks[index] = NULL;
    }
    
  } else {
    mutator_post_signal((Mutator*)allocator, HSIG_DISABLE_SWEEP_GLOBAL_CHUNKS);

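    /* If concurrent sweep is enabled, the last sweeper may currently be
       re-sweeping the global normal chunks (see wspace_last_sweeper_work);
       stay in the safe state until it finishes so we do not race with it
       on the PFC pools. */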
    if(gc_is_specify_con_sweep()){
      while(gc_is_sweep_global_normal_chunk()){
        mutator_post_signal((Mutator*)allocator, HSIG_MUTATOR_SAFE);
      }
    }

    chunk = wspace_get_pfc(wspace, seg_index, index);
    if(!chunk){
      chunk = (Chunk_Header*)wspace_get_normal_free_chunk(wspace);
      if(chunk) normal_chunk_init(chunk, size);
    }
    //if(!chunk) chunk = wspace_steal_pfc(wspace, seg_index, index);
    if(!chunk){
      mutator_post_signal((Mutator*)allocator, HSIG_MUTATOR_SAFE);
      //INFO2("gc.wspace", "[Non-Local Alloc Failed] alloc obj with size " << size << " bytes");
      return NULL;
    }
    p_obj = alloc_in_chunk(chunk);

    if(chunk->slot_index == MAX_SLOT_INDEX){
      chunk->status = CHUNK_USED | CHUNK_NORMAL;
      /*register to used chunk list.*/
      wspace_reg_used_chunk(wspace,chunk);
      chunk = NULL;
    }
    
    if(chunk){
      wspace_put_pfc(wspace, chunk);
    }
    
    mutator_post_signal((Mutator*)allocator, HSIG_MUTATOR_SAFE);
  }
  if(p_obj) {
    ((Mutator*)allocator)->new_obj_occupied_size += size;
  }
  return p_obj;
}
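
Taken together with Examples #1 and #3, the natural front end is a dispatcher that tries the thread-local fast path first and then falls back by size. A minimal sketch under a hypothetical name, wspace_alloc (the actual entry point in the source may differ):

/* Hypothetical dispatcher: every callee appears in the examples above;
   only this wrapper itself is an assumption. */
void *wspace_alloc(unsigned size, Allocator *allocator)
{
  /* fast path: small objects from the mutator's local chunks */
  void *p_obj = wspace_thread_local_alloc(size, allocator);
  if(p_obj) return p_obj;

  /* slow path: dispatch on size */
  Wspace *wspace = gc_get_wspace(allocator->gc);
  if(size > SUPER_OBJ_THRESHOLD)
    return wspace_alloc_super_obj(wspace, size, allocator);
  return wspace_alloc_normal_obj(wspace, size, allocator);
}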
Example #5
static inline void move_obj_between_chunks(Wspace *wspace, Chunk_Header **dest_ptr, Chunk_Header *src)
{
  Chunk_Header *dest = *dest_ptr;
  assert(dest->slot_size == src->slot_size);
  
  unsigned int slot_size = dest->slot_size;
  unsigned int alloc_num = src->alloc_num;
  assert(alloc_num);
  
#ifdef USE_32BITS_HASHCODE
  Hashcode_Buf*  old_hashcode_buf = src->hashcode_buf;
  Hashcode_Buf* new_hashcode_buf = dest->hashcode_buf;
#endif

  while(alloc_num && dest){
    Partial_Reveal_Object *p_obj = next_alloc_slot_in_chunk(src);
    Partial_Reveal_Object *target = (Partial_Reveal_Object *)alloc_in_chunk(dest);

    assert(p_obj && target);
    memcpy(target, p_obj, slot_size);

#ifdef USE_32BITS_HASHCODE
    if(hashcode_is_set(p_obj)){
      int hashcode;
      if(new_hashcode_buf == NULL) {
        new_hashcode_buf = hashcode_buf_create();
        hashcode_buf_init(new_hashcode_buf);
        dest->hashcode_buf = new_hashcode_buf;
      }
      if(hashcode_is_buffered(p_obj)){
        /* the hashcode is already buffered; move it to the dest buffer */
        hashcode = hashcode_buf_lookup(p_obj, old_hashcode_buf);
        hashcode_buf_update(target, hashcode, new_hashcode_buf);
      }else{
        /* the hashcode needs buffering now */
        hashcode = hashcode_gen(p_obj);
        hashcode_buf_update(target, hashcode, new_hashcode_buf);
        Obj_Info_Type oi = get_obj_info_raw(target);
        set_obj_info(target, oi | HASHCODE_BUFFERED_BIT);
      }
    }
#endif

#ifdef SSPACE_VERIFY
    wspace_modify_mark_in_compact(target, p_obj, slot_size);
#endif
    obj_set_fw_in_oi(p_obj, target);
    --alloc_num;

    /* retire dest only after the hashcode bookkeeping above,
       which still needs a valid dest pointer */
    if(dest->slot_index == MAX_SLOT_INDEX){
      dest->status = CHUNK_USED | CHUNK_NORMAL;
      wspace_reg_used_chunk(wspace, dest);
      dest = NULL;
    }
  }

#ifdef USE_32BITS_HASHCODE
  if(alloc_num == 0) {
    if(old_hashcode_buf) hashcode_buf_destory(old_hashcode_buf);
    src->hashcode_buf = NULL;
  }
#endif

  /* dest might be set to NULL, so we use *dest_ptr here */
  assert((*dest_ptr)->alloc_num <= (*dest_ptr)->slot_num);
  src->alloc_num = alloc_num;
  if(!dest){
    assert((*dest_ptr)->alloc_num == (*dest_ptr)->slot_num);
    *dest_ptr = NULL;
    clear_free_slot_in_table(src->table, src->slot_index);
  }
}
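
A minimal sketch of how a compactor could drive move_obj_between_chunks, assuming hypothetical helpers get_next_dest_chunk and get_next_src_chunk that hand out destination and source chunks of the same slot size (the real compaction loop in the source may differ):

/* Hypothetical driver: drain sparse src chunks into dense dest chunks,
   fetching a fresh dest whenever the current one fills up. */
static void compact_slot_size_class(Wspace *wspace)
{
  Chunk_Header *dest = get_next_dest_chunk(wspace);  /* assumption */
  Chunk_Header *src  = get_next_src_chunk(wspace);   /* assumption */
  while(dest && src){
    /* moves until src is drained or dest fills (dest set to NULL) */
    move_obj_between_chunks(wspace, &dest, src);
    if(src->alloc_num == 0)
      src = get_next_src_chunk(wspace);
    if(!dest)
      dest = get_next_dest_chunk(wspace);
  }
  /* a real implementation would also return a partially filled dest
     to the PFC pools rather than drop it */
}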