Example #1
0
/* Concurrently sweep an abnormal (single large object) chunk.
 * Keeps only the current allocation color bit in the mark word; if the
 * object is no longer marked, the whole chunk is recycled, otherwise it
 * is registered as a live abnormal chunk and counted in sweeper stats. */
static inline void collector_sweep_abnormal_chunk_con(Conclctor *sweeper, Wspace *wspace, Chunk_Header *chunk)
{
  assert(chunk->status == (CHUNK_ABNORMAL | CHUNK_USED));
  POINTER_SIZE_INT *table = chunk->table;
  /* Discard stale color bits; only the current alloc color survives. */
  table[0] &= cur_alloc_mask;
  if(table[0]){
    /* Object is still live: record it and update the sweeper's totals. */
    wspace_reg_live_abnormal_chunk(wspace, chunk);
    sweeper->live_obj_size += CHUNK_SIZE(chunk);
    sweeper->live_obj_num++;
    return;
  }
  /* Object is dead: return the entire chunk to the free list. */
  collector_add_free_chunk(sweeper, (Free_Chunk*)chunk);
}
Example #2
0
/* Compact partially-free chunks (PFCs): for each PFC pool, move live objects
 * from the emptiest chunks ("most free") into the fullest ones ("least free"),
 * freeing fully-evacuated source chunks and returning leftover chunks to the
 * pool. NOTE(review): relies on pfc_pool_roughly_sort partitioning the pool
 * into the two least_free/most_free lists — confirm against its definition. */
void wspace_compact(Collector *collector, Wspace *wspace)
{
  Chunk_Header *least_free_chunk, *most_free_chunk;
  Pool *pfc_pool = wspace_grab_next_pfc_pool(wspace);
  
  for(; pfc_pool; pfc_pool = wspace_grab_next_pfc_pool(wspace)){
    if(pool_is_empty(pfc_pool)) continue;
    /* Sort decides whether this pool is fragmented enough to be worth compacting. */
    Boolean pfc_pool_need_compact = pfc_pool_roughly_sort(pfc_pool, &least_free_chunk, &most_free_chunk);
    if(!pfc_pool_need_compact) continue;
    
    /* dest: fullest chunk (evacuation target); src: emptiest chunk (evacuated). */
    Chunk_Header *dest = get_least_free_chunk(&least_free_chunk, &most_free_chunk);
    Chunk_Header *src = get_most_free_chunk(&least_free_chunk, &most_free_chunk);
    Boolean src_is_new = TRUE;
    while(dest && src){
      /* A freshly-fetched src starts scanning its slots from index 0; a
       * partially-moved src keeps its slot_index from the previous pass. */
      if(src_is_new)
        src->slot_index = 0;
      //chunk_depad_last_index_word(src);
      /* May exhaust dest (sets it to NULL) before src is fully evacuated. */
      move_obj_between_chunks(wspace, &dest, src);
      if(!dest)
        dest = get_least_free_chunk(&least_free_chunk, &most_free_chunk);
      if(!src->alloc_num){
        /* src fully evacuated: recycle it and fetch the next emptiest chunk. */
        collector_add_free_chunk(collector, (Free_Chunk*)src);
        src = get_most_free_chunk(&least_free_chunk, &most_free_chunk);
        src_is_new = TRUE;
      } else {
        /* src still holds objects (dest ran out); retry with the new dest. */
        src_is_new = FALSE;
      }
    }
    
    /* Rebuild the pfc_pool */
    if(dest)
      wspace_put_pfc(wspace, dest);
    if(src){
      //chunk_pad_last_index_word(src, cur_alloc_mask);
      /* Reset the partially-evacuated src's scan position before returning it. */
      pfc_reset_slot_index(src);
      wspace_put_pfc(wspace, src);
    }
  }
}
Example #3
0
/* Concurrently sweep a normal (slot-based) chunk: clear stale mark-table
 * color bits, count live slots, record the first word containing a free
 * slot, and route the chunk to the free list, the unreusable list, or the
 * PFC backup pool depending on how much of it is still live. */
static void collector_sweep_normal_chunk_con(Conclctor *sweeper, Wspace *wspace, Chunk_Header *chunk)
{
  unsigned int slot_num = chunk->slot_num;
  unsigned int live_num = 0;
  unsigned int first_free_word_index = MAX_SLOT_INDEX;
  POINTER_SIZE_INT *table = chunk->table;
  
  /* Number of mark-table words covering slot_num slots (rounded up). */
  unsigned int index_word_num = (slot_num + SLOT_NUM_PER_WORD_IN_TABLE - 1) / SLOT_NUM_PER_WORD_IN_TABLE;
  for(unsigned int i = 0; i < index_word_num; ++i){
    /* Keep only the current allocation color; stale colors are swept away. */
    table[i] &= cur_alloc_mask;
    /* A fully-set word means every slot in it is live; otherwise count bits. */
    unsigned int live_num_in_word = (table[i] == cur_alloc_mask) ? SLOT_NUM_PER_WORD_IN_TABLE : word_set_bit_num(table[i]);
    live_num += live_num_in_word;
    
    /* for concurrent sweeping, sweeping and allocation are performed concurrently. so we can not just count the current live obj*/
    
    /* Remember the first word that still has a free slot so allocation can
     * resume there. (Redundant cast of chunk removed: it is already a
     * Chunk_Header*.) */
    if((first_free_word_index == MAX_SLOT_INDEX) && (live_num_in_word < SLOT_NUM_PER_WORD_IN_TABLE)){
      first_free_word_index = i;
      pfc_set_slot_index(chunk, first_free_word_index, cur_alloc_color);
    }
  }
  assert(live_num <= slot_num);
  sweeper->live_obj_size += live_num * chunk->slot_size;
  sweeper->live_obj_num += live_num;

  if(!live_num){  /* all objects in this chunk are dead: recycle the whole chunk */
    collector_add_free_chunk(sweeper, (Free_Chunk*)chunk);
  } else {
    chunk->alloc_num = live_num;
    if(!chunk_is_reusable(chunk)){  /* too few free slots to be worth reusing: set aside as unreusable */
      wspace_reg_unreusable_normal_chunk(wspace, chunk);
    } else {  /* enough free slots: keep as a partially-free chunk for future allocation */
      wspace_put_pfc_backup(wspace, chunk);
    }
  }
}