Example #1
/* Grab the next block to compact from the shared list, racing the other
   collectors with compare-and-swap. */
Block_Header* mspace_get_next_compact_block(Collector* collector, Mspace* mspace)
{
  /* First put back the block just compacted: unless it is a BLOCK_TARGET, mark it BLOCK_COMPACTED. */
  unsigned int block_status = collector->cur_compact_block->status;
  assert( block_status & (BLOCK_IN_COMPACT|BLOCK_TARGET));
  if( block_status == BLOCK_IN_COMPACT)
    collector->cur_compact_block->status = BLOCK_COMPACTED;

  Block_Header* cur_compact_block = (Block_Header*)next_block_for_compact;
  
  while(cur_compact_block != NULL){
    Block_Header* next_compact_block = cur_compact_block->next;

    Block_Header* temp = (Block_Header*)atomic_casptr((volatile void **)&next_block_for_compact, next_compact_block, cur_compact_block);
    if(temp != cur_compact_block){
      cur_compact_block = (Block_Header*)next_block_for_compact;
      continue;
    }
    /* We won the race: mark the block BLOCK_IN_COMPACT. This must be the first time a compactor touches it. */
    block_status = cur_compact_block->status;
    assert( !(block_status & (BLOCK_IN_COMPACT|BLOCK_COMPACTED|BLOCK_TARGET)));
    cur_compact_block->status = BLOCK_IN_COMPACT;
    collector->cur_compact_block = cur_compact_block;
    return cur_compact_block;
  }
  /* ran out of space blocks to compact */
  return NULL;
}
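
The while loop above is the standard lock-free "claim the list head" idiom: read the shared cursor (next_block_for_compact), try to swing it to the following block with a CAS, and reload and retry whenever another collector wins the race. Below is a minimal standalone sketch of the same idiom, assuming C11 atomics in place of atomic_casptr; Node, list_head, and claim_next are illustrative names, not part of the GC source.

#include <stdatomic.h>
#include <stddef.h>

typedef struct Node { struct Node *next; } Node;

/* Shared cursor, analogous to next_block_for_compact. */
static _Atomic(Node *) list_head;

Node *claim_next(void)
{
  Node *cur = atomic_load(&list_head);
  while (cur != NULL) {
    Node *next = cur->next;
    /* Try to swing the head from cur to next; on failure, cur is
       refreshed with the current head and we retry. */
    if (atomic_compare_exchange_weak(&list_head, &cur, next))
      return cur;  /* we claimed cur exclusively */
  }
  return NULL;  /* list exhausted */
}

Reading cur->next after losing a race is safe here for the same reason it is in the GC: blocks are only claimed, never freed or relinked, during this phase, so there is no ABA hazard.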
Example #2
/* Sweep this allocator's thread-local chunks: atomically clear the mark bits
   in each chunk's slot table, keeping only the bits selected by
   cur_alloc_mask. */
static void allocator_sweep_local_chunks(Allocator *allocator)
{
  Wspace *wspace = gc_get_wspace(allocator->gc);
  Size_Segment **size_segs = wspace->size_segments;
  Chunk_Header ***local_chunks = allocator->local_chunks;
  
  for(unsigned int i = SIZE_SEGMENT_NUM; i--;){
    if(!size_segs[i]->local_alloc){
      assert(!local_chunks[i]);
      continue;
    }
    Chunk_Header **chunks = local_chunks[i];
    assert(chunks);
    for(unsigned int j = size_segs[i]->chunk_num; j--;){
      if(chunks[j]){
        unsigned int slot_num = chunks[j]->slot_num;
        POINTER_SIZE_INT *table = chunks[j]->table;
        
        unsigned int index_word_num = (slot_num + SLOT_NUM_PER_WORD_IN_TABLE - 1) / SLOT_NUM_PER_WORD_IN_TABLE;
        for(unsigned int w = 0; w < index_word_num; ++w){
          /* Atomic sweep: clear this word's mark bits, keeping only the
             bits selected by cur_alloc_mask. */
          POINTER_SIZE_INT old_word = table[w];
          POINTER_SIZE_INT new_word = old_word & cur_alloc_mask;
          while(old_word != new_word){
            POINTER_SIZE_INT temp = (POINTER_SIZE_INT)atomic_casptr((volatile void**)&table[w], (void*)new_word, (void*)old_word);
            if(temp == old_word){
              break;
            }
            old_word = table[w];
            new_word = old_word & cur_alloc_mask;
          }
        }
        }
      }
    }
  }
}
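
The innermost while loop is a compare-and-swap formulation of an atomic AND: keep retrying until the masked word is stored without interference from a concurrent writer. A minimal sketch with C11 atomics, where mask plays the role of cur_alloc_mask and sweep_word is an illustrative name:

#include <stdatomic.h>
#include <stdint.h>

static void sweep_word(_Atomic(uintptr_t) *word, uintptr_t mask)
{
  uintptr_t old_word = atomic_load(word);
  uintptr_t new_word = old_word & mask;
  while (old_word != new_word) {
    /* On failure, old_word is refreshed with the current value. */
    if (atomic_compare_exchange_weak(word, &old_word, new_word))
      break;
    new_word = old_word & mask;
  }
}

With C11 atomics the whole loop collapses to atomic_fetch_and(word, mask); the explicit CAS loop above mirrors the structure the GC uses with its own atomic_casptr primitive.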
Example #3
/* Compute the forwarding (target) address of every marked object in this
   collector's chain of compact blocks and record it in the object header. */
static void mspace_compute_object_target(Collector* collector, Mspace* mspace)
{
    Block_Header *curr_block = collector->cur_compact_block;
    Block_Header *dest_block = collector->cur_target_block;
    Block_Header *local_last_dest = dest_block;
    void *dest_addr = dest_block->base;
    Block_Header *last_src = NULL;  /* set when the first marked object is chained below */

#ifdef USE_32BITS_HASHCODE
    Hashcode_Buf* old_hashcode_buf = NULL;
    Hashcode_Buf* new_hashcode_buf = hashcode_buf_create();
    hashcode_buf_init(new_hashcode_buf);
#endif

    assert(!collector->rem_set);
    collector->rem_set = free_set_pool_get_entry(collector->gc->metadata);
#ifdef USE_32BITS_HASHCODE
    collector->hashcode_set = free_set_pool_get_entry(collector->gc->metadata);
#endif

#ifdef GC_GEN_STATS
    GC_Gen_Collector_Stats* stats = (GC_Gen_Collector_Stats*)collector->stats;
#endif

    while( curr_block ) {
        void* start_pos;
        Partial_Reveal_Object *first_obj = block_get_first_marked_obj_prefetch_next(curr_block, &start_pos);
        if(first_obj) {
            ++curr_block->dest_counter;
            if(!dest_block->src)
                dest_block->src = first_obj;
            else
                last_src->next_src = first_obj;
            last_src = curr_block;
        }
        Partial_Reveal_Object* p_obj = first_obj;

        while( p_obj ) {
            assert( obj_is_marked_in_vt(p_obj));

            unsigned int obj_size = (unsigned int)((POINTER_SIZE_INT)start_pos - (POINTER_SIZE_INT)p_obj);

#ifdef GC_GEN_STATS
            gc_gen_collector_update_moved_nos_mos_obj_stats_major(stats, obj_size);
#endif

            Obj_Info_Type obj_info = get_obj_info(p_obj);

            unsigned int obj_size_precompute = obj_size;

#ifdef USE_32BITS_HASHCODE
            precompute_hashcode_extend_size(p_obj, dest_addr, &obj_size_precompute);
#endif
            if( ((POINTER_SIZE_INT)dest_addr + obj_size_precompute) > (POINTER_SIZE_INT)GC_BLOCK_END(dest_block)) {
#ifdef USE_32BITS_HASHCODE
                block_swap_hashcode_buf(dest_block, &new_hashcode_buf, &old_hashcode_buf);
#endif
                dest_block->new_free = dest_addr;
                dest_block = mspace_get_next_target_block(collector, mspace);
                if(dest_block == NULL) {
                    collector->result = FALSE;
                    return;
                }
                if((!local_last_dest) || (dest_block->block_idx > local_last_dest->block_idx))
                    local_last_dest = dest_block;
                dest_addr = dest_block->base;
                dest_block->src = p_obj;
                last_src = curr_block;
                if(p_obj != first_obj)
                    ++curr_block->dest_counter;
            }
            assert(((POINTER_SIZE_INT)dest_addr + obj_size) <= (POINTER_SIZE_INT)GC_BLOCK_END(dest_block));

#ifdef USE_32BITS_HASHCODE
            obj_info = slide_compact_process_hashcode(p_obj, dest_addr, &obj_size, collector, curr_block->hashcode_buf, new_hashcode_buf);
#endif

            if( obj_info != 0 ) {
                collector_remset_add_entry(collector, (Partial_Reveal_Object **)dest_addr);
                collector_remset_add_entry(collector, (Partial_Reveal_Object **)(POINTER_SIZE_INT)obj_info);
            }

            obj_set_fw_in_oi(p_obj, dest_addr);

            /* FIXME: should use alloc to handle alignment requirement */
            dest_addr = (void *)((POINTER_SIZE_INT) dest_addr + obj_size);
            p_obj = block_get_next_marked_obj_prefetch_next(curr_block, &start_pos);
        }
#ifdef USE_32BITS_HASHCODE
        hashcode_buf_clear(curr_block->hashcode_buf);
#endif
        curr_block = mspace_get_next_compact_block(collector, mspace);

    }

#ifdef USE_32BITS_HASHCODE
    pool_put_entry(collector->gc->metadata->collector_hashcode_pool, collector->hashcode_set);
    collector->hashcode_set = NULL;
#endif
    pool_put_entry(collector->gc->metadata->collector_remset_pool, collector->rem_set);
    collector->rem_set = NULL;
    dest_block->new_free = dest_addr;

    /* Publish the furthest destination block: raise the shared
       last_block_for_dest monotonically with CAS, never lowering it. */
    Block_Header *cur_last_dest = (Block_Header *)last_block_for_dest;
    collector->cur_target_block = local_last_dest;
    while((local_last_dest)&&((!cur_last_dest) || (local_last_dest->block_idx > cur_last_dest->block_idx))) {
        atomic_casptr((volatile void **)&last_block_for_dest, local_last_dest, cur_last_dest);
        cur_last_dest = (Block_Header *)last_block_for_dest;
    }

#ifdef USE_32BITS_HASHCODE
    old_hashcode_buf = block_set_hashcode_buf(dest_block, new_hashcode_buf);
    hashcode_buf_destory(old_hashcode_buf);
#endif
    return;
}
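
The publication step at the end (the loop over last_block_for_dest) is a CAS-driven monotonic maximum: each collector tries to raise the shared pointer to its own furthest destination block and retries only while its candidate still ranks higher. A standalone sketch with C11 atomics; Block, last_dest, and publish_last_dest are illustrative names, with block_idx ordering assumed as in the GC code.

#include <stdatomic.h>
#include <stddef.h>

typedef struct Block { int block_idx; } Block;

/* Shared high-water mark, analogous to last_block_for_dest. */
static _Atomic(Block *) last_dest;

static void publish_last_dest(Block *mine)
{
  Block *cur = atomic_load(&last_dest);
  /* Retry only while our block still outranks the published one;
     on CAS failure, cur is refreshed and the condition is re-checked. */
  while (mine && (cur == NULL || mine->block_idx > cur->block_idx)) {
    if (atomic_compare_exchange_weak(&last_dest, &cur, mine))
      break;
  }
}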