Example #1
/* copy the source pool into the dest pool, skipping NULL slots */
void verifier_copy_pool(Pool* dest_pool, Pool* source_pool)
{
  Pool* temp_pool = sync_pool_create();
  
  Vector_Block* dest_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
  pool_iterator_init(source_pool);
  while(Vector_Block *source_set = pool_iterator_next(source_pool)){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(source_set);
    while( !vector_block_iterator_end(source_set, iter)){
      assert(!vector_block_is_full(dest_set));
      if(*iter)  vector_block_add_entry(dest_set, *iter);
      iter = vector_block_iterator_advance(source_set, iter);
    }
    pool_put_entry(temp_pool, dest_set);
    dest_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
  }
  
  /* the last dest_set fetched above is still empty; return it to the free pool
     rather than leaking it */
  pool_put_entry(verifier_metadata->free_set_pool, dest_set);
  dest_set = NULL;
  pool_iterator_init(temp_pool);
  /* the second pass restores the original order (a single pass reverses it, cf. Example #8) */
  while((dest_set = pool_iterator_next(temp_pool)) != NULL){
    pool_put_entry(dest_pool, dest_set);
  }
  
  sync_pool_destruct(temp_pool);
  return;
}
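All of these examples share one iteration idiom: initialize the pool's iterator, fetch vector blocks until NULL, and walk each block slot by slot. A minimal sketch of that skeleton, distilled from the calls used throughout this section (the pool/vector-block API is assumed to behave exactly as it is used in these examples):

/* Sketch only: the canonical pool/vector-block walk shared by the examples here. */
static void walk_pool(Pool *pool)
{
  pool_iterator_init(pool);                                /* reset the pool cursor */
  while(Vector_Block *block = pool_iterator_next(pool)){   /* NULL terminates the walk */
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    while(!vector_block_iterator_end(block, iter)){
      /* *iter is one POINTER_SIZE_INT slot; callers cast the value (*iter)
         or the slot address (iter) as the situation requires */
      iter = vector_block_iterator_advance(block, iter);
    }
  }
}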
Example #2
/* Only called in non-minor collections. pointer_addr_in_pool tells whether each pool slot holds a p_ref (TRUE) or a p_obj stored inline (FALSE). */
static void nondestructively_fix_finref_pool(GC *gc, Pool *pool, Boolean pointer_addr_in_pool, Boolean double_fix)
{
  Finref_Metadata *metadata = gc->finref_metadata;
  REF *p_ref;
  Partial_Reveal_Object *p_obj;
  
  /* NOTE:: this is nondestructive to the root sets. */
  pool_iterator_init(pool);
  Vector_Block *repset = pool_iterator_next(pool);
  while(repset){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(repset);
    for(; !vector_block_iterator_end(repset,iter); iter = vector_block_iterator_advance(repset,iter)){
      if(pointer_addr_in_pool)
        p_ref = (REF*)*iter;  /* the slot value is the address of the reference (p_ref in pool) */
      else
        p_ref = (REF*)iter;   /* the slot itself is the reference slot (p_obj in pool) */
      p_obj = read_slot(p_ref);
      
      if(collect_is_compact_move()){ /* include both unique move-compact and major move-compact */
        move_compaction_update_ref(gc, p_ref);
      } else if(collect_is_ms_compact()){
        if(obj_is_fw_in_oi(p_obj))
          moving_mark_sweep_update_ref(gc, p_ref, double_fix);
      } else { /* major slide compact */
        assert((obj_is_marked_in_vt(p_obj) && obj_is_fw_in_oi(p_obj)));
        write_slot(p_ref , obj_get_fw_in_oi(p_obj));
      }
    }
    repset = pool_iterator_next(pool);
  }
}
Example #3
void verifier_trace_rootsets(Heap_Verifier* heap_verifier, Pool* root_set_pool)
{
  Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
  GC_Verifier* gc_verifier = heap_verifier->gc_verifier;
  gc_verifier->objects_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
  gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool);
  gc_verifier->hashcode_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
  pool_iterator_init(root_set_pool);
  Vector_Block* root_set = pool_iterator_next(root_set_pool);
  
  /* first step: copy all root objects to trace tasks. */ 
  while(root_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set,iter)){
      REF* p_ref = (REF* )*iter;
      iter = vector_block_iterator_advance(root_set,iter);

      if(!heap_verifier->need_verify_rootset || !heap_verifier->is_before_gc){
        if(!verify_rootset_slot(p_ref, heap_verifier)){
          gc_verifier->is_verification_passed = FALSE;
          assert(0);
          continue;
        }
      }

      Partial_Reveal_Object* p_obj = read_slot(p_ref);
      assert(p_obj != NULL);  

      verifier_tracestack_push(p_obj, gc_verifier->trace_stack);
    } 
    root_set = pool_iterator_next(root_set_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(verifier_metadata->mark_task_pool, gc_verifier->trace_stack);
  
  /* second step: iterate over the trace tasks and trace the objects */
  gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool);

  Vector_Block* trace_task = pool_get_entry(verifier_metadata->mark_task_pool);

  while(trace_task){    
    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
    while(!vector_block_iterator_end(trace_task,iter)){
      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object* )*iter;
      iter = vector_block_iterator_advance(trace_task,iter);
      trace_object(heap_verifier, p_obj); 
    }
    vector_stack_clear(trace_task);
    pool_put_entry(verifier_metadata->free_task_pool, trace_task);
    trace_task = pool_get_entry(verifier_metadata->mark_task_pool);
  }
  vector_stack_clear(gc_verifier->trace_stack);
  pool_put_entry(verifier_metadata->free_task_pool, gc_verifier->trace_stack);
  gc_verifier->trace_stack = NULL;

}
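verifier_trace_rootsets above (and verifier_trace_objsets in Example #9) share a two-phase shape that is easy to lose in the details. A condensed recap, assuming the pool/task API behaves as used above:

/* Phase 1: walk the input pool and push every object onto trace_stack;
 *          verifier_tracestack_push presumably migrates full stacks to mark_task_pool,
 *          so only the last, partially filled stack needs putting back by hand.
 * Phase 2: drain mark_task_pool with pool_get_entry, trace every object in each task
 *          (tracing may push new tasks onto a fresh trace_stack), then clear each
 *          emptied block and recycle it into free_task_pool for reuse.
 */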
Example #4
void mspace_collection(Mspace* mspace) 
{
  mspace->num_collections++;

  GC* gc = mspace->gc;  
  Transform_Kind kind = gc->tuner->kind;
 
  /* init the pool before starting multiple collectors */

  pool_iterator_init(gc->metadata->gc_rootset_pool);

  //For_LOS_extend
  if(LOS_ADJUST_BOUNDARY){
    if(gc->tuner->kind != TRANS_NOTHING || collect_is_fallback())
      major_set_compact_slide();
    else
      major_set_compact_move();
  }else{
    gc->tuner->kind = TRANS_NOTHING;
  }

  if(major_is_compact_slide()){
#ifdef ORDER_GC_DEBUG
    printf("[GC DEBUG]: slide compact algo start ... \n");
#endif
    TRACE2("gc.process", "GC: slide compact algo start ... \n");
    collector_execute_task(gc, (TaskType)slide_compact_mspace, (Space*)mspace);
    TRACE2("gc.process", "\nGC: end of slide compact algo ... \n");
#ifdef ORDER_GC_DEBUG
    printf("[GC DEBUG]: end of slide compact algo ... \n");
#endif
  }else if( major_is_compact_move()){      
#ifdef ORDER_GC_DEBUG
    printf("[GC DEBUG]: move compact algo start ... \n");
#endif    
    TRACE2("gc.process", "GC: move compact algo start ... \n");
    collector_execute_task(gc, (TaskType)move_compact_mspace, (Space*)mspace);
    TRACE2("gc.process", "\nGC: end of move compact algo ... \n");
#ifdef ORDER_GC_DEBUG
    printf("[GC DEBUG]: end of move compact algo ... \n");
#endif    
  }else{
    LDIE(75, "GC: The specified major collection algorithm doesn't exist!");
  }

  if((!LOS_ADJUST_BOUNDARY)&&(kind != TRANS_NOTHING) ) {
    gc->tuner->kind = kind;
    gc_compute_space_tune_size_after_marking(gc);
  }
  
  return;  
} 
Example #5
static void finref_copy_pool_to_rootset(GC *gc, Pool *src_pool)
{
  pool_iterator_init(src_pool);
  while(Vector_Block *root_set = pool_iterator_next(src_pool)){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(root_set);
    while(!vector_block_iterator_end(root_set, iter)){
      gc_compressed_rootset_add_entry(gc, (REF*)iter);  /* pass the slot address: the refs are stored inline in the pool */
      iter = vector_block_iterator_advance(root_set, iter);
    }
  }
}
Example #6
static void resurrect_finalizable_objects(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *finalizable_obj_pool = metadata->finalizable_obj_pool;
  
  if(finalizable_obj_pool_is_empty(gc))
    return;
  
  DURING_RESURRECTION = TRUE;
  
  pool_iterator_init(finalizable_obj_pool);
  Vector_Block *block = pool_iterator_next(finalizable_obj_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      
      /* the object may already have been resurrected by an earlier resurrection */
      if(!gc_obj_is_dead(gc, p_obj)){
        if(collect_is_minor() && obj_need_move(gc, p_obj))
          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
        continue;
      }
      
      resurrect_obj_tree(collector, p_ref);
      if(collector->result == FALSE){
        /* Resurrection fallback happens */
        assert(collect_is_minor());
        return; /* force return */
      }
    }
    
    block = pool_iterator_next(finalizable_obj_pool);
  }
  
  /* In major, fallback and sweep-compact collections we need to record the p_ref of each
   * dead root obj so it can be updated later: it lives outside the heap, so it can't be
   * updated during ref fixing. In minor collections the p_ref of a dead root obj is
   * updated automatically while tracing.
   */
  if(collect_need_update_repset())
    finref_add_repset_from_pool(gc, finalizable_obj_pool);
  metadata->pending_finalizers = TRUE;
  
  DURING_RESURRECTION = FALSE;
  
  /* finalizable objs have been added to the finref repset pool or updated by tracing */
}
Example #7
static void finref_add_repset_from_pool(GC *gc, Pool *pool)
{
  finref_reset_repset(gc);
  pool_iterator_init(pool);
  Vector_Block *block = pool_iterator_next(pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      if(*p_ref && obj_need_move(gc, p_obj))
        finref_repset_add_entry(gc, p_ref);
    }
    block = pool_iterator_next(pool);
  }
  finref_put_repset(gc);
}
Example #8
/* copy source pool to dest pool; a single pass through the pool reverses the block order */
void verifier_copy_pool_reverse_order(Pool* dest_pool, Pool* source_pool)
{
  pool_iterator_init(source_pool);
  Vector_Block* dest_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
  
  while(Vector_Block *source_set = pool_iterator_next(source_pool)){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(source_set);
    while( !vector_block_iterator_end(source_set, iter)){
      assert(!vector_block_is_full(dest_set));
      vector_block_add_entry(dest_set, *iter);
      iter = vector_block_iterator_advance(source_set, iter);
    }
    pool_put_entry(dest_pool, dest_set);
    dest_set = verifier_free_set_pool_get_entry(verifier_metadata->free_set_pool);
  }
  /* the last dest_set fetched above is still empty; return it to the free pool rather than leaking it */
  pool_put_entry(verifier_metadata->free_set_pool, dest_set);
  return;
}
Example #9
void verifier_trace_objsets(Heap_Verifier* heap_verifier, Pool* obj_set_pool)
{
  Heap_Verifier_Metadata* verifier_metadata = heap_verifier->heap_verifier_metadata;
  GC_Verifier* gc_verifier = heap_verifier->gc_verifier;
  gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool);
  pool_iterator_init(obj_set_pool);
  Vector_Block* obj_set = pool_iterator_next(obj_set_pool);
  /* first step: copy all objects in the sets to trace tasks. */ 
  while(obj_set){
    POINTER_SIZE_INT* iter = vector_block_iterator_init(obj_set);
    while(!vector_block_iterator_end(obj_set,iter)){
      Partial_Reveal_Object* p_obj = read_slot((REF*)iter);
      iter = vector_block_iterator_advance(obj_set,iter);
      /* p_obj can be NULL: after a GC, slots in the finalizable objects list may have been cleared. */
      //assert(p_obj != NULL);
      if(p_obj == NULL) continue;
      if(heap_verifier->gc_is_gen_mode && heap_verifier->is_before_gc && !obj_belongs_to_nos(p_obj)) continue;
      verifier_tracestack_push(p_obj, gc_verifier->trace_stack);
    } 
    obj_set = pool_iterator_next(obj_set_pool);
  }
  /* put back the last trace_stack task */    
  pool_put_entry(verifier_metadata->mark_task_pool, gc_verifier->trace_stack);
  
  /* second step: iterate over the trace tasks and trace the objects */
  gc_verifier->trace_stack = verifier_free_task_pool_get_entry(verifier_metadata->free_task_pool);

  Vector_Block* trace_task = pool_get_entry(verifier_metadata->mark_task_pool);

  while(trace_task){    
    POINTER_SIZE_INT* iter = vector_block_iterator_init(trace_task);
    while(!vector_block_iterator_end(trace_task,iter)){
      Partial_Reveal_Object* p_obj = (Partial_Reveal_Object* )*iter;
      iter = vector_block_iterator_advance(trace_task,iter);
      trace_object(heap_verifier, p_obj); 
    }
    vector_stack_clear(trace_task);
    pool_put_entry(verifier_metadata->free_task_pool, trace_task);
    trace_task = pool_get_entry(verifier_metadata->mark_task_pool);
  }
  vector_stack_clear(gc_verifier->trace_stack);
  pool_put_entry(verifier_metadata->free_task_pool, gc_verifier->trace_stack);
  gc_verifier->trace_stack = NULL;

}
Example #10
void wspace_mostly_con_final_mark( GC *gc ) {

    /* init the root set pool */
    pool_iterator_init(gc->metadata->gc_rootset_pool);
    /* prepare the dirty object set */
    gc_prepare_dirty_set(gc);
    /* a newly assigned thread may reuse one that just finished in the same phase */
    conclctor_set_weakref_sets(gc);

    /* start the final mostly-concurrent mark */
    gc_ms_start_mostly_con_final_mark((GC_MS*)gc, mostly_con_final_marker_num);

    mostly_con_mark_terminate_reset();
    gc_mostly_con_update_stat_after_final_marking(gc);

    gc_reset_dirty_set(gc);
    gc_clear_rootset(gc);
    gc_prepare_sweeping(gc);
    state_transformation( gc, GC_CON_TRACE_DONE, GC_CON_BEFORE_SWEEP );
}
Example #11
static void identify_finalizable_objects(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *obj_with_fin_pool = metadata->obj_with_fin_pool;
  
  gc_reset_finalizable_objects(gc);
  pool_iterator_init(obj_with_fin_pool);
  Vector_Block *block = pool_iterator_next(obj_with_fin_pool);
  while(block){
    unsigned int block_has_ref = 0;
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      if(collect_is_fallback())
        fallback_update_fw_ref(p_ref);  // in case that this collection is ALGO_MAJOR_FALLBACK
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      if(!p_obj)
        continue;
      if(gc_obj_is_dead(gc, p_obj)){
        gc_add_finalizable_obj(gc, p_obj);
        *p_ref = (REF)NULL;
      } else {
        if(collect_is_minor() && obj_need_move(gc, p_obj)){
          assert(obj_is_fw_in_oi(p_obj));
          write_slot(p_ref, obj_get_fw_in_oi(p_obj));
        }
        ++block_has_ref;
      }
    }
    if(!block_has_ref)
      vector_block_clear(block);  /* no live finalizable refs remain in this block */
    
    block = pool_iterator_next(obj_with_fin_pool);
  }
  gc_put_finalizable_objects(gc);
  
  if(collect_need_update_repset())
    finref_add_repset_from_pool(gc, obj_with_fin_pool);
}
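A compact recap of what the loop above does with each slot (summarizing the code, not adding behavior):

/* Per slot in obj_with_fin_pool:
 *   p_obj == NULL        -> skip (slot already cleared)
 *   p_obj dead           -> move it to the finalizable-objects pool and clear the slot
 *   p_obj alive and moved-> fix the slot with the forwarding pointer (minor collections)
 * Blocks left without any live ref are cleared so they can be reused.
 */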
Example #12
/*
 * The reason why we don't use identify_dead_refs() to implement this function is
 * that we will differentiate phanref from weakref in the future.
 */
static void identify_dead_phanrefs(Collector *collector)
{
  GC *gc = collector->gc;
  Finref_Metadata *metadata = gc->finref_metadata;
  Pool *phanref_pool = metadata->phanref_pool;
  
  if(collect_need_update_repset())
    finref_reset_repset(gc);
//  collector_reset_repset(collector);
  pool_iterator_init(phanref_pool);
  Vector_Block *block = pool_iterator_next(phanref_pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      Partial_Reveal_Object **p_ref = (Partial_Reveal_Object **)iter;
      Partial_Reveal_Object *p_obj = read_slot((REF*)p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);

      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);      
      if(!p_referent){  // referent field has been cleared
        *p_ref = NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else { /* a collect_move_object() check would be redundant here: obj_need_move() already covers it */
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
        continue;
      }
      *p_referent_field = (REF)NULL;
#ifdef ORDER_DEBUG
      if(ref_file == NULL){
        if(order_record){
          ref_file = fopen64("RECORD_REF_LOG.log", "w+");
        }else{
          ref_file = fopen64("REPLAY_REF_LOG.log", "w+");
        }
      }
      assert(ref_file);
      fprintf(ref_file, "GC[%d]: ref (%d, %d) is DEAD!\n", gc->num_collections, p_referent->alloc_tid, p_referent->alloc_count);
      fflush(ref_file);
#endif
      /* Phantom status: for future use
       * if((unsigned int)p_referent & PHANTOM_REF_ENQUEUE_STATUS_MASK){
       *   // enqueued but not explicitly cleared OR pending for enqueueing
       *   *iter = NULL;
       * }
       * resurrect_obj_tree(collector, p_referent_field);
       */
    }
    block = pool_iterator_next(phanref_pool);
  }
//  collector_put_repset(collector);
  if(collect_need_update_repset()){
    finref_put_repset(gc);
    finref_add_repset_from_pool(gc, phanref_pool);
  }
}
Example #13
static void identify_dead_refs(GC *gc, Pool *pool)
{
  if(collect_need_update_repset())
    finref_reset_repset(gc);

  pool_iterator_init(pool);
  Vector_Block *block = pool_iterator_next(pool);
  while(block){
    POINTER_SIZE_INT *iter = vector_block_iterator_init(block);
    for(; !vector_block_iterator_end(block, iter); iter = vector_block_iterator_advance(block, iter)){
      REF *p_ref = (REF*)iter;
      Partial_Reveal_Object *p_obj = read_slot(p_ref);
      assert(p_obj);
      REF *p_referent_field = obj_get_referent_field(p_obj);
      if(collect_is_fallback())
        fallback_update_fw_ref(p_referent_field);
        
      Partial_Reveal_Object *p_referent = read_slot(p_referent_field);
      
      if(!p_referent){  
        /* referent field has been cleared. I forgot why we set p_ref to NULL here;
           I guess this ref obj was already processed through another p_ref, so there
           is no need to keep the same ref obj in this p_ref. */
        *p_ref = (REF)NULL;
        continue;
      }
      if(!gc_obj_is_dead(gc, p_referent)){  // referent is alive
        if(obj_need_move(gc, p_referent)){
          if(collect_is_minor()){
            assert(obj_is_fw_in_oi(p_referent));
            Partial_Reveal_Object* p_new_referent = obj_get_fw_in_oi(p_referent);
            write_slot(p_referent_field, p_new_referent);
            /* In gen mode, if the referent stays in NOS, we need to keep p_referent_field
               in the collector remset. This keeps the ref obj alive even though it is only
               weakly reachable in the next gen-mode collection, which simplifies the design;
               otherwise we would have to remember ref objs in MOS separately and process
               them separately. */
            if(gc_is_gen_mode())
              if(addr_belongs_to_nos(p_new_referent) && !addr_belongs_to_nos(p_obj))
                collector_remset_add_entry(gc->collectors[0], ( Partial_Reveal_Object**)p_referent_field); 

          } else { /* a collect_move_object() check would be redundant here: obj_need_move() already covers it */
            finref_repset_add_entry(gc, p_referent_field);
          }
        }
        *p_ref = (REF)NULL;
      }else{
        /* else, the referent is dead (weakly reachable): clear the referent field */
        *p_referent_field = (REF)NULL;
#ifdef ORDER_DEBUG
        if(ref_file == NULL){
          if(order_record){
            ref_file = fopen64("RECORD_REF_LOG.log", "w+");
          }else{
            ref_file = fopen64("REPLAY_REF_LOG.log", "w+");
          }
        }
        assert(ref_file);
        fprintf(ref_file, "GC[%d]: ref (%d, %d) is DEAD!\n", gc->num_collections, p_referent->alloc_tid, p_referent->alloc_count);
        fflush(ref_file);
#endif
        /* for a dead referent, p_ref is NOT set to NULL: p_ref keeps the ref object,
           which will be moved to the VM for enqueueing. */
      }
    }/* for each ref object */
    
    block = pool_iterator_next(pool);
  }
  
  if(collect_need_update_repset()){
    finref_put_repset(gc);
    finref_add_repset_from_pool(gc, pool);
  }
}
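To summarize identify_dead_refs, each weak-reference slot ends in one of three states; a compact recap of the cases handled above:

/* Case 1: referent field already NULL -> clear p_ref; nothing left to process.
 * Case 2: referent alive              -> fix up the referent field if the referent
 *                                        moves (write_slot in minor collections,
 *                                        finref repset otherwise), then clear p_ref:
 *                                        the ref object needs no enqueueing.
 * Case 3: referent dead               -> clear the referent field but KEEP p_ref,
 *                                        so the ref object can be handed to the VM
 *                                        for enqueueing later.
 */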
Example #14
/* One assumption: pfc_pool is not empty */
static Boolean pfc_pool_roughly_sort(Pool *pfc_pool, Chunk_Header **least_free_chunk, Chunk_Header **most_free_chunk)
{
  Chunk_Header *bucket_head[PFC_SORT_NUM];  /* Sorted chunk buckets' heads */
  Chunk_Header *bucket_tail[PFC_SORT_NUM];  /* Sorted chunk buckets' tails */
  unsigned int slot_num;
  unsigned int chunk_num = 0;
  unsigned int slot_alloc_num = 0;
  
  /* Init buckets' heads and tails */
  memset(bucket_head, 0, sizeof(Chunk_Header*) * PFC_SORT_NUM);
  memset(bucket_tail, 0, sizeof(Chunk_Header*) * PFC_SORT_NUM);
  
  /* Roughly sort chunks in pfc_pool */
  pool_iterator_init(pfc_pool);
  Chunk_Header *chunk = (Chunk_Header*)pool_iterator_next(pfc_pool);
  if(chunk) slot_num = chunk->slot_num;  /* all chunks in one pfc_pool share the same slot_num */
  while(chunk){
    ++chunk_num;
    assert(chunk->alloc_num);
    slot_alloc_num += chunk->alloc_num;
    Chunk_Header *next_chunk = chunk->next;
    /* map alloc_num in [1, slot_num] to a bucket in [0, PFC_SORT_NUM-1] by fullness */
    unsigned int bucket_index = (chunk->alloc_num*PFC_SORT_NUM-1) / slot_num;
    assert(bucket_index < PFC_SORT_NUM);
    sorted_chunk_bucket_add_entry(&bucket_head[bucket_index], &bucket_tail[bucket_index], chunk);
    chunk = next_chunk;
  }
  
  /* Empty the pfc pool because some chunks in this pool will be free after compaction */
  pool_empty(pfc_pool);
  
  /* If compaction can't yield at least one free chunk, there is no need to compact.
   * This condition also covers chunk_num == 1, where compaction is likewise pointless.
   */
  if(slot_num*(chunk_num-1) <= slot_alloc_num){
    for(unsigned int i = 0; i < PFC_SORT_NUM; i++){
      Chunk_Header *chunk = bucket_head[i];
      while(chunk){
        Chunk_Header *next_chunk = chunk->next;
        pool_put_entry(pfc_pool, chunk);
        chunk = next_chunk;
      }
    }
    return FALSE;
  }
  
  /* Link the sorted chunk buckets into one single ordered bidirectional list */
  Chunk_Header *head = NULL;
  Chunk_Header *tail = NULL;
  for(unsigned int i = PFC_SORT_NUM; i--;){  /* walk buckets from fullest (least free) down to emptiest */
    assert((head && tail) || (!head && !tail));
    assert((bucket_head[i] && bucket_tail[i]) || (!bucket_head[i] && !bucket_tail[i]));
    if(!bucket_head[i]) continue;
    if(!tail){
      head = bucket_head[i];
      tail = bucket_tail[i];
    } else {
      tail->next = bucket_head[i];
      bucket_head[i]->prev = tail;
      tail = bucket_tail[i];
    }
  }
  
  assert(head && tail);
  *least_free_chunk = head;
  *most_free_chunk = tail;
  
  return TRUE;
}
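For a concrete feel of the bucket mapping above, here is the arithmetic with illustrative values; PFC_SORT_NUM == 8 and slot_num == 128 are assumptions for this example only (the real constant is defined elsewhere in the codebase):

/* Illustration with assumed values PFC_SORT_NUM == 8, slot_num == 128:
 *   alloc_num ==   1: (  1*8 - 1) / 128 = 0  -> bucket 0 (emptiest chunks)
 *   alloc_num ==  64: ( 64*8 - 1) / 128 = 3  -> a middle bucket
 *   alloc_num == 128: (128*8 - 1) / 128 = 7  -> bucket 7 (fullest chunks)
 * The final linking loop walks buckets downward, so the resulting list runs from
 * least free (head) to most free (tail), matching least_free_chunk/most_free_chunk.
 */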