void client_loop(struct client *c) { struct pool *mem = pool_create("client io", 1024); if (!mem) return; for (;;) { command *cmd; response resp; uint32_t req_id; if (!read_request(c->socket, c->db, mem, &cmd, &req_id)) break; resp.discriminator = SUCCESS; if (!write_response(c->socket, req_id, &resp)) break; pool_empty(mem); } }
/* Final phase of concurrent sweeping, run by the single last-finishing
 * sweeper thread: re-sweeps chunks that mutators may have touched, rebuilds
 * the PFC (partially-free chunk) lists, returns live abnormal/unreusable
 * chunks to the used list, merges the sweepers' free lists, and advances the
 * collector state machine from SWEEP_DONE to BEFORE_FINISH.
 * Step numbering (3..8) continues a sequence whose earlier steps run in the
 * per-sweeper path outside this function. */
void wspace_last_sweeper_work( Conclctor *last_sweeper ) {

  GC *gc = last_sweeper->gc;
  Wspace *wspace = gc_get_wspace(gc);
  Chunk_Header_Basic* chunk_to_sweep;
  Pool* used_chunk_pool = wspace->used_chunk_pool;

  /* all but one sweeper finishes its job: mark the global sweep as done
   * before handling the leftover work below */
  state_transformation( gc, GC_CON_SWEEPING, GC_CON_SWEEP_DONE );

  /*3. Check the local chunk of mutator*/
  gc_sweep_mutator_local_chunks(wspace->gc);

  /*4. Sweep global alloc normal chunks again.
   * First wait until every mutator has acknowledged it is in a safe state,
   * so no chunk can change under us while re-sweeping. */
  gc_set_sweep_global_normal_chunk();
  gc_wait_mutator_signal(wspace->gc, HSIG_MUTATOR_SAFE);
  wspace_init_pfc_pool_iterator(wspace);
  Pool* pfc_pool = wspace_grab_next_pfc_pool(wspace);
  while(pfc_pool != NULL){
    if(!pool_is_empty(pfc_pool)){
      chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
      while(chunk_to_sweep != NULL){
        /* chunks parked in a pfc pool must still be awaiting zeroing;
         * flip them to USED before handing them to the sweeper */
        assert(chunk_to_sweep->status == (CHUNK_NORMAL | CHUNK_NEED_ZEROING));
        chunk_to_sweep->status = CHUNK_NORMAL | CHUNK_USED;
        wspace_sweep_chunk_con(wspace, last_sweeper, chunk_to_sweep);
        chunk_to_sweep = chunk_pool_get_chunk(pfc_pool);
      }
    }
    /*grab more pfc pools*/
    pfc_pool = wspace_grab_next_pfc_pool(wspace);
  }

  /*5. Check the used list again.*/
  chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
  while(chunk_to_sweep != NULL){
    wspace_sweep_chunk_con(wspace, last_sweeper, chunk_to_sweep);
    chunk_to_sweep = chunk_pool_get_chunk(used_chunk_pool);
  }

  /*6. Switch the PFC backup list to PFC list.*/
  wspace_exchange_pfc_pool(wspace);

  gc_unset_sweep_global_normal_chunk();

  /*7. Put back live abnormal chunk and normal unreusable chunk.
   * These were set aside during sweeping; re-register them as used. */
  Chunk_Header* used_abnormal_chunk = wspace_get_live_abnormal_chunk(wspace);
  while(used_abnormal_chunk){
    used_abnormal_chunk->status = CHUNK_USED | CHUNK_ABNORMAL;
    wspace_reg_used_chunk(wspace,used_abnormal_chunk);
    used_abnormal_chunk = wspace_get_live_abnormal_chunk(wspace);
  }
  pool_empty(wspace->live_abnormal_chunk_pool);

  Chunk_Header* unreusable_normal_chunk = wspace_get_unreusable_normal_chunk(wspace);
  while(unreusable_normal_chunk){
    unreusable_normal_chunk->status = CHUNK_USED | CHUNK_NORMAL;
    wspace_reg_used_chunk(wspace,unreusable_normal_chunk);
    unreusable_normal_chunk = wspace_get_unreusable_normal_chunk(wspace);
  }
  pool_empty(wspace->unreusable_normal_chunk_pool);

  /*8. Merge free chunks from sweepers*/
  Free_Chunk_List *free_list_from_sweeper = wspace_collect_free_chunks_from_sweepers(gc);
  wspace_merge_free_list(wspace, free_list_from_sweeper);

  /* last sweeper will transform the state to before_finish */
  state_transformation( gc, GC_CON_SWEEP_DONE, GC_CON_BEFORE_FINISH );
}
/* Roughly sort the chunks of pfc_pool by occupancy (alloc_num) into
 * PFC_SORT_NUM buckets, then link the buckets into one bidirectional list
 * ordered from least free (head) to most free (tail).
 * Returns FALSE -- with all chunks put back into pfc_pool -- when compacting
 * could not yield at least one free chunk; returns TRUE otherwise, with the
 * list ends written to *least_free_chunk / *most_free_chunk and pfc_pool
 * left empty.
 * One assumption: pfc_pool is not empty */
static Boolean pfc_pool_roughly_sort(Pool *pfc_pool, Chunk_Header **least_free_chunk, Chunk_Header **most_free_chunk)
{
  Chunk_Header *bucket_head[PFC_SORT_NUM];  /* Sorted chunk buckets' heads */
  Chunk_Header *bucket_tail[PFC_SORT_NUM];  /* Sorted chunk buckets' tails */
  unsigned int slot_num;
  unsigned int chunk_num = 0;
  unsigned int slot_alloc_num = 0;

  /* Init buckets' heads and tails */
  memset(bucket_head, 0, sizeof(Chunk_Header*) * PFC_SORT_NUM);
  memset(bucket_tail, 0, sizeof(Chunk_Header*) * PFC_SORT_NUM);

  /* Roughly sort chunks in pfc_pool */
  pool_iterator_init(pfc_pool);
  Chunk_Header *chunk = (Chunk_Header*)pool_iterator_next(pfc_pool);
  /* slot_num is read from the first chunk only -- assumes every chunk in
   * this pfc pool has the same slot count (same size class); TODO confirm */
  if(chunk) slot_num = chunk->slot_num;
  while(chunk){
    ++chunk_num;
    assert(chunk->alloc_num);
    slot_alloc_num += chunk->alloc_num;
    Chunk_Header *next_chunk = chunk->next;
    /* Map occupancy ratio alloc_num/slot_num onto [0, PFC_SORT_NUM):
     * the -1 keeps a completely full chunk in the last bucket. */
    unsigned int bucket_index = (chunk->alloc_num*PFC_SORT_NUM-1) / slot_num;
    assert(bucket_index < PFC_SORT_NUM);
    sorted_chunk_bucket_add_entry(&bucket_head[bucket_index], &bucket_tail[bucket_index], chunk);
    chunk = next_chunk;
  }

  /* Empty the pfc pool because some chunks in this pool will be free after compaction */
  pool_empty(pfc_pool);

  /* If we can't get a free chunk after compaction, there is no need to compact.
   * This condition includes that the chunk num in pfc pool is equal to 1, in which case there is also no need to compact.
   * (live slots do not fit in chunk_num-1 chunks => no chunk can be freed) */
  if(slot_num*(chunk_num-1) <= slot_alloc_num){
    /* Undo: return every bucketed chunk to the pool and report no-compact. */
    for(unsigned int i = 0; i < PFC_SORT_NUM; i++){
      Chunk_Header *chunk = bucket_head[i];
      while(chunk){
        Chunk_Header *next_chunk = chunk->next;
        pool_put_entry(pfc_pool, chunk);
        chunk = next_chunk;
      }
    }
    return FALSE;
  }

  /* Link the sorted chunk buckets into one single ordered bidirectional list.
   * Iterate buckets from most-free down to least-free so the final list runs
   * least-free (head) -> most-free (tail). */
  Chunk_Header *head = NULL;
  Chunk_Header *tail = NULL;
  for(unsigned int i = PFC_SORT_NUM; i--;){
    assert((head && tail) || (!head && !tail));
    assert((bucket_head[i] && bucket_tail[i]) || (!bucket_head[i] && !bucket_tail[i]));
    if(!bucket_head[i]) continue;
    if(!tail){
      head = bucket_head[i];
      tail = bucket_tail[i];
    } else {
      tail->next = bucket_head[i];
      bucket_head[i]->prev = tail;
      tail = bucket_tail[i];
    }
  }

  assert(head && tail);
  *least_free_chunk = head;
  *most_free_chunk = tail;

  return TRUE;
}