bool CompController::lp_write(uint64_t lp_no) { if( lp_no > lp_num ) ERR_AND_RTN; LPMAP old_lp_map = lpt[lp_no]; // data compression uint32_t length = do_compression( lp_size ); // get next write destination if( is_buf_full(buf, length) && !close_buf_and_open_next() ) ERR_AND_RTN; // update map lpt[lp_no].lb_id = buf.tgt_lgblock->id; lpt[lp_no].ofs = buf.cur_sector; lpt[lp_no].len = length; // update reverse map REVMAP revmap = { buf.cur_sector , lp_no }; buf.tgt_lgblock->rev_map.push_back(revmap); // invalidate old data lbt[old_lp_map.lb_id].free_sector_num += old_lp_map.len; if( lbt[old_lp_map.lb_id].free_sector_num > lb_size ) { std::cout << "size violation:" << lbt[old_lp_map.lb_id].free_sector_num <<","<< lb_size << "," << old_lp_map.len << "," << old_lp_map.lb_id<< std::endl; ERR_AND_RTN; } if( lbt[old_lp_map.lb_id].free_sector_num == lb_size ) { lbt[old_lp_map.lb_id].state = LB_FREE; free_list.push_back( &lbt[old_lp_map.lb_id] ); } update_lb_list( &lbt[old_lp_map.lb_id] ); // update buffer information buf.cur_sector += length; buf.tgt_lgblock->free_sector_num -= length; update_lb_list( buf.tgt_lgblock ); // destaging while( !destage_tgt.empty() ) { LOGBLOCK* lg = destage_tgt.front(); destage_tgt.pop_front(); CommandInfo cmd; cmd.lba = calc_raid_addr( lg->id, 0 ); cmd.sector_num = lb_size; cmd.opcode = IO_WRITE; if( !raid_translation(cmd, pending_cmd) ) ERR_AND_RTN; } return true; }
// Append one instruction at the tail of the buffer.
// The caller is responsible for ensuring there is room; running out of
// space is a programming error caught by the assertion below.
void INST_BUF_CLASS::insert ( ASIM_INST inst)
{
    // Precondition: the buffer must not already be at capacity.
    ASSERTX( !is_buf_full() );

    // Store into the current tail slot, then advance the tail index
    // (next_idx handles wrap-around) and bump the occupancy count.
    my_inst[my_tail] = inst;
    my_tail = next_idx( my_tail );
    ++my_num_entries;
}
// One-time initialization of the compression controller.
//
// Sizes the logical-page map (lpt) and log-block table (lbt) from the single
// RAID group in rg_list, then pre-fills the whole virtual address space with
// synthetic data at the engine's average compression ratio so the simulation
// starts from a fully-written state.
//
// Parameters:
//   _cmp_engine       - compression engine; queried for the average ratio.
//   gc_buffer_ratio   - fraction of physical capacity reserved for GC
//                       headroom (real data gets 1 - gc_buffer_ratio).
//   chunk_sector_size - logical-page size in sectors (becomes lp_size).
//
// Returns true on success; false / ERR_AND_RTN on parameter violations or
// if init() was already called (lpt/lbt non-NULL).
//
// NOTE(review): lbt/lpt are raw `new[]` allocations with no visible matching
// delete[] in this chunk -- presumably freed in a destructor elsewhere;
// confirm, or this leaks on teardown.
bool CompController::init(CompEngine* _cmp_engine, double gc_buffer_ratio, uint64_t chunk_sector_size)
{
    // Guard against double initialization.
    if( lpt != NULL || lbt != NULL ) ERR_AND_RTN;

    // test parameter
    real_data_ratio = 1 - gc_buffer_ratio;
    cmp_engine = _cmp_engine;
    avg_comp_ratio = cmp_engine->get_avg_cmp_ratio();
    // Log-block size is one full stripe of the (single) RAID group.
    lb_size = rg_list.front().stripe_width;
    lp_size = chunk_sector_size;

    // parameter check: exactly one RAID group, the log block must hold a
    // whole number of logical pages, and the device capacity must hold a
    // whole number of log blocks.
    if( ( rg_list.size() != 1 ) || ( lb_size < lp_size || lb_size % lp_size != 0 ) || ( rg_list.front().max_lba % lb_size != 0 ) ) {
        printf( HERE "error : parameter violation %ld, %ld, %ld\n", lp_size, lb_size, rg_list.front().max_lba);
        return false;
    }

    // set data size ... aligned to log block size
    // (double arithmetic truncated into integers, then rounded UP to the
    //  next lb_size multiple)
    real_data_size = rg_list.front().max_lba * real_data_ratio;
    real_data_size += ( (real_data_size % lb_size) == 0 ? 0 : (lb_size - (real_data_size % lb_size)) );
    // Virtual (pre-compression) capacity exposed to the host: physical real
    // data size scaled up by the expected compression ratio, lb-aligned.
    virtual_max_lba = real_data_size / avg_comp_ratio;
    virtual_max_lba += ( (virtual_max_lba % lb_size) == 0 ? 0 : (lb_size - (virtual_max_lba % lb_size)) );

    // init log block
    lb_num = rg_list.front().max_lba / lb_size;
    // GC kicks in when the free list drops to ~1% of all blocks.
    // NOTE(review): the cast applies to lb_num, not the product -- the
    // double result is truncated by the assignment anyway, but
    // (uint64_t)(lb_num * 0.01) would state the intent more clearly.
    gc_threshold = (uint64_t)lb_num * (0.01);
    lbt = new LOGBLOCK[lb_num];
    // All blocks start empty, on both the free list and the global lb list.
    for( uint64_t i = 0; i < lb_num; i++ ) {
        lbt[i].id = i;
        lbt[i].state = LB_FREE;
        lbt[i].free_sector_num = lb_size;
        free_list.push_back( &(lbt[i]) );
        lb_list.push_back( &(lbt[i]) );
    }
    lp_num = virtual_max_lba / lp_size;
    lpt = new LPMAP[lp_num];

    // set first open block
    LOGBLOCK* lb = free_list.front();
    free_list.pop_front();
    buf.tgt_lgblock = lb;
    buf.cur_sector = 0;

    //-- write initial data: pre-populate every logical page at the average
    //   compressed length so later lp_write() calls always have a valid old
    //   mapping to invalidate.
    for( uint64_t i = 0; i < lp_num; i++ ) {
        // Average compressed length in sectors (double product truncated).
        lpt[i].len = lp_size * avg_comp_ratio;
        if( is_buf_full(buf, lpt[i].len) && !close_buf_and_open_next() )
            ERR_AND_RTN;
        // update map
        lpt[i].lb_id = buf.tgt_lgblock->id;
        lpt[i].ofs = buf.cur_sector;
        // add reverse map
        REVMAP revmap = { buf.cur_sector, i};
        buf.tgt_lgblock->rev_map.push_back(revmap);
        // update buffer information
        buf.cur_sector += lpt[i].len;
        buf.tgt_lgblock->free_sector_num -= lpt[i].len;
    }
    // Initial fill is simulation bootstrap only: drop any blocks queued for
    // destage so no I/O is issued for the synthetic data.
    destage_tgt.clear();
    return true;
}