Example #1
/*
 * moves a page by writing it to the current active page. if the active
 * block has no free page left, allocate a new active block first.
 */
static double ssd_move_page(int lpn, int from_blk, int plane_num, int elem_num, ssd_t *s)
{
    double cost = 0;
    ssd_element_metadata *metadata = &s->elements[elem_num].metadata;

    switch(s->params.copy_back) {
        case SSD_COPY_BACK_DISABLE:
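            // without copy-back, the replacement active block may be
            // allocated from any plane (-1 lets the allocator choose)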
            if (ssd_last_page_in_block(metadata->active_page, s)) {
                _ssd_alloc_active_block(-1, elem_num, s);
            }
            break;

        case SSD_COPY_BACK_ENABLE:
            ASSERT(metadata->plane_meta[plane_num].active_page == metadata->active_page);
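            // with copy-back, the page moves through the plane's internal
            // register, so the new active block must stay on the same plane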
            if (ssd_last_page_in_block(metadata->active_page, s)) {
                _ssd_alloc_active_block(plane_num, elem_num, s);
            }
            break;

        default:
            fprintf(stderr, "Error: invalid copy back policy %d\n",
                s->params.copy_back);
            exit(1);
    }

    cost += _ssd_write_page_osr(s, metadata, lpn);

    return cost;
}
Example #2
static double ssd_write_one_active_page(int blkno, int count, int elem_num, ssd_t *s)
{
    double cost = 0;
    int cleaning_invoked = 0;
    ssd_element_metadata *metadata;
    ssd_power_element_stat *power_stat;
    int lbn;

    metadata = &(s->elements[elem_num].metadata);
    power_stat = &(s->elements[elem_num].power_stat);

    // get the logical page number corresponding to this blkno
    lbn = ssd_logical_pageno(blkno, s);

    // see if there are any free pages left inside the active block.
    // as per the osr design, the last page is used as a summary page.
    // so if the active_page is already pointing to the summary page,
    // then we need to find another free block to act as active block.
    if (ssd_last_page_in_block(metadata->active_page, s)) {

        // do we need to create more free blocks for future writes?
        if (ssd_start_cleaning(-1, elem_num, s)) {

            printf ("We should not clean here ...\n");
            ASSERT(0);

            // if we're cleaning in the background, this should
            // not get executed
            if (s->params.cleaning_in_background) {
                exit(1);
            }

            cleaning_invoked = 1;
            cost += ssd_clean_element_no_copyback(elem_num, s);
        }

        // if we had invoked the cleaning, we must again check if we
        // need an active block before allocating one. this check is
        // needed because the above cleaning procedure might have
        // allocated new active blocks during the process of cleaning,
        // which might still have free pages for writing.
        if (!cleaning_invoked ||
            ssd_last_page_in_block(metadata->active_page, s)) {
            _ssd_alloc_active_block(-1, elem_num, s);
        }
    }


    // issue the write to the current active page
    cost += _ssd_write_page_osr(s, metadata, lbn, power_stat, blkno);
    cost += ssd_data_transfer_cost(s, count);
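    // account for the bus energy of the transfer (charged here for a full page)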
    ssd_power_flash_calculate(SSD_POWER_FLASH_BUS_DATA_TRANSFER, ssd_data_transfer_cost(s,s->params.page_size), power_stat, s);

    return cost;
}
Example #3
static double ssd_write_one_active_page(int blkno, int count, int elem_num, ssd_t *s)
{
    double cost = 0;
    int cleaning_invoked = 0;
    ssd_element_metadata *metadata;
    ssd_power_element_stat *power_stat;
    int lbn;
	int offset;
	int tmp_block;
	int apn;

    metadata = &(s->elements[elem_num].metadata);
    power_stat = &(s->elements[elem_num].power_stat);

    // get the logical page number corresponding to this blkno
    lbn = ssd_logical_blockno(blkno, s);
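	// apn: absolute page number addressed by blkno; offset: that page's
	// slot within its logical block once pages are striped across elements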
	apn = blkno/s->params.page_size;
	offset = (apn/s->params.nelements)%s->params.pages_per_block;

	// check lbn table
	if(metadata->lba_table[lbn] == -1 ) {
		metadata->lba_table[lbn] = metadata->active_block;
		cost += _ssd_write_page_osr(s, metadata, lbn, offset, power_stat);
		_ssd_alloc_active_block(-1, elem_num, s);
	}
	else { //if already mapped, check log block
		tmp_block = metadata->lba_table[lbn];
		if(metadata->block_usage[tmp_block].page[offset] == -1)
			cost += _ssd_write_page_osr(s, metadata, lbn, offset, power_stat);
		else {
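			// the target page is already written in the mapped data block,
			// so redirect this update to a log block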
			if (metadata->block_usage[tmp_block].log_index == -1) {
				metadata->block_usage[tmp_block].log_index = _ssd_alloc_log_block(-1, elem_num, s, tmp_block);
				cost += _ssd_write_log_block_osr(s, metadata, lbn, offset, power_stat);
			}
			else {
				if(_last_page_in_log_block(metadata, s, tmp_block)){
					ssd_invoke_logblock_cleaning(s, elem_num, lbn);
					metadata->block_usage[tmp_block].log_index = _ssd_alloc_log_block(-1, elem_num, s, tmp_block);
				}
				cost += _ssd_write_log_block_osr(s, metadata, lbn, offset, power_stat);
			}
		}
	}

    // the writes were issued above; now charge the data transfer cost
    cost += ssd_data_transfer_cost(s, count);
    ssd_power_flash_calculate(SSD_POWER_FLASH_BUS_DATA_TRANSFER, ssd_data_transfer_cost(s,s->params.page_size), power_stat, s);

    return cost;
}
Example #4
static double ssd_issue_overlapped_ios(ssd_req **reqs, int total, int elem_num, ssd_t *s)
{
    double max_cost = 0;
	double parunit_op_cost[SSD_MAX_PARUNITS_PER_ELEM];
    double parunit_tot_cost[SSD_MAX_PARUNITS_PER_ELEM];
    ssd_element_metadata *metadata;
    ssd_power_element_stat *power_stat;
    
    int lbn;
	int offset;
    int i;
    int read_cycle = 0;
    listnode **parunits;

    // all the requests must be of the same type
    for (i = 1; i < total; i ++) {
        ASSERT(reqs[i]->is_read == reqs[0]->is_read);
    }

    // is this a set of read requests?
    if (reqs[0]->is_read) {
        read_cycle = 1;
    }

    memset(parunit_tot_cost, 0, sizeof(double)*SSD_MAX_PARUNITS_PER_ELEM);

    // find the planes to which the reqs are to be issued
    metadata = &(s->elements[elem_num].metadata);
    power_stat = &(s->elements[elem_num].power_stat);
    parunits = ssd_pick_parunits(reqs, total, elem_num, metadata, s);

    // repeat until we've served all the requests
    while (1) {
        double max_op_cost = 0;
		double read_xfer_cost = 0.0;
		double write_xfer_cost = 0.0;
        int active_parunits = 0;
        int op_count = 0;

        // do we still have any request to service?
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i ++) {
            if (ll_get_size(parunits[i]) > 0) {
                active_parunits ++;
            }
        }

        // no more requests -- get out
        if (active_parunits == 0) {
            break;
        }

        // clear the per-unit cost array for this round
        memset(parunit_op_cost, 0, sizeof(double)*SSD_MAX_PARUNITS_PER_ELEM);

        // begin a round of serving. we serve one request per
        // parallel unit. if a unit has more than one request
        // in its list, they have to be serialized.
        max_cost = 0;
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i ++) {
            int size;

            size = ll_get_size(parunits[i]);
            if (size > 0) {
				int apn;
                // this parallel unit has a request to serve
                ssd_req *r;
                listnode *n = ll_get_nth_node(parunits[i], 0);

                op_count ++;
                ASSERT(op_count <= active_parunits);

                // get the request
                r = (ssd_req *)n->data;
                lbn = ssd_logical_blockno(r->blk, s);
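				// apn: absolute page number; offset: slot within the logical
				// block (modulo pages_per_block-1, keeping the last page out
				// of the normal mapping)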
				apn = r->blk/s->params.page_size;
				offset = (apn/s->params.nelements)%(s->params.pages_per_block-1);
				parunit_op_cost[i] = 0;

                if (r->is_read) {
					int block = metadata->lba_table[lbn];

					if(block == -1){
						parunit_op_cost[i] = s->params.page_read_latency;
						//Micky
						ssd_power_flash_calculate(SSD_POWER_FLASH_READ, s->params.page_read_latency, power_stat, s);
					}else if(metadata->block_usage[block].log_index == -1){
						parunit_op_cost[i] = s->params.page_read_latency;
						//Micky
						ssd_power_flash_calculate(SSD_POWER_FLASH_READ, s->params.page_read_latency, power_stat, s);
					}else{
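						// a log block exists for this data block: charge an
						// extra page read to fetch the valid copy from it,
						// counted as a spare read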
						parunit_op_cost[i] = s->params.page_read_latency;
						//Micky
						ssd_power_flash_calculate(SSD_POWER_FLASH_READ, s->params.page_read_latency, power_stat, s);
						parunit_op_cost[i] += s->params.page_read_latency;
						ssd_power_flash_calculate(SSD_POWER_FLASH_READ, s->params.page_read_latency, power_stat, s);
						s->spare_read++;
					}

					// tally the read transfer cost
					read_xfer_cost += ssd_data_transfer_cost(s,r->count);
                } else { //for write
                    int plane_num = r->plane_num;
					// issue the write to the current active page.
                    // we need to transfer the data across the serial pins for write.
					metadata->active_block = metadata->plane_meta[plane_num].active_block;
                    // check lbn table
					if(metadata->lba_table[lbn] == -1 ) {
						metadata->lba_table[lbn] = metadata->active_block;
						parunit_op_cost[i] = _ssd_write_page_osr(s, metadata, lbn, offset, power_stat);
						_ssd_alloc_active_block(plane_num, elem_num, s);
					}
					else { //if already mapped, check log block
						int tmp_block = metadata->lba_table[lbn];
						if(metadata->block_usage[tmp_block].page[offset] == -1) {
							parunit_op_cost[i] = _ssd_write_page_osr(s, metadata, lbn, offset, power_stat);
						}
						else {
							if (metadata->block_usage[tmp_block].log_index == -1) {
								metadata->block_usage[tmp_block].log_index = _ssd_alloc_log_block(plane_num, elem_num, s, tmp_block);
								parunit_op_cost[i] = _ssd_write_log_block_osr(s, metadata, lbn, offset, power_stat);
							}
							else {
								if(_last_page_in_log_block(metadata, s, tmp_block)){
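									// the log block is full: merge it back via
									// cleaning, then allocate a fresh log block
									// for the (possibly remapped) data block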
									int new_block;
									parunit_op_cost[i] += ssd_invoke_logblock_cleaning(elem_num, s, lbn);
									new_block = metadata->lba_table[lbn];
									if(metadata->block_usage[new_block].log_index == -1){
										metadata->block_usage[new_block].log_index = _ssd_alloc_log_block(plane_num, elem_num, s, tmp_block);
									}
								}else{
									parunit_op_cost[i] += _ssd_write_log_block_osr(s, metadata, lbn, offset, power_stat);
								}
							}
						}
					}
					write_xfer_cost += ssd_data_transfer_cost(s,r->count);
				}

                ASSERT(r->count <= s->params.page_size);

                // calculate the access and scheduled times; the formula
                // differs for reads and writes
                if (read_cycle) {
                    if (SSD_PARUNITS_PER_ELEM(s) > 4) {
                        printf("modify acc time here ...\n");
                        ASSERT(0);
                    }
                    if (op_count == 1) {
						r->acctime = parunit_op_cost[i] + read_xfer_cost;
                        r->schtime = parunit_tot_cost[i] + r->acctime;
                    } else {
						r->acctime = ssd_data_transfer_cost(s,r->count);
                        r->schtime = parunit_tot_cost[i] + read_xfer_cost + parunit_op_cost[i];
                    }
                } else {
                    // for write
                    r->acctime = parunit_op_cost[i];
                    r->schtime = parunit_tot_cost[i] + write_xfer_cost + r->acctime;
                }

                // find the maximum cost for this round of operations
                if (max_cost < r->schtime) {
                    max_cost = r->schtime;
                }

                // release the node from the linked list
                ll_release_node(parunits[i], n);
            }
		}
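		// account for the bus energy of the data moved during this round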
		ssd_power_flash_calculate(SSD_POWER_FLASH_BUS_DATA_TRANSFER, read_xfer_cost, power_stat, s);
		ssd_power_flash_calculate(SSD_POWER_FLASH_BUS_DATA_TRANSFER, write_xfer_cost, power_stat, s);

        // we can start the next round of operations only after all
        // the operations in the current round are over, because all
        // the parunits share a single set of pins
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i ++) {
            parunit_tot_cost[i] = max_cost;
        }
    }

    for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i ++) {
        ll_release(parunits[i]);
    }
    free(parunits);

	power_stat->acc_time += max_cost;

    return max_cost;
}
Example #5
static double ssd_issue_overlapped_ios(ssd_req **reqs, int total, int elem_num, ssd_t *s)
{
    double max_cost = 0;
    double parunit_op_cost[SSD_MAX_PARUNITS_PER_ELEM];
    double parunit_tot_cost[SSD_MAX_PARUNITS_PER_ELEM];
    ssd_element_metadata *metadata;
    int lpn;
    int i;
    int read_cycle = 0;
    listnode **parunits;

    // all the requests must be of the same type
    for (i = 1; i < total; i ++) {
        ASSERT(reqs[i]->is_read == reqs[0]->is_read);
    }

    // is this a set of read requests?
    if (reqs[0]->is_read) {
        read_cycle = 1;
    }

    memset(parunit_tot_cost, 0, sizeof(double)*SSD_MAX_PARUNITS_PER_ELEM);

    // find the planes to which the reqs are to be issued
    metadata = &(s->elements[elem_num].metadata);
    parunits = ssd_pick_parunits(reqs, total, elem_num, metadata, s);

    // repeat until we've served all the requests
    while (1) {
        //double tot_xfer_cost = 0;
        double max_op_cost = 0;
        int active_parunits = 0;
        int op_count = 0;

        // do we still have any request to service?
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i ++) {
            if (ll_get_size(parunits[i]) > 0) {
                active_parunits ++;
            }
        }

        // no more requests -- get out
        if (active_parunits == 0) {
            break;
        }

        // clear the per-unit cost array for this round
        memset(parunit_op_cost, 0, sizeof(double)*SSD_MAX_PARUNITS_PER_ELEM);

        // begin a round of serving. we serve one request per
        // parallel unit. if a unit has more than one request
        // in its list, they have to be serialized.
        max_cost = 0;
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i ++) {
            int size;

            size = ll_get_size(parunits[i]);
            if (size > 0) {
                // this parallel unit has a request to serve
                ssd_req *r;
                listnode *n = ll_get_nth_node(parunits[i], 0);

                op_count ++;
                ASSERT(op_count <= active_parunits);

                // get the request
                r = (ssd_req *)n->data;
                lpn = ssd_logical_pageno(r->blk, s);

                if (r->is_read) {
                    parunit_op_cost[i] = s->params.page_read_latency;
                } else {
                    int plane_num = r->plane_num;
                    // if this is the last page on the block, allocate a new block
                    if (ssd_last_page_in_block(metadata->plane_meta[plane_num].active_page, s)) {
                        _ssd_alloc_active_block(plane_num, elem_num, s);
                    }

                    // issue the write to the current active page.
                    // we need to transfer the data across the serial pins for write.
                    metadata->active_page = metadata->plane_meta[plane_num].active_page;
                    //printf("elem %d plane %d ", elem_num, plane_num);
                    parunit_op_cost[i] = _ssd_write_page_osr(s, metadata, lpn);
                }

                ASSERT(r->count <= s->params.page_size);

                // calculate the access and scheduled times; the formula
                // differs for reads and writes
                if (read_cycle) {
                    if (SSD_PARUNITS_PER_ELEM(s) > 4) {
                        printf("modify acc time here ...\n");
                        ASSERT(0);
                    }
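                    // the parunits share one set of pins: the first request
                    // in a round pays its flash op plus one transfer; later
                    // requests are serialized behind the earlier transfers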
                    if (op_count == 1) {
                        r->acctime = parunit_op_cost[i] + ssd_data_transfer_cost(s,s->params.page_size);
                        r->schtime = parunit_tot_cost[i] + (op_count-1)*ssd_data_transfer_cost(s,s->params.page_size) + r->acctime;
                    } else {
                        r->acctime = ssd_data_transfer_cost(s,s->params.page_size);
                        r->schtime = parunit_tot_cost[i] + op_count*ssd_data_transfer_cost(s,s->params.page_size) + parunit_op_cost[i];
                    }
                } else {
                    // for write
                    r->acctime = parunit_op_cost[i] + ssd_data_transfer_cost(s,s->params.page_size);
                    r->schtime = parunit_tot_cost[i] + (op_count-1)*ssd_data_transfer_cost(s,s->params.page_size) + r->acctime;
                }


                // find the maximum cost for this round of operations
                if (max_cost < r->schtime) {
                    max_cost = r->schtime;
                }

                // release the node from the linked list
                ll_release_node(parunits[i], n);
            }
        }

        // we can start the next round of operations only after all
        // the operations in the current round are over, because all
        // the parunits share a single set of pins
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i ++) {
            parunit_tot_cost[i] = max_cost;
        }
    }

    for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i ++) {
        ll_release(parunits[i]);
    }
    free(parunits);

    return max_cost;
}
Example #6
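/*
 * full merge: copies every valid page of the data block (D) and its log
 * block (U) into a freshly allocated block, points the lba table at the
 * new block, and erases both source blocks. returns the simulated cost.
 */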
double ssd_fullmerge(ssd_t *s, ssd_element_metadata *metadata, ssd_power_element_stat *power_stat, int lbn, int elem_num)
{
	int prev_block = metadata->lba_table[lbn];
	int log_index = metadata->block_usage[prev_block].log_index;
	int log_block = metadata->log_data[log_index].bsn;
	int num_valid = 0;
	int num_valid_d = metadata->block_usage[prev_block].num_valid;
	int num_valid_u = metadata->block_usage[log_block].num_valid;
	int prev_plane_num = metadata->block_usage[prev_block].plane_num;
	int log_plane_num = metadata->block_usage[log_block].plane_num;
	int plane_num;

	int active_block;
	int i;
	double cost = 0.0;
	double r_cost, w_cost, xfer_cost;

	//set active_block
	metadata->active_block = metadata->plane_meta[prev_plane_num].active_block;
	active_block = metadata->active_block;
	_ssd_alloc_active_block(prev_plane_num, elem_num, s);
	plane_num = metadata->block_usage[active_block].plane_num;
	metadata->plane_meta[prev_plane_num].clean_in_block = prev_block;
	metadata->plane_meta[prev_plane_num].clean_in_progress = 1;
	metadata->plane_meta[log_plane_num].clean_in_block = prev_block;
	metadata->plane_meta[log_plane_num].clean_in_progress = 1;

	//page state copy & init page state
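	// a page is valid in the merged block if it is valid in either the data
	// block or the log block; both source blocks are then marked empty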
	for( i = 0 ; i < s->params.pages_per_block ; i++) {
		if((metadata->block_usage[prev_block].page[i] == 1) || (metadata->log_data[log_index].page[i] != -1)){
			metadata->block_usage[active_block].page[i] = 1;
		}
		metadata->block_usage[prev_block].page[i] = -1;
		metadata->log_data[log_index].page[i] = -1;
		metadata->block_usage[log_block].page[i] = -1;
	}
	metadata->lba_table[lbn] = active_block;

	//update stat
	//update log_table
	metadata->block_usage[prev_block].log_index = -1;
	metadata->log_data[log_index].bsn = -1;
	metadata->log_data[log_index].data_block = -1;
	metadata->log_pos = log_index;
	metadata->num_log--;
	//update block usage
	metadata->block_usage[prev_block].num_valid = 0;
	metadata->block_usage[log_block].num_valid = 0;
	//update plane data
	metadata->plane_meta[prev_plane_num].valid_pages -= num_valid_d;
	metadata->plane_meta[log_plane_num].valid_pages -= num_valid_u;
	num_valid += num_valid_d;
	num_valid += num_valid_u;
	if(num_valid > s->params.pages_per_block) {
		fprintf(outputfile3, "Error number of pages : valid_page %d, Real_page %d\n", num_valid, s->params.pages_per_block);
		fprintf(outputfile3, "Error elem_num %d, lbn %d, original block %d, log block %d\n", elem_num, lbn, prev_block, log_block);
		exit(-1);
	}
	metadata->block_usage[active_block].num_valid = num_valid;
	metadata->plane_meta[plane_num].valid_pages += num_valid;


	//data transfer cost
	//read
	r_cost = s->params.page_read_latency * num_valid;
	cost += r_cost;
	ssd_power_flash_calculate(SSD_POWER_FLASH_READ, r_cost, power_stat, s);

	//write
	w_cost = s->params.page_write_latency * num_valid;
	cost += w_cost;
	ssd_power_flash_calculate(SSD_POWER_FLASH_WRITE, w_cost, power_stat, s);

	//transfer cost
	for( i = 0 ; i < num_valid ; i++) {
		double xfer_cost;
		xfer_cost = ssd_crossover_cost(s, metadata, power_stat, prev_block, active_block);
		cost += xfer_cost;
		s->elements[elem_num].stat.tot_xfer_cost += xfer_cost;
	}

	//erase the data block (D)
	cost += s->params.block_erase_latency;
	ssd_power_flash_calculate(SSD_POWER_FLASH_ERASE, s->params.block_erase_latency, power_stat, s);
	ssd_update_free_block_status(prev_block, prev_plane_num, metadata, s);
	ssd_update_block_lifetime(simtime+cost, prev_block, metadata);
	metadata->plane_meta[prev_plane_num].num_cleans++;
	metadata->plane_meta[prev_plane_num].clean_in_block = 0;
	metadata->plane_meta[prev_plane_num].clean_in_progress = -1;

	//erase the log block (U)
	cost += s->params.block_erase_latency;
	ssd_power_flash_calculate(SSD_POWER_FLASH_ERASE, s->params.block_erase_latency, power_stat, s);
	ssd_update_free_block_status(log_block, log_plane_num, metadata, s);
	ssd_update_block_lifetime(simtime+cost, log_block, metadata);
	metadata->plane_meta[log_plane_num].num_cleans++;
	metadata->plane_meta[log_plane_num].clean_in_block = 0;
	metadata->plane_meta[log_plane_num].clean_in_progress = -1;

	//erase stat update
	s->elements[elem_num].stat.pages_moved += num_valid;
	s->elements[elem_num].stat.num_clean += 2;
	s->elements[elem_num].stat.num_fullmerge++;

	return cost;
}
Example #7
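/*
 * replacement: allocates a new log block, compacts the old log block's
 * valid pages into it starting at slot 0, updates the log mapping, and
 * erases the old log block. returns the simulated cost.
 */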
double ssd_replacement(ssd_t *s, int elem_num, int lbn)
{
	ssd_element_metadata *metadata;
	ssd_power_element_stat *power_stat;
	int block;
	int log_index;
	int prev_log_block;
	int prev_plane_num;
	int log_block;
	int plane_num;
	int num_valid;
	double cost = 0.0;
	double r_cost, w_cost, xfer_cost;
	int i,j;

	metadata = &(s->elements[elem_num].metadata);
	power_stat = &(s->elements[elem_num].power_stat);

	block = metadata->lba_table[lbn];
	log_index = metadata->block_usage[block].log_index;
	prev_log_block = metadata->log_data[log_index].bsn;
	prev_plane_num = metadata->block_usage[prev_log_block].plane_num;
	num_valid = metadata->block_usage[prev_log_block].num_valid;

	//alloc new logblock and erase old logblock
	log_block = metadata->plane_meta[prev_plane_num].active_block;
	_ssd_alloc_active_block(prev_plane_num, elem_num, s);
	plane_num = metadata->block_usage[log_block].plane_num;
	metadata->log_data[log_index].bsn = log_block; 
	//move valid pages from the old log block to the new one
	j = 0;
	for( i = 0 ; i < s->params.pages_per_block ; i++) {
		if( metadata->log_data[log_index].page[i] != -1) {
			metadata->log_data[log_index].page[i] = j;
			metadata->block_usage[log_block].page[j] = 1;
			metadata->block_usage[log_block].num_valid++;
			j++;
		}
		metadata->block_usage[prev_log_block].page[i] = -1;
	}
	metadata->block_usage[prev_log_block].num_valid = 0;

	//plane metadata update
	metadata->plane_meta[prev_plane_num].valid_pages -= metadata->block_usage[log_block].num_valid;
	metadata->plane_meta[plane_num].valid_pages += metadata->block_usage[log_block].num_valid;

	//cost
	//read
	r_cost = s->params.page_read_latency * num_valid;
	cost += r_cost;
	ssd_power_flash_calculate(SSD_POWER_FLASH_READ, r_cost, power_stat, s);

	//write
	w_cost = s->params.page_write_latency * num_valid;
	cost += w_cost;
	ssd_power_flash_calculate(SSD_POWER_FLASH_WRITE, w_cost, power_stat, s);

	//transfer cost
	for( i = 0 ; i < num_valid ; i++) {
		double xfer_cost;
		xfer_cost = ssd_crossover_cost(s, metadata, power_stat, prev_log_block, log_block);
		cost += xfer_cost;
		s->elements[elem_num].stat.tot_xfer_cost += xfer_cost;
	}

	//erase U block
	cost += s->params.block_erase_latency;
	ssd_power_flash_calculate(SSD_POWER_FLASH_ERASE, s->params.block_erase_latency, power_stat, s);
	ssd_update_free_block_status(prev_log_block, prev_plane_num, metadata, s);
	ssd_update_block_lifetime(simtime+cost, prev_log_block, metadata);
	s->elements[elem_num].stat.pages_moved += num_valid;
	s->elements[elem_num].stat.num_clean ++;
	s->elements[elem_num].stat.num_replacement++;
	metadata->plane_meta[prev_plane_num].num_cleans++;

	return cost;

}