Code example #1 (score: 0)
File: ssd_refresh_bk.c — Project: IMCG/Disksim-ssd
/*
 * Scan the disk's refresh queue and issue refresh operations for every block
 * whose retention period will expire before the next scheduled refresh pass.
 *
 * Blocks selected for refresh are removed from the refresh queue, bucketed
 * into one issue list per element, and handed to
 * ssd_invoke_element_refresh_fcfs() which charges the cost and schedules the
 * completion event.
 *
 * Parameters:
 *   currdisk - the SSD being refreshed
 *   now      - current simulation time (currently only used by the
 *              commented-out idle-time accounting; kept for interface
 *              compatibility)
 */
void ssd_perform_refresh(ssd_t *currdisk, double now)
{
  int size = ll_get_size(currdisk->refresh_queue);
  int i = 0, blocks_to_refresh = 0;

  /* Upper bound on the time of the next refresh pass: the refresh interval
   * plus the worst-case service time for refreshing every block of every
   * element (refresh_service_time is per-block, in the same units after the
   * /1000 scaling used by the original code). */
  double next_refresh_time = currdisk->params.refresh_interval +
      currdisk->params.refresh_service_time * currdisk->params.nelements *
      currdisk->params.blocks_per_element / 1000;

  /* One issue list per element so refreshes can be dispatched per element. */
  listnode **clean_blocks_issue_list =
      (listnode **)malloc(currdisk->params.nelements * sizeof(listnode *));
  if (clean_blocks_issue_list == NULL) {
    /* BUG FIX: the original dereferenced an unchecked malloc result. */
    fprintf(stderr, "ssd_perform_refresh: out of memory\n");
    return;
  }

  for (i = 0; i < currdisk->params.nelements; i++)
    ll_create(&clean_blocks_issue_list[i]);

  i = 0;
  while (i < size) {

    listnode *currnode = ll_get_nth_node(currdisk->refresh_queue, i);
    block_metadata *currblock = (block_metadata *)currnode->data;
    //assert(now - least_retention_page->time_of_last_stress >0);
    //least_retention_page->retention_period -= (now - least_retention_page->time_of_last_stress); //account for time spent idling.

    if (ssd_block_dead(currblock, currdisk)) {
      /* Too late, block is dead. Handle it accordingly.
       * BUG FIX: the original did a bare 'continue' here without advancing
       * 'i', which spins forever on the first dead block. */
      i++;
      continue;
    }

    /* Does currblock need refresh *now* (i.e. before the next pass)? */
    if (current_retention_period(currblock) < next_refresh_time ||
        ssd_virtual_retention_expired(currblock, currdisk)) {
      refresh_queue_free_node(currdisk->refresh_queue, currblock);
      blocks_to_refresh++;
      size--;
      ll_insert_at_tail(clean_blocks_issue_list[currblock->elem_num], currblock);
      /* BUG FIX: removing the node at index i shifts the next node into
       * slot i; the original still did i++ here and silently skipped it. */
      continue;
    }
    i++;
  }

  if (blocks_to_refresh != 0) {
    fprintf(stderr, "# of blocks to refresh :%d\n", blocks_to_refresh);
  }

  for (i = 0; i < currdisk->params.nelements; i++) {
    if (!is_queue_empty(clean_blocks_issue_list[i])) {
      fprintf(stderr, "About to refresh %d blocks in elem #%d\n",
              ll_get_size(clean_blocks_issue_list[i]), i);
      ssd_invoke_element_refresh_fcfs(i, clean_blocks_issue_list[i], currdisk);
    }
    /* BUG FIX: release every per-element list; the original leaked the
     * (empty) lists of elements that had nothing to refresh. */
    ll_release(clean_blocks_issue_list[i]);
  }
  /* BUG FIX: the list-pointer array itself was never freed. */
  free(clean_blocks_issue_list);
}
Code example #2 (score: 0)
File: ssd_refresh_bk.c — Project: IMCG/Disksim-ssd
/*
 * Refresh all queued blocks of one element in FCFS order.
 *
 * Every block in 'blocks_to_refresh' (all of which must belong to element
 * 'elem_num') is refreshed individually; the per-block costs are summed
 * because refreshes are not batched.  If any cost was incurred, the element
 * is marked busy and a single SSD_REFRESH_ELEMENT event is scheduled at
 * simtime + total cost; access-time statistics are updated as well.
 *
 * Ownership note: the caller retains ownership of 'blocks_to_refresh'.
 */
void ssd_invoke_element_refresh_fcfs(int elem_num,listnode *blocks_to_refresh,ssd_t *currdisk)
{
  double total_cost = 0;
  int num_blocks = ll_get_size(blocks_to_refresh);
  int idx;

  /* Charge each block's refresh cost; costs accumulate since operations
   * on a single element are serialized. */
  for (idx = 0; idx < num_blocks; idx++) {
    listnode *node = ll_get_nth_node(blocks_to_refresh, idx);
    block_metadata *blk = (block_metadata *)node->data;
    ASSERT(blk->elem_num == elem_num);
    total_cost += ssd_refresh_block(blk, currdisk);
  }

  if (total_cost > 0) {
    ssd_element *elem = &currdisk->elements[elem_num];
    ioreq_event *ev;

    elem->media_busy = TRUE;

    /* For this event type the 'blkno' field carries the element number. */
    ev = (ioreq_event *)getfromextraq();
    ev->devno = currdisk->devno;
    ev->time = simtime + total_cost;
    ev->blkno = elem_num;
    ev->ssd_elem_num = elem_num;
    ev->type = SSD_REFRESH_ELEMENT;
    ev->flags = SSD_REFRESH_ELEMENT;
    ev->busno = -1;
    ev->bcount = -1;

    stat_update(&currdisk->stat.acctimestats, total_cost);
    addtointq((event *)ev);

    /* stat */
    elem->stat.tot_refresh_time += total_cost;
  }
}
Code example #3 (score: 0)
File: ssd_timing.c — Project: ESOS-Lab/EnergySim
/*
 * Serve a batch of overlapped requests on a single element.
 *
 * All requests in 'reqs' must be of the same type (all reads or all writes;
 * asserted below).  The requests are first distributed over the element's
 * parallel units by ssd_pick_parunits(), then served in rounds: one request
 * per active parallel unit per round, because multiple requests queued on
 * the same unit must be serialized.  Per-request access/schedule times and
 * flash power consumption are accounted along the way.
 *
 * Returns the maximum schedule time (cost) over all served requests; this
 * value is also added to power_stat->acc_time.
 */
static double ssd_issue_overlapped_ios(ssd_req **reqs, int total, int elem_num, ssd_t *s)
{
    double max_cost = 0;
	double parunit_op_cost[SSD_MAX_PARUNITS_PER_ELEM];
    double parunit_tot_cost[SSD_MAX_PARUNITS_PER_ELEM];
    ssd_element_metadata *metadata;
    ssd_power_element_stat *power_stat;

    int lbn;
	int offset;
    int i;
    int read_cycle = 0;
    listnode **parunits;

    // all the requests must be of the same type
    for (i = 1; i < total; i ++) {
        ASSERT(reqs[i]->is_read == reqs[0]->is_read);
    }

    // is this a set of read requests?
    if (reqs[0]->is_read) {
        read_cycle = 1;
    }

    memset(parunit_tot_cost, 0, sizeof(double)*SSD_MAX_PARUNITS_PER_ELEM);

    // find the planes to which the reqs are to be issued
    metadata = &(s->elements[elem_num].metadata);
    power_stat = &(s->elements[elem_num].power_stat);
    parunits = ssd_pick_parunits(reqs, total, elem_num, metadata, s);

    // repeat until we've served all the requests
    while (1) {
        double max_op_cost = 0;   // NOTE(review): never used in this version
		double read_xfer_cost = 0.0;
		double write_xfer_cost = 0.0;
        int active_parunits = 0;
        int op_count = 0;

        // do we still have any request to service?
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i ++) {
            if (ll_get_size(parunits[i]) > 0) {
                active_parunits ++;
            }
        }

        // no more requests -- get out
        if (active_parunits == 0) {
            break;
        }

        // clear this arrays for storing costs
        memset(parunit_op_cost, 0, sizeof(double)*SSD_MAX_PARUNITS_PER_ELEM);

        // begin a round of serving. we serve one request per
        // parallel unit. if an unit has more than one request
        // in the list, they have to be serialized.
        max_cost = 0;
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i ++) {
            int size;

            size = ll_get_size(parunits[i]);
            if (size > 0) {
				int apn;
                // this parallel unit has a request to serve
                ssd_req *r;
                listnode *n = ll_get_nth_node(parunits[i], 0);

                op_count ++;
                ASSERT(op_count <= active_parunits);

                // get the request
                r = (ssd_req *)n->data;
                lbn = ssd_logical_blockno(r->blk, s);
				apn = r->blk/s->params.page_size;
				// NOTE(review): offset appears to be the page slot within the
				// block; the '-1' presumably reserves one page (summary page?)
				// — confirm against the FTL mapping code.
				offset = (apn/s->params.nelements)%(s->params.pages_per_block-1);
				parunit_op_cost[i] = 0;

                if (r->is_read) {
					int block = metadata->lba_table[lbn];

					// Three read cases: (1) lbn unmapped, (2) mapped with no
					// log block, (3) mapped with a log block.  Cases 1 and 2
					// cost one page read; case 3 adds a second page read
					// (spare/log lookup) and bumps the spare_read counter.
					if(block == -1){
						parunit_op_cost[i] = s->params.page_read_latency;
						//Micky
						ssd_power_flash_calculate(SSD_POWER_FLASH_READ, s->params.page_read_latency, power_stat, s);
					}else if(metadata->block_usage[block].log_index == -1){
						parunit_op_cost[i] = s->params.page_read_latency;
						//Micky
						ssd_power_flash_calculate(SSD_POWER_FLASH_READ, s->params.page_read_latency, power_stat, s);
					}else{
						parunit_op_cost[i] = s->params.page_read_latency;
						//Micky
						ssd_power_flash_calculate(SSD_POWER_FLASH_READ, s->params.page_read_latency, power_stat, s);
						parunit_op_cost[i] += s->params.page_read_latency;
						ssd_power_flash_calculate(SSD_POWER_FLASH_READ, s->params.page_read_latency, power_stat, s);
						s->spare_read++;
					}

					//tiel xfer cost
					read_xfer_cost += ssd_data_transfer_cost(s,r->count);
                } else { //for write
                    int plane_num = r->plane_num;
					// issue the write to the current active page.
                    // we need to transfer the data across the serial pins for write.
					metadata->active_block = metadata->plane_meta[plane_num].active_block;
                    // check lbn table
					if(metadata->lba_table[lbn] == -1 ) {
						// first write to this lbn: map it to the active block
						metadata->lba_table[lbn] = metadata->active_block;
						parunit_op_cost[i] = _ssd_write_page_osr(s, metadata, lbn, offset, power_stat);
						_ssd_alloc_active_block(plane_num, elem_num, s);
					}
					else { //if already mapped, check log block
						int tmp_block = metadata->lba_table[lbn];
						if(metadata->block_usage[tmp_block].page[offset] == -1) {
							// target page slot still free: write in place
							parunit_op_cost[i] = _ssd_write_page_osr(s, metadata, lbn, offset, power_stat);
						}
						else {
							if (metadata->block_usage[tmp_block].log_index == -1) {
								// page occupied and no log block yet: allocate
								// one and write the update there
								metadata->block_usage[tmp_block].log_index = _ssd_alloc_log_block(plane_num, elem_num, s, tmp_block);
								parunit_op_cost[i] = _ssd_write_log_block_osr(s, metadata, lbn, offset, power_stat);
							}
							else {
								if(_last_page_in_log_block(metadata, s, tmp_block)){
									// log block full: merge/clean it, then
									// allocate a fresh log block if needed
									int new_block;
									parunit_op_cost[i] += ssd_invoke_logblock_cleaning(elem_num, s, lbn);
									new_block = metadata->lba_table[lbn];
									if(metadata->block_usage[new_block].log_index == -1){
										metadata->block_usage[new_block].log_index = _ssd_alloc_log_block(plane_num, elem_num, s, tmp_block);
									}
								}else{
									parunit_op_cost[i] += _ssd_write_log_block_osr(s, metadata, lbn, offset, power_stat);
								}
							}
						}
					}
					write_xfer_cost += ssd_data_transfer_cost(s,r->count);
				}

                ASSERT(r->count <= s->params.page_size);

                // calc the cost: the access time should be something like this
                // for read
                if (read_cycle) {
                    if (SSD_PARUNITS_PER_ELEM(s) > 4) {
                        printf("modify acc time here ...\n");
                        ASSERT(0);
                    }
                    if (op_count == 1) {
						r->acctime = parunit_op_cost[i] + read_xfer_cost;
                        r->schtime = parunit_tot_cost[i] + r->acctime;
                    } else {
						r->acctime = ssd_data_transfer_cost(s,r->count);
                        r->schtime = parunit_tot_cost[i] + read_xfer_cost + parunit_op_cost[i];
                    }
                } else {
                    // for write
                    r->acctime = parunit_op_cost[i];
                    r->schtime = parunit_tot_cost[i] + write_xfer_cost + r->acctime;
                }

                // find the maximum cost for this round of operations
                if (max_cost < r->schtime) {
                    max_cost = r->schtime;
                }

                // release the node from the linked list
                ll_release_node(parunits[i], n);
            }
		}
		// charge the bus-transfer energy for this round's reads and writes
		ssd_power_flash_calculate(SSD_POWER_FLASH_BUS_DATA_TRANSFER, read_xfer_cost, power_stat, s);
		ssd_power_flash_calculate(SSD_POWER_FLASH_BUS_DATA_TRANSFER, write_xfer_cost, power_stat, s);

        // we can start the next round of operations only after all
        // the operations in the first round are over because we're
        // limited by the one set of pins to all the parunits
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i ++) {
            parunit_tot_cost[i] = max_cost;
        }
    }

    for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i ++) {
        ll_release(parunits[i]);
    }
    free(parunits);

	power_stat->acc_time += max_cost;

    return max_cost;
}
Code example #4 (score: 0)
File: ssd_clean.c — Project: Anurag461/disksim
/*
 * Greedy cleaning-victim selection.
 *
 * Pass 1 walks every block of (plane_num, elem_num) and keeps a list of the
 * blocks that tied or lowered the running minimum of valid pages (inserted
 * at the head, so the head holds the true minimum).  Pass 2 walks that list
 * and picks a block, rate-limiting overly worn blocks via the wear-aware
 * policy (and, when MIGRATE is enabled, possibly migrating data first and
 * returning the migration target instead).  The last candidate is always
 * accepted so the function cannot fail to pick a block.
 *
 * *mcost receives any migration cost (0 unless MIGRATE kicks in).
 */
static int ssd_pick_block_to_clean2(int plane_num, int elem_num, double *mcost, ssd_element_metadata *metadata, ssd_t *s)
{
    listnode *candidates;
    double avg_lifetime;
    int blk_no;
    int count;
    int chosen = -1;
    int fewest_valid = s->params.pages_per_block - 1; // one page goes for the summary info

    *mcost = 0;

    // average remaining lifetime over all blocks in this element
    avg_lifetime = ssd_compute_avg_lifetime(plane_num, elem_num, s);

    // pass 1: collect greedily selected blocks (running-minimum valid pages)
    ll_create(&candidates);
    for (blk_no = 0; blk_no < s->params.blocks_per_element; blk_no++) {
        if (!_ssd_pick_block_to_clean(blk_no, plane_num, elem_num, metadata, s))
            continue;
        if (metadata->block_usage[blk_no].num_valid > fewest_valid)
            continue;

        ASSERT(blk_no == metadata->block_usage[blk_no].block_num);
        ll_insert_at_head(candidates, (void *)&metadata->block_usage[blk_no]);
        fewest_valid = metadata->block_usage[blk_no].num_valid;
        chosen = blk_no;
    }

    // at least one candidate must exist; reset before the real selection
    ASSERT(chosen != -1);
    chosen = -1;

    // pass 2: from the greedily picked blocks, select one after rate
    // limiting the overly used blocks
    count = ll_get_size(candidates);

    //printf("plane %d elem %d size %d avg lifetime %f\n",
    //  plane_num, elem_num, size, avg_lifetime);

    for (blk_no = 0; blk_no < count; blk_no++) {
        listnode *node = ll_get_nth_node(candidates, blk_no);
        block_metadata *bm = (block_metadata *)node->data;

        if (blk_no == 0) {
            // head of the list is the final (smallest) minimum recorded
            ASSERT(fewest_valid == bm->num_valid);
        }

        // this is the last of the greedily picked blocks: select it!
        if (blk_no == count - 1) {
            chosen = bm->block_num;
            break;
        }

        // wear-agnostic policy takes the first (best) candidate outright
        if (s->params.cleaning_policy == DISKSIM_SSD_CLEANING_POLICY_GREEDY_WEAR_AGNOSTIC) {
            chosen = bm->block_num;
            break;
        }

#if MIGRATE
        {
            // migration: if data was moved, use the new block instead
            int mig_blk = ssd_pick_wear_aware_with_migration(bm->block_num, bm->rem_lifetime, avg_lifetime, mcost, bm->plane_num, elem_num, s);
            if (mig_blk != bm->block_num) {
                chosen = mig_blk;
                break;
            }
        }
#endif

        // pick this block giving consideration to its life time
        if (ssd_pick_wear_aware(bm->block_num, bm->rem_lifetime, avg_lifetime, s)) {
            chosen = bm->block_num;
            break;
        }
    }

    ll_release(candidates);

    ASSERT(chosen != -1);
    return chosen;
}
Code example #5 (score: 0)
File: ssd_timing.c — Project: Anurag461/disksim
/*
 * Serve a batch of overlapped requests on a single element.
 *
 * All requests in 'reqs' must be of the same type (all reads or all writes;
 * asserted below).  The requests are distributed over the element's parallel
 * units by ssd_pick_parunits(), then served in rounds: one request per
 * active parallel unit per round, since multiple requests queued on the
 * same unit must be serialized.  Per-request access time (acctime) and
 * schedule time (schtime) are computed per round; the serial-pin data
 * transfers are what serialize requests across units within a round.
 *
 * Returns the maximum schedule time (cost) over all served requests.
 */
static double ssd_issue_overlapped_ios(ssd_req **reqs, int total, int elem_num, ssd_t *s)
{
    double max_cost = 0;
    double parunit_op_cost[SSD_MAX_PARUNITS_PER_ELEM];
    double parunit_tot_cost[SSD_MAX_PARUNITS_PER_ELEM];
    ssd_element_metadata *metadata;
    int lpn;
    int i;
    int read_cycle = 0;
    listnode **parunits;

    // all the requests must be of the same type
    for (i = 1; i < total; i ++) {
        ASSERT(reqs[i]->is_read == reqs[0]->is_read);
    }

    // is this a set of read requests?
    if (reqs[0]->is_read) {
        read_cycle = 1;
    }

    memset(parunit_tot_cost, 0, sizeof(double)*SSD_MAX_PARUNITS_PER_ELEM);

    // find the planes to which the reqs are to be issued
    metadata = &(s->elements[elem_num].metadata);
    parunits = ssd_pick_parunits(reqs, total, elem_num, metadata, s);

    // repeat until we've served all the requests
    while (1) {
        //double tot_xfer_cost = 0;
        double max_op_cost = 0;   // NOTE(review): never used in this version
        int active_parunits = 0;
        int op_count = 0;

        // do we still have any request to service?
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i ++) {
            if (ll_get_size(parunits[i]) > 0) {
                active_parunits ++;
            }
        }

        // no more requests -- get out
        if (active_parunits == 0) {
            break;
        }

        // clear this arrays for storing costs
        memset(parunit_op_cost, 0, sizeof(double)*SSD_MAX_PARUNITS_PER_ELEM);

        // begin a round of serving. we serve one request per
        // parallel unit. if an unit has more than one request
        // in the list, they have to be serialized.
        max_cost = 0;
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i ++) {
            int size;

            size = ll_get_size(parunits[i]);
            if (size > 0) {
                // this parallel unit has a request to serve
                ssd_req *r;
                listnode *n = ll_get_nth_node(parunits[i], 0);

                op_count ++;
                ASSERT(op_count <= active_parunits);

                // get the request
                r = (ssd_req *)n->data;
                lpn = ssd_logical_pageno(r->blk, s);

                if (r->is_read) {
                    // a read costs one page-read latency
                    parunit_op_cost[i] = s->params.page_read_latency;
                } else {
                    int plane_num = r->plane_num;
                    // if this is the last page on the block, allocate a new block
                    if (ssd_last_page_in_block(metadata->plane_meta[plane_num].active_page, s)) {
                        _ssd_alloc_active_block(plane_num, elem_num, s);
                    }

                    // issue the write to the current active page.
                    // we need to transfer the data across the serial pins for write.
                    metadata->active_page = metadata->plane_meta[plane_num].active_page;
                    //printf("elem %d plane %d ", elem_num, plane_num);
                    parunit_op_cost[i] = _ssd_write_page_osr(s, metadata, lpn);
                }

                ASSERT(r->count <= s->params.page_size);

                // calc the cost: the access time should be something like this
                // for read
                if (read_cycle) {
                    if (SSD_PARUNITS_PER_ELEM(s) > 4) {
                        printf("modify acc time here ...\n");
                        ASSERT(0);
                    }
                    // the (op_count-1)/op_count factors stagger each unit's
                    // transfer behind the earlier units' transfers this round
                    if (op_count == 1) {
                        r->acctime = parunit_op_cost[i] + ssd_data_transfer_cost(s,s->params.page_size);
                        r->schtime = parunit_tot_cost[i] + (op_count-1)*ssd_data_transfer_cost(s,s->params.page_size) + r->acctime;
                    } else {
                        r->acctime = ssd_data_transfer_cost(s,s->params.page_size);
                        r->schtime = parunit_tot_cost[i] + op_count*ssd_data_transfer_cost(s,s->params.page_size) + parunit_op_cost[i];
                    }
                } else {
                    // for write
                    r->acctime = parunit_op_cost[i] + ssd_data_transfer_cost(s,s->params.page_size);
                    r->schtime = parunit_tot_cost[i] + (op_count-1)*ssd_data_transfer_cost(s,s->params.page_size) + r->acctime;
                }


                // find the maximum cost for this round of operations
                if (max_cost < r->schtime) {
                    max_cost = r->schtime;
                }

                // release the node from the linked list
                ll_release_node(parunits[i], n);
            }
        }

        // we can start the next round of operations only after all
        // the operations in the first round are over because we're
        // limited by the one set of pins to all the parunits
        for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i ++) {
            parunit_tot_cost[i] = max_cost;
        }
    }

    for (i = 0; i < SSD_PARUNITS_PER_ELEM(s); i ++) {
        ll_release(parunits[i]);
    }
    free(parunits);

    return max_cost;
}