Example #1
/*
 * updates the status of an erased block
 */
void ssd_update_free_block_status(int blk, int plane_num, ssd_element_metadata *metadata, ssd_t *s)
{
    int bitpos;

    // clear the bit corresponding to this block in the
    // free-blocks bitmap so that it can be reused later
    bitpos = ssd_block_to_bitpos(s, blk);
    ssd_clear_bit(metadata->free_blocks, bitpos);
    metadata->block_usage[blk].state = SSD_BLOCK_CLEAN;
    metadata->block_usage[blk].bsn = 0;
    metadata->tot_free_blocks ++;
    metadata->plane_meta[plane_num].free_blocks ++;
    ssd_assert_free_blocks(s, metadata);

    // there must be no valid pages in the erased block
    ASSERT(metadata->block_usage[blk].num_valid == 0);
    ssd_assert_valid_pages(plane_num, metadata, s);
}
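
The bit manipulation above relies on the simulator's ssd_block_to_bitpos/ssd_clear_bit helpers, whose sources are not shown here. As a point of reference, the sketch below shows one plausible way such a byte-packed bitmap could work, with a set bit meaning "block not free" (matching how the examples use it); the helper names are hypothetical and need not match the simulator's actual implementation.

#include <limits.h>

/* hypothetical byte-packed bitmap helpers: one bit per block, set = not free */
static void bitmap_set(unsigned char *map, int bitpos)
{
    map[bitpos / CHAR_BIT] |= (unsigned char)(1u << (bitpos % CHAR_BIT));
}

static void bitmap_clear(unsigned char *map, int bitpos)
{
    map[bitpos / CHAR_BIT] &= (unsigned char)~(1u << (bitpos % CHAR_BIT));
}

static int bitmap_test(const unsigned char *map, int bitpos)
{
    return (map[bitpos / CHAR_BIT] >> (bitpos % CHAR_BIT)) & 1;
}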
Example #2
/*
 * migrate data from a cold block to "to_blk"
 */
int ssd_migrate_cold_data(int to_blk, double *mcost, int plane_num, int elem_num, ssd_t *s)
{
    int i;
    int from_blk = -1;
    double oldest_erase_time = simtime;
    double cost = 0;
    int bitpos;

#if SSD_ASSERT_ALL
    int f1;
    int f2;
#endif

    ssd_element_metadata *metadata = &(s->elements[elem_num].metadata);

    // first, select the coldest of all the blocks.
    // one way to do this is to pick the block with
    // the oldest erasure time.
    if (plane_num == -1) {
        for (i = 0; i < s->params.blocks_per_element; i ++) {
            if (metadata->block_usage[i].num_valid > 0) {
                if (metadata->block_usage[i].time_of_last_erasure < oldest_erase_time) {
                    oldest_erase_time = metadata->block_usage[i].time_of_last_erasure;
                    from_blk = i;
                }
            }
        }
    } else {

#if SSD_ASSERT_ALL
        f1 = ssd_free_bits(plane_num, elem_num, metadata, s);
        ASSERT(f1 == metadata->plane_meta[metadata->block_usage[to_blk].plane_num].free_blocks);
#endif

        bitpos = plane_num * s->params.blocks_per_plane;
        for (i = bitpos; i < bitpos + (int)s->params.blocks_per_plane; i ++) {
            int block = ssd_bitpos_to_block(i, s);
            ASSERT(metadata->block_usage[block].plane_num == plane_num);

            if (metadata->block_usage[block].num_valid > 0) {
                if (metadata->block_usage[block].time_of_last_erasure < oldest_erase_time) {
                    oldest_erase_time = metadata->block_usage[block].time_of_last_erasure;
                    from_blk = block;
                }
            }
        }
    }

    ASSERT(from_blk != -1);
    if (plane_num != -1) {
        ASSERT(metadata->block_usage[from_blk].plane_num == metadata->block_usage[to_blk].plane_num);
    }

    // next, clean the block to which we'll transfer the
    // cold data
    cost += _ssd_clean_block_fully(to_blk, metadata->block_usage[to_blk].plane_num, elem_num, metadata, s);

#if SSD_ASSERT_ALL
    if (plane_num != -1) {
        f2 = ssd_free_bits(plane_num, elem_num, metadata, s);
        ASSERT(f2 == metadata->plane_meta[metadata->block_usage[to_blk].plane_num].free_blocks);
    }
#endif

    // then, migrate the cold data to the worn-out block.
    // to do so, we first read all the valid pages
    cost += metadata->block_usage[from_blk].num_valid * s->params.page_read_latency;
    // include the write cost
    cost += metadata->block_usage[from_blk].num_valid * s->params.page_write_latency;
    // if the src and dest blocks are on different planes
    // include the transfer cost also
    cost += ssd_crossover_cost(s, metadata, from_blk, to_blk);

    // the cost of erasing the cold block (represented by from_blk)
    // will be added later ...

    // finally, update the metadata
    metadata->block_usage[to_blk].bsn = metadata->block_usage[from_blk].bsn;
    metadata->block_usage[to_blk].num_valid = metadata->block_usage[from_blk].num_valid;
    metadata->block_usage[from_blk].num_valid = 0;

    for (i = 0; i < s->params.pages_per_block; i ++) {
        int lpn = metadata->block_usage[from_blk].page[i];
        if (lpn != -1) {
            ASSERT(metadata->lba_table[lpn] == (from_blk * s->params.pages_per_block + i));
            metadata->lba_table[lpn] = to_blk * s->params.pages_per_block + i;
        }
        metadata->block_usage[to_blk].page[i] = metadata->block_usage[from_blk].page[i];
    }
    metadata->block_usage[to_blk].state = metadata->block_usage[from_blk].state;

    bitpos = ssd_block_to_bitpos(s, to_blk);
    ssd_set_bit(metadata->free_blocks, bitpos);
    metadata->tot_free_blocks --;
    metadata->plane_meta[metadata->block_usage[to_blk].plane_num].free_blocks --;

#if SSD_ASSERT_ALL
    if (plane_num != -1) {
        f2 = ssd_free_bits(plane_num, elem_num, metadata, s);
        ASSERT(f2 == metadata->plane_meta[metadata->block_usage[to_blk].plane_num].free_blocks);
    }
#endif

    ssd_assert_free_blocks(s, metadata);
    ASSERT(metadata->block_usage[from_blk].num_valid == 0);

#if ASSERT_FREEBITS
    if (plane_num != -1) {
        f2 = ssd_free_bits(plane_num, elem_num, metadata, s);
        ASSERT(f1 == f2);
    }
#endif

    *mcost = cost;

    // stat
    metadata->tot_migrations ++;
    metadata->tot_pgs_migrated += metadata->block_usage[to_blk].num_valid;
    metadata->mig_cost += cost;

    return from_blk;
}
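
For a quick sanity check of the cost arithmetic above, the small helper below recomputes the migration cost from the same three components (page reads, page rewrites, and an optional crossover transfer). It takes the latencies as plain parameters, so it is only an illustrative sketch, not part of the simulator's API; the erase cost of the source block is deliberately left out, as the comment in the function notes.

/* hypothetical helper: estimated time to migrate one block's valid pages */
static double estimate_migration_cost(int num_valid_pages,
                                      double page_read_latency,
                                      double page_write_latency,
                                      double crossover_cost)
{
    double cost = 0.0;
    cost += num_valid_pages * page_read_latency;   /* read every valid page */
    cost += num_valid_pages * page_write_latency;  /* rewrite it in the destination block */
    cost += crossover_cost;                        /* extra transfer if src/dst planes differ */
    /* the erase of the source block is accounted for later by the caller */
    return cost;
}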
Example #3
/*
 * a block can be cleaned only after it has been fully used.
 */
int ssd_can_clean_block(ssd_t *s, ssd_element_metadata *metadata, int blk)
{
    int bitpos = ssd_block_to_bitpos(s, blk);
    return ((ssd_bit_on(metadata->free_blocks, bitpos)) && (metadata->block_usage[blk].state == SSD_BLOCK_SEALED));
}
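
A typical caller would apply this predicate while scanning candidate blocks. The loop below is only a usage sketch and assumes the concatenated layout, in which a plane owns the contiguous block range [plane_num * blocks_per_plane, (plane_num + 1) * blocks_per_plane); with the striped mappings shown later, the iteration order would have to change accordingly.

/* hypothetical usage: find the first cleanable block of a plane (concatenated layout) */
static int find_cleanable_block(ssd_t *s, ssd_element_metadata *metadata, int plane_num)
{
    int first = plane_num * (int)s->params.blocks_per_plane;
    int blk;

    for (blk = first; blk < first + (int)s->params.blocks_per_plane; blk ++) {
        if (ssd_can_clean_block(s, metadata, blk))
            return blk;
    }

    return -1;  /* no sealed, fully used block in this plane */
}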
Example #4
/*
 * vp
 * description: this routine allocates and initializes the ssd element metadata
 * structures. FIXME: if the system is powered up, this init routine has to
 * populate the structures by scanning the summary pages (to implement this,
 * we can read from a disk checkpoint file). but this is future work.
 */
void ssd_element_metadata_init(int elem_number, ssd_element_metadata *metadata, ssd_t *currdisk)
{
    gang_metadata *g;
    unsigned int ppage;
    unsigned int i;
    unsigned int bytes_to_alloc;
    unsigned int tot_blocks = currdisk->params.blocks_per_element;
    unsigned int tot_pages = tot_blocks * currdisk->params.pages_per_block;
    unsigned int reserved_blocks, usable_blocks, export_size;
    unsigned int reserved_blocks_per_plane, usable_blocks_per_plane;
    unsigned int bitpos;
    unsigned int active_block;
    unsigned int elem_index;
    unsigned int bsn = 1;
    int plane_block_mapping = currdisk->params.plane_block_mapping;

    //////////////////////////////////////////////////////////////////////////////
    // the active page starts at the 1st page of the reserved section
    reserved_blocks_per_plane = (currdisk->params.reserve_blocks * currdisk->params.blocks_per_plane) / 100;
    usable_blocks_per_plane = currdisk->params.blocks_per_plane - reserved_blocks_per_plane;
    reserved_blocks = reserved_blocks_per_plane * currdisk->params.planes_per_pkg;
    usable_blocks = usable_blocks_per_plane * currdisk->params.planes_per_pkg;

    //////////////////////////////////////////////////////////////////////////////
    // initialize the free blocks and free pages
    metadata->tot_free_blocks = reserved_blocks;

    //////////////////////////////////////////////////////////////////////////////
    // assign the gang and init the element's free pages
    metadata->gang_num = elem_number / currdisk->params.elements_per_gang;
    currdisk->gang_meta[metadata->gang_num].elem_free_pages[elem_number] = \
        metadata->tot_free_blocks * SSD_DATA_PAGES_PER_BLOCK(currdisk);
    g = &currdisk->gang_meta[metadata->gang_num];
    elem_index = elem_number % currdisk->params.elements_per_gang;

    //////////////////////////////////////////////////////////////////////////////
    // let's begin cleaning with the first plane
    metadata->plane_to_clean = 0;
    metadata->plane_to_write = 0;
    metadata->block_alloc_pos = 0;
    metadata->reqs_waiting = 0;
    metadata->tot_migrations = 0;
    metadata->tot_pgs_migrated = 0;
    metadata->mig_cost = 0;

    //////////////////////////////////////////////////////////////////////////////
    // init the plane metadata
    for (i = 0; i < (unsigned int)currdisk->params.planes_per_pkg; i ++) {
        int blocks_to_skip;

        switch(plane_block_mapping) {
            case PLANE_BLOCKS_CONCAT:
                assert(!"Refresh Operations not supported with PLANE_BLOCKS_CONCAT mapping");
                blocks_to_skip = (i*currdisk->params.blocks_per_plane + usable_blocks_per_plane);
                break;

            case PLANE_BLOCKS_PAIRWISE_STRIPE:
                assert(!"Refresh Operations not supported with PLANE_BLOCKS_PAIRWISE_STRIPE mapping");
                blocks_to_skip = (i/2)*(2*currdisk->params.blocks_per_plane) + (2*usable_blocks_per_plane) + i%2;
                break;

            case PLANE_BLOCKS_FULL_STRIPE:
                blocks_to_skip = (currdisk->params.planes_per_pkg * usable_blocks_per_plane) + i;
                break;

            default:
                fprintf(stderr, "Error: unknown plane_block_mapping %d\n", plane_block_mapping);
                exit(1);
        }

        //metadata->plane_meta[i].active_page = blocks_to_skip*currdisk->params.pages_per_block;
        metadata->plane_meta[i].free_blocks = reserved_blocks_per_plane;
        metadata->plane_meta[i].valid_pages = 0;
        metadata->plane_meta[i].clean_in_progress = 0;
        metadata->plane_meta[i].clean_in_block = -1;
        metadata->plane_meta[i].block_alloc_pos = i*currdisk->params.blocks_per_plane;
        metadata->plane_meta[i].parunit_num = i / SSD_PLANES_PER_PARUNIT(currdisk);
        metadata->plane_meta[i].num_cleans = 0;
        metadata->plane_meta[i].dead_blocks = 0;
    }

    //////////////////////////////////////////////////////////////////////////////
    // init the next plane to clean in a parunit
    for (i = 0; i < (unsigned int) SSD_PARUNITS_PER_ELEM(currdisk); i ++) {
        metadata->parunits[i].plane_to_clean = SSD_PLANES_PER_PARUNIT(currdisk)*i;
    }

    // since we reserve one page out of every block to store the summary info,
    // the size exported by the flash disk is a little less.
    export_size = usable_blocks * SSD_DATA_PAGES_PER_BLOCK(currdisk);
    currdisk->data_pages_per_elem = export_size;
    //printf("res blks = %d, use blks = %d act page = %d exp size = %d\n",
    //  reserved_blocks, usable_blocks, metadata->active_page, export_size);

    //////////////////////////////////////////////////////////////////////////////
    // allocate the lba table
    if ((metadata->lba_table = (int *)malloc(export_size * sizeof(int))) == NULL) {
        fprintf(stderr, "Error: malloc to lba table in ssd_element_metadata_init failed\n");
#ifdef __x86_64__
        fprintf(stderr, "Allocation size = %lu\n", export_size * sizeof(int));
#else
        fprintf(stderr, "Allocation size = %u\n", export_size * sizeof(int));
#endif
        exit(1);
    }

    //////////////////////////////////////////////////////////////////////////////
    // allocate the free blocks bit map
    // what if the no of blocks is not divisible by 8?
    if ((tot_blocks % (sizeof(unsigned char) * 8)) != 0) {
        fprintf(stderr, "This case is not yet handled\n");
        exit(1);
    }

    bytes_to_alloc = tot_blocks / (sizeof(unsigned char) * 8);
    if (!(metadata->free_blocks = (unsigned char *)malloc(bytes_to_alloc))) {
        fprintf(stderr, "Error: malloc to free_blocks in ssd_element_metadata_init failed\n");
        fprintf(stderr, "Allocation size = %d\n", bytes_to_alloc);
        exit(1);
    }
    bzero(metadata->free_blocks, bytes_to_alloc);

    //////////////////////////////////////////////////////////////////////////////
    // allocate the block usage array and initialize it
    if (!(metadata->block_usage = (block_metadata *)malloc(tot_blocks * sizeof(block_metadata)))) {
        fprintf(stderr, "Error: malloc to block_usage in ssd_element_metadata_init failed\n");
#ifdef __x86_64__
        fprintf(stderr, "Allocation size = %lu\n", tot_blocks * sizeof(block_metadata));
#else
        fprintf(stderr, "Allocation size = %u\n", tot_blocks * sizeof(block_metadata));
#endif
        exit(1);
    }
    bzero(metadata->block_usage, tot_blocks * sizeof(block_metadata));

    for (i = 0; i < tot_blocks; i ++) {
        int j;
        metadata->block_usage[i].block_num = i;
        metadata->block_usage[i].elem_num = elem_number;
        metadata->block_usage[i].page = (ssd_page_metadata*)malloc(sizeof(ssd_page_metadata) * currdisk->params.pages_per_block);
        assert(metadata->block_usage[i].page);

        // KJ: initialize all the page metadata
        bzero(metadata->block_usage[i].page, currdisk->params.pages_per_block * sizeof(ssd_page_metadata));

        metadata->block_usage[i].entry_in_refresh_queue = 0;
        metadata->block_usage[i].total_pages_written = INITIAL_STRESSES*currdisk->params.pages_per_block;
        metadata->block_usage[i].least_retention_page = &metadata->block_usage[i].page[0];
        metadata->block_usage[i].logical_stresses = INITIAL_STRESSES*currdisk->params.pages_per_block;
        metadata->block_usage[i].physical_stresses = INITIAL_STRESSES*currdisk->params.pages_per_block;

        for (j = 0; j < currdisk->params.pages_per_block; j ++) {
            metadata->block_usage[i].page[j].lpn = -1;
            metadata->block_usage[i].page[j].time_of_last_stress = 0;
            metadata->block_usage[i].page[j].retention_period = 1e10;
            metadata->block_usage[i].page[j].logical_stresses = INITIAL_STRESSES;
            metadata->block_usage[i].page[j].physical_stresses = INITIAL_STRESSES;
            metadata->block_usage[i].page[j].eqn_cycle = INITIAL_STRESSES;
            metadata->block_usage[i].page[j].stress_increment = 1;
            metadata->block_usage[i].page[j].recovery_period_total = 0;
        }

        // assign the plane number to each block
        switch(plane_block_mapping) {
            case PLANE_BLOCKS_CONCAT:
                metadata->block_usage[i].plane_num = i / currdisk->params.blocks_per_plane;
                break;

            case PLANE_BLOCKS_PAIRWISE_STRIPE:
                metadata->block_usage[i].plane_num = (i/(2*currdisk->params.blocks_per_plane))*2 + i%2;
                break;

            case PLANE_BLOCKS_FULL_STRIPE:
                metadata->block_usage[i].plane_num = i % currdisk->params.planes_per_pkg;
                break;

            default:
                fprintf(stderr, "Error: unknown plane_block_mapping %d\n", plane_block_mapping);
                exit(1);
        }

        // set the remaining life time and time of last erasure
        metadata->block_usage[i].rem_lifetime = SSD_MAX_ERASURES;
        metadata->block_usage[i].time_of_last_erasure = simtime;
        // set the block state
        metadata->block_usage[i].state = SSD_BLOCK_CLEAN;

        // init the bsn to be zero
        metadata->block_usage[i].bsn = 0;
    }

    // load the ssd state from the snapshot provided by trace_analyzer.
    if (ta_snapshotfile)
        ssd_init_stress_info_ta(currdisk, metadata, elem_number);
    else if (ds_snapshotfile)
        ssd_init_stress_info_ds(currdisk, metadata, elem_number);

    // check for dead block status.
    int dead_block_count = 0;
    for (i = 0; i < (unsigned int)currdisk->params.blocks_per_element; i ++) {
        if (i < usable_blocks)  // temporarily mark the block as in use.
            metadata->block_usage[i].state = SSD_BLOCK_INUSE;
        if (ssd_block_dead(&metadata->block_usage[i], currdisk))
            dead_block_count ++;
        else
            metadata->block_usage[i].state = SSD_BLOCK_CLEAN;
    }
    fprintf(stderr, "Dead blocks in element #%d : %d\n", elem_number, dead_block_count);

    // if there are more dead blocks than overprovisioned (reserved) blocks,
    // the simulation cannot proceed any further.
    if (dead_block_count >= reserved_blocks) {
        fprintf(stderr, "Total reserved blocks : %d\n", reserved_blocks);
        fprintf(stderr, "Unable to proceed further as dead blocks more than reserved blocks\n");
        fprintf(outputfile, "Total reserved blocks : %d\n", reserved_blocks);
        fprintf(outputfile, "Unable to proceed further as dead blocks more than reserved blocks\n");
        fclose(outputfile);
        exit(1);
    }
    ssd_assert_free_blocks(currdisk, metadata);

    //////////////////////////////////////////////////////////////////////////////
    // initially, we assume that every logical page is mapped
    // onto a physical page. we start from the first phy page
    // and continue to map, leaving the last page of every block
    // to store the summary information.
    ppage = 0;
    i = 0;
    while (i < export_size) {
        int pgnum_in_gang;
        int pp_index;
        int plane_num;
        unsigned int block = SSD_PAGE_TO_BLOCK(ppage, currdisk);

        ASSERT(block < (unsigned int)currdisk->params.blocks_per_element);

        if (metadata->block_usage[block].state == SSD_BLOCK_DEAD) {
            // the block is dead; mark it as not free so it is never used again
            bitpos = ssd_block_to_bitpos(currdisk, block);
            ssd_set_bit(metadata->free_blocks, bitpos);
            ppage += currdisk->params.pages_per_block;
            continue;
        }

        // if this is the last page in the block
        if (ssd_last_page_in_block(ppage, currdisk)) {
            // leave this physical page for summary page and
            // seal the block
            metadata->block_usage[block].state = SSD_BLOCK_SEALED;
            //metadata->block_usage[block].page[currdisk->params.pages_per_block-1].logical_stresses++;
            //metadata->block_usage[block].total_pages_written++;
            // go to next block
            ppage ++;
            block = SSD_PAGE_TO_BLOCK(ppage, currdisk);
        }

        // if this block is in the reserved section, skip it
        // and go to the next block.
        switch(plane_block_mapping) {
            case PLANE_BLOCKS_CONCAT:
            {
                assert(!"Unsupported plane block mapping PLANE_BLOCKS_CONCAT");
                unsigned int block_index = block % currdisk->params.blocks_per_plane;
                if ((block_index >= usable_blocks_per_plane) && (block_index < currdisk->params.blocks_per_plane)) {
                    // go to next block
                    ppage = ssd_first_page_in_next_block(ppage, currdisk);
                    continue;
                }
            }
            break;

            case PLANE_BLOCKS_PAIRWISE_STRIPE:
            {
                assert(!"Unsupported plane block mapping PLANE_BLOCKS_PAIRWISE_STRIPE");
                unsigned int block_index = block % (2*currdisk->params.blocks_per_plane);
                if ((block_index >= 2*usable_blocks_per_plane) && (block_index < 2*currdisk->params.blocks_per_plane)) {
                    ppage = ssd_first_page_in_next_block(ppage, currdisk);
                    continue;
                }
            }
            break;

            case PLANE_BLOCKS_FULL_STRIPE:
                if ((block >= usable_blocks+dead_block_count) && (block < (unsigned int)currdisk->params.blocks_per_element)) {
                    printf("Error: the control should not come here ...\n");
                    ppage = ssd_first_page_in_next_block(ppage, currdisk);
                    continue;
                }
            break;

            default:
                fprintf(stderr, "Error: unknown plane_block_mapping %d\n", plane_block_mapping);
                exit(1);
        }

        // when the control comes here, 'ppage' contains the next page
        // that can be assigned to a logical page.
        // find the index of the phy page within the block
        pp_index = ppage % currdisk->params.pages_per_block;

        // populate the lba table
        metadata->lba_table[i] = ppage;
        pgnum_in_gang = elem_index * export_size + i;
        g->pg2elem[pgnum_in_gang].e = elem_number;

        // mark this block as not free and its state as 'in use'.
        // note that a block could be not free and its state be 'sealed'.
        // it is enough if we set it once while working on the first phy page.
        // also increment the block sequence number.
        if (pp_index == 0) {
            bitpos = ssd_block_to_bitpos(currdisk, block);
            ssd_set_bit(metadata->free_blocks, bitpos);
            metadata->block_usage[block].state = SSD_BLOCK_INUSE;
            metadata->block_usage[block].bsn = bsn ++;
        }

        // increase the usage count per block
        plane_num = metadata->block_usage[block].plane_num;
        metadata->block_usage[block].page[pp_index].lpn = i;
        metadata->block_usage[block].page[pp_index].time_of_last_stress = simtime;
        metadata->block_usage[block].num_valid ++;
        metadata->plane_meta[plane_num].valid_pages ++;

        // go to the next physical page

        ppage ++;

        // go to the next logical page
        i ++;
    }

    ssd_assert_free_blocks(currdisk, metadata);

    //////////////////////////////////////////////////////////////////////////////
    // init the element's active page as well as the plane's active page.
    int total_blocks = currdisk->params.blocks_per_plane * currdisk->params.planes_per_pkg;
    for (i = 0; i < (unsigned int)currdisk->params.planes_per_pkg; i ++) {
        int blocks_to_skip;
        switch(plane_block_mapping) {
            case PLANE_BLOCKS_CONCAT:
                assert(!"Refresh Operations not supported with PLANE_BLOCKS_CONCAT mapping");
                blocks_to_skip = (i*currdisk->params.blocks_per_plane + usable_blocks_per_plane);
                break;

            case PLANE_BLOCKS_PAIRWISE_STRIPE:
                assert(!"Refresh Operations not supported with PLANE_BLOCKS_PAIRWISE_STRIPE mapping");
                blocks_to_skip = (i/2)*(2*currdisk->params.blocks_per_plane) + (2*usable_blocks_per_plane) + i%2;
                break;

            case PLANE_BLOCKS_FULL_STRIPE:
                /*blocks_to_skip = (currdisk->params.planes_per_pkg * usable_blocks_per_plane) + i + 
                                  metadata->plane_meta[i].dead_blocks;*/
                blocks_to_skip = i;
                for (; blocks_to_skip < total_blocks; blocks_to_skip += currdisk->params.planes_per_pkg)
                    if (metadata->block_usage[blocks_to_skip].state == SSD_BLOCK_CLEAN)
                        break;
                break;

            default:
                fprintf(stderr, "Error: unknown plane_block_mapping %d\n", plane_block_mapping);
                exit(1);
        }

        metadata->plane_meta[i].active_page = blocks_to_skip*currdisk->params.pages_per_block;
        //metadata->plane_meta[i].free_blocks = reserved_blocks_per_plane-metadata->plane_meta[i].dead_blocks;
    }
    switch(plane_block_mapping) {
        case PLANE_BLOCKS_CONCAT:
            assert(!"Refresh Operations not supported with PLANE_BLOCKS_CONCAT mapping");
            metadata->active_page = usable_blocks_per_plane * currdisk->params.pages_per_block;
            break;

        case PLANE_BLOCKS_PAIRWISE_STRIPE:
            assert(!"Refresh Operations not supported with PLANE_BLOCKS_PAIRWISE_STRIPE mapping");
            metadata->active_page = (2 * usable_blocks_per_plane) * currdisk->params.pages_per_block;
            break;

        case PLANE_BLOCKS_FULL_STRIPE:
            //metadata->active_page = currdisk->params.planes_per_pkg * (usable_blocks_per_plane+metadata->plane_meta[0].dead_blocks) * currdisk->params.pages_per_block;
            metadata->active_page = metadata->plane_meta[0].active_page;
            break;

        default:
            fprintf(stderr, "Error: unknown plane_block_mapping %d\n", plane_block_mapping);
            exit(1);
    }

    //ASSERT(metadata->active_page == metadata->plane_meta[0].active_page);
    active_block = metadata->active_page / currdisk->params.pages_per_block;

    //////////////////////////////////////////////////////////////////////////////
    // mark the block that corresponds to the active page
    // as not free and 'in_use'.
    ssd_assert_free_blocks(currdisk,metadata);
    switch(currdisk->params.copy_back) {
        case SSD_COPY_BACK_DISABLE:
            assert(!"Refresh Operation not supported for copy back disabled parameter");
            bitpos = ssd_block_to_bitpos(currdisk, active_block);
            ssd_set_bit(metadata->free_blocks, bitpos);
            metadata->block_usage[active_block].state = SSD_BLOCK_INUSE;
            metadata->block_usage[active_block].bsn = bsn ++;
        break;

        case SSD_COPY_BACK_ENABLE:
            for (i = 0; i < (unsigned int)currdisk->params.planes_per_pkg; i ++) {
                int plane_active_block = SSD_PAGE_TO_BLOCK(metadata->plane_meta[i].active_page, currdisk);

                bitpos = ssd_block_to_bitpos(currdisk, plane_active_block);
                ssd_set_bit(metadata->free_blocks, bitpos);
                metadata->block_usage[plane_active_block].state = SSD_BLOCK_INUSE;
                metadata->block_usage[plane_active_block].bsn = bsn ++;
                metadata->tot_free_blocks --;
                metadata->plane_meta[i].free_blocks --;
            }
            ssd_assert_free_blocks(currdisk,metadata);
        break;

        default:
            fprintf(stderr, "Error: invalid copy back policy %d\n",
                currdisk->params.copy_back);
            exit(1);
    }

    //////////////////////////////////////////////////////////////////////////////
    // set the bsn for the ssd element
    metadata->bsn = bsn;
    //printf("set the bsn to %d\n", bsn);
}
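
To make the over-provisioning arithmetic at the top of this routine concrete, here is a stand-alone sketch that repeats the same calculation with made-up geometry numbers (they are purely illustrative and are not the simulator's defaults); one page per block is held back for the summary page, which is why the exported size uses pages_per_block - 1.

#include <stdio.h>

/* illustrative only: recompute the reserved/usable split for a made-up geometry */
int main(void)
{
    int reserve_pct      = 15;      /* params.reserve_blocks, given as a percentage */
    int blocks_per_plane = 2048;
    int planes_per_pkg   = 8;
    int pages_per_block  = 64;

    int reserved_per_plane = (reserve_pct * blocks_per_plane) / 100;   /* 307  */
    int usable_per_plane   = blocks_per_plane - reserved_per_plane;    /* 1741 */
    int reserved_blocks    = reserved_per_plane * planes_per_pkg;      /* 2456 */
    int usable_blocks      = usable_per_plane * planes_per_pkg;        /* 13928 */

    /* one page per block is reserved for summary info */
    int export_size = usable_blocks * (pages_per_block - 1);

    printf("reserved = %d, usable = %d, exported data pages = %d\n",
           reserved_blocks, usable_blocks, export_size);
    return 0;
}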
Example #5
/*
 * vp
 * description: this routine allocates and initializes the ssd element metadata
 * structures. FIXME: if the system is powered up, this init routine has to
 * populate the structures by scanning the summary pages (to implement this,
 * we can read from a disk checkpoint file). but this is future work.
 */
void ssd_element_metadata_init(int elem_number, ssd_element_metadata *metadata, ssd_t *currdisk)
{
    gang_metadata *g;
    unsigned int ppage;
    unsigned int i;
    unsigned int bytes_to_alloc;
    unsigned int tot_blocks = currdisk->params.blocks_per_element;
    unsigned int tot_pages = tot_blocks * currdisk->params.pages_per_block;
    unsigned int reserved_blocks, usable_blocks, export_size;
    unsigned int reserved_blocks_per_plane, usable_blocks_per_plane;
    unsigned int bitpos;
    unsigned int active_block;
    unsigned int elem_index;
    unsigned int bsn = 1;
    int plane_block_mapping = currdisk->params.plane_block_mapping;

    //////////////////////////////////////////////////////////////////////////////
    // the active page starts at the 1st page of the reserved section
    reserved_blocks_per_plane = (currdisk->params.reserve_blocks * currdisk->params.blocks_per_plane) / 100;
    usable_blocks_per_plane = currdisk->params.blocks_per_plane - reserved_blocks_per_plane;
    reserved_blocks = reserved_blocks_per_plane * currdisk->params.planes_per_pkg;
    usable_blocks = usable_blocks_per_plane * currdisk->params.planes_per_pkg;

    //////////////////////////////////////////////////////////////////////////////
    // initialize the free blocks and free pages
    metadata->tot_free_blocks = reserved_blocks;

    //////////////////////////////////////////////////////////////////////////////
    // assign the gang and init the element's free pages
    metadata->gang_num = elem_number / currdisk->params.elements_per_gang;
    currdisk->gang_meta[metadata->gang_num].elem_free_pages[elem_number] = \
        metadata->tot_free_blocks * SSD_DATA_PAGES_PER_BLOCK(currdisk);
    g = &currdisk->gang_meta[metadata->gang_num];
    elem_index = elem_number % currdisk->params.elements_per_gang;

    //////////////////////////////////////////////////////////////////////////////
    // let's begin cleaning with the first plane
    metadata->plane_to_clean = 0;
    metadata->plane_to_write = 0;
    metadata->block_alloc_pos = 0;
    metadata->reqs_waiting = 0;
    metadata->tot_migrations = 0;
    metadata->tot_pgs_migrated = 0;
    metadata->mig_cost = 0;

    //////////////////////////////////////////////////////////////////////////////
    // init the plane metadata
    for (i = 0; i < (unsigned int)currdisk->params.planes_per_pkg; i ++) {
        int blocks_to_skip;

        switch(plane_block_mapping) {
            case PLANE_BLOCKS_CONCAT:
                blocks_to_skip = (i*currdisk->params.blocks_per_plane + usable_blocks_per_plane);
                break;

            case PLANE_BLOCKS_PAIRWISE_STRIPE:
                blocks_to_skip = (i/2)*(2*currdisk->params.blocks_per_plane) + (2*usable_blocks_per_plane) + i%2;
                break;

            case PLANE_BLOCKS_FULL_STRIPE:
                blocks_to_skip = (currdisk->params.planes_per_pkg * usable_blocks_per_plane) + i;
                break;

            default:
                fprintf(stderr, "Error: unknown plane_block_mapping %d\n", plane_block_mapping);
                exit(1);
        }

        metadata->plane_meta[i].active_page = blocks_to_skip*currdisk->params.pages_per_block;
        metadata->plane_meta[i].free_blocks = reserved_blocks_per_plane;
        metadata->plane_meta[i].valid_pages = 0;
        metadata->plane_meta[i].clean_in_progress = 0;
        metadata->plane_meta[i].clean_in_block = -1;
        metadata->plane_meta[i].block_alloc_pos = i*currdisk->params.blocks_per_plane;
        metadata->plane_meta[i].parunit_num = i / SSD_PLANES_PER_PARUNIT(currdisk);
        metadata->plane_meta[i].num_cleans = 0;
    }

    //////////////////////////////////////////////////////////////////////////////
    // init the next plane to clean in a parunit
    for (i = 0; i < (unsigned int) SSD_PARUNITS_PER_ELEM(currdisk); i ++) {
        metadata->parunits[i].plane_to_clean = SSD_PLANES_PER_PARUNIT(currdisk)*i;
    }

    //////////////////////////////////////////////////////////////////////////////
    // init the element's active page
    switch(plane_block_mapping) {
        case PLANE_BLOCKS_CONCAT:
            metadata->active_page = usable_blocks_per_plane * currdisk->params.pages_per_block;
            break;

        case PLANE_BLOCKS_PAIRWISE_STRIPE:
            metadata->active_page = (2 * usable_blocks_per_plane) * currdisk->params.pages_per_block;
            break;

        case PLANE_BLOCKS_FULL_STRIPE:
            metadata->active_page = (currdisk->params.planes_per_pkg * usable_blocks_per_plane) * currdisk->params.pages_per_block;
            break;

        default:
            fprintf(stderr, "Error: unknown plane_block_mapping %d\n", plane_block_mapping);
            exit(1);
    }

    ASSERT(metadata->active_page == metadata->plane_meta[0].active_page);
    active_block = metadata->active_page / currdisk->params.pages_per_block;

    // since we reserve one page out of every block to store the summary info,
    // the size exported by the flash disk is a little less.
    export_size = usable_blocks * SSD_DATA_PAGES_PER_BLOCK(currdisk);
    currdisk->data_pages_per_elem = export_size;
    //printf("res blks = %d, use blks = %d act page = %d exp size = %d\n",
    //  reserved_blocks, usable_blocks, metadata->active_page, export_size);

    //////////////////////////////////////////////////////////////////////////////
    // allocate the lba table
    if ((metadata->lba_table = (int *)malloc(export_size * sizeof(int))) == NULL) {
        fprintf(stderr, "Error: malloc to lba table in ssd_element_metadata_init failed\n");
        fprintf(stderr, "Allocation size = %d\n", export_size * sizeof(int));
        exit(1);
    }

    //////////////////////////////////////////////////////////////////////////////
    // allocate the free blocks bit map
    // what if the no of blocks is not divisible by 8?
    if ((tot_blocks % (sizeof(unsigned char) * 8)) != 0) {
        fprintf(stderr, "This case is not yet handled\n");
        exit(1);
    }

    bytes_to_alloc = tot_blocks / (sizeof(unsigned char) * 8);
    if (!(metadata->free_blocks = (unsigned char *)malloc(bytes_to_alloc))) {
        fprintf(stderr, "Error: malloc to free_blocks in ssd_element_metadata_init failed\n");
        fprintf(stderr, "Allocation size = %d\n", bytes_to_alloc);
        exit(1);
    }
    bzero(metadata->free_blocks, bytes_to_alloc);

    //////////////////////////////////////////////////////////////////////////////
    // allocate the block usage array and initialize it
    if (!(metadata->block_usage = (block_metadata *)malloc(tot_blocks * sizeof(block_metadata)))) {
        fprintf(stderr, "Error: malloc to block_usage in ssd_element_metadata_init failed\n");
        fprintf(stderr, "Allocation size = %d\n", tot_blocks * sizeof(block_metadata));
        exit(1);
    }
    bzero(metadata->block_usage, tot_blocks * sizeof(block_metadata));

    for (i = 0; i < tot_blocks; i ++) {
        int j;

        metadata->block_usage[i].block_num = i;
        metadata->block_usage[i].page = (int*)malloc(sizeof(int) * currdisk->params.pages_per_block);

        for (j = 0; j < currdisk->params.pages_per_block; j ++) {
            metadata->block_usage[i].page[j] = -1;
        }

        // assign the plane number to each block
        switch(plane_block_mapping) {
            case PLANE_BLOCKS_CONCAT:
                metadata->block_usage[i].plane_num = i / currdisk->params.blocks_per_plane;
                break;

            case PLANE_BLOCKS_PAIRWISE_STRIPE:
                metadata->block_usage[i].plane_num = (i/(2*currdisk->params.blocks_per_plane))*2 + i%2;
                break;

            case PLANE_BLOCKS_FULL_STRIPE:
                metadata->block_usage[i].plane_num = i % currdisk->params.planes_per_pkg;
                break;

            default:
                fprintf(stderr, "Error: unknown plane_block_mapping %d\n", plane_block_mapping);
                exit(1);
        }

        // set the remaining life time and time of last erasure
        metadata->block_usage[i].rem_lifetime = SSD_MAX_ERASURES;
        metadata->block_usage[i].time_of_last_erasure = simtime;

        // set the block state
        metadata->block_usage[i].state = SSD_BLOCK_CLEAN;

        // init the bsn to be zero
        metadata->block_usage[i].bsn = 0;
    }

    //////////////////////////////////////////////////////////////////////////////
    // initially, we assume that every logical page is mapped
    // onto a physical page. we start from the first phy page
    // and continue to map, leaving the last page of every block
    // to store the summary information.
    ppage = 0;
    i = 0;
    while (i < export_size) {
        int pgnum_in_gang;
        int pp_index;
        int plane_num;
        unsigned int block = SSD_PAGE_TO_BLOCK(ppage, currdisk);

        ASSERT(block < (unsigned int)currdisk->params.blocks_per_element);

        // if this is the last page in the block
        if (ssd_last_page_in_block(ppage, currdisk)) {
            // leave this physical page for summary page and
            // seal the block
            metadata->block_usage[block].state = SSD_BLOCK_SEALED;

            // go to next block
            ppage ++;
            block = SSD_PAGE_TO_BLOCK(ppage, currdisk);
        }

        // if this block is in the reserved section, skip it
        // and go to the next block.
        switch(plane_block_mapping) {
            case PLANE_BLOCKS_CONCAT:
            {
                unsigned int block_index = block % currdisk->params.blocks_per_plane;
                if ((block_index >= usable_blocks_per_plane) && (block_index < currdisk->params.blocks_per_plane)) {
                    // go to next block
                    ppage = ssd_first_page_in_next_block(ppage, currdisk);
                    continue;
                }
            }
            break;

            case PLANE_BLOCKS_PAIRWISE_STRIPE:
            {
                unsigned int block_index = block % (2*currdisk->params.blocks_per_plane);
                if ((block_index >= 2*usable_blocks_per_plane) && (block_index < 2*currdisk->params.blocks_per_plane)) {
                    ppage = ssd_first_page_in_next_block(ppage, currdisk);
                    continue;
                }
            }
            break;

            case PLANE_BLOCKS_FULL_STRIPE:
                // ideally the control should not come here ...
                if ((block >= usable_blocks) && (block < (unsigned int)currdisk->params.blocks_per_element)) {
                    printf("Error: the control should not come here ...\n");
                    ppage = ssd_first_page_in_next_block(ppage, currdisk);
                    continue;
                }
            break;

            default:
                fprintf(stderr, "Error: unknown plane_block_mapping %d\n", plane_block_mapping);
                exit(1);
        }

        // when the control comes here, 'ppage' contains the next page
        // that can be assigned to a logical page.
        // find the index of the phy page within the block
        pp_index = ppage % currdisk->params.pages_per_block;

        // populate the lba table
        metadata->lba_table[i] = ppage;
        pgnum_in_gang = elem_index * export_size + i;
        g->pg2elem[pgnum_in_gang].e = elem_number;

        // mark this block as not free and its state as 'in use'.
        // note that a block could be not free and its state be 'sealed'.
        // it is enough if we set it once while working on the first phy page.
        // also increment the block sequence number.
        if (pp_index == 0) {
            bitpos = ssd_block_to_bitpos(currdisk, block);
            ssd_set_bit(metadata->free_blocks, bitpos);
            metadata->block_usage[block].state = SSD_BLOCK_INUSE;
            metadata->block_usage[block].bsn = bsn ++;
        }

        // increase the usage count per block
        plane_num = metadata->block_usage[block].plane_num;
        metadata->block_usage[block].page[pp_index] = i;
        metadata->block_usage[block].num_valid ++;
        metadata->plane_meta[plane_num].valid_pages ++;

        // go to the next physical page
        ppage ++;

        // go to the next logical page
        i ++;
    }

    //////////////////////////////////////////////////////////////////////////////
    // mark the block that corresponds to the active page
    // as not free and 'in_use'.
    switch(currdisk->params.copy_back) {
        case SSD_COPY_BACK_DISABLE:
            bitpos = ssd_block_to_bitpos(currdisk, active_block);
            ssd_set_bit(metadata->free_blocks, bitpos);
            metadata->block_usage[active_block].state = SSD_BLOCK_INUSE;
            metadata->block_usage[active_block].bsn = bsn ++;
        break;

        case SSD_COPY_BACK_ENABLE:
            for (i = 0; i < (unsigned int)currdisk->params.planes_per_pkg; i ++) {
                int plane_active_block = SSD_PAGE_TO_BLOCK(metadata->plane_meta[i].active_page, currdisk);

                bitpos = ssd_block_to_bitpos(currdisk, plane_active_block);
                ssd_set_bit(metadata->free_blocks, bitpos);
                metadata->block_usage[plane_active_block].state = SSD_BLOCK_INUSE;
                metadata->block_usage[plane_active_block].bsn = bsn ++;
                metadata->tot_free_blocks --;
                metadata->plane_meta[i].free_blocks --;
            }
        break;

        default:
            fprintf(stderr, "Error: invalid copy back policy %d\n",
                currdisk->params.copy_back);
            exit(1);
    }

    //////////////////////////////////////////////////////////////////////////////
    // set the bsn for the ssd element
    metadata->bsn = bsn;
    //printf("set the bsn to %d\n", bsn);
}
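
Both versions of the init routine end up mapping logical pages sequentially onto physical pages while skipping the last page of every block, which is kept for the summary information. The loop below is a stripped-down, stand-alone illustration of just that mapping rule on a tiny hypothetical geometry; it is not the simulator's code and ignores the reserved-block and dead-block handling shown above.

#include <stdio.h>

/* illustrative only: sequential lpn -> ppage mapping that skips summary pages */
int main(void)
{
    int pages_per_block = 4;    /* tiny, hypothetical geometry */
    int data_pages      = 6;    /* number of logical pages to map */
    int ppage = 0;
    int lpn = 0;

    while (lpn < data_pages) {
        if ((ppage % pages_per_block) == pages_per_block - 1) {
            /* the last page of the block holds summary info; skip it */
            ppage ++;
            continue;
        }
        printf("lpn %d -> ppage %d\n", lpn, ppage);
        ppage ++;
        lpn ++;
    }
    return 0;
}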