Example 1
static void format(void)
{
    UINT32 bank, vblock, vcount_val;

    ASSERT(NUM_MISC_META_SECT > 0);
    ASSERT(NUM_VCOUNT_SECT > 0);

    uart_printf("Total FTL DRAM metadata size: %d KB", DRAM_BYTES_OTHER / 1024);

    uart_printf("VBLKS_PER_BANK: %d", VBLKS_PER_BANK);
    uart_printf("LBLKS_PER_BANK: %d", NUM_LPAGES / PAGES_PER_BLK / NUM_BANKS);
    uart_printf("META_BLKS_PER_BANK: %d", META_BLKS_PER_BANK);

    //----------------------------------------
    // initialize DRAM metadata
    //----------------------------------------
    mem_set_dram(PAGE_MAP_ADDR, NULL, PAGE_MAP_BYTES);
    mem_set_dram(VCOUNT_ADDR, NULL, VCOUNT_BYTES);

    //----------------------------------------
    // erase all blocks except vblock #0
    //----------------------------------------
    for (vblock = MISCBLK_VBN; vblock < VBLKS_PER_BANK; vblock++)
    {
        for (bank = 0; bank < NUM_BANKS; bank++)
        {
            vcount_val = VC_MAX;
            if (is_bad_block(bank, vblock) == FALSE)
            {
                nand_block_erase(bank, vblock);
                vcount_val = 0;
            }
            write_dram_16(VCOUNT_ADDR + ((bank * VBLKS_PER_BANK) + vblock) * sizeof(UINT16),
                          vcount_val);
        }
    }
    //----------------------------------------
    // initialize SRAM metadata
    //----------------------------------------
    init_metadata_sram();

    // flush metadata to NAND
    logging_pmap_table();
    logging_misc_metadata();

    write_format_mark();
	led(1);
    uart_print("format complete");
}
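
The vcount update above packs one 16-bit counter per (bank, vblock) pair into a flat DRAM region. A minimal host-side sketch of the same indexing, with hypothetical values standing in for NUM_BANKS, VBLKS_PER_BANK and VC_MAX:

#include <assert.h>
#include <stdint.h>

/* hypothetical stand-ins for the firmware constants */
#define NUM_BANKS      8
#define VBLKS_PER_BANK 4096
#define VC_MAX         0xFFFF

static uint16_t vcount_table[NUM_BANKS * VBLKS_PER_BANK];

/* same layout as VCOUNT_ADDR + ((bank * VBLKS_PER_BANK) + vblock) * sizeof(UINT16) */
static uint16_t *vcount_entry(uint32_t bank, uint32_t vblock)
{
    assert(bank < NUM_BANKS && vblock < VBLKS_PER_BANK);
    return &vcount_table[bank * VBLKS_PER_BANK + vblock];
}

int main(void)
{
    *vcount_entry(3, 17) = 0;       /* erased, usable block */
    *vcount_entry(3, 18) = VC_MAX;  /* bad block, never used */
    return 0;
}
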
Example 2
/* g_smt_target, g_smt_victim */
void load_smt_piece(UINT32 idx){
	UINT32 bank,row,block;
	UINT32 dest;
	bank = idx / NUM_BANKS_MAX;
	block = idx % NUM_BANKS_MAX;
	row = g_misc_meta[bank].smt_pieces[block] * SMT_INC_SIZE + (PAGES_PER_VBLK * g_bad_list[bank][block]);
	if( g_smt_target == NUM_BANKS_MAX || g_smt_full == 1){
		g_smt_full = 1;
		g_smt_victim = (g_smt_victim + 1 ) % NUM_BANKS_MAX;
		flush_smt_piece(g_smt_victim);
		g_smt_target = (g_smt_target + 1) % NUM_BANKS_MAX;
	}
	SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);	
	SETREG(FCP_DMA_CNT,SMT_PIECE_BYTES);
	SETREG(FCP_COL, 0);
	dest = SMT_ADDR + (g_smt_target * SMT_PIECE_BYTES);
	SETREG(FCP_DMA_ADDR, dest);
	SETREG(FCP_OPTION, FO_P | FO_E );		
	SETREG(FCP_ROW_L(bank), row);
	SETREG(FCP_ROW_H(bank), row);
	flash_issue_cmd(bank, RETURN_WHEN_DONE);

	smt_dram_map[g_smt_target] = idx;
	smt_piece_map[idx] = g_smt_target;
	smt_bit_map[bank] &= ~( 1 <<block );
	if(( g_misc_meta[bank].smt_init & ( 1 << block ) ) == 0){
		mem_set_dram( dest, 0x00, SMT_PIECE_BYTES);
		g_misc_meta[bank].smt_init |= (1 <<block);
	}
	g_smt_target++;
}
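
The g_smt_target / g_smt_victim bookkeeping above amounts to a small DRAM cache of SMT pieces with round-robin replacement once every slot is occupied. A simplified, self-contained sketch of that policy (the slot count and names are placeholders, and the eviction step that would call flush_smt_piece() is elided):

#include <stdint.h>

#define NUM_SLOTS 16    /* stand-in for the number of SMT pieces kept in DRAM */

static uint32_t slot_to_piece[NUM_SLOTS];  /* which piece currently occupies each slot */
static uint32_t target;                    /* next free slot while the cache is filling up */
static uint32_t victim;                    /* next slot to evict once the cache is full */
static int      cache_full;

/* Return the slot that piece `idx` should be loaded into, evicting round-robin
 * once every slot is occupied (the flush of the evicted piece goes here). */
static uint32_t pick_slot(uint32_t idx)
{
    uint32_t slot;
    if (cache_full) {
        slot   = victim;
        victim = (victim + 1) % NUM_SLOTS;
    } else {
        slot = target++;
        if (target == NUM_SLOTS)
            cache_full = 1;
    }
    slot_to_piece[slot] = idx;
    return slot;      /* caller DMAs the piece into this slot */
}
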
Example 3
void ftl_read(UINT32 const lba, UINT32 const num_sectors)
{
    UINT32 remain_sects, num_sectors_to_read;
    UINT32 lpn, sect_offset;
    UINT32 bank, vpn;

    lpn          = lba / SECTORS_PER_PAGE;
    sect_offset  = lba % SECTORS_PER_PAGE;
    remain_sects = num_sectors;

    while (remain_sects != 0)
    {
        if ((sect_offset + remain_sects) < SECTORS_PER_PAGE)
        {
            num_sectors_to_read = remain_sects;
        }
        else
        {
            num_sectors_to_read = SECTORS_PER_PAGE - sect_offset;
        }
        bank = get_num_bank(lpn); // page striping
        vpn  = get_vpn(lpn);
        CHECK_VPAGE(vpn);

        if (vpn != NULL)
        {
            nand_page_ptread_to_host(bank,
                                     vpn / PAGES_PER_BLK,
                                     vpn % PAGES_PER_BLK,
                                     sect_offset,
                                     num_sectors_to_read);
        }
        // The host is requesting to read a logical page that has never been written to.
        else
        {
			UINT32 next_read_buf_id = (g_ftl_read_buf_id + 1) % NUM_RD_BUFFERS;

			#if OPTION_FTL_TEST == 0
			while (next_read_buf_id == GETREG(SATA_RBUF_PTR));	// wait if the read buffer is full (slow host)
			#endif

            // fix bug @ v.1.0.6
            // Send 0xFF...FF to the host when it requests a sector that has never been written.
            // In the old version, for example, if the host requested unwritten sector 0 after programming sector 1, Jasmine would send 0x00...00 to the host.
            // However, if the host had already written sector 1, Jasmine would send 0xFF...FF to the host when it requested sector 0. (ftl_read() in ftl_xxx/ftl.c)
			mem_set_dram(RD_BUF_PTR(g_ftl_read_buf_id) + sect_offset*BYTES_PER_SECTOR,
                         0xFFFFFFFF, num_sectors_to_read*BYTES_PER_SECTOR);

            flash_finish();

			SETREG(BM_STACK_RDSET, next_read_buf_id);	// change bm_read_limit
			SETREG(BM_STACK_RESET, 0x02);				// change bm_read_limit

			g_ftl_read_buf_id = next_read_buf_id;
        }
        sect_offset   = 0;
        remain_sects -= num_sectors_to_read;
        lpn++;
    }
}
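
The loop above walks the request one logical page at a time, so only the first page can start mid-page. A small stand-alone sketch of the per-iteration split, assuming a hypothetical SECTORS_PER_PAGE of 64:

#include <stdint.h>
#include <stdio.h>

#define SECTORS_PER_PAGE 64   /* hypothetical; the real value comes from the flash geometry */

/* Print how a host request is split into per-page pieces, mirroring the ftl_read() loop. */
static void split_request(uint32_t lba, uint32_t num_sectors)
{
    uint32_t lpn          = lba / SECTORS_PER_PAGE;
    uint32_t sect_offset  = lba % SECTORS_PER_PAGE;
    uint32_t remain_sects = num_sectors;

    while (remain_sects != 0) {
        uint32_t chunk = (sect_offset + remain_sects < SECTORS_PER_PAGE)
                             ? remain_sects
                             : SECTORS_PER_PAGE - sect_offset;
        printf("lpn=%u offset=%u sectors=%u\n", lpn, sect_offset, chunk);
        sect_offset = 0;          /* only the first page can start mid-page */
        remain_sects -= chunk;
        lpn++;
    }
}

int main(void) { split_request(100, 200); return 0; }
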
Example 4
static void overwritePartialChunkNew(UINT32 nSectsToWrite) {
    //uart_print_level_1("27 ");
    uart_print("overwritePartialChunkNew\r\n");
    chooseNewBank_();
    UINT32 src = WR_BUF_PTR(g_ftl_write_buf_id)+(sectOffset_*BYTES_PER_SECTOR);
    UINT32 chunkBufStartAddr = OW_LOG_BUF(bank_)+(owChunkPtr[bank_]*BYTES_PER_CHUNK); // base address of the destination chunk
    waitBusyBank(bank_);
    mem_set_dram (chunkBufStartAddr, 0xFFFFFFFF, BYTES_PER_CHUNK);
    UINT32 dst = chunkBufStartAddr + (sectOffset_ % SECTORS_PER_CHUNK) * BYTES_PER_SECTOR;
    mem_copy(dst, src, nSectsToWrite * BYTES_PER_SECTOR);
}
Example 5
static void writeChunkNew(UINT32 nSectsToWrite)
{
    uart_print("writeChunkNew\r\n");
    UINT32 src = WR_BUF_PTR(g_ftl_write_buf_id)+(sectOffset_*BYTES_PER_SECTOR);
    UINT32 dst = LOG_BUF(bank_)+(chunkPtr[bank_]*BYTES_PER_CHUNK); // base address of the destination chunk
    waitBusyBank(bank_);
    if (nSectsToWrite != SECTORS_PER_CHUNK)
    {
        mem_set_dram (dst, 0xFFFFFFFF, BYTES_PER_CHUNK); // Initialize chunk in dram log buffer with 0xFF
    }
    mem_copy(dst+((sectOffset_ % SECTORS_PER_CHUNK) * BYTES_PER_SECTOR), src, nSectsToWrite*BYTES_PER_SECTOR);
}
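
Both overwritePartialChunkNew() and writeChunkNew() stage data at a sector offset inside a chunk-sized slot, padding untouched bytes with 0xFF when the chunk is partial. A self-contained sketch of that staging step, with hypothetical chunk geometry in place of the firmware constants:

#include <stdint.h>
#include <string.h>

/* hypothetical geometry; the firmware derives these from its own constants */
#define SECTORS_PER_CHUNK 8
#define BYTES_PER_SECTOR  512
#define BYTES_PER_CHUNK   (SECTORS_PER_CHUNK * BYTES_PER_SECTOR)

/* Copy a (possibly partial) chunk into a log buffer slot, padding untouched
 * sectors with 0xFF as the functions above do for partial writes. */
static void stage_chunk(uint8_t *chunk_slot, const uint8_t *src,
                        uint32_t sect_offset, uint32_t n_sects)
{
    if (n_sects != SECTORS_PER_CHUNK)
        memset(chunk_slot, 0xFF, BYTES_PER_CHUNK);
    memcpy(chunk_slot + (sect_offset % SECTORS_PER_CHUNK) * BYTES_PER_SECTOR,
           src, n_sects * BYTES_PER_SECTOR);
}
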
Example 6
void increaseLpnColdBlk (UINT32 const bank, LogCtrlBlock * ctrlBlock)
{

    uart_print("increaseLpnColdBlk\r\n");

    UINT32 lpn = ctrlBlock[bank].logLpn;

    if (LogPageToOffset(lpn) == UsedPagesPerLogBlk-1)
    { // current rw log block is full

        UINT32 lbn = get_log_lbn(lpn);
        nand_page_ptprogram(bank,
                            get_log_vbn(bank, lbn),
                            PAGES_PER_BLK - 1,
                            0,
                            (CHUNK_ADDR_BYTES * CHUNKS_PER_LOG_BLK + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR,
                            ctrlBlock[bank].lpnsListAddr,
                            RETURN_WHEN_DONE);
        mem_set_dram(ctrlBlock[bank].lpnsListAddr, INVALID, (CHUNKS_PER_BLK * CHUNK_ADDR_BYTES));
        insertBlkInHeap(&heapDataCold, bank, lbn);

#if CanReuseBlksForColdData == 0
        lbn = cleanListPop(&cleanListDataWrite, bank); // Now the hybrid approach can pop from the cleanList
        ctrlBlock[bank].logLpn = lbn * PAGES_PER_BLK;

        while(cleanListSize(&cleanListDataWrite, bank) < 2)
        {
#if PrintStats
            uart_print_level_1("GCCOLD\r\n");
#endif
            garbageCollectLog(bank);
        }
#else
        findNewLpnForColdLog(bank, ctrlBlock);
#endif
    }

    else
    {
        ctrlBlock[bank].logLpn = lpn+1;
    }
    uart_print("increaseLpnColdBlk new lpn "); uart_print_int(ctrlBlock[bank].logLpn); uart_print("\r\n");
}
Example 7
void ftl_read_sector(UINT32 const lba, UINT32 const sect_offset)							//added by GYUHWA
{
	UINT32 psn, bank, row, buf_offset, nand_offset;
	UINT32 t1;
	UINT32 src,dst;
	psn = get_psn(lba);		//physical sector number
	//bank = lba % NUM_BANKS;	
	bank = psn / SECTORS_PER_BANK;
	t1 = psn % SECTORS_PER_BANK;
	row = t1 / SECTORS_PER_PAGE;		
	nand_offset = t1 % SECTORS_PER_PAGE;				//physical nand offset

	if((psn & (UINT32)BIT31) != 0 )					//data is in merge buffer
	{
		buf_offset = (psn ^ (UINT32)BIT31);
		//bank = g_target_bank;
		bank = buf_offset / SECTORS_PER_PAGE;
		buf_offset = buf_offset % SECTORS_PER_PAGE;
		dst = RD_BUF_PTR(g_ftl_read_buf_id) + sect_offset * BYTES_PER_SECTOR;
		src = MERGE_BUFFER_ADDR + bank * BYTES_PER_PAGE + BYTES_PER_SECTOR * buf_offset;

		mem_copy(dst, src, BYTES_PER_SECTOR);					
		// the requested sector was found in the merge buffer -> mem_copy it to RD_BUFFER
	}
	else if (psn != NULL)							//data is in nand flash
		{
		SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);		//FCP command for read one sector
		SETREG(FCP_DMA_CNT, BYTES_PER_SECTOR);
		SETREG(FCP_COL, nand_offset);						
		SETREG(FCP_DMA_ADDR, RD_BUF_PTR(g_ftl_read_buf_id) + (BYTES_PER_SECTOR * (sect_offset - nand_offset)));
		// nand_offset selects the column within the NAND page; DMA_ADDR is shifted so the sector lands at sect_offset in the read buffer.
		SETREG(FCP_OPTION, FO_P | FO_E );		
		SETREG(FCP_ROW_L(bank), row);				
		SETREG(FCP_ROW_H(bank), row);// change bm_read_limit

		flash_issue_cmd(bank, RETURN_ON_ISSUE);
		// BM_STACK_RD_LIMIT is not advanced here, so further sectors can still be collected into this read buffer.
	}
	else								// data that has never been written
	{
		// fill the requested sector with 0xFF for data that has never been written
		mem_set_dram(RD_BUF_PTR(g_ftl_read_buf_id) + sect_offset * BYTES_PER_SECTOR, 0xFFFFFFFF, BYTES_PER_SECTOR);
	}
}
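
The function above decodes a physical sector number in two ways: BIT31 set means the data still sits in the per-bank merge buffer, otherwise the psn is split into bank, page row and in-page offset. A minimal sketch of that decoding, with hypothetical geometry constants standing in for the real ones:

#include <stdint.h>

#define SECTORS_PER_PAGE 64          /* hypothetical geometry */
#define SECTORS_PER_BANK (1u << 20)  /* hypothetical geometry */
#define PSN_IN_MERGE_BUF (1u << 31)  /* stand-in for BIT31 */

struct psn_loc {
    int      in_merge_buf;
    uint32_t bank, row, offset;      /* offset: sector within the page or merge buffer */
};

/* Decode a physical sector number the same way ftl_read_sector() does. */
static struct psn_loc decode_psn(uint32_t psn)
{
    struct psn_loc loc = {0};
    if (psn & PSN_IN_MERGE_BUF) {
        uint32_t off = psn & ~PSN_IN_MERGE_BUF;
        loc.in_merge_buf = 1;
        loc.bank   = off / SECTORS_PER_PAGE;
        loc.offset = off % SECTORS_PER_PAGE;
    } else {
        uint32_t t = psn % SECTORS_PER_BANK;
        loc.bank   = psn / SECTORS_PER_BANK;
        loc.row    = t / SECTORS_PER_PAGE;
        loc.offset = t % SECTORS_PER_PAGE;
    }
    return loc;
}
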
Example 8
/* g_smt_target, g_smt_victim */
void load_smt_piece(UINT32 idx){
	UINT32 bank,row,block;
	UINT32 dest;
	UINT32 pblock;	// physical block which holds the target mapping table

	bank = idx / SMT_BANK_NUM;
	block = idx % SMT_BANK_NUM;

	pblock = block / SMT_BLOCK;
	row = smt_pos[idx] * SMT_INC_SIZE + (PAGES_PER_VBLK * g_bad_list[bank][pblock]);
	if( g_smt_full == 1){
		flush_smt_piece(g_smt_victim);
		g_smt_victim = (g_smt_victim +1 ) % SMT_DRAM;
	}
	SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);	
	SETREG(FCP_DMA_CNT,SMT_PIECE_BYTES);
	SETREG(FCP_COL, 0);
	dest = SMT_ADDR + (g_smt_target * SMT_PIECE_BYTES);
	SETREG(FCP_DMA_ADDR, dest);
	SETREG(FCP_OPTION, FO_P | FO_E );		
	SETREG(FCP_ROW_L(bank), row);
	SETREG(FCP_ROW_H(bank), row);
	
	// to fully guarantee completion we could issue synchronously:
	//flash_issue_cmd(bank, RETURN_WHEN_DONE);
	while(_BSP_FSM(g_bank_to_wait) != BANK_IDLE);
	flash_issue_cmd(bank, RETURN_ON_ISSUE);
	g_bank_to_wait = bank;

	smt_dram_map[g_smt_target] = idx;
	smt_piece_map[idx] = g_smt_target;
	smt_bit_map[bank][block/NUM_BANKS_MAX] &= ~( 1 <<(block % NUM_BANKS_MAX) );

	/* init or not */
	if(( g_misc_meta[bank].smt_init[block/NUM_BANKS_MAX] & ( 1 << (block % NUM_BANKS_MAX) ) ) == 0){
		mem_set_dram( dest, 0x00, SMT_PIECE_BYTES);
		g_misc_meta[bank].smt_init[block/NUM_BANKS_MAX] |= (1 <<(block % NUM_BANKS_MAX));
	}
	g_smt_target = (g_smt_target + 1) % SMT_DRAM;
	if( g_smt_target == 0 ){
		g_smt_full = 1;
	}
}
Example 9
/* g_smt_target, g_smt_victim */
void load_smt_piece(UINT32 idx){
	UINT32 bank,row,block;
	UINT32 dest;
	UINT32 pblock;
	bank = idx / SMT_NUM;
	block = idx % SMT_NUM;

	pblock = block / (NUM_BANKS_MAX *2);
	row = g_misc_meta[bank].smt_pieces[block] * SMT_INC_SIZE + (PAGES_PER_VBLK * g_bad_list[bank][pblock]);
	if(g_smt_full == 1){
		flush_smt_piece(g_smt_target);
		g_smt_victim = (g_smt_victim +1 ) % SMT_NUM;
	}
	SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);	
	SETREG(FCP_DMA_CNT,SMT_PIECE_BYTES);
	SETREG(FCP_COL, 0);
	dest = SMT_ADDR + (g_smt_target * SMT_PIECE_BYTES);
	SETREG(FCP_DMA_ADDR, dest);
	SETREG(FCP_OPTION, FO_P | FO_E );		
	SETREG(FCP_ROW_L(bank), row);
	SETREG(FCP_ROW_H(bank), row);
	
	// fully synchronous read: wait until the piece has been transferred before using it
	flash_issue_cmd(bank, RETURN_WHEN_DONE);

	smt_dram_map[g_smt_target] = idx;
	smt_piece_map[idx] = g_smt_target;
	smt_bit_map[bank][block/NUM_BANKS_MAX] &= ~( 1 << (block % NUM_BANKS_MAX) );
	if(( g_misc_meta[bank].smt_init[block / NUM_BANKS_MAX ] & ( 1 << (block % NUM_BANKS_MAX)) ) == 0){
		mem_set_dram( dest, 0x00, SMT_PIECE_BYTES);
		g_misc_meta[bank].smt_init[block / NUM_BANKS_MAX] |= (1 << (block % NUM_BANKS_MAX));
	}
	g_smt_target++;
	if( g_smt_target == SMT_NUM ){
		g_smt_target = 0;
		g_smt_full = 1;
	}
}
Example 10
static void write_format_mark(void)
{
	// This function writes a format mark to a page at (bank #0, block #0).

#ifdef __GNUC__
	extern UINT32 size_of_firmware_image;
	UINT32 firmware_image_pages = (((UINT32) (&size_of_firmware_image)) + BYTES_PER_FW_PAGE - 1) / BYTES_PER_FW_PAGE;
#else
	extern UINT32 Image$$ER_CODE$$RO$$Length;
	extern UINT32 Image$$ER_RW$$RW$$Length;
	UINT32 firmware_image_bytes = ((UINT32) &Image$$ER_CODE$$RO$$Length) + ((UINT32) &Image$$ER_RW$$RW$$Length);
	UINT32 firmware_image_pages = (firmware_image_bytes + BYTES_PER_FW_PAGE - 1) / BYTES_PER_FW_PAGE;
#endif

	UINT32 format_mark_page_offset = FW_PAGE_OFFSET + firmware_image_pages;

	mem_set_dram(FTL_BUF_ADDR, 0, BYTES_PER_SECTOR);

	SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
	SETREG(FCP_BANK, REAL_BANK(0));
	SETREG(FCP_OPTION, FO_E | FO_B_W_DRDY);
	SETREG(FCP_DMA_ADDR, FTL_BUF_ADDR); 	// DRAM -> flash
	SETREG(FCP_DMA_CNT, BYTES_PER_SECTOR);
	SETREG(FCP_COL, 0);
	SETREG(FCP_ROW_L(0), format_mark_page_offset);
	SETREG(FCP_ROW_H(0), format_mark_page_offset);

	// At this point, we do not have to check Waiting Room status before issuing a command,
	// because we have waited for all the banks to become idle before returning from format().
	SETREG(FCP_ISSUE, NULL);

	// wait for the FC_COL_ROW_IN_PROG command to be accepted by bank #0
	while ((GETREG(WR_STAT) & 0x00000001) != 0);

	// wait until bank #0 finishes the write operation
	while (BSP_FSM(0) != BANK_IDLE);
}
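
The firmware image size is rounded up to whole pages before the format mark offset is computed. A tiny sketch of that ceiling division, with made-up page sizes purely to show the rounding behaviour:

#include <assert.h>
#include <stdint.h>

/* Round a byte count up to whole pages, as done for firmware_image_pages. */
static uint32_t bytes_to_pages(uint32_t bytes, uint32_t bytes_per_page)
{
    return (bytes + bytes_per_page - 1) / bytes_per_page;
}

int main(void)
{
    /* hypothetical numbers, only to show the rounding */
    assert(bytes_to_pages(1, 2048) == 1);
    assert(bytes_to_pages(2048, 2048) == 1);
    assert(bytes_to_pages(2049, 2048) == 2);
    /* the format mark then lands at FW_PAGE_OFFSET + firmware_image_pages */
    return 0;
}
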
Example 11
void increaseLpnHotBlkFirstUsage (UINT32 const bank, LogCtrlBlock * ctrlBlock)
{
    uart_print("increaseLpnHotBlkFirstUsage\r\n");

    UINT32 lpn = ctrlBlock[bank].logLpn;
    UINT32 pageOffset = LogPageToOffset(lpn);

    if (pageOffset == 123)
    { // current rw log block is full. Write lpns list in the highest low page (125)
        uart_print("Blk full\r\n");
        UINT32 lbn = get_log_lbn(lpn);
        nand_page_ptprogram(bank,
                            get_log_vbn(bank, lbn),
                            125,
                            0,
                            (CHUNK_ADDR_BYTES * CHUNKS_PER_LOG_BLK + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR,
                            ctrlBlock[bank].lpnsListAddr,
                            RETURN_WHEN_DONE);
        mem_set_dram(ctrlBlock[bank].lpnsListAddr, INVALID, (CHUNKS_PER_BLK * CHUNK_ADDR_BYTES));
        insertBlkInHeap(&heapDataFirstUsage, bank, lbn);

        findNewLpnForHotLog(bank, ctrlBlock);
    }

    else
    {
        if(pageOffset == 0)
        {
            ctrlBlock[bank].logLpn = lpn+1;
        }
        else
        {
            ctrlBlock[bank].logLpn = lpn+2;
        }
    }
}
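
The +1 / +2 advance above, together with the full condition at offset 123 and the lpns list landing on page 125, walks only every other page of the block (offsets 0, 1, 3, ..., 123), which the surrounding code treats as the low pages of an MLC block. A short sketch that just reproduces that offset sequence:

#include <stdint.h>
#include <stdio.h>

/* Reproduce the page-offset sequence used on the first usage of a hot log block:
 * 0, 1, 3, 5, ..., 123, after which the lpns list is programmed to page 125. */
int main(void)
{
    uint32_t offset = 0;
    while (offset != 123) {
        printf("%u ", offset);
        offset += (offset == 0) ? 1 : 2;    /* mirrors the +1 / +2 advance above */
    }
    printf("%u\n", offset);
    return 0;
}
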
Example 12
void test_nand_blocks(void)
{
	// This function is a utility that writes random data to flash pages and verifies them.
	// This function takes a long time to complete.

	UINT32 bank, vblk_offset, page_offset, data, bad;

	#define write_buffer_base	DRAM_BASE
	#define read_buffer_base	(DRAM_BASE + BYTES_PER_VBLK)

	disable_irq();
	flash_clear_irq();

	mem_set_sram(g_test_result, 0, sizeof(g_test_result));

	// Configure the flash controller so that any FIRQ_* does not lead to pause state.
	SETREG(FCONF_PAUSE, 0);

	// STEP 1 - prepare random data

	srand(10);

	for (page_offset = 0; page_offset < PAGES_PER_VBLK; page_offset++)
	{
		data = (rand() & 0xFFFF) | (rand() << 16);
		mem_set_dram(write_buffer_base + page_offset * BYTES_PER_PAGE, data, BYTES_PER_PAGE);
	}

	for (vblk_offset = 1; vblk_offset < VBLKS_PER_BANK; vblk_offset++)
	{
		// STEP 2 - erase a block at each bank

		for (bank = 0; bank < NUM_BANKS; bank++)
		{
			UINT32 rbank = REAL_BANK(bank);

			SETREG(FCP_CMD, FC_ERASE);
			SETREG(FCP_BANK, rbank);
			SETREG(FCP_OPTION, FO_P);
			SETREG(FCP_ROW_L(bank), vblk_offset * PAGES_PER_VBLK);
			SETREG(FCP_ROW_H(bank), vblk_offset * PAGES_PER_VBLK);

			while ((GETREG(WR_STAT) & 0x00000001) != 0);
			SETREG(FCP_ISSUE, NULL);
		}

		// STEP 3 - write to every page of the erased block

		for (page_offset = 0; page_offset < PAGES_PER_VBLK; page_offset++)
		{
			for (bank = 0; bank < NUM_BANKS; bank++)
			{
				UINT32 rbank = REAL_BANK(bank);

				SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
				SETREG(FCP_BANK, rbank);
				SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
				SETREG(FCP_DMA_ADDR, write_buffer_base + page_offset * BYTES_PER_PAGE);
				SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);
				SETREG(FCP_COL, 0);
				SETREG(FCP_ROW_L(bank), vblk_offset * PAGES_PER_VBLK + page_offset);
				SETREG(FCP_ROW_H(bank), vblk_offset * PAGES_PER_VBLK + page_offset);

				while ((GETREG(WR_STAT) & 0x00000001) != 0);
				SETREG(FCP_ISSUE, NULL);
			}
		}

		// STEP 4 - check the FC_ERASE and FC_COL_ROW_IN_PROG results.

		bad = 0;

		while (GETREG(MON_CHABANKIDLE) != 0);

		for (bank = 0; bank < NUM_BANKS; bank++)
		{
			if (BSP_INTR(bank) & (FIRQ_BADBLK_H | FIRQ_BADBLK_L))
			{
				bad |= (1 << bank);
				CLR_BSP_INTR(bank, 0xFF);
				g_test_result[bank].erase_prog_fail++;
			}
		}

		// STEP 5 - read and verify
		// We check ECC/CRC results for verification.

		for (page_offset = 0; page_offset < PAGES_PER_VBLK; page_offset++)
		{
			for (bank = 0; bank < NUM_BANKS; bank++)
			{
				UINT32 rbank = REAL_BANK(bank);

				if (bad & (1 << bank))
					continue;

				SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
				SETREG(FCP_BANK, rbank);
				SETREG(FCP_OPTION, FO_P | FO_E);
				SETREG(FCP_DMA_ADDR, read_buffer_base + bank * BYTES_PER_PAGE);
				SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);
				SETREG(FCP_COL, 0);
				SETREG(FCP_ROW_L(bank), vblk_offset * PAGES_PER_VBLK + page_offset);
				SETREG(FCP_ROW_H(bank), vblk_offset * PAGES_PER_VBLK + page_offset);

				while ((GETREG(WR_STAT) & 0x00000001) != 0);
				SETREG(FCP_ISSUE, NULL);
			}
		}

		// STEP 6 - check the FC_COL_ROW_READ_OUT results

		while (GETREG(MON_CHABANKIDLE) != 0);

		for (bank = 0; bank < NUM_BANKS; bank++)
		{
			if (BSP_INTR(bank) & FIRQ_DATA_CORRUPT)
			{
				bad |= (1 << bank);
				CLR_BSP_INTR(bank, 0xFF);
				g_test_result[bank].read_fail++;
			}
		}

		// STEP 7 - erase the blocks, but not the bad ones

		for (bank = 0; bank < NUM_BANKS; bank++)
		{
			UINT32 rbank = REAL_BANK(bank);

			if (bad & (1 << bank))
				continue;

			SETREG(FCP_CMD, FC_ERASE);
			SETREG(FCP_BANK, rbank);
			SETREG(FCP_OPTION, FO_P);
			SETREG(FCP_ROW_L(bank), vblk_offset * PAGES_PER_VBLK);
			SETREG(FCP_ROW_H(bank), vblk_offset * PAGES_PER_VBLK);

			while ((GETREG(WR_STAT) & 0x00000001) != 0);
			SETREG(FCP_ISSUE, NULL);
		}
	}

	// Now that bad blocks contain non-0xFF data, it is a good time to use install.exe to scan bad blocks.
}
Example 13
void ftl_open(void)
{
	sanity_check();

	// STEP 1 - read scan lists from NAND flash

	scan_list_t* scan_list = (scan_list_t*) SCAN_LIST_ADDR;
	UINT32 bank;
	UINT32 bad_block, i , j ;
	// Since we are going to check the flash interrupt flags within this function, ftl_isr() should not be called.
	disable_irq();

	flash_clear_irq();	// clear any flash interrupt flags that might have been set
	
	for (bank = 0; bank < NUM_BANKS; bank++)
	{
		//g_misc_meta[bank].g_merge_buff_sect = 0;
		SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);			// FC_COL_ROW_READ_OUT = sensing and data output
		SETREG(FCP_OPTION, FO_E);						// scan list was written in 1-plane mode by install.exe, so there is no FO_P
		SETREG(FCP_DMA_ADDR, scan_list + bank);			// target address should be DRAM or SRAM (see flash.h for rules)
		SETREG(FCP_DMA_CNT, SCAN_LIST_SIZE);			// number of bytes for data output
		SETREG(FCP_COL, 0);
		SETREG(FCP_ROW_L(bank), SCAN_LIST_PAGE_OFFSET);	// scan list was written to this position by install.exe
		SETREG(FCP_ROW_H(bank), SCAN_LIST_PAGE_OFFSET);	// Tutorial FTL always uses the same row addresses for high chip and low chip

		flash_issue_cmd(bank, RETURN_ON_ISSUE);			// Take a look at the source code of flash_issue_cmd() now.
	}

	// This while() statement waits for the last issued command to be accepted.
	// If bit #0 of WR_STAT is one, a flash command is in the Waiting Room, because the target bank has not accepted it yet.
	while ((GETREG(WR_STAT) & 0x00000001) != 0);

	// Now, FC_COL_ROW_READ_OUT commands are accepted by all the banks.
	// Before checking whether the scan lists are corrupted, we have to wait for the read operations to complete.
	// This code shows how to wait for ALL the banks to become idle.
	while (GETREG(MON_CHABANKIDLE) != 0);

	// Now we can check the flash interrupt flags.

	for (bank = 0; bank < NUM_BANKS; bank++)
	{
		UINT32 num_entries = NULL;
		UINT32 result = OK;

		if (BSP_INTR(bank) & FIRQ_DATA_CORRUPT)
		{
			// Too many bits are corrupted so that they cannot be corrected by ECC.
			result = FAIL;
		}
		else
		{
			// Even though the scan list is not corrupt, we have to check whether its contents make sense.

			UINT32 i;

			num_entries = read_dram_16(&(scan_list[bank].num_entries));

			if (num_entries > SCAN_LIST_ITEMS)
			{
				result = FAIL;	// We cannot trust this scan list. Perhaps a software bug.
			}
			else
			{
				for (i = 0; i < num_entries; i++)
				{
					UINT16 entry = read_dram_16(&(scan_list[bank].list[i]));
					UINT16 pblk_offset = entry & 0x7FFF;

					if (pblk_offset == 0 || pblk_offset >= PBLKS_PER_BANK)
					{
						#if OPTION_REDUCED_CAPACITY == FALSE
						result = FAIL;	// We cannot trust this scan list. Perhaps a software bug.
						#endif
					}
					else
					{
						// Bit position 15 of scan list entry is high-chip/low-chip flag.
						// Remove the flag in order to make is_bad_block() simple.

						write_dram_16(&(scan_list[bank].list[i]), pblk_offset);
					}
				}
			}
		}

		if (result == FAIL)
		{
			mem_set_dram(scan_list + bank, 0, SCAN_LIST_SIZE);
			g_misc_meta[bank].g_scan_list_entries = 0;
		}
		else
		{
			write_dram_16(&(scan_list[bank].num_entries), 0);
			g_misc_meta[bank].g_scan_list_entries = num_entries;
		}
	}

	// STEP 2 - If necessary, do low-level format
	// format() should be called after loading scan lists, because format() calls is_bad_block().
	init_meta_data();

	// save the list of non-bad blocks reserved for metadata
	// block#0 : list, block#1 : misc meta
	// block#2 ~ map table meta and data
	for(i = 0 ;i < NUM_BANKS;i++){
		bad_block = 2;
		for(j = 0 ;j < NUM_BANKS_MAX;j++){
			while(is_bad_block(i, bad_block) && j < VBLKS_PER_BANK)
			{
				bad_block++;
			}
			g_bad_list[i][j] = bad_block++;
		}
		g_free_start[i] = g_bad_list[i][NUM_BANKS_MAX-1] + 1;
	}
	//if (check_format_mark() == FALSE)
	if( TRUE)
	{
		// When ftl_open() is called for the first time (i.e. the SSD is powered up the first time)
		// format() is called.

		format();
	}
	else{
		loading_misc_meta();
	}


	//*Red//
	// STEP 3 - initialize sector mapping table pieces
	// The page mapping table is too large to fit in SRAM and DRAM.
	// gyuhwa
//	init_metadata();
	// STEP 4 - initialize global variables that belong to FTL

	g_ftl_read_buf_id = 0;
	g_ftl_write_buf_id = 0;

	for (bank = 0; bank < NUM_BANKS; bank++)
	{
		g_misc_meta[bank].g_target_row = PAGES_PER_VBLK * (g_free_start[bank]);
	}

	flash_clear_irq();

	// This example FTL can handle runtime bad block interrupts and read fail (uncorrectable bit errors) interrupts

	SETREG(INTR_MASK, FIRQ_DATA_CORRUPT | FIRQ_BADBLK_L | FIRQ_BADBLK_H);
	SETREG(FCONF_PAUSE, FIRQ_DATA_CORRUPT | FIRQ_BADBLK_L | FIRQ_BADBLK_H);

	enable_irq();
}
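
Each scan list entry carries a high-chip/low-chip flag in bit 15 and a physical block offset in the lower bits; offset 0 and anything past the end of the bank are rejected. A minimal sketch of that per-entry cleanup, with PBLKS_PER_BANK as a placeholder value:

#include <stdint.h>

#define PBLKS_PER_BANK 4096   /* hypothetical; the real value is a build-time constant */

/* Validate one scan-list entry as ftl_open() does: strip the high-chip/low-chip
 * flag in bit 15 and range-check the physical block offset.
 * Returns the cleaned offset, or 0 if the entry cannot be trusted. */
static uint16_t clean_scan_entry(uint16_t entry)
{
    uint16_t pblk_offset = entry & 0x7FFF;
    if (pblk_offset == 0 || pblk_offset >= PBLKS_PER_BANK)
        return 0;
    return pblk_offset;
}
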
Example 14
static void tc_write_rand(const UINT32 start_lsn, const UINT32 io_num, const UINT32 sector_size)
{
    UINT32 i, j, wr_buf_addr, rd_buf_addr, data, r_data;
    UINT32 lba, num_sectors = sector_size;
    UINT32 io_cnt = io_num;

    /* UINT32 volatile g_barrier = 0; while (g_barrier == 0); */
    led(0);
    srand(RANDOM_SEED);

    for (UINT32 loop = 0; loop < 1; loop++) {
        wr_buf_addr = WR_BUF_ADDR;
        data = 0;
        uart_printf("test loop cnt: %d", loop);

        for (i = 0; i < io_cnt; i++) {
            do {
                lba = rand() % IO_LIMIT;
            }while(lba + num_sectors >= IO_LIMIT);

            wr_buf_addr = WR_BUF_PTR(g_ftl_write_buf_id) + ((lba % SECTORS_PER_PAGE) * BYTES_PER_SECTOR);
            r_data = data;

            for (j = 0; j < num_sectors; j++) {
                mem_set_dram(wr_buf_addr, data, BYTES_PER_SECTOR);

                wr_buf_addr += BYTES_PER_SECTOR;

                if (wr_buf_addr >= WR_BUF_ADDR + WR_BUF_BYTES) {
                    wr_buf_addr = WR_BUF_ADDR;
                }
                data++;
            }
/*             ptimer_start(); */
            ftl_write(lba, num_sectors);
/*             ptimer_stop_and_uart_print(); */
            rd_buf_addr = RD_BUF_PTR(g_ftl_read_buf_id) + ((lba % SECTORS_PER_PAGE) * BYTES_PER_SECTOR);
/*             ptimer_start(); */
            ftl_read(lba, num_sectors);
/*             ptimer_stop_and_uart_print(); */

            flash_finish();

            for (j = 0; j < num_sectors; j++) {
                UINT32 sample = read_dram_32(rd_buf_addr);

                if (sample != r_data) {
                    uart_printf("ftl test fail...io#: %d, %d", lba, num_sectors);
                    uart_printf("sample data %d should be %d", sample, r_data);
                    led_blink();
                }
                rd_buf_addr += BYTES_PER_SECTOR;

                if (rd_buf_addr >= RD_BUF_ADDR + RD_BUF_BYTES) {
                    rd_buf_addr = RD_BUF_ADDR;
                }
                r_data++;
            }
        } // end for
    }
    ftl_flush();
}
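
Both this test and the sequential one below advance a buffer cursor sector by sector and wrap it at the end of the SATA buffer region. A one-function sketch of that wrap, with hypothetical buffer sizes in place of the firmware's:

#include <stdint.h>

/* hypothetical buffer geometry */
#define BYTES_PER_SECTOR 512
#define WR_BUF_BYTES     (1u << 20)

/* Advance a write-buffer cursor by one sector, wrapping at the end of the
 * buffer region exactly as the test loops do. */
static uint32_t advance_sector(uint32_t addr, uint32_t buf_base)
{
    addr += BYTES_PER_SECTOR;
    if (addr >= buf_base + WR_BUF_BYTES)
        addr = buf_base;
    return addr;
}
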
Example 15
static void tc_write_seq(const UINT32 start_lsn, const UINT32 io_num, const UINT32 sector_size)
{
    UINT32 i, j, wr_buf_addr, rd_buf_addr, data;
    UINT32 lba, num_sectors = sector_size;
    UINT32 io_cnt = io_num;
    UINT32 const start_lba = start_lsn;

    /* UINT32 volatile g_barrier = 0; while (g_barrier == 0); */
    led(0);

    // STEP 1 - write
    for (UINT32 loop = 0; loop < 5; loop++)
    {
        wr_buf_addr = WR_BUF_ADDR;
        data = 0;
        lba  = start_lba;

        uart_print_32(loop); uart_print("");

        for (i = 0; i < io_cnt; i++)
        {
            wr_buf_addr = WR_BUF_PTR(g_ftl_write_buf_id) + ((lba % SECTORS_PER_PAGE) * BYTES_PER_SECTOR);
            for (j = 0; j < num_sectors; j++)
            {
                mem_set_dram(wr_buf_addr, data, BYTES_PER_SECTOR);

                wr_buf_addr += BYTES_PER_SECTOR;

                if (wr_buf_addr >= WR_BUF_ADDR + WR_BUF_BYTES)
                {
                    wr_buf_addr = WR_BUF_ADDR;
                }
                data++;
            }
	    if( i == 0x0000081C)    // no-op branch, presumably kept as a breakpoint anchor for debugging
		    i = i;
            ptimer_start();
            ftl_write(lba, num_sectors);
            ptimer_stop_and_uart_print();

            lba += num_sectors;

            if (lba >= (UINT32)NUM_LSECTORS)
            {
                uart_print("adjust lba because of out of lba");
                lba = 0;
            }
        }

        // STEP 2 - read and verify
        rd_buf_addr = RD_BUF_ADDR;
        data = 0;
        lba  = start_lba;
        num_sectors = MIN(num_sectors, NUM_RD_BUFFERS * SECTORS_PER_PAGE);

        for (i = 0; i < io_cnt; i++)
        {
            rd_buf_addr = RD_BUF_PTR(g_ftl_read_buf_id) + ((lba % SECTORS_PER_PAGE) * BYTES_PER_SECTOR);
            /* ptimer_start(); */
	    if( i == 0x0000081C)    // no-op branch, presumably kept as a breakpoint anchor for debugging
		    i = i;
            ftl_read(lba, num_sectors);

            flash_finish();
            /* ptimer_stop_and_uart_print(); */

            for (j = 0; j < num_sectors; j++)
            {
                UINT32 sample = read_dram_32(rd_buf_addr);

                if (sample != data)
                {
                    uart_printf("ftl test fail...io#: %d, %d", lba, num_sectors);
                    uart_printf("sample data %d should be %d", sample, data);
                    led_blink();
                }

                rd_buf_addr += BYTES_PER_SECTOR;

                if (rd_buf_addr >= RD_BUF_ADDR + RD_BUF_BYTES)
                {
                    rd_buf_addr = RD_BUF_ADDR;
                }
                data++;
            }

            lba += num_sectors;

            if (lba >= IO_LIMIT + num_sectors)
            {
                lba = 0;
            }
        }
    }
    ftl_flush();
}
Example 16
static void evict_mapping(void)
{
	if(cmt[cmt_hand].lpn == INVALID)
		return;
	while(1)
	{
		if(cmt[cmt_hand].sc == TRUE)
		{
			cmt[cmt_hand].sc = FALSE;
			cmt_hand = (cmt_hand + 1) % CMT_SIZE;
		}
		else
			break;
	}

	UINT32 gtd_index;
	UINT32 victim_lpn, victim_vpn;
	UINT32 mapping_vpn;
	UINT32 mapping_bank;
	victim_vpn = cmt[cmt_hand].vpn;

	/*
	 * VICTIM : cmt_hand
	 * dirty : update all dirty entries that belong to the same translation page together
	 * clean : simply discard the entry
	 */
	if(IS_CLEAN(victim_vpn))
	{
		return;
	}

	//Dirty
	victim_lpn = cmt[cmt_hand].lpn;

	gtd_index = victim_lpn / (MAPPINGS_PER_PAGE*NUM_BANKS);
	mapping_bank = get_num_bank(victim_lpn);
	mapping_vpn = gtd[mapping_bank][gtd_index];

	if(mapping_vpn != INVALID)
	{
		map_read++;

		nand_page_read(mapping_bank,
				mapping_vpn / PAGES_PER_BLK,
				mapping_vpn % PAGES_PER_BLK,
				TRANS_BUF(mapping_bank));
	}
	else
	{
		mem_set_dram(TRANS_BUF(mapping_bank), 0, BYTES_PER_PAGE);
	}

	int index;
	for(index = 0; index < CMT_SIZE; index++)
	{
		if(get_num_bank(cmt[index].lpn) == mapping_bank)
		{
			if((!IS_CLEAN(cmt[index].vpn)) && \
					((cmt[index].lpn / (MAPPINGS_PER_PAGE*NUM_BANKS)) == gtd_index))
			{
				cmt[index].vpn = SET_CLEAN(cmt[index].vpn);
				write_dram_32(TRANS_BUF(mapping_bank) + \
						sizeof(UINT32 ) * ((cmt[index].lpn/NUM_BANKS) % MAPPINGS_PER_PAGE),
						cmt[index].vpn);
			}
		}
	}

	mapping_vpn = assign_new_map_write_vpn(mapping_bank);

	gtd[mapping_bank][gtd_index] = mapping_vpn;

	map_prog++;
	nand_page_program(mapping_bank,
			mapping_vpn / PAGES_PER_BLK,
			mapping_vpn % PAGES_PER_BLK,
			TRANS_BUF(mapping_bank));
}
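
evict_mapping() locates the translation page of a victim entry from its lpn: the bank comes from the striping function, the GTD index from dividing by the number of mappings that fit in one page across all banks, and the slot within the translation page from (lpn / NUM_BANKS). A small sketch of that arithmetic, under the assumption that get_num_bank() is simple modulo striping and with hypothetical sizes:

#include <stdint.h>

/* hypothetical geometry */
#define NUM_BANKS          8
#define MAPPINGS_PER_PAGE  512   /* 32-bit entries per translation page */

/* Locate the translation-page entry for an lpn, mirroring evict_mapping(). */
static void locate_mapping(uint32_t lpn,
                           uint32_t *bank, uint32_t *gtd_index, uint32_t *slot)
{
    *bank      = lpn % NUM_BANKS;                          /* assumed striping of get_num_bank() */
    *gtd_index = lpn / (MAPPINGS_PER_PAGE * NUM_BANKS);
    *slot      = (lpn / NUM_BANKS) % MAPPINGS_PER_PAGE;    /* offset written into TRANS_BUF */
}
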
Example 17
static void chunkInvalid()
{
    uart_print("chunkInvalid\n");
    UINT32 dst = FTL_BUF(0) + (chunkIdx_*BYTES_PER_CHUNK);
    mem_set_dram (dst, INVALID, BYTES_PER_CHUNK);
}
Example 18
void increaseLpnHotBlkSecondUsage (UINT32 const bank, LogCtrlBlock * ctrlBlock)
{
    uart_print("increaseLpnHotBlkSecondUsage\r\n");

    UINT32 lpn = ctrlBlock[bank].logLpn;
    UINT32 pageOffset = LogPageToOffset(lpn);

    if (pageOffset == UsedPagesPerLogBlk-1)
    {
        uart_print("Blk full\r\n");
        UINT32 lbn = LogPageToLogBlk(lpn);
        UINT32 vbn = get_log_vbn(bank, lbn);
        nand_page_ptprogram(bank,
                            vbn,
                            PAGES_PER_BLK - 1,
                            0,
                            (CHUNK_ADDR_BYTES * CHUNKS_PER_LOG_BLK + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR,
                            ctrlBlock[bank].lpnsListAddr,
                            RETURN_WHEN_DONE); // write lpns list to the last high page
        mem_set_dram(ctrlBlock[bank].lpnsListAddr, INVALID, (CHUNKS_PER_BLK * CHUNK_ADDR_BYTES));
        insertBlkInHeap(&heapDataSecondUsage, bank, lbn);

        findNewLpnForHotLog(bank, ctrlBlock);
    }

    else
    {
        lpn++;
        ctrlBlock[bank].logLpn = lpn;
        pageOffset++;

        //uart_print_level_1("increaseLpnHotBlkSecondUsage ");
        //uart_print_level_1_int(bank);
        //uart_print_level_1(" ");
        //uart_print_level_1_int(pageOffset);
        //uart_print_level_1("\r\n");

        if (pageOffset % 2 == 1)
        { // Next page is low

            if (ctrlBlock[bank].nextLowPageOffset == pageOffset)
            { // The page tested positively
                // Here we don't care if the page has already been prefetched because this can be done asynchronously
                ctrlBlock[bank].updateChunkPtr = updateChunkPtrRecycledPage;
                ctrlBlock[bank].useRecycledPage = TRUE;
            }

            else
            {

                if (pageOffset == 1)
                { // Special case: pageOffset 1 comes immediately after another low page, so there was no time for precaching
                    if(canReuseLowPage(bank, pageOffset, ctrlBlock))
                    {
                        ctrlBlock[bank].updateChunkPtr = updateChunkPtrRecycledPage;
                        ctrlBlock[bank].useRecycledPage = TRUE;
                        ctrlBlock[bank].precacheDone = FALSE;
                        ctrlBlock[bank].nextLowPageOffset = pageOffset;
                        return;
                    }
                }

                // Skip this page because it tested negatively

                // Set the next page to the next high page
                ctrlBlock[bank].updateChunkPtr = updateChunkPtr;
                ctrlBlock[bank].useRecycledPage = FALSE;
                lpn++;
                pageOffset++;
                ctrlBlock[bank].logLpn = lpn;

                // Already test the next low page
                pageOffset++;
                if (pageOffset < UsedPagesPerLogBlk-1)
                {
                    if(canReuseLowPage(bank, pageOffset, ctrlBlock))
                    {
                        ctrlBlock[bank].precacheDone = FALSE;
                        ctrlBlock[bank].nextLowPageOffset = pageOffset;
                    }
                    else
                    {
                        ctrlBlock[bank].precacheDone = FALSE;
                        ctrlBlock[bank].nextLowPageOffset = INVALID;
                    }
                }

            }
        }

        else
        { // Next page is high
            ctrlBlock[bank].updateChunkPtr = updateChunkPtr;
            ctrlBlock[bank].useRecycledPage = FALSE;

            // Already test the next low page
            pageOffset++;
            if (pageOffset < UsedPagesPerLogBlk-1)
            {
                if(canReuseLowPage(bank, pageOffset, ctrlBlock))
                {
                    ctrlBlock[bank].precacheDone = FALSE;
                    ctrlBlock[bank].nextLowPageOffset = pageOffset;
                }
                else
                {
                    ctrlBlock[bank].precacheDone = FALSE;
                    ctrlBlock[bank].nextLowPageOffset = INVALID;
                }
            }
            else
            {
                ctrlBlock[bank].precacheDone = FALSE;
                ctrlBlock[bank].nextLowPageOffset = INVALID;
            }
        }
    }

    uart_print("New logLpn "); uart_print_int(ctrlBlock[bank].logLpn);
    uart_print(" offset "); uart_print_int(LogPageToOffset(ctrlBlock[bank].logLpn)); uart_print("\r\n");
}
Example 19
void initLog()
{

    uart_print("Initializing Write Log Space...\r\n");
    uart_print("Initializing clean list...");
    //testCleanList();
    cleanListInit(&cleanListDataWrite, CleanList(0), LOG_BLK_PER_BANK);
    uart_print("done\r\n");

    //int off = __builtin_offsetof(LogCtrlBlock, increaseLpn);

    for(int bank=0; bank<NUM_BANKS; bank++)
    {
        adaptiveStepDown[bank] = initStepDown;
        adaptiveStepUp[bank] = initStepUp;
        nStepUps[bank] = 0;
        nStepDowns[bank] = 0;

        for(int lbn=0; lbn<LOG_BLK_PER_BANK; lbn++)
        {
            cleanListPush(&cleanListDataWrite, bank, lbn);
        }

        UINT32 lbn = cleanListPop(&cleanListDataWrite, bank);

        hotLogCtrl[bank] = (LogCtrlBlock)
        {
            .logLpn = lbn * PAGES_PER_BLK,
            .lpnsListAddr = LPNS_BUF_BASE_1(bank),
            .logBufferAddr = HOT_LOG_BUF(bank),
            .chunkPtr = 0,
            .increaseLpn=increaseLpnHotBlkFirstUsage,
            .updateChunkPtr=updateChunkPtr,
            .nextLowPageOffset=INVALID,
            .allChunksInLogAreValid = TRUE,
            .useRecycledPage=FALSE,
            .precacheDone=TRUE,
        };

        for(int chunk=0; chunk<CHUNKS_PER_PAGE; ++chunk)
        {
            hotLogCtrl[bank].dataLpn[chunk] = INVALID;
            hotLogCtrl[bank].chunkIdx[chunk] = INVALID;
        }

        lbn = cleanListPop(&cleanListDataWrite, bank);

        coldLogCtrl[bank] = (LogCtrlBlock)
        {
            .logLpn = lbn * PAGES_PER_BLK,
            .lpnsListAddr = LPNS_BUF_BASE_2(bank),
            .logBufferAddr = COLD_LOG_BUF(bank),
            .chunkPtr = 0,
            .increaseLpn=increaseLpnColdBlk,
            .updateChunkPtr=updateChunkPtr,
            .nextLowPageOffset=INVALID,
            .allChunksInLogAreValid = TRUE,
            .useRecycledPage=FALSE,
            .precacheDone=TRUE,
        };
        for(int chunk=0; chunk<CHUNKS_PER_PAGE; ++chunk)
        {
            coldLogCtrl[bank].dataLpn[chunk] = INVALID;
            coldLogCtrl[bank].chunkIdx[chunk] = INVALID;
        }

        nValidChunksFromHeap[bank] = INVALID;
    }
}

static void findNewLpnForColdLog(const UINT32 bank, LogCtrlBlock * ctrlBlock)
{
    uart_print("findNewLpnForColdLog bank "); uart_print_int(bank);

    if (cleanListSize(&cleanListDataWrite, bank) > 2)
    {
        uart_print(" use clean blk\r\n");
        uart_print("cleanList size = "); uart_print_int(cleanListSize(&cleanListDataWrite, bank)); uart_print("\r\n");

        UINT32 lbn = cleanListPop(&cleanListDataWrite, bank);
        ctrlBlock[bank].logLpn = lbn * PAGES_PER_BLK;
        ctrlBlock[bank].increaseLpn = increaseLpnColdBlk;
    }
    else
    {
        if (reuseCondition(bank))
        {
#if PrintStats
            uart_print_level_1("REUSECOLD\r\n");
#endif
            uart_print(" second usage\r\n");
            UINT32 lbn = getVictim(&heapDataFirstUsage, bank);
            UINT32 nValidChunks = getVictimValidPagesNumber(&heapDataFirstUsage, bank);
            resetValidChunksAndRemove(&heapDataFirstUsage, bank, lbn, CHUNKS_PER_LOG_BLK_FIRST_USAGE);
            resetValidChunksAndRemove(&heapDataSecondUsage, bank, lbn, CHUNKS_PER_LOG_BLK_SECOND_USAGE);
            resetValidChunksAndRemove(&heapDataCold, bank, lbn, nValidChunks);
            ctrlBlock[bank].logLpn = (lbn * PAGES_PER_BLK) + 2;
            ctrlBlock[bank].increaseLpn = increaseLpnColdBlkReused;
            nand_page_ptread(bank,
                             get_log_vbn(bank, lbn),
                             125,
                             0,
                             (CHUNK_ADDR_BYTES * CHUNKS_PER_LOG_BLK + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR,
                             ctrlBlock[bank].lpnsListAddr,
                             RETURN_WHEN_DONE); // Read the lpns list from the max low page (125) where it was previously written by increaseLpnHotBlkFirstUsage

        }
        else
        {
            uart_print(" get new block\r\n");
            UINT32 lbn = cleanListPop(&cleanListDataWrite, bank);
            ctrlBlock[bank].logLpn = lbn * PAGES_PER_BLK;
            ctrlBlock[bank].increaseLpn = increaseLpnColdBlk;
            while(cleanListSize(&cleanListDataWrite, bank) < 2)
            {
#if PrintStats
                uart_print_level_1("GCCOLD\r\n");
#endif
                garbageCollectLog(bank);
            }
        }
    }
}

void increaseLpnColdBlkReused (UINT32 const bank, LogCtrlBlock * ctrlBlock)
{
    uart_print("increaseLpnColdBlkReused bank "); uart_print_int(bank); uart_print("\r\n");

    UINT32 lpn = ctrlBlock[bank].logLpn;
    UINT32 pageOffset = LogPageToOffset(lpn);

    if (pageOffset == UsedPagesPerLogBlk-1)
    {
        UINT32 lbn = get_log_lbn(lpn);
        nand_page_ptprogram(bank,
                            get_log_vbn(bank, lbn),
                            PAGES_PER_BLK - 1,
                            0,
                            (CHUNK_ADDR_BYTES * CHUNKS_PER_LOG_BLK + BYTES_PER_SECTOR - 1) / BYTES_PER_SECTOR,
                            ctrlBlock[bank].lpnsListAddr,
                            RETURN_WHEN_DONE);
        mem_set_dram(ctrlBlock[bank].lpnsListAddr, INVALID, (CHUNKS_PER_BLK * CHUNK_ADDR_BYTES));
        insertBlkInHeap(&heapDataCold, bank, lbn);

        findNewLpnForColdLog(bank, ctrlBlock);
    }
    else
    {
        ctrlBlock[bank].logLpn = lpn+2;
    }

    uart_print("increaseLpnColdBlkReused (bank="); uart_print_int(bank); uart_print(") new lpn "); uart_print_int(ctrlBlock[bank].logLpn); uart_print("\r\n");
}
Example 20
void ftl_test()
{
	uart_print("Start testing flash utility...");

	UINT32 num_vsp, total_vsp = MAX_NUM_SP;
	UINT8  expected_bank = 0;

	srand(RAND_SEED);

	init_vsp_buf(0);
	init_val_buf(0);

	uart_printf("%u sub pages are going to be written and verified\r\n", total_vsp);

	uart_print("Write pages with different value in each sub page");
	num_vsp = 0;
	while (num_vsp < total_vsp) {
		UINT8 bank 	    = fu_get_idle_bank();
		BUG_ON("bank not as expected", expected_bank != bank);

		UINT32 vpn 	    = gc_allocate_new_vpn(bank);
		vp_t   vp	    = {.bank = bank, .vpn = vpn};

		uart_printf("write to bank %u, vpn %u\r\n", bank, vpn);

		UINT32 vspn  	    = vpn * SUB_PAGES_PER_PAGE;
		UINT8  vsp_offset   = 0;	
		while (vsp_offset < SUB_PAGES_PER_PAGE && num_vsp < total_vsp) {
			vsp_t  vsp	    = {.bank = bank, .vspn = vspn};
			vsp_or_int vsp2int  = {.as_vsp = vsp};
			UINT32 val	    = rand();

			set_vsp(num_vsp, vsp2int.as_int);
			set_val(num_vsp, val);
			mem_set_dram(FTL_WR_BUF(bank) + vsp_offset * BYTES_PER_SUB_PAGE,
				     val, BYTES_PER_SUB_PAGE);

			vspn++;
			vsp_offset++;
			num_vsp++;
		}
		
		fu_write_page(vp, FTL_WR_BUF(bank));
		// take a break so that idle banks can be predictable
		flash_finish();
		
		expected_bank = (expected_bank + 1) % NUM_BANKS;
	}
	
	uart_print("Read sub pages to validate page writing operation");	
	num_vsp = 0;
	while (num_vsp < total_vsp) {
		vsp_or_int 	int2vsp = {.as_int = get_vsp(num_vsp)};
		vsp_t		vsp = int2vsp.as_vsp;	
		UINT32		val = get_val(num_vsp);

		uart_printf("read from bank %u, vpn %u, sp_i %u\r\n", 
			    vsp.bank, 
			    vsp.vspn / SUB_PAGES_PER_PAGE, 
			    vsp.vspn % SUB_PAGES_PER_PAGE);

		fu_read_sub_page(vsp, COPY_BUF_ADDR, FU_SYNC);
		
		UINT8		sector_offset = vsp.vspn % SUB_PAGES_PER_PAGE * SECTORS_PER_SUB_PAGE;
		UINT8		wrong = is_buff_wrong(COPY_BUF_ADDR, 
						      val, 
				      		      sector_offset, 
						      SECTORS_PER_SUB_PAGE);
		BUG_ON("data read from flash is not the same as data written to flash", wrong);

		num_vsp++;
	}

	uart_print("Flash utility passed unit test ^_^");
}
Example 21
void init_jasmine(void)
{
	UINT32 i, bank;
	extern UINT32 Image$$ER_ZI$$ZI$$Base;
	extern UINT32 Image$$ER_ZI$$ZI$$Length;

	// PLL initialization

	SETREG(CLKSelCon, USE_BYPASS_CLK);

	SETREG(PllCon, PLL_PD); 					// power down
	delay(600);									// at least 500ns
	SETREG(PllCon, PLL_CLK_CONFIG | PLL_PD);	// change settings
	delay(600);									// at least 1us
	SETREG(PllCon, PLL_CLK_CONFIG);				// power up
	while ((GETREG(PllCon) & PLL_LD) == 0); 	// wait lock

	SETREG(CLKSelCon, USE_PLL_CLK);

	// reset hardware modules

	SETREG(PMU_ResetCon, RESET_SDRAM | RESET_BM | RESET_SATA | RESET_FLASH);

	// GPIO bits
	// There are 7 GPIO bits from 0 to 6.
	// 0: This bit is connected to J2 (Factory Mode jumper). The ROM firmware configures it as input mode.
	// While main firmware is running, it can be freely used for arbitrary purpose. Beware that a "1" output while
	// the Factory Mode jumper is set to Normal position (tied to ground) can lead to circuit damage.
	// A "0" output while the jumper is tied to Vcc will also lead to circuit damage.
	// 1: This bit is connected to J3 (Boot ROM). The controller hardware checks its status upon reset.
	// After the reset is done, you can remove the jumper and use the pin as output.
	// 2 through 5: The IO pins for these bits are shared between MAX3232C (UART chip) and J4.
	// In order to use J4, you have to turn on the switches of SW4 and turn off the switches 1 through 4 in SW2.
	// In order to use UART, you have to turn off the switches of SW4 and turn on the switches 1 through 4 in SW2.
	// 6: This bit is connected to D4 (LED) via SW2.

	#if OPTION_UART_DEBUG
	SETREG(GPIO_MOD, 0);
	SETREG(GPIO_DIR, BIT3 | BIT4 | BIT6);	// output pins: 3(UART_TXD), 4(UART_RTS), 6(LED)
	#else
	SETREG(GPIO_MOD, 7);
	SETREG(GPIO_DIR, BIT2 | BIT3 | BIT4 | BIT5 | BIT6);
	#endif

	SETREG(GPIO_REG, 0);					// initial state of LED is "off"

	// ZI region is zero-filled by hardware.
	mem_set_sram((UINT32) &Image$$ER_ZI$$ZI$$Base, 0x00000000, (UINT32) &Image$$ER_ZI$$ZI$$Length);

	SETREG(PHY_DEBUG, 0x40000139);
	while((GETREG(PHY_DEBUG) & BIT30) == 1);

	SETREG(SDRAM_INIT, SDRAM_PARAM_MAIN_FW_INIT);
	SETREG(SDRAM_REFRESH, SDRAM_PARAM_MAIN_FW_REFRESH);
	SETREG(SDRAM_TIMING, SDRAM_PARAM_MAIN_FW_TIMING);
	SETREG(SDRAM_MRS, SDRAM_PARAM_MAIN_FW_MRS);
	SETREG(SDRAM_CTRL, SDRAM_INITIALIZE);		// initialization of SDRAM begins now
	while (GETREG(SDRAM_STATUS) & 0x00000010);	// wait until the initialization completes (200us)

	for (i = 0; i < DRAM_SIZE / MU_MAX_BYTES; i++)
	{
		mem_set_dram(DRAM_BASE + i * MU_MAX_BYTES, 0x00000000, MU_MAX_BYTES);
	}

	#if OPTION_UART_DEBUG
	uart_init();
	uart_print("Welcome to OpenSSD");
	#endif

	SETREG(SDRAM_ECC_MON, 0xFFFFFFFF);

	// configure SDRAM interrupt
	SETREG(SDRAM_INTCTRL, SDRAM_INT_ENABLE);

	// clear interrupt flags in DRAM controller
	SETREG(SDRAM_INTSTATUS, 0xFFFFFFFF);

	// configure ICU
	SETREG(APB_ICU_CON, INTR_SATA);	// SATA = FIQ, other = IRQ
	SETREG(APB_INT_MSK, INTR_SATA | INTR_FLASH | INTR_SDRAM | INTR_TIMER_1 | INTR_TIMER_2 | INTR_TIMER_3);

	// clear interrupt flags in ICU
	SETREG(APB_INT_STS, 0xFFFFFFFF);

	flash_reset();

	SETREG(FCONF_PAUSE, 0);
	SETREG(INTR_MASK, 0);

	for (bank = 0; bank < NUM_BANKS; bank++)
	{
		flash_clear_irq();

		SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
		SETREG(FCP_OPTION, 0x06);				// FO_E
		SETREG(FCP_DMA_ADDR, g_temp_mem);
		SETREG(FCP_DMA_CNT, BYTES_PER_SECTOR);
		SETREG(FCP_COL, 0);
		SETREG(FCP_ROW_L(bank), STAMP_PAGE_OFFSET);
		SETREG(FCP_ROW_H(bank), STAMP_PAGE_OFFSET);

		flash_issue_cmd(bank, RETURN_WHEN_DONE);

		if ( (BSP_INTR(bank) & 0xFE)== 0 )
			break;
	}

	#if OPTION_FTL_TEST == FALSE
	sata_reset();
	#endif

    ftl_open();

	#if OPTION_FTL_TEST == TRUE
	extern void ftl_test();
	ftl_test();
    led(1);
    while (1);
    #endif
}
Example 22
static void write_page(UINT32 const lpn, UINT32 const sect_offset, UINT32 const num_sectors)
{
	write_p++;

	UINT32 bank, old_vpn, new_vpn;
	UINT32 vblock, page_num, page_offset, column_cnt;

	bank        = get_num_bank(lpn); // page striping
	page_offset = sect_offset;
	column_cnt  = num_sectors;

	new_vpn  = assign_new_write_vpn(bank);
	old_vpn  = get_vpn(lpn);
	if (old_vpn != NULL)
	{
		vblock   = old_vpn / PAGES_PER_BLK;
		page_num = old_vpn % PAGES_PER_BLK;
		if (num_sectors != SECTORS_PER_PAGE)
		{
			if ((num_sectors <= 8) && (page_offset != 0))
			{
				// one page async read
				data_read++;
				nand_page_read(bank,
						vblock,
						page_num,
						FTL_BUF(bank));
				// copy `left hole sectors' into SATA write buffer
				if (page_offset != 0)
				{
					mem_copy(WR_BUF_PTR(g_ftl_write_buf_id),
							FTL_BUF(bank),
							page_offset * BYTES_PER_SECTOR);
				}
				// copy `right hole sectors' into SATA write buffer
				if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
				{
					UINT32 const rhole_base = (page_offset + column_cnt) * BYTES_PER_SECTOR;

					mem_copy(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base,
							FTL_BUF(bank) + rhole_base,
							BYTES_PER_PAGE - rhole_base);
				}
			}
			// left/right hole async read operation (two partial page read)
			else
			{
				// read `left hole sectors'
				if (page_offset != 0)
				{
					data_read++;
					nand_page_ptread(bank,
							vblock,
							page_num,
							0,
							page_offset,
							WR_BUF_PTR(g_ftl_write_buf_id),
							RETURN_WHEN_DONE);
				}
				// read `right hole sectors'
				if ((page_offset + column_cnt) < SECTORS_PER_PAGE)
				{
					data_read++;
					nand_page_ptread(bank,
							vblock,
							page_num,
							page_offset + column_cnt,
							SECTORS_PER_PAGE - (page_offset + column_cnt),
							WR_BUF_PTR(g_ftl_write_buf_id),
							RETURN_WHEN_DONE);
				}
			}
		}
		set_vcount(bank, vblock, get_vcount(bank, vblock) - 1);
	}
	else if (num_sectors != SECTORS_PER_PAGE)
	{
		if(page_offset != 0)
			mem_set_dram(WR_BUF_PTR(g_ftl_write_buf_id),
					0,
					page_offset * BYTES_PER_SECTOR);
		if((page_offset + num_sectors) < SECTORS_PER_PAGE)
		{
			UINT32 const rhole_base = (page_offset + num_sectors) * BYTES_PER_SECTOR;
			mem_set_dram(WR_BUF_PTR(g_ftl_write_buf_id) + rhole_base, 0, BYTES_PER_PAGE - rhole_base);
		}
	}
	vblock   = new_vpn / PAGES_PER_BLK;
	page_num = new_vpn % PAGES_PER_BLK;

	// write new data (make sure that the new data is ready in the write buffer frame)
	// (c.f FO_B_SATA_W flag in flash.h)
	data_prog++;
	nand_page_program_from_host(bank,
			vblock,
			page_num);
	// update metadata
	set_lpn(bank, page_num, lpn);
	set_vpn(lpn, new_vpn);
	set_vcount(bank, vblock, get_vcount(bank, vblock) + 1);
}
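
For a partial page write, write_page() has to fill the "left hole" before sect_offset and the "right hole" after the written sectors, either from the old copy of the page or with zeroes. A small sketch that just computes those two hole ranges, with a hypothetical page size:

#include <stdint.h>
#include <stdio.h>

#define SECTORS_PER_PAGE 64        /* hypothetical geometry */

/* For a partial page write starting at sect_offset covering num_sectors,
 * report the holes that must be filled before the full page is programmed. */
static void page_holes(uint32_t sect_offset, uint32_t num_sectors)
{
    if (sect_offset != 0)
        printf("left hole : sectors [0, %u)\n", sect_offset);
    if (sect_offset + num_sectors < SECTORS_PER_PAGE)
        printf("right hole: sectors [%u, %u)\n",
               sect_offset + num_sectors, SECTORS_PER_PAGE);
}

int main(void) { page_holes(5, 20); return 0; }
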
Example 23
static void build_bad_blk_list(void)
{
	UINT32 bank, num_entries, result, vblk_offset;
	scan_list_t* scan_list = (scan_list_t*) TEMP_BUF_ADDR;

	mem_set_dram(BAD_BLK_BMP_ADDR, NULL, BAD_BLK_BMP_BYTES);

	disable_irq();

	flash_clear_irq();

	for (bank = 0; bank < NUM_BANKS; bank++)
	{
		SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
		SETREG(FCP_BANK, REAL_BANK(bank));
		SETREG(FCP_OPTION, FO_E);
		SETREG(FCP_DMA_ADDR, (UINT32) scan_list);
		SETREG(FCP_DMA_CNT, SCAN_LIST_SIZE);
		SETREG(FCP_COL, 0);
		SETREG(FCP_ROW_L(bank), SCAN_LIST_PAGE_OFFSET);
		SETREG(FCP_ROW_H(bank), SCAN_LIST_PAGE_OFFSET);

		SETREG(FCP_ISSUE, NULL);
		while ((GETREG(WR_STAT) & 0x00000001) != 0);
		while (BSP_FSM(bank) != BANK_IDLE);

		num_entries = NULL;
		result = OK;

		if (BSP_INTR(bank) & FIRQ_DATA_CORRUPT)
		{
			result = FAIL;
		}
		else
		{
			UINT32 i;

			num_entries = read_dram_16(&(scan_list->num_entries));

			if (num_entries > SCAN_LIST_ITEMS)
			{
				result = FAIL;
			}
			else
			{
				for (i = 0; i < num_entries; i++)
				{
					UINT16 entry = read_dram_16(scan_list->list + i);
					UINT16 pblk_offset = entry & 0x7FFF;

					if (pblk_offset == 0 || pblk_offset >= PBLKS_PER_BANK)
					{
						#if OPTION_REDUCED_CAPACITY == FALSE
						result = FAIL;
						#endif
					}
					else
					{
						write_dram_16(scan_list->list + i, pblk_offset);
					}
				}
			}
		}

		if (result == FAIL)
		{
			num_entries = 0;  // We cannot trust this scan list. Perhaps a software bug.
		}
		else
		{
			write_dram_16(&(scan_list->num_entries), 0);
		}

		g_bad_blk_count[bank] = 0;

		for (vblk_offset = 1; vblk_offset < VBLKS_PER_BANK; vblk_offset++)
		{
			BOOL32 bad = FALSE;

			#if OPTION_2_PLANE
			{
				UINT32 pblk_offset;

				pblk_offset = vblk_offset * NUM_PLANES;

                // fix bug@jasmine v.1.1.0
				if (mem_search_equ_dram(scan_list, sizeof(UINT16), num_entries + 1, pblk_offset) < num_entries + 1)
				{
					bad = TRUE;
				}

				pblk_offset = vblk_offset * NUM_PLANES + 1;

                // fix bug@jasmine v.1.1.0
				if (mem_search_equ_dram(scan_list, sizeof(UINT16), num_entries + 1, pblk_offset) < num_entries + 1)
				{
					bad = TRUE;
				}
			}
			#else
			{
                // fix bug@jasmine v.1.1.0
				if (mem_search_equ_dram(scan_list, sizeof(UINT16), num_entries + 1, vblk_offset) < num_entries + 1)
				{
					bad = TRUE;
				}
			}
			#endif

			if (bad)
			{
				g_bad_blk_count[bank]++;
				set_bit_dram(BAD_BLK_BMP_ADDR + bank*(VBLKS_PER_BANK/8 + 1), vblk_offset);
			}
		}
	}
}
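
build_bad_blk_list() records bad blocks in a DRAM bitmap, one bit per vblock and one row of (VBLKS_PER_BANK/8 + 1) bytes per bank. A host-side sketch of the same layout, assuming set_bit_dram() uses LSB-first bit order within a byte (the actual bit order is a hardware detail of the Jasmine memory utilities):

#include <stdint.h>

/* hypothetical geometry */
#define NUM_BANKS          8
#define VBLKS_PER_BANK     4096
#define BMP_BYTES_PER_BANK (VBLKS_PER_BANK / 8 + 1)

static uint8_t bad_blk_bmp[NUM_BANKS * BMP_BYTES_PER_BANK];

/* Mark a vblock as bad: one bit per vblock, one row per bank. */
static void mark_bad(uint32_t bank, uint32_t vblk_offset)
{
    bad_blk_bmp[bank * BMP_BYTES_PER_BANK + vblk_offset / 8] |= (uint8_t)(1u << (vblk_offset % 8));
}

/* Query the bitmap, as is_bad_block() would. */
static int is_bad(uint32_t bank, uint32_t vblk_offset)
{
    return (bad_blk_bmp[bank * BMP_BYTES_PER_BANK + vblk_offset / 8] >> (vblk_offset % 8)) & 1;
}
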