Example #1
0
void ftl_write(UINT32 const lba, UINT32 const total_sectors)
{
	UINT32 num_sectors_to_write;

	UINT32 sect_offset = lba % SECTORS_PER_PAGE;
	UINT32 remain_sectors = total_sectors;

	while (remain_sectors != 0)
	{
		if (sect_offset + remain_sectors >= SECTORS_PER_PAGE)
		{
			num_sectors_to_write = SECTORS_PER_PAGE - sect_offset;
		}
		else
		{
			num_sectors_to_write = remain_sectors;
		}

		while (g_ftl_write_buf_id == GETREG(SATA_WBUF_PTR));	// bm_write_limit should not outpace SATA_WBUF_PTR

		g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;		// Circular buffer

		SETREG(BM_STACK_WRSET, g_ftl_write_buf_id);	// change bm_write_limit
		SETREG(BM_STACK_RESET, 0x01);				// change bm_write_limit

		sect_offset = 0;
		remain_sectors -= num_sectors_to_write;
	}
}
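The buffer-manager handshake in the loop above (wait until SATA_WBUF_PTR has moved past g_ftl_write_buf_id, advance the buffer index, then raise bm_write_limit through BM_STACK_WRSET/BM_STACK_RESET) recurs in several of the examples below. A minimal sketch that factors it into a helper, assuming only the globals and register macros already shown here; the helper name is introduced for illustration:

static void release_next_write_buffer(void)
{
	while (g_ftl_write_buf_id == GETREG(SATA_WBUF_PTR));	// wait until the host has filled the current buffer

	g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;	// circular buffer

	SETREG(BM_STACK_WRSET, g_ftl_write_buf_id);		// advance bm_write_limit
	SETREG(BM_STACK_RESET, 0x01);
}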
Example #2
0
void ftl_read(UINT32 const lba, UINT32 const total_sectors)
{
	UINT32 num_sectors_to_read;

	UINT32 lpage_addr		= lba / SECTORS_PER_PAGE;	// logical page address
	UINT32 sect_offset 		= lba % SECTORS_PER_PAGE;	// sector offset within the page
	UINT32 sectors_remain	= total_sectors;

	while (sectors_remain != 0)	// one page per iteration
	{
		if (sect_offset + sectors_remain < SECTORS_PER_PAGE)
		{
			num_sectors_to_read = sectors_remain;
		}
		else
		{
			num_sectors_to_read = SECTORS_PER_PAGE - sect_offset;
		}

		UINT32 next_read_buf_id = (g_ftl_read_buf_id + 1) % NUM_RD_BUFFERS;

		while (next_read_buf_id == GETREG(SATA_RBUF_PTR));	// wait if the read buffer is full (slow host)

		SETREG(BM_STACK_RDSET, next_read_buf_id);	// change bm_read_limit
		SETREG(BM_STACK_RESET, 0x02);				// change bm_read_limit

		g_ftl_read_buf_id = next_read_buf_id;

		sect_offset = 0;
		sectors_remain -= num_sectors_to_read;
		lpage_addr++;
	}
}
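The read path does the handshake the other way around: it picks a tentative next buffer, waits until the host has drained the oldest one, and only then publishes the new bm_read_limit. A sketch of that sequence as a helper, again using only names from the example above (the helper name is illustrative):

static void release_next_read_buffer(void)
{
	UINT32 next_read_buf_id = (g_ftl_read_buf_id + 1) % NUM_RD_BUFFERS;

	while (next_read_buf_id == GETREG(SATA_RBUF_PTR));	// wait if the read buffers are full (slow host)

	SETREG(BM_STACK_RDSET, next_read_buf_id);		// advance bm_read_limit
	SETREG(BM_STACK_RESET, 0x02);

	g_ftl_read_buf_id = next_read_buf_id;
}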
Example #3
0
void ftl_trim(UINT32 const lba, UINT32 const num_sectors)
{
	ASSERT(num_sectors > 0);

	uart_printf("Num sectors: %u", num_sectors);
	uart_printf("SATA_WBUF_PTR: %u", GETREG(SATA_WBUF_PTR));
	uart_printf("g_ftl_write_buf_id: %u", g_ftl_write_buf_id);

	UINT32 next_write_buf_id = (g_ftl_write_buf_id + num_sectors) % NUM_WR_BUFFERS;

	for (UINT32 i=0;i<num_sectors;i++)
	{
		for (UINT32 j=0;j<512/8;j=j+2)
		{
			UINT32 address = read_dram_32(WR_BUF_PTR(g_ftl_write_buf_id)+j*sizeof(UINT32));
			UINT32 reg2 = read_dram_32(WR_BUF_PTR(g_ftl_write_buf_id)+(j+1)*sizeof(UINT32));
			UINT32 count = reg2 & 0xFFFF0000; // Range length sits in the upper 16 bits of the second word.

			// Skip the entry if the range length is zero, or if the LBA needs more than
			// 32 bits (the lower 16 bits of the second word hold LBA bits 47:32).
			// Neither case is expected here unless the entry is malformed.
			if (count == 0 || (reg2 & 0x0000FFFF) > 0)
				continue;

//			uart_print_hex(address);
//			uart_print_hex(count);
		}

		g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;
	}
	SETREG(BM_STACK_WRSET, next_write_buf_id);	// change bm_write_limit
	SETREG(BM_STACK_RESET, 0x01);				// change bm_write_limit
}
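For reference, the 8-byte entries walked by the inner loop above follow the ATA DATA SET MANAGEMENT (TRIM) range layout. A sketch of that layout as seen through the two little-endian 32-bit reads; the struct name is introduced here for illustration:

typedef struct
{
	UINT32 lba_low;			// LBA bits 31:0  (the "address" word above)
	UINT32 length_lba_high;	// bits 15:0 = LBA bits 47:32, bits 31:16 = range length (the "reg2" word above)
} trim_range_entry;			// a 512-byte sector holds 64 such entries; the loop above walks the first 32 of them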
Example #4
0
void uart_init(void)
{
	UINT32 uart_ctrl;
	UINT32 clkdiv_baudrate;

	SETREG(UART_FIFOCTRL, 0x03);		// reset FIFO

	ASSERT((GETREG(UART_FIFOCNT) == 0x800));

	uart_ctrl = (WORDLEN_8BIT << 0)
				| (ONE_STOPBIT << 2)
				| (NO_PARITY << 3)
				| (0 << 6)				// uart_enable
				| (0 << 7)				// clear_polarity
				| (0 << 8)				// rdy_polarity
				| (0 << 9)				// auto_flow_control_enable
				| (0 << 10)				// ir_rx_invmode
				| (0 << 11);			// ir_mode_enable

	SETREG(UART_CTRL, uart_ctrl);

	clkdiv_baudrate = GETREG(UART_BAUDRATE) & 0xFFFF0000;
	clkdiv_baudrate |= (UINT32) (1 << 21) * (UART_COMMBAUDRATE / 100) / (CLOCK_SPEED/200) + 1;
	SETREG(UART_BAUDRATE, clkdiv_baudrate);
	SETREG(UART_FIFOCTRL, 0x00000000);

	uart_ctrl = uart_ctrl | (1 << 6);	// uart_enable
	SETREG(UART_CTRL, uart_ctrl);
}
Example #5
0
static int
msm_bus_receive(struct uart_softc *sc)
{
	struct msm_uart_softc *u = (struct msm_uart_softc *)sc;
	struct uart_bas *bas;
	int c;

	bas = &sc->sc_bas;
	uart_lock(sc->sc_hwmtx);

	/* Initialize Receive Path and interrupt */
	SETREG(bas, UART_DM_CR, UART_DM_RESET_STALE_INT);
	SETREG(bas, UART_DM_CR, UART_DM_STALE_EVENT_ENABLE);
	u->ier |= UART_DM_RXLEV;
	SETREG(bas, UART_DM_IMR, u->ier);

	/* Loop over until we are full, or no data is available */
	while (uart_getreg(bas, UART_DM_SR) & UART_DM_SR_RXRDY) {
		if (uart_rx_full(sc)) {
			/* No space left in input buffer */
			sc->sc_rxbuf[sc->sc_rxput] = UART_STAT_OVERRUN;
			break;
		}

		/* Read RX FIFO */
		c = uart_getreg(bas, UART_DM_RF(0));
		uart_barrier(bas);

		uart_rx_put(sc, c);
	}

	uart_unlock(sc->sc_hwmtx);

	return (0);
}
Example #6
0
/* NOTE: This function calls rebuildPageToFtlBuf with GcMode, therefore the valid chunks counters of old blocks are already managed.
 * Do not call manageOldChunks before calling this!
 */
static void appendPageToSWBlock (const UINT32 dataLpn, const UINT32 sectOffset, const UINT32 nSects)
{
    uart_print("appendPageToSWBlock dataLpn="); uart_print_int(dataLpn);
    uart_print(", sectOffset="); uart_print_int(sectOffset);
    uart_print(", nSects="); uart_print_int(nSects); uart_print("\r\n");
    UINT32 nSectsToWrite = SECTORS_PER_PAGE - sectOffset;
    UINT32 logLpn = getSWLpn(bank_);
    UINT32 vBlk = get_log_vbn(bank_, LogPageToLogBlk(logLpn));
    UINT32 dst = FTL_BUF(0) + (sectOffset*BYTES_PER_SECTOR);
    UINT32 src = WR_BUF_PTR(g_ftl_write_buf_id)+(sectOffset*BYTES_PER_SECTOR);
    rebuildPageToFtlBuf(dataLpn, 0, SECTORS_PER_PAGE, GcMode); // Rebuild rest of the page in FTL buffer (rebuild entire page to be sure that all chunks are correctly garbage collected, especially if they are in DRAM)
    //waitBusyBank(bank_);
    flash_finish();
    mem_copy(dst, src, nSectsToWrite * BYTES_PER_SECTOR);                                       // Fill FTL buffer with new data
    //TODO: this program operation shouldn't be synchronous; we need a global variable storing the last bank writing data from FTL_BUF(0)
    nand_page_program(bank_, vBlk, LogPageToOffset(logLpn), FTL_BUF(0), RETURN_WHEN_DONE);      // Write FTL buffer to the next sequential page
    UINT32 chunkIdx;
    for(chunkIdx=0; chunkIdx<sectOffset / SECTORS_PER_CHUNK; ++chunkIdx)
    { // For sectors before the start of the new data, update the mapping only if there was previously valid data (now carried into the new page); otherwise insert INVALID in the lpns list to speed up GC later
        if (ChunksMapTable(dataLpn, chunkIdx) > DRAM_BASE + DRAM_SIZE)
        {
            uart_print_level_1("ERROR in appendPageToSWBlk 1: reading above DRAM address space\r\n");
        }
        if (read_dram_32(ChunksMapTable(dataLpn, chunkIdx)) != INVALID)
        {
            UINT32 lChunkAddr = (logLpn * CHUNKS_PER_PAGE) + chunkIdx;
            if((chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)) >=(DRAM_BASE + DRAM_SIZE))
            {
                uart_print_level_1("ERROR in write::appendPageToSWBlk 1: writing to "); uart_print_level_1_int(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)); uart_print_level_1("\r\n");
            }
            write_dram_32(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx), dataLpn);
            write_dram_32(ChunksMapTable(dataLpn, chunkIdx), (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + lChunkAddr);
        }
        else
        { //Decrement valid chunks in the blk we're going to write in because we inserted null data
            if((chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)) >=(DRAM_BASE + DRAM_SIZE))
            {
                uart_print_level_1("ERROR in write::appendPageToSWBlk 2: writing to "); uart_print_level_1_int(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)); uart_print_level_1("\r\n");
            }
            write_dram_32(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx), INVALID);
            decrementValidChunks(&heapDataWrite, bank_, LogPageToLogBlk(logLpn));
        }
    }
    for( ; chunkIdx < CHUNKS_PER_PAGE; ++chunkIdx)
    { // The new sectors are instead all valid, therefore we don't bother checking if they were valid before
            UINT32 lChunkAddr = (logLpn * CHUNKS_PER_PAGE) + chunkIdx;
            if((chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)) >=(DRAM_BASE + DRAM_SIZE))
            {
                uart_print_level_1("ERROR in write::appendPageToSWBlk 3: writing to "); uart_print_level_1_int(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx)); uart_print_level_1("\r\n");
            }
            write_dram_32(chunkInLpnsList(SWCtrl[bank_].lpnsListPtr, LogPageToOffset(logLpn), chunkIdx), dataLpn);
            write_dram_32(ChunksMapTable(dataLpn, chunkIdx), (bank_ * LOG_BLK_PER_BANK * CHUNKS_PER_BLK) + lChunkAddr);
    }
    SWCtrl[bank_].nextDataLpn=dataLpn+1;
    increaseSWLpn(bank_);
    g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;
    SETREG (BM_STACK_WRSET, g_ftl_write_buf_id);
    SETREG (BM_STACK_RESET, 0x01);
}
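As the NOTE at the top of this example stresses, the old-block valid-chunk accounting already happens inside the rebuildPageToFtlBuf(.., GcMode) call. A hedged reminder of the intended call order (the caller and the manageOldChunks arguments are assumptions, not taken from this code):

/* Intended usage (sketch):
 *     appendPageToSWBlock(dataLpn, sectOffset, nSects);   // old blocks' valid-chunk counters adjusted internally
 * and NOT:
 *     manageOldChunks(...);                                // would repeat the accounting done under GcMode
 *     appendPageToSWBlock(dataLpn, sectOffset, nSects);
 */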
Example #7
0
static UINT32 opDIVUr(void)	// divu r1,r2
{
	UINT32 op1=GETREG(GET1);
	UINT32 op2=GETREG(GET2);
	if(op1)
	{
		SETREG(30,(INT32)(op2%op1));
		SETREG(GET2,(INT32)(op2/op1));
		SET_OV((op1^op2^GETREG(GET2)) == 0x80000000);
		CHECK_ZS(GETREG(GET2));
	}
	return clkIF;
}
Example #8
0
void nand_block_erase_sync(UINT32 const bank, UINT32 const vblock)
{
    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);

	SETREG(FCP_CMD, FC_ERASE);
	SETREG(FCP_BANK, REAL_BANK(bank));
	SETREG(FCP_OPTION, FO_P); // if OPTION_2_PLANE == 0, FO_P will be zero.
	SETREG(FCP_ROW_H(bank), vblock * PAGES_PER_VBLK);
	SETREG(FCP_ROW_L(bank), vblock * PAGES_PER_VBLK);

    flash_issue_cmd(bank, RETURN_WHEN_DONE);
}
Example #9
0
#ifdef __GNUC__
void irq_handler(void)
#else
__irq void irq_handler(void)
#endif
{
	UINT32 intr_stat = GETREG(APB_INT_STS);

	if (intr_stat & (INTR_TIMER_1 | INTR_TIMER_2 | INTR_TIMER_3))
	{
		g_timer_interrupt_count++;

		CLEAR_TIMER_INTR(TIMER_CH1);
		CLEAR_TIMER_INTR(TIMER_CH2);
		CLEAR_TIMER_INTR(TIMER_CH3);
		SETREG(APB_INT_STS, INTR_TIMER_1 | INTR_TIMER_2 | INTR_TIMER_3);
	}
	else if (intr_stat & INTR_FLASH)
	{
		ftl_isr();
	}
	else if (intr_stat & INTR_SDRAM)
	{
		UINT32 sdram_interrupt = GETREG(SDRAM_INTSTATUS);

	    SETREG(SDRAM_INTSTATUS, 0xFFFFFFFF);

		// clear the DRAM interrupt flag at the interrupt controller
	    SETREG(APB_INT_STS, INTR_SDRAM);

		if (sdram_interrupt & SDRAM_INT_ECC_CORR)
		{
			// Bit errors were detected and corrected.
			// Usually this is NOT an indication of poor SDRAM quality.
			// If the firmware has a bug due to which SDRAM is written by CPU without the help of mem util functions,
			// ECC correction or ECC failure can happen.

			g_sdram_ecc_count++;
		}

		if (sdram_interrupt & SDRAM_INT_ECC_FAIL)
		{
			// Bit errors were detected but could not be corrected.
			g_sdram_ecc_fail_count++;
		}

		if (sdram_interrupt & SDRAM_INT_ADDR_OF)
		{
			// There was an attempt to access beyond DRAM address boundary.
            uart_printf("Error: SDRAM interrupt occred: attempt to access beyond DRAM address boundary");
            led_blink();
		}
	}
}
Example #10
0
void nand_page_ptprogram_from_host(UINT32 const bank, UINT32 const vblock, UINT32 const page_num, UINT32 const sect_offset, UINT32 const num_sectors)
{
    UINT32 row;

    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    row = (vblock * PAGES_PER_BLK) + page_num;

    SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
#if OPTION_FTL_TEST == TRUE
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
#else
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_SATA_W);
#endif
    SETREG(FCP_DMA_ADDR, WR_BUF_PTR(g_ftl_write_buf_id));
    SETREG(FCP_DMA_CNT, num_sectors * BYTES_PER_SECTOR);

    SETREG(FCP_COL, sect_offset);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    flash_issue_cmd(bank, RETURN_ON_ISSUE);

    g_ftl_write_buf_id = (g_ftl_write_buf_id + 1) % NUM_WR_BUFFERS;
}
Example #11
0
void nand_block_erase(UINT32 const bank, UINT32 const vblock)
{
    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);

	SETREG(FCP_CMD, FC_ERASE);
	SETREG(FCP_BANK, REAL_BANK(bank));
	SETREG(FCP_OPTION, FO_P); // if OPTION_2_PLANE == 0, FO_P will be zero.
	SETREG(FCP_ROW_H(bank), vblock * PAGES_PER_VBLK);
	SETREG(FCP_ROW_L(bank), vblock * PAGES_PER_VBLK);

    flash_issue_cmd(bank, RETURN_ON_ISSUE);

/*     uart_printf("erase block #: %d, %d", bank, vblock); */
}
Example #12
0
void ata_read_native_max_address(UINT32 lba, UINT32 sector_count)
{
	UINT32 fis_type = FISTYPE_REGISTER_D2H;
	UINT32 flags = B_IRQ;
	UINT32 status = B_DRDY | BIT4;

	SETREG(SATA_FIS_D2H_0, fis_type | (flags << 8) | (status << 16));

	UINT32 fis_d1 = GETREG(SATA_FIS_H2D_1);

	if (g_sata_context.slow_cmd.code == ATA_READ_NATIVE_MAX_ADDRESS_EXT)
	{
		SETREG(SATA_FIS_D2H_1, (fis_d1 & 0xFF000000) | (MAX_LBA & 0x00FFFFFF));
		SETREG(SATA_FIS_D2H_2, MAX_LBA >> 24);
	}
}
Example #13
0
static void
msm_bus_grab(struct uart_softc *sc)
{
	struct uart_bas *bas = &sc->sc_bas;

	/*
	 * XXX: Turn off all interrupts to enter polling mode. Leave the
	 * saved mask alone. We'll restore whatever it was in ungrab.
	 */
	uart_lock(sc->sc_hwmtx);
	SETREG(bas, UART_DM_CR, UART_DM_RESET_STALE_INT);
	SETREG(bas, UART_DM_IMR, 0);
	uart_barrier(bas);
	uart_unlock(sc->sc_hwmtx);
}
Example #14
0
static BOOL32 check_format_mark(void)
{
	// This function reads a flash page from (bank #0, block #0) in order to check whether the SSD is formatted or not.

#ifdef __GNUC__
	extern UINT32 size_of_firmware_image;
	UINT32 firmware_image_pages = (((UINT32) (&size_of_firmware_image)) + BYTES_PER_FW_PAGE - 1) / BYTES_PER_FW_PAGE;
#else
	extern UINT32 Image$$ER_CODE$$RO$$Length;
	extern UINT32 Image$$ER_RW$$RW$$Length;
	UINT32 firmware_image_bytes = ((UINT32) &Image$$ER_CODE$$RO$$Length) + ((UINT32) &Image$$ER_RW$$RW$$Length);
	UINT32 firmware_image_pages = (firmware_image_bytes + BYTES_PER_FW_PAGE - 1) / BYTES_PER_FW_PAGE;
#endif

	UINT32 format_mark_page_offset = FW_PAGE_OFFSET + firmware_image_pages;
	UINT32 temp;

	flash_clear_irq();	// clear any flash interrupt flags that might have been set

	SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
	SETREG(FCP_BANK, REAL_BANK(0));
	SETREG(FCP_OPTION, FO_E);
	SETREG(FCP_DMA_ADDR, FTL_BUF_ADDR); 	// flash -> DRAM
	SETREG(FCP_DMA_CNT, BYTES_PER_SECTOR);
	SETREG(FCP_COL, 0);
	SETREG(FCP_ROW_L(0), format_mark_page_offset);
	SETREG(FCP_ROW_H(0), format_mark_page_offset);

	// At this point, we do not have to check Waiting Room status before issuing a command,
	// because scan list loading has been completed just before this function is called.
	SETREG(FCP_ISSUE, NULL);

	// wait for the FC_COL_ROW_READ_OUT command to be accepted by bank #0
	while ((GETREG(WR_STAT) & 0x00000001) != 0);

	// wait until bank #0 finishes the read operation
	while (BSP_FSM(0) != BANK_IDLE);

	// Now that the read operation is complete, we can check interrupt flags.
	temp = BSP_INTR(0) & FIRQ_ALL_FF;

	// clear interrupt flags
	CLR_BSP_INTR(0, 0xFF);

	if (temp != 0)
	{
		return FALSE;	// the page contains all-0xFF (the format mark does not exist.)
	}
	else
	{
		return TRUE;	// the page contains something other than 0xFF (it must be the format mark)
	}
}
Example #15
0
static void
msm_putc(struct uart_bas *bas, int c)
{
	int limit;

	/*
	 * Write to NO_CHARS_FOR_TX register the number of characters
	 * to be transmitted. However, before writing TX_FIFO must
	 * be empty as indicated by TX_READY interrupt in IMR register
	 */

	/*
	 * Check if transmit FIFO is empty.
	 * If not wait for TX_READY interrupt.
	 */
	limit = 1000;
	if (!(uart_getreg(bas, UART_DM_SR) & UART_DM_SR_TXEMT)) {
		while ((uart_getreg(bas, UART_DM_ISR) & UART_DM_TX_READY) == 0
		    && --limit)
			DELAY(4);
	}
	/* FIFO is ready, write number of characters to be written */
	uart_setreg(bas, UART_DM_NO_CHARS_FOR_TX, 1);

	/* Wait till TX FIFO has space */
	while ((uart_getreg(bas, UART_DM_SR) & UART_DM_SR_TXRDY) == 0)
		DELAY(4);

	/* TX FIFO has space. Write char */
	SETREG(bas, UART_DM_TF(0), (c & 0xff));
}
Example #16
0
void readFromLogBlk (UINT32 const dataLpn, UINT32 const sectOffset, UINT32 const nSects){
    uart_print("readFromLogBlk dataLpn="); uart_print_int(dataLpn);
    uart_print(", sect_offset="); uart_print_int(sectOffset);
    uart_print(", num_sectors="); uart_print_int(nSects); uart_print("\r\n");

    UINT32 dst = RD_BUF_PTR(g_ftl_read_buf_id)+(sectOffset*BYTES_PER_SECTOR);
    UINT32 src = FTL_BUF(0)+(sectOffset*BYTES_PER_SECTOR);
    rebuildPageToFtlBuf(dataLpn, sectOffset, nSects, ReadMode);
    mem_copy(dst, src, nSects*BYTES_PER_SECTOR);
    g_ftl_read_buf_id = (g_ftl_read_buf_id + 1) % NUM_RD_BUFFERS;
    SETREG (BM_STACK_RDSET, g_ftl_read_buf_id);    // change bm_read_limit
    SETREG (BM_STACK_RESET, 0x02);    // change bm_read_limit
    //UINT32 bank = choose new bank
    //int bankToGarbageCollect = (bank + NUM_BANKS - 1) % NUM_BANKS;
    //callPM(bankToGarbageCollect);
}
Example #17
0
static UINT32 opMULUr(void)	// mulu r1,r2
{
	UINT32 op1=GETREG(GET1);
	UINT32 op2=GETREG(GET2);
	UINT64 tmp;
	tmp=(UINT64)op1*(UINT64)op2;
	op2=tmp&0xffffffff;
	tmp>>=32;
	CHECK_ZS(tmp);//z = bad!
	SET_Z( (tmp|op2)==0 );
	SET_OV((tmp!=0));
	SET_CY((tmp!=0));
	SETREG(GET2,op2);
	SETREG(30,tmp);
	return clkIF;
}
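A worked example of the flag logic above, with illustrative register values:

/* mulu with r1 = 2 and r2 = 0x80000000:
 *   tmp = 2 * 0x80000000 = 0x100000000
 *   low word  -> r2  = 0x00000000
 *   high word -> r30 = 0x00000001
 *   Z  = 0      (the full 64-bit product is non-zero, so tmp | op2 != 0)
 *   OV = CY = 1 (the high word is non-zero, i.e. the product does not fit in 32 bits)
 */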
Example #18
0
/*
 * Write the current transmit buffer to the TX FIFO. 
 */
static int
msm_bus_transmit(struct uart_softc *sc)
{
	struct msm_uart_softc *u = (struct msm_uart_softc *)sc;
	struct uart_bas *bas = &sc->sc_bas;
	int i;

	uart_lock(sc->sc_hwmtx);

	/* Write some data */
	for (i = 0; i < sc->sc_txdatasz; i++) {
		/* Write TX data */
		msm_putc(bas, sc->sc_txbuf[i]);
		uart_barrier(bas);
	}

	/* TX FIFO is empty now, enable TX_READY interrupt */
	u->ier |= UART_DM_TX_READY;
	SETREG(bas, UART_DM_IMR, u->ier);
	uart_barrier(bas);

	/*
	 * Inform upper layer that it is transmitting data to hardware,
	 * this will be cleared when TXIDLE interrupt occurs.
	 */
	sc->sc_txbusy = 1;
	uart_unlock(sc->sc_hwmtx);

	return (0);
}
Example #19
0
void nand_page_read_to_host(UINT32 const bank, UINT32 const vblock, UINT32 const page_num)
{

#if PrintStats
    uart_print_level_1("FR ");
    uart_print_level_1_int(SECTORS_PER_PAGE);
    uart_print_level_1("\r\n");
#endif

    UINT32 row;

    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    row = (vblock * PAGES_PER_BLK) + page_num;

    uart_print("nand_page_read_to_host bank="); uart_print_int(bank);
    uart_print(", vblock="); uart_print_int(vblock);
    uart_print(", page="); uart_print_int(page_num); uart_print("\r\n");
    uart_print("Reading row="); uart_print_int(row); uart_print("\r\n");

    uart_print("read flash: bank="); uart_print_int(bank);
    uart_print(", page="); uart_print_int(row); uart_print("\r\n");

    SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
    SETREG(FCP_DMA_ADDR, RD_BUF_PTR(g_ftl_read_buf_id));
    SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);

    SETREG(FCP_COL, 0);
#if OPTION_FTL_TEST == TRUE
    SETREG(FCP_OPTION, FO_P | FO_E);
#else
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_SATA_R);
#endif
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    g_ftl_read_buf_id = (g_ftl_read_buf_id + 1) % NUM_RD_BUFFERS;

    #if OPTION_FTL_TEST == FALSE
    {
        int count=0;
        while (1) {
            count ++;
            if (count > 100000) {
                uart_print_level_1("Warning1 in nand_page_read_to_host\r\n");
                count=0;
            }
            UINT32 sata_id = GETREG(SATA_RBUF_PTR);
            if (g_ftl_read_buf_id != sata_id)
                break;
        }
    }
    #endif
    flash_issue_cmd(bank, RETURN_ON_ISSUE);
}
Example #20
0
static void
imx_uart_putc(struct uart_bas *bas, int c)
{

	while (!(IS(bas, USR1, TRDY)))
		;
	SETREG(bas, REG(UTXD), c);
}
Example #21
0
static void opCVTS(void)
{
	float val1=u2f(GETREG(GET1));
	SET_OV(0);
	SET_Z((val1==0.0)?1:0);
	SET_S((val1<0.0)?1:0);
	SETREG(GET2,(INT32)val1);
}
Example #22
0
void ftl_write(UINT32 const lba, UINT32 const total_sectors)
{
	UINT32 i, num_sectors_to_write;
	UINT32 remain_sectors = total_sectors;
	UINT32 next_lba = lba;
	UINT32 sect_offset = lba % SECTORS_PER_PAGE;
	
	/* until write operations end */
	while(remain_sectors != 0)
	{
#if OPTION_FTL_TEST == 0
		// wait for data from the SATA host
		while(g_ftl_write_buf_id == GETREG(SATA_WBUF_PTR) );
#endif
		/* the requested sectors extend beyond the end of the current virtual page */
		if( sect_offset + remain_sectors >= SECTORS_PER_PAGE)
		{
			num_sectors_to_write = SECTORS_PER_PAGE - sect_offset;
		}
		else
		{
			num_sectors_to_write = remain_sectors;
		}


		remain_sectors -= num_sectors_to_write;
		while(num_sectors_to_write != 0){
			if( g_target_sect + num_sectors_to_write >= SECTORS_PER_PAGE )
			{
				i = SECTORS_PER_PAGE - g_target_sect;
			}
			else
			{
				i  = num_sectors_to_write;
			}
			ftl_write_sector(next_lba,i);
			next_lba += i;
			num_sectors_to_write -= i; 
		}
		sect_offset = 0;
		/* increase bm_write_limit and g_ftl_write_buf_id so the SATA host can send the next data */
		g_ftl_write_buf_id = (g_ftl_write_buf_id + 1 ) % NUM_WR_BUFFERS;
		SETREG(BM_STACK_WRSET, g_ftl_write_buf_id);	// change bm_write_limit
		SETREG(BM_STACK_RESET, 0x01);				// change bm_write_limit
	}
}
Example #23
0
static UINT32 opSETFi(void)	// setf imm5,r2
{
	UINT32 op1=I5(OP);
	UINT32 op2=PSW&0xf;
	op1&=0xf;
	SETREG(GET2,(op1==op2)?1:0);
	return clkIF;
}
Example #24
0
void flush_merge_buffer()
{
	UINT32 new_row, new_psn;
	UINT32 new_bank = g_target_bank;

	int i;
	if( g_target_sect != 0 ){
		// get free page from target bank
		new_row = get_free_page(new_bank);

		// set registers to write a data to nand flash memory
		SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
		SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
		// Address is merge buffer address which contains actual data
		SETREG(FCP_DMA_ADDR, MERGE_BUFFER_ADDR + new_bank * BYTES_PER_PAGE);
		SETREG(FCP_DMA_CNT, BYTES_PER_SECTOR * g_target_sect);
		SETREG(FCP_COL,0);
		SETREG(FCP_ROW_L(new_bank),new_row);
		SETREG(FCP_ROW_H(new_bank),new_row);

		flash_issue_cmd(new_bank,RETURN_ON_ISSUE);
		
		// for lba -> psn mapping information 
		new_psn = new_bank * SECTORS_PER_BANK + new_row * SECTORS_PER_PAGE;
		// Update mapping information
		for(i = 0 ;i < g_target_sect; i++ )
		{
			set_psn( g_merge_buffer_lsn[i],
					new_psn + i );
		}
	}
}
Example #25
0
void flush_smt_piece(UINT32 idx)
{
	UINT32 bank,row,block;

	bank = smt_dram_map[idx] / NUM_BANKS_MAX;
	block = smt_dram_map[idx] % NUM_BANKS_MAX;
	if((smt_bit_map[bank] & (1<<block)) != 0){
		//  smt piece data
		if( g_misc_meta[bank].smt_pieces[block] >= SMT_LIMIT - 1){
			// erase 
			nand_block_erase(bank,g_bad_list[bank][block]);
		}
		//update and flash 
		g_misc_meta[bank].smt_pieces[block] = (g_misc_meta[bank].smt_pieces[block] + SMT_INC_SIZE) % SMT_LIMIT;
		row = (g_misc_meta[bank].smt_pieces[block] * SMT_PIECE_BYTES);
		row = ((row + BYTES_PER_PAGE -1 ) / BYTES_PER_PAGE) + (PAGES_PER_VBLK * g_bad_list[bank][block]);
		// flash map data to nand
		SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
		SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
		SETREG(FCP_DMA_ADDR,SMT_ADDR + (g_smt_victim * SMT_PIECE_BYTES));
		SETREG(FCP_DMA_CNT, SMT_PIECE_BYTES);
		SETREG(FCP_COL,0);
		SETREG(FCP_ROW_L(bank),row);
		SETREG(FCP_ROW_H(bank),row);
		flash_issue_cmd(bank,RETURN_ON_ISSUE);
	}
	smt_dram_bit[bank] ^= ( 1 <<block );
}
Example #26
0
// General purpose page read function
// synchronous page read (for reading metadata)
// asynchronous page read (left/right hole async read user data)
void nand_page_ptread(UINT32 const bank, UINT32 const vblock, UINT32 const page_num, UINT32 const sect_offset, UINT32 const num_sectors, UINT32 const buf_addr, UINT32 const issue_flag)
{
    UINT32 row;

/*     uart_printf("--ptread: bank %d vblock %d page_num %d sect_offset %d, num_sectors %d", bank, vblock, page_num, sect_offset, num_sectors); */
    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    // row means ppn
    row = (vblock * PAGES_PER_BLK) + page_num;

    SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
    SETREG(FCP_OPTION, FO_P | FO_E);
    SETREG(FCP_DMA_ADDR, buf_addr);
    SETREG(FCP_DMA_CNT, num_sectors * BYTES_PER_SECTOR);

    SETREG(FCP_COL, sect_offset);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    // issue_flag:
    // RETURN_ON_ISSUE, RETURN_WHEN_DONE, RETURN_ON_ACCEPT
    flash_issue_cmd(bank, issue_flag);
}
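A minimal usage sketch for the issue_flag modes listed above. The bank/block/page numbers and the wrapper function are purely illustrative; the buffer macros come from the other examples in this file:

static void example_ptread_usage(void)
{
	// synchronous full-page read of metadata into the FTL buffer
	nand_page_ptread(0, 1, 0, 0, SECTORS_PER_PAGE, FTL_BUF(0), RETURN_WHEN_DONE);

	// asynchronous partial read of user data straight into the SATA read buffer
	nand_page_ptread(0, 1, 1, 2, 4, RD_BUF_PTR(g_ftl_read_buf_id), RETURN_ON_ISSUE);
}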
Example #27
0
void flush_smt_piece(UINT32 idx)
{
	UINT32 bank,row,block;
	UINT32 dest;
	bank = smt_dram_map[idx] / NUM_BANKS_MAX;
	block = smt_dram_map[idx] % NUM_BANKS_MAX;
	if((smt_bit_map[bank] & (1<<block)) != 0){
		//  smt piece data
		if( g_misc_meta[bank].smt_pieces[block] >= SMT_LIMIT - 1){
			// erase 
			nand_block_erase(bank,g_bad_list[bank][block]);
		}
		//update and flash 
		g_misc_meta[bank].smt_pieces[block] = (g_misc_meta[bank].smt_pieces[block] + 1) % SMT_LIMIT;
		row = g_misc_meta[bank].smt_pieces[block] * SMT_INC_SIZE + ( PAGES_PER_VBLK * g_bad_list[bank][block]);
		// flash map data to nand
		SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
		SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
		SETREG(FCP_COL,0);
		SETREG(FCP_ROW_L(bank),row);
		SETREG(FCP_ROW_H(bank),row);
		dest = SMT_ADDR + (idx * SMT_PIECE_BYTES);
		SETREG(FCP_DMA_ADDR,dest);
		SETREG(FCP_DMA_CNT, SMT_PIECE_BYTES);
		while(_BSP_FSM(bank) != BANK_IDLE)
		{
			bank = bank;	// no-op body; spin until the target bank leaves the busy state
		}
		flash_issue_cmd(bank,RETURN_WHEN_DONE);
	}
	smt_piece_map[smt_dram_map[idx]] = (UINT32)-1;
}
Example #28
0
void nand_page_program(UINT32 const bank, UINT32 const vblock, UINT32 const page_num, UINT32 const buf_addr, UINT32 const issue_flag)
{
#if PrintStats
    uart_print_level_1("FP ");
    uart_print_level_1_int(SECTORS_PER_PAGE);
    uart_print_level_1("\r\n");
#endif

    totSecWrites += SECTORS_PER_PAGE;
    UINT32 row;

    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);

    row = (vblock * PAGES_PER_BLK) + page_num;
    uart_print("nand_page_program bank="); uart_print_int(bank);
    uart_print(", vblock="); uart_print_int(vblock);
    uart_print(", page="); uart_print_int(page_num); uart_print("\r\n");
    uart_print("Writing row="); uart_print_int(row); uart_print("\r\n");

    SETREG(FCP_CMD, FC_COL_ROW_IN_PROG);
    SETREG(FCP_OPTION, FO_P | FO_E | FO_B_W_DRDY);
    SETREG(FCP_DMA_ADDR, buf_addr);
    SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);
    SETREG(FCP_COL, 0);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);

    //flash_issue_cmd(bank, RETURN_WHEN_DONE);
    //flash_issue_cmd(bank, RETURN_ON_ACCEPT);
    //flash_issue_cmd(bank, RETURN_ON_ISSUE);
    flash_issue_cmd(bank, issue_flag);
}
Example #29
0
// synchronous one full page read
void nand_page_read(UINT32 const bank, UINT32 const vblock, UINT32 const page_num, UINT32 const buf_addr)
{

#if PrintStats
    uart_print_level_1("FR ");
    uart_print_level_1_int(SECTORS_PER_PAGE);
    uart_print_level_1("\r\n");
#endif

    UINT32 row;
    ASSERT(bank < NUM_BANKS);
    ASSERT(vblock < VBLKS_PER_BANK);
    ASSERT(page_num < PAGES_PER_BLK);
    row = (vblock * PAGES_PER_BLK) + page_num; // row means ppn
    uart_print("nand_page_read bank="); uart_print_int(bank);
    uart_print(", vblock="); uart_print_int(vblock);
    uart_print(", page="); uart_print_int(page_num);
    uart_print(", dst_addr="); uart_print_int(buf_addr); uart_print("\r\n");
    uart_print("Reading row="); uart_print_int(row); uart_print("\r\n");

    SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);
    SETREG(FCP_OPTION, FO_P | FO_E);
    SETREG(FCP_DMA_ADDR, buf_addr);
    SETREG(FCP_DMA_CNT, BYTES_PER_PAGE);
    SETREG(FCP_COL, 0);
    SETREG(FCP_ROW_L(bank), row);
    SETREG(FCP_ROW_H(bank), row);
    flash_issue_cmd(bank, RETURN_WHEN_DONE);
}
Example #30
0
/* g_smt_target, g_smt_victim */
void load_smt_piece(UINT32 idx){
	UINT32 bank,row,block;
	UINT32 dest;
	bank = idx / NUM_BANKS_MAX;
	block = idx % NUM_BANKS_MAX;
	row = g_misc_meta[bank].smt_pieces[block] * SMT_INC_SIZE + (PAGES_PER_VBLK * g_bad_list[bank][block]);
	if( g_smt_target == NUM_BANKS_MAX || g_smt_full == 1){
		g_smt_full = 1;
		g_smt_victim = (g_smt_victim + 1 ) % NUM_BANKS_MAX;
		flush_smt_piece(g_smt_victim);
		g_smt_target = (g_smt_target + 1) % NUM_BANKS_MAX;
	}
	SETREG(FCP_CMD, FC_COL_ROW_READ_OUT);	
	SETREG(FCP_DMA_CNT,SMT_PIECE_BYTES);
	SETREG(FCP_COL, 0);
	dest = SMT_ADDR + (g_smt_target * SMT_PIECE_BYTES);
	SETREG(FCP_DMA_ADDR, dest);
	SETREG(FCP_OPTION, FO_P | FO_E );		
	SETREG(FCP_ROW_L(bank), row);
	SETREG(FCP_ROW_H(bank), row);
	flash_issue_cmd(bank, RETURN_WHEN_DONE);

	smt_dram_map[g_smt_target] = idx;
	smt_piece_map[idx] = g_smt_target;
	smt_bit_map[bank] &= ~( 1 <<block );
	if(( g_misc_meta[bank].smt_init & ( 1 << block ) ) == 0){
		mem_set_dram( dest, 0x00, SMT_PIECE_BYTES);
		g_misc_meta[bank].smt_init |= (1 <<block);
	}
	g_smt_target++;
}
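Taken together, load_smt_piece and flush_smt_piece implement a small DRAM cache of SMT pieces indexed through smt_piece_map. A sketch of how a caller might combine them, assuming the same tables and that entries not resident in DRAM are marked (UINT32)-1 as in flush_smt_piece above; the helper name is introduced for illustration:

static UINT32 smt_piece_addr(UINT32 const idx)
{
	if (smt_piece_map[idx] == (UINT32)-1)	// the piece is not resident in DRAM
	{
		load_smt_piece(idx);				// may flush a victim piece first when the cache is full
	}
	return SMT_ADDR + (smt_piece_map[idx] * SMT_PIECE_BYTES);
}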