Example No. 1
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	unsigned int max_bitflips;
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = mtd->writesize + mtd->oobsize;

	uint32_t irq_status;
	uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE |
			    INTR_STATUS__ECC_ERR;
	bool check_erased_page = false;

	if (page != denali->page) {
		dev_err(denali->dev,
			"IN %s: page %d is not equal to denali->page %d",
			__func__, page, denali->page);
		BUG();
	}

	setup_ecc_for_xfer(denali, true, false);

	denali_enable_dma(denali, true);
	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);

	memcpy(buf, denali->buf.buf, mtd->writesize);

	check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
	denali_enable_dma(denali, false);

	if (check_erased_page) {
		read_oob_data(mtd, chip->oob_poi, denali->page);

		/* check ECC failures that may have occurred on erased pages */
		if (!is_erased(buf, mtd->writesize))
			mtd->ecc_stats.failed++;
		if (!is_erased(chip->oob_poi, mtd->oobsize))
			mtd->ecc_stats.failed++;
	}
	return max_bitflips;
}
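Every example in this listing starts by recovering the driver-private state with mtd_to_denali(). The helper itself is not part of these snippets; the minimal sketch below shows how such a helper is commonly written, assuming the driver embeds its struct nand_chip in struct denali_nand_info as a member named nand (older trees embed the mtd_info directly instead).

static inline struct denali_nand_info *mtd_to_denali(struct mtd_info *mtd)
{
	/* sketch: recover the container from the mtd passed in by the core */
	return container_of(mtd_to_nand(mtd), struct denali_nand_info, nand);
}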
Example No. 2
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;

	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS__ECC_TRANSACTION_DONE |
			    INTR_STATUS__ECC_ERR;
	bool check_erased_page = false;

	if (page != denali->page) {
		dev_err(denali->dev, "IN %s: page %d is not"
				" equal to denali->page %d, investigate!!",
				__func__, page, denali->page);
		BUG();
	}

	setup_ecc_for_xfer(denali, true, false);

	denali_enable_dma(denali, true);
	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);

	memcpy(buf, denali->buf.buf, mtd->writesize);

	check_erased_page = handle_ecc(denali, buf, irq_status);
	denali_enable_dma(denali, false);

	if (check_erased_page) {
		read_oob_data(&denali->mtd, chip->oob_poi, denali->page);

		/* check ECC failures that may have occurred on erased pages */
		if (!is_erased(buf, denali->mtd.writesize))
			denali->mtd.ecc_stats.failed++;
		if (!is_erased(chip->oob_poi, denali->mtd.oobsize))
			denali->mtd.ecc_stats.failed++;
	}
	return 0;
}
Example No. 3
/*
 * Writes a page. The caller specifies the transfer type (raw or with ECC),
 * and this function handles the configuration details.
 */
static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
			const uint8_t *buf, bool raw_xfer)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;

	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
						INTR_STATUS__PROGRAM_FAIL;

	/* if it is a raw xfer, we want to disable ecc, and send
	 * the spare area.
	 * !raw_xfer - enable ecc
	 * raw_xfer - transfer spare
	 */
	setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);

	/* copy buffer into DMA buffer */
	memcpy(denali->buf.buf, buf, mtd->writesize);

	if (raw_xfer) {
		/* transfer the data to the spare area */
		memcpy(denali->buf.buf + mtd->writesize,
			chip->oob_poi,
			mtd->oobsize);
	}

	dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);

	clear_interrupts(denali);
	denali_enable_dma(denali, true);

	denali_setup_dma(denali, DENALI_WRITE);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	if (irq_status == 0) {
		dev_err(denali->dev,
				"timeout on write_page (type = %d)\n",
				raw_xfer);
		denali->status = NAND_STATUS_FAIL;
	}

	denali_enable_dma(denali, false);
	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
}
Example No. 4
static int denali_ooblayout_free(struct mtd_info *mtd, int section,
				 struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	oobregion->offset = chip->ecc.total + denali->bbtskipbytes;
	oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}
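denali_ooblayout_free() describes only the free bytes of the OOB area; it is normally paired with an ECC counterpart and both are registered through mtd_set_ooblayout(). The sketch below shows that wiring; the denali_ooblayout_ecc() body is an illustrative assumption, not code taken from these examples.

static int denali_ooblayout_ecc(struct mtd_info *mtd, int section,
				struct mtd_oob_region *oobregion)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	struct nand_chip *chip = mtd_to_nand(mtd);

	if (section)
		return -ERANGE;

	/* assumed layout: ECC bytes follow the bad-block-marker skip bytes */
	oobregion->offset = denali->bbtskipbytes;
	oobregion->length = chip->ecc.total;

	return 0;
}

static const struct mtd_ooblayout_ops denali_ooblayout_ops = {
	.ecc	= denali_ooblayout_ecc,
	.free	= denali_ooblayout_free,
};

/* typically done once during probe: */
/* mtd_set_ooblayout(mtd, &denali_ooblayout_ops); */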
Example No. 5
static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
			   int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t addr, id;
	int i;

	switch (cmd) {
	case NAND_CMD_PAGEPROG:
		break;
	case NAND_CMD_STATUS:
		read_status(denali);
		break;
	case NAND_CMD_READID:
	case NAND_CMD_PARAM:
		reset_buf(denali);
		/*
		 * sometimes the Manufacturer ID read from the register is not
		 * right, e.g. on some Micron MT29F32G08QAA MLC NAND chips,
		 * so here we send the READID cmd to the NAND instead
		 */
		addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
		index_addr(denali, (uint32_t)addr | 0, 0x90);
		index_addr(denali, (uint32_t)addr | 1, 0);
		for (i = 0; i < 5; i++) {
			index_addr_read_data(denali,
						(uint32_t)addr | 2,
						&id);
			write_byte_to_buf(denali, id);
		}
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_SEQIN:
		denali->page = page;
		break;
	case NAND_CMD_RESET:
		reset_bank(denali);
		break;
	case NAND_CMD_READOOB:
		/* TODO: Read OOB data */
		break;
	default:
		printk(KERN_ERR ": unsupported command"
				" received 0x%x\n", cmd);
		break;
	}
}
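Several of these examples drive the controller through index_addr() and index_addr_read_data() in MODE_10/MODE_11. Those helpers are not shown here; the sketch below illustrates the usual pattern of writing an indexed address followed by a data word into the controller's indexed address space (the flash_mem pointer and the 0x10 data offset are assumptions based on typical versions of this driver).

static void index_addr(struct denali_nand_info *denali,
		       uint32_t address, uint32_t data)
{
	/* sketch: first the indexed address, then the data word */
	iowrite32(address, denali->flash_mem);
	iowrite32(data, denali->flash_mem + 0x10);
}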
Example No. 6
static void write_page(struct mtd_info *mtd, struct nand_chip *chip,
			const uint8_t *buf, bool raw_xfer)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;

	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP |
						INTR_STATUS__PROGRAM_FAIL;

	setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);

	/* copy buffer into DMA buffer */
	memcpy(denali->buf.buf, buf, mtd->writesize);

	if (raw_xfer) {
		/* transfer the data to the spare area */
		memcpy(denali->buf.buf + mtd->writesize,
			chip->oob_poi,
			mtd->oobsize);
	}

	dma_sync_single_for_device(denali->dev, addr, size, DMA_TO_DEVICE);

	clear_interrupts(denali);
	denali_enable_dma(denali, true);

	denali_setup_dma(denali, DENALI_WRITE);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	if (irq_status == 0) {
		dev_err(denali->dev,
				"timeout on write_page (type = %d)\n",
				raw_xfer);
		denali->status = NAND_STATUS_FAIL;
	}

	denali_enable_dma(denali, false);
	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_TO_DEVICE);
}
Example No. 7
static int denali_erase(struct mtd_info *mtd, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	uint32_t cmd, irq_status;

	clear_interrupts(denali);

	/* setup page read request for access type */
	cmd = MODE_10 | BANK(denali->flash_bank) | page;
	index_addr(denali, cmd, 0x1);

	/* wait for erase to complete or failure to occur */
	irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
					INTR_STATUS__ERASE_FAIL);

	return irq_status & INTR_STATUS__ERASE_FAIL ? NAND_STATUS_FAIL : PASS;
}
Example No. 8
/*
 * this is the callback that the NAND core calls to write a page. Since
 * writing a page with ECC or without is similar, all the work is done
 * by write_page above.
 */
static int denali_write_page(struct mtd_info *mtd, struct nand_chip *chip,
				const uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	/*
	 * for regular page writes, we let HW handle all the ECC
	 * data written to the device.
	 */
	if (oob_required)
		/* switch to main + spare access */
		denali_mode_main_spare_access(denali);
	else
		/* switch to main access only */
		denali_mode_main_access(denali);

	return write_page(mtd, chip, buf, false, oob_required);
}
Example No. 9
static void denali_read_buf(struct mtd_info *mtd, uint8_t *buf, int len)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t i, addr, result;

	/* delay for tR (data transfer from Flash array to data register) */
	udelay(25);

	/* make sure the device has completed; otherwise more delay and polling would be needed */
	wait_for_irq(denali, INTR_STATUS__INT_ACT);

	addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
	for (i = 0; i < len; i++) {
		index_addr_read_data(denali, (uint32_t)addr | 2, &result);
		write_byte_to_buf(denali, result);
	}
	memcpy(buf, denali->buf.buf, len);
}
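denali_read_buf() and the READID/PARAM paths above stage bytes through reset_buf() and write_byte_to_buf() before copying them out. A minimal sketch of those two helpers follows, assuming the buf structure carries head/tail indices alongside its data array (an assumption, not code from these examples).

static void reset_buf(struct denali_nand_info *denali)
{
	denali->buf.head = denali->buf.tail = 0;
}

static void write_byte_to_buf(struct denali_nand_info *denali, uint8_t byte)
{
	/* sketch: append one byte at the current tail position */
	denali->buf.buf[denali->buf.tail++] = byte;
}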
Example No. 10
/*
 * This is the callback that the NAND core calls to write a page without ECC.
 * raw access is similar to ECC page writes, so all the work is done in the
 * write_page() function above.
 */
static int denali_write_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
					const uint8_t *buf, int oob_required)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	/*
	 * for raw page writes, we want to disable ECC and simply write
	 * whatever data is in the buffer.
	 */

	if (oob_required)
		/* switch to main + spare access */
		denali_mode_main_spare_access(denali);
	else
		/* switch to main access only */
		denali_mode_main_access(denali);

	return write_page(mtd, chip, buf, true, oob_required);
}
Example No. 11
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status, irq_mask =	INTR_STATUS__DMA_CMD_COMP;

	if (denali->page != page) {
		debug("Missing NAND_CMD_READ0 command\n");
		return -EIO;
	}

	if (oob_required)
		/* switch to main + spare access */
		denali_mode_main_spare_access(denali);
	else
		/* switch to main access only */
		denali_mode_main_access(denali);

	/* setting up the DMA where ecc_enable is true */
	irq_status = denali_dma_configuration(denali, DENALI_READ, false,
					      irq_mask, oob_required);

	memcpy(buf, denali->buf.dma_buf, mtd->writesize);

	/* check whether there was any ECC error */
	if (irq_status & INTR_STATUS__ECC_UNCOR_ERR) {
		/* is the error caused by an erased page? check via raw read */
		debug("  Uncorrected ECC detected\n");
		denali_read_page_raw(mtd, chip, buf, oob_required,
				     denali->page);

		if (is_erased(buf, mtd->writesize) == true &&
		    is_erased(chip->oob_poi, mtd->oobsize) == true) {
			debug("  ECC error caused by an erased block\n");
			/* false alarm, return the 0xFF data */
		} else {
			return -EBADMSG;
		}
	}
	memcpy(buf, denali->buf.dma_buf, mtd->writesize);
	return 0;
}
Example No. 12
static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
			   int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t addr, id;
	int i;

	switch (cmd) {
	case NAND_CMD_PAGEPROG:
		break;
	case NAND_CMD_STATUS:
		read_status(denali);
		break;
	case NAND_CMD_READID:
	case NAND_CMD_PARAM:
		reset_buf(denali);
		addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
		index_addr(denali, (uint32_t)addr | 0, 0x90);
		index_addr(denali, (uint32_t)addr | 1, 0);
		for (i = 0; i < 5; i++) {
			index_addr_read_data(denali,
						(uint32_t)addr | 2,
						&id);
			write_byte_to_buf(denali, id);
		}
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_SEQIN:
		denali->page = page;
		break;
	case NAND_CMD_RESET:
		reset_bank(denali);
		break;
	case NAND_CMD_READOOB:
		/* TODO: Read OOB data */
		break;
	default:
		printk(KERN_ERR ": unsupported command"
				" received 0x%x\n", cmd);
		break;
	}
}
Example No. 13
static void denali_erase(struct mtd_info *mtd, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	uint32_t cmd = 0x0, irq_status = 0;

	/* clear interrupts */
	clear_interrupts(denali);

	/* setup page read request for access type */
	cmd = MODE_10 | BANK(denali->flash_bank) | page;
	index_addr(denali, (uint32_t)cmd, 0x1);

	/* wait for erase to complete or failure to occur */
	irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
					INTR_STATUS__ERASE_FAIL);

	denali->status = (irq_status & INTR_STATUS__ERASE_FAIL) ?
						NAND_STATUS_FAIL : PASS;
}
Example No. 14
static void denali_erase(struct mtd_info *mtd, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t cmd, irq_status;

	/* clear interrupts */
	clear_interrupts(denali);

	/* setup page read request for access type */
	cmd = MODE_10 | BANK(denali->flash_bank) | page;
	index_addr(denali, cmd, 0x1);

	/* wait for erase to complete or failure to occur */
	irq_status = wait_for_irq(denali, INTR_STATUS__ERASE_COMP |
					INTR_STATUS__ERASE_FAIL);

	if (irq_status & INTR_STATUS__ERASE_FAIL ||
	    irq_status & INTR_STATUS__LOCKED_BLK)
		denali->status = NAND_STATUS_FAIL;
	else
		denali->status = 0;
}
Example No. 15
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	dma_addr_t addr = denali->buf.dma_buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;

	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;

	if (page != denali->page) {
		dev_err(denali->dev, "IN %s: page %d is not"
				" equal to denali->page %d, investigate!!",
				__func__, page, denali->page);
		BUG();
	}

	setup_ecc_for_xfer(denali, false, true);
	denali_enable_dma(denali, true);

	dma_sync_single_for_device(denali->dev, addr, size, DMA_FROM_DEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	dma_sync_single_for_cpu(denali->dev, addr, size, DMA_FROM_DEVICE);

	denali_enable_dma(denali, false);

	memcpy(buf, denali->buf.buf, mtd->writesize);
	memcpy(chip->oob_poi, denali->buf.buf + mtd->writesize, mtd->oobsize);

	return 0;
}
Example No. 16
static void read_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_mask = INTR_STATUS__LOAD_COMP,
			 irq_status = 0, addr = 0x0, cmd = 0x0;

	denali->page = page;

	if (denali_send_pipeline_cmd(denali, false, true, SPARE_ACCESS,
							DENALI_READ) == PASS) {
		read_data_from_flash_mem(denali, buf, mtd->oobsize);

		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0)
			dev_err(denali->dev, "page on OOB timeout %d\n",
					denali->page);

		addr = BANK(denali->flash_bank) | denali->page;
		cmd = MODE_10 | addr;
		index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);
	}
}
Example No. 17
/* writes OOB data to the device */
static int write_oob_data(struct mtd_info *mtd, uint8_t *buf, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t irq_status;
	uint32_t irq_mask = INTR_STATUS__PROGRAM_COMP |
						INTR_STATUS__PROGRAM_FAIL;
	int status = 0;

	denali->page = page;

	if (denali_send_pipeline_cmd(denali, false, false, SPARE_ACCESS,
							DENALI_WRITE) == PASS) {
		write_data_to_flash_mem(denali, buf, mtd->oobsize);

		/* wait for operation to complete */
		irq_status = wait_for_irq(denali, irq_mask);

		if (irq_status == 0) {
			dev_err(denali->dev, "OOB write failed\n");
			status = -EIO;
		}

		/* set the device back to MAIN_ACCESS */
		{
			uint32_t addr;
			uint32_t cmd;
			addr = BANK(denali->flash_bank) | denali->page;
			cmd = MODE_10 | addr;
			index_addr(denali, (uint32_t)cmd, MAIN_ACCESS);
		}

	} else {
		dev_err(denali->dev, "unable to send pipeline command\n");
		status = -EIO;
	}
	return status;
}
Example No. 18
static int write_page(struct mtd_info *mtd, struct nand_chip *chip,
			const uint8_t *buf, bool raw_xfer, int oob_required)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	uint32_t irq_status = 0;
	uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;

	denali->status = 0;

	/* copy buffer into DMA buffer */
	memcpy(denali->buf.dma_buf, buf, mtd->writesize);

	/* need extra memcpy for raw transfer */
	if (raw_xfer)
		memcpy(denali->buf.dma_buf + mtd->writesize,
		       chip->oob_poi, mtd->oobsize);

	/* setting up DMA */
	irq_status = denali_dma_configuration(denali, DENALI_WRITE, raw_xfer,
					      irq_mask, oob_required);

	/* if a timeout happened, error out */
	if (!(irq_status & INTR_STATUS__DMA_CMD_COMP)) {
		debug("DMA timeout for denali write_page\n");
		denali->status = NAND_STATUS_FAIL;
		return -EIO;
	}

	if (irq_status & INTR_STATUS__LOCKED_BLK) {
		debug("Failed as write to locked block\n");
		denali->status = NAND_STATUS_FAIL;
		return -EIO;
	}
	return 0;
}
Example No. 19
/* a raw read includes the ECC values and the whole spare area */
static int denali_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip,
				uint8_t *buf, int oob_required, int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	uint32_t irq_status, irq_mask = INTR_STATUS__DMA_CMD_COMP;

	if (denali->page != page) {
		debug("Missing NAND_CMD_READ0 command\n");
		return -EIO;
	}

	if (oob_required)
		/* switch to main + spare access */
		denali_mode_main_spare_access(denali);
	else
		/* switch to main access only */
		denali_mode_main_access(denali);

	/* setting up the DMA where ecc_enable is false */
	irq_status = denali_dma_configuration(denali, DENALI_READ, true,
					      irq_mask, oob_required);

	/* if a timeout happened, error out */
	if (!(irq_status & INTR_STATUS__DMA_CMD_COMP)) {
		debug("DMA timeout for denali_read_page_raw\n");
		return -EIO;
	}

	/* split the content into the destination buffers */
	memcpy(chip->oob_poi, (denali->buf.dma_buf + mtd->writesize),
	       mtd->oobsize);
	memcpy(buf, denali->buf.dma_buf, mtd->writesize);

	return 0;
}
Example No. 20
static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
			   int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t addr;

	switch (cmd) {
	case NAND_CMD_PAGEPROG:
		break;
	case NAND_CMD_STATUS:
		addr = MODE_11 | BANK(denali->flash_bank);
		index_addr(denali, addr | 0, cmd);
		break;
	case NAND_CMD_READID:
	case NAND_CMD_PARAM:
		reset_buf(denali);
		/*
		 * sometimes the Manufacturer ID read from the register is not
		 * right, e.g. on some Micron MT29F32G08QAA MLC NAND chips,
		 * so here we send the READID cmd to the NAND instead
		 */
		addr = MODE_11 | BANK(denali->flash_bank);
		index_addr(denali, addr | 0, cmd);
		index_addr(denali, addr | 1, col & 0xFF);
		if (cmd == NAND_CMD_PARAM)
			udelay(50);
		break;
	case NAND_CMD_RNDOUT:
		addr = MODE_11 | BANK(denali->flash_bank);
		index_addr(denali, addr | 0, cmd);
		index_addr(denali, addr | 1, col & 0xFF);
		index_addr(denali, addr | 1, col >> 8);
		index_addr(denali, addr | 0, NAND_CMD_RNDOUTSTART);
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_SEQIN:
		denali->page = page;
		break;
	case NAND_CMD_RESET:
		reset_bank(denali);
		break;
	case NAND_CMD_READOOB:
		/* TODO: Read OOB data */
		break;
	case NAND_CMD_ERASE1:
		/*
		 * only block erase is supported, not multi-block erase, as
		 * that would cross planes and software would need complex
		 * calculations to identify the block count per plane
		 */
		denali_erase(mtd, page);
		break;
	case NAND_CMD_ERASE2:
		/* nothing to do here as it was done during NAND_CMD_ERASE1 */
		break;
	case NAND_CMD_UNLOCK1:
		addr = MODE_10 | BANK(denali->flash_bank) | page;
		index_addr(denali, addr | 0, DENALI_UNLOCK_START);
		break;
	case NAND_CMD_UNLOCK2:
		addr = MODE_10 | BANK(denali->flash_bank) | page;
		index_addr(denali, addr | 0, DENALI_UNLOCK_END);
		break;
	case NAND_CMD_LOCK:
		addr = MODE_10 | BANK(denali->flash_bank);
		index_addr(denali, addr | 0, DENALI_LOCK);
		break;
	default:
		printf(": unsupported command received 0x%x\n", cmd);
		break;
	}
}
Example No. 21
static int denali_read_page(struct mtd_info *mtd, struct nand_chip *chip,
			    uint8_t *buf, int oob_required, int page)
{
	unsigned int max_bitflips = 0;
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	dma_addr_t addr = (unsigned long)denali->buf.buf;
	size_t size = denali->mtd.writesize + denali->mtd.oobsize;

	uint32_t irq_status;
	uint32_t irq_mask = denali->have_hw_ecc_fixup ?
		(INTR_STATUS__DMA_CMD_COMP) :
		(INTR_STATUS__ECC_TRANSACTION_DONE | INTR_STATUS__ECC_ERR);
	bool check_erased_page = false;

	if (page != denali->page) {
		dev_err(denali->dev,
			"IN %s: page %d is not equal to denali->page %d",
			__func__, page, denali->page);
		BUG();
	}

	setup_ecc_for_xfer(denali, true, false);

	denali_enable_dma(denali, true);
	dma_sync_single_for_device(addr, size, DMA_FROM_DEVICE);

	clear_interrupts(denali);
	denali_setup_dma(denali, DENALI_READ);

	/* wait for operation to complete */
	irq_status = wait_for_irq(denali, irq_mask);

	dma_sync_single_for_cpu(addr, size, DMA_FROM_DEVICE);

	memcpy(buf, denali->buf.buf, mtd->writesize);

	check_erased_page = handle_ecc(denali, buf, irq_status, &max_bitflips);
	denali_enable_dma(denali, false);

	if (check_erased_page) {
		if (denali->have_hw_ecc_fixup) {
			/* when we have hw ecc fixup, don't check the OOB */
			if (!is_erased(buf, denali->mtd.writesize))
				denali->mtd.ecc_stats.failed++;
		} else {
			read_oob_data(&denali->mtd, chip->oob_poi,
				denali->page);

			/* check ECC failures that may have occurred on
			 * erased pages */
			if (!is_erased(buf, denali->mtd.writesize))
				denali->mtd.ecc_stats.failed++;
			if (!is_erased(chip->oob_poi, denali->mtd.oobsize))
				denali->mtd.ecc_stats.failed++;
		}
	}
	return max_bitflips;
}
Example No. 22
static void denali_select_chip(struct mtd_info *mtd, int chip)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);

	denali->flash_bank = chip;
}
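denali_select_chip() and the other callbacks shown in this listing only take effect once they are hooked into the NAND core during probe. The sketch below illustrates that registration against the legacy nand_chip callback API these snippets are written for; the exact field names and signatures vary between kernel and U-Boot versions, so treat it as an assumption.

static void denali_hook_nand_callbacks(struct nand_chip *chip)
{
	/* low-level command and data-path hooks */
	chip->select_chip = denali_select_chip;
	chip->cmdfunc = denali_cmdfunc;
	chip->read_buf = denali_read_buf;

	/* page accessors invoked by the NAND core through chip->ecc */
	chip->ecc.read_page = denali_read_page;
	chip->ecc.read_page_raw = denali_read_page_raw;
	chip->ecc.write_page = denali_write_page;
	chip->ecc.write_page_raw = denali_write_page_raw;
}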
Example No. 23
static void denali_cmdfunc(struct mtd_info *mtd, unsigned int cmd, int col,
			   int page)
{
	struct denali_nand_info *denali = mtd_to_denali(mtd);
	uint32_t addr, id;
	uint32_t pages_per_block;
	uint32_t block;
	int i;

	switch (cmd) {
	case NAND_CMD_PAGEPROG:
		break;
	case NAND_CMD_STATUS:
		read_status(denali);
		break;
	case NAND_CMD_READID:
		reset_buf(denali);
		/*
		 * sometimes the Manufacturer ID read from the register is not
		 * right, e.g. on some Micron MT29F32G08QAA MLC NAND chips,
		 * so here we send the READID cmd to the NAND instead
		 */
		addr = MODE_11 | BANK(denali->flash_bank);
		index_addr(denali, addr | 0, 0x90);
		index_addr(denali, addr | 1, col);
		for (i = 0; i < 8; i++) {
			index_addr_read_data(denali, addr | 2, &id);
			write_byte_to_buf(denali, id);
		}
		break;
	case NAND_CMD_PARAM:
		reset_buf(denali);

		/* turn on R/B interrupt */
		denali_set_intr_modes(denali, false);
		denali_irq_mask = DENALI_IRQ_ALL | INTR_STATUS__INT_ACT;
		clear_interrupts(denali);
		denali_irq_enable(denali, denali_irq_mask);
		denali_set_intr_modes(denali, true);

		addr = (uint32_t)MODE_11 | BANK(denali->flash_bank);
		index_addr(denali, (uint32_t)addr | 0, cmd);
		index_addr(denali, (uint32_t)addr | 1, col & 0xFF);
		/* Wait tR time... */
		udelay(25);
		/* And then wait for R/B interrupt */
		wait_for_irq(denali, INTR_STATUS__INT_ACT);

		/* turn off R/B interrupt now */
		denali_irq_mask = DENALI_IRQ_ALL;
		denali_set_intr_modes(denali, false);
		denali_irq_enable(denali, denali_irq_mask);
		denali_set_intr_modes(denali, true);

		for (i = 0; i < 256; i++) {
			index_addr_read_data(denali,
						(uint32_t)addr | 2,
						&id);
			write_byte_to_buf(denali, id);
		}
		break;
	case NAND_CMD_READ0:
	case NAND_CMD_SEQIN:
		denali->page = page;
		break;
	case NAND_CMD_RESET:
		reset_bank(denali);
		break;
	case NAND_CMD_READOOB:
		/* TODO: Read OOB data */
		break;
	case NAND_CMD_UNLOCK1:
		pages_per_block = mtd->erasesize / mtd->writesize;
		block = page / pages_per_block;
		addr = (uint32_t)MODE_10 | (block * pages_per_block);
		index_addr(denali, addr, 0x10);
		break;
	case NAND_CMD_UNLOCK2:
		pages_per_block = mtd->erasesize / mtd->writesize;
		block = (page+pages_per_block-1) / pages_per_block;
		addr = (uint32_t)MODE_10 | (block * pages_per_block);
		index_addr(denali, addr, 0x11);
		break;
	case NAND_CMD_ERASE1:
	case NAND_CMD_ERASE2:
		addr = MODE_10 | BANK(denali->flash_bank) | page;
		index_addr(denali, addr, 0x1);
		break;
	default:
		pr_err(": unsupported command received 0x%x\n", cmd);
		break;
	}
}