/*
 * Examine the per-step ECC status words the NFC filled in after a read.
 *
 * For each ECC step: if the controller corrected the errors, account them
 * in the MTD stats, track the worst-case per-step bitflip count in
 * @bitflips, and set the step's bit in @correct_bitmap.  For an
 * uncorrectable step, a scrambled erased (all-0xFF) page is recognized via
 * the zero-bit count; anything else is a genuine ECC failure.
 *
 * Returns 0 when all steps were correctable, ECC_CHECK_RETURN_FF when an
 * erased page was detected, or -EBADMSG on an uncorrectable error.
 */
static int meson_nfc_ecc_correct(struct nand_chip *nand, u32 *bitflips,
				 u64 *correct_bitmap)
{
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
	__le64 *info;
	int ret = 0, i;

	for (i = 0; i < nand->ecc.steps; i++) {
		info = &meson_chip->info_buf[i];
		if (ECC_ERR_CNT(*info) != ECC_UNCORRECTABLE) {
			mtd->ecc_stats.corrected += ECC_ERR_CNT(*info);
			*bitflips = max_t(u32, *bitflips, ECC_ERR_CNT(*info));
			/*
			 * Record step i as corrected.  The previous code
			 * used "1 >> i", which evaluates to 0 for any
			 * i > 0, so bits past bit 0 were never set.
			 */
			*correct_bitmap |= 1ULL << i;
			continue;
		}
		if ((nand->options & NAND_NEED_SCRAMBLING) &&
		    ECC_ZERO_CNT(*info) < nand->ecc.strength) {
			/* Scrambled erased page: report as all-0xFF */
			mtd->ecc_stats.corrected += ECC_ZERO_CNT(*info);
			*bitflips = max_t(u32, *bitflips, ECC_ZERO_CNT(*info));
			ret = ECC_CHECK_RETURN_FF;
		} else {
			ret = -EBADMSG;
		}
	}

	return ret;
}
/*
 * Mark a SmartMedia block bad by clearing bits in the OOB block-status
 * marker.  @ofs must be an erase-block boundary.
 *
 * Returns 0 on success, -EIO if the OOB write failed or was short.
 */
static int sm_block_markbad(struct nand_chip *chip, loff_t ofs)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct mtd_oob_ops ops;
	struct sm_oob oob;
	int ret;

	/* Start from all-0xFF OOB, then clear bits in the marker byte only */
	memset(&oob, -1, SM_OOB_SIZE);
	oob.block_status = 0x0F;

	/* As long as this function is called on erase block boundaries
		it will work correctly for 256 byte nand */
	ops.mode = MTD_OPS_PLACE_OOB;
	ops.ooboffs = 0;
	/*
	 * Only SM_OOB_SIZE bytes of the local 'oob' buffer exist; asking
	 * for mtd->oobsize bytes would read past the buffer on devices
	 * with a larger OOB area and would also make the oobretlen check
	 * below fail spuriously.
	 */
	ops.ooblen = SM_OOB_SIZE;
	ops.oobbuf = (void *)&oob;
	ops.datbuf = NULL;

	ret = mtd_write_oob(mtd, ofs, &ops);
	if (ret < 0 || ops.oobretlen != SM_OOB_SIZE) {
		pr_notice("sm_common: can't mark sector at %i as bad\n",
			  (int)ofs);
		return -EIO;
	}

	return 0;
}
/*
 * Board-level NAND bring-up for the LPC32xx MLC controller (U-Boot).
 *
 * Fills in the global lpc32xx_chip nand_chip structure (I/O addresses,
 * hardware-ECC callbacks, BBT options), initializes the controller, then
 * runs the two-stage scan (ident + tail) and registers the device.
 * Errors are logged; the function returns void so failures only skip
 * registration.
 */
void board_nand_init(void)
{
	struct mtd_info *mtd = nand_to_mtd(&lpc32xx_chip);
	int ret;

	/* Set all BOARDSPECIFIC (actually core-specific) fields */
	lpc32xx_chip.IO_ADDR_R = &lpc32xx_nand_mlc_registers->buff;
	lpc32xx_chip.IO_ADDR_W = &lpc32xx_nand_mlc_registers->buff;
	lpc32xx_chip.cmd_ctrl = lpc32xx_cmd_ctrl;
	/* do not set init_size: nand_base.c will read sizes from chip */
	lpc32xx_chip.dev_ready = lpc32xx_dev_ready;
	/* do not set setup_read_retry: this is NAND-chip-specific */
	/* do not set chip_delay: we have dev_ready defined. */
	lpc32xx_chip.options |= NAND_NO_SUBPAGE_WRITE;

	/* Set needed ECC fields: HW ECC, 512-byte steps, 10 ECC bytes */
	lpc32xx_chip.ecc.mode = NAND_ECC_HW;
	lpc32xx_chip.ecc.layout = &lpc32xx_largepage_ecclayout;
	lpc32xx_chip.ecc.size = 512;
	lpc32xx_chip.ecc.bytes = 10;
	lpc32xx_chip.ecc.strength = 4;
	lpc32xx_chip.ecc.read_page = lpc32xx_read_page_hwecc;
	lpc32xx_chip.ecc.read_page_raw = lpc32xx_read_page_raw;
	lpc32xx_chip.ecc.write_page = lpc32xx_write_page_hwecc;
	lpc32xx_chip.ecc.write_page_raw = lpc32xx_write_page_raw;
	lpc32xx_chip.ecc.read_oob = lpc32xx_read_oob;
	lpc32xx_chip.ecc.write_oob = lpc32xx_write_oob;
	lpc32xx_chip.waitfunc = lpc32xx_waitfunc;

	lpc32xx_chip.read_byte = lpc32xx_read_byte; /* FIXME: NEEDED? */

	/* BBT options: read from last two pages */
	lpc32xx_chip.bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_LASTBLOCK |
		NAND_BBT_SCANLASTPAGE | NAND_BBT_SCAN2NDPAGE |
		NAND_BBT_WRITE;

	/* Initialize NAND interface */
	lpc32xx_nand_init();

	/* identify chip */
	ret = nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_CHIPS, NULL);
	if (ret) {
		error("nand_scan_ident returned %i", ret);
		return;
	}

	/* finish scanning the chip */
	ret = nand_scan_tail(mtd);
	if (ret) {
		error("nand_scan_tail returned %i", ret);
		return;
	}

	/* chip is good, register it */
	ret = nand_register(0, mtd);
	if (ret)
		error("nand_register returned %i", ret);
}
static u8 bcm47xxnflash_ops_bcm4706_read_byte(struct nand_chip *nand_chip) { struct mtd_info *mtd = nand_to_mtd(nand_chip); struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip); struct bcma_drv_cc *cc = b47n->cc; u32 tmp = 0; switch (b47n->curr_command) { case NAND_CMD_READID: if (b47n->curr_column >= ARRAY_SIZE(b47n->id_data)) { pr_err("Requested invalid id_data: %d\n", b47n->curr_column); return 0; } return b47n->id_data[b47n->curr_column++]; case NAND_CMD_STATUS: if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_READ)) return 0; return bcma_cc_read32(cc, BCMA_CC_NFLASH_DATA) & 0xff; case NAND_CMD_READOOB: bcm47xxnflash_ops_bcm4706_read(mtd, (u8 *)&tmp, 4); return tmp & 0xFF; } pr_err("Invalid command for byte read: 0x%X\n", b47n->curr_command); return 0; }
/*
 * Issue the NFC DMA read/write command for one page.
 *
 * @raw: non-zero for a raw (no ECC engine) transfer of page + OOB;
 *       otherwise an ECC transfer of ecc.steps sub-pages is generated.
 * @dir: DMA direction (true/false per DMA_DIR()).
 * @scrambler: scrambler enable bits OR'ed into the command word.
 */
static void meson_nfc_cmd_access(struct nand_chip *nand, int raw, bool dir,
				 int scrambler)
{
	struct mtd_info *mtd = nand_to_mtd(nand);
	struct meson_nfc *nfc = nand_get_controller_data(mtd_to_nand(mtd));
	struct meson_nfc_nand_chip *meson_chip = to_meson_nand(nand);
	u32 bch = meson_chip->bch_mode, cmd;
	int len = mtd->writesize, pagesize, pages;

	pagesize = nand->ecc.size;

	if (raw) {
		len = mtd->writesize + mtd->oobsize;
		/*
		 * The raw DMA length field is 14 bits wide.  The previous
		 * GENMASK(5, 0) mask (6 bits, max 63) truncated the
		 * page + OOB length, which is at least several hundred
		 * bytes on any real chip.
		 */
		cmd = (len & GENMASK(13, 0)) | scrambler | DMA_DIR(dir);
		writel(cmd, nfc->reg_base + NFC_REG_CMD);
		return;
	}

	/* ECC path: one command covering all ECC steps of the page */
	pages = len / nand->ecc.size;
	cmd = CMDRWGEN(DMA_DIR(dir), scrambler, bch,
		       NFC_CMD_SHORTMODE_DISABLE, pagesize, pages);
	writel(cmd, nfc->reg_base + NFC_REG_CMD);
}
/*
 * Remove a NAND device.
 */
static int xway_nand_remove(struct platform_device *pdev)
{
	struct xway_nand_data *data = platform_get_drvdata(pdev);
	struct mtd_info *mtd = nand_to_mtd(&data->chip);

	nand_release(mtd);

	return 0;
}
/* Tear down the MTD/NAND registration on driver removal. */
static int bcm47xxnflash_remove(struct platform_device *pdev)
{
	struct bcm47xxnflash *nflash = platform_get_drvdata(pdev);
	struct mtd_info *mtd = nand_to_mtd(&nflash->nand_chip);

	nand_release(mtd);

	return 0;
}
/*
 * Driver removal: unregister the NAND device, then free the mtd name
 * that ndfc_chip_init() allocated with kasprintf().  The name must be
 * freed only after nand_release(), while mtd is still valid.
 */
static int ndfc_remove(struct platform_device *ofdev)
{
	struct ndfc_controller *ndfc = dev_get_drvdata(&ofdev->dev);
	struct mtd_info *mtd = nand_to_mtd(&ndfc->chip);

	nand_release(mtd);
	kfree(mtd->name);

	return 0;
}
/*
 * Remove a NAND device.
 */
static int plat_nand_remove(struct platform_device *pdev)
{
	struct plat_nand_data *data = platform_get_drvdata(pdev);
	struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
	struct mtd_info *mtd = nand_to_mtd(&data->chip);

	nand_release(mtd);

	/* Give the platform a chance to undo its probe-time setup */
	if (pdata->ctrl.remove)
		pdata->ctrl.remove(pdev);

	return 0;
}
/*
 * Initialize chip structure
 *
 * Fills in the nand_chip callbacks and software-Hamming-over-HW ECC
 * parameters (256-byte steps, 3 ECC bytes), binds the first DT child
 * node as the flash node, builds the mtd name, then scans and registers
 * the device.  On any failure after the name allocation, the name is
 * freed on the shared error path.
 */
static int ndfc_chip_init(struct ndfc_controller *ndfc,
			  struct device_node *node)
{
	struct device_node *flash_np;
	struct nand_chip *chip = &ndfc->chip;
	struct mtd_info *mtd = nand_to_mtd(chip);
	int ret;

	chip->IO_ADDR_R = ndfc->ndfcbase + NDFC_DATA;
	chip->IO_ADDR_W = ndfc->ndfcbase + NDFC_DATA;
	chip->cmd_ctrl = ndfc_hwcontrol;
	chip->dev_ready = ndfc_ready;
	chip->select_chip = ndfc_select_chip;
	chip->chip_delay = 50;
	chip->controller = &ndfc->ndfc_control;
	chip->read_buf = ndfc_read_buf;
	chip->write_buf = ndfc_write_buf;
	chip->ecc.correct = nand_correct_data;
	chip->ecc.hwctl = ndfc_enable_hwecc;
	chip->ecc.calculate = ndfc_calculate_ecc;
	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.size = 256;
	chip->ecc.bytes = 3;
	chip->ecc.strength = 1;
	nand_set_controller_data(chip, ndfc);
	mtd->dev.parent = &ndfc->ofdev->dev;

	/* Only a single flash child node is supported */
	flash_np = of_get_next_child(node, NULL);
	if (!flash_np)
		return -ENODEV;
	nand_set_flash_node(chip, flash_np);

	mtd->name = kasprintf(GFP_KERNEL, "%s.%s",
			      dev_name(&ndfc->ofdev->dev), flash_np->name);
	if (!mtd->name) {
		ret = -ENOMEM;
		goto err;
	}

	ret = nand_scan(mtd, 1);
	if (ret)
		goto err;

	ret = mtd_device_register(mtd, NULL, 0);

err:
	/* Falls through here on success too: drop the node ref either way */
	of_node_put(flash_np);
	if (ret)
		kfree(mtd->name);
	return ret;
}
static int txx9ndfmc_attach_chip(struct nand_chip *chip) { struct mtd_info *mtd = nand_to_mtd(chip); if (mtd->writesize >= 512) { chip->ecc.size = 512; chip->ecc.bytes = 6; } else { chip->ecc.size = 256; chip->ecc.bytes = 3; } return 0; }
/*
 * Buffer write: only valid while a page-program (SEQIN) sequence is
 * latched; any other command is logged as an error.
 */
static void bcm47xxnflash_ops_bcm4706_write_buf(struct nand_chip *nand_chip,
						const uint8_t *buf, int len)
{
	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);

	if (b47n->curr_command == NAND_CMD_SEQIN) {
		bcm47xxnflash_ops_bcm4706_write(nand_to_mtd(nand_chip), buf,
						len);
		return;
	}

	pr_err("Invalid command for buf write: 0x%X\n", b47n->curr_command);
}
/*
 * Buffer read: valid after READ0 or READOOB; any other latched command
 * is logged as an error.
 */
static void bcm47xxnflash_ops_bcm4706_read_buf(struct nand_chip *nand_chip,
					       uint8_t *buf, int len)
{
	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);

	if (b47n->curr_command == NAND_CMD_READ0 ||
	    b47n->curr_command == NAND_CMD_READOOB) {
		bcm47xxnflash_ops_bcm4706_read(nand_to_mtd(nand_chip), buf,
					       len);
		return;
	}

	pr_err("Invalid command for buf read: 0x%X\n", b47n->curr_command);
}
/* driver exit point */
void denali_remove(struct denali_nand_info *denali)
{
	struct mtd_info *mtd = nand_to_mtd(&denali->nand);
	/*
	 * Pre-compute DMA buffer size to avoid any problems in case
	 * nand_release() ever changes in a way that mtd->writesize and
	 * mtd->oobsize are not reliable after this call.
	 */
	int bufsize = mtd->writesize + mtd->oobsize;

	nand_release(mtd);
	denali_irq_cleanup(denali->irq, denali);
	/* Unmap the DMA buffer sized from the values captured above */
	dma_unmap_single(denali->dev, denali->buf.dma_buf, bufsize,
			 DMA_BIDIRECTIONAL);
}
static int sm_attach_chip(struct nand_chip *chip) { struct mtd_info *mtd = nand_to_mtd(chip); /* Bad block marker position */ chip->badblockpos = 0x05; chip->badblockbits = 7; chip->legacy.block_markbad = sm_block_markbad; /* ECC layout */ if (mtd->writesize == SM_SECTOR_SIZE) mtd_set_ooblayout(mtd, &oob_sm_ops); else if (mtd->writesize == SM_SMALL_PAGE) mtd_set_ooblayout(mtd, &oob_sm_small_ops); else return -ENODEV; return 0; }
/*
 * Probe: allocate the driver state, bind it to the nand_chip, and hand
 * off to the BCM4706-specific init (the only supported chip id).  On
 * success the MTD device is parsed and registered.
 */
static int bcm47xxnflash_probe(struct platform_device *pdev)
{
	struct bcma_nflash *nflash = dev_get_platdata(&pdev->dev);
	struct bcm47xxnflash *b47n;
	struct mtd_info *mtd;
	int err = 0;

	b47n = devm_kzalloc(&pdev->dev, sizeof(*b47n), GFP_KERNEL);
	if (!b47n)
		return -ENOMEM;

	nand_set_controller_data(&b47n->nand_chip, b47n);
	mtd = nand_to_mtd(&b47n->nand_chip);
	mtd->dev.parent = &pdev->dev;
	/* Recover the chipcommon core the nflash platdata is embedded in */
	b47n->cc = container_of(nflash, struct bcma_drv_cc, nflash);

	if (b47n->cc->core->bus->chipinfo.id == BCMA_CHIP_ID_BCM4706) {
		err = bcm47xxnflash_ops_bcm4706_init(b47n);
	} else {
		pr_err("Device not supported\n");
		err = -ENOTSUPP;
	}
	if (err) {
		pr_err("Initialization failed: %d\n", err);
		return err;
	}

	platform_set_drvdata(pdev, b47n);

	err = mtd_device_parse_register(mtd, probes, NULL, NULL, 0);
	if (err) {
		pr_err("Failed to register MTD device: %d\n", err);
		return err;
	}

	return 0;
}
/*
 * Probe for the NAND device.
 *
 * Maps the controller, wires the soft-ECC nand_chip callbacks, programs
 * the Lantiq EBU (external bus unit) for NAND mode on the chosen chip
 * select, then scans and registers the chip.  The EBU register writes
 * are order-sensitive hardware setup.
 */
static int xway_nand_probe(struct platform_device *pdev)
{
	struct xway_nand_data *data;
	struct mtd_info *mtd;
	struct resource *res;
	int err;
	u32 cs;
	u32 cs_flag = 0;

	/* Allocate memory for the device structure (and zero it) */
	data = devm_kzalloc(&pdev->dev, sizeof(struct xway_nand_data),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->nandaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(data->nandaddr))
		return PTR_ERR(data->nandaddr);

	nand_set_flash_node(&data->chip, pdev->dev.of_node);
	mtd = nand_to_mtd(&data->chip);
	mtd->dev.parent = &pdev->dev;

	data->chip.cmd_ctrl = xway_cmd_ctrl;
	data->chip.dev_ready = xway_dev_ready;
	data->chip.select_chip = xway_select_chip;
	data->chip.write_buf = xway_write_buf;
	data->chip.read_buf = xway_read_buf;
	data->chip.read_byte = xway_read_byte;
	data->chip.chip_delay = 30;

	/* Software Hamming ECC */
	data->chip.ecc.mode = NAND_ECC_SOFT;
	data->chip.ecc.algo = NAND_ECC_HAMMING;

	platform_set_drvdata(pdev, data);
	nand_set_controller_data(&data->chip, data);

	/* load our CS from the DT. Either we find a valid 1 or default to 0 */
	err = of_property_read_u32(pdev->dev.of_node, "lantiq,cs", &cs);
	if (!err && cs == 1)
		cs_flag = NAND_CON_IN_CS1 | NAND_CON_OUT_CS1;

	/* setup the EBU to run in NAND mode on our base addr */
	ltq_ebu_w32(CPHYSADDR(data->nandaddr)
		    | ADDSEL1_MASK(3) | ADDSEL1_REGEN, EBU_ADDSEL1);

	ltq_ebu_w32(BUSCON1_SETUP | BUSCON1_BCGEN_RES | BUSCON1_WAITWRC2
		    | BUSCON1_WAITRDC2 | BUSCON1_HOLDC1 | BUSCON1_RECOVC1
		    | BUSCON1_CMULT4, LTQ_EBU_BUSCON1);

	ltq_ebu_w32(NAND_CON_NANDM | NAND_CON_CSMUX | NAND_CON_CS_P
		    | NAND_CON_SE_P | NAND_CON_WP_P | NAND_CON_PRE_P
		    | cs_flag, EBU_NAND_CON);

	/* Scan to find existence of the device */
	err = nand_scan(mtd, 1);
	if (err)
		return err;

	err = mtd_device_register(mtd, NULL, 0);
	if (err)
		nand_release(mtd);

	return err;
}
/*
 * Walk the controller's ECC error FIFO after a page read and apply each
 * reported single-bit correction to @buf.  Sets *max_bitflips to the
 * number of corrections applied to this page.
 *
 * Returns true when an uncorrectable error was reported, in which case
 * the caller must check whether the page is simply erased (an erased
 * page fails ECC but is not a real error).
 */
static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
		       uint32_t irq_status, unsigned int *max_bitflips)
{
	bool check_erased_page = false;
	unsigned int bitflips = 0;

	if (irq_status & INTR_STATUS__ECC_ERR) {
		/* read the ECC errors. we'll ignore them for now */
		uint32_t err_address, err_correction_info, err_byte,
			 err_sector, err_device, err_correction_value;

		/* Mask interrupts while draining the error FIFO */
		denali_set_intr_modes(denali, false);

		do {
			err_address = ioread32(denali->flash_reg +
					       ECC_ERROR_ADDRESS);
			err_sector = ECC_SECTOR(err_address);
			err_byte = ECC_BYTE(err_address);

			err_correction_info = ioread32(denali->flash_reg +
						       ERR_CORRECTION_INFO);
			err_correction_value =
				ECC_CORRECTION_VALUE(err_correction_info);
			err_device = ECC_ERR_DEVICE(err_correction_info);

			if (ECC_ERROR_CORRECTABLE(err_correction_info)) {
				/*
				 * If err_byte is larger than ECC_SECTOR_SIZE,
				 * means error happened in OOB, so we ignore
				 * it. It's no need for us to correct it
				 * err_device is represented the NAND error
				 * bits are happened in if there are more
				 * than one NAND connected.
				 */
				if (err_byte < ECC_SECTOR_SIZE) {
					struct mtd_info *mtd =
						nand_to_mtd(&denali->nand);
					int offset;

					/*
					 * Translate (sector, byte, device)
					 * into an offset in the interleaved
					 * multi-device data buffer.
					 */
					offset = (err_sector *
						  ECC_SECTOR_SIZE +
						  err_byte) *
						  denali->devnum +
						  err_device;
					/* correct the ECC error */
					buf[offset] ^= err_correction_value;
					mtd->ecc_stats.corrected++;
					bitflips++;
				}
			} else {
				/*
				 * if the error is not correctable, need to
				 * look at the page to see if it is an erased
				 * page. if so, then it's not a real ECC error
				 */
				check_erased_page = true;
			}
		} while (!ECC_LAST_ERR(err_correction_info));
		/*
		 * Once handle all ecc errors, controller will triger
		 * a ECC_TRANSACTION_DONE interrupt, so here just wait
		 * for a while for this interrupt
		 */
		while (!(read_interrupt_status(denali) &
			 INTR_STATUS__ECC_TRANSACTION_DONE))
			cpu_relax();
		clear_interrupts(denali);
		denali_set_intr_modes(denali, true);
	}
	*max_bitflips = bitflips;
	return check_erased_page;
}
/*
 * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
 *
 * Reads up to three copies of the ONFI parameter page, validates them by
 * CRC (falling back to bit-wise majority recovery), then fills in mtd and
 * chip geometry, ECC requirements and ONFI timing parameters from the
 * page.  Returns 1 on success, 0 when the chip is not ONFI (or the page
 * is unusable), or a negative error on allocation failure.
 */
int nand_onfi_detect(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_onfi_params *p;
	struct onfi_params *onfi;
	int onfi_version = 0;
	char id[4];
	int i, ret, val;

	/* Try ONFI for unknown chip or LP */
	ret = nand_readid_op(chip, 0x20, id, sizeof(id));
	if (ret || strncmp(id, "ONFI", 4))
		return 0;

	/* ONFI chip: allocate a buffer to hold its parameter page */
	p = kzalloc((sizeof(*p) * 3), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = nand_read_param_page_op(chip, 0, NULL, 0);
	if (ret) {
		ret = 0;
		goto free_onfi_param_page;
	}

	for (i = 0; i < 3; i++) {
		ret = nand_read_data_op(chip, &p[i], sizeof(*p), true);
		if (ret) {
			ret = 0;
			goto free_onfi_param_page;
		}

		/*
		 * NOTE(review): this compares the CRC computed over copy i
		 * against p->crc, i.e. copy 0's crc field; later kernels
		 * use p[i].crc here. Confirm which revision is intended.
		 */
		if (onfi_crc16(ONFI_CRC_BASE, (u8 *)&p[i], 254) ==
		    le16_to_cpu(p->crc)) {
			if (i)
				memcpy(p, &p[i], sizeof(*p));
			break;
		}
	}

	if (i == 3) {
		const void *srcbufs[3] = {p, p + 1, p + 2};

		pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n");
		nand_bit_wise_majority(srcbufs, ARRAY_SIZE(srcbufs), p,
				       sizeof(*p));

		if (onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 254) !=
		    le16_to_cpu(p->crc)) {
			pr_err("ONFI parameter recovery failed, aborting\n");
			/* ret is 0 here: report "not ONFI" to the caller */
			goto free_onfi_param_page;
		}
	}

	/* Give the vendor code a chance to patch up the parameter page */
	if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
	    chip->manufacturer.desc->ops->fixup_onfi_param_page)
		chip->manufacturer.desc->ops->fixup_onfi_param_page(chip, p);

	/* Check version */
	val = le16_to_cpu(p->revision);
	if (val & ONFI_VERSION_2_3)
		onfi_version = 23;
	else if (val & ONFI_VERSION_2_2)
		onfi_version = 22;
	else if (val & ONFI_VERSION_2_1)
		onfi_version = 21;
	else if (val & ONFI_VERSION_2_0)
		onfi_version = 20;
	else if (val & ONFI_VERSION_1_0)
		onfi_version = 10;

	if (!onfi_version) {
		pr_info("unsupported ONFI version: %d\n", val);
		goto free_onfi_param_page;
	}

	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
	sanitize_string(p->model, sizeof(p->model));
	chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
	if (!chip->parameters.model) {
		ret = -ENOMEM;
		goto free_onfi_param_page;
	}

	mtd->writesize = le32_to_cpu(p->byte_per_page);

	/*
	 * pages_per_block and blocks_per_lun may not be a power-of-2 size
	 * (don't ask me who thought of this...). MTD assumes that these
	 * dimensions will be power-of-2, so just truncate the remaining area.
	 */
	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
	mtd->erasesize *= mtd->writesize;

	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);

	/* See erasesize comment */
	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
	chip->bits_per_cell = p->bits_per_cell;

	chip->max_bb_per_die = le16_to_cpu(p->bb_per_lun);
	chip->blocks_per_die = le32_to_cpu(p->blocks_per_lun);

	if (le16_to_cpu(p->features) & ONFI_FEATURE_16_BIT_BUS)
		chip->options |= NAND_BUSWIDTH_16;

	if (p->ecc_bits != 0xff) {
		chip->ecc_strength_ds = p->ecc_bits;
		chip->ecc_step_ds = 512;
	} else if (onfi_version >= 21 &&
		   (le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
		/*
		 * The nand_flash_detect_ext_param_page() uses the
		 * Change Read Column command which maybe not supported
		 * by the chip->legacy.cmdfunc. So try to update the
		 * chip->legacy.cmdfunc now. We do not replace user supplied
		 * command function.
		 */
		nand_legacy_adjust_cmdfunc(chip);

		/* The Extended Parameter Page is supported since ONFI 2.1. */
		if (nand_flash_detect_ext_param_page(chip, p))
			pr_warn("Failed to detect ONFI extended param page\n");
	} else {
		pr_warn("Could not retrieve ONFI ECC requirements\n");
	}

	/* Save some parameters from the parameter page for future use */
	if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_SET_GET_FEATURES) {
		chip->parameters.supports_set_get_features = true;
		bitmap_set(chip->parameters.get_feature_list,
			   ONFI_FEATURE_ADDR_TIMING_MODE, 1);
		bitmap_set(chip->parameters.set_feature_list,
			   ONFI_FEATURE_ADDR_TIMING_MODE, 1);
	}

	onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);
	if (!onfi) {
		ret = -ENOMEM;
		goto free_model;
	}

	onfi->version = onfi_version;
	onfi->tPROG = le16_to_cpu(p->t_prog);
	onfi->tBERS = le16_to_cpu(p->t_bers);
	onfi->tR = le16_to_cpu(p->t_r);
	onfi->tCCS = le16_to_cpu(p->t_ccs);
	onfi->async_timing_mode = le16_to_cpu(p->async_timing_mode);
	onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
	memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
	chip->parameters.onfi = onfi;

	/* Identification done, free the full ONFI parameter page and exit */
	kfree(p);

	return 1;

free_model:
	kfree(chip->parameters.model);
free_onfi_param_page:
	kfree(p);

	return ret;
}
/*
 * spinand_probe - [spinand Interface]
 * @spi_nand: registered device driver.
 *
 * Description:
 *   To set up the device driver parameters to make the device available.
 *
 * Allocates the per-device state and a nand_chip, selects on-die HW ECC
 * or software ECC depending on CONFIG_MTD_SPINAND_ONDIEECC, wires up the
 * SPI-NAND I/O callbacks, then scans and registers the MTD device.
 */
static int spinand_probe(struct spi_device *spi_nand)
{
	struct mtd_info *mtd;
	struct nand_chip *chip;
	struct spinand_info *info;
	struct spinand_state *state;

	info = devm_kzalloc(&spi_nand->dev, sizeof(struct spinand_info),
			    GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->spi = spi_nand;

	/* Unlock all blocks before any further access */
	spinand_lock_block(spi_nand, BL_ALL_UNLOCKED);

	state = devm_kzalloc(&spi_nand->dev, sizeof(struct spinand_state),
			     GFP_KERNEL);
	if (!state)
		return -ENOMEM;

	info->priv = state;
	state->buf_ptr = 0;
	state->buf = devm_kzalloc(&spi_nand->dev, BUFSIZE, GFP_KERNEL);
	if (!state->buf)
		return -ENOMEM;

	chip = devm_kzalloc(&spi_nand->dev, sizeof(struct nand_chip),
			    GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

#ifdef CONFIG_MTD_SPINAND_ONDIEECC
	/* On-die ECC: 512-byte steps, 6 ECC bytes, 4 steps per page */
	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.size = 0x200;
	chip->ecc.bytes = 0x6;
	chip->ecc.steps = 0x4;
	chip->ecc.strength = 1;
	chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
	chip->ecc.layout = &spinand_oob_64;
	chip->ecc.read_page = spinand_read_page_hwecc;
	chip->ecc.write_page = spinand_write_page_hwecc;
#else
	chip->ecc.mode = NAND_ECC_SOFT;
	/* Best effort: report but continue if the device keeps ECC on */
	if (spinand_disable_ecc(spi_nand) < 0)
		pr_info("%s: disable ecc failed!\n", __func__);
#endif

	nand_set_flash_node(chip, spi_nand->dev.of_node);
	nand_set_controller_data(chip, info);
	chip->read_buf = spinand_read_buf;
	chip->write_buf = spinand_write_buf;
	chip->read_byte = spinand_read_byte;
	chip->cmdfunc = spinand_cmdfunc;
	chip->waitfunc = spinand_wait;
	chip->options |= NAND_CACHEPRG;
	chip->select_chip = spinand_select_chip;

	mtd = nand_to_mtd(chip);

	dev_set_drvdata(&spi_nand->dev, mtd);

	mtd->dev.parent = &spi_nand->dev;
	mtd->oobsize = 64;

	if (nand_scan(mtd, 1))
		return -ENXIO;

	return mtd_device_register(mtd, NULL, 0);
}
/*
 * Probe for the NAND device.
 *
 * Validates the platform data, maps the controller I/O window, copies
 * the platform-provided callbacks into the nand_chip (soft Hamming ECC),
 * runs the optional platform probe hook, then scans and registers the
 * chip(s).  On failure after the platform hook ran, its remove hook is
 * invoked on the shared error path.
 */
static int plat_nand_probe(struct platform_device *pdev)
{
	struct platform_nand_data *pdata = dev_get_platdata(&pdev->dev);
	struct plat_nand_data *data;
	struct mtd_info *mtd;
	struct resource *res;
	const char **part_types;
	int err = 0;

	if (!pdata) {
		dev_err(&pdev->dev, "platform_nand_data is missing\n");
		return -EINVAL;
	}

	if (pdata->chip.nr_chips < 1) {
		dev_err(&pdev->dev, "invalid number of chips specified\n");
		return -EINVAL;
	}

	/* Allocate memory for the device structure (and zero it) */
	data = devm_kzalloc(&pdev->dev, sizeof(struct plat_nand_data),
			    GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	data->io_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(data->io_base))
		return PTR_ERR(data->io_base);

	nand_set_flash_node(&data->chip, pdev->dev.of_node);
	mtd = nand_to_mtd(&data->chip);
	mtd->dev.parent = &pdev->dev;

	data->chip.IO_ADDR_R = data->io_base;
	data->chip.IO_ADDR_W = data->io_base;
	data->chip.cmd_ctrl = pdata->ctrl.cmd_ctrl;
	data->chip.dev_ready = pdata->ctrl.dev_ready;
	data->chip.select_chip = pdata->ctrl.select_chip;
	data->chip.write_buf = pdata->ctrl.write_buf;
	data->chip.read_buf = pdata->ctrl.read_buf;
	data->chip.read_byte = pdata->ctrl.read_byte;
	data->chip.chip_delay = pdata->chip.chip_delay;
	data->chip.options |= pdata->chip.options;
	data->chip.bbt_options |= pdata->chip.bbt_options;

	data->chip.ecc.hwctl = pdata->ctrl.hwcontrol;
	data->chip.ecc.mode = NAND_ECC_SOFT;
	data->chip.ecc.algo = NAND_ECC_HAMMING;

	platform_set_drvdata(pdev, data);

	/* Handle any platform specific setup */
	if (pdata->ctrl.probe) {
		err = pdata->ctrl.probe(pdev);
		if (err)
			goto out;
	}

	/* Scan to find existence of the device */
	err = nand_scan(mtd, pdata->chip.nr_chips);
	if (err)
		goto out;

	part_types = pdata->chip.part_probe_types;

	err = mtd_device_parse_register(mtd, part_types, NULL,
					pdata->chip.partitions,
					pdata->chip.nr_partitions);

	if (!err)
		return err;

	nand_release(mtd);
out:
	/* Undo the platform hook's setup if it ran */
	if (pdata->ctrl.remove)
		pdata->ctrl.remove(pdev);
	return err;
}
/*
 * BCM4706-specific controller init: install the nand_chip callbacks,
 * enable NAND flash access in chipcommon, derive the controller clock
 * to program the wait-state counters, scan the chip, then program the
 * row/column addressing configuration from the detected geometry.
 *
 * Returns 0 on success; on error the NAND flash access enable is
 * reverted before returning.
 */
int bcm47xxnflash_ops_bcm4706_init(struct bcm47xxnflash *b47n)
{
	struct nand_chip *nand_chip = (struct nand_chip *)&b47n->nand_chip;
	int err;
	u32 freq;
	u16 clock;
	u8 w0, w1, w2, w3, w4;
	unsigned long chipsize; /* MiB */
	u8 tbits, col_bits, col_size, row_bits, row_bsize;
	u32 val;

	b47n->nand_chip.select_chip = bcm47xxnflash_ops_bcm4706_select_chip;
	nand_chip->cmd_ctrl = bcm47xxnflash_ops_bcm4706_cmd_ctrl;
	nand_chip->dev_ready = bcm47xxnflash_ops_bcm4706_dev_ready;
	b47n->nand_chip.cmdfunc = bcm47xxnflash_ops_bcm4706_cmdfunc;
	b47n->nand_chip.read_byte = bcm47xxnflash_ops_bcm4706_read_byte;
	b47n->nand_chip.read_buf = bcm47xxnflash_ops_bcm4706_read_buf;
	b47n->nand_chip.write_buf = bcm47xxnflash_ops_bcm4706_write_buf;
	nand_chip->chip_delay = 50;
	b47n->nand_chip.bbt_options = NAND_BBT_USE_FLASH;
	b47n->nand_chip.ecc.mode = NAND_ECC_NONE; /* TODO: implement ECC */

	/* Enable NAND flash access */
	bcma_cc_set32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
		      BCMA_CC_4706_FLASHSCFG_NF1);

	/* Configure wait counters */
	if (b47n->cc->status & BCMA_CC_CHIPST_4706_PKG_OPTION) {
		/* 400 MHz */
		freq = 400000000 / 4;
	} else {
		freq = bcma_chipco_pll_read(b47n->cc, 4);
		freq = (freq & 0xFFF) >> 3;
		/* Fixed reference clock 25 MHz and m = 2 */
		freq = (freq * 25000000 / 2) / 4;
	}
	clock = freq / 1000000;
	/* Convert the datasheet nanosecond timings into clock cycles */
	w0 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(15, clock);
	w1 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(20, clock);
	w2 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
	w3 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(10, clock);
	w4 = bcm47xxnflash_ops_bcm4706_ns_to_cycle(100, clock);
	bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_WAITCNT0,
			(w4 << 24 | w3 << 18 | w2 << 12 | w1 << 6 | w0));

	/* Scan NAND */
	err = nand_scan(nand_to_mtd(&b47n->nand_chip), 1);
	if (err) {
		pr_err("Could not scan NAND flash: %d\n", err);
		goto exit;
	}

	/* Configure FLASH */
	chipsize = b47n->nand_chip.chipsize >> 20;
	tbits = ffs(chipsize); /* find first bit set */
	/* Only power-of-two chip sizes are supported */
	if (!tbits || tbits != fls(chipsize)) {
		pr_err("Invalid flash size: 0x%lX\n", chipsize);
		err = -ENOTSUPP;
		goto exit;
	}
	tbits += 19; /* Broadcom increases *index* by 20, we increase *pos* */

	col_bits = b47n->nand_chip.page_shift + 1;
	col_size = (col_bits + 7) / 8;

	row_bits = tbits - col_bits + 1;
	row_bsize = (row_bits + 7) / 8;

	val = ((row_bsize - 1) << 6) | ((col_size - 1) << 4) | 2;
	bcma_cc_write32(b47n->cc, BCMA_CC_NFLASH_CONF, val);

exit:
	/* On failure, revert the NAND flash access enable from above */
	if (err)
		bcma_cc_mask32(b47n->cc, BCMA_CC_4706_FLASHSCFG,
			       ~BCMA_CC_4706_FLASHSCFG_NF1);
	return err;
}
/*
 * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
 *
 * Reads up to three copies of the JEDEC parameter page until one passes
 * the CRC check, then fills in mtd/chip geometry and the ECC requirement
 * from the first ECC info block.  Returns 1 on success, 0 when the chip
 * is not JEDEC (or no valid page was found), or a negative error on
 * allocation failure.
 */
int nand_jedec_detect(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	struct nand_jedec_params *p;
	struct jedec_ecc_info *ecc;
	int jedec_version = 0;
	char id[5];
	int i, val, ret;

	/* Try JEDEC for unknown chip or LP */
	ret = nand_readid_op(chip, 0x40, id, sizeof(id));
	if (ret || strncmp(id, "JEDEC", sizeof(id)))
		return 0;

	/* JEDEC chip: allocate a buffer to hold its parameter page */
	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	ret = nand_read_param_page_op(chip, 0x40, NULL, 0);
	if (ret) {
		ret = 0;
		goto free_jedec_param_page;
	}

	/* Up to three redundant copies; stop at the first CRC-valid one */
	for (i = 0; i < 3; i++) {
		ret = nand_read_data_op(chip, p, sizeof(*p), true);
		if (ret) {
			ret = 0;
			goto free_jedec_param_page;
		}

		if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 510) ==
		    le16_to_cpu(p->crc))
			break;
	}

	if (i == 3) {
		pr_err("Could not find valid JEDEC parameter page; aborting\n");
		goto free_jedec_param_page;
	}

	/* Check version */
	val = le16_to_cpu(p->revision);
	if (val & (1 << 2))
		jedec_version = 10;
	else if (val & (1 << 1))
		jedec_version = 1; /* vendor specific version */

	if (!jedec_version) {
		pr_info("unsupported JEDEC version: %d\n", val);
		goto free_jedec_param_page;
	}

	sanitize_string(p->manufacturer, sizeof(p->manufacturer));
	sanitize_string(p->model, sizeof(p->model));
	chip->parameters.model = kstrdup(p->model, GFP_KERNEL);
	if (!chip->parameters.model) {
		ret = -ENOMEM;
		goto free_jedec_param_page;
	}

	mtd->writesize = le32_to_cpu(p->byte_per_page);

	/* Please reference to the comment for nand_flash_detect_onfi. */
	mtd->erasesize = 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
	mtd->erasesize *= mtd->writesize;

	mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);

	/* Please reference to the comment for nand_flash_detect_onfi. */
	chip->chipsize = 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
	chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
	chip->bits_per_cell = p->bits_per_cell;

	if (le16_to_cpu(p->features) & JEDEC_FEATURE_16_BIT_BUS)
		chip->options |= NAND_BUSWIDTH_16;

	/* ECC info */
	ecc = &p->ecc_info[0];

	if (ecc->codeword_size >= 9) {
		chip->ecc_strength_ds = ecc->ecc_bits;
		chip->ecc_step_ds = 1 << ecc->codeword_size;
	} else {
		pr_warn("Invalid codeword size\n");
	}

	ret = 1;

free_jedec_param_page:
	kfree(p);
	return ret;
}
/*
 * Denali NAND controller init (U-Boot variant).
 *
 * Sets up the nand_chip callbacks, runs the two-stage scan, derives the
 * per-step ECC byte count from the controller's ECC_CORRECTION register
 * and the configured step size, programs the controller geometry
 * registers from the detected chip, installs the HW-ECC page/OOB
 * operations, and registers the device.
 */
int denali_init(struct denali_nand_info *denali)
{
	struct mtd_info *mtd = nand_to_mtd(&denali->nand);
	int ret;

	denali_hw_init(denali);

	mtd->name = "denali-nand";
	mtd->owner = THIS_MODULE;

	/* register the driver with the NAND core subsystem */
	denali->nand.select_chip = denali_select_chip;
	denali->nand.cmdfunc = denali_cmdfunc;
	denali->nand.read_byte = denali_read_byte;
	denali->nand.read_buf = denali_read_buf;
	denali->nand.waitfunc = denali_waitfunc;

	/*
	 * scan for NAND devices attached to the controller
	 * this is the first stage in a two step process to register
	 * with the nand subsystem
	 */
	if (nand_scan_ident(mtd, denali->max_banks, NULL)) {
		ret = -ENXIO;
		goto fail;
	}

#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT
	/* check whether flash got BBT table (located at end of flash). As we
	 * use NAND_BBT_NO_OOB, the BBT page will start with
	 * bbt_pattern. We will have mirror pattern too */
	denali->nand.bbt_options |= NAND_BBT_USE_FLASH;
	/*
	 * We are using main + spare with ECC support. As BBT need ECC support,
	 * we need to ensure BBT code don't write to OOB for the BBT pattern.
	 * All BBT info will be stored into data area with ECC support.
	 */
	denali->nand.bbt_options |= NAND_BBT_NO_OOB;
#endif

	denali->nand.ecc.mode = NAND_ECC_HW;
	denali->nand.ecc.size = CONFIG_NAND_DENALI_ECC_SIZE;

	/* no subpage writes on denali */
	denali->nand.options |= NAND_NO_SUBPAGE_WRITE;

	/*
	 * Tell driver the ecc strength. This register may be already set
	 * correctly. So we read this value out.
	 */
	denali->nand.ecc.strength = readl(denali->flash_reg + ECC_CORRECTION);
	/* ECC bytes per step: 13 (512B) or 14 (1KiB) bits per correction,
	 * rounded up to 16-bit words */
	switch (denali->nand.ecc.size) {
	case 512:
		denali->nand.ecc.bytes =
			(denali->nand.ecc.strength * 13 + 15) / 16 * 2;
		break;
	case 1024:
		denali->nand.ecc.bytes =
			(denali->nand.ecc.strength * 14 + 15) / 16 * 2;
		break;
	default:
		pr_err("Unsupported ECC size\n");
		ret = -EINVAL;
		goto fail;
	}
	nand_oob.eccbytes = denali->nand.ecc.bytes;
	denali->nand.ecc.layout = &nand_oob;

	/* Program the controller with the detected chip geometry */
	writel(mtd->erasesize / mtd->writesize,
	       denali->flash_reg + PAGES_PER_BLOCK);
	writel(denali->nand.options & NAND_BUSWIDTH_16 ? 1 : 0,
	       denali->flash_reg + DEVICE_WIDTH);
	writel(mtd->writesize, denali->flash_reg + DEVICE_MAIN_AREA_SIZE);
	writel(mtd->oobsize, denali->flash_reg + DEVICE_SPARE_AREA_SIZE);
	if (readl(denali->flash_reg + DEVICES_CONNECTED) == 0)
		writel(1, denali->flash_reg + DEVICES_CONNECTED);

	/* override the default operations */
	denali->nand.ecc.read_page = denali_read_page;
	denali->nand.ecc.read_page_raw = denali_read_page_raw;
	denali->nand.ecc.write_page = denali_write_page;
	denali->nand.ecc.write_page_raw = denali_write_page_raw;
	denali->nand.ecc.read_oob = denali_read_oob;
	denali->nand.ecc.write_oob = denali_write_oob;

	if (nand_scan_tail(mtd)) {
		ret = -ENXIO;
		goto fail;
	}

	ret = nand_register(0, mtd);

fail:
	return ret;
}
/*
 * Main initialization routine
 *
 * Allocates the nand_chip, maps the GPIO/MPUIO window used for NAND I/O,
 * requests the ready and control GPIOs, scans the chip with soft Hamming
 * ECC, and registers the static partition table.  Error paths unwind in
 * reverse order of acquisition.
 */
static int ams_delta_init(struct platform_device *pdev)
{
	struct nand_chip *this;
	struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	void __iomem *io_base;
	int err = 0;

	if (!res)
		return -ENXIO;

	/* Allocate memory for MTD device structure and private data */
	this = kzalloc(sizeof(struct nand_chip), GFP_KERNEL);
	if (!this) {
		printk(KERN_WARNING
		       "Unable to allocate E3 NAND MTD device structure.\n");
		err = -ENOMEM;
		goto out;
	}

	ams_delta_mtd = nand_to_mtd(this);
	ams_delta_mtd->owner = THIS_MODULE;

	/*
	 * Don't try to request the memory region from here,
	 * it should have been already requested from the
	 * gpio-omap driver and requesting it again would fail.
	 */
	io_base = ioremap(res->start, resource_size(res));
	if (io_base == NULL) {
		dev_err(&pdev->dev, "ioremap failed\n");
		err = -EIO;
		goto out_free;
	}

	nand_set_controller_data(this, (void *)io_base);

	/* Set address of NAND IO lines */
	this->IO_ADDR_R = io_base + OMAP_MPUIO_INPUT_LATCH;
	this->IO_ADDR_W = io_base + OMAP_MPUIO_OUTPUT;
	this->read_byte = ams_delta_read_byte;
	this->write_buf = ams_delta_write_buf;
	this->read_buf = ams_delta_read_buf;
	this->cmd_ctrl = ams_delta_hwcontrol;
	if (gpio_request(AMS_DELTA_GPIO_PIN_NAND_RB, "nand_rdy") == 0) {
		this->dev_ready = ams_delta_nand_ready;
	} else {
		/* Fall back to a fixed command delay when R/B# is missing */
		this->dev_ready = NULL;
		printk(KERN_NOTICE
		       "Couldn't request gpio for Delta NAND ready.\n");
	}
	/* 30 us command delay time */
	this->chip_delay = 30;
	this->ecc.mode = NAND_ECC_SOFT;
	this->ecc.algo = NAND_ECC_HAMMING;

	platform_set_drvdata(pdev, io_base);

	/* Set chip enabled, but */
	err = gpio_request_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
	if (err)
		goto out_gpio;

	/* Scan to find existence of the device */
	err = nand_scan(ams_delta_mtd, 1);
	if (err)
		goto out_mtd;

	/* Register the partitions */
	mtd_device_register(ams_delta_mtd, partition_info,
			    ARRAY_SIZE(partition_info));

	goto out;

out_mtd:
	gpio_free_array(_mandatory_gpio, ARRAY_SIZE(_mandatory_gpio));
out_gpio:
	gpio_free(AMS_DELTA_GPIO_PIN_NAND_RB);
	iounmap(io_base);
out_free:
	kfree(this);
out:
	return err;
}
/* * Probe for the NAND device. */ static int oxnas_nand_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct device_node *nand_np; struct oxnas_nand_ctrl *oxnas; struct nand_chip *chip; struct mtd_info *mtd; struct resource *res; int nchips = 0; int count = 0; int err = 0; /* Allocate memory for the device structure (and zero it) */ oxnas = devm_kzalloc(&pdev->dev, sizeof(struct nand_chip), GFP_KERNEL); if (!oxnas) return -ENOMEM; nand_hw_control_init(&oxnas->base); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); oxnas->io_base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(oxnas->io_base)) return PTR_ERR(oxnas->io_base); oxnas->clk = devm_clk_get(&pdev->dev, NULL); if (IS_ERR(oxnas->clk)) oxnas->clk = NULL; /* Only a single chip node is supported */ count = of_get_child_count(np); if (count > 1) return -EINVAL; clk_prepare_enable(oxnas->clk); device_reset_optional(&pdev->dev); for_each_child_of_node(np, nand_np) { chip = devm_kzalloc(&pdev->dev, sizeof(struct nand_chip), GFP_KERNEL); if (!chip) return -ENOMEM; chip->controller = &oxnas->base; nand_set_flash_node(chip, nand_np); nand_set_controller_data(chip, oxnas); mtd = nand_to_mtd(chip); mtd->dev.parent = &pdev->dev; mtd->priv = chip; chip->cmd_ctrl = oxnas_nand_cmd_ctrl; chip->read_buf = oxnas_nand_read_buf; chip->read_byte = oxnas_nand_read_byte; chip->write_buf = oxnas_nand_write_buf; chip->chip_delay = 30; /* Scan to find existence of the device */ err = nand_scan(mtd, 1); if (err) return err; err = mtd_device_register(mtd, NULL, 0); if (err) { nand_release(mtd); return err; } oxnas->chips[nchips] = chip; ++nchips; }
int denali_init(struct denali_nand_info *denali) { struct mtd_info *mtd = nand_to_mtd(&denali->nand); int ret; if (denali->platform == INTEL_CE4100) { /* * Due to a silicon limitation, we can only support * ONFI timing mode 1 and below. */ if (onfi_timing_mode < -1 || onfi_timing_mode > 1) { pr_err("Intel CE4100 only supports ONFI timing mode 1 or below\n"); return -EINVAL; } } /* allocate a temporary buffer for nand_scan_ident() */ denali->buf.buf = devm_kzalloc(denali->dev, PAGE_SIZE, GFP_DMA | GFP_KERNEL); if (!denali->buf.buf) return -ENOMEM; mtd->dev.parent = denali->dev; denali_hw_init(denali); denali_drv_init(denali); /* Request IRQ after all the hardware initialization is finished */ ret = devm_request_irq(denali->dev, denali->irq, denali_isr, IRQF_SHARED, DENALI_NAND_NAME, denali); if (ret) { dev_err(denali->dev, "Unable to request IRQ\n"); return ret; } /* now that our ISR is registered, we can enable interrupts */ denali_set_intr_modes(denali, true); mtd->name = "denali-nand"; /* register the driver with the NAND core subsystem */ denali->nand.select_chip = denali_select_chip; denali->nand.cmdfunc = denali_cmdfunc; denali->nand.read_byte = denali_read_byte; denali->nand.waitfunc = denali_waitfunc; /* * scan for NAND devices attached to the controller * this is the first stage in a two step process to register * with the nand subsystem */ ret = nand_scan_ident(mtd, denali->max_banks, NULL); if (ret) goto failed_req_irq; /* allocate the right size buffer now */ devm_kfree(denali->dev, denali->buf.buf); denali->buf.buf = devm_kzalloc(denali->dev, mtd->writesize + mtd->oobsize, GFP_KERNEL); if (!denali->buf.buf) { ret = -ENOMEM; goto failed_req_irq; } /* Is 32-bit DMA supported? 
*/ ret = dma_set_mask(denali->dev, DMA_BIT_MASK(32)); if (ret) { dev_err(denali->dev, "No usable DMA configuration\n"); goto failed_req_irq; } denali->buf.dma_buf = dma_map_single(denali->dev, denali->buf.buf, mtd->writesize + mtd->oobsize, DMA_BIDIRECTIONAL); if (dma_mapping_error(denali->dev, denali->buf.dma_buf)) { dev_err(denali->dev, "Failed to map DMA buffer\n"); ret = -EIO; goto failed_req_irq; } /* * support for multi nand * MTD known nothing about multi nand, so we should tell it * the real pagesize and anything necessery */ denali->devnum = ioread32(denali->flash_reg + DEVICES_CONNECTED); denali->nand.chipsize <<= denali->devnum - 1; denali->nand.page_shift += denali->devnum - 1; denali->nand.pagemask = (denali->nand.chipsize >> denali->nand.page_shift) - 1; denali->nand.bbt_erase_shift += denali->devnum - 1; denali->nand.phys_erase_shift = denali->nand.bbt_erase_shift; denali->nand.chip_shift += denali->devnum - 1; mtd->writesize <<= denali->devnum - 1; mtd->oobsize <<= denali->devnum - 1; mtd->erasesize <<= denali->devnum - 1; mtd->size = denali->nand.numchips * denali->nand.chipsize; denali->bbtskipbytes *= denali->devnum; /* * second stage of the NAND scan * this stage requires information regarding ECC and * bad block management. */ /* Bad block management */ denali->nand.bbt_td = &bbt_main_descr; denali->nand.bbt_md = &bbt_mirror_descr; /* skip the scan for now until we have OOB read and write support */ denali->nand.bbt_options |= NAND_BBT_USE_FLASH; denali->nand.options |= NAND_SKIP_BBTSCAN; denali->nand.ecc.mode = NAND_ECC_HW_SYNDROME; /* no subpage writes on denali */ denali->nand.options |= NAND_NO_SUBPAGE_WRITE; /* * Denali Controller only support 15bit and 8bit ECC in MRST, * so just let controller do 15bit ECC for MLC and 8bit ECC for * SLC if possible. 
* */ if (!nand_is_slc(&denali->nand) && (mtd->oobsize > (denali->bbtskipbytes + ECC_15BITS * (mtd->writesize / ECC_SECTOR_SIZE)))) { /* if MLC OOB size is large enough, use 15bit ECC*/ denali->nand.ecc.strength = 15; denali->nand.ecc.bytes = ECC_15BITS; iowrite32(15, denali->flash_reg + ECC_CORRECTION); } else if (mtd->oobsize < (denali->bbtskipbytes + ECC_8BITS * (mtd->writesize / ECC_SECTOR_SIZE))) { pr_err("Your NAND chip OOB is not large enough to contain 8bit ECC correction codes"); goto failed_req_irq; } else { denali->nand.ecc.strength = 8; denali->nand.ecc.bytes = ECC_8BITS; iowrite32(8, denali->flash_reg + ECC_CORRECTION); } mtd_set_ooblayout(mtd, &denali_ooblayout_ops); denali->nand.ecc.bytes *= denali->devnum; denali->nand.ecc.strength *= denali->devnum; /* override the default read operations */ denali->nand.ecc.size = ECC_SECTOR_SIZE * denali->devnum; denali->nand.ecc.read_page = denali_read_page; denali->nand.ecc.read_page_raw = denali_read_page_raw; denali->nand.ecc.write_page = denali_write_page; denali->nand.ecc.write_page_raw = denali_write_page_raw; denali->nand.ecc.read_oob = denali_read_oob; denali->nand.ecc.write_oob = denali_write_oob; denali->nand.erase = denali_erase; ret = nand_scan_tail(mtd); if (ret) goto failed_req_irq; ret = mtd_device_register(mtd, NULL, 0); if (ret) { dev_err(denali->dev, "Failed to register MTD: %d\n", ret); goto failed_req_irq; } return 0; failed_req_irq: denali_irq_cleanup(denali->irq, denali); return ret; }
/*
 * Probe the TXx9 NDFMC controller.
 *
 * Maps the register window, derives the HOLD and SPW bus-timing cycle
 * counts from the platform data and GBUS clock, then scans and
 * registers one NAND chip per channel enabled in plat->ch_mask.
 * Per-chip failures are logged and skipped; the probe itself only
 * fails on missing platform data or unmappable registers.
 */
static int __init txx9ndfmc_probe(struct platform_device *dev)
{
	struct txx9ndfmc_platform_data *plat = dev_get_platdata(&dev->dev);
	int hold, spw;
	int i;
	struct txx9ndfmc_drvdata *drvdata;
	unsigned long gbusclk;
	struct resource *res;

	/*
	 * FIX: plat was previously dereferenced (plat->gbus_clock) before
	 * any NULL check, oopsing if the device is bound without
	 * platform data.
	 */
	if (!plat)
		return -ENODEV;
	gbusclk = plat->gbus_clock;

	drvdata = devm_kzalloc(&dev->dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;
	res = platform_get_resource(dev, IORESOURCE_MEM, 0);
	drvdata->base = devm_ioremap_resource(&dev->dev, res);
	if (IS_ERR(drvdata->base))
		return PTR_ERR(drvdata->base);

	hold = plat->hold ?: 20;	/* tDH */
	spw = plat->spw ?: 90;		/* max(tREADID, tWP, tRP) */

	/* convert nanoseconds to GBUS clock cycles */
	hold = TXX9NDFMC_NS_TO_CYC(gbusclk, hold);
	spw = TXX9NDFMC_NS_TO_CYC(gbusclk, spw);
	if (plat->flags & NDFMC_PLAT_FLAG_HOLDADD)
		hold -= 2;	/* actual hold time : (HOLD + 2) BUSCLK */
	spw -= 1;		/* actual wait time : (SPW + 1) BUSCLK */
	/* both fields are 4-bit wide in hardware */
	hold = clamp(hold, 1, 15);
	drvdata->hold = hold;
	spw = clamp(spw, 1, 15);
	drvdata->spw = spw;
	dev_info(&dev->dev, "CLK:%ldMHz HOLD:%d SPW:%d\n",
		 (gbusclk + 500000) / 1000000, hold, spw);

	nand_controller_init(&drvdata->controller);
	drvdata->controller.ops = &txx9ndfmc_controller_ops;

	platform_set_drvdata(dev, drvdata);
	txx9ndfmc_initialize(dev);

	for (i = 0; i < MAX_TXX9NDFMC_DEV; i++) {
		struct txx9ndfmc_priv *txx9_priv;
		struct nand_chip *chip;
		struct mtd_info *mtd;

		if (!(plat->ch_mask & (1 << i)))
			continue;
		txx9_priv = kzalloc(sizeof(struct txx9ndfmc_priv),
				    GFP_KERNEL);
		if (!txx9_priv)
			continue;
		chip = &txx9_priv->chip;
		mtd = nand_to_mtd(chip);
		mtd->dev.parent = &dev->dev;

		chip->read_byte = txx9ndfmc_read_byte;
		chip->read_buf = txx9ndfmc_read_buf;
		chip->write_buf = txx9ndfmc_write_buf;
		chip->cmd_ctrl = txx9ndfmc_cmd_ctrl;
		chip->dev_ready = txx9ndfmc_dev_ready;
		chip->ecc.calculate = txx9ndfmc_calculate_ecc;
		chip->ecc.correct = txx9ndfmc_correct_data;
		chip->ecc.hwctl = txx9ndfmc_enable_hwecc;
		chip->ecc.mode = NAND_ECC_HW;
		chip->ecc.strength = 1;
		chip->chip_delay = 100;
		chip->controller = &drvdata->controller;

		nand_set_controller_data(chip, txx9_priv);
		txx9_priv->dev = dev;

		if (plat->ch_mask != 1) {
			/* multiple channels: name each MTD by channel */
			txx9_priv->cs = i;
			txx9_priv->mtdname = kasprintf(GFP_KERNEL, "%s.%u",
						       dev_name(&dev->dev), i);
		} else {
			txx9_priv->cs = -1;
			txx9_priv->mtdname = kstrdup(dev_name(&dev->dev),
						     GFP_KERNEL);
		}
		if (!txx9_priv->mtdname) {
			kfree(txx9_priv);
			dev_err(&dev->dev,
				"Unable to allocate MTD name.\n");
			continue;
		}
		if (plat->wide_mask & (1 << i))
			chip->options |= NAND_BUSWIDTH_16;

		if (nand_scan(mtd, 1)) {
			kfree(txx9_priv->mtdname);
			kfree(txx9_priv);
			continue;
		}
		mtd->name = txx9_priv->mtdname;

		mtd_device_register(mtd, NULL, 0);
		drvdata->mtds[i] = mtd;
	}

	return 0;
}
/*
 * Default nand_command and nand_command_lp don't match BCM4706 hardware layout.
 * For example, reading chip id is performed in a non-standard way.
 * Setting column and page is also handled differently, we use a special
 * registers of ChipCommon core. Hacking cmd_ctrl to understand and convert
 * standard commands would be much more complicated.
 */
static void bcm47xxnflash_ops_bcm4706_cmdfunc(struct nand_chip *nand_chip,
					      unsigned command, int column,
					      int page_addr)
{
	struct mtd_info *mtd = nand_to_mtd(nand_chip);
	struct bcm47xxnflash *b47n = nand_get_controller_data(nand_chip);
	struct bcma_drv_cc *cc = b47n->cc;
	u32 ctlcode;
	int i;

	/* Latch the requested address; -1 means "leave unchanged" */
	if (column != -1)
		b47n->curr_column = column;
	if (page_addr != -1)
		b47n->curr_page_addr = page_addr;

	switch (command) {
	case NAND_CMD_RESET:
		nand_chip->legacy.cmd_ctrl(nand_chip, command, NAND_CTRL_CLE);

		ndelay(100);
		nand_wait_ready(nand_chip);
		break;
	case NAND_CMD_READID:
		ctlcode = NCTL_CSA | 0x01000000 | NCTL_CMD1W | NCTL_CMD0;
		ctlcode |= NAND_CMD_READID;
		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc, ctlcode)) {
			pr_err("READID error\n");
			break;
		}

		/*
		 * Reading is specific, last one has to go without NCTL_CSA
		 * bit. We don't know how many reads NAND subsystem is going
		 * to perform, so cache everything.
		 */
		for (i = 0; i < ARRAY_SIZE(b47n->id_data); i++) {
			ctlcode = NCTL_CSA | NCTL_READ;
			/* final read must deassert chip select (no CSA) */
			if (i == ARRAY_SIZE(b47n->id_data) - 1)
				ctlcode &= ~NCTL_CSA;
			if (bcm47xxnflash_ops_bcm4706_ctl_cmd(b47n->cc,
							      ctlcode)) {
				pr_err("READID error\n");
				break;
			}
			b47n->id_data[i] =
				bcma_cc_read32(b47n->cc, BCMA_CC_NFLASH_DATA)
				& 0xFF;
		}

		break;
	case NAND_CMD_STATUS:
		ctlcode = NCTL_CSA | NCTL_CMD0 | NAND_CMD_STATUS;
		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
			pr_err("STATUS command error\n");
		break;
	case NAND_CMD_READ0:
		/* actual data transfer is done by the read_buf op */
		break;
	case NAND_CMD_READOOB:
		/* OOB lives right after the data area of the page */
		if (page_addr != -1)
			b47n->curr_column += mtd->writesize;
		break;
	case NAND_CMD_ERASE1:
		bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
				b47n->curr_page_addr);
		/* issue ERASE1 and ERASE2 as one two-byte command word */
		ctlcode = NCTL_ROW | NCTL_CMD1W | NCTL_CMD0 |
			  NAND_CMD_ERASE1 | (NAND_CMD_ERASE2 << 8);
		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
			pr_err("ERASE1 failed\n");
		break;
	case NAND_CMD_ERASE2:
		/* already sent together with ERASE1 above */
		break;
	case NAND_CMD_SEQIN:
		/* Set page and column */
		bcma_cc_write32(cc, BCMA_CC_NFLASH_COL_ADDR,
				b47n->curr_column);
		bcma_cc_write32(cc, BCMA_CC_NFLASH_ROW_ADDR,
				b47n->curr_page_addr);

		/* Prepare to write */
		ctlcode = 0x40000000 | NCTL_ROW | NCTL_COL | NCTL_CMD0;
		ctlcode |= NAND_CMD_SEQIN;
		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, ctlcode))
			pr_err("SEQIN failed\n");
		break;
	case NAND_CMD_PAGEPROG:
		if (bcm47xxnflash_ops_bcm4706_ctl_cmd(cc, NCTL_CMD0 |
							  NAND_CMD_PAGEPROG))
			pr_err("PAGEPROG failed\n");
		if (bcm47xxnflash_ops_bcm4706_poll(cc))
			pr_err("PAGEPROG not ready\n");
		break;
	default:
		pr_err("Command 0x%X unsupported\n", command);
		break;
	}
	/* remember last command so read_byte/read_buf know the context */
	b47n->curr_command = command;
}