/* Scheduled work - when we can't proceed without erasing a block */
static void mtdoops_workfunc_erase(struct work_struct *work)
{
	struct mtdoops_context *cxt =
			container_of(work, struct mtdoops_context, work_erase);
	struct mtd_info *mtd = cxt->mtd;
	int i = 0, j, ret, mod;

	/* We were unregistered */
	if (!mtd)
		return;

	mod = (cxt->nextpage * record_size) % mtd->erasesize;
	if (mod != 0) {
		cxt->nextpage = cxt->nextpage + ((mtd->erasesize - mod) / record_size);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
	}

	while (mtd_can_have_bb(mtd)) {
		ret = mtd_block_isbad(mtd, cxt->nextpage * record_size);
		if (!ret)
			break;
		if (ret < 0) {
			printk(KERN_ERR "mtdoops: block_isbad failed, aborting\n");
			return;
		}
badblock:
		printk(KERN_WARNING "mtdoops: bad block at %08lx\n",
		       cxt->nextpage * record_size);
		i++;
		cxt->nextpage = cxt->nextpage + (mtd->erasesize / record_size);
		if (cxt->nextpage >= cxt->oops_pages)
			cxt->nextpage = 0;
		if (i == cxt->oops_pages / (mtd->erasesize / record_size)) {
			printk(KERN_ERR "mtdoops: all blocks bad!\n");
			return;
		}
	}

	for (j = 0, ret = -1; (j < 3) && (ret < 0); j++)
		ret = mtdoops_erase_block(cxt, cxt->nextpage * record_size);

	if (ret >= 0) {
		printk(KERN_DEBUG "mtdoops: ready %d, %d\n",
		       cxt->nextpage, cxt->nextcount);
		return;
	}

	if (mtd_can_have_bb(mtd) && ret == -EIO) {
		ret = mtd_block_markbad(mtd, cxt->nextpage * record_size);
		if (ret < 0) {
			printk(KERN_ERR "mtdoops: block_markbad failed, aborting\n");
			return;
		}
	}
	goto badblock;
}
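/*
 * Context for the snippets in this section: everything here gates bad-block
 * handling on mtd_can_have_bb(). A minimal sketch of that helper, assuming
 * the Linux MTD API of this era (where device methods carry a leading
 * underscore); older kernels and barebox test mtd->block_isbad instead.
 * A device "can have" bad blocks only if it provides an isbad method,
 * i.e. NAND-style flash; NOR flash has no bad-block concept.
 */
static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
	return !!mtd->_block_isbad;
}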
static int scan_for_bad_eraseblocks(void)
{
	int i, bad = 0;

	bbt = kzalloc(ebcnt, GFP_KERNEL);
	if (!bbt) {
		printk(PRINT_PREF "error: cannot allocate memory\n");
		return -ENOMEM;
	}

	if (!mtd_can_have_bb(mtd))
		goto out;

	printk(PRINT_PREF "scanning for bad eraseblocks\n");
	for (i = 0; i < ebcnt; ++i) {
		bbt[i] = is_block_bad(i) ? 1 : 0;
		if (bbt[i])
			bad += 1;
		cond_resched();
	}
	printk(PRINT_PREF "scanned %d eraseblocks, %d are bad\n", i, bad);
out:
	goodebcnt = ebcnt - bad;
	return 0;
}
static int concat_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, err = -EINVAL;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return 0;

	if (ofs > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		err = mtd_block_markbad(subdev, ofs);
		if (!err)
			mtd->ecc_stats.badblocks++;
		break;
	}

	return err;
}
static int concat_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	struct mtd_concat *concat = CONCAT(mtd);
	int i, res = 0;

	if (!mtd_can_have_bb(concat->subdev[0]))
		return res;

	if (ofs > mtd->size)
		return -EINVAL;

	for (i = 0; i < concat->num_subdev; i++) {
		struct mtd_info *subdev = concat->subdev[i];

		if (ofs >= subdev->size) {
			ofs -= subdev->size;
			continue;
		}

		res = mtd_block_isbad(subdev, ofs);
		break;
	}

	return res;
}
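/*
 * The concat callbacks above sit behind the generic MTD wrappers. The
 * sketch below is modeled on drivers/mtd/mtdcore.c of this era and shows
 * the contract the callers in this section rely on: isbad reports 0 for
 * devices without a bad-block method, markbad rejects read-only devices.
 * Exact bounds checks and error codes may differ between versions.
 */
int mtd_block_isbad(struct mtd_info *mtd, loff_t ofs)
{
	if (ofs < 0 || ofs > mtd->size)
		return -EINVAL;
	if (!mtd->_block_isbad)
		return 0;
	return mtd->_block_isbad(mtd, ofs);
}

int mtd_block_markbad(struct mtd_info *mtd, loff_t ofs)
{
	if (!mtd->_block_markbad)
		return -EOPNOTSUPP;
	if (ofs < 0 || ofs > mtd->size)
		return -EINVAL;
	if (!(mtd->flags & MTD_WRITEABLE))
		return -EROFS;
	return mtd->_block_markbad(mtd, ofs);
}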
static void find_next_position(struct mtdoops_context *cxt)
{
	struct mtd_info *mtd = cxt->mtd;
	int ret, page, maxpos = 0;
	u32 count[2], maxcount = 0xffffffff;
	size_t retlen;

	for (page = 0; page < cxt->oops_pages; page++) {
		if (mtd_can_have_bb(mtd) &&
		    mtd_block_isbad(mtd, page * record_size))
			continue;
		/* Assume the page is used */
		mark_page_used(cxt, page);
		ret = mtd_read(mtd, page * record_size, MTDOOPS_HEADER_SIZE,
			       &retlen, (u_char *)&count[0]);
		if (retlen != MTDOOPS_HEADER_SIZE ||
				(ret < 0 && !mtd_is_bitflip(ret))) {
			printk(KERN_ERR "mtdoops: read failure at %ld (%td of %d read), err %d\n",
			       page * record_size, retlen,
			       MTDOOPS_HEADER_SIZE, ret);
			continue;
		}

		if (count[0] == 0xffffffff && count[1] == 0xffffffff)
			mark_page_unused(cxt, page);
		if (count[0] == 0xffffffff)
			continue;
		if (maxcount == 0xffffffff) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] < 0x40000000 && maxcount > 0xc0000000) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] > maxcount && count[0] < 0xc0000000) {
			maxcount = count[0];
			maxpos = page;
		} else if (count[0] > maxcount && count[0] > 0xc0000000 &&
			   maxcount > 0x80000000) {
			maxcount = count[0];
			maxpos = page;
		}
	}
	if (maxcount == 0xffffffff) {
		cxt->nextpage = 0;
		cxt->nextcount = 1;
		schedule_work(&cxt->work_erase);
		return;
	}

	cxt->nextpage = maxpos;
	cxt->nextcount = maxcount;

	mtdoops_inc_counter(cxt);
}
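/*
 * The chained comparisons above pick the highest record counter while
 * tolerating 32-bit wrap-around. Illustrative walk-through (numbers
 * invented, not from the source): the counter space is treated in
 * quadrants, so a "small" count below 0x40000000 beats a "large" one above
 * 0xc0000000 because the counter must have wrapped. E.g. with
 * maxcount = 0xfffffffe and count[0] = 2, the
 * (count[0] < 0x40000000 && maxcount > 0xc0000000) branch fires and the
 * page holding count 2 is correctly treated as the newest record.
 */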
int add_mtd_device(struct mtd_info *mtd, char *devname, int device_id)
{
	struct mtddev_hook *hook;

	if (!devname)
		devname = "mtd";
	strcpy(mtd->class_dev.name, devname);
	mtd->class_dev.id = device_id;
	if (mtd->parent)
		mtd->class_dev.parent = mtd->parent;
	register_device(&mtd->class_dev);

	mtd->cdev.ops = &mtd_ops;
	mtd->cdev.size = mtd->size;
	if (device_id == DEVICE_ID_SINGLE)
		mtd->cdev.name = xstrdup(devname);
	else
		mtd->cdev.name = asprintf("%s%d", devname, mtd->class_dev.id);

	mtd->cdev.priv = mtd;
	mtd->cdev.dev = &mtd->class_dev;
	mtd->cdev.mtd = mtd;

	if (IS_ENABLED(CONFIG_PARAMETER)) {
		dev_add_param_llint_ro(&mtd->class_dev, "size", mtd->size, "%llu");
		dev_add_param_int_ro(&mtd->class_dev, "erasesize", mtd->erasesize, "%u");
		dev_add_param_int_ro(&mtd->class_dev, "writesize", mtd->writesize, "%u");
		dev_add_param_int_ro(&mtd->class_dev, "oobsize", mtd->oobsize, "%u");
	}

	devfs_create(&mtd->cdev);

	if (mtd_can_have_bb(mtd))
		mtd->cdev_bb = mtd_add_bb(mtd, NULL);

	if (mtd->parent && !mtd->master)
		of_parse_partitions(&mtd->cdev, mtd->parent->device_node);

	list_for_each_entry(hook, &mtd_register_hooks, hook)
		if (hook->add_mtd_device)
			hook->add_mtd_device(mtd, devname, &hook->priv);

	return 0;
}
int mtdtest_scan_for_bad_eraseblocks(struct mtd_info *mtd, unsigned char *bbt,
				     unsigned int eb, int ebcnt)
{
	int i, bad = 0;

	if (!mtd_can_have_bb(mtd))
		return 0;

	pr_info("scanning for bad eraseblocks\n");

	for (i = 0; i < ebcnt; ++i) {
		bbt[i] = is_block_bad(mtd, eb + i) ? 1 : 0;
		if (bbt[i])
			bad += 1;
		cond_resched();
	}
	pr_info("scanned %d eraseblocks, %d are bad\n", i, bad);

	return 0;
}
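/*
 * Hypothetical caller sketch (names invented for illustration, not from
 * the source): a test module allocates one byte per eraseblock under test
 * and lets the helper above fill the table in; 0 means the block is good,
 * 1 means it is bad and must be skipped.
 */
static int example_scan(struct mtd_info *mtd, unsigned int eb, int ebcnt,
			unsigned char **bbtp)
{
	unsigned char *bbt;
	int err;

	bbt = kzalloc(ebcnt, GFP_KERNEL);
	if (!bbt)
		return -ENOMEM;

	err = mtdtest_scan_for_bad_eraseblocks(mtd, bbt, eb, ebcnt);
	if (err) {
		kfree(bbt);
		return err;
	}

	*bbtp = bbt;
	return 0;
}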
/**
 * io_init - initialize I/O sub-system for a given UBI device.
 * @ubi: UBI device description object
 * @max_beb_per1024: maximum expected number of bad PEBs per 1024 PEBs
 *
 * If @ubi->vid_hdr_offset or @ubi->leb_start is zero, default offsets are
 * assumed:
 * o EC header is always at offset zero - this cannot be changed;
 * o VID header starts just after the EC header at the closest address
 *   aligned to @io->hdrs_min_io_size;
 * o data starts just after the VID header at the closest address aligned to
 *   @io->min_io_size.
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
static int io_init(struct ubi_device *ubi, int max_beb_per1024)
{
	dbg_gen("sizeof(struct ubi_ainf_peb) %zu", sizeof(struct ubi_ainf_peb));
	dbg_gen("sizeof(struct ubi_wl_entry) %zu", sizeof(struct ubi_wl_entry));

	if (ubi->mtd->numeraseregions != 0) {
		/*
		 * Some flashes have several erase regions. Different regions
		 * may have different eraseblock size and other
		 * characteristics. It looks like mostly multi-region flashes
		 * have one "main" region and one or more small regions to
		 * store boot loader code or boot parameters or whatever. I
		 * guess we should just pick the largest region. But this is
		 * not implemented.
		 */
		ubi_err("multiple regions, not implemented");
		return -EINVAL;
	}

	if (ubi->vid_hdr_offset < 0)
		return -EINVAL;

	/*
	 * Note, in this implementation we support MTD devices with 0x7FFFFFFF
	 * physical eraseblocks maximum.
	 */
	ubi->peb_size   = ubi->mtd->erasesize;
	ubi->peb_count  = mtd_div_by_eb(ubi->mtd->size, ubi->mtd);
	ubi->flash_size = ubi->mtd->size;

	if (mtd_can_have_bb(ubi->mtd)) {
		ubi->bad_allowed = 1;
		ubi->bad_peb_limit = get_bad_peb_limit(ubi, max_beb_per1024);
	}

	if (ubi->mtd->type == MTD_NORFLASH) {
		ubi_assert(ubi->mtd->writesize == 1);
		ubi->nor_flash = 1;
	}

	ubi->min_io_size = ubi->mtd->writesize;
	ubi->hdrs_min_io_size = ubi->mtd->writesize >> ubi->mtd->subpage_sft;

	/*
	 * Make sure minimal I/O unit is power of 2. Note, there is no
	 * fundamental reason for this assumption. It is just an optimization
	 * which allows us to avoid costly division operations.
	 */
	if (!is_power_of_2(ubi->min_io_size)) {
		ubi_err("min. I/O unit (%d) is not power of 2",
			ubi->min_io_size);
		return -EINVAL;
	}

	ubi_assert(ubi->hdrs_min_io_size > 0);
	ubi_assert(ubi->hdrs_min_io_size <= ubi->min_io_size);
	ubi_assert(ubi->min_io_size % ubi->hdrs_min_io_size == 0);

	ubi->max_write_size = ubi->mtd->writesize; /* FIXME: writebufsize */

	/*
	 * Maximum write size has to be greater than or equal to min. I/O
	 * size, and be a multiple of min. I/O size.
	 */
	if (ubi->max_write_size < ubi->min_io_size ||
	    ubi->max_write_size % ubi->min_io_size ||
	    !is_power_of_2(ubi->max_write_size)) {
		ubi_err("bad write buffer size %d for %d min. I/O unit",
			ubi->max_write_size, ubi->min_io_size);
		return -EINVAL;
	}

	/* Calculate default aligned sizes of EC and VID headers */
	ubi->ec_hdr_alsize = ALIGN(UBI_EC_HDR_SIZE, ubi->hdrs_min_io_size);
	ubi->vid_hdr_alsize = ALIGN(UBI_VID_HDR_SIZE, ubi->hdrs_min_io_size);

	dbg_gen("min_io_size      %d", ubi->min_io_size);
	dbg_gen("max_write_size   %d", ubi->max_write_size);
	dbg_gen("hdrs_min_io_size %d", ubi->hdrs_min_io_size);
	dbg_gen("ec_hdr_alsize    %d", ubi->ec_hdr_alsize);
	dbg_gen("vid_hdr_alsize   %d", ubi->vid_hdr_alsize);

	if (ubi->vid_hdr_offset == 0)
		/* Default offset */
		ubi->vid_hdr_offset = ubi->vid_hdr_aloffset =
				      ubi->ec_hdr_alsize;
	else {
		ubi->vid_hdr_aloffset = ubi->vid_hdr_offset &
						~(ubi->hdrs_min_io_size - 1);
		ubi->vid_hdr_shift = ubi->vid_hdr_offset -
						ubi->vid_hdr_aloffset;
	}

	/* Similarly for the data offset */
	ubi->leb_start = ubi->vid_hdr_offset + UBI_VID_HDR_SIZE;
	ubi->leb_start = ALIGN(ubi->leb_start, ubi->min_io_size);

	dbg_gen("vid_hdr_offset   %d", ubi->vid_hdr_offset);
	dbg_gen("vid_hdr_aloffset %d", ubi->vid_hdr_aloffset);
	dbg_gen("vid_hdr_shift    %d", ubi->vid_hdr_shift);
	dbg_gen("leb_start        %d", ubi->leb_start);

	/* The shift must be aligned to 32-bit boundary */
	if (ubi->vid_hdr_shift % 4) {
		ubi_err("unaligned VID header shift %d", ubi->vid_hdr_shift);
		return -EINVAL;
	}

	/* Check sanity */
	if (ubi->vid_hdr_offset < UBI_EC_HDR_SIZE ||
	    ubi->leb_start < ubi->vid_hdr_offset + UBI_VID_HDR_SIZE ||
	    ubi->leb_start > ubi->peb_size - UBI_VID_HDR_SIZE ||
	    ubi->leb_start & (ubi->min_io_size - 1)) {
		ubi_err("bad VID header (%d) or data offsets (%d)",
			ubi->vid_hdr_offset, ubi->leb_start);
		return -EINVAL;
	}

	/*
	 * Set maximum amount of physical erroneous eraseblocks to be 10%.
	 * Erroneous PEBs are those which have read errors.
	 */
	ubi->max_erroneous = ubi->peb_count / 10;
	if (ubi->max_erroneous < 16)
		ubi->max_erroneous = 16;
	dbg_gen("max_erroneous    %d", ubi->max_erroneous);

	/*
	 * It may happen that EC and VID headers are situated in one minimal
	 * I/O unit. In this case we can only accept this UBI image in
	 * read-only mode.
	 */
	if (ubi->vid_hdr_offset + UBI_VID_HDR_SIZE <= ubi->hdrs_min_io_size) {
		ubi_warn("EC and VID headers are in the same minimal I/O unit, switch to read-only mode");
		ubi->ro_mode = 1;
	}

	ubi->leb_size = ubi->peb_size - ubi->leb_start;

	if (!(ubi->mtd->flags & MTD_WRITEABLE)) {
		ubi_msg("MTD device %d is write-protected, attach in read-only mode",
			ubi->mtd->index);
		ubi->ro_mode = 1;
	}

	/*
	 * Note, ideally, we have to initialize @ubi->bad_peb_count here. But
	 * unfortunately, MTD does not provide this information. We should loop
	 * over all physical eraseblocks and invoke mtd->block_is_bad() for
	 * each physical eraseblock. So, we leave @ubi->bad_peb_count
	 * uninitialized so far.
	 */

	return 0;
}
static int __init tort_init(void)
{
	int err = 0, i, infinite = !cycles_count;
	int bad_ebs[ebcnt];

	printk(KERN_INFO "\n");
	printk(KERN_INFO "=================================================\n");
	pr_info("Warning: this program is trying to wear out your "
		"flash, stop it if this is not wanted.\n");

	if (dev < 0) {
		pr_info("Please specify a valid mtd-device via module parameter\n");
		pr_crit("CAREFUL: This test wipes all data on the specified MTD device!\n");
		return -EINVAL;
	}

	pr_info("MTD device: %d\n", dev);
	pr_info("torture %d eraseblocks (%d-%d) of mtd%d\n",
		ebcnt, eb, eb + ebcnt - 1, dev);
	if (pgcnt)
		pr_info("torturing just %d pages per eraseblock\n", pgcnt);
	pr_info("write verify %s\n", check ? "enabled" : "disabled");

	mtd = get_mtd_device(NULL, dev);
	if (IS_ERR(mtd)) {
		err = PTR_ERR(mtd);
		pr_err("error: cannot get MTD device\n");
		return err;
	}

	if (mtd->writesize == 1) {
		pr_info("not NAND flash, assume page size is 512 bytes.\n");
		pgsize = 512;
	} else
		pgsize = mtd->writesize;

	if (pgcnt && (pgcnt > mtd->erasesize / pgsize || pgcnt < 0)) {
		pr_err("error: invalid pgcnt value %d\n", pgcnt);
		err = -EINVAL;
		goto out_mtd;
	}

	err = -ENOMEM;
	patt_5A5 = kmalloc(mtd->erasesize, GFP_KERNEL);
	if (!patt_5A5) {
		pr_err("error: cannot allocate memory\n");
		goto out_mtd;
	}

	patt_A5A = kmalloc(mtd->erasesize, GFP_KERNEL);
	if (!patt_A5A) {
		pr_err("error: cannot allocate memory\n");
		goto out_patt_5A5;
	}

	patt_FF = kmalloc(mtd->erasesize, GFP_KERNEL);
	if (!patt_FF) {
		pr_err("error: cannot allocate memory\n");
		goto out_patt_A5A;
	}

	check_buf = kmalloc(mtd->erasesize, GFP_KERNEL);
	if (!check_buf) {
		pr_err("error: cannot allocate memory\n");
		goto out_patt_FF;
	}

	err = 0;

	/* Initialize patterns */
	memset(patt_FF, 0xFF, mtd->erasesize);
	for (i = 0; i < mtd->erasesize / pgsize; i++) {
		if (!(i & 1)) {
			memset(patt_5A5 + i * pgsize, 0x55, pgsize);
			memset(patt_A5A + i * pgsize, 0xAA, pgsize);
		} else {
			memset(patt_5A5 + i * pgsize, 0xAA, pgsize);
			memset(patt_A5A + i * pgsize, 0x55, pgsize);
		}
	}

	/*
	 * Check if there is a bad eraseblock among those we are going to test.
	 */
	memset(&bad_ebs[0], 0, sizeof(int) * ebcnt);
	if (mtd_can_have_bb(mtd)) {
		for (i = eb; i < eb + ebcnt; i++) {
			err = mtd_block_isbad(mtd, (loff_t)i * mtd->erasesize);
			if (err < 0) {
				pr_info("block_isbad() returned %d for EB %d\n",
					err, i);
				goto out;
			}
			if (err) {
				pr_err("EB %d is bad. Skip it.\n", i);
				bad_ebs[i - eb] = 1;
			}
		}
	}

	start_timing();
	while (1) {
		int i;
		void *patt;

		/* Erase all eraseblocks */
		for (i = eb; i < eb + ebcnt; i++) {
			if (bad_ebs[i - eb])
				continue;
			err = erase_eraseblock(i);
			if (err)
				goto out;
			cond_resched();
		}

		/* Check if the eraseblocks contain only 0xFF bytes */
		if (check) {
			for (i = eb; i < eb + ebcnt; i++) {
				if (bad_ebs[i - eb])
					continue;
				err = check_eraseblock(i, patt_FF);
				if (err) {
					pr_info("verify failed for 0xFF... pattern\n");
					goto out;
				}
				cond_resched();
			}
		}

		/* Write the pattern */
		for (i = eb; i < eb + ebcnt; i++) {
			if (bad_ebs[i - eb])
				continue;
			if ((eb + erase_cycles) & 1)
				patt = patt_5A5;
			else
				patt = patt_A5A;
			err = write_pattern(i, patt);
			if (err)
				goto out;
			cond_resched();
		}

		/* Verify what we wrote */
		if (check) {
			for (i = eb; i < eb + ebcnt; i++) {
				if (bad_ebs[i - eb])
					continue;
				if ((eb + erase_cycles) & 1)
					patt = patt_5A5;
				else
					patt = patt_A5A;
				err = check_eraseblock(i, patt);
				if (err) {
					pr_info("verify failed for %s pattern\n",
						((eb + erase_cycles) & 1) ?
						"0x55AA55..." : "0xAA55AA...");
					goto out;
				}
				cond_resched();
			}
		}

		erase_cycles += 1;

		if (erase_cycles % gran == 0) {
			long ms;

			stop_timing();
			ms = (finish.tv_sec - start.tv_sec) * 1000 +
			     (finish.tv_usec - start.tv_usec) / 1000;
			pr_info("%08u erase cycles done, took %lu "
				"milliseconds (%lu seconds)\n",
				erase_cycles, ms, ms / 1000);
			start_timing();
		}

		if (!infinite && --cycles_count == 0)
			break;
	}
out:

	pr_info("finished after %u erase cycles\n", erase_cycles);
	kfree(check_buf);
out_patt_FF:
	kfree(patt_FF);
out_patt_A5A:
	kfree(patt_A5A);
out_patt_5A5:
	kfree(patt_5A5);
out_mtd:
	put_mtd_device(mtd);
	if (err)
		pr_info("error %d occurred during torturing\n", err);
	printk(KERN_INFO "=================================================\n");
	return err;
}