/*
 * Migrate the contents of a block that just failed a write to a fresh block.
 *
 * @offset    byte offset (within the chip) of the failing write; used to
 *            derive the bad block index and the page index inside it.
 * @write_dat data the caller was trying to write to the failing page.
 * @write_oob OOB data the caller was trying to write to the failing page.
 *
 * Returns the index of the replacement block, or 0 if no spare block could
 * be found. On a write failure during migration the target block is marked
 * bad and the whole migration is retried recursively on another block.
 *
 * NOTE(review): this variant copies only the pages *before* the error page
 * and then appends the caller's page — it assumes pages are written
 * sequentially, so pages after error_page hold no data. TODO confirm this
 * matches the caller's write pattern.
 */
static int migrate_from_bad(int offset, u8 *write_dat, u8 *write_oob)
{
    int page;
    int error_block = offset / BLOCK_SIZE_BMT;                    /* index of the failing block */
    int error_page = (offset / PAGE_SIZE_BMT) % page_per_block;   /* page inside that block */
    int to_index;

    /* Pick a spare block to migrate into (false: not for the BMT itself). */
    to_index = find_available_block(false);
    if (!to_index)
    {
        MSG("Cannot find an available block for BMT\n");
        return 0;
    }

    /* Copy every already-written page (0 .. error_page-1) into the new block,
     * reusing the file-scope dat_buf/oob_buf as scratch buffers. */
    for (page = 0; page < error_page; page++)
    {
        nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
        if (error_block < system_block_count)
        {
            /* Record the original logical block index in the OOB so the
             * mapping can be rebuilt after a reboot. */
            set_bad_index_to_oob(oob_buf, error_block);
        }
        if (nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf))
        {
            /* Replacement block is bad too: mark it and retry the whole
             * migration on yet another block. */
            MSG("Write page %d fail\n", PAGE_ADDR(to_index) + page);
            mark_block_bad_bmt(OFFSET(to_index), BMT_BADBLOCK_GENERATE_LATER);
            return migrate_from_bad(offset, write_dat, write_oob);
        }
    }

    /* Now write the caller's pending page (the one that failed originally). */
    {
        memset(oob_buf, 0xFF, sizeof(oob_buf));
#if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)
        memcpy(oob_buf, write_oob, mtd_bmt->oobsize);
#else
        memcpy(oob_buf, write_oob, 1 << nand_chip_bmt->flash->oob_shift);
#endif
        if (error_block < system_block_count)
            set_bad_index_to_oob(oob_buf, error_block);   // if error_block is already a mapped block, original mapping index is in OOB.

        if (nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf))
        {
            MSG("Write page %d fail\n", PAGE_ADDR(to_index) + error_page);
            mark_block_bad_bmt(OFFSET(to_index), BMT_BADBLOCK_GENERATE_LATER);
            return migrate_from_bad(offset, write_dat, write_oob);
        }
    }

    MSG("Migrate from %d to %d done!\n",error_block, to_index);
    return to_index;
}
/*
 * Persist the BMT (bad-block mapping table) page to flash.
 *
 * Retries on a new block whenever erase or write fails: the failing block is
 * marked bad, bmt_block_index is reset, and the whole attempt starts over.
 * Returns true on success, false only when no spare block remains.
 *
 * Note: in this driver variant nand_erase_bmt()/nand_write_page_bmt() return
 * non-zero on SUCCESS, hence the `!call` failure checks.
 */
static bool write_bmt_to_flash(u8 *dat, u8 *oob)
{
    /* Iterative retry loop — each pass is one full attempt. */
    for (;;)
    {
        bool need_erase = true;

        MSG(INIT, "Try to write BMT\n");
        MSG(INIT, "bmt_block_index = 0x%x\n", bmt_block_index);

        if (bmt_block_index == 0)
        {
            /* No block chosen yet: find_available_block() hands back an
             * already-erased block, so skip the erase step. */
            need_erase = false;
            MSG(INIT, "set need_erase = 0x%x\n", need_erase);
            bmt_block_index = find_available_block(true);
            if (!bmt_block_index)
            {
                MSG(INIT, "Cannot find an available block for BMT\n");
                return false;
            }
        }

        MSG(INIT, "Find BMT block: 0x%x\n", bmt_block_index);
        MSG(INIT, "need_erase = 0x%x\n", need_erase);

        /* Reusing a previously chosen block: erase it first. */
        if (need_erase && !nand_erase_bmt(OFFSET(bmt_block_index)))
        {
            MSG(INIT, "BMT block erase fail, mark bad: 0x%x\n", bmt_block_index);
            mark_block_bad_bmt(OFFSET(bmt_block_index));
            // bmt.bad_count++;
            bmt_block_index = 0;
            continue;   /* retry with a fresh block */
        }

        if (!nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob))
        {
            MSG(INIT, "Write BMT data fail, need to write again\n");
            mark_block_bad_bmt(OFFSET(bmt_block_index));
            // bmt.bad_count++;
            bmt_block_index = 0;
            continue;   /* retry with a fresh block */
        }

        MSG(INIT, "Write BMT data to block 0x%x success\n", bmt_block_index);
        return true;
    }
}
/*
 * Persist the BMT (bad-block mapping table) page to flash.
 *
 * On an erase or write failure the current block is marked bad and the whole
 * attempt is retried on a freshly allocated block. Returns true on success,
 * false only when no spare block can be found.
 *
 * Note: in this driver variant nand_erase_bmt()/nand_write_page_bmt() return
 * non-zero on FAILURE.
 */
static bool write_bmt_to_flash(u8 *dat, u8 *oob)
{
    /* Iterative retry loop — each pass is one full attempt. */
    for (;;)
    {
        bool need_erase = true;

        MSG("Try to write BMT\n");

        if (bmt_block_index == 0)
        {
            /* No block chosen yet: find_available_block() hands back an
             * already-erased block, so skip the erase step. */
            need_erase = false;
            bmt_block_index = find_available_block(true);
            if (!bmt_block_index)
            {
                MSG("Cannot find an available block for BMT\n");
                return false;
            }
        }

        /* Reusing a previously chosen block: erase it first. */
        if (need_erase && nand_erase_bmt(OFFSET(bmt_block_index)))
        {
            MSG("BMT block erase fail, mark bad: 0x%x\n", bmt_block_index);
            mark_block_bad_bmt(OFFSET(bmt_block_index), BMT_BADBLOCK_GENERATE_LATER);
            bmt_block_index = 0;
            continue;   /* retry with a fresh block */
        }

        if (nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob))
        {
            MSG("Write BMT data fail \n");
            mark_block_bad_bmt(OFFSET(bmt_block_index), BMT_BADBLOCK_GENERATE_LATER);
            bmt_block_index = 0;
            continue;   /* retry with a fresh block */
        }

        MSG("Write BMT to block %d success\n", bmt_block_index);
        return true;
    }
}
/*
 * Persist the BBT (bad-block table) page to flash.
 *
 * Each attempt allocates a fresh block; if the write fails the block is
 * marked bad and another attempt is made. Returns true on success, false
 * only when no spare block can be found.
 */
static bool write_bbt_to_flash(u8 *dat, u8 *oob)
{
    for (;;)
    {
        bbt_block_index = find_available_block(false);
        if (!bbt_block_index)
        {
            MSG("Cannot find an available block for BBT\n");
            return false;
        }

        /* nand_write_page_bmt() returns non-zero on failure here. */
        if (!nand_write_page_bmt(PAGE_ADDR(bbt_block_index), dat, oob))
            break;   /* written successfully */

        MSG("Write BBT data fail \n");
        mark_block_bad_bmt(OFFSET(bbt_block_index), BMT_BADBLOCK_GENERATE_LATER);
        bbt_block_index = 0;   /* force a new allocation on the next pass */
    }

    MSG("Write BBT to block %d success\n", bbt_block_index);
    return true;
}
/*
 * Migrate the contents of a block that just failed a write to a fresh block.
 *
 * @offset    byte offset (within the chip) of the failing write; used to
 *            derive the bad block index and the page index inside it.
 * @write_dat data for the failing page, or NULL to re-read it from flash.
 * @write_oob OOB data for the failing page (MAX_OOB_SIZE bytes).
 *
 * Returns the index of the replacement block, or 0 if no spare block could
 * be found. On a write failure during migration the target block is marked
 * bad and the whole migration is retried recursively on another block.
 *
 * BUG FIX: when the caller passes write_dat == NULL it is aliased to the
 * file-scope dat_buf, but dat_buf is then reused as the read scratch buffer
 * in the per-page copy loop below. The old code recursed with write_dat
 * (== dat_buf) on a loop-time write failure, so the retry wrote whatever
 * page happened to be in dat_buf as the error page AND skipped re-reading it
 * (write_dat was no longer NULL). We now remember the caller's original
 * pointer and pass that on retry, so a NULL argument triggers a clean
 * re-read of the error page.
 */
static int migrate_from_bad(int offset, u8 * write_dat, u8 * write_oob)
{
    int page;
    int error_block = offset / BLOCK_SIZE_BMT;                    /* index of the failing block */
    int error_page = (offset / PAGE_SIZE_BMT) % page_per_block;   /* page inside that block */
    int to_index;
    u8 *caller_dat = write_dat;   /* caller's pointer, before any dat_buf aliasing */

    memcpy(oob_buf, write_oob, MAX_OOB_SIZE);

    /* Pick a spare block to migrate into (false: not for the BMT itself). */
    to_index = find_available_block(false);
    if (!to_index)
    {
        MSG(INIT, "Cannot find an available block for BMT\n");
        return 0;
    }

    {   // migrate error page first
        MSG(INIT, "Write error page: 0x%x\n", error_page);
        if (!write_dat)
        {
            /* Caller has no pending data: take the page contents from flash. */
            nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL);
            write_dat = dat_buf;
        }
        // memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
        if (error_block < system_block_count)
            set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, original mapping index is in OOB.

        if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf))
        {
            MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page);
            mark_block_bad_bmt(OFFSET(to_index));
            /* Retry with the caller's original pointer (may be NULL). */
            return migrate_from_bad(offset, caller_dat, write_oob);
        }
    }

    /* Copy every other used page of the bad block into the new block.
     * NOTE: this loop clobbers dat_buf/oob_buf — which is exactly why the
     * retry above must not pass dat_buf as the error-page data. */
    for (page = 0; page < page_per_block; page++)
    {
        if (page != error_page)
        {
            nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
            if (is_page_used(dat_buf, oob_buf))
            {
                if (error_block < system_block_count)
                {
                    /* Record the original logical block index in the OOB so
                     * the mapping can be rebuilt after a reboot. */
                    set_bad_index_to_oob(oob_buf, error_block);
                }
                MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n",
                    PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page);
                if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf))
                {
                    MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + page);
                    mark_block_bad_bmt(OFFSET(to_index));
                    /* Retry with the caller's original pointer — NOT the
                     * (possibly aliased) write_dat, whose dat_buf contents
                     * were just overwritten by nand_read_page_bmt(). */
                    return migrate_from_bad(offset, caller_dat, write_oob);
                }
            }
        }
    }

    MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index);
    return to_index;
}