/*******************************************************************
 * [BMT Interface]
 *
 * Description:
 *    Update BMT.
 *
 * Parameter:
 *    offset: update block/page offset.
 *    reason: update reason, see update_reason_t for reason.
 *    dat/oob: data and oob buffer for write fail.
 *
 * Return:
 *    Return true for success, and false for failure.
 *******************************************************************/
bool update_bmt(u32 offset, update_reason_t reason, u8 *dat, u8 *oob)
{
    int map_index;
    int orig_bad_block __attribute__((unused)) = -1;
    int i;
    int bad_index = offset / BLOCK_SIZE_BMT;    /* block that went bad */

    if (reason == UPDATE_WRITE_FAIL)
    {
        /* The failing block may still hold valid data: migrate it to a
         * fresh pool block before remapping. */
        if ( !(map_index = migrate_from_bad(offset, dat, oob)) )
        {
            MSG("migrate fail \n");
            return false;
        }
    }
    else
    {
        if ( !(map_index = find_available_block(false)) )
        {
            MSG("Cannot find block in pool \n");
            return false;
        }
    }

    // now let's update BMT
    if (bad_index >= system_block_count)    // mapped block become bad, find original bad block
    {
        for (i = 0; i < bmt_block_count; i++)
        {
            if (bmt.table[i].mapped_index == bad_index)
            {
                orig_bad_block = bmt.table[i].bad_index;
                break;
            }
        }

        /* BUGFIX: if no entry maps to bad_index, the original code fell
         * through with i == bmt_block_count and wrote one element past the
         * end of bmt.table[]. Bail out instead of corrupting memory. */
        if (i >= bmt_block_count)
        {
            MSG("No BMT entry maps to block %d, table inconsistent\n", bad_index);
            return false;
        }

        MSG("Mapped block becomes bad, orig bad block is %d \n", orig_bad_block);
        bmt.table[i].mapped_index = map_index;
    }
    else
    {
        /* A fresh system block went bad: append a new mapping entry. */
        bmt.table[bmt.mapped_count].mapped_index = map_index;
        bmt.table[bmt.mapped_count].bad_index = bad_index;
        bmt.mapped_count++;
    }

    /* Serialize the updated table and persist it to flash. */
    memset(dat_buf, 0xFF, sizeof(dat_buf));
    memset(oob_buf, 0xFF, sizeof(oob_buf));
    fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
    if (!write_bmt_to_flash(dat_buf, oob_buf))
        return false;

    /* A pool block going bad is recorded differently from a raw system
     * block going bad. */
    if (bad_index >= system_block_count)
        mark_block_bad_bmt(offset, BMT_BADBLOCK_GENERATE_LATER);
    else
        mark_block_bad_bmt(offset, BAD_BLOCK_RAW);

    return true;
}
/* Migrate the contents of a failing block to a fresh pool block.
 *
 * offset:    byte offset of the page whose write just failed.
 * write_dat: data for the page that failed to write.
 * write_oob: OOB data for that page.
 *
 * Pages BEFORE the failing page are copied from the bad block, then the
 * failing page itself is rewritten from write_dat/write_oob. Pages after
 * the failing page are not copied — this variant appears to assume pages
 * are written strictly in order, so later pages hold no data yet
 * (NOTE(review): confirm this assumption holds for all callers).
 *
 * Returns the replacement block index, or 0 on failure.
 */
static int migrate_from_bad(int offset, u8 *write_dat, u8 *write_oob)
{
    int page;
    int error_block = offset / BLOCK_SIZE_BMT;                  /* failing block index */
    int error_page = (offset / PAGE_SIZE_BMT) % page_per_block; /* failing page within the block */
    int to_index;

    to_index = find_available_block(false);
    if (!to_index)
    {
        MSG("Cannot find an available block for BMT\n");
        return 0;
    }

    /* Copy every page that was written before the failing one. */
    for (page = 0; page < error_page; page++)
    {
        nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
        if (error_block < system_block_count)
        {
            /* Record the original (bad) block index in the OOB so the
             * mapping survives a power loss before the BMT is rewritten. */
            set_bad_index_to_oob(oob_buf, error_block);
        }
        if (nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf))
        {
            MSG("Write page %d fail\n", PAGE_ADDR(to_index) + page);
            mark_block_bad_bmt(OFFSET(to_index), BMT_BADBLOCK_GENERATE_LATER);
            /* Destination turned out bad too — retry with another block. */
            return migrate_from_bad(offset, write_dat, write_oob);
        }
    }

    /* Write the page whose original write failed. */
    {
        memset(oob_buf, 0xFF, sizeof(oob_buf));
#if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)
        memcpy(oob_buf, write_oob, mtd_bmt->oobsize);
#else
        memcpy(oob_buf, write_oob, 1 << nand_chip_bmt->flash->oob_shift);
#endif
        if (error_block < system_block_count)
            set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, original mapping index is in OOB.

        if (nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf))
        {
            MSG("Write page %d fail\n", PAGE_ADDR(to_index) + error_page);
            mark_block_bad_bmt(OFFSET(to_index), BMT_BADBLOCK_GENERATE_LATER);
            return migrate_from_bad(offset, write_dat, write_oob);
        }
    }

    MSG("Migrate from %d to %d done!\n",error_block, to_index);

    return to_index;
}
// return valid index if found BMT, else return 0 static int load_bmt_data(int start, int pool_size) { int bmt_index = start + pool_size - 1; // find from the end phys_bmt_struct phys_table; int i; MSG(INIT, "[%s]: begin to search BMT from block 0x%x\n", __FUNCTION__, bmt_index); for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--) { if (nand_block_bad_bmt(OFFSET(bmt_index))) { MSG(INIT, "Skip bad block: %d\n", bmt_index); continue; } if (!nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf)) { MSG(INIT, "Error found when read block %d\n", bmt_index); continue; } if (!match_bmt_signature(dat_buf, oob_buf)) { continue; } MSG(INIT, "Match bmt signature @ block: 0x%x\n", bmt_index); memcpy(&phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(phys_table)); if (!valid_bmt_data(&phys_table)) { MSG(INIT, "BMT data is not correct %d\n", bmt_index); continue; } else { bmt.mapped_count = phys_table.header.mapped_count; bmt.version = phys_table.header.version; // bmt.bad_count = phys_table.header.bad_count; memcpy(bmt.table, phys_table.table, bmt.mapped_count * sizeof(bmt_entry)); MSG(INIT, "bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count); for (i = 0; i < bmt.mapped_count; i++) { if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index))) { MSG(INIT, "block 0x%x is not mark bad, should be power lost last time\n", bmt.table[i].bad_index); mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index)); } } return bmt_index; } } MSG(INIT, "bmt block not found!\n"); return 0; }
/*******************************************************************
 * [BMT Interface]
 *
 * Description:
 *    Update BMT.
 *
 * Parameter:
 *    offset: update block/page offset.
 *    reason: update reason, see update_reason_t for reason.
 *    dat/oob: data and oob buffer for write fail.
 *
 * Return:
 *    Return true for success, and false for failure.
 *******************************************************************/
bool update_bmt(u32 offset, update_reason_t reason, u8 *dat, u8 *oob)
{
    int map_index;
    int orig_bad_block = -1;
    int i;
    int bad_index = offset / BLOCK_SIZE_BMT;    /* block that went bad */

    if (reason == UPDATE_WRITE_FAIL)
    {
        MSG(INIT, "Write fail, need to migrate\n");
        /* Salvage the failing block's data into a fresh pool block. */
        if ( !(map_index = migrate_from_bad(offset, dat, oob)) )
        {
            MSG(INIT, "migrate fail\n");
            return false;
        }
    }
    else
    {
        if ( !(map_index = find_available_block(false)) )
        {
            MSG(INIT, "Cannot find block in pool\n");
            return false;
        }
    }

    // now let's update BMT
    if (bad_index >= system_block_count)    // mapped block become bad, find original bad block
    {
        for (i = 0; i < bmt_block_count; i++)
        {
            if (bmt.table[i].mapped_index == bad_index)
            {
                orig_bad_block = bmt.table[i].bad_index;
                break;
            }
        }

        /* BUGFIX: if no entry maps to bad_index, the original fell through
         * with i == bmt_block_count and wrote bmt.table[bmt_block_count],
         * one element past the end of the table. */
        if (i >= bmt_block_count)
        {
            MSG(INIT, "No BMT entry maps to block 0x%x, table inconsistent\n", bad_index);
            return false;
        }

        MSG(INIT, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block);
        bmt.table[i].mapped_index = map_index;
    }
    else
    {
        /* A fresh system block went bad: append a new mapping entry. */
        bmt.table[bmt.mapped_count].mapped_index = map_index;
        bmt.table[bmt.mapped_count].bad_index = bad_index;
        bmt.mapped_count++;
    }

    /* Serialize the updated table and persist it to flash. */
    memset(oob_buf, 0xFF, sizeof(oob_buf));
    fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
    if (!write_bmt_to_flash(dat_buf, oob_buf))
        return false;

    mark_block_bad_bmt(offset);

    return true;
}
/* Persist the serialized BMT (dat/oob) into the current BMT block,
 * allocating and erasing a block as needed. On erase/write failure the
 * block is marked bad and the write retries on a fresh block. Returns
 * true on success, false when no usable block can be found. */
static bool write_bmt_to_flash(u8 *dat, u8 *oob)
{
    bool need_erase = true;

    MSG(INIT, "Try to write BMT\n");
    MSG(INIT, "bmt_block_index = 0x%x\n", bmt_block_index);

    if (bmt_block_index == 0)
    {
        // if we don't have index, we don't need to erase found block as it has been erased in find_available_block()
        need_erase = false;
        MSG(INIT, "set need_erase = 0x%x\n", need_erase);
        bmt_block_index = find_available_block(true);
        if (!bmt_block_index)
        {
            MSG(INIT, "Cannot find an available block for BMT\n");
            return false;
        }
    }

    MSG(INIT, "Find BMT block: 0x%x\n", bmt_block_index);
    MSG(INIT, "need_erase = 0x%x\n", need_erase);

    // write bmt to flash
    if (need_erase && !nand_erase_bmt(OFFSET(bmt_block_index)))
    {
        MSG(INIT, "BMT block erase fail, mark bad: 0x%x\n", bmt_block_index);
        mark_block_bad_bmt(OFFSET(bmt_block_index));
        bmt_block_index = 0;
        return write_bmt_to_flash(dat, oob);    // retry on a fresh block
    }

    if (!nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob))
    {
        MSG(INIT, "Write BMT data fail, need to write again\n");
        mark_block_bad_bmt(OFFSET(bmt_block_index));
        bmt_block_index = 0;
        return write_bmt_to_flash(dat, oob);    // retry on a fresh block
    }

    MSG(INIT, "Write BMT data to block 0x%x success\n", bmt_block_index);
    return true;
}
// return valid index if found BMT, else return 0 static int load_bmt_data(int start, int pool_size) { int bmt_index = start + pool_size - 1; // find from the end int i; MSG("begin to search BMT from block %d \n", bmt_index); for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--) { if (nand_block_bad_bmt(OFFSET(bmt_index), BAD_BLOCK_RAW) || nand_block_bad_bmt(OFFSET(bmt_index), BMT_BADBLOCK_GENERATE_LATER)) { MSG("Skip bad block: %d \n", bmt_index); continue; } if (nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf)) { MSG("Error found when read block: %d\n", bmt_index); continue; } if (!match_bmt_signature(dat_buf, oob_buf)) { continue; } MSG("Match bmt signature @ block: %d\n", bmt_index); memcpy(&lbd_phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(lbd_phys_table)); if (!valid_bmt_data(&lbd_phys_table)) { MSG("BMT data is not correct: %d\n", bmt_index); continue; } else { bmt.mapped_count = lbd_phys_table.header.mapped_count; bmt.version = lbd_phys_table.header.version; memcpy(bmt.table, lbd_phys_table.table, bmt.mapped_count * sizeof(bmt_entry)); MSG("bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count); for (i = 0; i < bmt.mapped_count; i++) { if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index), BAD_BLOCK_RAW)) { MSG("block %d is not mark bad, should be power lost last time\n", bmt.table[i].bad_index); mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index), BAD_BLOCK_RAW); } } return bmt_index; } } MSG("bmt not found!\n"); return 0; }
/* Persist the serialized BMT (dat/oob) into the current BMT block,
 * allocating and erasing a block as needed. On erase/write failure the
 * block is marked bad and the write retries on a fresh block. Returns
 * true on success, false when no usable block can be found. */
static bool write_bmt_to_flash(u8 *dat, u8 *oob)
{
    bool need_erase = true;

    MSG("Try to write BMT\n");

    if (bmt_block_index == 0)
    {
        // if we don't have index, we don't need to erase found block as it has been erased in find_available_block()
        need_erase = false;
        bmt_block_index = find_available_block(true);
        if (!bmt_block_index)
        {
            MSG("Cannot find an available block for BMT\n");
            return false;
        }
    }

    // write bmt to flash
    if (need_erase && nand_erase_bmt(OFFSET(bmt_block_index)))
    {
        MSG("BMT block erase fail, mark bad: 0x%x\n", bmt_block_index);
        mark_block_bad_bmt(OFFSET(bmt_block_index), BMT_BADBLOCK_GENERATE_LATER);
        bmt_block_index = 0;
        return write_bmt_to_flash(dat, oob);    // retry on a fresh block
    }

    if (nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob))
    {
        MSG("Write BMT data fail \n");
        mark_block_bad_bmt(OFFSET(bmt_block_index), BMT_BADBLOCK_GENERATE_LATER);
        bmt_block_index = 0;
        return write_bmt_to_flash(dat, oob);    // retry on a fresh block
    }

    MSG("Write BMT to block %d success\n", bmt_block_index);
    return true;
}
/*******************************************************************
 * Reconstruct bmt, called when found bmt info doesn't match bad
 * block info in flash.
 *
 * Return NULL for failure
 *******************************************************************/
bmt_struct *reconstruct_bmt(bmt_struct * bmt)
{
    int slot;
    int blk = system_block_count;   /* pool starts right after the system blocks */
    unsigned short bad_index;

    /* Reset the in-RAM table before rescanning the pool. */
    bmt->version = BMT_VERSION;
    bmt->bad_count = 0;
    bmt->mapped_count = 0;
    memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));

    for (slot = 0; slot < bmt_block_count; slot++, blk++)
    {
        if (nand_block_bad_bmt(OFFSET(blk), BAD_BLOCK_RAW) ||
            nand_block_bad_bmt(OFFSET(blk), BMT_BADBLOCK_GENERATE_LATER))
        {
            MSG("Skip bad block: %d \n", blk);
            continue;
        }

        /* Each replacement block stores the index of the bad block it
         * replaces in its first page's OOB. */
        nand_read_page_bmt(PAGE_ADDR(blk), dat_buf, oob_buf);
        bad_index = get_bad_index_from_oob(oob_buf);
        if (bad_index >= system_block_count)
        {
            MSG("get bad index: 0x%x \n", bad_index);
            if (bad_index != 0xFFFF)
                MSG("Invalid bad index found in block: %d \n", blk);
            continue;   /* unmapped (0xFFFF) or garbage index */
        }

        MSG("Block 0x%x is mapped to bad block: 0x%x\n", blk, bad_index);

        if (!nand_block_bad_bmt(OFFSET(bad_index), BAD_BLOCK_RAW))
        {
            mark_block_bad_bmt(OFFSET(bad_index), BAD_BLOCK_RAW);
            MSG("block %d is not marked as bad, mark it\n", bad_index);
        }

        /* Record the bad -> replacement mapping in the rebuilt table. */
        bmt->table[bmt->mapped_count].bad_index = bad_index;
        bmt->table[bmt->mapped_count].mapped_index = blk;
        bmt->mapped_count++;

        MSG("Add mapping: %d -> %d to BMT\n", bad_index, blk);
    }

    MSG("Scan replace pool done, mapped block: %d\n", bmt->mapped_count);

    return bmt;
}
/* Find a usable block in the reserve area, searching from block + 1 up
 * to reserve_block. Each candidate is erased before being returned;
 * candidates that fail to erase are marked bad and skipped.
 *
 * Returns the block index on success, or 0 if none is available. */
static int find_available_block_reserve(int block)
{
    int i;

    for (i = block + 1; i < reserve_block; i++)
    {
        if (nand_block_bad_bmt(OFFSET(i)))
            continue;

        if (nand_erase_bmt(OFFSET(i)))
        {
            // MSG(INIT, "return 0x%x\n", i);
            return i;
        }

        /* BUGFIX: the original called mark_block_bad_bmt(i), passing the
         * block index where every other call site passes a byte offset
         * via OFFSET(), so the wrong block was marked bad. */
        mark_block_bad_bmt(OFFSET(i));
    }

    return 0;
}
/* Persist the serialized BBT (dat/oob) into a freshly allocated pool
 * block. A failed write marks the block bad and retries on another
 * block; returns false only when no block can be allocated. */
static bool write_bbt_to_flash(u8 *dat, u8 *oob)
{
    bbt_block_index = find_available_block(false);
    if (!bbt_block_index)
    {
        MSG("Cannot find an available block for BBT\n");
        return false;
    }

    if (nand_write_page_bmt(PAGE_ADDR(bbt_block_index), dat, oob))
    {
        MSG("Write BBT data fail \n");
        mark_block_bad_bmt(OFFSET(bbt_block_index), BMT_BADBLOCK_GENERATE_LATER);
        bbt_block_index = 0;
        return write_bbt_to_flash(dat, oob);    // retry on a fresh block
    }

    MSG("Write BBT to block %d success\n", bbt_block_index);
    return true;
}
/* Migrate an entire failing block to a fresh pool block.
 *
 * offset:    byte offset of the page whose write just failed.
 * write_dat: data for the failing page, or NULL to re-read it from flash.
 * write_oob: OOB data for the failing page.
 *
 * The failing page is written first (so its fresh data is not lost),
 * then every other used page of the bad block is copied over.
 *
 * NOTE(review): when write_dat is NULL it is pointed at the global
 * dat_buf, which the copy loop below reuses for page reads. If a copy
 * write fails after that, the recursive retry receives write_dat still
 * aliasing the clobbered dat_buf — the retried error page may carry
 * stale data. Verify whether callers can hit this path.
 *
 * Returns the replacement block index, or 0 on failure.
 */
static int migrate_from_bad(int offset, u8 * write_dat, u8 * write_oob)
{
    int page;
    int error_block = offset / BLOCK_SIZE_BMT;                  /* failing block index */
    int error_page = (offset / PAGE_SIZE_BMT) % page_per_block; /* failing page within the block */
    int to_index;

    memcpy(oob_buf, write_oob, MAX_OOB_SIZE);

    to_index = find_available_block(false);
    if (!to_index)
    {
        MSG(INIT, "Cannot find an available block for BMT\n");
        return 0;
    }

    {   // migrate error page first
        MSG(INIT, "Write error page: 0x%x\n", error_page);

        if (!write_dat)
        {
            /* No caller-supplied data: recover the page from the bad block. */
            nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL);
            write_dat = dat_buf;
        }
        // memcpy(oob_buf, write_oob, MAX_OOB_SIZE);

        if (error_block < system_block_count)
            set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, original mapping index is in OOB.

        if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf))
        {
            MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page);
            mark_block_bad_bmt(OFFSET(to_index));
            /* Destination turned out bad too — retry with another block. */
            return migrate_from_bad(offset, write_dat, write_oob);
        }
    }

    /* Copy every other used page of the bad block. */
    for (page = 0; page < page_per_block; page++)
    {
        if (page != error_page)
        {
            nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
            if (is_page_used(dat_buf, oob_buf))
            {
                if (error_block < system_block_count)
                {
                    /* Record the original bad-block index in the OOB so the
                     * mapping survives power loss before the BMT is saved. */
                    set_bad_index_to_oob(oob_buf, error_block);
                }
                MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n", PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page);
                if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf))
                {
                    MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + page);
                    mark_block_bad_bmt(OFFSET(to_index));
                    return migrate_from_bad(offset, write_dat, write_oob);
                }
            }
        }
    }

    MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index);

    return to_index;
}
/************************************************************************* * Find an available block and erase. * * start_from_end: if true, find available block from end of flash. * * else, find from the beginning of the pool * * need_erase: if true, all unmapped blocks in the pool will be erased * *************************************************************************/ static int find_available_block(bool start_from_end) { int i; // , j; int block = system_block_count; int direction; MSG(INIT, "Try to find_available_block, pool_erase: %d\n", pool_erased); if (!pool_erased) { MSG(INIT, "Erase all un-mapped blocks in pool\n"); for (i = 0; i < bmt_block_count; i++) { if (block == bmt_block_index) { MSG(INIT, "Skip bmt block 0x%x\n", block); continue; } if (nand_block_bad_bmt(OFFSET(block + i))) { MSG(INIT, "Skip bad block 0x%x\n", block + i); continue; } if (is_block_mapped(block + i) >= 0) { MSG(INIT, "Skip mapped block 0x%x\n", block + i); continue; } if (!nand_erase_bmt(OFFSET(block + i))) { MSG(INIT, "Erase block 0x%x failed\n", block + i); mark_block_bad_bmt(OFFSET(block + i)); } } pool_erased = 1; } if (start_from_end) { block = total_block_count - 1; direction = -1; } else { block = system_block_count; direction = 1; } for (i = 0; i < bmt_block_count; i++, block += direction) { if (block == bmt_block_index) { MSG(INIT, "Skip bmt block 0x%x\n", block); continue; } if (nand_block_bad_bmt(OFFSET(block))) { MSG(INIT, "Skip bad block 0x%x\n", block); continue; } if (is_block_mapped(block) >= 0) { MSG(INIT, "Skip mapped block 0x%x\n", block); continue; } MSG(INIT, "Find block 0x%x available\n", block); return block; } return 0; }
/************************************************************************* * Find an available block and erase. * * start_from_end: if true, find available block from end of flash. * * else, find from the beginning of the pool * * need_erase: if true, all unmapped blocks in the pool will be erased * *************************************************************************/ static int find_available_block(bool start_from_end) { int i; int block = system_block_count; int direction; MSG("Try to find_available_block, pool_erase: %d\n", pool_erased); // erase all un-mapped blocks in pool when finding avaliable block if (!pool_erased) { for (i = 0; i < bmt_block_count; i++) { if ((block + i) == bmt_block_index) { MSG("Skip bmt block %d \n", block + i); continue; } if ((block + i) == bbt_block_index) { MSG("Skip bbt block %d \n", block + i); continue; } if (nand_block_bad_bmt(OFFSET(block + i), BAD_BLOCK_RAW) || nand_block_bad_bmt(OFFSET(block + i), BMT_BADBLOCK_GENERATE_LATER)) { MSG("Skip bad block %d \n", block + i); continue; } if (is_block_mapped(block + i) >= 0) { MSG("Skip mapped block %d \n", block + i); continue; } if (nand_erase_bmt(OFFSET(block + i))) { MSG("Erase block %d fail\n", block + i); mark_block_bad_bmt(OFFSET(block + i), BMT_BADBLOCK_GENERATE_LATER); } } pool_erased = 1; } if (start_from_end) { block = total_block_count - 1; direction = -1; } else { block = system_block_count; direction = 1; } for (i = 0; i < bmt_block_count; i++, block += direction) { if (block == bmt_block_index) { MSG("Skip bmt block %d \n", block); continue; } if (block == bbt_block_index) { MSG("Skip bbt block %d \n", block); continue; } if (nand_block_bad_bmt(OFFSET(block), BAD_BLOCK_RAW) || nand_block_bad_bmt(OFFSET(block), BMT_BADBLOCK_GENERATE_LATER)) { MSG("Skip bad block %d \n", block); continue; } if (is_block_mapped(block) >= 0) { MSG("Skip mapped block %d \n", block); continue; } MSG("Find block %d available\n", block); return block; } return 0; }