/*******************************************************************
 * [BMT Interface]
 *
 * Description:
 *   Update the BMT after a block goes bad.
 *
 * Parameter:
 *   offset: offset of the failing block/page.
 *   reason: update reason, see update_reason_t.
 *   dat/oob: data and OOB buffers for the write-fail case.
 *
 * Return:
 *   Return true for success, and false for failure.
 *******************************************************************/
bool update_bmt(u64 offset, update_reason_t reason, u8 *dat, u8 *oob)
{
    int map_index;
    int orig_bad_block = -1;
    int i;
    u32 bad_index = (u32)(offset >> nand_chip_bmt->phys_erase_shift);

    if (reason == UPDATE_WRITE_FAIL) {
        MSG(INIT, "Write fail, need to migrate\n");
        if (!(map_index = migrate_from_bad(offset, dat, oob))) {
            MSG(INIT, "migrate fail\n");
            return false;
        }
    } else {
        if (!(map_index = find_available_block(false))) {
            MSG(INIT, "Cannot find block in pool\n");
            return false;
        }
    }

    /* Now update the BMT itself. */
    if (bad_index >= system_block_count) {
        /* A replacement block went bad: find the entry that maps to it and
         * repoint that entry at the newly chosen block. */
        for (i = 0; i < bmt_block_count; i++) {
            if (bmt.table[i].mapped_index == bad_index) {
                orig_bad_block = bmt.table[i].bad_index;
                break;
            }
        }
        if (i == bmt_block_count) {
            /* No matching entry; bail out rather than write past bmt.table. */
            MSG(INIT, "Mapped block 0x%x not found in BMT\n", bad_index);
            return false;
        }
        MSG(INIT, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block);
        bmt.table[i].mapped_index = map_index;
    } else {
        /* A normal system block went bad: append a new mapping entry. */
        bmt.table[bmt.mapped_count].mapped_index = map_index;
        bmt.table[bmt.mapped_count].bad_index = bad_index;
        bmt.mapped_count++;
    }

    memset(oob_buf, 0xFF, sizeof(oob_buf));
    fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
    if (!write_bmt_to_flash(dat_buf, oob_buf))
        return false;

    mark_block_bad_bmt(offset);

    return true;
}
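/*
 * Illustrative sketch only, not taken from this driver: the functions in this
 * section assume a table of (bad block -> replacement block) pairs roughly
 * shaped like the structures below. The field names bad_index, mapped_index
 * and mapped_count match the accesses made by update_bmt(); the type names,
 * field widths, capacity macro and version field are assumptions.
 */
#define MAX_BMT_SIZE_SKETCH 0x80            /* assumed capacity; real size is chip dependent */

typedef struct {
    u16 bad_index;                          /* physical index of the bad block */
    u16 mapped_index;                       /* physical index of its replacement in the pool */
} bmt_entry_sketch_t;

typedef struct {
    bmt_entry_sketch_t table[MAX_BMT_SIZE_SKETCH];
    u8 version;                             /* layout version stored with the table on flash */
    u8 mapped_count;                        /* number of entries currently in use */
} bmt_struct_sketch_t;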
/*******************************************************************
 * [BMT Interface]
 *
 * Description:
 *   Update the BMT after a block goes bad.
 *
 * Parameter:
 *   offset: offset of the failing block/page.
 *   reason: update reason, see update_reason_t.
 *   dat/oob: data and OOB buffers for the write-fail case.
 *
 * Return:
 *   Return true for success, and false for failure.
 *******************************************************************/
bool update_bmt(u32 offset, update_reason_t reason, u8 *dat, u8 *oob)
{
    int map_index;
    int orig_bad_block = -1;
    int i;
    int bad_index = offset / BLOCK_SIZE_BMT;

    if (reason == UPDATE_WRITE_FAIL) {
        MSG(INIT, "Write fail, need to migrate\n");
        if (!(map_index = migrate_from_bad(offset, dat, oob))) {
            MSG(INIT, "migrate fail\n");
            return false;
        }
    } else {
        if (!(map_index = find_available_block(false))) {
            MSG(INIT, "Cannot find block in pool\n");
            return false;
        }
    }

    /* Now update the BMT itself. */
    if (bad_index >= system_block_count) {
        /* A replacement block went bad: find the entry that maps to it and
         * repoint that entry at the newly chosen block. */
        for (i = 0; i < bmt_block_count; i++) {
            if (bmt.table[i].mapped_index == bad_index) {
                orig_bad_block = bmt.table[i].bad_index;
                break;
            }
        }
        if (i == bmt_block_count) {
            /* No matching entry; bail out rather than write past bmt.table. */
            MSG(INIT, "Mapped block 0x%x not found in BMT\n", bad_index);
            return false;
        }
        MSG(INIT, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block);
        bmt.table[i].mapped_index = map_index;
    } else {
        /* A normal system block went bad: append a new mapping entry. */
        bmt.table[bmt.mapped_count].mapped_index = map_index;
        bmt.table[bmt.mapped_count].bad_index = bad_index;
        bmt.mapped_count++;
    }

    memset(oob_buf, 0xFF, sizeof(oob_buf));
    fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
    if (!write_bmt_to_flash(dat_buf, oob_buf))
        return false;

    mark_block_bad_bmt(offset);

    return true;
}
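/*
 * Hedged usage sketch, not part of this driver: a typical caller reacts to a
 * failed page program by letting update_bmt() record and migrate the bad
 * block, then retries the write on the replacement block. The helpers
 * nand_write_page_hw() and get_mapping_block_index_sketch() are hypothetical
 * stand-ins for whatever primitives the surrounding driver provides;
 * PAGE_SIZE_BMT, page_per_block and PAGE_ADDR() come from the code above.
 */
static bool write_page_with_remap_sketch(u32 page, u8 *dat, u8 *oob)
{
    u32 block;

    if (nand_write_page_hw(page, dat, oob))     /* hypothetical program primitive */
        return true;

    /* Program failed: register the bad block and migrate its live pages. */
    if (!update_bmt(page * PAGE_SIZE_BMT, UPDATE_WRITE_FAIL, dat, oob))
        return false;

    /* Retry on the replacement block the BMT now points this block at. */
    block = get_mapping_block_index_sketch(page / page_per_block);  /* hypothetical lookup */
    return nand_write_page_hw(PAGE_ADDR(block) + page % page_per_block, dat, oob);
}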
static int migrate_from_bad(int offset, u8 *write_dat, u8 *write_oob)
{
    int page;
    int error_block = offset / BLOCK_SIZE_BMT;
    int error_page = (offset / PAGE_SIZE_BMT) % page_per_block;
    int to_index;

    memcpy(oob_buf, write_oob, MAX_OOB_SIZE);

    to_index = find_available_block(false);
    if (!to_index) {
        MSG(INIT, "Cannot find an available block for BMT\n");
        return 0;
    }

    /* Migrate the error page first. */
    MSG(INIT, "Write error page: 0x%x\n", error_page);
    if (!write_dat) {
        nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL);
        write_dat = dat_buf;
    }

    /* If error_block is already a mapped block, the original mapping index
     * is in its OOB; only stamp a bad index for system blocks. */
    if (error_block < system_block_count)
        set_bad_index_to_oob(oob_buf, error_block);

    if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf)) {
        MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page);
        mark_block_bad_bmt(OFFSET(to_index));
        return migrate_from_bad(offset, write_dat, write_oob);
    }

    for (page = 0; page < page_per_block; page++) {
        if (page == error_page)
            continue;

        nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
        if (!is_page_used(dat_buf, oob_buf))
            continue;

        if (error_block < system_block_count)
            set_bad_index_to_oob(oob_buf, error_block);

        MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n",
            PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page);
        if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf)) {
            MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + page);
            mark_block_bad_bmt(OFFSET(to_index));
            return migrate_from_bad(offset, write_dat, write_oob);
        }
    }

    MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index);

    return to_index;
}
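/*
 * Hedged sketch, not from this driver: the reason migrate_from_bad() calls
 * set_bad_index_to_oob() on every migrated page is that the mapping can be
 * rebuilt later by scanning the replacement pool. A reconstruction pass might
 * look roughly like the code below. total_block_count and
 * get_bad_index_from_oob() (the assumed inverse of set_bad_index_to_oob())
 * are hypothetical; everything else is taken from the functions above.
 */
static void rebuild_bmt_from_pool_sketch(void)
{
    int block;
    int bad_index;

    bmt.mapped_count = 0;
    for (block = system_block_count; block < total_block_count; block++) {
        nand_read_page_bmt(PAGE_ADDR(block), dat_buf, oob_buf);
        if (!is_page_used(dat_buf, oob_buf))
            continue;                               /* unused pool block */

        bad_index = get_bad_index_from_oob(oob_buf);    /* hypothetical */
        if (bad_index >= system_block_count)
            continue;                               /* no valid source index stored */

        bmt.table[bmt.mapped_count].bad_index = bad_index;
        bmt.table[bmt.mapped_count].mapped_index = block;
        bmt.mapped_count++;
    }
}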
/*******************************************************************
 * [BMT Interface]
 *
 * Description:
 *   Update the BMT after a block goes bad.
 *
 * Parameter:
 *   offset: offset of the failing block/page.
 *   reason: update reason, see update_reason_t.
 *   dat/oob: data and OOB buffers for the write-fail case.
 *
 * Return:
 *   Return true for success, and false for failure.
 *******************************************************************/
bool update_bmt(u32 offset, update_reason_t reason, u8 *dat, u8 *oob)
{
    int map_index;
    int orig_bad_block = -1;
    int i;
    int bad_index = offset / BLOCK_SIZE_BMT;

    if (bad_index < reserve_block) {
        /* The failure is inside the reserved region (preloader/DSP_BL). */
        if (reason == UPDATE_UNMAPPED_BLOCK) {
            return false;
        } else if (reason == UPDATE_WRITE_FAIL) {
            /* Write to preloader/DSP_BL failed (e.g. during an SD update). */
            if (!(map_index = migrate_from_bad(offset, dat, oob))) {
                MSG(INIT, "migrate fail\n");
                return false;
            }
        } else if (reason == UPDATE_ERASE_FAIL) {
            /* Erase of preloader/DSP_BL failed; pick a replacement from the
             * reserve pool. */
            if (!(map_index = find_available_block_reserve(bad_index)))
                return false;
        }
        mark_block_bad_bmt(offset);
        return map_reserve_region(reserve_block);
    }

    if (reason == UPDATE_WRITE_FAIL) {
        MSG(INIT, "Write fail, need to migrate\n");
        if (!(map_index = migrate_from_bad(offset, dat, oob))) {
            MSG(INIT, "migrate fail\n");
            return false;
        }
    } else {
        if (!(map_index = find_available_block(false))) {
            MSG(INIT, "Cannot find block in pool\n");
            return false;
        }
    }

    /* Now update the BMT itself. */
    if (bad_index >= system_block_count) {
        /* A replacement block went bad: find the entry that maps to it and
         * repoint that entry at the newly chosen block. */
        for (i = 0; i < bmt_block_count; i++) {
            if (bmt.table[i].mapped_index == bad_index) {
                orig_bad_block = bmt.table[i].bad_index;
                break;
            }
        }
        if (i == bmt_block_count) {
            /* No matching entry; bail out rather than write past bmt.table. */
            MSG(INIT, "Mapped block 0x%x not found in BMT\n", bad_index);
            return false;
        }
        MSG(INIT, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block);
        bmt.table[i].mapped_index = map_index;
    } else {
        /* A normal system block went bad: append a new mapping entry. */
        bmt.table[bmt.mapped_count].mapped_index = map_index;
        bmt.table[bmt.mapped_count].bad_index = bad_index;
        bmt.mapped_count++;
    }

    memset(oob_buf, 0xFF, sizeof(oob_buf));
    fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
    if (!write_bmt_to_flash(dat_buf, oob_buf))
        return false;

    mark_block_bad_bmt(offset);

    return true;
}
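/*
 * Hedged usage sketch, not part of this driver: an erase path that reports
 * failures to the update_bmt() variant above. Blocks below reserve_block
 * (preloader/DSP_BL) are remapped inside the reserved region by update_bmt()
 * itself; ordinary blocks get a fresh pool block. nand_erase_block_hw() is a
 * hypothetical stand-in for the real erase primitive; BLOCK_SIZE_BMT comes
 * from the code above.
 */
static bool erase_block_with_remap_sketch(u32 block)
{
    if (nand_erase_block_hw(block))             /* hypothetical erase primitive */
        return true;

    /* Erase failed: there is no page data to migrate, so dat/oob are NULL;
     * update_bmt() chooses between the reserve-region path and the normal
     * pool path based on the block offset. */
    return update_bmt(block * BLOCK_SIZE_BMT, UPDATE_ERASE_FAIL, NULL, NULL);
}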