// return valid index if found BMT, else return 0 static int load_bmt_data(int start, int pool_size) { int bmt_index = start + pool_size - 1; // find from the end phys_bmt_struct phys_table; int i; MSG(INIT, "[%s]: begin to search BMT from block 0x%x\n", __FUNCTION__, bmt_index); for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--) { if (nand_block_bad_bmt(OFFSET(bmt_index))) { MSG(INIT, "Skip bad block: %d\n", bmt_index); continue; } if (!nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf)) { MSG(INIT, "Error found when read block %d\n", bmt_index); continue; } if (!match_bmt_signature(dat_buf, oob_buf)) { continue; } MSG(INIT, "Match bmt signature @ block: 0x%x\n", bmt_index); memcpy(&phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(phys_table)); if (!valid_bmt_data(&phys_table)) { MSG(INIT, "BMT data is not correct %d\n", bmt_index); continue; } else { bmt.mapped_count = phys_table.header.mapped_count; bmt.version = phys_table.header.version; // bmt.bad_count = phys_table.header.bad_count; memcpy(bmt.table, phys_table.table, bmt.mapped_count * sizeof(bmt_entry)); MSG(INIT, "bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count); for (i = 0; i < bmt.mapped_count; i++) { if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index))) { MSG(INIT, "block 0x%x is not mark bad, should be power lost last time\n", bmt.table[i].bad_index); mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index)); } } return bmt_index; } } MSG(INIT, "bmt block not found!\n"); return 0; }
// return valid index if found BMT, else return 0 static int load_bmt_data(int start, int pool_size) { int bmt_index = start + pool_size - 1; // find from the end int i; MSG("begin to search BMT from block %d \n", bmt_index); for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--) { if (nand_block_bad_bmt(OFFSET(bmt_index), BAD_BLOCK_RAW) || nand_block_bad_bmt(OFFSET(bmt_index), BMT_BADBLOCK_GENERATE_LATER)) { MSG("Skip bad block: %d \n", bmt_index); continue; } if (nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf)) { MSG("Error found when read block: %d\n", bmt_index); continue; } if (!match_bmt_signature(dat_buf, oob_buf)) { continue; } MSG("Match bmt signature @ block: %d\n", bmt_index); memcpy(&lbd_phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(lbd_phys_table)); if (!valid_bmt_data(&lbd_phys_table)) { MSG("BMT data is not correct: %d\n", bmt_index); continue; } else { bmt.mapped_count = lbd_phys_table.header.mapped_count; bmt.version = lbd_phys_table.header.version; memcpy(bmt.table, lbd_phys_table.table, bmt.mapped_count * sizeof(bmt_entry)); MSG("bmt found at block: %d, mapped block: %d\n", bmt_index, bmt.mapped_count); for (i = 0; i < bmt.mapped_count; i++) { if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index), BAD_BLOCK_RAW)) { MSG("block %d is not mark bad, should be power lost last time\n", bmt.table[i].bad_index); mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index), BAD_BLOCK_RAW); } } return bmt_index; } } MSG("bmt not found!\n"); return 0; }
/*
 * Salvage a block whose write at 'offset' just failed: copy the pages
 * written so far into a fresh replacement block from the BMT pool, then
 * write the failing page's data (write_dat/write_oob) there.
 *
 * Returns the replacement block index, or 0 if no spare block is left.
 * Recurses if the replacement block also fails (it is marked bad first,
 * so each retry consumes a different block).
 */
static int migrate_from_bad(int offset, u8 *write_dat, u8 *write_oob)
{
    int page;
    int error_block = offset / BLOCK_SIZE_BMT;              /* block that went bad */
    int error_page = (offset / PAGE_SIZE_BMT) % page_per_block; /* failing page within it */
    int to_index;
    to_index = find_available_block(false);
    if (!to_index) {
        MSG("Cannot find an available block for BMT\n");
        return 0;
    }
    /* Copy only the pages BEFORE the failing one — presumably NAND pages
     * are programmed sequentially so later pages are still empty.
     * TODO(review): confirm; the other migrate_from_bad variant in this
     * file copies every used page instead. */
    for (page = 0; page < error_page; page++) {
        nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
        if (error_block < system_block_count) {
            set_bad_index_to_oob(oob_buf, error_block);
        }
        if (nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf)) {
            MSG("Write page %d fail\n", PAGE_ADDR(to_index) + page);
            mark_block_bad_bmt(OFFSET(to_index), BMT_BADBLOCK_GENERATE_LATER);
            return migrate_from_bad(offset, write_dat, write_oob);
        }
    }
    {
        /* Now write the page that originally failed, using the caller's
         * data and OOB (padded to full OOB size with 0xFF). */
        memset(oob_buf, 0xFF, sizeof(oob_buf));
#if defined(TCSUPPORT_CPU_MT7510)||defined(TCSUPPORT_CPU_MT7520)
        memcpy(oob_buf, write_oob, mtd_bmt->oobsize);
#else
        memcpy(oob_buf, write_oob, 1 << nand_chip_bmt->flash->oob_shift);
#endif
        if (error_block < system_block_count)
            set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, original mapping index is in OOB.
        if (nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf)) {
            MSG("Write page %d fail\n", PAGE_ADDR(to_index) + error_page);
            mark_block_bad_bmt(OFFSET(to_index), BMT_BADBLOCK_GENERATE_LATER);
            return migrate_from_bad(offset, write_dat, write_oob);
        }
    }
    MSG("Migrate from %d to %d done!\n",error_block, to_index);
    return to_index;
}
/*
 * Scan the pool [start, start + pool_size) for a valid initial bad-block
 * table (BBT) image and copy it into *init_bbt.
 *
 * Returns the block index the BBT was loaded from, or 0 if none found.
 */
static int load_bbt_data(int start, int pool_size, init_bbt_struct *init_bbt)
{
    int i;
    int ret = 0;
    int bbt_index = start;
    for (; bbt_index < (start + pool_size); bbt_index++) {
        /* Skip factory-marked and BMT-generated bad blocks. */
        if (nand_block_bad_bmt(OFFSET(bbt_index), BAD_BLOCK_RAW) || nand_block_bad_bmt(OFFSET(bbt_index), BMT_BADBLOCK_GENERATE_LATER)) {
            MSG("Skip bad block: %d\n", bbt_index);
            continue;
        }
        if (nand_read_page_bmt(PAGE_ADDR(bbt_index), dat_buf, oob_buf)) {
            MSG("Error found when read block %d\n", bbt_index);
            continue;
        }
        if (!match_bbt_signature(dat_buf, oob_buf)) {
            continue;
        }
        MSG("Match bbt signature \n");
        memcpy(&lbd_init_table, dat_buf + BBT_SIGNATURE_OFFSET, sizeof(lbd_init_table));
        if (!valid_bbt_data(&lbd_init_table)) {
            MSG("BBT data is not correct \n");
            continue;
        } else {
            init_bbt->badblock_count = lbd_init_table.header.badblock_count;
            init_bbt->version = lbd_init_table.header.version;
            /* Each table entry is 2 bytes (presumably a u16 block index —
             * confirm against init_bbt_struct's declaration). */
            memcpy(init_bbt->badblock_table, lbd_init_table.badblock_table, (init_bbt->badblock_count) * 2);
            /* FIX: log the table we just populated (init_bbt); the original
             * printed the unrelated global lbd_init_bbt here, which need not
             * reflect what was loaded into *init_bbt. */
            MSG("bbt found, bad block count: %d\n", init_bbt->badblock_count);
            for (i = 0; i < init_bbt->badblock_count; i++) {
                MSG("init_bbt->badblock_table[%d]: %d \n", i, init_bbt->badblock_table[i]);
            }
            return bbt_index;
        }
    }
    return ret;
}
/*******************************************************************
 * Rebuild the BMT by scanning the replacement pool, used when the
 * stored BMT disagrees with the bad-block marks actually in flash.
 *
 * Each pool block's OOB records which bad block it replaces; every
 * valid record becomes one mapping entry. Blocks referenced as bad
 * but not yet marked bad are marked here.
 *
 * Return NULL for failure
 *******************************************************************/
bmt_struct *reconstruct_bmt(bmt_struct * bmt)
{
    int slot;
    int blk = system_block_count;   /* pool starts right after the system area */
    unsigned short bad_index;

    /* Reset the table before rebuilding it. */
    bmt->version = BMT_VERSION;
    bmt->bad_count = 0;
    bmt->mapped_count = 0;
    memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));

    for (slot = 0; slot < bmt_block_count; slot++, blk++) {
        if (nand_block_bad_bmt(OFFSET(blk), BAD_BLOCK_RAW) || nand_block_bad_bmt(OFFSET(blk), BMT_BADBLOCK_GENERATE_LATER)) {
            MSG("Skip bad block: %d \n", blk);
            continue;
        }

        nand_read_page_bmt(PAGE_ADDR(blk), dat_buf, oob_buf);
        bad_index = get_bad_index_from_oob(oob_buf);
        if (bad_index >= system_block_count) {
            /* 0xFFFF means "unused"; anything else out of range is garbage. */
            MSG("get bad index: 0x%x \n", bad_index);
            if (bad_index != 0xFFFF) {
                MSG("Invalid bad index found in block: %d \n", blk);
            }
            continue;
        }

        MSG("Block 0x%x is mapped to bad block: 0x%x\n", blk, bad_index);
        /* The replaced block must carry a raw bad mark; repair if missing. */
        if (!nand_block_bad_bmt(OFFSET(bad_index), BAD_BLOCK_RAW)) {
            mark_block_bad_bmt(OFFSET(bad_index), BAD_BLOCK_RAW);
            MSG("block %d is not marked as bad, mark it\n", bad_index);
        }

        {
            /* Record the mapping in the next free table entry. */
            bmt_entry *ent = &bmt->table[bmt->mapped_count];
            ent->bad_index = bad_index;
            ent->mapped_index = blk;
            bmt->mapped_count++;
        }
        MSG("Add mapping: %d -> %d to BMT\n", bad_index, blk);
    }

    MSG("Scan replace pool done, mapped block: %d\n", bmt->mapped_count);
    return bmt;
}
/*******************************************************************
 * Reconstruct bmt, called when found bmt info doesn't match bad
 * block info in flash.
 *
 * Return NULL for failure
 * NOTE(review): as written this always returns bmt, even when the
 * rebuilt table cannot be written back to flash — confirm callers
 * do not rely on a NULL failure return.
 *******************************************************************/
bmt_struct *reconstruct_bmt(bmt_struct * bmt)
{
    int i;
    int index = system_block_count;   /* replacement pool starts after the system area */
    unsigned short bad_index;
    int mapped;
    /* Reset the table before rebuilding it from the pool blocks' OOB. */
    bmt->version = BMT_VERSION;
    bmt->bad_count = 0;
    bmt->mapped_count = 0;
    memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));
    for (i = 0; i < bmt_block_count; i++, index++) {
        if (nand_block_bad_bmt(OFFSET(index))) {
            MSG(INFO, "Skip bad block: 0x%x\n", index);
            continue;
        }
        /* Each pool block records in its OOB which bad block it replaces. */
        nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf);
        if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count) {
            /* 0xFFFF means "unused block"; other out-of-range values are garbage. */
            MSG(INIT, "get bad index: 0x%x\n", bad_index);
            if (bad_index != 0xFFFF)
                MSG(INIT, "Invalid bad index found in block 0x%x, bad index 0x%x\n", index, bad_index);
            continue;
        }
        MSG(INIT, "Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index);
        /* A mapping whose target is not actually marked bad is stale — drop it. */
        if (!nand_block_bad_bmt(OFFSET(bad_index))) {
            MSG(INIT, "\tbut block 0x%x is not marked as bad, invalid mapping\n", bad_index);
            continue; // no need to erase here, it will be erased later when trying to write BMT
        }
        /* Duplicate mapping for the same bad block can appear after a power
         * loss mid-migration; prefer the later (higher-index) pool block. */
        if ((mapped = is_block_mapped(bad_index)) >= 0) {
            MSG(INIT, "bad block 0x%x is mapped to 0x%x, should be caused by power lost, replace with one\n", bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index);
            bmt->table[mapped].mapped_index = index; // use new one instead.
        } else {
            /* First mapping seen for this bad block: append a new entry. */
            bmt->table[bmt->mapped_count].bad_index = bad_index;
            bmt->table[bmt->mapped_count].mapped_index = index;
            bmt->mapped_count++;
        }
        MSG(INIT, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index);
    }
    MSG(INIT, "Scan replace pool done, mapped block: %d\n", bmt->mapped_count);
    /* Persist the rebuilt table back to flash. */
    memset(oob_buf, 0xFF, sizeof(oob_buf));
    fill_nand_bmt_buffer(bmt, dat_buf, oob_buf);
    if (!write_bmt_to_flash(dat_buf, oob_buf)) {
        MSG(INIT, "TRAGEDY: cannot find a place to write BMT!!!!\n");
    }
    return bmt;
}
/*
 * Salvage a block whose write at 'offset' just failed: pick a fresh
 * replacement block from the pool, write the failing page's data there
 * first, then copy over every other used page of the bad block.
 *
 * write_dat may be NULL, in which case the failing page's data is
 * re-read from the bad block itself (e.g. a mark-bad with no new data).
 *
 * Returns the replacement block index, or 0 if no spare block is left.
 * Recurses if the replacement block also fails (it is marked bad first,
 * so each retry consumes a different pool block).
 */
static int migrate_from_bad(int offset, u8 * write_dat, u8 * write_oob)
{
    int page;
    int error_block = offset / BLOCK_SIZE_BMT;                  /* block that went bad */
    int error_page = (offset / PAGE_SIZE_BMT) % page_per_block; /* failing page within it */
    int to_index;
    /* Snapshot the caller's OOB before the copy loop reuses oob_buf. */
    memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
    to_index = find_available_block(false);
    if (!to_index) {
        MSG(INIT, "Cannot find an available block for BMT\n");
        return 0;
    }
    { // migrate error page first
        MSG(INIT, "Write error page: 0x%x\n", error_page);
        if (!write_dat) {
            /* No new data supplied: keep whatever the bad block holds. */
            nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL);
            write_dat = dat_buf;
        }
        // memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
        if (error_block < system_block_count)
            set_bad_index_to_oob(oob_buf, error_block); // if error_block is already a mapped block, original mapping index is in OOB.
        /* Convention in this variant: zero return == write failure. */
        if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf)) {
            MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page);
            mark_block_bad_bmt(OFFSET(to_index));
            return migrate_from_bad(offset, write_dat, write_oob);
        }
    }
    /* Copy every other used page from the bad block to the new one. */
    for (page = 0; page < page_per_block; page++) {
        if (page != error_page) {
            nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
            if (is_page_used(dat_buf, oob_buf)) {
                if (error_block < system_block_count) {
                    set_bad_index_to_oob(oob_buf, error_block);
                }
                MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n", PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page);
                if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf)) {
                    MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + page);
                    mark_block_bad_bmt(OFFSET(to_index));
                    /* NOTE(review): if write_dat was pointed at dat_buf above,
                     * dat_buf has since been overwritten by this loop's reads,
                     * so the recursive call rewrites the error page with the
                     * wrong data. Fixing this needs a separate buffer or a
                     * re-read — confirm and address. */
                    return migrate_from_bad(offset, write_dat, write_oob);
                }
            }
        }
    }
    MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index);
    return to_index;
}
/*******************************************************************
 * Reconstruct bmt, called when found bmt info doesn't match bad
 * block info in flash.
 *
 * Return NULL for failure
 * NOTE(review): as written this always returns bmt, even when the
 * rebuilt table cannot be written back — confirm callers do not
 * depend on a NULL failure return.
 *******************************************************************/
bmt_struct *reconstruct_bmt(bmt_struct * bmt)
{
    int i;
    int index = system_block_count;   /* replacement pool starts after the system area */
    unsigned short bad_index;
    int mapped;
    // init everything in BMT struct
    bmt->version = BMT_VERSION;
    bmt->bad_count = 0;
    bmt->mapped_count = 0;
    memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));
    for (i = 0; i < bmt_block_count; i++, index++) {
        if (nand_block_bad_bmt(OFFSET(index))) {
            // MSG(INIT, "Skip bad block: 0x%x\n", index);
            // bmt->bad_count++;
            continue;
        }
        /* Each pool block records in its OOB which bad block it replaces. */
        // MSG(INIT, "read page: 0x%x\n", PAGE_ADDR(index));
        nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf);
        /* if (mt6573_nand_read_page_hw(PAGE_ADDR(index), dat_buf)) { MSG(INIT, "Error when read block %d\n", bmt_block_index); continue; } */
        if ((bad_index = get_bad_index_from_oob(oob_buf)) >= system_block_count) {
            /* 0xFFFF means "unused block"; other out-of-range values are garbage. */
            // MSG(INIT, "get bad index: 0x%x\n", bad_index);
            if (bad_index != 0xFFFF)
                MSG(INIT, "warning @ 0x%x\n", index);
            continue;
        }
        // MSG(INIT, "Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index);
        /* A mapping whose target is not actually marked bad is stale — drop it. */
        if (!nand_block_bad_bmt(OFFSET(bad_index))) {
            // MSG(INIT, "\tbut block 0x%x is not marked as bad, invalid mapping\n", bad_index);
            continue; // no need to erase here, it will be erased later when trying to write BMT
        }
        /* Duplicate mapping for the same bad block can appear after a power
         * loss mid-migration; prefer the later (higher-index) pool block. */
        if ( (mapped = is_block_mapped(bad_index)) >= 0) {
            // MSG(INIT, "bad block 0x%x is mapped to 0x%x, should be caused by power lost, replace with one\n",
            // bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index);
            bmt->table[mapped].mapped_index = index; // use new one instead.
        } else {
            // add mapping to BMT
            bmt->table[bmt->mapped_count].bad_index = bad_index;
            bmt->table[bmt->mapped_count].mapped_index = index;
            bmt->mapped_count++;
        }
        MSG(INIT, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index);
    }
    MSG(INIT, "Scan replace pool done, mapped block: %d\n", bmt->mapped_count);
    // dump_bmt_info(bmt);
    // fill NAND BMT buffer
    memset(oob_buf, 0xFF, sizeof(oob_buf));
    fill_nand_bmt_buffer(bmt, dat_buf, oob_buf);
    // write BMT back
    if (!write_bmt_to_flash(dat_buf, oob_buf)) {
        MSG(INIT, "TRAGEDY\n");
    }
    return bmt;
}