#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <bmt.h>
#include <linux/module.h>
#include <linux/slab.h>

typedef struct {
	char signature[3];
	u8 version;
	u8 bad_count;		/* bad block count in pool */
	u8 mapped_count;	/* mapped block count in pool */
	u8 checksum;
	u8 reserved[13];
} phys_bmt_header;

typedef struct {
	phys_bmt_header header;
	bmt_entry table[MAX_BMT_SIZE];
} phys_bmt_struct;

typedef struct {
	char signature[3];
} bmt_oob_data;

static const char MAIN_SIGNATURE[] = "BMT";
static const char OOB_SIGNATURE[] = "bmt";
#define SIGNATURE_SIZE (3)

#define MAX_DAT_SIZE 0x4000
#define MAX_OOB_SIZE 0x800

static struct mtd_info *mtd_bmt;
static struct nand_chip *nand_chip_bmt;
#define BLOCK_SIZE_BMT (1 << nand_chip_bmt->phys_erase_shift)
#define PAGE_SIZE_BMT (1 << nand_chip_bmt->page_shift)
#define PAGE_PER_SIZE_BMT (1 << (nand_chip_bmt->phys_erase_shift - nand_chip_bmt->page_shift))

#define OFFSET(block) (((u64)block) * BLOCK_SIZE_BMT)
#define PAGE_ADDR(block) ((block) * PAGE_PER_SIZE_BMT)

/*********************************************************************
 * Flash is split into two parts: the system part, used for normal  *
 * system usage (its size is system_block_count); the other part is *
 * the replace pool.                                                 *
 * +------------------------------------------------+               *
 * |     system_block_count     | bmt_block_count   |               *
 * +------------------------------------------------+               *
 *********************************************************************/
static u32 total_block_count;	/* block number in flash */
static u32 system_block_count;
static int bmt_block_count;	/* bmt table size */
/* static int bmt_count; // blocks used in bmt */
static int page_per_block;	/* pages per block */
static u32 bmt_block_index;	/* bmt block index */
static bmt_struct bmt;		/* dynamically created global bmt table */
static u8 dat_buf[MAX_DAT_SIZE];
static u8 oob_buf[MAX_OOB_SIZE];
static bool pool_erased;
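/*
 * Worked example of the address arithmetic above (a sketch with assumed
 * geometry, not taken from any particular part): with 128 KiB erase blocks
 * (phys_erase_shift = 17) and 2 KiB pages (page_shift = 11),
 * PAGE_PER_SIZE_BMT = 1 << (17 - 11) = 64 pages per block, so for block
 * 4032, OFFSET(4032) = 4032 * 128 KiB = 0x1F800000 is its byte offset and
 * PAGE_ADDR(4032) = 4032 * 64 = 0x3F000 is its first page address.
 */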
/***************************************************************
 *
 * Interface adaptor for preloader/uboot/kernel.
 * These interfaces operate on physical addresses and read/write
 * physical data.
 *
 ***************************************************************/
int nand_read_page_bmt(u32 page, u8 *dat, u8 *oob)
{
	return mtk_nand_exec_read_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob);
}

bool nand_block_bad_bmt(u64 offset)
{
	return mtk_nand_block_bad_hw(mtd_bmt, offset);
}

bool nand_erase_bmt(u64 offset)
{
	int status;

	if (offset < 0x20000)
		pr_debug("erase offset: 0x%llx\n", offset);
	/* the nand_chip structure doesn't define an erase function */
	status = mtk_nand_erase_hw(mtd_bmt, (u32)(offset >> nand_chip_bmt->page_shift));
	if (status & NAND_STATUS_FAIL)
		return false;
	else
		return true;
}

int mark_block_bad_bmt(u64 offset)
{
	return mtk_nand_block_markbad_hw(mtd_bmt, offset);	/* mark_block_bad_hw(offset); */
}

bool nand_write_page_bmt(u32 page, u8 *dat, u8 *oob)
{
	/* pr_debug("[xiaolei] nand_write_page_bmt 0x%x\n", (u32)dat); */
	if (mtk_nand_exec_write_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob))
		return false;
	else
		return true;
}
/********************************************************
 *                                                      *
 *              static internal functions               *
 *                                                      *
 ********************************************************/
static void dump_bmt_info(bmt_struct *bmt)
{
	int i;

	pr_notice("BMT v%d. total %d mapping:\n", bmt->version, bmt->mapped_count);
	for (i = 0; i < bmt->mapped_count; i++)
		pr_notice("0x%x -> 0x%x ", bmt->table[i].bad_index, bmt->table[i].mapped_index);
	pr_notice("\n");
}

static bool match_bmt_signature(u8 *dat, u8 *oob)
{
	if (memcmp(dat + MAIN_SIGNATURE_OFFSET, MAIN_SIGNATURE, SIGNATURE_SIZE))
		return false;
	if (memcmp(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE))
		pr_debug("main signature matches but oob signature doesn't; ignoring\n");
	return true;
}

static u8 cal_bmt_checksum(phys_bmt_struct *phys_table, int bmt_size)
{
	int i;
	u8 checksum = 0;
	u8 *dat = (u8 *)phys_table;

	/* the checksum covers version, mapped_count and the mapping table itself */
	checksum += phys_table->header.version;
	checksum += phys_table->header.mapped_count;
	dat += sizeof(phys_bmt_header);
	for (i = 0; i < bmt_size * sizeof(bmt_entry); i++)
		checksum += dat[i];
	return checksum;
}

static int is_block_mapped(int index)
{
	int i;

	for (i = 0; i < bmt.mapped_count; i++) {
		if (index == bmt.table[i].mapped_index)
			return i;
	}
	return -1;
}

static bool is_page_used(u8 *dat, u8 *oob)
{
	if (PAGE_SIZE_BMT == 2048)
		return (oob[13] != 0xFF) || (oob[14] != 0xFF);
	else
		return (oob[OOB_INDEX_OFFSET] != 0xFF) || (oob[OOB_INDEX_OFFSET + 1] != 0xFF);
}

static bool valid_bmt_data(phys_bmt_struct *phys_table)
{
	int i;
	u8 checksum = cal_bmt_checksum(phys_table, bmt_block_count);

	/* checksum correct? */
	if (phys_table->header.checksum != checksum) {
		pr_err("BMT data checksum error: %x %x\n",
		       phys_table->header.checksum, checksum);
		return false;
	}
	pr_debug("BMT checksum is: 0x%x\n", phys_table->header.checksum);
	/* block indexes correct? */
	for (i = 0; i < phys_table->header.mapped_count; i++) {
		if (phys_table->table[i].bad_index >= total_block_count
		    || phys_table->table[i].mapped_index >= total_block_count
		    || phys_table->table[i].mapped_index < system_block_count) {
			pr_info("index error: bad_index: %d, mapped_index: %d\n",
				phys_table->table[i].bad_index, phys_table->table[i].mapped_index);
			return false;
		}
	}
	/* checks passed, valid bmt */
	pr_debug("Valid BMT, version v%d\n", phys_table->header.version);
	return true;
}
static void fill_nand_bmt_buffer(bmt_struct *bmt, u8 *dat, u8 *oob)
{
	phys_bmt_struct *phys_bmt = NULL;

	phys_bmt = kmalloc(sizeof(phys_bmt_struct), GFP_KERNEL);
	if (!phys_bmt) {
		pr_err("[fill_nand_bmt_buffer]kmalloc phys_bmt_struct fail!\n");
		while (1)
			;
	}
	dump_bmt_info(bmt);
	/* fill the phys_bmt_struct structure from bmt_struct */
	memset(phys_bmt, 0xFF, sizeof(phys_bmt_struct));
	memcpy(phys_bmt->header.signature, MAIN_SIGNATURE, SIGNATURE_SIZE);
	phys_bmt->header.version = BMT_VERSION;
	/* phys_bmt.header.bad_count = bmt->bad_count; */
	phys_bmt->header.mapped_count = bmt->mapped_count;
	memcpy(phys_bmt->table, bmt->table, sizeof(bmt_entry) * bmt_block_count);
	phys_bmt->header.checksum = cal_bmt_checksum(phys_bmt, bmt_block_count);
	memcpy(dat + MAIN_SIGNATURE_OFFSET, phys_bmt, sizeof(phys_bmt_struct));
	memcpy(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE);
	kfree(phys_bmt);
}
/* return a valid block index if a BMT is found, else return 0 */
static int load_bmt_data(int start, int pool_size)
{
	int bmt_index = start + pool_size - 1;	/* search from the end */
	phys_bmt_struct *phys_table = NULL;
	int i;

	phys_table = kmalloc(sizeof(phys_bmt_struct), GFP_KERNEL);
	if (!phys_table) {
		pr_err("[load_bmt_data]kmalloc phys_bmt_struct fail!\n");
		return 0;	/* callers treat 0 as "no BMT found" */
	}
	pr_debug("[%s]: begin to search BMT from block 0x%x\n", __func__, bmt_index);
	for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--) {
		if (nand_block_bad_bmt(OFFSET(bmt_index))) {
			pr_debug("Skip bad block: %d\n", bmt_index);
			continue;
		}
		if (!nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf)) {
			pr_debug("Error found when reading block %d\n", bmt_index);
			continue;
		}
		if (!match_bmt_signature(dat_buf, oob_buf))
			continue;
		pr_debug("Match bmt signature @ block: 0x%x\n", bmt_index);
		memcpy(phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(phys_bmt_struct));
		if (!valid_bmt_data(phys_table)) {
			pr_err("BMT data is not correct %d\n", bmt_index);
			continue;
		} else {
			bmt.mapped_count = phys_table->header.mapped_count;
			bmt.version = phys_table->header.version;
			/* bmt.bad_count = phys_table.header.bad_count; */
			memcpy(bmt.table, phys_table->table, bmt.mapped_count * sizeof(bmt_entry));
			pr_debug("bmt found at block: %d, mapped blocks: %d\n", bmt_index,
				 bmt.mapped_count);
			for (i = 0; i < bmt.mapped_count; i++) {
				if (!nand_block_bad_bmt(OFFSET(bmt.table[i].bad_index))) {
					pr_debug("block 0x%x is not marked bad; power was probably lost last time\n",
						 bmt.table[i].bad_index);
					mark_block_bad_bmt(OFFSET(bmt.table[i].bad_index));
				}
			}
			kfree(phys_table);
			return bmt_index;
		}
	}
	pr_err("bmt block not found!\n");
	kfree(phys_table);
	return 0;
}
/************************************************************************
 * Find an available block in the replace pool and erase it.           *
 * start_from_end: if true, search from the end of flash;              *
 *                 otherwise search from the beginning of the pool.    *
 * On the first call, all unmapped blocks in the pool are erased.      *
 ************************************************************************/
static int find_available_block(bool start_from_end)
{
	int i;	/* , j; */
	int block = system_block_count;
	int direction;
	/* int avail_index = 0; */

	pr_debug("Try to find_available_block, pool_erased: %d\n", pool_erased);
	/* erase all unmapped blocks in the pool before picking a block */
	if (!pool_erased) {
		pr_debug("Erase all un-mapped blocks in pool\n");
		for (i = 0; i < bmt_block_count; i++) {
			if (block + i == bmt_block_index) {
				pr_debug("Skip bmt block 0x%x\n", block + i);
				continue;
			}
			if (nand_block_bad_bmt(OFFSET(block + i))) {
				pr_debug("Skip bad block 0x%x\n", block + i);
				continue;
			}
			/* if(block==4095) */
			/* { */
			/* continue; */
			/* } */
			if (is_block_mapped(block + i) >= 0) {
				pr_debug("Skip mapped block 0x%x\n", block + i);
				continue;
			}
			if (!nand_erase_bmt(OFFSET(block + i))) {
				pr_debug("Erase block 0x%x failed\n", block + i);
				mark_block_bad_bmt(OFFSET(block + i));
			}
		}
		pool_erased = true;
	}
	if (start_from_end) {
		block = total_block_count - 1;
		direction = -1;
	} else {
		block = system_block_count;
		direction = 1;
	}
	for (i = 0; i < bmt_block_count; i++, block += direction) {
		if (block == bmt_block_index) {
			pr_debug("Skip bmt block 0x%x\n", block);
			continue;
		}
		if (nand_block_bad_bmt(OFFSET(block))) {
			pr_debug("Skip bad block 0x%x\n", block);
			continue;
		}
		if (is_block_mapped(block) >= 0) {
			pr_debug("Skip mapped block 0x%x\n", block);
			continue;
		}
		pr_debug("Find block 0x%x available\n", block);
		return block;
	}
	return 0;
}
static unsigned short get_bad_index_from_oob(u8 *oob_buf)
{
	unsigned short index;

	if (PAGE_SIZE_BMT == 2048) {	/* sector 1024 FDM size = 16, mark location moved */
		memcpy(&index, oob_buf + 13, OOB_INDEX_SIZE);
	} else {
		memcpy(&index, oob_buf + OOB_INDEX_OFFSET, OOB_INDEX_SIZE);
	}
	return index;
}

void set_bad_index_to_oob(u8 *oob, u16 index)
{
	if (PAGE_SIZE_BMT == 2048)
		memcpy(oob + 13, &index, sizeof(index));
	else
		memcpy(oob + OOB_INDEX_OFFSET, &index, sizeof(index));
}
static int migrate_from_bad(u64 offset, u8 *write_dat, u8 *write_oob)
{
	int page;
	u32 error_block = (u32)(offset >> nand_chip_bmt->phys_erase_shift);
	u32 error_page = (u32)(offset >> nand_chip_bmt->page_shift) % page_per_block;
	int to_index;

	memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
	to_index = find_available_block(false);
	if (!to_index) {
		pr_err("Cannot find an available block for BMT\n");
		return 0;
	}

	{	/* migrate the failing page first */
		pr_debug("Write error page: 0x%x\n", error_page);
		if (!write_dat) {
			nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL);
			write_dat = dat_buf;
		}
		/* memcpy(oob_buf, write_oob, MAX_OOB_SIZE); */
		/* if error_block is already a mapped block, the original mapping index is in OOB */
		if (error_block < system_block_count)
			set_bad_index_to_oob(oob_buf, error_block);
		if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf)) {
			pr_debug("Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page);
			mark_block_bad_bmt(OFFSET(to_index));
			return migrate_from_bad(offset, write_dat, write_oob);
		}
	}

	/* then copy every other used page of the bad block to the new block */
	for (page = 0; page < page_per_block; page++) {
		if (page != error_page) {
			nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
			if (is_page_used(dat_buf, oob_buf)) {
				if (error_block < system_block_count)
					set_bad_index_to_oob(oob_buf, error_block);
				pr_debug("\tmigrate page 0x%x to page 0x%x\n",
					 PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page);
				if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf)) {
					pr_debug("Write to page 0x%x fail\n",
						 PAGE_ADDR(to_index) + page);
					mark_block_bad_bmt(OFFSET(to_index));
					return migrate_from_bad(offset, write_dat, write_oob);
				}
			}
		}
	}
	pr_debug("Migrate from 0x%x to 0x%x done!\n", error_block, to_index);
	return to_index;
}
static bool write_bmt_to_flash(u8 *dat, u8 *oob)
{
	bool need_erase = true;

	pr_debug("Try to write BMT\n");
	if (bmt_block_index == 0) {
		/* if we don't have an index, the block found below doesn't need
		 * erasing: it was already erased in find_available_block() */
		need_erase = false;
		bmt_block_index = find_available_block(true);
		if (!bmt_block_index) {
			pr_info("Cannot find an available block for BMT\n");
			return false;
		}
	}
	pr_debug("Find BMT block: 0x%x\n", bmt_block_index);
	/* write bmt to flash */
	if (need_erase) {
		if (!nand_erase_bmt(OFFSET(bmt_block_index))) {
			pr_debug("BMT block erase fail, mark bad: 0x%x\n", bmt_block_index);
			mark_block_bad_bmt(OFFSET(bmt_block_index));
			/* bmt.bad_count++; */
			bmt_block_index = 0;
			return write_bmt_to_flash(dat, oob);	/* recursive call */
		}
	}
	if (!nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob)) {
		pr_debug("Write BMT data fail, need to write again\n");
		mark_block_bad_bmt(OFFSET(bmt_block_index));
		/* bmt.bad_count++; */
		bmt_block_index = 0;
		return write_bmt_to_flash(dat, oob);	/* recursive call */
	}
	pr_debug("Write BMT data to block 0x%x success\n", bmt_block_index);
	return true;
}
/*******************************************************************
 * Reconstruct the bmt; called when the BMT info found in flash
 * doesn't match the bad block info in flash.
 *
 * Returns the reconstructed bmt_struct.
 *******************************************************************/
bmt_struct *reconstruct_bmt(bmt_struct *bmt)
{
	int i;
	int index = system_block_count;
	unsigned short bad_index;
	int mapped;

	/* init everything in the BMT struct */
	bmt->version = BMT_VERSION;
	bmt->bad_count = 0;
	bmt->mapped_count = 0;
	memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));

	for (i = 0; i < bmt_block_count; i++, index++) {
		if (nand_block_bad_bmt(OFFSET(index))) {
			pr_debug("Skip bad block: 0x%x\n", index);
			/* bmt->bad_count++; */
			continue;
		}
		pr_debug("read page: 0x%x\n", PAGE_ADDR(index));
		nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf);
		/* if (mtk_nand_read_page_hw(PAGE_ADDR(index), dat_buf))
		 * {
		 *	pr_debug("Error when read block %d\n", bmt_block_index);
		 *	continue;
		 * } */
		bad_index = get_bad_index_from_oob(oob_buf);
		if (bad_index >= system_block_count) {
			pr_debug("get bad index: 0x%x\n", bad_index);
			if (bad_index != 0xFFFF)
				pr_debug("Invalid bad index found in block 0x%x, bad index 0x%x\n",
					 index, bad_index);
			continue;
		}
		pr_debug("Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index);
		if (!nand_block_bad_bmt(OFFSET(bad_index))) {
			pr_debug("\tbut block 0x%x is not marked as bad, invalid mapping\n",
				 bad_index);
			continue;	/* no need to erase here; it will be erased later when the BMT is written */
		}
		mapped = is_block_mapped(bad_index);
		if (mapped >= 0) {
			pr_debug("bad block 0x%x is already mapped to 0x%x, probably due to power loss; replace the mapping\n",
				 bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index);
			bmt->table[mapped].mapped_index = index;	/* use the new one instead */
		} else {
			/* add the mapping to the BMT */
			bmt->table[bmt->mapped_count].bad_index = bad_index;
			bmt->table[bmt->mapped_count].mapped_index = index;
			bmt->mapped_count++;
		}
		pr_debug("Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index);
	}
	pr_debug("Scan of replace pool done, mapped blocks: %d\n", bmt->mapped_count);
	/* dump_bmt_info(bmt); */

	/* fill the NAND BMT buffer and write the BMT back */
	memset(oob_buf, 0xFF, sizeof(oob_buf));
	fill_nand_bmt_buffer(bmt, dat_buf, oob_buf);
	if (!write_bmt_to_flash(dat_buf, oob_buf))
		pr_notice("TRAGEDY: cannot find a place to write BMT!!!!\n");
	return bmt;
}
/*******************************************************************
 * [BMT Interface]
 *
 * Description:
 *	Init bmt from nand. Reconstruct if not found or on data error.
 *
 * Parameter:
 *	size: size of the bmt and replace pool
 *
 * Return:
 *	NULL for failure, a bmt struct for success
 *******************************************************************/
bmt_struct *init_bmt(struct nand_chip *chip, int size)
{
	struct mtk_nand_host *host;

	if (size > 0 && size < MAX_BMT_SIZE) {
		pr_debug("Init bmt table, size: %d\n", size);
		bmt_block_count = size;
	} else {
		pr_debug("Invalid bmt table size: %d\n", size);
		return NULL;
	}
	nand_chip_bmt = chip;
	system_block_count = (u32)(chip->chipsize >> chip->phys_erase_shift);
	total_block_count = bmt_block_count + system_block_count;
	page_per_block = BLOCK_SIZE_BMT / PAGE_SIZE_BMT;
	host = (struct mtk_nand_host *)chip->priv;
	mtd_bmt = &host->mtd;

	pr_debug("mtd_bmt: %p, nand_chip_bmt: %p\n", mtd_bmt, nand_chip_bmt);
	pr_debug("bmt count: %d, system count: %d\n", bmt_block_count, system_block_count);

	/* with this flag cleared, unmapped blocks in the pool will be erased
	 * on the first call to find_available_block() */
	pool_erased = false;
	memset(bmt.table, 0, size * sizeof(bmt_entry));
	bmt_block_index = load_bmt_data(system_block_count, size);
	if (bmt_block_index) {
		pr_debug("Load bmt data success @ block 0x%x\n", bmt_block_index);
		dump_bmt_info(&bmt);
		return &bmt;
	}
	pr_debug("Load bmt data fail, need to re-construct!\n");
	if (reconstruct_bmt(&bmt))
		return &bmt;
	else
		return NULL;
}
EXPORT_SYMBOL_GPL(init_bmt);
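/*
 * Usage sketch (illustrative only, not called anywhere in this file):
 * a NAND driver would typically call init_bmt() once after the chip has
 * been probed, before registering the MTD partitions. The 'chip' pointer
 * and the pool size of 0x50 blocks are assumptions made for this example.
 *
 *	bmt_struct *bmt_table = init_bmt(chip, 0x50);
 *
 *	if (!bmt_table)
 *		pr_err("BMT init failed, bad block mapping unavailable\n");
 */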
/*******************************************************************
 * [BMT Interface]
 *
 * Description:
 *	Update the BMT.
 *
 * Parameter:
 *	offset: offset of the failing block/page.
 *	reason: update reason, see update_reason_t.
 *	dat/oob: data and OOB buffers of the failed write.
 *
 * Return:
 *	true for success, false for failure.
 *******************************************************************/
bool update_bmt(u64 offset, update_reason_t reason, u8 *dat, u8 *oob)
{
	int map_index;
	int orig_bad_block = -1;
	/* int bmt_update_index; */
	int i;
	u32 bad_index = (u32)(offset >> nand_chip_bmt->phys_erase_shift);

	/* return false; */
	if (reason == UPDATE_WRITE_FAIL) {
		pr_debug("Write fail, need to migrate\n");
		map_index = migrate_from_bad(offset, dat, oob);
		if (!map_index) {
			pr_debug("migrate fail\n");
			return false;
		}
	} else {
		map_index = find_available_block(false);
		if (!map_index) {
			pr_debug("Cannot find block in pool\n");
			return false;
		}
	}

	/* now update the BMT */
	if (bad_index >= system_block_count) {
		/* a mapped block went bad, find the original bad block */
		for (i = 0; i < bmt_block_count; i++) {
			if (bmt.table[i].mapped_index == bad_index) {
				orig_bad_block = bmt.table[i].bad_index;
				break;
			}
		}
		if (i == bmt_block_count) {
			/* no existing mapping found; don't write past the table */
			pr_err("no mapping found for bad block 0x%x\n", bad_index);
			return false;
		}
		/* bmt.bad_count++; */
		pr_debug("Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block);
		bmt.table[i].mapped_index = map_index;
	} else {
		bmt.table[bmt.mapped_count].mapped_index = map_index;
		bmt.table[bmt.mapped_count].bad_index = bad_index;
		bmt.mapped_count++;
	}
	memset(oob_buf, 0xFF, sizeof(oob_buf));
	fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
	if (!write_bmt_to_flash(dat_buf, oob_buf))
		return false;
	mark_block_bad_bmt(offset);
	return true;
}
EXPORT_SYMBOL_GPL(update_bmt);
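/*
 * Usage sketch (illustrative): when a page program fails, the write path
 * hands the failing offset plus the data/OOB it was trying to write to
 * update_bmt(), so the block is remapped and its contents migrated. The
 * variable names below are assumptions made for this example.
 *
 *	if (status & NAND_STATUS_FAIL) {
 *		if (!update_bmt(fail_offset, UPDATE_WRITE_FAIL, data_buf, fdm_buf))
 *			pr_err("BMT update failed at 0x%llx\n", fail_offset);
 *	}
 */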
/*******************************************************************
 * [BMT Interface]
 *
 * Description:
 *	Given a block index, return the mapped index if the block is
 *	mapped, else return the given index.
 *
 * Parameter:
 *	index: a block index; this value should not exceed
 *	system_block_count.
 *
 * Return:
 *	the mapped block index, or the given index if no mapping exists.
 *******************************************************************/
u16 get_mapping_block_index(int index)
{
	int i;

	/* return index; */
	if (index > system_block_count)
		return index;
	for (i = 0; i < bmt.mapped_count; i++) {
		if (bmt.table[i].bad_index == index)
			return bmt.table[i].mapped_index;
	}
	return index;
}
EXPORT_SYMBOL_GPL(get_mapping_block_index);
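/*
 * Usage sketch (illustrative): read/write paths translate a logical block
 * number to its physical replacement before computing the page address.
 * The variable names below are assumptions made for this example.
 *
 *	u16 phys_block = get_mapping_block_index(logical_block);
 *	u32 page = phys_block * pages_per_block + page_in_block;
 */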
MODULE_LICENSE("GPL");
MODULE_AUTHOR("MediaTek");
MODULE_DESCRIPTION("Bad Block mapping management for MediaTek NAND Flash Driver");