bmt.c
/*
 * Copyright (C) 2015 MediaTek Inc.
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <mach/mtk_nand.h>
#include <mtk_nand_util.h>
#include "bmt.h"
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/div64.h>

static const char MAIN_SIGNATURE[] = "BMT";
static const char OOB_SIGNATURE[] = "bmt";
#define SIGNATURE_SIZE (3)

#define MAX_DAT_SIZE 0x4000
#define MAX_OOB_SIZE 0x800

static struct mtd_info *mtd_bmt;
static struct nand_chip *nand_chip_bmt;

#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
#define BLOCK_SIZE_BMT (gn_devinfo.blocksize * 1024)
#define PAGE_PER_SIZE_BMT ((gn_devinfo.blocksize * 1024) / gn_devinfo.pagesize)
#define PAGE_ADDR(block) ((block) * PAGE_PER_SIZE_BMT)
#else
#define BLOCK_SIZE_BMT (1 << nand_chip_bmt->phys_erase_shift)
#define PAGE_PER_SIZE_BMT (1 << (nand_chip_bmt->phys_erase_shift - nand_chip_bmt->page_shift))
#define OFFSET(block) (((u64)block) * BLOCK_SIZE_BMT)
#define PAGE_ADDR(block) ((block) * PAGE_PER_SIZE_BMT)
#endif
#define PAGE_SIZE_BMT (1 << nand_chip_bmt->page_shift)

struct phys_bmt_header {
	char signature[3];
	u8 version;
	u8 bad_count;		/* bad block count in pool */
	u8 mapped_count;	/* mapped block count in pool */
	u8 checksum;
	u8 reserved[13];
};

struct phys_bmt_struct {
	struct phys_bmt_header header;
	bmt_entry table[MAX_BMT_SIZE];
};

struct bmt_oob_data {
	char signature[3];
};

/*********************************************************************
 * Flash is split into 2 parts: the system part is for normal       *
 * system usage (its size is system_block_count); the other part is *
 * the replace pool.                                                *
 * +-------------------------------------------------+             *
 * | system_block_count | bmt_block_count |                         *
 * +-------------------------------------------------+             *
 *********************************************************************/
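/*
 * Worked example (hypothetical numbers): if chip->chipsize works out to
 * 4016 blocks and init_bmt(chip, 80) is called, then blocks 0..4015 are
 * the system area, blocks 4016..4095 form the replace pool, and
 * total_block_count = 4016 + 80 = 4096.
 */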
static u32 total_block_count;	/* total block number in flash */
u32 system_block_count;
static int bmt_block_count;	/* size of the replace pool, in blocks */
/* static int bmt_count;	blocks used in bmt */
static int page_per_block;	/* pages per block */
static u32 bmt_block_index;	/* block index of the BMT table itself */
static bmt_struct bmt;		/* dynamically created global bmt table */
static u8 dat_buf[MAX_DAT_SIZE];
static u8 oob_buf[MAX_OOB_SIZE];
static bool pool_erased;

/***************************************************************
 *
 * Interface adaptors for preloader/uboot/kernel.
 * These interfaces operate on physical addresses and read/write
 * physical data.
 *
 ***************************************************************/
int nand_read_page_bmt(u32 page, u8 *dat, u8 *oob)
{
	return mtk_nand_exec_read_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob);
}

bool nand_block_bad_bmt(u64 offset)
{
	return mtk_nand_block_bad_hw(mtd_bmt, offset);
}

bool nand_erase_bmt(u64 offset)
{
	int status;

	if (offset < 0x20000)
		MSG(INIT, "erase offset: 0x%llx\n", offset);
	/* the nand_chip structure doesn't have an erase function defined */
	status = mtk_nand_erase_hw(mtd_bmt, (u32)(offset >> nand_chip_bmt->page_shift));
	if (status & NAND_STATUS_FAIL)
		return false;
	else
		return true;
}

int mark_block_bad_bmt(u64 offset)
{
	return mtk_nand_block_markbad_hw(mtd_bmt, offset);	/* mark_block_bad_hw(offset); */
}

bool nand_write_page_bmt(u32 page, u8 *dat, u8 *oob)
{
	if (mtk_nand_exec_write_page(mtd_bmt, page, PAGE_SIZE_BMT, dat, oob))
		return false;
	else
		return true;
}

/***************************************************************
 *
 * static internal functions
 *
 ***************************************************************/
static void dump_bmt_info(bmt_struct *bmt)
{
	int i;

	MSG(INIT, "BMT v%d. total %d mapping:\n", bmt->version, bmt->mapped_count);
	for (i = 0; i < bmt->mapped_count; i++)
		MSG(INIT, "\t0x%x -> 0x%x\n", bmt->table[i].bad_index, bmt->table[i].mapped_index);
}

static bool match_bmt_signature(u8 *dat, u8 *oob)
{
	if (memcmp(dat + MAIN_SIGNATURE_OFFSET, MAIN_SIGNATURE, SIGNATURE_SIZE))
		return false;
	if (memcmp(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE))
		MSG(INIT, "main signature matches, oob signature doesn't, but ignore\n");
	return true;
}

static u8 cal_bmt_checksum(struct phys_bmt_struct *phys_table, int bmt_size)
{
	int i;
	u8 checksum = 0;
	u8 *dat = (u8 *)phys_table;

	checksum += phys_table->header.version;
	checksum += phys_table->header.mapped_count;
	dat += sizeof(struct phys_bmt_header);
	for (i = 0; i < bmt_size * sizeof(bmt_entry); i++)
		checksum += dat[i];
	return checksum;
}
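/*
 * Note: the checksum above deliberately covers only the version byte,
 * the mapped_count byte and the raw bytes of the mapping table; the
 * signature, bad_count and checksum fields themselves are excluded.
 * valid_bmt_data() below recomputes it with the same bmt_block_count
 * to verify a loaded table.
 */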
static int is_block_mapped(int index)
{
	int i;

	for (i = 0; i < bmt.mapped_count; i++) {
		if (index == bmt.table[i].mapped_index)
			return i;
	}
	return -1;
}

static bool is_page_used(u8 *dat, u8 *oob)
{
	if (2048 == PAGE_SIZE_BMT)
		return (oob[13] != 0xFF) || (oob[14] != 0xFF);
	return (oob[OOB_INDEX_OFFSET] != 0xFF) || (oob[OOB_INDEX_OFFSET + 1] != 0xFF);
}

static bool valid_bmt_data(struct phys_bmt_struct *phys_table)
{
	int i;
	u8 checksum = cal_bmt_checksum(phys_table, bmt_block_count);

	/* checksum correct? */
	if (phys_table->header.checksum != checksum) {
		MSG(INIT, "BMT data checksum error: %x %x\n", phys_table->header.checksum,
		    checksum);
		return false;
	}
	MSG(INIT, "BMT checksum is: 0x%x\n", phys_table->header.checksum);
	/* block indexes correct? */
	for (i = 0; i < phys_table->header.mapped_count; i++) {
		if (phys_table->table[i].bad_index >= total_block_count
		    || phys_table->table[i].mapped_index >= total_block_count
		    || phys_table->table[i].mapped_index < system_block_count) {
			MSG(INIT, "index error: bad_index: %d, mapped_index: %d\n",
			    phys_table->table[i].bad_index, phys_table->table[i].mapped_index);
			return false;
		}
	}
	/* passed all checks: valid bmt. */
	MSG(INIT, "Valid BMT, version v%d\n", phys_table->header.version);
	return true;
}

static void fill_nand_bmt_buffer(bmt_struct *bmt, u8 *dat, u8 *oob)
{
	struct phys_bmt_struct *phys_bmt;

	phys_bmt = kmalloc(sizeof(struct phys_bmt_struct), GFP_KERNEL);
	if (!phys_bmt) {
		MSG(INIT, "kmalloc for phys_bmt failed\n");
		return;
	}
	dump_bmt_info(bmt);
	/* fill the phys_bmt_struct structure from bmt_struct */
	memset(phys_bmt, 0xFF, sizeof(struct phys_bmt_struct));
	memcpy(phys_bmt->header.signature, MAIN_SIGNATURE, SIGNATURE_SIZE);
	phys_bmt->header.version = BMT_VERSION;
	/* phys_bmt.header.bad_count = bmt->bad_count; */
	phys_bmt->header.mapped_count = bmt->mapped_count;
	memcpy(phys_bmt->table, bmt->table, sizeof(bmt_entry) * bmt_block_count);
	phys_bmt->header.checksum = cal_bmt_checksum(phys_bmt, bmt_block_count);
	memcpy(dat + MAIN_SIGNATURE_OFFSET, phys_bmt, sizeof(struct phys_bmt_struct));
	memcpy(oob + OOB_SIGNATURE_OFFSET, OOB_SIGNATURE, SIGNATURE_SIZE);
	kfree(phys_bmt);
}

/* return the block index if a valid BMT is found, else return 0 */
static int load_bmt_data(int start, int pool_size)
{
	int bmt_index = start + pool_size - 1;	/* search from the end */
	struct phys_bmt_struct *phys_table;

	phys_table = kmalloc(sizeof(struct phys_bmt_struct), GFP_KERNEL);
	if (!phys_table) {
		MSG(INIT, "kmalloc for phys_table failed\n");
		return 0;
	}
	MSG(INIT, "[%s]: begin to search BMT from block 0x%x\n", __func__, bmt_index);
	for (bmt_index = start + pool_size - 1; bmt_index >= start; bmt_index--) {
		if (nand_block_bad_bmt(OFFSET(bmt_index))) {
			MSG(INIT, "Skip bad block: %d\n", bmt_index);
			continue;
		}
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
		if ((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
		    && (gn_devinfo.tlcControl.normaltlc))
			gn_devinfo.tlcControl.slcopmodeEn = true;
#endif
		pr_warn("[load_bmt_data] datbuf 0x%lx oobbuf 0x%lx\n",
			(unsigned long)dat_buf, (unsigned long)oob_buf);
		if (!nand_read_page_bmt(PAGE_ADDR(bmt_index), dat_buf, oob_buf)) {
			pr_debug("Error found when reading block %d\n", bmt_index);
			continue;
		}
		if (!match_bmt_signature(dat_buf, oob_buf))
			continue;
		pr_debug("Match bmt signature @ block: %d, page: %d\n",
			 bmt_index, PAGE_ADDR(bmt_index));
		memcpy(phys_table, dat_buf + MAIN_SIGNATURE_OFFSET, sizeof(struct phys_bmt_struct));
		if (!valid_bmt_data(phys_table)) {
			MSG(INIT, "BMT data is not correct: %d\n", bmt_index);
			continue;
		} else {
			bmt.mapped_count = phys_table->header.mapped_count;
			bmt.version = phys_table->header.version;
			/* bmt.bad_count = phys_table.header.bad_count; */
			memcpy(bmt.table, phys_table->table, bmt.mapped_count * sizeof(bmt_entry));
			MSG(INIT, "bmt found at block: %d, mapped blocks: %d\n", bmt_index,
			    bmt.mapped_count);
			kfree(phys_table);
			return bmt_index;
		}
	}
	MSG(INIT, "bmt block not found!\n");
	kfree(phys_table);
	return 0;
}

/*************************************************************************
 * Find an available block in the pool and erase it.                    *
 * start_from_end: if true, search from the end of flash;               *
 *                 else, search from the beginning of the pool.         *
 *************************************************************************/
static int find_available_block(bool start_from_end)
{
	int i;	/* , j; */
	int block = system_block_count;
	int direction;
	/* int avail_index = 0; */

	MSG(INIT, "Try to find_available_block, pool_erased: %d\n", pool_erased);
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	if ((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
	    && (gn_devinfo.tlcControl.normaltlc))
		gn_devinfo.tlcControl.slcopmodeEn = true;
#endif
	if (start_from_end) {
		block = total_block_count - 1;
		direction = -1;
	} else {
		block = system_block_count;
		direction = 1;
	}
	for (i = 0; i < bmt_block_count; i++, block += direction) {
		if (block == bmt_block_index) {
			MSG(INIT, "Skip bmt block 0x%x\n", block);
			continue;
		}
		if (nand_block_bad_bmt(OFFSET(block))) {
			MSG(INIT, "Skip bad block 0x%x\n", block);
			continue;
		}
		if (is_block_mapped(block) >= 0) {
			MSG(INIT, "Skip mapped block 0x%x\n", block);
			continue;
		}
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
		if (!nand_erase_bmt(((u64)block) * (gn_devinfo.blocksize * 1024))) {
			MSG(INIT, "Erase block 0x%x failed\n", block);
			mark_block_bad_bmt(((u64)block) * (gn_devinfo.blocksize * 1024));
			continue;
		}
#endif
		MSG(INIT, "Found available block 0x%x\n", block);
		return block;
	}
	return 0;
}

static unsigned short get_bad_index_from_oob(u8 *oob_buf)
{
	unsigned short index;

	if (2048 == PAGE_SIZE_BMT)
		memcpy(&index, oob_buf + 13, OOB_INDEX_SIZE);
	else
		memcpy(&index, oob_buf + OOB_INDEX_OFFSET, OOB_INDEX_SIZE);
	return index;
}

void set_bad_index_to_oob(u8 *oob, u16 index)
{
	if (2048 == PAGE_SIZE_BMT)
		memcpy(oob + 13, &index, sizeof(index));
	else
		memcpy(oob + OOB_INDEX_OFFSET, &index, sizeof(index));
}
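/*
 * The two helpers above fix the on-flash OOB convention for remapped
 * blocks: the 16-bit index of the original bad block lives at OOB byte
 * 13 on 2048-byte-page devices, and at OOB_INDEX_OFFSET otherwise.
 * A minimal round-trip sketch (illustrative only; `oob` stands for any
 * buffer of at least OOB_INDEX_OFFSET + OOB_INDEX_SIZE bytes):
 *
 *	u8 oob[64];
 *
 *	memset(oob, 0xFF, sizeof(oob));
 *	set_bad_index_to_oob(oob, 0x0123);
 *	// get_bad_index_from_oob(oob) now returns 0x0123; a fully
 *	// erased page reads back 0xFFFF, which reconstruct_bmt()
 *	// treats as "no mapping recorded".
 */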
static int migrate_from_bad(u64 offset, u8 *write_dat, u8 *write_oob)
{
	int page;
	u64 temp;
	u32 error_block;
	u32 error_page = (u32)(offset >> nand_chip_bmt->page_shift) % page_per_block;
	u32 orig_block;
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	u32 idx;
	bool tlc_mode_block = false;
	int bRet;
#endif
	int to_index;
	u32 tick = 1;

	temp = offset;
	do_div(temp, ((gn_devinfo.blocksize * 1024) & 0xFFFFFFFF));
	error_block = (u32)temp;
	orig_block = error_block;
	memcpy(oob_buf, write_oob, MAX_OOB_SIZE);
	to_index = find_available_block(false);
	if (!to_index) {
		MSG(INIT, "Cannot find an available block for BMT\n");
		return 0;
	}
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	if ((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
	    && (gn_devinfo.tlcControl.normaltlc)) {
		if (error_block >= system_block_count) {
			for (idx = 0; idx < bmt_block_count; idx++) {
				if (bmt.table[idx].mapped_index == error_block) {
					orig_block = bmt.table[idx].bad_index;
					break;
				}
			}
		}
		temp = (u64)orig_block & 0xFFFFFFFF;
		tlc_mode_block = mtk_block_istlc(temp * (gn_devinfo.blocksize * 1024));
		if (!tlc_mode_block) {
			gn_devinfo.tlcControl.slcopmodeEn = true;	/* slc mode */
			tick = 3;
		} else
			gn_devinfo.tlcControl.slcopmodeEn = false;
	}
#endif
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	if ((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
	    && (gn_devinfo.tlcControl.normaltlc) && tlc_mode_block) {
		if (error_block >= system_block_count)
			set_bad_index_to_oob(oob_buf, orig_block);
		else
			set_bad_index_to_oob(oob_buf, error_block);
		memcpy(nand_chip_bmt->oob_poi, oob_buf, mtd_bmt->oobsize);
		nand_erase_bmt(((u64)to_index) * (gn_devinfo.blocksize * 1024));
		bRet = mtk_nand_write_tlc_block_hw(mtd_bmt, nand_chip_bmt, write_dat, to_index);
		if (bRet != 0) {
			MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page);
			mark_block_bad_bmt(((u64)to_index) * (gn_devinfo.blocksize * 1024));
			return migrate_from_bad(offset, write_dat, write_oob);
		}
	} else
#endif
	{
		{	/* migrate the failing page first */
			MSG(INIT, "Write error page: 0x%x\n", error_page);
			if (!write_dat) {
				nand_read_page_bmt(PAGE_ADDR(error_block) + error_page, dat_buf, NULL);
				write_dat = dat_buf;
			}
			/* memcpy(oob_buf, write_oob, MAX_OOB_SIZE); */
			if (error_block < system_block_count)
				set_bad_index_to_oob(oob_buf, error_block);
			/* if error_block is already a mapped block, the
			 * original mapping index is in OOB. */
			if (!nand_write_page_bmt(PAGE_ADDR(to_index) + error_page, write_dat, oob_buf)) {
				MSG(INIT, "Write to page 0x%x fail\n", PAGE_ADDR(to_index) + error_page);
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
				mark_block_bad_bmt(((u64)to_index) * (gn_devinfo.blocksize * 1024));
#else
				mark_block_bad_bmt(OFFSET(to_index));
#endif
				return migrate_from_bad(offset, write_dat, write_oob);
			}
		}
		/* then copy the remaining used pages of the bad block */
		for (page = 0; page < page_per_block; page += tick) {
			if (page != error_page) {
				nand_read_page_bmt(PAGE_ADDR(error_block) + page, dat_buf, oob_buf);
				if (is_page_used(dat_buf, oob_buf)) {
					if (error_block < system_block_count)
						set_bad_index_to_oob(oob_buf, error_block);
					MSG(INIT, "\tmigrate page 0x%x to page 0x%x\n",
					    PAGE_ADDR(error_block) + page, PAGE_ADDR(to_index) + page);
					if (!nand_write_page_bmt(PAGE_ADDR(to_index) + page, dat_buf, oob_buf)) {
						MSG(INIT, "Write to page 0x%x fail\n",
						    PAGE_ADDR(to_index) + page);
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
						mark_block_bad_bmt(((u64)to_index) * (gn_devinfo.blocksize * 1024));
#else
						mark_block_bad_bmt(OFFSET(to_index));
#endif
						return migrate_from_bad(offset, write_dat, write_oob);
					}
				}
			}
		}
	}
	MSG(INIT, "Migrate from 0x%x to 0x%x done!\n", error_block, to_index);
	return to_index;
}
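/*
 * Note on termination: every write failure above marks the destination
 * block bad and recursively restarts migrate_from_bad() with a fresh
 * block. The recursion is bounded by the pool size: once
 * find_available_block() returns 0, the chain unwinds with 0 and
 * update_bmt() reports the migration as failed. write_bmt_to_flash()
 * below retries on failure with the same mark-bad-and-recurse pattern.
 */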
static bool write_bmt_to_flash(u8 *dat, u8 *oob)
{
	bool need_erase = true;

	MSG(INIT, "Try to write BMT\n");
	if (bmt_block_index == 0) {
		need_erase = false;
		bmt_block_index = find_available_block(true);
		if (!bmt_block_index) {
			MSG(INIT, "Cannot find an available block for BMT\n");
			return false;
		}
	}
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	if ((gn_devinfo.NAND_FLASH_TYPE == NAND_FLASH_TLC)
	    && (gn_devinfo.tlcControl.normaltlc))
		gn_devinfo.tlcControl.slcopmodeEn = true;	/* change to slc mode */
#endif
	MSG(INIT, "Find BMT block: 0x%x\n", bmt_block_index);
	/* write bmt to flash */
	if (need_erase) {
		if (!nand_erase_bmt(((u64)bmt_block_index) * (gn_devinfo.blocksize * 1024))) {
			MSG(INIT, "BMT block erase fail, mark bad: 0x%x\n", bmt_block_index);
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
			mark_block_bad_bmt(((u64)bmt_block_index) * (gn_devinfo.blocksize * 1024));
#else
			mark_block_bad_bmt(OFFSET(bmt_block_index));
#endif
			bmt_block_index = 0;
			return write_bmt_to_flash(dat, oob);	/* recursive call */
		}
	}
	if (!nand_write_page_bmt(PAGE_ADDR(bmt_block_index), dat, oob)) {
		MSG(INIT, "Write BMT data fail, need to write again\n");
#if defined(CONFIG_MTK_TLC_NAND_SUPPORT)
		mark_block_bad_bmt(((u64)bmt_block_index) * (gn_devinfo.blocksize * 1024));
#else
		mark_block_bad_bmt(OFFSET(bmt_block_index));
#endif
		/* bmt.bad_count++; */
		bmt_block_index = 0;
		return write_bmt_to_flash(dat, oob);	/* recursive call */
	}
	MSG(INIT, "Write BMT data to block 0x%x success\n", bmt_block_index);
	return true;
}
/*******************************************************************
 * Reconstruct the bmt. Called when the stored BMT info doesn't
 * match the bad block info in flash.
 *
 * Return NULL for failure.
 *******************************************************************/
bmt_struct *reconstruct_bmt(bmt_struct *bmt)
{
	int i;
	int index = system_block_count;
	unsigned short bad_index;
	int mapped;

	/* init everything in the BMT struct */
	bmt->version = BMT_VERSION;
	bmt->bad_count = 0;
	bmt->mapped_count = 0;
	memset(bmt->table, 0, bmt_block_count * sizeof(bmt_entry));
	for (i = 0; i < bmt_block_count; i++, index++) {
		if (nand_block_bad_bmt(OFFSET(index))) {
			MSG(INIT, "Skip bad block: 0x%x\n", index);
			/* bmt->bad_count++; */
			continue;
		}
		MSG(INIT, "read page: 0x%x\n", PAGE_ADDR(index));
		nand_read_page_bmt(PAGE_ADDR(index), dat_buf, oob_buf);
		/* if (mtk_nand_read_page_hw(PAGE_ADDR(index), dat_buf)) {
			MSG(INIT, "Error when read block %d\n", bmt_block_index);
			continue;
		} */
		bad_index = get_bad_index_from_oob(oob_buf);
		if (bad_index >= system_block_count) {
			MSG(INIT, "get bad index: 0x%x\n", bad_index);
			if (bad_index != 0xFFFF)
				MSG(INIT, "Invalid bad index found in block 0x%x, bad index 0x%x\n",
				    index, bad_index);
			continue;
		}
		MSG(INIT, "Block 0x%x is mapped to bad block: 0x%x\n", index, bad_index);
		if (!nand_block_bad_bmt(OFFSET(bad_index))) {
			MSG(INIT, "\tbut block 0x%x is not marked as bad, invalid mapping\n",
			    bad_index);
			/* no need to erase here; the block is erased later
			 * when the BMT is written */
			continue;
		}
		mapped = is_block_mapped(bad_index);
		if (mapped >= 0) {
			MSG(INIT,
			    "bad block 0x%x is mapped to 0x%x, likely caused by power loss; replace with the new one\n",
			    bmt->table[mapped].bad_index, bmt->table[mapped].mapped_index);
			bmt->table[mapped].mapped_index = index;	/* use the new one instead */
		} else {
			/* add the mapping to the BMT */
			bmt->table[bmt->mapped_count].bad_index = bad_index;
			bmt->table[bmt->mapped_count].mapped_index = index;
			bmt->mapped_count++;
		}
		MSG(INIT, "Add mapping: 0x%x -> 0x%x to BMT\n", bad_index, index);
	}
	MSG(INIT, "Scan of replace pool done, mapped blocks: %d\n", bmt->mapped_count);
	/* dump_bmt_info(bmt); */
	/* fill the NAND BMT buffer and write the BMT back */
	memset(oob_buf, 0xFF, sizeof(oob_buf));
	fill_nand_bmt_buffer(bmt, dat_buf, oob_buf);
	if (!write_bmt_to_flash(dat_buf, oob_buf))
		MSG(INIT, "TRAGEDY: cannot find a place to write BMT!!!!\n");
	return bmt;
}
/*******************************************************************
 * [BMT Interface]
 *
 * Description:
 *	Init the bmt from nand. Reconstruct it if not found or on
 *	data error.
 *
 * Parameter:
 *	size: size of the bmt / replace pool, in blocks
 *
 * Return:
 *	NULL for failure, a bmt struct for success
 *******************************************************************/
bmt_struct *init_bmt(struct nand_chip *chip, int size)
{
	struct mtk_nand_host *host;
	u64 temp;

	if (size > 0 && size < MAX_BMT_SIZE) {
		MSG(INIT, "Init bmt table, size: %d\n", size);
		bmt_block_count = size;
	} else {
		MSG(INIT, "Invalid bmt table size: %d\n", size);
		return NULL;
	}
	nand_chip_bmt = chip;
	temp = chip->chipsize;
	do_div(temp, ((gn_devinfo.blocksize * 1024) & 0xFFFFFFFF));
	system_block_count = (u32)temp;	/* (u32)(chip->chipsize / (gn_devinfo.blocksize * 1024)); */
	total_block_count = bmt_block_count + system_block_count;
	page_per_block = (gn_devinfo.blocksize * 1024) / gn_devinfo.pagesize;
	host = (struct mtk_nand_host *)chip->priv;
	mtd_bmt = &host->mtd;
	MSG(INIT, "mtd_bmt: %p, nand_chip_bmt: %p\n", mtd_bmt, nand_chip_bmt);
	MSG(INIT, "bmt count: %d, system count: %d\n", bmt_block_count, system_block_count);
	/* clear this flag; unmapped blocks in the pool will be erased */
	pool_erased = 0;
	memset(bmt.table, 0, size * sizeof(bmt_entry));
	bmt_block_index = load_bmt_data(system_block_count, size);
	if (bmt_block_index) {
		MSG(INIT, "Load bmt data success @ block 0x%x\n", bmt_block_index);
		dump_bmt_info(&bmt);
		return &bmt;
	}
	MSG(INIT, "Load bmt data failed, need to reconstruct!\n");
#if !defined(CONFIG_MTK_TLC_NAND_SUPPORT)
	if (reconstruct_bmt(&bmt))
		return &bmt;
#endif
	return NULL;
}
EXPORT_SYMBOL_GPL(init_bmt);
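/*
 * Usage sketch (illustrative, not part of this driver): a NAND host
 * driver would call init_bmt() once at probe time, after the chip has
 * been identified. The pool size (0x50 blocks) is a hypothetical value;
 * real drivers derive it from the partition layout.
 *
 *	static int example_probe_bmt(struct nand_chip *chip)
 *	{
 *		bmt_struct *bmt = init_bmt(chip, 0x50);
 *
 *		if (!bmt)
 *			return -EIO;	// no usable or reconstructible BMT
 *		return 0;
 *	}
 */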
/*******************************************************************
 * [BMT Interface]
 *
 * Description:
 *	Update the BMT.
 *
 * Parameter:
 *	offset: offset of the failing block/page.
 *	reason: update reason, see update_reason_t.
 *	dat/oob: data and oob buffers of the failed write.
 *
 * Return:
 *	true for success, false for failure.
 *******************************************************************/
bool update_bmt(u64 offset, update_reason_t reason, u8 *dat, u8 *oob)
{
	int map_index;
	int orig_bad_block = -1;
	int i;
	u64 temp;
	u32 bad_index;	/* = (u32)(offset / (gn_devinfo.blocksize * 1024)); */

	temp = offset;
	do_div(temp, ((gn_devinfo.blocksize * 1024) & 0xFFFFFFFF));
	bad_index = (u32)temp;
	if (reason == UPDATE_WRITE_FAIL) {
		MSG(INIT, "Write fail, need to migrate\n");
		map_index = migrate_from_bad(offset, dat, oob);
		if (!map_index) {
			MSG(INIT, "migrate fail\n");
			return false;
		}
	} else {
		map_index = find_available_block(false);
		if (!map_index) {
			MSG(INIT, "Cannot find a block in the pool\n");
			return false;
		}
	}
	/* now update the BMT */
	if (bad_index >= system_block_count) {
		/* a mapped (pool) block went bad: replace its entry.
		 * This assumes an entry for bad_index exists; if the loop
		 * finds none, i ends up at bmt_block_count. */
		for (i = 0; i < bmt_block_count; i++) {
			if (bmt.table[i].mapped_index == bad_index) {
				orig_bad_block = bmt.table[i].bad_index;
				break;
			}
		}
		/* bmt.bad_count++; */
		MSG(INIT, "Mapped block becomes bad, orig bad block is 0x%x\n", orig_bad_block);
		bmt.table[i].mapped_index = map_index;
	} else {
		bmt.table[bmt.mapped_count].mapped_index = map_index;
		bmt.table[bmt.mapped_count].bad_index = bad_index;
		bmt.mapped_count++;
	}
	memset(oob_buf, 0xFF, sizeof(oob_buf));
	fill_nand_bmt_buffer(&bmt, dat_buf, oob_buf);
	if (!write_bmt_to_flash(dat_buf, oob_buf))
		return false;
	mark_block_bad_bmt(offset);
	return true;
}
EXPORT_SYMBOL_GPL(update_bmt);
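/*
 * Usage sketch (illustrative): on a failed page program, a driver hands
 * the block to the BMT and, on success, retries on the remapped block.
 * offset, page_dat and page_oob are hypothetical caller-side names.
 *
 *	if (program_failed) {
 *		if (!update_bmt(offset, UPDATE_WRITE_FAIL, page_dat, page_oob))
 *			return -EIO;	// pool exhausted or BMT write failed
 *		// data has been migrated; subsequent accesses resolve
 *		// through get_mapping_block_index()
 *	}
 */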
/*******************************************************************
 * [BMT Interface]
 *
 * Description:
 *	Given a block index, return the mapped index if it's mapped,
 *	else return the given index.
 *
 * Parameter:
 *	index: a block index; indexes beyond system_block_count are
 *	returned unchanged.
 *
 * Return:
 *	The physical block index to use for this logical block.
 *******************************************************************/
u16 get_mapping_block_index(int index)
{
	int i;

	if (index > system_block_count)
		return index;
	for (i = 0; i < bmt.mapped_count; i++) {
		if (bmt.table[i].bad_index == index)
			return bmt.table[i].mapped_index;
	}
	return index;
}
EXPORT_SYMBOL_GPL(get_mapping_block_index);
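/*
 * Usage sketch (illustrative): callers translate a logical block to its
 * physical block before computing a page address. block, page_in_blk,
 * pages_per_block, data_buf and spare_buf are hypothetical caller-side
 * names.
 *
 *	u16 phys_block = get_mapping_block_index(block);
 *	u32 page = phys_block * pages_per_block + page_in_blk;
 *
 *	nand_read_page_bmt(page, data_buf, spare_buf);
 */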
MODULE_LICENSE("GPL");
MODULE_AUTHOR("MediaTek");
MODULE_DESCRIPTION("Bad block mapping management for MediaTek NAND flash driver");