io.c
  1. /*
  2. * Copyright (c) International Business Machines Corp., 2006
  3. * Copyright (c) Nokia Corporation, 2006, 2007
  4. *
  5. * This program is free software; you can redistribute it and/or modify
  6. * it under the terms of the GNU General Public License as published by
  7. * the Free Software Foundation; either version 2 of the License, or
  8. * (at your option) any later version.
  9. *
  10. * This program is distributed in the hope that it will be useful,
  11. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  12. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
  13. * the GNU General Public License for more details.
  14. *
  15. * You should have received a copy of the GNU General Public License
  16. * along with this program; if not, write to the Free Software
  17. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  18. *
  19. * Author: Artem Bityutskiy (Битюцкий Артём)
  20. */
  21. /*
  22. * UBI input/output sub-system.
  23. *
  24. * This sub-system provides a uniform way to work with all kinds of
  25. * underlying MTD devices. It also implements handy functions for reading and
  26. * writing UBI headers.
  27. *
  28. * We take a paranoid approach and do not trust what we read from the flash
  29. * media, in order to be more secure and robust. So this sub-system validates
  30. * every single header it reads from the flash media.
  31. *
  32. * Some words about how the eraseblock headers are stored.
  33. *
  34. * The erase counter header is always stored at offset zero. By default, the
  35. * VID header is stored after the EC header at the closest aligned offset
  36. * (i.e. aligned to the minimum I/O unit size). Data starts next to the VID
  37. * header at the closest aligned offset. But this default layout may be
  38. * changed. For example, for different reasons (e.g., optimization) UBI may be
  39. * asked to put the VID header at a further offset, and even at an unaligned
  40. * offset. Of course, if the offset of the VID header is unaligned, UBI adds
  41. * proper padding in front of it. Data offset may also be changed but it has to
  42. * be aligned.
  43. *
  44. * About minimal I/O units. In general, UBI assumes a flash device model where
  45. * there is only one minimal I/O unit size. E.g., in case of NOR flash it is 1,
  46. * in case of NAND flash it is a NAND page, etc. This is reported by MTD in the
  47. * @ubi->mtd->writesize field. But as an exception, UBI admits of using another
  48. * (smaller) minimal I/O unit size for EC and VID headers to make it possible
  49. * to do different optimizations.
  50. *
  51. * This is extremely useful in case of NAND flashes which admit of several
  52. * write operations to one NAND page. In this case UBI can fit EC and VID
  53. * headers into one NAND page. Thus, UBI may use "sub-page" size as the minimal
  54. * I/O unit for the headers (the @ubi->hdrs_min_io_size field). But it still
  55. * reports NAND page size (@ubi->min_io_size) as a minimal I/O unit for the UBI
  56. * users.
  57. *
  58. * Example: some Samsung NANDs with 2KiB pages allow 4x 512-byte writes, so
  59. * although the minimal I/O unit is 2K, UBI uses 512 bytes for EC and VID
  60. * headers.
  61. *
  62. * Q: why not just treat the sub-page as the minimal I/O unit of this flash
  63. * device, e.g., make @ubi->min_io_size = 512 in the example above?
  64. *
  65. * A: because when writing a sub-page, MTD still writes a full 2K page but the
  66. * bytes which are not relevant to the sub-page are 0xFF. So, basically,
  67. * writing 4x512 sub-pages is 4 times slower than writing one 2KiB NAND page.
  68. * Thus, we prefer to use sub-pages only for EC and VID headers.
  69. *
  70. * As it was noted above, the VID header may start at a non-aligned offset.
  71. * For example, in case of a 2KiB page NAND flash with a 512 bytes sub-page,
  72. * the VID header may reside at offset 1984 which is the last 64 bytes of the
  73. * last sub-page (EC header is always at offset zero). This causes some
  74. * difficulties when reading and writing VID headers.
  75. *
  76. * Suppose we have a 64-byte buffer and we read a VID header into it. We change
  77. * the data and want to write this VID header out. As we can only write in
  78. * 512-byte chunks, we have to allocate one more buffer and copy our VID header
  79. * to offset 448 of this buffer.
  80. *
  81. * The I/O sub-system does the following trick in order to avoid this extra
  82. * copy. It always allocates a @ubi->vid_hdr_alsize bytes buffer for the VID
  83. * header and returns a pointer to offset @ubi->vid_hdr_shift of this buffer.
  84. * When the VID header is being written out, it shifts the VID header pointer
  85. * back and writes the whole sub-page.
  86. */
  87. #include <linux/crc32.h>
  88. #include <linux/err.h>
  89. #include <linux/slab.h>
  90. #include "ubi.h"
  91. #ifdef CONFIG_PWR_LOSS_MTK_SPOH
  92. #include <mach/power_loss_test.h>
  93. #endif
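/*
 * Illustrative sketch, not part of the driver: how the VID header buffer
 * geometry described in the header comment above could be derived, assuming a
 * power-of-two sub-page size. For a 2048-byte NAND page with 512-byte
 * sub-pages and a VID header offset of 1984, this gives an aligned offset of
 * 1536 and a shift of 448. The helper name is hypothetical and exists only to
 * make the arithmetic concrete.
 */
static inline void example_vid_hdr_geometry(int vid_hdr_offset, int sub_page,
					    int *aloffset, int *shift)
{
	/* start of the sub-page that contains the VID header */
	*aloffset = vid_hdr_offset & ~(sub_page - 1);
	/* where the VID header sits inside that sub-page */
	*shift = vid_hdr_offset - *aloffset;
}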
  94. static int self_check_not_bad(const struct ubi_device *ubi, int pnum);
  95. static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum);
  96. static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
  97. const struct ubi_ec_hdr *ec_hdr);
  98. static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum);
  99. static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
  100. const struct ubi_vid_hdr *vid_hdr);
  101. static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
  102. int offset, int len);
  103. /**
  104. * ubi_io_read - read data from a physical eraseblock.
  105. * @ubi: UBI device description object
  106. * @buf: buffer where to store the read data
  107. * @pnum: physical eraseblock number to read from
  108. * @offset: offset within the physical eraseblock from where to read
  109. * @len: how many bytes to read
  110. *
  111. * This function reads data from offset @offset of physical eraseblock @pnum
  112. * and stores the read data in the @buf buffer. The following return codes are
  113. * possible:
  114. *
  115. * o %0 if all the requested data were successfully read;
  116. * o %UBI_IO_BITFLIPS if all the requested data were successfully read, but
  117. * correctable bit-flips were detected; this is harmless but may indicate
  118. * that this eraseblock may become bad soon (but it does not have to);
  119. * o %-EBADMSG if the MTD subsystem reported about data integrity problems, for
  120. * example it can be an ECC error in case of NAND; this most probably means
  121. * that the data is corrupted;
  122. * o %-EIO if some I/O error occurred;
  123. * o other negative error codes in case of other errors.
  124. */
  125. int ubi_io_read(const struct ubi_device *ubi, void *buf, int pnum, int offset,
  126. int len)
  127. {
  128. int err, retries = 0;
  129. size_t read;
  130. loff_t addr;
  131. dbg_io("read %d bytes from PEB %d:%d", len, pnum, offset);
  132. ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
  133. ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
  134. ubi_assert(len > 0);
  135. err = self_check_not_bad(ubi, pnum);
  136. if (err)
  137. return err;
  138. /*
  139. * Deliberately corrupt the buffer to improve robustness. Indeed, if we
  140. * do not do this, the following may happen:
  141. * 1. The buffer contains data from previous operation, e.g., read from
  142. * another PEB previously. The data looks as expected, e.g., if we
  143. * just do not read anything and return - the caller would not
  144. * notice this. E.g., if we are reading a VID header, the buffer may
  145. * contain a valid VID header from another PEB.
  146. * 2. The driver is buggy and returns us success or -EBADMSG or
  147. * -EUCLEAN, but it does not actually put any data to the buffer.
  148. *
  149. * This may confuse UBI or upper layers - they may think the buffer
  150. * contains valid data while in fact it is just old data. This is
  151. * especially possible because UBI (and UBIFS) relies on CRC, and
  152. * treats data as correct even in case of ECC errors if the CRC is
  153. * correct.
  154. *
  155. * Try to prevent this situation by changing the first byte of the
  156. * buffer.
  157. */
  158. *((uint8_t *)buf) ^= 0xFF;
  159. addr = (loff_t)pnum * ubi->peb_size + offset;
  160. retry:
  161. err = mtd_read(ubi->mtd, addr, len, &read, buf);
  162. if (err) {
  163. const char *errstr = mtd_is_eccerr(err) ? " (ECC error)" : "";
  164. if (mtd_is_bitflip(err)) {
  165. /*
  166. * -EUCLEAN is reported if there was a bit-flip which
  167. * was corrected, so this is harmless.
  168. *
  169. * We do not report it here unless debugging is
  170. * enabled. A corresponding message will be printed
  171. * later, when this PEB has been scrubbed.
  172. */
  173. ubi_msg("fixable bit-flip detected at PEB %d", pnum);
  174. ubi_assert(len == read);
  175. return UBI_IO_BITFLIPS;
  176. }
  177. if (retries++ < UBI_IO_RETRIES) {
  178. ubi_warn("error %d%s while reading %d bytes from PEB %d:%d, read only %zd bytes, retry",
  179. err, errstr, len, pnum, offset, read);
  180. /* yield(); */
  181. goto retry;
  182. }
  183. ubi_err("error %d%s while reading %d bytes from PEB %d:%d, read %zd bytes",
  184. err, errstr, len, pnum, offset, read);
  185. dump_stack();
  186. /*
  187. * The driver should never return -EBADMSG if it failed to read
  188. * all the requested data. But some buggy drivers might do
  189. * this, so we change it to -EIO.
  190. */
  191. if (read != len && mtd_is_eccerr(err)) {
  192. ubi_assert(0);
  193. err = -EIO;
  194. }
  195. } else {
  196. ubi_assert(len == read);
  197. if (ubi_dbg_is_bitflip(ubi)) {
  198. dbg_gen("bit-flip (emulated)");
  199. err = UBI_IO_BITFLIPS;
  200. }
  201. }
  202. return err;
  203. }
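/*
 * Illustrative sketch, not part of the driver: a typical caller pattern for
 * ubi_io_read(). %UBI_IO_BITFLIPS is treated as success, but the caller
 * remembers that the PEB should be scrubbed later. The function name and the
 * @scrub flag are hypothetical.
 */
static int example_read_and_flag_scrub(const struct ubi_device *ubi, void *buf,
					int pnum, int offset, int len, int *scrub)
{
	int err = ubi_io_read(ubi, buf, pnum, offset, len);

	if (err == UBI_IO_BITFLIPS) {
		*scrub = 1;	/* data is valid, but schedule scrubbing */
		return 0;
	}
	return err;	/* 0, -EBADMSG, -EIO or another error code */
}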
  204. /**
  205. * ubi_io_write - write data to a physical eraseblock.
  206. * @ubi: UBI device description object
  207. * @buf: buffer with the data to write
  208. * @pnum: physical eraseblock number to write to
  209. * @offset: offset within the physical eraseblock where to write
  210. * @len: how many bytes to write
  211. *
  212. * This function writes @len bytes of data from buffer @buf to offset @offset
  213. * of physical eraseblock @pnum. If all the data were successfully written,
  214. * zero is returned. If an error occurred, this function returns a negative
  215. * error code. If %-EIO is returned, the physical eraseblock most probably went
  216. * bad.
  217. *
  218. * Note, in case of an error, it is possible that something was still written
  219. * to the flash media, but it may be garbage.
  220. */
  221. int ubi_io_write(struct ubi_device *ubi, const void *buf, int pnum, int offset,
  222. int len)
  223. {
  224. int err;
  225. size_t written;
  226. loff_t addr;
  227. dbg_io("write %d bytes to PEB %d:%d", len, pnum, offset);
  228. ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
  229. ubi_assert(offset >= 0 && offset + len <= ubi->peb_size);
  230. ubi_assert(offset % ubi->hdrs_min_io_size == 0);
  231. ubi_assert(len > 0 && len % ubi->hdrs_min_io_size == 0);
  232. if (ubi->ro_mode) {
  233. ubi_err("read-only mode");
  234. return -EROFS;
  235. }
  236. err = self_check_not_bad(ubi, pnum);
  237. if (err)
  238. return err;
  239. /* The area we are writing to has to contain all 0xFF bytes */
  240. err = ubi_self_check_all_ff(ubi, pnum, offset, len);
  241. if (err)
  242. return err;
  243. if (offset >= ubi->leb_start) {
  244. /*
  245. * We write to the data area of the physical eraseblock. Make
  246. * sure it has valid EC and VID headers.
  247. */
  248. err = self_check_peb_ec_hdr(ubi, pnum);
  249. if (err)
  250. return err;
  251. err = self_check_peb_vid_hdr(ubi, pnum);
  252. if (err)
  253. return err;
  254. }
  255. if (ubi_dbg_is_write_failure(ubi)) {
  256. ubi_err("cannot write %d bytes to PEB %d:%d (emulated)",
  257. len, pnum, offset);
  258. dump_stack();
  259. return -EIO;
  260. }
  261. addr = (loff_t)pnum * ubi->peb_size + offset;
  262. err = mtd_write(ubi->mtd, addr, len, &written, buf);
  263. if (err) {
  264. ubi_err("error %d while writing %d bytes to PEB %d:%d, written %zd bytes",
  265. err, len, pnum, offset, written);
  266. dump_stack();
  267. ubi_dump_flash(ubi, pnum, offset, len);
  268. } else
  269. ubi_assert(written == len);
  270. if (!err) {
  271. err = self_check_write(ubi, buf, pnum, offset, len);
  272. if (err)
  273. return err;
  274. /*
  275. * Since we always write sequentially, the rest of the PEB has
  276. * to contain only 0xFF bytes.
  277. */
  278. offset += len;
  279. len = ubi->peb_size - offset;
  280. if (len)
  281. err = ubi_self_check_all_ff(ubi, pnum, offset, len);
  282. }
  283. return err;
  284. }
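/*
 * Illustrative sketch, not part of the driver: ubi_io_write() asserts that
 * both @offset and @len are multiples of @ubi->hdrs_min_io_size, so a caller
 * writing a short structure has to pad the buffer first. The function name is
 * hypothetical and @buf is assumed to be at least the padded size; padding
 * with 0xFF keeps the unused flash bytes in the erased state.
 */
static int example_write_padded(struct ubi_device *ubi, void *buf, int data_len,
				int pnum, int offset)
{
	int padded_len = ALIGN(data_len, ubi->hdrs_min_io_size);

	/* fill the tail of the buffer so the whole I/O unit can be written */
	memset(buf + data_len, 0xFF, padded_len - data_len);
	return ubi_io_write(ubi, buf, pnum, offset, padded_len);
}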
  285. /**
  286. * erase_callback - MTD erasure call-back.
  287. * @ei: MTD erase information object.
  288. *
  289. * Note, even though the MTD erase interface is asynchronous, all the current
  290. * implementations are synchronous anyway.
  291. */
  292. static void erase_callback(struct erase_info *ei)
  293. {
  294. wake_up_interruptible((wait_queue_head_t *)ei->priv);
  295. }
  296. /**
  297. * do_sync_erase - synchronously erase a physical eraseblock.
  298. * @ubi: UBI device description object
  299. * @pnum: the physical eraseblock number to erase
  300. *
  301. * This function synchronously erases physical eraseblock @pnum and returns
  302. * zero in case of success and a negative error code in case of failure. If
  303. * %-EIO is returned, the physical eraseblock most probably went bad.
  304. */
  305. static int do_sync_erase(struct ubi_device *ubi, int pnum)
  306. {
  307. int err, retries = 0;
  308. struct erase_info ei;
  309. wait_queue_head_t wq;
  310. dbg_io("erase PEB %d", pnum);
  311. ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
  312. if (ubi->ro_mode) {
  313. ubi_err("read-only mode");
  314. return -EROFS;
  315. }
  316. retry:
  317. init_waitqueue_head(&wq);
  318. memset(&ei, 0, sizeof(struct erase_info));
  319. ei.mtd = ubi->mtd;
  320. ei.addr = (loff_t)pnum * ubi->peb_size;
  321. ei.len = ubi->peb_size;
  322. ei.callback = erase_callback;
  323. ei.priv = (unsigned long)&wq;
  324. err = mtd_erase(ubi->mtd, &ei);
  325. atomic_inc(&ubi->ec_count); /*MTK*/
  326. if (err) {
  327. if (retries++ < UBI_IO_RETRIES) {
  328. ubi_warn("error %d while erasing PEB %d, retry",
  329. err, pnum);
  330. /* yield(); */
  331. goto retry;
  332. }
  333. ubi_err("cannot erase PEB %d, error %d", pnum, err);
  334. dump_stack();
  335. return err;
  336. }
  337. err = wait_event_interruptible(wq, ei.state == MTD_ERASE_DONE ||
  338. ei.state == MTD_ERASE_FAILED);
  339. if (err) {
  340. ubi_err("interrupted PEB %d erasure", pnum);
  341. return -EINTR;
  342. }
  343. if (ei.state == MTD_ERASE_FAILED) {
  344. if (retries++ < UBI_IO_RETRIES) {
  345. ubi_warn("error while erasing PEB %d, retry", pnum);
  346. /* yield(); */
  347. goto retry;
  348. }
  349. ubi_err("cannot erase PEB %d", pnum);
  350. dump_stack();
  351. return -EIO;
  352. }
  353. err = ubi_self_check_all_ff(ubi, pnum, 0, ubi->peb_size);
  354. if (err)
  355. return err;
  356. if (ubi_dbg_is_erase_failure(ubi)) {
  357. ubi_err("cannot erase PEB %d (emulated)", pnum);
  358. return -EIO;
  359. }
  360. return 0;
  361. }
  362. /* Patterns to write to a physical eraseblock when torturing it */
  363. static uint8_t patterns[] = {0xa5, 0x5a, 0x0};
  364. /**
  365. * torture_peb - test a supposedly bad physical eraseblock.
  366. * @ubi: UBI device description object
  367. * @pnum: the physical eraseblock number to test
  368. *
  369. * This function returns %-EIO if the physical eraseblock did not pass the
  370. * test, a positive number of erase operations done if the test was
  371. * successfully passed, and other negative error codes in case of other errors.
  372. */
  373. static int torture_peb(struct ubi_device *ubi, int pnum)
  374. {
  375. int err, i, patt_count;
  376. ubi_msg("run torture test for PEB %d", pnum);
  377. patt_count = ARRAY_SIZE(patterns);
  378. ubi_assert(patt_count > 0);
  379. #ifdef CONFIG_UBI_SHARE_BUFFER
  380. mutex_lock(&ubi_buf_mutex);
  381. #else
  382. mutex_lock(&ubi->buf_mutex);
  383. #endif
  384. for (i = 0; i < patt_count; i++) {
  385. err = do_sync_erase(ubi, pnum);
  386. if (err)
  387. goto out;
  388. /* Make sure the PEB contains only 0xFF bytes */
  389. err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
  390. if (err)
  391. goto out;
  392. err = ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->peb_size);
  393. if (err == 0) {
  394. ubi_err("erased PEB %d, but a non-0xFF byte found",
  395. pnum);
  396. err = -EIO;
  397. goto out;
  398. }
  399. /* Write a pattern and check it */
  400. memset(ubi->peb_buf, patterns[i], ubi->peb_size);
  401. err = ubi_io_write(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
  402. if (err)
  403. goto out;
  404. memset(ubi->peb_buf, ~patterns[i], ubi->peb_size);
  405. err = ubi_io_read(ubi, ubi->peb_buf, pnum, 0, ubi->peb_size);
  406. if (err)
  407. goto out;
  408. err = ubi_check_pattern(ubi->peb_buf, patterns[i],
  409. ubi->peb_size);
  410. if (err == 0) {
  411. ubi_err("pattern %x checking failed for PEB %d",
  412. patterns[i], pnum);
  413. err = -EIO;
  414. goto out;
  415. }
  416. }
  417. err = patt_count;
  418. ubi_msg("PEB %d passed torture test, do not mark it as bad", pnum);
  419. out:
  420. #ifdef CONFIG_UBI_SHARE_BUFFER
  421. mutex_unlock(&ubi_buf_mutex);
  422. #else
  423. mutex_unlock(&ubi->buf_mutex);
  424. #endif
  425. if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
  426. /*
  427. * If a bit-flip or data integrity error was detected, the test
  428. * has not passed because it happened on a freshly erased
  429. * physical eraseblock which means something is wrong with it.
  430. */
  431. ubi_err("read problems on freshly erased PEB %d, must be bad",
  432. pnum);
  433. err = -EIO;
  434. }
  435. return err;
  436. }
  437. /**
  438. * nor_erase_prepare - prepare a NOR flash PEB for erasure.
  439. * @ubi: UBI device description object
  440. * @pnum: physical eraseblock number to prepare
  441. *
  442. * NOR flashes, or at least some of them, have a peculiar embedded PEB erasure
  443. * algorithm: the PEB is first filled with zeroes, and only then erased. The
  444. * filling with zeroes starts from the end of the PEB. This was observed with
  445. * Spansion S29GL512N NOR flash.
  446. *
  447. * This means that in case of a power cut we may end up with intact data at the
  448. * beginning of the PEB, and all zeroes at the end of the PEB. In other words, the
  449. * EC and VID headers are OK, but a large chunk of data at the end of the PEB is
  450. * zeroed. This makes UBI mistakenly treat this PEB as used and associate it
  451. * with an LEB, which leads to subsequent failures (e.g., UBIFS fails).
  452. *
  453. * This function is called before erasing NOR PEBs and it zeroes out EC and VID
  454. * magic numbers in order to invalidate them and prevent the failures. Returns
  455. * zero in case of success and a negative error code in case of failure.
  456. */
  457. static int nor_erase_prepare(struct ubi_device *ubi, int pnum)
  458. {
  459. int err;
  460. size_t written;
  461. loff_t addr;
  462. uint32_t data = 0;
  463. struct ubi_ec_hdr ec_hdr;
  464. /*
  465. * Note, we cannot generally define VID header buffers on the stack,
  466. * because of the way we deal with these buffers (see the header
  467. * comment in this file). But we know this is a NOR-specific piece of
  468. * code, so we can do it here. But yes, this is error-prone and we should
  469. * (pre-)allocate a VID header buffer instead.
  470. */
  471. struct ubi_vid_hdr vid_hdr;
  472. /*
  473. * If the VID or EC headers are valid, we have to corrupt them before erasing.
  474. * It is important to first invalidate the EC header, and then the VID
  475. * header. Otherwise a power cut may lead to valid EC header and
  476. * invalid VID header, in which case UBI will treat this PEB as
  477. * corrupted and will try to preserve it, and print scary warnings.
  478. */
  479. addr = (loff_t)pnum * ubi->peb_size;
  480. err = ubi_io_read_ec_hdr(ubi, pnum, &ec_hdr, 0);
  481. if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
  482. err != UBI_IO_FF){
  483. err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
  484. if (err)
  485. goto error;
  486. }
  487. err = ubi_io_read_vid_hdr(ubi, pnum, &vid_hdr, 0);
  488. if (err != UBI_IO_BAD_HDR_EBADMSG && err != UBI_IO_BAD_HDR &&
  489. err != UBI_IO_FF){
  490. addr += ubi->vid_hdr_aloffset;
  491. err = mtd_write(ubi->mtd, addr, 4, &written, (void *)&data);
  492. if (err)
  493. goto error;
  494. }
  495. return 0;
  496. error:
  497. /*
  498. * The PEB contains a valid VID or EC header, but we cannot invalidate
  499. * it. Supposedly the flash media or the driver is screwed up, so
  500. * return an error.
  501. */
  502. ubi_err("cannot invalidate PEB %d, write returned %d", pnum, err);
  503. ubi_dump_flash(ubi, pnum, 0, ubi->peb_size);
  504. return -EIO;
  505. }
  506. /**
  507. * ubi_io_sync_erase - synchronously erase a physical eraseblock.
  508. * @ubi: UBI device description object
  509. * @pnum: physical eraseblock number to erase
  510. * @torture: if this physical eraseblock has to be tortured
  511. *
  512. * This function synchronously erases physical eraseblock @pnum. If @torture
  513. * flag is not zero, the physical eraseblock is checked by means of writing
  514. * different patterns to it and reading them back. If the torturing is enabled,
  515. * the physical eraseblock is erased more than once.
  516. *
  517. * This function returns the number of erasures made in case of success, %-EIO
  518. * if the erasure failed or the torturing test failed, and other negative error
  519. * codes in case of other errors. Note, %-EIO means that the physical
  520. * eraseblock is bad.
  521. */
  522. int ubi_io_sync_erase(struct ubi_device *ubi, int pnum, int torture)
  523. {
  524. int err, ret = 0;
  525. ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
  526. err = self_check_not_bad(ubi, pnum);
  527. if (err != 0)
  528. return err;
  529. if (ubi->ro_mode) {
  530. ubi_err("read-only mode");
  531. return -EROFS;
  532. }
  533. if (ubi->nor_flash) {
  534. err = nor_erase_prepare(ubi, pnum);
  535. if (err)
  536. return err;
  537. }
  538. if (torture) {
  539. ret = torture_peb(ubi, pnum);
  540. if (ret < 0)
  541. return ret;
  542. }
  543. err = do_sync_erase(ubi, pnum);
  544. if (err)
  545. return err;
  546. return ret + 1;
  547. }
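/*
 * Illustrative sketch, not part of the driver: how a caller could account for
 * the erase counter after ubi_io_sync_erase(), which returns the number of
 * erasures made (a positive number) on success. The function name is
 * hypothetical.
 */
static int example_erase_and_count(struct ubi_device *ubi, int pnum,
				   int torture, long long *ec)
{
	int err = ubi_io_sync_erase(ubi, pnum, torture);

	if (err < 0)
		return err;	/* %-EIO means the PEB is bad */
	*ec += err;		/* every erasure ages the PEB */
	return 0;
}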
  548. /**
  549. * ubi_io_is_bad - check if a physical eraseblock is bad.
  550. * @ubi: UBI device description object
  551. * @pnum: the physical eraseblock number to check
  552. *
  553. * This function returns a positive number if the physical eraseblock is bad,
  554. * zero if not, and a negative error code if an error occurred.
  555. */
  556. int ubi_io_is_bad(const struct ubi_device *ubi, int pnum)
  557. {
  558. struct mtd_info *mtd = ubi->mtd;
  559. ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
  560. if (ubi->bad_allowed) {
  561. int ret;
  562. ret = mtd_block_isbad(mtd, (loff_t)pnum * ubi->peb_size);
  563. if (ret < 0)
  564. ubi_err("error %d while checking if PEB %d is bad",
  565. ret, pnum);
  566. else if (ret)
  567. dbg_io("PEB %d is bad", pnum);
  568. return ret;
  569. }
  570. return 0;
  571. }
  572. /**
  573. * ubi_io_mark_bad - mark a physical eraseblock as bad.
  574. * @ubi: UBI device description object
  575. * @pnum: the physical eraseblock number to mark
  576. *
  577. * This function returns zero in case of success and a negative error code in
  578. * case of failure.
  579. */
  580. int ubi_io_mark_bad(const struct ubi_device *ubi, int pnum)
  581. {
  582. int err;
  583. struct mtd_info *mtd = ubi->mtd;
  584. ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
  585. if (ubi->ro_mode) {
  586. ubi_err("read-only mode");
  587. return -EROFS;
  588. }
  589. if (!ubi->bad_allowed)
  590. return 0;
  591. err = mtd_block_markbad(mtd, (loff_t)pnum * ubi->peb_size);
  592. if (err)
  593. ubi_err("cannot mark PEB %d bad, error %d", pnum, err);
  594. return err;
  595. }
  596. /**
  597. * validate_ec_hdr - validate an erase counter header.
  598. * @ubi: UBI device description object
  599. * @ec_hdr: the erase counter header to check
  600. *
  601. * This function returns zero if the erase counter header is OK, and %1 if
  602. * not.
  603. */
  604. static int validate_ec_hdr(const struct ubi_device *ubi,
  605. const struct ubi_ec_hdr *ec_hdr)
  606. {
  607. long long ec;
  608. int vid_hdr_offset, leb_start;
  609. ec = be64_to_cpu(ec_hdr->ec);
  610. vid_hdr_offset = be32_to_cpu(ec_hdr->vid_hdr_offset);
  611. leb_start = be32_to_cpu(ec_hdr->data_offset);
  612. if (ec_hdr->version != UBI_VERSION) {
  613. ubi_err("node with incompatible UBI version found: this UBI version is %d, image version is %d",
  614. UBI_VERSION, (int)ec_hdr->version);
  615. goto bad;
  616. }
  617. if (vid_hdr_offset != ubi->vid_hdr_offset) {
  618. ubi_err("bad VID header offset %d, expected %d",
  619. vid_hdr_offset, ubi->vid_hdr_offset);
  620. goto bad;
  621. }
  622. if (leb_start != ubi->leb_start) {
  623. ubi_err("bad data offset %d, expected %d",
  624. leb_start, ubi->leb_start);
  625. goto bad;
  626. }
  627. if (ec < 0 || ec > UBI_MAX_ERASECOUNTER) {
  628. ubi_err("bad erase counter %lld", ec);
  629. goto bad;
  630. }
  631. return 0;
  632. bad:
  633. ubi_err("bad EC header");
  634. ubi_dump_ec_hdr(ec_hdr);
  635. dump_stack();
  636. return 1;
  637. }
  638. /**
  639. * ubi_io_read_ec_hdr - read and check an erase counter header.
  640. * @ubi: UBI device description object
  641. * @pnum: physical eraseblock to read from
  642. * @ec_hdr: a &struct ubi_ec_hdr object where to store the read erase counter
  643. * header
  644. * @verbose: be verbose if the header is corrupted or was not found
  645. *
  646. * This function reads erase counter header from physical eraseblock @pnum and
  647. * stores it in @ec_hdr. This function also checks CRC checksum of the read
  648. * erase counter header. The following codes may be returned:
  649. *
  650. * o %0 if the CRC checksum is correct and the header was successfully read;
  651. * o %UBI_IO_BITFLIPS if the CRC is correct, but bit-flips were detected
  652. * and corrected by the flash driver; this is harmless but may indicate that
  653. * this eraseblock may become bad soon (but it does not have to);
  654. * o %UBI_IO_BAD_HDR if the erase counter header is corrupted (a CRC error);
  655. * o %UBI_IO_BAD_HDR_EBADMSG is the same as %UBI_IO_BAD_HDR, but there also was
  656. * a data integrity error (uncorrectable ECC error in case of NAND);
  657. * o %UBI_IO_FF if only 0xFF bytes were read (the PEB is supposedly empty)
  658. * o a negative error code in case of failure.
  659. */
  660. int ubi_io_read_ec_hdr(struct ubi_device *ubi, int pnum,
  661. struct ubi_ec_hdr *ec_hdr, int verbose)
  662. {
  663. int err, read_err;
  664. uint32_t crc, magic, hdr_crc;
  665. dbg_io("read EC header from PEB %d", pnum);
  666. ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
  667. read_err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
  668. if (read_err) {
  669. if (read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
  670. return read_err;
  671. /*
  672. * We read all the data, but either a correctable bit-flip
  673. * occurred, or MTD reported a data integrity error
  674. * (uncorrectable ECC error in case of NAND). The former is
  675. * harmless, the latter may mean that the read data is
  676. * corrupted. But we have a CRC checksum and we will detect
  677. * this. If the EC header is still OK, we just report it as
  678. * a bit-flip, to force scrubbing.
  679. */
  680. }
  681. magic = be32_to_cpu(ec_hdr->magic);
  682. if (magic != UBI_EC_HDR_MAGIC) {
  683. if (mtd_is_eccerr(read_err))
  684. return UBI_IO_BAD_HDR_EBADMSG;
  685. /*
  686. * The magic field is wrong. Let's check if we have read all
  687. * 0xFF. If yes, this physical eraseblock is assumed to be
  688. * empty.
  689. */
  690. if (ubi_check_pattern(ec_hdr, 0xFF, UBI_EC_HDR_SIZE)) {
  691. /* The physical eraseblock is supposedly empty */
  692. if (verbose)
  693. ubi_warn("no EC header found at PEB %d, only 0xFF bytes",
  694. pnum);
  695. dbg_bld("no EC header found at PEB %d, only 0xFF bytes",
  696. pnum);
  697. if (!read_err)
  698. return UBI_IO_FF;
  699. else
  700. return UBI_IO_FF_BITFLIPS;
  701. }
  702. /*
  703. * This is not a valid erase counter header, and these are not
  704. * 0xFF bytes. Report that the header is corrupted.
  705. */
  706. if (verbose) {
  707. ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
  708. pnum, magic, UBI_EC_HDR_MAGIC);
  709. ubi_dump_ec_hdr(ec_hdr);
  710. }
  711. dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
  712. pnum, magic, UBI_EC_HDR_MAGIC);
  713. return UBI_IO_BAD_HDR;
  714. }
  715. crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
  716. hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
  717. if (hdr_crc != crc) {
  718. if (verbose) {
  719. ubi_warn("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
  720. pnum, crc, hdr_crc);
  721. ubi_dump_ec_hdr(ec_hdr);
  722. }
  723. dbg_bld("bad EC header CRC at PEB %d, calculated %#08x, read %#08x",
  724. pnum, crc, hdr_crc);
  725. if (!read_err)
  726. return UBI_IO_BAD_HDR;
  727. else
  728. return UBI_IO_BAD_HDR_EBADMSG;
  729. }
  730. /* And of course validate what has just been read from the media */
  731. err = validate_ec_hdr(ubi, ec_hdr);
  732. if (err) {
  733. ubi_err("validation failed for PEB %d", pnum);
  734. return -EINVAL;
  735. }
  736. /*
  737. * If there was %-EBADMSG, but the header CRC is still OK, report about
  738. * a bit-flip to force scrubbing on this PEB.
  739. */
  740. return read_err ? UBI_IO_BITFLIPS : 0;
  741. }
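/*
 * Illustrative sketch, not part of the driver: distinguishing the return
 * codes of ubi_io_read_ec_hdr() the way an attach-time scan might. The
 * function name and the return convention are hypothetical.
 */
static int example_classify_peb(struct ubi_device *ubi, int pnum,
				struct ubi_ec_hdr *ec_hdr)
{
	int err = ubi_io_read_ec_hdr(ubi, pnum, ec_hdr, 0);

	switch (err) {
	case 0:
	case UBI_IO_BITFLIPS:
		return 0;	/* usable header (may still need scrubbing) */
	case UBI_IO_FF:
	case UBI_IO_FF_BITFLIPS:
		return 1;	/* supposedly empty PEB */
	case UBI_IO_BAD_HDR:
	case UBI_IO_BAD_HDR_EBADMSG:
		return 2;	/* corrupted EC header */
	default:
		return err;	/* I/O or other error */
	}
}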
  742. /**
  743. * ubi_io_write_ec_hdr - write an erase counter header.
  744. * @ubi: UBI device description object
  745. * @pnum: physical eraseblock to write to
  746. * @ec_hdr: the erase counter header to write
  747. *
  748. * This function writes erase counter header described by @ec_hdr to physical
  749. * eraseblock @pnum. It also fills most fields of @ec_hdr before writing, so
  750. * the caller does not have to fill them. Callers must only fill the @ec_hdr->ec
  751. * field.
  752. *
  753. * This function returns zero in case of success and a negative error code in
  754. * case of failure. If %-EIO is returned, the physical eraseblock most probably
  755. * went bad.
  756. */
  757. int ubi_io_write_ec_hdr(struct ubi_device *ubi, int pnum,
  758. struct ubi_ec_hdr *ec_hdr)
  759. {
  760. int err;
  761. uint32_t crc;
  762. dbg_io("write EC header to PEB %d", pnum);
  763. ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
  764. ec_hdr->magic = cpu_to_be32(UBI_EC_HDR_MAGIC);
  765. ec_hdr->version = UBI_VERSION;
  766. ec_hdr->vid_hdr_offset = cpu_to_be32(ubi->vid_hdr_offset);
  767. ec_hdr->data_offset = cpu_to_be32(ubi->leb_start);
  768. ec_hdr->image_seq = cpu_to_be32(ubi->image_seq);
  769. crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
  770. ec_hdr->hdr_crc = cpu_to_be32(crc);
  771. err = self_check_ec_hdr(ubi, pnum, ec_hdr);
  772. if (err)
  773. return err;
  774. #ifdef CONFIG_PWR_LOSS_MTK_SPOH
  775. PL_RESET_ON_CASE("NAND", "WRITE_EC_Header");
  776. #endif
  777. err = ubi_io_write(ubi, ec_hdr, pnum, 0, ubi->ec_hdr_alsize);
  778. return err;
  779. }
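/*
 * Illustrative sketch, not part of the driver: the only field a caller of
 * ubi_io_write_ec_hdr() has to set is @ec_hdr->ec; everything else is filled
 * in by the function itself. The helper name is hypothetical.
 */
static int example_write_new_ec(struct ubi_device *ubi, int pnum, long long ec)
{
	struct ubi_ec_hdr *ec_hdr;
	int err;

	ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
	if (!ec_hdr)
		return -ENOMEM;

	ec_hdr->ec = cpu_to_be64(ec);	/* the only caller-supplied field */
	err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
	kfree(ec_hdr);
	return err;
}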
  780. /**
  781. * validate_vid_hdr - validate a volume identifier header.
  782. * @ubi: UBI device description object
  783. * @vid_hdr: the volume identifier header to check
  784. *
  785. * This function checks the data stored in the volume identifier header
  786. * @vid_hdr. It returns zero if the VID header is OK and %1 if not.
  787. */
  788. static int validate_vid_hdr(const struct ubi_device *ubi,
  789. const struct ubi_vid_hdr *vid_hdr)
  790. {
  791. int vol_type = vid_hdr->vol_type;
  792. int copy_flag = vid_hdr->copy_flag;
  793. int vol_id = be32_to_cpu(vid_hdr->vol_id);
  794. int lnum = be32_to_cpu(vid_hdr->lnum);
  795. int compat = vid_hdr->compat;
  796. int data_size = be32_to_cpu(vid_hdr->data_size);
  797. int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
  798. int data_pad = be32_to_cpu(vid_hdr->data_pad);
  799. int data_crc = be32_to_cpu(vid_hdr->data_crc);
  800. int usable_leb_size = ubi->leb_size - data_pad;
  801. if (copy_flag != 0 && copy_flag != 1) {
  802. ubi_err("bad copy_flag");
  803. goto bad;
  804. }
  805. if (vol_id < 0 || lnum < 0 || data_size < 0 || used_ebs < 0 ||
  806. data_pad < 0) {
  807. ubi_err("negative values");
  808. goto bad;
  809. }
  810. if (vol_id >= UBI_MAX_VOLUMES && vol_id < UBI_INTERNAL_VOL_START) {
  811. ubi_err("bad vol_id");
  812. goto bad;
  813. }
  814. if (vol_id < UBI_INTERNAL_VOL_START && compat != 0) {
  815. ubi_err("bad compat");
  816. goto bad;
  817. }
  818. if (vol_id >= UBI_INTERNAL_VOL_START && compat != UBI_COMPAT_DELETE &&
  819. compat != UBI_COMPAT_RO && compat != UBI_COMPAT_PRESERVE &&
  820. compat != UBI_COMPAT_REJECT) {
  821. #if defined(CONFIG_MTK_MLC_NAND_SUPPORT)
  822. if (vol_id == UBI_LAYOUT_VOLUME_ID) {
  823. ubi_err("bad compat");
  824. goto bad;
  825. }
  826. if (vol_id != UBI_BACKUP_VOLUME_ID) {
  827. ubi_err("bad compat");
  828. goto bad;
  829. }
  830. #endif
  831. }
  832. if (vol_type != UBI_VID_DYNAMIC && vol_type != UBI_VID_STATIC) {
  833. ubi_err("bad vol_type");
  834. goto bad;
  835. }
  836. if (data_pad >= ubi->leb_size / 2) {
  837. ubi_err("bad data_pad");
  838. goto bad;
  839. }
  840. if (vol_type == UBI_VID_STATIC) {
  841. /*
  842. * Although from a high-level point of view static volumes may
  843. * contain zero bytes of data, no VID header can contain
  844. * zeros in these fields, because empty volumes do not have
  845. * mapped logical eraseblocks.
  846. */
  847. if (used_ebs == 0) {
  848. ubi_err("zero used_ebs");
  849. goto bad;
  850. }
  851. if (data_size == 0) {
  852. ubi_err("zero data_size");
  853. goto bad;
  854. }
  855. if (lnum < used_ebs - 1) {
  856. if (data_size != usable_leb_size) {
  857. ubi_err("bad data_size");
  858. goto bad;
  859. }
  860. } else if (lnum == used_ebs - 1) {
  861. if (data_size == 0) {
  862. ubi_err("bad data_size at last LEB");
  863. goto bad;
  864. }
  865. } else {
  866. ubi_err("too high lnum");
  867. goto bad;
  868. }
  869. } else {
  870. if (copy_flag == 0) {
  871. if (data_crc != 0) {
  872. ubi_err("non-zero data CRC");
  873. goto bad;
  874. }
  875. if (data_size != 0) {
  876. ubi_err("non-zero data_size");
  877. goto bad;
  878. }
  879. } else {
  880. if (data_size == 0) {
  881. ubi_err("zero data_size of copy");
  882. goto bad;
  883. }
  884. }
  885. if (used_ebs != 0) {
  886. ubi_err("bad used_ebs");
  887. goto bad;
  888. }
  889. }
  890. return 0;
  891. bad:
  892. ubi_err("bad VID header");
  893. ubi_dump_vid_hdr(vid_hdr);
  894. dump_stack();
  895. return 1;
  896. }
  897. /**
  898. * ubi_io_read_vid_hdr - read and check a volume identifier header.
  899. * @ubi: UBI device description object
  900. * @pnum: physical eraseblock number to read from
  901. * @vid_hdr: &struct ubi_vid_hdr object where to store the read volume
  902. * identifier header
  903. * @verbose: be verbose if the header is corrupted or wasn't found
  904. *
  905. * This function reads the volume identifier header from physical eraseblock
  906. * @pnum and stores it in @vid_hdr. It also checks CRC checksum of the read
  907. * volume identifier header. The error codes are the same as in
  908. * 'ubi_io_read_ec_hdr()'.
  909. *
  910. * Note, the implementation of this function is also very similar to
  911. * 'ubi_io_read_ec_hdr()', so refer to the commentaries in 'ubi_io_read_ec_hdr()'.
  912. */
  913. int ubi_io_read_vid_hdr(struct ubi_device *ubi, int pnum,
  914. struct ubi_vid_hdr *vid_hdr, int verbose)
  915. {
  916. int err, read_err;
  917. uint32_t crc, magic, hdr_crc;
  918. void *p;
  919. dbg_io("read VID header from PEB %d", pnum);
  920. ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
  921. p = (char *)vid_hdr - ubi->vid_hdr_shift;
  922. read_err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
  923. ubi->vid_hdr_alsize);
  924. if (read_err && read_err != UBI_IO_BITFLIPS && !mtd_is_eccerr(read_err))
  925. return read_err;
  926. magic = be32_to_cpu(vid_hdr->magic);
  927. if (magic != UBI_VID_HDR_MAGIC) {
  928. if (mtd_is_eccerr(read_err))
  929. return UBI_IO_BAD_HDR_EBADMSG;
  930. if (ubi_check_pattern(vid_hdr, 0xFF, UBI_VID_HDR_SIZE)) {
  931. if (verbose)
  932. ubi_warn("no VID header found at PEB %d, only 0xFF bytes",
  933. pnum);
  934. dbg_bld("no VID header found at PEB %d, only 0xFF bytes",
  935. pnum);
  936. if (!read_err)
  937. return UBI_IO_FF;
  938. else
  939. return UBI_IO_FF_BITFLIPS;
  940. }
  941. if (verbose) {
  942. ubi_warn("bad magic number at PEB %d: %08x instead of %08x",
  943. pnum, magic, UBI_VID_HDR_MAGIC);
  944. ubi_dump_vid_hdr(vid_hdr);
  945. }
  946. dbg_bld("bad magic number at PEB %d: %08x instead of %08x",
  947. pnum, magic, UBI_VID_HDR_MAGIC);
  948. return UBI_IO_BAD_HDR;
  949. }
  950. crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
  951. hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
  952. if (hdr_crc != crc) {
  953. if (verbose) {
  954. ubi_warn("bad CRC at PEB %d, calculated %#08x, read %#08x",
  955. pnum, crc, hdr_crc);
  956. ubi_dump_vid_hdr(vid_hdr);
  957. }
  958. dbg_bld("bad CRC at PEB %d, calculated %#08x, read %#08x",
  959. pnum, crc, hdr_crc);
  960. if (!read_err)
  961. return UBI_IO_BAD_HDR;
  962. else
  963. return UBI_IO_BAD_HDR_EBADMSG;
  964. }
  965. err = validate_vid_hdr(ubi, vid_hdr);
  966. if (err) {
  967. ubi_err("validation failed for PEB %d", pnum);
  968. return -EINVAL;
  969. }
  970. return read_err ? UBI_IO_BITFLIPS : 0;
  971. }
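/*
 * Illustrative sketch, not part of the driver: VID header buffers have to come
 * from ubi_zalloc_vid_hdr()/ubi_free_vid_hdr() so that the buffer-shifting
 * trick described at the top of this file works; a plain kmalloc'ed or stack
 * buffer would not leave room in front of the header. The function name is
 * hypothetical.
 */
static int example_peek_vid_hdr(struct ubi_device *ubi, int pnum, int *vol_id)
{
	struct ubi_vid_hdr *vid_hdr;
	int err;

	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
	if (!vid_hdr)
		return -ENOMEM;

	err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 0);
	if (err == 0 || err == UBI_IO_BITFLIPS) {
		*vol_id = be32_to_cpu(vid_hdr->vol_id);
		err = 0;
	}
	ubi_free_vid_hdr(ubi, vid_hdr);
	return err;
}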
  972. /**
  973. * ubi_io_write_vid_hdr - write a volume identifier header.
  974. * @ubi: UBI device description object
  975. * @pnum: the physical eraseblock number to write to
  976. * @vid_hdr: the volume identifier header to write
  977. *
  978. * This function writes the volume identifier header described by @vid_hdr to
  979. * physical eraseblock @pnum. This function automatically fills the
  980. * @vid_hdr->magic and the @vid_hdr->version fields, as well as calculates
  981. * header CRC checksum and stores it at vid_hdr->hdr_crc.
  982. *
  983. * This function returns zero in case of success and a negative error code in
  984. * case of failure. If %-EIO is returned, the physical eraseblock probably went
  985. * bad.
  986. */
  987. int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
  988. struct ubi_vid_hdr *vid_hdr)
  989. {
  990. int err;
  991. uint32_t crc;
  992. void *p;
  993. dbg_io("write VID header to PEB %d", pnum);
  994. ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
  995. err = self_check_peb_ec_hdr(ubi, pnum);
  996. if (err)
  997. return err;
  998. vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
  999. vid_hdr->version = UBI_VERSION;
  1000. crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
  1001. vid_hdr->hdr_crc = cpu_to_be32(crc);
  1002. err = self_check_vid_hdr(ubi, pnum, vid_hdr);
  1003. if (err)
  1004. return err;
  1005. #ifdef CONFIG_MTD_UBI_LOWPAGE_BACKUP
  1006. {
  1007. int vol_id = be32_to_cpu(vid_hdr->vol_id);
  1008. if (vol_id < UBI_INTERNAL_VOL_START) {
  1009. lockdep_off();
  1010. blb_record_page1(ubi, pnum, vid_hdr, 0);
  1011. lockdep_on();
  1012. }
  1013. }
  1014. #endif
  1015. p = (char *)vid_hdr - ubi->vid_hdr_shift;
  1016. err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
  1017. ubi->vid_hdr_alsize);
  1018. return err;
  1019. }
  1020. #ifdef CONFIG_MTD_UBI_LOWPAGE_BACKUP
  1021. int ubi_io_write_vid_hdr_blb(struct ubi_device *ubi, int pnum,
  1022. struct ubi_vid_hdr *vid_hdr)
  1023. {
  1024. int err;
  1025. uint32_t crc;
  1026. void *p;
  1027. dbg_io("write VID header to PEB %d", pnum);
  1028. ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
  1029. err = self_check_peb_ec_hdr(ubi, pnum);
  1030. if (err)
  1031. return err;
  1032. vid_hdr->magic = cpu_to_be32(UBI_VID_HDR_MAGIC);
  1033. vid_hdr->version = UBI_VERSION;
  1034. crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
  1035. vid_hdr->hdr_crc = cpu_to_be32(crc);
  1036. err = self_check_vid_hdr(ubi, pnum, vid_hdr);
  1037. if (err)
  1038. return err;
  1039. {
  1040. int vol_id = be32_to_cpu(vid_hdr->vol_id);
  1041. if (vol_id < UBI_INTERNAL_VOL_START) {
  1042. lockdep_off();
  1043. err = blb_record_page1(ubi, pnum, vid_hdr, 1);
  1044. lockdep_on();
  1045. if (err)
  1046. return err;
  1047. }
  1048. }
  1049. p = (char *)vid_hdr - ubi->vid_hdr_shift;
  1050. err = ubi_io_write(ubi, p, pnum, ubi->vid_hdr_aloffset,
  1051. ubi->vid_hdr_alsize);
  1052. return err;
  1053. }
  1054. #endif
  1055. /**
  1056. * self_check_not_bad - ensure that a physical eraseblock is not bad.
  1057. * @ubi: UBI device description object
  1058. * @pnum: physical eraseblock number to check
  1059. *
  1060. * This function returns zero if the physical eraseblock is good, %-EINVAL if
  1061. * it is bad and a negative error code if an error occurred.
  1062. */
  1063. static int self_check_not_bad(const struct ubi_device *ubi, int pnum)
  1064. {
  1065. int err;
  1066. if (!ubi_dbg_chk_io(ubi))
  1067. return 0;
  1068. err = ubi_io_is_bad(ubi, pnum);
  1069. if (!err)
  1070. return err;
  1071. ubi_err("self-check failed for PEB %d", pnum);
  1072. dump_stack();
  1073. return err > 0 ? -EINVAL : err;
  1074. }
  1075. /**
  1076. * self_check_ec_hdr - check if an erase counter header is all right.
  1077. * @ubi: UBI device description object
  1078. * @pnum: physical eraseblock number the erase counter header belongs to
  1079. * @ec_hdr: the erase counter header to check
  1080. *
  1081. * This function returns zero if the erase counter header contains valid
  1082. * values, and %-EINVAL if not.
  1083. */
  1084. static int self_check_ec_hdr(const struct ubi_device *ubi, int pnum,
  1085. const struct ubi_ec_hdr *ec_hdr)
  1086. {
  1087. int err;
  1088. uint32_t magic;
  1089. if (!ubi_dbg_chk_io(ubi))
  1090. return 0;
  1091. magic = be32_to_cpu(ec_hdr->magic);
  1092. if (magic != UBI_EC_HDR_MAGIC) {
  1093. ubi_err("bad magic %#08x, must be %#08x",
  1094. magic, UBI_EC_HDR_MAGIC);
  1095. goto fail;
  1096. }
  1097. err = validate_ec_hdr(ubi, ec_hdr);
  1098. if (err) {
  1099. ubi_err("self-check failed for PEB %d", pnum);
  1100. goto fail;
  1101. }
  1102. return 0;
  1103. fail:
  1104. ubi_dump_ec_hdr(ec_hdr);
  1105. dump_stack();
  1106. return -EINVAL;
  1107. }
  1108. /**
  1109. * self_check_peb_ec_hdr - check erase counter header.
  1110. * @ubi: UBI device description object
  1111. * @pnum: the physical eraseblock number to check
  1112. *
  1113. * This function returns zero if the erase counter header is all right, and
  1114. * a negative error code if not or if an error occurred.
  1115. */
  1116. static int self_check_peb_ec_hdr(const struct ubi_device *ubi, int pnum)
  1117. {
  1118. int err;
  1119. uint32_t crc, hdr_crc;
  1120. struct ubi_ec_hdr *ec_hdr;
  1121. if (!ubi_dbg_chk_io(ubi))
  1122. return 0;
  1123. ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_NOFS);
  1124. if (!ec_hdr)
  1125. return -ENOMEM;
  1126. err = ubi_io_read(ubi, ec_hdr, pnum, 0, UBI_EC_HDR_SIZE);
  1127. if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
  1128. goto exit;
  1129. crc = crc32(UBI_CRC32_INIT, ec_hdr, UBI_EC_HDR_SIZE_CRC);
  1130. hdr_crc = be32_to_cpu(ec_hdr->hdr_crc);
  1131. if (hdr_crc != crc) {
  1132. ubi_err("bad CRC, calculated %#08x, read %#08x", crc, hdr_crc);
  1133. ubi_err("self-check failed for PEB %d", pnum);
  1134. ubi_dump_ec_hdr(ec_hdr);
  1135. dump_stack();
  1136. err = -EINVAL;
  1137. goto exit;
  1138. }
  1139. err = self_check_ec_hdr(ubi, pnum, ec_hdr);
  1140. exit:
  1141. kfree(ec_hdr);
  1142. return err;
  1143. }
  1144. /**
  1145. * self_check_vid_hdr - check that a volume identifier header is all right.
  1146. * @ubi: UBI device description object
  1147. * @pnum: physical eraseblock number the volume identifier header belongs to
  1148. * @vid_hdr: the volume identifier header to check
  1149. *
  1150. * This function returns zero if the volume identifier header is all right, and
  1151. * %-EINVAL if not.
  1152. */
  1153. static int self_check_vid_hdr(const struct ubi_device *ubi, int pnum,
  1154. const struct ubi_vid_hdr *vid_hdr)
  1155. {
  1156. int err;
  1157. uint32_t magic;
  1158. if (!ubi_dbg_chk_io(ubi))
  1159. return 0;
  1160. magic = be32_to_cpu(vid_hdr->magic);
  1161. if (magic != UBI_VID_HDR_MAGIC) {
  1162. ubi_err("bad VID header magic %#08x at PEB %d, must be %#08x",
  1163. magic, pnum, UBI_VID_HDR_MAGIC);
  1164. goto fail;
  1165. }
  1166. err = validate_vid_hdr(ubi, vid_hdr);
  1167. if (err) {
  1168. ubi_err("self-check failed for PEB %d", pnum);
  1169. goto fail;
  1170. }
  1171. return err;
  1172. fail:
  1173. ubi_err("self-check failed for PEB %d", pnum);
  1174. ubi_dump_vid_hdr(vid_hdr);
  1175. dump_stack();
  1176. return -EINVAL;
  1177. }
  1178. /**
  1179. * self_check_peb_vid_hdr - check volume identifier header.
  1180. * @ubi: UBI device description object
  1181. * @pnum: the physical eraseblock number to check
  1182. *
  1183. * This function returns zero if the volume identifier header is all right,
  1184. * and a negative error code if not or if an error occurred.
  1185. */
  1186. static int self_check_peb_vid_hdr(const struct ubi_device *ubi, int pnum)
  1187. {
  1188. int err;
  1189. uint32_t crc, hdr_crc;
  1190. struct ubi_vid_hdr *vid_hdr;
  1191. void *p;
  1192. if (!ubi_dbg_chk_io(ubi))
  1193. return 0;
  1194. vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
  1195. if (!vid_hdr)
  1196. return -ENOMEM;
  1197. p = (char *)vid_hdr - ubi->vid_hdr_shift;
  1198. err = ubi_io_read(ubi, p, pnum, ubi->vid_hdr_aloffset,
  1199. ubi->vid_hdr_alsize);
  1200. if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
  1201. goto exit;
  1202. crc = crc32(UBI_CRC32_INIT, vid_hdr, UBI_VID_HDR_SIZE_CRC);
  1203. hdr_crc = be32_to_cpu(vid_hdr->hdr_crc);
  1204. if (hdr_crc != crc) {
  1205. ubi_err("bad VID header CRC at PEB %d, calculated %#08x, read %#08x",
  1206. pnum, crc, hdr_crc);
  1207. ubi_err("self-check failed for PEB %d", pnum);
  1208. ubi_dump_vid_hdr(vid_hdr);
  1209. dump_stack();
  1210. err = -EINVAL;
  1211. goto exit;
  1212. }
  1213. err = self_check_vid_hdr(ubi, pnum, vid_hdr);
  1214. exit:
  1215. ubi_free_vid_hdr(ubi, vid_hdr);
  1216. return err;
  1217. }
  1218. /**
  1219. * self_check_write - make sure write succeeded.
  1220. * @ubi: UBI device description object
  1221. * @buf: buffer with data which were written
  1222. * @pnum: physical eraseblock number the data were written to
  1223. * @offset: offset within the physical eraseblock the data were written to
  1224. * @len: how many bytes were written
  1225. *
  1226. * This function reads data which were recently written and compares them with
  1227. * the original data buffer - the data have to match. Returns zero if the data
  1228. * match and a negative error code if not or in case of failure.
  1229. */
  1230. static int self_check_write(struct ubi_device *ubi, const void *buf, int pnum,
  1231. int offset, int len)
  1232. {
  1233. int err, i;
  1234. size_t read;
  1235. void *buf1;
  1236. loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
  1237. if (!ubi_dbg_chk_io(ubi))
  1238. return 0;
  1239. buf1 = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
  1240. if (!buf1) {
  1241. ubi_err("cannot allocate memory to check writes");
  1242. return 0;
  1243. }
  1244. err = mtd_read(ubi->mtd, addr, len, &read, buf1);
  1245. if (err && !mtd_is_bitflip(err))
  1246. goto out_free;
  1247. for (i = 0; i < len; i++) {
  1248. uint8_t c = ((uint8_t *)buf)[i];
  1249. uint8_t c1 = ((uint8_t *)buf1)[i];
  1250. int dump_len;
  1251. if (c == c1)
  1252. continue;
  1253. ubi_err("self-check failed for PEB %d:%d, len %d",
  1254. pnum, offset, len);
  1255. ubi_msg("data differ at position %d", i);
  1256. dump_len = max_t(int, 128, len - i);
  1257. ubi_msg("hex dump of the original buffer from %d to %d",
  1258. i, i + dump_len);
  1259. print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
  1260. buf + i, dump_len, 1);
  1261. ubi_msg("hex dump of the read buffer from %d to %d",
  1262. i, i + dump_len);
  1263. print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
  1264. buf1 + i, dump_len, 1);
  1265. dump_stack();
  1266. err = -EINVAL;
  1267. goto out_free;
  1268. }
  1269. vfree(buf1);
  1270. return 0;
  1271. out_free:
  1272. vfree(buf1);
  1273. return err;
  1274. }
  1275. /**
  1276. * ubi_self_check_all_ff - check that a region of flash is empty.
  1277. * @ubi: UBI device description object
  1278. * @pnum: the physical eraseblock number to check
  1279. * @offset: the starting offset within the physical eraseblock to check
  1280. * @len: the length of the region to check
  1281. *
  1282. * This function returns zero if only 0xFF bytes are present at offset
  1283. * @offset of the physical eraseblock @pnum, and a negative error code if not
  1284. * or if an error occurred.
  1285. */
  1286. int ubi_self_check_all_ff(struct ubi_device *ubi, int pnum, int offset, int len)
  1287. {
  1288. size_t read;
  1289. int err;
  1290. void *buf;
  1291. loff_t addr = (loff_t)pnum * ubi->peb_size + offset;
  1292. if (!ubi_dbg_chk_io(ubi))
  1293. return 0;
  1294. buf = __vmalloc(len, GFP_NOFS, PAGE_KERNEL);
  1295. if (!buf) {
  1296. ubi_err("cannot allocate memory to check for 0xFFs");
  1297. return 0;
  1298. }
  1299. err = mtd_read(ubi->mtd, addr, len, &read, buf);
  1300. if (err && !mtd_is_bitflip(err)) {
  1301. ubi_err("error %d while reading %d bytes from PEB %d:%d, read %zd bytes",
  1302. err, len, pnum, offset, read);
  1303. goto error;
  1304. }
  1305. err = ubi_check_pattern(buf, 0xFF, len);
  1306. if (err == 0) {
  1307. ubi_err("flash region at PEB %d:%d, length %d does not contain all 0xFF bytes",
  1308. pnum, offset, len);
  1309. goto fail;
  1310. }
  1311. vfree(buf);
  1312. return 0;
  1313. fail:
  1314. ubi_err("self-check failed for PEB %d", pnum);
  1315. ubi_msg("hex dump of the %d-%d region", offset, offset + len);
  1316. print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1, buf, len, 1);
  1317. err = -EINVAL;
  1318. error:
  1319. dump_stack();
  1320. vfree(buf);
  1321. return err;
  1322. }
  1323. #ifdef CONFIG_MTD_UBI_LOWPAGE_BACKUP
  1324. /* Read one page with oob one time */
  1325. int ubi_io_read_oob(const struct ubi_device *ubi, void *databuf, void *oobbuf,
  1326. int pnum, int offset) {
  1327. int err;
  1328. loff_t addr;
  1329. struct mtd_oob_ops ops;
  1330. dbg_io("read from PEB %d:%d", pnum, offset);
  1331. ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
  1332. ubi_assert(offset >= 0 && offset + ubi->mtd->writesize <= ubi->peb_size);
  1333. addr = (loff_t)pnum * ubi->peb_size + offset;
  1334. ops.mode = MTD_OPS_AUTO_OOB;
  1335. ops.ooblen = ubi->mtd->oobavail;
  1336. ops.oobbuf = oobbuf;
  1337. ops.ooboffs = 0;
  1338. ops.len = ubi->mtd->writesize;
  1339. ops.datbuf = databuf;
  1340. ops.retlen = ops.oobretlen = 0;
  1341. err = mtd_read_oob(ubi->mtd, addr, &ops);
  1342. if (err) {
  1343. if (err == -EUCLEAN) {
  1344. /*
  1345. * -EUCLEAN is reported if there was a bit-flip which
  1346. * was corrected, so this is harmless.
  1347. *
  1348. * We do not report it here unless debugging is
  1349. * enabled. A corresponding message will be printed
  1350. * later, when this PEB has been scrubbed.
  1351. */
  1352. ubi_msg("fixable bit-flip detected at addr %lld", addr);
  1353. if (oobbuf)
  1354. ubi_assert(ops.oobretlen == ops.ooblen);
  1355. return UBI_IO_BITFLIPS;
  1356. }
  1357. if (ops.retlen != ops.len && err == -EBADMSG) {
  1358. ubi_err("err(%d), retlen(%zu), len(%zu)", err, ops.retlen, ops.len);
  1359. dump_stack();
  1360. err = -EIO;
  1361. }
  1362. ubi_msg("mtd_read_oob err %d", err);
  1363. }
  1364. return err;
  1365. }
  1366. /* Write one page with oob one time */
  1367. int ubi_io_write_oob(const struct ubi_device *ubi, void *databuf, void *oobbuf,
  1368. int pnum, int offset)
  1369. {
  1370. int err;
  1371. loff_t addr;
  1372. struct mtd_oob_ops ops;
  1373. dbg_io("write to PEB %d:%d", pnum, offset);
  1374. ubi_assert(pnum >= 0 && pnum < ubi->peb_count);
  1375. ubi_assert(offset >= 0 && offset + ubi->mtd->writesize <= ubi->peb_size);
  1376. addr = (loff_t)pnum * ubi->peb_size + offset;
  1377. ops.mode = MTD_OPS_AUTO_OOB;
  1378. ops.ooblen = ubi->mtd->oobavail;
  1379. ops.oobbuf = oobbuf;
  1380. ops.ooboffs = 0;
  1381. ops.len = ubi->mtd->writesize;
  1382. ops.datbuf = databuf;
  1383. ops.retlen = ops.oobretlen = 0;
  1384. err = mtd_write_oob(ubi->mtd, addr, &ops);
  1385. if (err) {
  1386. ubi_err("error %d while writing to addr %lld, PEB %d:0x%x",
  1387. err, addr, pnum, offset);
  1388. dump_stack();
  1389. } else
  1390. ubi_assert(ops.retlen == ops.len);
  1391. return err;
  1392. }
  1393. #endif