eba.c 52 KB

  1. /*
  2. * Copyright (c) International Business Machines Corp., 2006
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
  12. * the GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  17. *
  18. * Author: Artem Bityutskiy (Битюцкий Артём)
  19. */
  20. /*
  21. * The UBI Eraseblock Association (EBA) sub-system.
  22. *
  23. * This sub-system is responsible for I/O to/from logical eraseblock.
  24. *
  25. * Although in this implementation the EBA table is fully kept and managed in
  26. * RAM, which assumes poor scalability, it might be (partially) maintained on
  27. * flash in future implementations.
  28. *
  29. * The EBA sub-system implements per-logical eraseblock locking. Before
  30. * accessing a logical eraseblock it is locked for reading or writing. The
  31. * per-logical eraseblock locking is implemented by means of the lock tree. The
  32. * lock tree is an RB-tree which refers all the currently locked logical
  33. * eraseblocks. The lock tree elements are &struct ubi_ltree_entry objects.
  34. * They are indexed by (@vol_id, @lnum) pairs.
  35. *
  36. * EBA also maintains the global sequence counter which is incremented each
  37. * time a logical eraseblock is mapped to a physical eraseblock and it is
  38. * stored in the volume identifier header. This means that each VID header has
  39. * a unique sequence number. The sequence number is only increased and we assume
  40. * 64 bits is enough to never overflow.
  41. */
  42. #include <linux/slab.h>
  43. #include <linux/crc32.h>
  44. #include <linux/err.h>
  45. #include "ubi.h"
  46. #ifdef CONFIG_PWR_LOSS_MTK_SPOH
  47. #include <mach/power_loss_test.h>
  48. #endif
  49. /* Number of physical eraseblocks reserved for atomic LEB change operation */
  50. #define EBA_RESERVED_PEBS 1
  51. /**
  52. * next_sqnum - get next sequence number.
  53. * @ubi: UBI device description object
  54. *
  55. * This function returns next sequence number to use, which is just the current
  56. * global sequence counter value. It also increases the global sequence
  57. * counter.
  58. */
  59. unsigned long long ubi_next_sqnum(struct ubi_device *ubi)
  60. {
  61. unsigned long long sqnum;
  62. spin_lock(&ubi->ltree_lock);
  63. sqnum = ubi->global_sqnum++;
  64. spin_unlock(&ubi->ltree_lock);
  65. return sqnum;
  66. }
  67. /**
  68. * ubi_get_compat - get compatibility flags of a volume.
  69. * @ubi: UBI device description object
  70. * @vol_id: volume ID
  71. *
  72. * This function returns compatibility flags for an internal volume. User
  73. * volumes have no compatibility flags, so %0 is returned.
  74. */
  75. #ifdef CONFIG_MTD_UBI_LOWPAGE_BACKUP
  76. int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
  77. #else
  78. static int ubi_get_compat(const struct ubi_device *ubi, int vol_id)
  79. #endif
  80. {
  81. if (vol_id == UBI_LAYOUT_VOLUME_ID)
  82. return UBI_LAYOUT_VOLUME_COMPAT;
  83. return 0;
  84. }
  85. /**
  86. * ltree_lookup - look up the lock tree.
  87. * @ubi: UBI device description object
  88. * @vol_id: volume ID
  89. * @lnum: logical eraseblock number
  90. *
  91. * This function returns a pointer to the corresponding &struct ubi_ltree_entry
  92. * object if the logical eraseblock is locked and %NULL if it is not.
  93. * @ubi->ltree_lock has to be locked.
  94. */
  95. static struct ubi_ltree_entry *ltree_lookup(struct ubi_device *ubi, int vol_id,
  96. int lnum)
  97. {
  98. struct rb_node *p;
  99. p = ubi->ltree.rb_node;
  100. while (p) {
  101. struct ubi_ltree_entry *le;
  102. le = rb_entry(p, struct ubi_ltree_entry, rb);
  103. if (vol_id < le->vol_id)
  104. p = p->rb_left;
  105. else if (vol_id > le->vol_id)
  106. p = p->rb_right;
  107. else {
  108. if (lnum < le->lnum)
  109. p = p->rb_left;
  110. else if (lnum > le->lnum)
  111. p = p->rb_right;
  112. else
  113. return le;
  114. }
  115. }
  116. return NULL;
  117. }
  118. /**
  119. * ltree_add_entry - add new entry to the lock tree.
  120. * @ubi: UBI device description object
  121. * @vol_id: volume ID
  122. * @lnum: logical eraseblock number
  123. *
  124. * This function adds new entry for logical eraseblock (@vol_id, @lnum) to the
  125. * lock tree. If such entry is already there, its usage counter is increased.
  126. * Returns pointer to the lock tree entry or %-ENOMEM if memory allocation
  127. * failed.
  128. */
  129. static struct ubi_ltree_entry *ltree_add_entry(struct ubi_device *ubi,
  130. int vol_id, int lnum)
  131. {
  132. struct ubi_ltree_entry *le, *le1, *le_free;
  133. le = kmalloc(sizeof(struct ubi_ltree_entry), GFP_NOFS);
  134. if (!le)
  135. return ERR_PTR(-ENOMEM);
  136. le->users = 0;
  137. init_rwsem(&le->mutex);
  138. le->vol_id = vol_id;
  139. le->lnum = lnum;
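/*
 * The entry is allocated before taking @ubi->ltree_lock because
 * kmalloc(GFP_NOFS) may sleep. If the lookup below finds that this LEB
 * is already locked, the pre-allocated entry is freed once the spinlock
 * has been dropped.
 */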
  140. spin_lock(&ubi->ltree_lock);
  141. le1 = ltree_lookup(ubi, vol_id, lnum);
  142. if (le1) {
  143. /*
  144. * This logical eraseblock is already locked. The newly
  145. * allocated lock entry is not needed.
  146. */
  147. le_free = le;
  148. le = le1;
  149. } else {
  150. struct rb_node **p, *parent = NULL;
  151. /*
  152. * No lock entry, add the newly allocated one to the
  153. * @ubi->ltree RB-tree.
  154. */
  155. le_free = NULL;
  156. p = &ubi->ltree.rb_node;
  157. while (*p) {
  158. parent = *p;
  159. le1 = rb_entry(parent, struct ubi_ltree_entry, rb);
  160. if (vol_id < le1->vol_id)
  161. p = &(*p)->rb_left;
  162. else if (vol_id > le1->vol_id)
  163. p = &(*p)->rb_right;
  164. else {
  165. ubi_assert(lnum != le1->lnum);
  166. if (lnum < le1->lnum)
  167. p = &(*p)->rb_left;
  168. else
  169. p = &(*p)->rb_right;
  170. }
  171. }
  172. rb_link_node(&le->rb, parent, p);
  173. rb_insert_color(&le->rb, &ubi->ltree);
  174. }
  175. le->users += 1;
  176. spin_unlock(&ubi->ltree_lock);
  177. kfree(le_free);
  178. return le;
  179. }
  180. /**
  181. * leb_read_lock - lock logical eraseblock for reading.
  182. * @ubi: UBI device description object
  183. * @vol_id: volume ID
  184. * @lnum: logical eraseblock number
  185. *
  186. * This function locks a logical eraseblock for reading. Returns zero in case
  187. * of success and a negative error code in case of failure.
  188. */
  189. static int leb_read_lock(struct ubi_device *ubi, int vol_id, int lnum)
  190. {
  191. struct ubi_ltree_entry *le;
  192. le = ltree_add_entry(ubi, vol_id, lnum);
  193. if (IS_ERR(le))
  194. return PTR_ERR(le);
  195. down_read(&le->mutex);
  196. return 0;
  197. }
  198. /**
  199. * leb_read_unlock - unlock logical eraseblock.
  200. * @ubi: UBI device description object
  201. * @vol_id: volume ID
  202. * @lnum: logical eraseblock number
  203. */
  204. static void leb_read_unlock(struct ubi_device *ubi, int vol_id, int lnum)
  205. {
  206. struct ubi_ltree_entry *le;
  207. spin_lock(&ubi->ltree_lock);
  208. le = ltree_lookup(ubi, vol_id, lnum);
  209. le->users -= 1;
  210. ubi_assert(le->users >= 0);
  211. up_read(&le->mutex);
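/* Free the lock tree node once the last user has dropped it */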
  212. if (le->users == 0) {
  213. rb_erase(&le->rb, &ubi->ltree);
  214. kfree(le);
  215. }
  216. spin_unlock(&ubi->ltree_lock);
  217. }
  218. /**
  219. * leb_write_lock - lock logical eraseblock for writing.
  220. * @ubi: UBI device description object
  221. * @vol_id: volume ID
  222. * @lnum: logical eraseblock number
  223. *
  224. * This function locks a logical eraseblock for writing. Returns zero in case
  225. * of success and a negative error code in case of failure.
  226. */
  227. static int leb_write_lock(struct ubi_device *ubi, int vol_id, int lnum)
  228. {
  229. struct ubi_ltree_entry *le;
  230. le = ltree_add_entry(ubi, vol_id, lnum);
  231. if (IS_ERR(le))
  232. return PTR_ERR(le);
  233. down_write(&le->mutex);
  234. return 0;
  235. }
  236. /**
  237. * leb_write_trylock - try to lock logical eraseblock for writing.
  238. * @ubi: UBI device description object
  239. * @vol_id: volume ID
  240. * @lnum: logical eraseblock number
  241. *
  242. * This function locks a logical eraseblock for writing if there is no
  243. * contention and does nothing if there is contention. Returns %0 in case of
  244. * success, %1 in case of contention, and a negative error code in case of
  245. * failure.
  246. */
  247. static int leb_write_trylock(struct ubi_device *ubi, int vol_id, int lnum)
  248. {
  249. struct ubi_ltree_entry *le;
  250. le = ltree_add_entry(ubi, vol_id, lnum);
  251. if (IS_ERR(le))
  252. return PTR_ERR(le);
  253. if (down_write_trylock(&le->mutex))
  254. return 0;
  255. /* Contention, cancel */
  256. spin_lock(&ubi->ltree_lock);
  257. le->users -= 1;
  258. ubi_assert(le->users >= 0);
  259. if (le->users == 0) {
  260. rb_erase(&le->rb, &ubi->ltree);
  261. kfree(le);
  262. }
  263. spin_unlock(&ubi->ltree_lock);
  264. return 1;
  265. }
  266. /**
  267. * leb_write_unlock - unlock logical eraseblock.
  268. * @ubi: UBI device description object
  269. * @vol_id: volume ID
  270. * @lnum: logical eraseblock number
  271. */
  272. static void leb_write_unlock(struct ubi_device *ubi, int vol_id, int lnum)
  273. {
  274. struct ubi_ltree_entry *le;
  275. spin_lock(&ubi->ltree_lock);
  276. le = ltree_lookup(ubi, vol_id, lnum);
  277. le->users -= 1;
  278. ubi_assert(le->users >= 0);
  279. up_write(&le->mutex);
  280. if (le->users == 0) {
  281. rb_erase(&le->rb, &ubi->ltree);
  282. kfree(le);
  283. }
  284. spin_unlock(&ubi->ltree_lock);
  285. }
  286. /**
  287. * ubi_eba_unmap_leb - un-map logical eraseblock.
  288. * @ubi: UBI device description object
  289. * @vol: volume description object
  290. * @lnum: logical eraseblock number
  291. *
  292. * This function un-maps logical eraseblock @lnum and schedules corresponding
  293. * physical eraseblock for erasure. Returns zero in case of success and a
  294. * negative error code in case of failure.
  295. */
  296. int ubi_eba_unmap_leb(struct ubi_device *ubi, struct ubi_volume *vol,
  297. int lnum)
  298. {
  299. int err, pnum, vol_id = vol->vol_id;
  300. if (ubi->ro_mode)
  301. return -EROFS;
  302. err = leb_write_lock(ubi, vol_id, lnum);
  303. if (err)
  304. return err;
  305. pnum = vol->eba_tbl[lnum];
  306. if (pnum < 0)
  307. /* This logical eraseblock is already unmapped */
  308. goto out_unlock;
  309. dbg_eba("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
  310. #ifdef MTK_TMP_DEBUG_LOG
  311. ubi_msg("erase LEB %d:%d, PEB %d", vol_id, lnum, pnum);
  312. #endif
  313. #ifdef CONFIG_MTK_HIBERNATION
  314. if (strcmp(vol->name, IPOH_VOLUME_NANE) == 0 && ubi->ipoh_ops == 0) {
  315. struct ubi_vid_hdr *vid_hdr;
  316. struct ubi_wl_entry *e = ubi->lookuptbl[pnum];
  317. unsigned long long int old_ec = e->ec;
  318. err = sync_erase(ubi, e, 0);
  319. if (err) {
  320. ubi_err("erase PEB %d fail\n", pnum);
  321. goto skip_ipoh;
  322. }
  323. e->ec = old_ec;
  324. vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
  325. if (!vid_hdr) {
  326. ubi_err("malloc vid_hdr fail\n");
  327. goto skip_ipoh;
  328. }
  329. vid_hdr->vol_type = UBI_VID_DYNAMIC;
  330. vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
  331. vid_hdr->vol_id = cpu_to_be32(vol_id);
  332. vid_hdr->lnum = cpu_to_be32(lnum);
  333. vid_hdr->compat = ubi_get_compat(ubi, vol_id);
  334. vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
  335. err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
  336. ubi_free_vid_hdr(ubi, vid_hdr);
  337. if (err) {
  338. ubi_err("failed to write VID header to LEB %d:%d, PEB %d",
  339. vol_id, lnum, pnum);
  340. goto skip_ipoh;
  341. }
  342. goto out_unlock;
  343. }
  344. skip_ipoh:
  345. ubi->ipoh_ops = 0;
  346. #endif
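/*
 * Fastmap takes @ubi->fm_sem for writing while it snapshots the EBA
 * tables, so holding it for reading here keeps this table update from
 * racing with an in-progress fastmap write.
 */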
  347. down_read(&ubi->fm_sem);
  348. vol->eba_tbl[lnum] = UBI_LEB_UNMAPPED;
  349. up_read(&ubi->fm_sem);
  350. err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 0);
  351. out_unlock:
  352. leb_write_unlock(ubi, vol_id, lnum);
  353. return err;
  354. }
  355. /**
  356. * ubi_eba_read_leb - read data.
  357. * @ubi: UBI device description object
  358. * @vol: volume description object
  359. * @lnum: logical eraseblock number
  360. * @buf: buffer to store the read data
  361. * @offset: offset from where to read
  362. * @len: how many bytes to read
  363. * @check: data CRC check flag
  364. *
  365. * If the logical eraseblock @lnum is unmapped, @buf is filled with 0xFF
  366. * bytes. The @check flag only makes sense for static volumes and forces
  367. * eraseblock data CRC checking.
  368. *
  369. * In case of success this function returns zero. In case of a static volume,
  370. * if data CRC mismatches - %-EBADMSG is returned. %-EBADMSG may also be
  371. * returned for any volume type if an ECC error was detected by the MTD device
  372. * driver. Other negative error codes may be returned in case of other errors.
  373. */
  374. int ubi_eba_read_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
  375. void *buf, int offset, int len, int check)
  376. {
  377. int err, pnum, scrub = 0, vol_id = vol->vol_id;
  378. struct ubi_vid_hdr *vid_hdr;
  379. uint32_t uninitialized_var(crc);
  380. err = leb_read_lock(ubi, vol_id, lnum);
  381. if (err)
  382. return err;
  383. pnum = vol->eba_tbl[lnum];
  384. if (pnum < 0) {
  385. /*
  386. * The logical eraseblock is not mapped, fill the whole buffer
  387. * with 0xFF bytes. The exception is static volumes for which
  388. * it is an error to read unmapped logical eraseblocks.
  389. */
  390. dbg_eba("read %d bytes from offset %d of LEB %d:%d (unmapped)",
  391. len, offset, vol_id, lnum);
  392. leb_read_unlock(ubi, vol_id, lnum);
  393. ubi_assert(vol->vol_type != UBI_STATIC_VOLUME);
  394. memset(buf, 0xFF, len);
  395. return 0;
  396. }
  397. dbg_eba("read %d bytes from offset %d of LEB %d:%d, PEB %d",
  398. len, offset, vol_id, lnum, pnum);
  399. if (vol->vol_type == UBI_DYNAMIC_VOLUME)
  400. check = 0;
  401. retry:
  402. if (check) {
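/*
 * Static-volume CRC checking: read the VID header back to obtain the
 * expected data size and data CRC for this LEB.
 */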
  403. vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
  404. if (!vid_hdr) {
  405. err = -ENOMEM;
  406. goto out_unlock;
  407. }
  408. err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
  409. if (err && err != UBI_IO_BITFLIPS) {
  410. if (err > 0) {
  411. /*
  412. * The header is either absent or corrupted.
  413. * The former case means there is a bug -
  414. * switch to read-only mode just in case.
  415. * The latter case means a real corruption - we
  416. * may try to recover data. FIXME: but this is
  417. * not implemented.
  418. */
  419. if (err == UBI_IO_BAD_HDR_EBADMSG ||
  420. err == UBI_IO_BAD_HDR) {
  421. ubi_warn("corrupted VID header at PEB %d, LEB %d:%d",
  422. pnum, vol_id, lnum);
  423. err = -EBADMSG;
  424. } else
  425. ubi_ro_mode(ubi);
  426. }
  427. goto out_free;
  428. } else if (err == UBI_IO_BITFLIPS)
  429. scrub = 1;
  430. ubi_assert(lnum < be32_to_cpu(vid_hdr->used_ebs));
  431. ubi_assert(len == be32_to_cpu(vid_hdr->data_size));
  432. crc = be32_to_cpu(vid_hdr->data_crc);
  433. ubi_free_vid_hdr(ubi, vid_hdr);
  434. }
  435. err = ubi_io_read_data(ubi, buf, pnum, offset, len);
  436. if (err) {
  437. if (err == UBI_IO_BITFLIPS)
  438. scrub = 1;
  439. else if (mtd_is_eccerr(err)) {
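/*
 * Uncorrectable ECC error. For static volumes, re-read with CRC
 * checking enabled to find out whether the data is in fact intact;
 * dynamic volumes carry no data CRC, so the error is returned as is.
 */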
  440. if (vol->vol_type == UBI_DYNAMIC_VOLUME)
  441. goto out_unlock;
  442. scrub = 1;
  443. if (!check) {
  444. ubi_msg("force data checking");
  445. check = 1;
  446. goto retry;
  447. }
  448. } else
  449. goto out_unlock;
  450. }
  451. if (check) {
  452. uint32_t crc1 = crc32(UBI_CRC32_INIT, buf, len);
  453. if (crc1 != crc) {
  454. ubi_warn("CRC error: calculated %#08x, must be %#08x",
  455. crc1, crc);
  456. err = -EBADMSG;
  457. goto out_unlock;
  458. }
  459. }
  460. if (scrub)
  461. err = ubi_wl_scrub_peb(ubi, pnum);
  462. leb_read_unlock(ubi, vol_id, lnum);
  463. return err;
  464. out_free:
  465. ubi_free_vid_hdr(ubi, vid_hdr);
  466. out_unlock:
  467. leb_read_unlock(ubi, vol_id, lnum);
  468. return err;
  469. }
  470. /**
  471. * recover_peb - recover from write failure.
  472. * @ubi: UBI device description object
  473. * @pnum: the physical eraseblock to recover
  474. * @vol_id: volume ID
  475. * @lnum: logical eraseblock number
  476. * @buf: data which was not written because of the write failure
  477. * @offset: offset of the failed write
  478. * @len: how many bytes should have been written
  479. *
  480. * This function is called in case of a write failure and moves all good data
  481. * from the potentially bad physical eraseblock to a good physical eraseblock.
  482. * This function also writes the data which was not written due to the failure.
  483. * Returns new physical eraseblock number in case of success, and a negative
  484. * error code in case of failure.
  485. */
  486. static int recover_peb(struct ubi_device *ubi, int pnum, int vol_id, int lnum,
  487. const void *buf, int offset, int len)
  488. {
  489. int err, idx = vol_id2idx(ubi, vol_id), new_pnum, data_size, tries = 0;
  490. struct ubi_volume *vol = ubi->volumes[idx];
  491. struct ubi_vid_hdr *vid_hdr;
  492. vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
  493. if (!vid_hdr)
  494. return -ENOMEM;
  495. retry:
  496. new_pnum = ubi_wl_get_peb(ubi);
  497. if (new_pnum < 0) {
  498. ubi_free_vid_hdr(ubi, vid_hdr);
  499. return new_pnum;
  500. }
  501. ubi_msg("recover PEB %d, move data to PEB %d", pnum, new_pnum);
  502. err = ubi_io_read_vid_hdr(ubi, pnum, vid_hdr, 1);
  503. if (err && err != UBI_IO_BITFLIPS) {
  504. if (err > 0)
  505. err = -EIO;
  506. goto out_put;
  507. }
  508. vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
  509. err = ubi_io_write_vid_hdr(ubi, new_pnum, vid_hdr);
  510. if (err)
  511. goto write_error;
  512. data_size = offset + len;
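/*
 * Copy everything written to the old PEB so far: the old contents up
 * to @offset plus the @len bytes whose write just failed.
 */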
  513. #ifdef CONFIG_UBI_SHARE_BUFFER
  514. mutex_lock(&ubi_buf_mutex);
  515. #else
  516. mutex_lock(&ubi->buf_mutex);
  517. #endif
  518. memset(ubi->peb_buf + offset, 0xFF, len);
  519. /* Read everything before the area where the write failure happened */
  520. if (offset > 0) {
  521. err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, offset);
  522. if (err && err != UBI_IO_BITFLIPS)
  523. goto out_unlock;
  524. }
  525. memcpy(ubi->peb_buf + offset, buf, len);
  526. err = ubi_io_write_data(ubi, ubi->peb_buf, new_pnum, 0, data_size);
  527. if (err) {
  528. #ifdef CONFIG_UBI_SHARE_BUFFER
  529. mutex_unlock(&ubi_buf_mutex);
  530. #else
  531. mutex_unlock(&ubi->buf_mutex);
  532. #endif
  533. goto write_error;
  534. }
  535. #ifdef CONFIG_UBI_SHARE_BUFFER
  536. mutex_unlock(&ubi_buf_mutex);
  537. #else
  538. mutex_unlock(&ubi->buf_mutex);
  539. #endif
  540. ubi_free_vid_hdr(ubi, vid_hdr);
  541. down_read(&ubi->fm_sem);
  542. vol->eba_tbl[lnum] = new_pnum;
  543. up_read(&ubi->fm_sem);
  544. ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
  545. ubi_msg("data was successfully recovered");
  546. return 0;
  547. out_unlock:
  548. #ifdef CONFIG_UBI_SHARE_BUFFER
  549. mutex_unlock(&ubi_buf_mutex);
  550. #else
  551. mutex_unlock(&ubi->buf_mutex);
  552. #endif
  553. out_put:
  554. ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
  555. ubi_free_vid_hdr(ubi, vid_hdr);
  556. return err;
  557. write_error:
  558. /*
  559. * Bad luck? This physical eraseblock is bad too? Crud. Let's try to
  560. * get another one.
  561. */
  562. ubi_warn("failed to write to PEB %d", new_pnum);
  563. ubi_wl_put_peb(ubi, vol_id, lnum, new_pnum, 1);
  564. if (++tries > UBI_IO_RETRIES) {
  565. ubi_free_vid_hdr(ubi, vid_hdr);
  566. return err;
  567. }
  568. ubi_msg("try again");
  569. goto retry;
  570. }
  571. #ifdef CONFIG_MTD_UBI_LOWPAGE_BACKUP
  572. int blb_get_startpage(void)
  573. {
  574. int _start = 0, _start0, _start1;
  575. _start0 = mtk_nand_paired_page_transfer(0, false);
  576. _start1 = mtk_nand_paired_page_transfer(1, false);
  577. if (_start0 < _start1)
  578. _start = _start1;
  579. else
  580. _start = _start0;
  581. return _start+1;
  582. }
  583. int blb_renew_leb(struct ubi_device *ubi, int lnum)
  584. {
  585. struct ubi_volume *backup_vol;
  586. int err, old_pnum, backup_pnum, another_pnum;
  587. int _start = 0;
  588. int a_lnum = (lnum+1)%2, backup_tries = 0;
  589. struct ubi_vid_hdr *vid_hdr;
  590. backup_vol = ubi->volumes[vol_id2idx(ubi, UBI_BACKUP_VOLUME_ID)];
  591. backup_pnum = backup_vol->eba_tbl[lnum];
  592. another_pnum = backup_vol->eba_tbl[a_lnum];
  593. old_pnum = backup_pnum;
  594. _start = blb_get_startpage();
  595. peb_retry:
  596. vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
  597. if (!vid_hdr)
  598. return -ENOMEM;
  599. vid_hdr->vol_type = UBI_VID_DYNAMIC;
  600. vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
  601. vid_hdr->vol_id = cpu_to_be32(UBI_BACKUP_VOLUME_ID);
  602. vid_hdr->lnum = cpu_to_be32(lnum);
  603. vid_hdr->compat = ubi_get_compat(ubi, UBI_BACKUP_VOLUME_ID);
  604. vid_hdr->data_pad = cpu_to_be32(backup_vol->data_pad);
  605. backup_pnum = ubi_wl_get_peb(ubi);
  606. if (backup_pnum < 0) {
  607. ubi_free_vid_hdr(ubi, vid_hdr);
  608. return backup_pnum;
  609. }
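/*
 * If the backup volume's other LEB is mapped, record in it that LEB
 * @lnum is being renewed to the freshly allocated PEB.
 */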
  610. if (another_pnum != UBI_LEB_UNMAPPED) {
  611. uint32_t crc;
  612. struct ubi_blb_spare *blb_spare = (struct ubi_blb_spare *)ubi->oobbuf;
  613. blb_spare->num = cpu_to_be16(1);
  614. blb_spare->pnum = cpu_to_be16(backup_pnum);
  615. blb_spare->lnum = cpu_to_be16(lnum);
  616. blb_spare->vol_id = cpu_to_be32(UBI_BACKUP_VOLUME_ID);
  617. blb_spare->page = cpu_to_be16(1);
  618. blb_spare->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
  619. crc = crc32(UBI_CRC32_INIT, blb_spare, sizeof(struct ubi_blb_spare)-4);
  620. blb_spare->crc = cpu_to_be32(crc);
  621. sprintf(ubi->databuf, "VIDVIDVID");
  622. if ((ubi->peb_size-ubi->next_offset[a_lnum]) < ubi->mtd->writesize) {
  623. ubi_msg("no space on backup %d peb %d\n", a_lnum, another_pnum);
  624. } else {
  625. err = ubi_io_write_oob(ubi, ubi->databuf, ubi->oobbuf, another_pnum, ubi->next_offset[a_lnum]);
  626. ubi_msg("backup[1] 'backup volume' %d:%d to %d:%d", backup_pnum, 1, another_pnum,
  627. ubi->next_offset[a_lnum]/ubi->mtd->writesize);
  628. if (err) {
  629. ubi_warn("failed to write to LEB 0x%x:%d, PEB %d",
  630. UBI_BACKUP_VOLUME_ID, 0, another_pnum);
  631. ubi_free_vid_hdr(ubi, vid_hdr);
  632. return err;
  633. }
  634. ubi->next_offset[a_lnum] += ubi->mtd->writesize;
  635. }
  636. }
  637. /* ubi_msg("map backup :%d", backup_pnum); */
  638. err = ubi_io_write_vid_hdr(ubi, backup_pnum, vid_hdr);
  639. ubi_free_vid_hdr(ubi, vid_hdr);
  640. if (err) {
  641. ubi_warn("failed to write VID header to LEB 0x%x:%d, PEB %d",
  642. UBI_BACKUP_VOLUME_ID, 0, backup_pnum);
  643. if (err != -EIO || !ubi->bad_allowed) {
  644. ubi_ro_mode(ubi);
  645. return err;
  646. }
  647. err = ubi_wl_put_peb(ubi, UBI_BACKUP_VOLUME_ID, lnum, backup_pnum, 1);
  648. if (err || ++backup_tries > UBI_IO_RETRIES) {
  649. ubi_ro_mode(ubi);
  650. return err;
  651. }
  652. ubi_msg("try another backup PEB");
  653. goto peb_retry;
  654. }
  655. ubi->next_offset[lnum] = _start * ubi->mtd->writesize; /*skip paired page of 0/1*/
  656. ubi_msg("blb write start from page %d:%d\n", backup_pnum, _start);
  657. backup_vol->eba_tbl[lnum] = backup_pnum;
  658. if (old_pnum != UBI_LEB_UNMAPPED) {
  659. err = ubi_wl_put_peb(ubi, UBI_BACKUP_VOLUME_ID, lnum, old_pnum, 0);
  660. if (err)
  661. return err;
  662. }
  663. return backup_pnum;
  664. }
  665. int blb_init_volume(struct ubi_device *ubi)
  666. {
  667. int err;
  668. err = blb_renew_leb(ubi, 0);
  669. if (err < 0)
  670. return err;
  671. err = blb_renew_leb(ubi, 1);
  672. if (err < 0)
  673. return err;
  674. return 0;
  675. }
  676. int blb_get_peb(struct ubi_device *ubi, int lnum, int renew)
  677. {
  678. struct ubi_volume *backup_vol;
  679. int err, backup_pnum;
  680. backup_vol = ubi->volumes[vol_id2idx(ubi, UBI_BACKUP_VOLUME_ID)];
  681. if (backup_vol == NULL || backup_vol->eba_tbl == NULL)
  682. return -1;
  683. if (backup_vol->eba_tbl[0] == UBI_LEB_UNMAPPED &&
  684. backup_vol->eba_tbl[1] == UBI_LEB_UNMAPPED) {
  685. err = blb_init_volume(ubi);
  686. if (err)
  687. return err;
  688. }
  689. backup_pnum = backup_vol->eba_tbl[lnum];
  690. if (backup_pnum == UBI_LEB_UNMAPPED)
  691. renew = 1;
  692. if (renew) {
  693. int a_lnum = (lnum+1)%2;
  694. ubi_msg("leb_write_lock %d %d:%d\n", __LINE__, UBI_BACKUP_VOLUME_ID, a_lnum);
  695. leb_write_lock(ubi, UBI_BACKUP_VOLUME_ID, a_lnum);
  696. backup_pnum = blb_renew_leb(ubi, lnum);
  697. leb_write_unlock(ubi, UBI_BACKUP_VOLUME_ID, a_lnum);
  698. if (backup_pnum < 0)
  699. return backup_pnum;
  700. }
  701. return backup_pnum;
  702. }
  703. int blb_record_page1(struct ubi_device *ubi, int pnum,
  704. struct ubi_vid_hdr *vidh, int work)
  705. {
  706. int err, backup_pnum, renew = 0, backup_tries = 0;
  707. struct ubi_blb_spare *blb_spare = (struct ubi_blb_spare *)ubi->oobbuf;
  708. int vol_id = be32_to_cpu(vidh->vol_id);
  709. int lnum = be32_to_cpu(vidh->lnum);
  710. uint32_t crc;
  711. #ifdef CONFIG_MTK_HIBERNATION
  712. struct ubi_volume *vol = NULL;
  713. #endif
  714. if (ubi->scanning == 1)
  715. return 0;
  716. #ifdef CONFIG_MTK_HIBERNATION
  717. vol = ubi->volumes[vol_id2idx(ubi, vol_id)];
  718. if (vol && strcmp(vol->name, IPOH_VOLUME_NANE) == 0 && ubi->ipoh_ops == 0)
  719. return 0;
  720. #endif
  721. if (work == 1) {
  722. if (mutex_trylock(&ubi->blb_mutex) == 0) {
  723. ubi_msg("mutex_trylock err");
  724. dump_stack();
  725. return -EIO;
  726. }
  727. } else {
  728. mutex_lock(&ubi->blb_mutex);
  729. }
  730. ubi_msg("leb_write_lock %d %d:%d\n", __LINE__, UBI_BACKUP_VOLUME_ID, 1);
  731. leb_write_lock(ubi, UBI_BACKUP_VOLUME_ID, 1);
  732. blb_vid_retry:
  733. backup_pnum = blb_get_peb(ubi, 1, renew);
  734. if (backup_pnum < 0) {
  735. leb_write_unlock(ubi, UBI_BACKUP_VOLUME_ID, 1);
  736. mutex_unlock(&ubi->blb_mutex);
  737. return backup_pnum;
  738. }
  739. /* check if enough free pages */
  740. if ((ubi->peb_size-ubi->next_offset[1]) < ubi->mtd->writesize*10 || ubi->leb_scrub[1] == 1) {
  741. dbg_eba("not enough free space(%d) to backup(%d)",
  742. (ubi->peb_size-ubi->next_offset[1]), ubi->mtd->writesize);
  743. ubi->leb_scrub[1] = 0;
  744. renew = 1;
  745. goto blb_vid_retry;
  746. }
  747. blb_spare->num = cpu_to_be16(1);
  748. blb_spare->pnum = cpu_to_be16(pnum);
  749. blb_spare->lnum = cpu_to_be16(lnum);
  750. blb_spare->vol_id = cpu_to_be32(vol_id);
  751. blb_spare->page = cpu_to_be16(1);
  752. blb_spare->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
  753. crc = crc32(UBI_CRC32_INIT, blb_spare, sizeof(struct ubi_blb_spare)-4);
  754. blb_spare->crc = cpu_to_be32(crc);
  755. sprintf(ubi->databuf, "VIDVIDVID");
  756. dbg_eba("write backup page to leb 0x%x:%d, PEB %d, Offset 0x%x",
  757. UBI_BACKUP_VOLUME_ID, 1, backup_pnum, ubi->next_offset[1]);
  758. err = ubi_io_write_oob(ubi, ubi->databuf, ubi->oobbuf, backup_pnum, ubi->next_offset[1]);
  759. ubi_msg("backup[1] %d:%d to %d:%d, num %d", pnum, 1, backup_pnum,
  760. ubi->next_offset[1]/ubi->mtd->writesize, 1);
  761. if (err) {
  762. ubi_warn("failed to write to LEB 0x%x:%d, PEB %d",
  763. UBI_BACKUP_VOLUME_ID, 1, backup_pnum);
  764. if (err != -EIO || !ubi->bad_allowed) {
  765. ubi_ro_mode(ubi);
  766. leb_write_unlock(ubi, UBI_BACKUP_VOLUME_ID, 1);
  767. mutex_unlock(&ubi->blb_mutex);
  768. return err;
  769. }
  770. err = ubi_wl_put_peb(ubi, UBI_BACKUP_VOLUME_ID, lnum, backup_pnum, 1);
  771. if (err || ++backup_tries > UBI_IO_RETRIES) {
  772. ubi_ro_mode(ubi);
  773. leb_write_unlock(ubi, UBI_BACKUP_VOLUME_ID, 1);
  774. mutex_unlock(&ubi->blb_mutex);
  775. return err;
  776. }
  777. ubi_msg("try another backup PEB");
  778. renew = 1;
  779. goto blb_vid_retry;
  780. }
  781. ubi->next_offset[1] += ubi->mtd->writesize;
  782. leb_write_unlock(ubi, UBI_BACKUP_VOLUME_ID, 1);
  783. mutex_unlock(&ubi->blb_mutex);
  784. return 0;
  785. }
  786. int blb_record_share(struct ubi_device *ubi, int vol_id, int lnum, int pnum, int offset, int len)
  787. {
  788. int page_start, page_no, backup_pnum;
  789. int i, backup_cnt = 0, had_backup_cnt = 0;
  790. int num, backup_tries = 0;
  791. struct ubi_blb_spare *blb_spare = (struct ubi_blb_spare *)ubi->oobbuf;
  792. int renew = 0;
  793. dbg_eba("write %d pages(%d) at page %d of PEB %d",
  794. len/ubi->mtd->writesize, len, (offset+ubi->leb_start)/ubi->mtd->writesize, pnum);
  795. page_start = (offset+ubi->leb_start)/ubi->mtd->writesize;
  796. for (i = 0; i < (len/ubi->mtd->writesize); i++) {
  797. page_no = mtk_nand_paired_page_transfer(page_start+i, true);
  798. if (page_no != (page_start+i)) {
  799. if (page_no >= page_start)
  800. break; /* not at risk, stop backup*/
  801. backup_cnt++;
  802. }
  803. }
  804. if (backup_cnt > 0) {
  805. dbg_eba("needs to backup %d LSB pages", backup_cnt);
  806. mutex_lock(&ubi->blb_mutex);
  807. ubi_msg("leb_write_lock %d %d:%d\n", __LINE__, UBI_BACKUP_VOLUME_ID, 0);
  808. leb_write_lock(ubi, UBI_BACKUP_VOLUME_ID, 0);
  809. retry_backup_leb:
  810. backup_pnum = blb_get_peb(ubi, 0, renew);
  811. if (backup_pnum < 0) {
  812. leb_write_unlock(ubi, UBI_BACKUP_VOLUME_ID, 0);
  813. mutex_unlock(&ubi->blb_mutex);
  814. return backup_pnum;
  815. }
  816. /* check if enough free pages */
  817. if ((ubi->peb_size-ubi->next_offset[0]) < (backup_cnt+10)*ubi->mtd->writesize
  818. || ubi->leb_scrub[0] == 1) {
  819. if (ubi->leb_scrub[0])
  820. dbg_eba("blb scrub");
  821. else
  822. dbg_eba("not enough free space(%d) to backup(%d)",
  823. (ubi->peb_size-ubi->next_offset[0]), backup_cnt*ubi->mtd->writesize);
  824. ubi->leb_scrub[0] = 0;
  825. renew = 1;
  826. goto retry_backup_leb;
  827. }
  828. blb_spare->pnum = cpu_to_be16(pnum);
  829. blb_spare->lnum = cpu_to_be16(lnum);
  830. blb_spare->vol_id = cpu_to_be32(vol_id);
  831. blb_spare->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
  832. for (i = 0; i < (len/ubi->mtd->writesize); i++) {
  833. uint32_t crc;
  834. int err;
  835. page_no = mtk_nand_paired_page_transfer(page_start+i, true);
  836. if (backup_cnt == had_backup_cnt)
  837. break;
  838. if (page_no == page_start+i) /*skip lsb*/
  839. continue;
  840. had_backup_cnt++;
  841. err = ubi_io_read_oob(ubi, ubi->databuf, NULL, pnum, page_no * ubi->mtd->writesize);
  842. if (err && err != UBI_IO_BITFLIPS) {
  843. ubi_warn("failed to read from LEB %d:%d, PEB %d",
  844. vol_id, lnum, pnum);
  845. leb_write_unlock(ubi, UBI_BACKUP_VOLUME_ID, 0);
  846. mutex_unlock(&ubi->blb_mutex);
  847. return err;
  848. }
  849. num = (had_backup_cnt == backup_cnt) ? backup_cnt : 0;
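/* Only the last page of this backup group carries the total page count */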
  850. blb_spare->num = cpu_to_be16(num);
  851. blb_spare->page = cpu_to_be16(page_no);
  852. crc = crc32(UBI_CRC32_INIT, blb_spare, sizeof(struct ubi_blb_spare)-4);
  853. blb_spare->crc = cpu_to_be32(crc);
  854. dbg_eba("write backup page to leb 0x%x:%d, PEB %d, Offset 0x%x",
  855. UBI_BACKUP_VOLUME_ID, 0, backup_pnum, ubi->next_offset[0]);
  856. err = ubi_io_write_oob(ubi, ubi->databuf, ubi->oobbuf, backup_pnum, ubi->next_offset[0]);
  857. ubi_msg("backup[0] %d:%d to %d:%d, num %d", pnum, page_no, backup_pnum,
  858. ubi->next_offset[0]/ubi->mtd->writesize, num);
  859. if (err) {
  860. ubi_warn("failed to write to LEB 0x%x:%d, PEB %d",
  861. UBI_BACKUP_VOLUME_ID, 0, backup_pnum);
  862. if (err != -EIO || !ubi->bad_allowed) {
  863. ubi_ro_mode(ubi);
  864. leb_write_unlock(ubi, UBI_BACKUP_VOLUME_ID, 0);
  865. mutex_unlock(&ubi->blb_mutex);
  866. return err;
  867. }
  868. err = ubi_wl_put_peb(ubi, UBI_BACKUP_VOLUME_ID, lnum, backup_pnum, 1);
  869. if (err || ++backup_tries > UBI_IO_RETRIES) {
  870. ubi_ro_mode(ubi);
  871. leb_write_unlock(ubi, UBI_BACKUP_VOLUME_ID, 0);
  872. mutex_unlock(&ubi->blb_mutex);
  873. return err;
  874. }
  875. ubi_msg("try another backup PEB");
  876. renew = 1;
  877. goto retry_backup_leb;
  878. }
  879. ubi->next_offset[0] += ubi->mtd->writesize;
  880. }
  881. leb_write_unlock(ubi, UBI_BACKUP_VOLUME_ID, 0);
  882. mutex_unlock(&ubi->blb_mutex);
  883. }
  884. return 0;
  885. }
  886. #endif
  887. /**
  888. * ubi_eba_write_leb - write data to dynamic volume.
  889. * @ubi: UBI device description object
  890. * @vol: volume description object
  891. * @lnum: logical eraseblock number
  892. * @buf: the data to write
  893. * @offset: offset within the logical eraseblock where to write
  894. * @len: how many bytes to write
  895. *
  896. * This function writes data to logical eraseblock @lnum of a dynamic volume
  897. * @vol. Returns zero in case of success and a negative error code in case
  898. * of failure. In case of error, it is possible that something was still
  899. * written to the flash media, but it may be some garbage.
  900. */
  901. int ubi_eba_write_leb(struct ubi_device *ubi, struct ubi_volume *vol, int lnum,
  902. const void *buf, int offset, int len)
  903. {
  904. int err, pnum, tries = 0, vol_id = vol->vol_id;
  905. struct ubi_vid_hdr *vid_hdr;
  906. if (ubi->ro_mode)
  907. return -EROFS;
  908. err = leb_write_lock(ubi, vol_id, lnum);
  909. if (err)
  910. return err;
  911. pnum = vol->eba_tbl[lnum];
  912. if (pnum >= 0) {
  913. dbg_eba("write %d bytes at offset %d of LEB %d:%d, PEB %d",
  914. len, offset, vol_id, lnum, pnum);
  915. #ifdef MTK_TMP_DEBUG_LOG
  916. ubi_msg("write %d bytes at offset %d of LEB %d:%d, PEB %d",
  917. len, offset, vol_id, lnum, pnum);
  918. #endif
  919. #ifdef CONFIG_MTD_UBI_LOWPAGE_BACKUP
  920. #ifdef CONFIG_MTK_HIBERNATION
  921. if (strcmp(vol->name, IPOH_VOLUME_NANE) != 0) {
  922. #endif
  923. lockdep_off();
  924. err = blb_record_share(ubi, vol_id, lnum, pnum, offset, len);
  925. lockdep_on();
  926. if (err) {
  927. leb_write_unlock(ubi, vol_id, lnum);
  928. return err;
  929. }
  930. #ifdef CONFIG_MTK_HIBERNATION
  931. }
  932. #endif
  933. #endif
  934. err = ubi_io_write_data(ubi, buf, pnum, offset, len);
  935. if (err) {
  936. ubi_warn("failed to write data to PEB %d", pnum);
  937. if (err == -EIO && ubi->bad_allowed)
  938. err = recover_peb(ubi, pnum, vol_id, lnum, buf,
  939. offset, len);
  940. if (err)
  941. ubi_ro_mode(ubi);
  942. }
  943. leb_write_unlock(ubi, vol_id, lnum);
  944. return err;
  945. }
  946. /*
  947. * The logical eraseblock is not mapped. We have to get a free physical
  948. * eraseblock and write the volume identifier header there first.
  949. */
  950. vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
  951. if (!vid_hdr) {
  952. leb_write_unlock(ubi, vol_id, lnum);
  953. return -ENOMEM;
  954. }
  955. vid_hdr->vol_type = UBI_VID_DYNAMIC;
  956. vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
  957. vid_hdr->vol_id = cpu_to_be32(vol_id);
  958. vid_hdr->lnum = cpu_to_be32(lnum);
  959. vid_hdr->compat = ubi_get_compat(ubi, vol_id);
  960. vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
  961. retry:
  962. pnum = ubi_wl_get_peb(ubi);
  963. if (pnum < 0) {
  964. ubi_free_vid_hdr(ubi, vid_hdr);
  965. leb_write_unlock(ubi, vol_id, lnum);
  966. return pnum;
  967. }
  968. dbg_eba("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
  969. len, offset, vol_id, lnum, pnum);
  970. #ifdef MTK_TMP_DEBUG_LOG
  971. ubi_msg("write VID hdr and %d bytes at offset %d of LEB %d:%d, PEB %d",
  972. len, offset, vol_id, lnum, pnum);
  973. #endif
  974. err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
  975. if (err) {
  976. ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
  977. vol_id, lnum, pnum);
  978. goto write_error;
  979. }
  980. if (len) {
  981. err = ubi_io_write_data(ubi, buf, pnum, offset, len);
  982. if (err) {
  983. ubi_warn("failed to write %d bytes at offset %d of LEB %d:%d, PEB %d",
  984. len, offset, vol_id, lnum, pnum);
  985. goto write_error;
  986. }
  987. #ifdef CONFIG_MTK_HIBERNATION
  988. } else if (strcmp(vol->name, IPOH_VOLUME_NANE) == 0) {
  989. ubi_wl_move_pg_to_used(ubi, pnum);
  990. #endif
  991. }
  992. down_read(&ubi->fm_sem);
  993. vol->eba_tbl[lnum] = pnum;
  994. up_read(&ubi->fm_sem);
  995. leb_write_unlock(ubi, vol_id, lnum);
  996. ubi_free_vid_hdr(ubi, vid_hdr);
  997. return 0;
  998. write_error:
  999. if (err != -EIO || !ubi->bad_allowed) {
  1000. ubi_ro_mode(ubi);
  1001. leb_write_unlock(ubi, vol_id, lnum);
  1002. ubi_free_vid_hdr(ubi, vid_hdr);
  1003. return err;
  1004. }
  1005. /*
  1006. * Fortunately, this is the first write operation to this physical
  1007. * eraseblock, so just put it and request a new one. We assume that if
  1008. * this physical eraseblock went bad, the erase code will handle that.
  1009. */
  1010. err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
  1011. if (err || ++tries > UBI_IO_RETRIES) {
  1012. ubi_ro_mode(ubi);
  1013. leb_write_unlock(ubi, vol_id, lnum);
  1014. ubi_free_vid_hdr(ubi, vid_hdr);
  1015. return err;
  1016. }
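/*
 * Every VID header written to flash must carry a fresh sequence number,
 * so take a new one before retrying on another PEB.
 */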
  1017. vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
  1018. ubi_msg("try another PEB");
  1019. goto retry;
  1020. }
  1021. /**
  1022. * ubi_eba_write_leb_st - write data to static volume.
  1023. * @ubi: UBI device description object
  1024. * @vol: volume description object
  1025. * @lnum: logical eraseblock number
  1026. * @buf: data to write
  1027. * @len: how many bytes to write
  1028. * @used_ebs: how many logical eraseblocks will this volume contain
  1029. *
  1030. * This function writes data to logical eraseblock @lnum of static volume
  1031. * @vol. The @used_ebs argument should contain total number of logical
  1032. * eraseblock in this static volume.
  1033. *
  1034. * When writing to the last logical eraseblock, the @len argument doesn't have
  1035. * to be aligned to the minimal I/O unit size. Instead, it has to be equivalent
  1036. * to the real data size, although the @buf buffer has to contain the
  1037. * alignment. In all other cases, @len has to be aligned.
  1038. *
  1039. * It is prohibited to write more than once to logical eraseblocks of static
  1040. * volumes. This function returns zero in case of success and a negative error
  1041. * code in case of failure.
  1042. */
  1043. int ubi_eba_write_leb_st(struct ubi_device *ubi, struct ubi_volume *vol,
  1044. int lnum, const void *buf, int len, int used_ebs)
  1045. {
  1046. int err, pnum, tries = 0, data_size = len, vol_id = vol->vol_id;
  1047. struct ubi_vid_hdr *vid_hdr;
  1048. uint32_t crc;
  1049. if (ubi->ro_mode)
  1050. return -EROFS;
  1051. if (lnum == used_ebs - 1)
  1052. /* If this is the last LEB @len may be unaligned */
  1053. len = ALIGN(data_size, ubi->min_io_size);
  1054. else
  1055. ubi_assert(!(len & (ubi->min_io_size - 1)));
  1056. vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
  1057. if (!vid_hdr)
  1058. return -ENOMEM;
  1059. err = leb_write_lock(ubi, vol_id, lnum);
  1060. if (err) {
  1061. ubi_free_vid_hdr(ubi, vid_hdr);
  1062. return err;
  1063. }
  1064. vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
  1065. vid_hdr->vol_id = cpu_to_be32(vol_id);
  1066. vid_hdr->lnum = cpu_to_be32(lnum);
  1067. vid_hdr->compat = ubi_get_compat(ubi, vol_id);
  1068. vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
  1069. crc = crc32(UBI_CRC32_INIT, buf, data_size);
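/* The CRC covers only the real data; the write below is padded to @len */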
  1070. vid_hdr->vol_type = UBI_VID_STATIC;
  1071. vid_hdr->data_size = cpu_to_be32(data_size);
  1072. vid_hdr->used_ebs = cpu_to_be32(used_ebs);
  1073. vid_hdr->data_crc = cpu_to_be32(crc);
  1074. retry:
  1075. pnum = ubi_wl_get_peb(ubi);
  1076. if (pnum < 0) {
  1077. ubi_free_vid_hdr(ubi, vid_hdr);
  1078. leb_write_unlock(ubi, vol_id, lnum);
  1079. return pnum;
  1080. }
  1081. dbg_eba("write VID hdr and %d bytes at LEB %d:%d, PEB %d, used_ebs %d",
  1082. len, vol_id, lnum, pnum, used_ebs);
  1083. err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
  1084. if (err) {
  1085. ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
  1086. vol_id, lnum, pnum);
  1087. goto write_error;
  1088. }
  1089. err = ubi_io_write_data(ubi, buf, pnum, 0, len);
  1090. if (err) {
  1091. ubi_warn("failed to write %d bytes of data to PEB %d",
  1092. len, pnum);
  1093. goto write_error;
  1094. }
  1095. ubi_assert(vol->eba_tbl[lnum] < 0);
  1096. down_read(&ubi->fm_sem);
  1097. vol->eba_tbl[lnum] = pnum;
  1098. up_read(&ubi->fm_sem);
  1099. leb_write_unlock(ubi, vol_id, lnum);
  1100. ubi_free_vid_hdr(ubi, vid_hdr);
  1101. return 0;
  1102. write_error:
  1103. if (err != -EIO || !ubi->bad_allowed) {
  1104. /*
  1105. * This flash device does not admit of bad eraseblocks or
  1106. * something nasty and unexpected happened. Switch to read-only
  1107. * mode just in case.
  1108. */
  1109. ubi_ro_mode(ubi);
  1110. leb_write_unlock(ubi, vol_id, lnum);
  1111. ubi_free_vid_hdr(ubi, vid_hdr);
  1112. return err;
  1113. }
  1114. err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
  1115. if (err || ++tries > UBI_IO_RETRIES) {
  1116. ubi_ro_mode(ubi);
  1117. leb_write_unlock(ubi, vol_id, lnum);
  1118. ubi_free_vid_hdr(ubi, vid_hdr);
  1119. return err;
  1120. }
  1121. vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
  1122. ubi_msg("try another PEB");
  1123. goto retry;
  1124. }
  1125. /**
  1126. * ubi_eba_atomic_leb_change - change logical eraseblock atomically.
  1127. * @ubi: UBI device description object
  1128. * @vol: volume description object
  1129. * @lnum: logical eraseblock number
  1130. * @buf: data to write
  1131. * @len: how many bytes to write
  1132. *
  1133. * This function changes the contents of a logical eraseblock atomically. @buf
  1134. * has to contain new logical eraseblock data, and @len - the length of the
  1135. * data, which has to be aligned. This function guarantees that in case of an
  1136. * unclean reboot the old contents is preserved. Returns zero in case of
  1137. * success and a negative error code in case of failure.
  1138. *
  1139. * UBI reserves one LEB for the "atomic LEB change" operation, so only one
  1140. * LEB change may be done at a time. This is ensured by @ubi->alc_mutex.
  1141. */
  1142. int ubi_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
  1143. int lnum, const void *buf, int len)
  1144. {
  1145. int err, pnum, tries = 0, vol_id = vol->vol_id;
  1146. struct ubi_vid_hdr *vid_hdr;
  1147. uint32_t crc;
  1148. if (ubi->ro_mode)
  1149. return -EROFS;
  1150. if (len == 0) {
  1151. /*
  1152. * Special case when data length is zero. In this case the LEB
  1153. * has to be unmapped and mapped somewhere else.
  1154. */
  1155. err = ubi_eba_unmap_leb(ubi, vol, lnum);
  1156. if (err)
  1157. return err;
  1158. return ubi_eba_write_leb(ubi, vol, lnum, NULL, 0, 0);
  1159. }
  1160. vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
  1161. if (!vid_hdr)
  1162. return -ENOMEM;
  1163. mutex_lock(&ubi->alc_mutex);
  1164. err = leb_write_lock(ubi, vol_id, lnum);
  1165. if (err)
  1166. goto out_mutex;
  1167. vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
  1168. vid_hdr->vol_id = cpu_to_be32(vol_id);
  1169. vid_hdr->lnum = cpu_to_be32(lnum);
  1170. vid_hdr->compat = ubi_get_compat(ubi, vol_id);
  1171. vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
  1172. crc = crc32(UBI_CRC32_INIT, buf, len);
  1173. vid_hdr->vol_type = UBI_VID_DYNAMIC;
  1174. vid_hdr->data_size = cpu_to_be32(len);
  1175. vid_hdr->copy_flag = 1;
  1176. vid_hdr->data_crc = cpu_to_be32(crc);
  1177. retry:
  1178. pnum = ubi_wl_get_peb(ubi);
  1179. if (pnum < 0) {
  1180. err = pnum;
  1181. goto out_leb_unlock;
  1182. }
  1183. dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
  1184. vol_id, lnum, vol->eba_tbl[lnum], pnum);
  1185. #ifdef CONFIG_PWR_LOSS_MTK_SPOH
  1186. PL_RESET_ON_CASE("NAND", "LEB_Change1");
  1187. #endif
  1188. err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
  1189. if (err) {
  1190. ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
  1191. vol_id, lnum, pnum);
  1192. goto write_error;
  1193. }
  1194. #ifdef CONFIG_PWR_LOSS_MTK_SPOH
  1195. PL_RESET_ON_CASE("NAND", "LEB_Change2");
  1196. #endif
  1197. #ifdef MTK_TMP_DEBUG_LOG
  1198. ubi_msg("change LEB %d:%d, PEB %d to PEB %d with len %d",
  1199. vol_id, lnum, vol->eba_tbl[lnum], pnum, len);
  1200. #endif
  1201. err = ubi_io_write_data(ubi, buf, pnum, 0, len);
  1202. if (err) {
  1203. ubi_warn("failed to write %d bytes of data to PEB %d",
  1204. len, pnum);
  1205. goto write_error;
  1206. }
  1207. if (vol->eba_tbl[lnum] >= 0) {
  1208. err = ubi_wl_put_peb(ubi, vol_id, lnum, vol->eba_tbl[lnum], 0);
  1209. if (err)
  1210. goto out_leb_unlock;
  1211. }
  1212. down_read(&ubi->fm_sem);
  1213. vol->eba_tbl[lnum] = pnum;
  1214. up_read(&ubi->fm_sem);
  1215. out_leb_unlock:
  1216. leb_write_unlock(ubi, vol_id, lnum);
  1217. out_mutex:
  1218. mutex_unlock(&ubi->alc_mutex);
  1219. ubi_free_vid_hdr(ubi, vid_hdr);
  1220. return err;
  1221. write_error:
  1222. if (err != -EIO || !ubi->bad_allowed) {
  1223. /*
  1224. * This flash device does not admit of bad eraseblocks or
  1225. * something nasty and unexpected happened. Switch to read-only
  1226. * mode just in case.
  1227. */
  1228. ubi_ro_mode(ubi);
  1229. goto out_leb_unlock;
  1230. }
  1231. err = ubi_wl_put_peb(ubi, vol_id, lnum, pnum, 1);
  1232. if (err || ++tries > UBI_IO_RETRIES) {
  1233. ubi_ro_mode(ubi);
  1234. goto out_leb_unlock;
  1235. }
  1236. vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));
  1237. ubi_msg("try another PEB");
  1238. goto retry;
  1239. }
  1240. /**
  1241. * is_error_sane - check whether a read error is sane.
  1242. * @err: code of the error happened during reading
  1243. *
  1244. * This is a helper function for 'ubi_eba_copy_leb()' which is called when we
  1245. * cannot read data from the target PEB (an error @err happened). If the error
  1246. * code is sane, then we treat this error as non-fatal. Otherwise the error is
  1247. * fatal and UBI will be switched to R/O mode later.
  1248. *
  1249. * The idea is that we try not to switch to R/O mode if the read error is
  1250. * something which suggests there was a real read problem. E.g., %-EIO. Or a
  1251. * memory allocation failed (-%ENOMEM). Otherwise, it is safer to switch to R/O
  1252. * mode, simply because we do not know what happened at the MTD level, and we
  1253. * cannot handle this. E.g., the underlying driver may have become crazy, and
  1254. * it is safer to switch to R/O mode to preserve the data.
  1255. *
  1256. * And bear in mind, this is about reading from the target PEB, i.e. the PEB
  1257. * which we have just written.
  1258. */
  1259. static int is_error_sane(int err)
  1260. {
  1261. if (err == -EIO || err == -ENOMEM || err == UBI_IO_BAD_HDR ||
  1262. err == UBI_IO_BAD_HDR_EBADMSG || err == -ETIMEDOUT)
  1263. return 0;
  1264. return 1;
  1265. }
  1266. /**
  1267. * ubi_eba_copy_leb - copy logical eraseblock.
  1268. * @ubi: UBI device description object
  1269. * @from: physical eraseblock number from where to copy
  1270. * @to: physical eraseblock number where to copy
  1271. * @vid_hdr: VID header of the @from physical eraseblock
  1272. *
  1273. * This function copies logical eraseblock from physical eraseblock @from to
  1274. * physical eraseblock @to. The @vid_hdr buffer may be changed by this
  1275. * function. Returns:
  1276. * o %0 in case of success;
  1277. * o %MOVE_CANCEL_RACE, %MOVE_TARGET_WR_ERR, %MOVE_TARGET_BITFLIPS, etc;
  1278. * o a negative error code in case of failure.
  1279. */
  1280. int ubi_eba_copy_leb(struct ubi_device *ubi, int from, int to,
  1281. struct ubi_vid_hdr *vid_hdr, int do_wl)
  1282. {
  1283. int err, vol_id, lnum, data_size, aldata_size, idx;
  1284. struct ubi_volume *vol;
  1285. uint32_t crc;
  1286. vol_id = be32_to_cpu(vid_hdr->vol_id);
  1287. lnum = be32_to_cpu(vid_hdr->lnum);
  1288. dbg_eba("copy LEB %d:%d, PEB %d to PEB %d", vol_id, lnum, from, to);
  1289. if (vid_hdr->vol_type == UBI_VID_STATIC) {
  1290. data_size = be32_to_cpu(vid_hdr->data_size);
  1291. aldata_size = ALIGN(data_size, ubi->min_io_size);
  1292. } else
  1293. data_size = aldata_size =
  1294. ubi->leb_size - be32_to_cpu(vid_hdr->data_pad);
  1295. idx = vol_id2idx(ubi, vol_id);
  1296. spin_lock(&ubi->volumes_lock);
  1297. /*
  1298. * Note, we may race with volume deletion, which means that the volume
  1299. * this logical eraseblock belongs to might be being deleted. Since the
  1300. * volume deletion un-maps all the volume's logical eraseblocks, it will
  1301. * be locked in 'ubi_wl_put_peb()' and wait for the WL worker to finish.
  1302. */
  1303. vol = ubi->volumes[idx];
  1304. spin_unlock(&ubi->volumes_lock);
  1305. if (!vol) {
  1306. /* No need to do further work, cancel */
  1307. dbg_eba("volume %d is being removed, cancel", vol_id);
  1308. return MOVE_CANCEL_RACE;
  1309. }
  1310. /*
  1311. * We do not want anybody to write to this logical eraseblock while we
  1312. * are moving it, so lock it.
  1313. *
  1314. * Note, we are using non-waiting locking here, because we cannot sleep
  1315. * on the LEB, since it may cause deadlocks. Indeed, imagine a task is
  1316. * unmapping the LEB which is mapped to the PEB we are going to move
  1317. * (@from). This task locks the LEB and goes sleep in the
  1318. * 'ubi_wl_put_peb()' function on the @ubi->move_mutex. In turn, we are
  1319. * holding @ubi->move_mutex and go sleep on the LEB lock. So, if the
  1320. * LEB is already locked, we just do not move it and return
  1321. * %MOVE_RETRY. Note, we do not return %MOVE_CANCEL_RACE here because
  1322. * we do not know the reasons of the contention - it may be just a
  1323. * normal I/O on this LEB, so we want to re-try.
  1324. */
  1325. err = leb_write_trylock(ubi, vol_id, lnum);
  1326. if (err) {
  1327. dbg_eba("contention on LEB %d:%d, cancel", vol_id, lnum);
  1328. return MOVE_RETRY;
  1329. }
	/*
	 * The LEB might have been put meanwhile, and the task which put it is
	 * probably waiting on @ubi->move_mutex. No need to continue the work,
	 * cancel it.
	 */
	if (vol->eba_tbl[lnum] != from) {
		dbg_eba("LEB %d:%d is no longer mapped to PEB %d, mapped to PEB %d, cancel",
			vol_id, lnum, from, vol->eba_tbl[lnum]);
		err = MOVE_CANCEL_RACE;
		goto out_unlock_leb;
	}

	/*
	 * OK, now the LEB is locked and we can safely start moving it. Since
	 * this function utilizes the @ubi->peb_buf buffer which is shared
	 * with some other functions - we lock the buffer by taking the buffer
	 * mutex (@ubi_buf_mutex or @ubi->buf_mutex, depending on
	 * %CONFIG_UBI_SHARE_BUFFER).
	 */
#ifdef CONFIG_UBI_SHARE_BUFFER
	mutex_lock(&ubi_buf_mutex);
#else
	mutex_lock(&ubi->buf_mutex);
#endif
	dbg_eba("read %d bytes of data", aldata_size);
	err = ubi_io_read_data(ubi, ubi->peb_buf, from, 0, aldata_size);
	if (err && err != UBI_IO_BITFLIPS) {
		ubi_warn("error %d while reading data from PEB %d",
			 err, from);
		err = MOVE_SOURCE_RD_ERR;
		goto out_unlock_buf;
	}
	/*
	 * Now we have got to calculate how much data we have to copy. In
	 * case of a static volume it is fairly easy - the VID header contains
	 * the data size. In case of a dynamic volume it is more difficult - we
	 * have to read the contents, cut 0xFF bytes from the end and copy only
	 * the first part. We must do this to avoid writing 0xFF bytes as it
	 * may have some side-effects. And not only this. It is important not
	 * to include those 0xFFs in the CRC because later they may be filled
	 * by data.
	 */
	if (vid_hdr->vol_type == UBI_VID_DYNAMIC)
		aldata_size = data_size =
			ubi_calc_data_len(ubi, ubi->peb_buf, data_size);
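	/*
	 * Hypothetical example for clarity (numbers are illustrative only):
	 * if the last non-0xFF byte in @ubi->peb_buf sits at offset 1999
	 * (i.e. 2000 bytes of real data) and the minimum I/O unit is 512
	 * bytes, 'ubi_calc_data_len()' returns ALIGN(2000, 512) == 2048. If
	 * the whole buffer is 0xFF it returns 0, so no data are written below.
	 */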
#ifdef MTK_TMP_DEBUG_LOG
	ubi_msg("copy LEB %d:%d, PEB %d to PEB %d, size %d",
		vol_id, lnum, from, to, data_size);
#endif
	cond_resched();
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size);
	cond_resched();

	/*
	 * It may turn out that the whole @from physical eraseblock contains
	 * only 0xFF bytes. Then we have to only write the VID header and do
	 * not write any data. This also means we should not set
	 * @vid_hdr->copy_flag, @vid_hdr->data_size, and @vid_hdr->data_crc.
	 */
	if (data_size > 0) {
		vid_hdr->copy_flag = 1;
		vid_hdr->data_size = cpu_to_be32(data_size);
		vid_hdr->data_crc = cpu_to_be32(crc);
	}
	vid_hdr->sqnum = cpu_to_be64(ubi_next_sqnum(ubi));

#ifdef CONFIG_MTD_UBI_LOWPAGE_BACKUP
	err = ubi_io_write_vid_hdr_blb(ubi, to, vid_hdr);
#else
	err = ubi_io_write_vid_hdr(ubi, to, vid_hdr);
#endif
	if (err) {
		if (err == -EIO)
			err = MOVE_TARGET_WR_ERR;
		goto out_unlock_buf;
	}
	cond_resched();

	/* Read the VID header back and check if it was written correctly */
	err = ubi_io_read_vid_hdr(ubi, to, vid_hdr, 1);
	if (err) {
		if (err != UBI_IO_BITFLIPS) {
			ubi_warn("error %d while reading VID header back from PEB %d",
				 err, to);
			if (is_error_sane(err))
				err = MOVE_TARGET_RD_ERR;
		} else
			err = MOVE_TARGET_BITFLIPS;
		goto out_unlock_buf;
	}
	if (data_size > 0) {
		err = ubi_io_write_data(ubi, ubi->peb_buf, to, 0, aldata_size);
		if (err) {
			if (err == -EIO)
				err = MOVE_TARGET_WR_ERR;
			goto out_unlock_buf;
		}

		/* MTK start: count wl/scrubbing size */
		if (do_wl == 1) {
			ubi->wl_count++;
			ubi->wl_size += aldata_size;
		} else if (do_wl == 2) {
			ubi->scrub_count++;
			ubi->scrub_size += aldata_size;
		}
		/* MTK end */

		cond_resched();

		/*
		 * We've written the data and are going to read it back to make
		 * sure it was written correctly.
		 */
		memset(ubi->peb_buf, 0xFF, aldata_size);
		err = ubi_io_read_data(ubi, ubi->peb_buf, to, 0, aldata_size);
		if (err) {
			if (err != UBI_IO_BITFLIPS) {
				ubi_warn("error %d while reading data back from PEB %d",
					 err, to);
				if (is_error_sane(err))
					err = MOVE_TARGET_RD_ERR;
			} else
				err = MOVE_TARGET_BITFLIPS;
			goto out_unlock_buf;
		}

		cond_resched();

		if (crc != crc32(UBI_CRC32_INIT, ubi->peb_buf, data_size)) {
			ubi_warn("read data back from PEB %d and it is different",
				 to);
			err = -EINVAL;
			goto out_unlock_buf;
		}
	}
	ubi_assert(vol->eba_tbl[lnum] == from);
	down_read(&ubi->fm_sem);
	vol->eba_tbl[lnum] = to;
	up_read(&ubi->fm_sem);

out_unlock_buf:
#ifdef CONFIG_UBI_SHARE_BUFFER
	mutex_unlock(&ubi_buf_mutex);
#else
	mutex_unlock(&ubi->buf_mutex);
#endif
out_unlock_leb:
	leb_write_unlock(ubi, vol_id, lnum);
	return err;
}
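
/*
 * Rough, purely illustrative sketch of how a caller in the wear-levelling
 * path might drive 'ubi_eba_copy_leb()' and interpret its return codes. The
 * real caller lives in the WL sub-system; 'get_move_source()',
 * 'get_move_target()' and the 'handle_*()' helpers below are made-up
 * placeholders, not functions of this driver:
 *
 *	static int move_one_peb(struct ubi_device *ubi, int do_wl)
 *	{
 *		struct ubi_vid_hdr *vid_hdr = ...; // VID header read from @from
 *		int from = get_move_source(ubi);
 *		int to = get_move_target(ubi);
 *		int err;
 *
 *		err = ubi_eba_copy_leb(ubi, from, to, vid_hdr, do_wl);
 *		if (!err)
 *			return 0; // the LEB now lives on @to
 *		if (err == MOVE_CANCEL_RACE || err == MOVE_RETRY)
 *			return handle_cancel(ubi, from, to, err);
 *		if (err == MOVE_SOURCE_RD_ERR)
 *			return handle_bad_source(ubi, from, err);
 *		if (err == MOVE_TARGET_WR_ERR || err == MOVE_TARGET_RD_ERR ||
 *		    err == MOVE_TARGET_BITFLIPS)
 *			return handle_bad_target(ubi, to, err);
 *		return err; // negative: fatal, UBI may switch to R/O mode
 *	}
 */
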
/**
 * print_rsvd_warning - warn about not having enough reserved PEBs.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This is a helper function for 'ubi_eba_init()' which is called when UBI
 * cannot reserve enough PEBs for bad block handling. This function makes a
 * decision whether we have to print a warning or not. The algorithm is as
 * follows:
 *   o if this is a new UBI image, then just print the warning;
 *   o if this is an UBI image which has already been used for some time,
 *     print a warning only if we could reserve less than 10% of the expected
 *     number of reserved PEBs.
 *
 * The idea is that when UBI is used, PEBs become bad, and the reserved pool
 * of PEBs becomes smaller, which is normal and we do not want to scare users
 * with a warning every time they attach the MTD device. This was an issue
 * reported by real users.
 */
static void print_rsvd_warning(struct ubi_device *ubi,
			       struct ubi_attach_info *ai)
{
	/*
	 * The 1 << 18 (256K) sequence number threshold is picked arbitrarily,
	 * just a reasonably large number to distinguish between newly flashed
	 * and used images.
	 */
	if (ai->max_sqnum > (1 << 18)) {
		int min = ubi->beb_rsvd_level / 10;

		if (!min)
			min = 1;
		if (ubi->beb_rsvd_pebs > min)
			return;
	}

	ubi_warn("cannot reserve enough PEBs for bad PEB handling, reserved %d, need %d",
		 ubi->beb_rsvd_pebs, ubi->beb_rsvd_level);
	if (ubi->corr_peb_count)
		ubi_warn("%d PEBs are corrupted and not used",
			 ubi->corr_peb_count);
}
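
/*
 * Worked example of the 10% rule above (illustrative numbers only): with
 * @ubi->beb_rsvd_level == 40 on a used image (max_sqnum above the threshold),
 * min == 40 / 10 == 4, so the warning is only printed when 4 or fewer PEBs
 * could actually be reserved. On a freshly flashed image the warning is
 * printed unconditionally.
 */
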
/**
 * self_check_eba - run a self check on the EBA table constructed by fastmap.
 * @ubi: UBI device description object
 * @ai_fastmap: UBI attach info object created by fastmap
 * @ai_scan: UBI attach info object created by scanning
 *
 * Returns < 0 in case of an internal error, 0 otherwise.
 * If a bad EBA table entry is found, it is printed out and ubi_assert()
 * triggers.
 */
int self_check_eba(struct ubi_device *ubi, struct ubi_attach_info *ai_fastmap,
		   struct ubi_attach_info *ai_scan)
{
	int i, j, num_volumes, ret = 0;
	int **scan_eba, **fm_eba;
	struct ubi_ainf_volume *av;
	struct ubi_volume *vol;
	struct ubi_ainf_peb *aeb;
	struct rb_node *rb;

	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	scan_eba = kmalloc(sizeof(*scan_eba) * num_volumes, GFP_KERNEL);
	if (!scan_eba)
		return -ENOMEM;

	fm_eba = kmalloc(sizeof(*fm_eba) * num_volumes, GFP_KERNEL);
	if (!fm_eba) {
		kfree(scan_eba);
		return -ENOMEM;
	}

	for (i = 0; i < num_volumes; i++) {
		vol = ubi->volumes[i];
		if (!vol)
			continue;

		scan_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**scan_eba),
				      GFP_KERNEL);
		if (!scan_eba[i]) {
			ret = -ENOMEM;
			goto out_free;
		}

		fm_eba[i] = kmalloc(vol->reserved_pebs * sizeof(**fm_eba),
				    GFP_KERNEL);
		if (!fm_eba[i]) {
			ret = -ENOMEM;
			goto out_free;
		}

		for (j = 0; j < vol->reserved_pebs; j++)
			scan_eba[i][j] = fm_eba[i][j] = UBI_LEB_UNMAPPED;

		av = ubi_find_av(ai_scan, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
			scan_eba[i][aeb->lnum] = aeb->pnum;

		av = ubi_find_av(ai_fastmap, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb)
			fm_eba[i][aeb->lnum] = aeb->pnum;

		for (j = 0; j < vol->reserved_pebs; j++) {
			if (scan_eba[i][j] != fm_eba[i][j]) {
				if (scan_eba[i][j] == UBI_LEB_UNMAPPED ||
				    fm_eba[i][j] == UBI_LEB_UNMAPPED)
					continue;

				ubi_err("LEB:%i:%i is PEB:%i instead of %i!",
					vol->vol_id, j, fm_eba[i][j],
					scan_eba[i][j]);
				ubi_assert(0);
			}
		}
	}

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;

		kfree(scan_eba[i]);
		kfree(fm_eba[i]);
	}

	kfree(scan_eba);
	kfree(fm_eba);
	return ret;
}
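
/*
 * Illustrative example of the check above (hypothetical numbers): if the full
 * scan recorded LEB 0:5 on PEB 17 (scan_eba) while the fastmap-built table
 * says PEB 23 (fm_eba), and neither entry is %UBI_LEB_UNMAPPED, the function
 * prints "LEB:0:5 is PEB:23 instead of 17!" and triggers ubi_assert().
 */
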
/**
 * ubi_eba_init - initialize the EBA sub-system using attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 *
 * This function returns zero in case of success and a negative error code in
 * case of failure.
 */
int ubi_eba_init(struct ubi_device *ubi, struct ubi_attach_info *ai)
{
	int i, j, err, num_volumes;
	struct ubi_ainf_volume *av;
	struct ubi_volume *vol;
	struct ubi_ainf_peb *aeb;
	struct rb_node *rb;

	dbg_eba("initialize EBA sub-system");

	spin_lock_init(&ubi->ltree_lock);
	mutex_init(&ubi->alc_mutex);
	ubi->ltree = RB_ROOT;

	ubi->global_sqnum = ai->max_sqnum + 1;
	num_volumes = ubi->vtbl_slots + UBI_INT_VOL_COUNT;

	for (i = 0; i < num_volumes; i++) {
		vol = ubi->volumes[i];
		if (!vol)
			continue;

		cond_resched();

		vol->eba_tbl = kmalloc(vol->reserved_pebs * sizeof(int),
				       GFP_KERNEL);
		if (!vol->eba_tbl) {
			err = -ENOMEM;
			goto out_free;
		}

		for (j = 0; j < vol->reserved_pebs; j++)
			vol->eba_tbl[j] = UBI_LEB_UNMAPPED;

		av = ubi_find_av(ai, idx2vol_id(ubi, i));
		if (!av)
			continue;

		ubi_rb_for_each_entry(rb, aeb, &av->root, u.rb) {
			if (aeb->lnum >= vol->reserved_pebs) {
				/*
				 * This may happen in case of an unclean reboot
				 * during re-size.
				 */
				ubi_move_aeb_to_list(av, aeb, &ai->erase);
			} else
				vol->eba_tbl[aeb->lnum] = aeb->pnum;
		}
	}

	if (ubi->avail_pebs < EBA_RESERVED_PEBS) {
		ubi_err("not enough physical eraseblocks (%d, need %d)",
			ubi->avail_pebs, EBA_RESERVED_PEBS);
		if (ubi->corr_peb_count)
			ubi_err("%d PEBs are corrupted and not used",
				ubi->corr_peb_count);
		err = -ENOSPC;
		goto out_free;
	}
	ubi->avail_pebs -= EBA_RESERVED_PEBS;
	ubi->rsvd_pebs += EBA_RESERVED_PEBS;

	if (ubi->bad_allowed) {
		ubi_calculate_reserved(ubi);

		if (ubi->avail_pebs < ubi->beb_rsvd_level) {
			/* Not enough free physical eraseblocks */
			ubi->beb_rsvd_pebs = ubi->avail_pebs;
			print_rsvd_warning(ubi, ai);
		} else
			ubi->beb_rsvd_pebs = ubi->beb_rsvd_level;

		ubi->avail_pebs -= ubi->beb_rsvd_pebs;
		ubi->rsvd_pebs += ubi->beb_rsvd_pebs;
	}

	dbg_eba("EBA sub-system is initialized");
	return 0;

out_free:
	for (i = 0; i < num_volumes; i++) {
		if (!ubi->volumes[i])
			continue;
		kfree(ubi->volumes[i]->eba_tbl);
		ubi->volumes[i]->eba_tbl = NULL;
	}
	return err;
}
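
/*
 * Illustrative note on the table built above (hypothetical numbers): after
 * 'ubi_eba_init()' a volume with reserved_pebs == 4 might end up with
 *
 *	vol->eba_tbl[0] = 17;			LEB 0 lives on PEB 17
 *	vol->eba_tbl[1] = UBI_LEB_UNMAPPED;	LEB 1 was never written
 *	vol->eba_tbl[2] = 5;			LEB 2 lives on PEB 5
 *	vol->eba_tbl[3] = UBI_LEB_UNMAPPED;
 *
 * which is the in-memory LEB-to-PEB mapping the rest of the EBA sub-system
 * consults and updates (see e.g. 'ubi_eba_copy_leb()' above).
 */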