  1. /*
  2. * Copyright (c) International Business Machines Corp., 2006
  3. *
  4. * This program is free software; you can redistribute it and/or modify
  5. * it under the terms of the GNU General Public License as published by
  6. * the Free Software Foundation; either version 2 of the License, or
  7. * (at your option) any later version.
  8. *
  9. * This program is distributed in the hope that it will be useful,
  10. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  11. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See
  12. * the GNU General Public License for more details.
  13. *
  14. * You should have received a copy of the GNU General Public License
  15. * along with this program; if not, write to the Free Software
  16. * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
  17. *
  18. * Author: Artem Bityutskiy (Битюцкий Артём)
  19. */
  20. /*
  21. * UBI attaching sub-system.
  22. *
  23. * This sub-system is responsible for attaching MTD devices and it also
  24. * implements flash media scanning.
  25. *
  26. * The attaching information is represented by a &struct ubi_attach_info
  27. * object. Information about volumes is represented by &struct ubi_ainf_volume
  28. * objects which are kept in volume RB-tree with root at the @volumes field.
  29. * The RB-tree is indexed by the volume ID.
  30. *
  31. * Logical eraseblocks are represented by &struct ubi_ainf_peb objects. These
  32. * objects are kept in per-volume RB-trees with the root at the corresponding
  33. * &struct ubi_ainf_volume object. To put it differently, we keep an RB-tree of
  34. * per-volume objects and each of these objects is the root of RB-tree of
  35. * per-LEB objects.
  36. *
  37. * Corrupted physical eraseblocks are put to the @corr list, free physical
  38. * eraseblocks are put to the @free list and the physical eraseblock to be
  39. * erased are put to the @erase list.
  40. *
  41. * About corruptions
  42. * ~~~~~~~~~~~~~~~~~
  43. *
  44. * UBI protects EC and VID headers with CRC-32 checksums, so it can detect
  45. * whether the headers are corrupted or not. Sometimes UBI also protects the
  46. * data with CRC-32, e.g., when it executes the atomic LEB change operation, or
  47. * when it moves the contents of a PEB for wear-leveling purposes.
  48. *
  49. * UBI tries to distinguish between 2 types of corruptions.
  50. *
  51. * 1. Corruptions caused by power cuts. These are expected corruptions and UBI
  52. * tries to handle them gracefully, without printing too many warnings and
  53. * error messages. The idea is that we do not lose important data in these
  54. * cases - we may lose only the data which were being written to the media just
  55. * before the power cut happened, and the upper layers (e.g., UBIFS) are
  56. * supposed to handle such data losses (e.g., by using the FS journal).
  57. *
  58. * When UBI detects a corruption (CRC-32 mismatch) in a PEB, and it looks like
  59. * the reason is a power cut, UBI puts this PEB to the @erase list, and all
  60. * PEBs in the @erase list are scheduled for erasure later.
  61. *
  62. * 2. Unexpected corruptions which are not caused by power cuts. During
  63. * attaching, such PEBs are put to the @corr list and UBI preserves them.
  64. * Obviously, this lessens the amount of available PEBs, and if at some point
  65. * UBI runs out of free PEBs, it switches to R/O mode. UBI also loudly informs
  66. * about such PEBs every time the MTD device is attached.
  67. *
  68. * However, it is difficult to reliably distinguish between these types of
  69. * corruptions and UBI's strategy is as follows (in case of attaching by
  70. * scanning). UBI assumes corruption type 2 if the VID header is corrupted and
  71. * the data area does not contain all 0xFFs, and there were no bit-flips or
  72. * integrity errors (e.g., ECC errors in case of NAND) while reading the data
  73. * area. Otherwise UBI assumes corruption type 1. So the decision criteria
  74. * are as follows.
  75. * o If the data area contains only 0xFFs, there are no data, and it is safe
  76. * to just erase this PEB - this is corruption type 1.
  77. * o If the data area has bit-flips or data integrity errors (ECC errors on
  78. * NAND), it is probably a PEB which was being erased when power cut
  79. * happened, so this is corruption type 1. However, this is just a guess,
  80. * which might be wrong.
  81. * o Otherwise this is corruption type 2.
  82. */
  83. #include <linux/err.h>
  84. #include <linux/slab.h>
  85. #include <linux/crc32.h>
  86. #include <linux/math64.h>
  87. #include <linux/random.h>
  88. #include "ubi.h"
  89. static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai);
  90. /* Temporary variables used during scanning */
  91. static struct ubi_ec_hdr *ech;
  92. static struct ubi_vid_hdr *vidh;
  93. /**
  94. * add_to_list - add physical eraseblock to a list.
  95. * @ai: attaching information
  96. * @pnum: physical eraseblock number to add
  97. * @vol_id: the last used volume id for the PEB
  98. * @lnum: the last used LEB number for the PEB
  99. * @ec: erase counter of the physical eraseblock
  100. * @to_head: if not zero, add to the head of the list
  101. * @list: the list to add to
  102. *
  103. * This function allocates a 'struct ubi_ainf_peb' object for physical
  104. * eraseblock @pnum and adds it to the "free", "erase", or "alien" lists.
  105. * It stores the @lnum and @vol_id alongside, which can both be
  106. * %UBI_UNKNOWN if they are not available, not readable, or not assigned.
  107. * If @to_head is not zero, PEB will be added to the head of the list, which
  108. * basically means it will be processed first later. E.g., we add corrupted
  109. * PEBs (corrupted due to power cuts) to the head of the erase list to make
  110. * sure we erase them first and get rid of corruptions ASAP. This function
  111. * returns zero in case of success and a negative error code in case of
  112. * failure.
  113. */
  114. static int add_to_list(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum, int vol_id,
  115. int lnum, int ec, int to_head, struct list_head *list)
  116. {
  117. struct ubi_ainf_peb *aeb;
  118. if (list == &ai->free) {
  119. dbg_bld("add to free: PEB %d, EC %d", pnum, ec);
  120. } else if (list == &ai->erase) {
  121. dbg_bld("add to erase: PEB %d, EC %d", pnum, ec);
  122. } else if (list == &ai->alien) {
  123. dbg_bld("add to alien: PEB %d, EC %d", pnum, ec);
  124. ai->alien_peb_count += 1;
  125. #ifdef CONFIG_MTD_UBI_LOWPAGE_BACKUP
  126. } else if (list == &ai->waiting) {
  127. dbg_bld("add to waiting: PEB %d, EC %d", pnum, ec);
  128. #endif
  129. } else
  130. BUG();
  131. aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
  132. if (!aeb)
  133. return -ENOMEM;
  134. aeb->pnum = pnum;
  135. aeb->vol_id = vol_id;
  136. aeb->lnum = lnum;
  137. aeb->ec = ec;
  138. if (to_head)
  139. list_add(&aeb->u.list, list);
  140. else
  141. list_add_tail(&aeb->u.list, list);
  142. return 0;
  143. }
  144. /**
  145. * add_corrupted - add a corrupted physical eraseblock.
  146. * @ai: attaching information
  147. * @pnum: physical eraseblock number to add
  148. * @ec: erase counter of the physical eraseblock
  149. *
  150. * This function allocates a 'struct ubi_ainf_peb' object for a corrupted
  151. * physical eraseblock @pnum and adds it to the 'corr' list. The corruption
  152. * was presumably not caused by a power cut. Returns zero in case of success
  153. * and a negative error code in case of failure.
  154. */
  155. static int add_corrupted(struct ubi_attach_info *ai, int pnum, int ec)
  156. {
  157. struct ubi_ainf_peb *aeb;
  158. dbg_bld("add to corrupted: PEB %d, EC %d", pnum, ec);
  159. aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
  160. if (!aeb)
  161. return -ENOMEM;
  162. ai->corr_peb_count += 1;
  163. aeb->pnum = pnum;
  164. aeb->ec = ec;
  165. list_add(&aeb->u.list, &ai->corr);
  166. return 0;
  167. }
  168. /**
  169. * validate_vid_hdr - check volume identifier header.
  170. * @vid_hdr: the volume identifier header to check
  171. * @av: information about the volume this logical eraseblock belongs to
  172. * @pnum: physical eraseblock number the VID header came from
  173. *
  174. * This function checks that data stored in @vid_hdr is consistent. Returns
  175. * non-zero if an inconsistency was found and zero if not.
  176. *
  177. * Note, UBI does sanity check of everything it reads from the flash media.
  178. * Most of the checks are done in the I/O sub-system. Here we check that the
  179. * information in the VID header is consistent to the information in other VID
  180. * headers of the same volume.
  181. */
  182. static int validate_vid_hdr(const struct ubi_vid_hdr *vid_hdr,
  183. const struct ubi_ainf_volume *av, int pnum)
  184. {
  185. int vol_type = vid_hdr->vol_type;
  186. int vol_id = be32_to_cpu(vid_hdr->vol_id);
  187. int used_ebs = be32_to_cpu(vid_hdr->used_ebs);
  188. int data_pad = be32_to_cpu(vid_hdr->data_pad);
  189. if (av->leb_count != 0) {
  190. int av_vol_type;
  191. /*
  192. * This is not the first logical eraseblock belonging to this
  193. * volume. Ensure that the data in its VID header is consistent
  194. * to the data in previous logical eraseblock headers.
  195. */
  196. if (vol_id != av->vol_id) {
  197. ubi_err("inconsistent vol_id");
  198. goto bad;
  199. }
  200. if (av->vol_type == UBI_STATIC_VOLUME)
  201. av_vol_type = UBI_VID_STATIC;
  202. else
  203. av_vol_type = UBI_VID_DYNAMIC;
  204. if (vol_type != av_vol_type) {
  205. ubi_err("inconsistent vol_type");
  206. goto bad;
  207. }
  208. if (used_ebs != av->used_ebs) {
  209. ubi_err("inconsistent used_ebs");
  210. goto bad;
  211. }
  212. if (data_pad != av->data_pad) {
  213. ubi_err("inconsistent data_pad");
  214. goto bad;
  215. }
  216. }
  217. return 0;
  218. bad:
  219. ubi_err("inconsistent VID header at PEB %d", pnum);
  220. ubi_dump_vid_hdr(vid_hdr);
  221. ubi_dump_av(av);
  222. return -EINVAL;
  223. }
  224. /**
  225. * add_volume - add volume to the attaching information.
  226. * @ai: attaching information
  227. * @vol_id: ID of the volume to add
  228. * @pnum: physical eraseblock number
  229. * @vid_hdr: volume identifier header
  230. *
  231. * If the volume corresponding to the @vid_hdr logical eraseblock is already
  232. * present in the attaching information, this function does nothing. Otherwise
  233. * it adds corresponding volume to the attaching information. Returns a pointer
  234. * to the allocated "av" object in case of success and a negative error code in
  235. * case of failure.
  236. */
  237. static struct ubi_ainf_volume *add_volume(struct ubi_attach_info *ai,
  238. int vol_id, int pnum,
  239. const struct ubi_vid_hdr *vid_hdr)
  240. {
  241. struct ubi_ainf_volume *av;
  242. struct rb_node **p = &ai->volumes.rb_node, *parent = NULL;
  243. ubi_assert(vol_id == be32_to_cpu(vid_hdr->vol_id));
  244. /* Walk the volume RB-tree to look if this volume is already present */
  245. while (*p) {
  246. parent = *p;
  247. av = rb_entry(parent, struct ubi_ainf_volume, rb);
  248. if (vol_id == av->vol_id)
  249. return av;
  250. if (vol_id > av->vol_id)
  251. p = &(*p)->rb_left;
  252. else
  253. p = &(*p)->rb_right;
  254. }
  255. /* The volume is absent - add it */
  256. av = kmalloc(sizeof(struct ubi_ainf_volume), GFP_KERNEL);
  257. if (!av)
  258. return ERR_PTR(-ENOMEM);
  259. av->highest_lnum = av->leb_count = 0;
  260. av->vol_id = vol_id;
  261. av->root = RB_ROOT;
  262. av->used_ebs = be32_to_cpu(vid_hdr->used_ebs);
  263. av->data_pad = be32_to_cpu(vid_hdr->data_pad);
  264. av->compat = vid_hdr->compat;
  265. av->vol_type = vid_hdr->vol_type == UBI_VID_DYNAMIC ? UBI_DYNAMIC_VOLUME
  266. : UBI_STATIC_VOLUME;
  267. if (vol_id > ai->highest_vol_id)
  268. ai->highest_vol_id = vol_id;
  269. rb_link_node(&av->rb, parent, p);
  270. rb_insert_color(&av->rb, &ai->volumes);
  271. ai->vols_found += 1;
  272. dbg_bld("added volume %d", vol_id);
  273. return av;
  274. }
/**
 * ubi_compare_lebs - find out which logical eraseblock is newer.
 * @ubi: UBI device description object
 * @aeb: first logical eraseblock to compare
 * @pnum: physical eraseblock number of the second logical eraseblock to
 * compare
 * @vid_hdr: volume identifier header of the second logical eraseblock
 *
 * This function compares 2 copies of a LEB and informs which one is newer. In
 * case of success this function returns a positive value, in case of failure, a
 * negative error code is returned. The success return codes use the following
 * bits:
 * o bit 0 is cleared: the first PEB (described by @aeb) is newer than the
 * second PEB (described by @pnum and @vid_hdr);
 * o bit 0 is set: the second PEB is newer;
 * o bit 1 is cleared: no bit-flips were detected in the newer LEB;
 * o bit 1 is set: bit-flips were detected in the newer LEB;
 * o bit 2 is cleared: the older LEB is not corrupted;
 * o bit 2 is set: the older LEB is corrupted.
 */
int ubi_compare_lebs(struct ubi_device *ubi, const struct ubi_ainf_peb *aeb,
		     int pnum, const struct ubi_vid_hdr *vid_hdr)
{
	int len, err, second_is_newer, bitflips = 0, corrupted = 0;
	uint32_t data_crc, crc;
	struct ubi_vid_hdr *vh = NULL;
	unsigned long long sqnum2 = be64_to_cpu(vid_hdr->sqnum);

	if (sqnum2 == aeb->sqnum) {
		/*
		 * This must be a really ancient UBI image which has been
		 * created before sequence numbers support has been added. At
		 * that times we used 32-bit LEB versions stored in logical
		 * eraseblocks. That was before UBI got into mainline. We do not
		 * support these images anymore. Well, those images still work,
		 * but only if no unclean reboots happened.
		 */
		ubi_err("unsupported on-flash UBI format");
		return -EINVAL;
	}

	/* Obviously the LEB with lower sequence counter is older */
	second_is_newer = (sqnum2 > aeb->sqnum);

	/*
	 * Now we know which copy is newer. If the copy flag of the PEB with
	 * newer version is not set, then we just return, otherwise we have to
	 * check data CRC. For the second PEB we already have the VID header,
	 * for the first one - we'll need to re-read it from flash.
	 *
	 * Note: this may be optimized so that we wouldn't read twice.
	 */
	if (second_is_newer) {
		if (!vid_hdr->copy_flag) {
			/* It is not a copy, so it is newer */
			dbg_bld("second PEB %d is newer, copy_flag is unset",
				pnum);
			return 1;
		}
	} else {
		if (!aeb->copy_flag) {
			/* It is not a copy, so it is newer */
			dbg_bld("first PEB %d is newer, copy_flag is unset",
				pnum);
			return bitflips << 1;
		}

		/* Re-read the first PEB's VID header to get its data CRC */
		vh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
		if (!vh)
			return -ENOMEM;

		pnum = aeb->pnum;
		err = ubi_io_read_vid_hdr(ubi, pnum, vh, 0);
		if (err) {
			if (err == UBI_IO_BITFLIPS)
				bitflips = 1;
			else {
				ubi_err("VID of PEB %d header is bad, but it was OK earlier, err %d",
					pnum, err);
				if (err > 0)
					err = -EIO;

				goto out_free_vidh;
			}
		}

		/* From here on, @vid_hdr describes the newer (first) PEB */
		vid_hdr = vh;
	}

	/* Read the data of the copy and check the CRC */
	len = be32_to_cpu(vid_hdr->data_size);

	/*
	 * The PEB buffer is either shared between all UBI devices
	 * (CONFIG_UBI_SHARE_BUFFER, guarded by the global ubi_buf_mutex) or
	 * per-device (guarded by @ubi->buf_mutex).
	 */
#ifdef CONFIG_UBI_SHARE_BUFFER
	mutex_lock(&ubi_buf_mutex);
#else
	mutex_lock(&ubi->buf_mutex);
#endif
	err = ubi_io_read_data(ubi, ubi->peb_buf, pnum, 0, len);
	if (err && err != UBI_IO_BITFLIPS && !mtd_is_eccerr(err))
		goto out_unlock;

	data_crc = be32_to_cpu(vid_hdr->data_crc);
	crc = crc32(UBI_CRC32_INIT, ubi->peb_buf, len);
	if (crc != data_crc) {
		dbg_bld("PEB %d CRC error: calculated %#08x, must be %#08x",
			pnum, crc, data_crc);
		corrupted = 1;
		bitflips = 0;
		/* The supposedly newer copy is corrupted - the other one wins */
		second_is_newer = !second_is_newer;
	} else {
		dbg_bld("PEB %d CRC is OK", pnum);
		bitflips |= !!err;
	}
#ifdef CONFIG_UBI_SHARE_BUFFER
	mutex_unlock(&ubi_buf_mutex);
#else
	mutex_unlock(&ubi->buf_mutex);
#endif

	ubi_free_vid_hdr(ubi, vh);

	if (second_is_newer)
		dbg_bld("second PEB %d is newer, copy_flag is set", pnum);
	else
		dbg_bld("first PEB %d is newer, copy_flag is set", pnum);

	return second_is_newer | (bitflips << 1) | (corrupted << 2);

out_unlock:
#ifdef CONFIG_UBI_SHARE_BUFFER
	mutex_unlock(&ubi_buf_mutex);
#else
	mutex_unlock(&ubi->buf_mutex);
#endif
out_free_vidh:
	ubi_free_vid_hdr(ubi, vh);
	return err;
}
/**
 * ubi_add_to_av - add used physical eraseblock to the attaching information.
 * @ubi: UBI device description object
 * @ai: attaching information
 * @pnum: the physical eraseblock number
 * @ec: erase counter
 * @vid_hdr: the volume identifier header
 * @bitflips: if bit-flips were detected when this physical eraseblock was read
 *
 * This function adds information about a used physical eraseblock to the
 * 'used' tree of the corresponding volume. The function is rather complex
 * because it has to handle cases when this is not the first physical
 * eraseblock belonging to the same logical eraseblock, and the newer one has
 * to be picked, while the older one has to be dropped. This function returns
 * zero in case of success and a negative error code in case of failure.
 */
int ubi_add_to_av(struct ubi_device *ubi, struct ubi_attach_info *ai, int pnum,
		  int ec, const struct ubi_vid_hdr *vid_hdr, int bitflips)
{
	int err, vol_id, lnum;
	unsigned long long sqnum;
	struct ubi_ainf_volume *av;
	struct ubi_ainf_peb *aeb;
	struct rb_node **p, *parent = NULL;

	vol_id = be32_to_cpu(vid_hdr->vol_id);
	lnum = be32_to_cpu(vid_hdr->lnum);
	sqnum = be64_to_cpu(vid_hdr->sqnum);

	dbg_bld("PEB %d, LEB %d:%d, EC %d, sqnum %llu, bitflips %d",
		pnum, vol_id, lnum, ec, sqnum, bitflips);

	/* Returns the existing volume object or creates a new one */
	av = add_volume(ai, vol_id, pnum, vid_hdr);
	if (IS_ERR(av))
		return PTR_ERR(av);

	/* Track the highest sequence number seen on the whole device */
	if (ai->max_sqnum < sqnum)
		ai->max_sqnum = sqnum;

	/*
	 * Walk the RB-tree of logical eraseblocks of volume @vol_id to look
	 * if this is the first instance of this logical eraseblock or not.
	 */
	p = &av->root.rb_node;
	while (*p) {
		int cmp_res;

		parent = *p;
		aeb = rb_entry(parent, struct ubi_ainf_peb, u.rb);
		if (lnum != aeb->lnum) {
			if (lnum < aeb->lnum)
				p = &(*p)->rb_left;
			else
				p = &(*p)->rb_right;
			continue;
		}

		/*
		 * There is already a physical eraseblock describing the same
		 * logical eraseblock present.
		 */
		dbg_bld("this LEB already exists: PEB %d, sqnum %llu, EC %d",
			aeb->pnum, aeb->sqnum, aeb->ec);

		/*
		 * Make sure that the logical eraseblocks have different
		 * sequence numbers. Otherwise the image is bad.
		 *
		 * However, if the sequence number is zero, we assume it must
		 * be an ancient UBI image from the era when UBI did not have
		 * sequence numbers. We still can attach these images, unless
		 * there is a need to distinguish between old and new
		 * eraseblocks, in which case we'll refuse the image in
		 * 'ubi_compare_lebs()'. In other words, we attach old clean
		 * images, but refuse attaching old images with duplicated
		 * logical eraseblocks because there was an unclean reboot.
		 */
		if (aeb->sqnum == sqnum && sqnum != 0) {
			ubi_err("two LEBs with same sequence number %llu",
				sqnum);
			ubi_dump_aeb(aeb, 0);
			ubi_dump_vid_hdr(vid_hdr);
			return -EINVAL;
		}

		/*
		 * Now we have to drop the older one and preserve the newer
		 * one.
		 */
		cmp_res = ubi_compare_lebs(ubi, aeb, pnum, vid_hdr);
		if (cmp_res < 0)
			return cmp_res;

		if (cmp_res & 1) {
			/*
			 * This logical eraseblock is newer than the one
			 * found earlier.
			 */
			err = validate_vid_hdr(vid_hdr, av, pnum);
			if (err)
				return err;

			/*
			 * Schedule the old copy for erasure; bit 2 of
			 * cmp_res means it is corrupted, so erase it first.
			 */
			err = add_to_list(ubi, ai, aeb->pnum, aeb->vol_id,
					  aeb->lnum, aeb->ec, cmp_res & 4,
					  &ai->erase);
			if (err)
				return err;

			/* Reuse the tree node in place for the new copy */
			aeb->ec = ec;
			aeb->pnum = pnum;
			aeb->vol_id = vol_id;
			aeb->lnum = lnum;
			/* bit 1 of cmp_res: bit-flips seen in the newer LEB */
			aeb->scrub = ((cmp_res & 2) || bitflips);
			aeb->copy_flag = vid_hdr->copy_flag;
			aeb->sqnum = sqnum;

			if (av->highest_lnum == lnum)
				av->last_data_size =
					be32_to_cpu(vid_hdr->data_size);

			return 0;
		}
		/*
		 * This logical eraseblock is older than the one found
		 * previously.
		 */
		return add_to_list(ubi, ai, pnum, vol_id, lnum, ec,
				   cmp_res & 4, &ai->erase);
	}

	/*
	 * We've met this logical eraseblock for the first time, add it to the
	 * attaching information.
	 */

	err = validate_vid_hdr(vid_hdr, av, pnum);
	if (err)
		return err;

	aeb = kmem_cache_alloc(ai->aeb_slab_cache, GFP_KERNEL);
	if (!aeb)
		return -ENOMEM;

	aeb->ec = ec;
	aeb->pnum = pnum;
	aeb->vol_id = vol_id;
	aeb->lnum = lnum;
	aeb->scrub = bitflips;
	aeb->copy_flag = vid_hdr->copy_flag;
	aeb->sqnum = sqnum;

	if (av->highest_lnum <= lnum) {
		av->highest_lnum = lnum;
		av->last_data_size = be32_to_cpu(vid_hdr->data_size);
	}

	av->leb_count += 1;
	rb_link_node(&aeb->u.rb, parent, p);
	rb_insert_color(&aeb->u.rb, &av->root);
	return 0;
}
  540. /**
  541. * ubi_find_av - find volume in the attaching information.
  542. * @ai: attaching information
  543. * @vol_id: the requested volume ID
  544. *
  545. * This function returns a pointer to the volume description or %NULL if there
  546. * are no data about this volume in the attaching information.
  547. */
  548. struct ubi_ainf_volume *ubi_find_av(const struct ubi_attach_info *ai,
  549. int vol_id)
  550. {
  551. struct ubi_ainf_volume *av;
  552. struct rb_node *p = ai->volumes.rb_node;
  553. while (p) {
  554. av = rb_entry(p, struct ubi_ainf_volume, rb);
  555. if (vol_id == av->vol_id)
  556. return av;
  557. if (vol_id > av->vol_id)
  558. p = p->rb_left;
  559. else
  560. p = p->rb_right;
  561. }
  562. return NULL;
  563. }
  564. /**
  565. * ubi_remove_av - delete attaching information about a volume.
  566. * @ai: attaching information
  567. * @av: the volume attaching information to delete
  568. */
  569. void ubi_remove_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
  570. {
  571. struct rb_node *rb;
  572. struct ubi_ainf_peb *aeb;
  573. dbg_bld("remove attaching information about volume %d", av->vol_id);
  574. while ((rb = rb_first(&av->root))) {
  575. aeb = rb_entry(rb, struct ubi_ainf_peb, u.rb);
  576. rb_erase(&aeb->u.rb, &av->root);
  577. list_add_tail(&aeb->u.list, &ai->erase);
  578. }
  579. rb_erase(&av->rb, &ai->volumes);
  580. kfree(av);
  581. ai->vols_found -= 1;
  582. }
  583. /**
  584. * early_erase_peb - erase a physical eraseblock.
  585. * @ubi: UBI device description object
  586. * @ai: attaching information
  587. * @pnum: physical eraseblock number to erase;
  588. * @ec: erase counter value to write (%UBI_UNKNOWN if it is unknown)
  589. *
  590. * This function erases physical eraseblock 'pnum', and writes the erase
  591. * counter header to it. This function should only be used on UBI device
  592. * initialization stages, when the EBA sub-system had not been yet initialized.
  593. * This function returns zero in case of success and a negative error code in
  594. * case of failure.
  595. */
  596. static int early_erase_peb(struct ubi_device *ubi,
  597. const struct ubi_attach_info *ai, int pnum, int ec)
  598. {
  599. int err;
  600. struct ubi_ec_hdr *ec_hdr;
  601. if ((long long)ec >= UBI_MAX_ERASECOUNTER) {
  602. /*
  603. * Erase counter overflow. Upgrade UBI and use 64-bit
  604. * erase counters internally.
  605. */
  606. ubi_err("erase counter overflow at PEB %d, EC %d", pnum, ec);
  607. return -EINVAL;
  608. }
  609. ec_hdr = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
  610. if (!ec_hdr)
  611. return -ENOMEM;
  612. ec_hdr->ec = cpu_to_be64(ec);
  613. err = ubi_io_sync_erase(ubi, pnum, 0);
  614. if (err < 0)
  615. goto out_free;
  616. err = ubi_io_write_ec_hdr(ubi, pnum, ec_hdr);
  617. out_free:
  618. kfree(ec_hdr);
  619. return err;
  620. }
  621. /**
  622. * ubi_early_get_peb - get a free physical eraseblock.
  623. * @ubi: UBI device description object
  624. * @ai: attaching information
  625. *
  626. * This function returns a free physical eraseblock. It is supposed to be
  627. * called on the UBI initialization stages when the wear-leveling sub-system is
  628. * not initialized yet. This function picks a physical eraseblocks from one of
  629. * the lists, writes the EC header if it is needed, and removes it from the
  630. * list.
  631. *
  632. * This function returns a pointer to the "aeb" of the found free PEB in case
  633. * of success and an error code in case of failure.
  634. */
  635. struct ubi_ainf_peb *ubi_early_get_peb(struct ubi_device *ubi,
  636. struct ubi_attach_info *ai)
  637. {
  638. int err = 0;
  639. struct ubi_ainf_peb *aeb, *tmp_aeb;
  640. if (!list_empty(&ai->free)) {
  641. aeb = list_entry(ai->free.next, struct ubi_ainf_peb, u.list);
  642. list_del(&aeb->u.list);
  643. dbg_bld("return free PEB %d, EC %d", aeb->pnum, aeb->ec);
  644. return aeb;
  645. }
  646. /*
  647. * We try to erase the first physical eraseblock from the erase list
  648. * and pick it if we succeed, or try to erase the next one if not. And
  649. * so forth. We don't want to take care about bad eraseblocks here -
  650. * they'll be handled later.
  651. */
  652. list_for_each_entry_safe(aeb, tmp_aeb, &ai->erase, u.list) {
  653. if (aeb->ec == UBI_UNKNOWN)
  654. aeb->ec = ai->mean_ec;
  655. err = early_erase_peb(ubi, ai, aeb->pnum, aeb->ec+1);
  656. if (err)
  657. continue;
  658. aeb->ec += 1;
  659. list_del(&aeb->u.list);
  660. dbg_bld("return PEB %d, EC %d", aeb->pnum, aeb->ec);
  661. return aeb;
  662. }
  663. ubi_err("no free eraseblocks");
  664. return ERR_PTR(-ENOSPC);
  665. }
  666. /**
  667. * check_corruption - check the data area of PEB.
  668. * @ubi: UBI device description object
  669. * @vid_hdr: the (corrupted) VID header of this PEB
  670. * @pnum: the physical eraseblock number to check
  671. *
  672. * This is a helper function which is used to distinguish between VID header
  673. * corruptions caused by power cuts and other reasons. If the PEB contains only
  674. * 0xFF bytes in the data area, the VID header is most probably corrupted
  675. * because of a power cut (%0 is returned in this case). Otherwise, it was
  676. * probably corrupted for some other reasons (%1 is returned in this case). A
  677. * negative error code is returned if a read error occurred.
  678. *
  679. * If the corruption reason was a power cut, UBI can safely erase this PEB.
  680. * Otherwise, it should preserve it to avoid possibly destroying important
  681. * information.
  682. */
  683. static int check_corruption(struct ubi_device *ubi, struct ubi_vid_hdr *vid_hdr,
  684. int pnum)
  685. {
  686. int err;
  687. #ifdef CONFIG_UBI_SHARE_BUFFER
  688. mutex_lock(&ubi_buf_mutex);
  689. #else
  690. mutex_lock(&ubi->buf_mutex);
  691. #endif
  692. memset(ubi->peb_buf, 0x00, ubi->leb_size);
  693. err = ubi_io_read(ubi, ubi->peb_buf, pnum, ubi->leb_start, ubi->leb_size);
  694. if (err == UBI_IO_BITFLIPS || mtd_is_eccerr(err)) {
  695. /*
  696. * Bit-flips or integrity errors while reading the data area.
  697. * It is difficult to say for sure what type of corruption is
  698. * this, but presumably a power cut happened while this PEB was
  699. * erased, so it became unstable and corrupted, and should be
  700. * erased.
  701. */
  702. err = 0;
  703. goto out_unlock;
  704. }
  705. if (err)
  706. goto out_unlock;
  707. if (ubi_check_pattern(ubi->peb_buf, 0xFF, ubi->leb_size))
  708. goto out_unlock;
  709. ubi_err("PEB %d contains corrupted VID header, and the data does not contain all 0xFF",
  710. pnum);
  711. ubi_err("this may be a non-UBI PEB or a severe VID header corruption which requires manual inspection");
  712. ubi_dump_vid_hdr(vid_hdr);
  713. pr_err("hexdump of PEB %d offset %d, length %d",
  714. pnum, ubi->leb_start, ubi->leb_size);
  715. ubi_dbg_print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_OFFSET, 32, 1,
  716. ubi->peb_buf, ubi->leb_size, 1);
  717. err = 1;
  718. out_unlock:
  719. #ifdef CONFIG_UBI_SHARE_BUFFER
  720. mutex_unlock(&ubi_buf_mutex);
  721. #else
  722. mutex_unlock(&ubi->buf_mutex);
  723. #endif
  724. return err;
  725. }
  726. /**
  727. * scan_peb - scan and process UBI headers of a PEB.
  728. * @ubi: UBI device description object
  729. * @ai: attaching information
  730. * @pnum: the physical eraseblock number
  731. * @vid: The volume ID of the found volume will be stored in this pointer
  732. * @sqnum: The sqnum of the found volume will be stored in this pointer
  733. *
  734. * This function reads UBI headers of PEB @pnum, checks them, and adds
  735. * information about this PEB to the corresponding list or RB-tree in the
  736. * "attaching info" structure. Returns zero if the physical eraseblock was
  737. * successfully handled and a negative error code in case of failure.
  738. */
  739. static int scan_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
  740. int pnum, int *vid, unsigned long long *sqnum)
  741. {
  742. long long uninitialized_var(ec);
  743. int err, bitflips = 0, vol_id = -1, ec_err = 0;
  744. dbg_bld("scan PEB %d", pnum);
  745. /* Skip bad physical eraseblocks */
  746. err = ubi_io_is_bad(ubi, pnum);
  747. if (err < 0)
  748. return err;
  749. else if (err) {
  750. ai->bad_peb_count += 1;
  751. return 0;
  752. }
  753. err = ubi_io_read_ec_hdr(ubi, pnum, ech, 0);
  754. if (err < 0)
  755. return err;
  756. switch (err) {
  757. case 0:
  758. break;
  759. case UBI_IO_BITFLIPS:
  760. bitflips = 1;
  761. break;
  762. case UBI_IO_FF:
  763. ai->empty_peb_count += 1;
  764. return add_to_list(ubi, ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
  765. UBI_UNKNOWN, 0, &ai->erase);
  766. case UBI_IO_FF_BITFLIPS:
  767. ai->empty_peb_count += 1;
  768. return add_to_list(ubi, ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
  769. UBI_UNKNOWN, 1, &ai->erase);
  770. case UBI_IO_BAD_HDR_EBADMSG:
  771. case UBI_IO_BAD_HDR:
  772. /*
  773. * We have to also look at the VID header, possibly it is not
  774. * corrupted. Set %bitflips flag in order to make this PEB be
  775. * moved and EC be re-created.
  776. */
  777. ec_err = err;
  778. ec = UBI_UNKNOWN;
  779. bitflips = 1;
  780. break;
  781. default:
  782. ubi_err("'ubi_io_read_ec_hdr()' returned unknown code %d", err);
  783. return -EINVAL;
  784. }
  785. if (!ec_err) {
  786. int image_seq;
  787. /* Make sure UBI version is OK */
  788. if (ech->version != UBI_VERSION) {
  789. ubi_err("this UBI version is %d, image version is %d",
  790. UBI_VERSION, (int)ech->version);
  791. return -EINVAL;
  792. }
  793. ec = be64_to_cpu(ech->ec);
  794. if (ec > UBI_MAX_ERASECOUNTER) {
  795. /*
  796. * Erase counter overflow. The EC headers have 64 bits
  797. * reserved, but we anyway make use of only 31 bit
  798. * values, as this seems to be enough for any existing
  799. * flash. Upgrade UBI and use 64-bit erase counters
  800. * internally.
  801. */
  802. ubi_err("erase counter overflow, max is %d",
  803. UBI_MAX_ERASECOUNTER);
  804. ubi_dump_ec_hdr(ech);
  805. return -EINVAL;
  806. }
  807. /*
  808. * Make sure that all PEBs have the same image sequence number.
  809. * This allows us to detect situations when users flash UBI
  810. * images incorrectly, so that the flash has the new UBI image
  811. * and leftovers from the old one. This feature was added
  812. * relatively recently, and the sequence number was always
  813. * zero, because old UBI implementations always set it to zero.
  814. * For this reasons, we do not panic if some PEBs have zero
  815. * sequence number, while other PEBs have non-zero sequence
  816. * number.
  817. */
  818. image_seq = be32_to_cpu(ech->image_seq);
  819. if (!ubi->image_seq)
  820. ubi->image_seq = image_seq;
  821. if (image_seq && ubi->image_seq != image_seq) {
  822. ubi_err("bad image sequence number %d in PEB %d, expected %d",
  823. image_seq, pnum, ubi->image_seq);
  824. ubi_dump_ec_hdr(ech);
  825. return -EINVAL;
  826. }
  827. }
  828. /* OK, we've done with the EC header, let's look at the VID header */
  829. err = ubi_io_read_vid_hdr(ubi, pnum, vidh, 0);
  830. if (err < 0)
  831. return err;
  832. switch (err) {
  833. case 0:
  834. break;
  835. case UBI_IO_BITFLIPS:
  836. bitflips = 1;
  837. break;
  838. case UBI_IO_BAD_HDR_EBADMSG:
  839. if (ec_err == UBI_IO_BAD_HDR_EBADMSG)
  840. /*
  841. * Both EC and VID headers are corrupted and were read
  842. * with data integrity error, probably this is a bad
  843. * PEB, bit it is not marked as bad yet. This may also
  844. * be a result of power cut during erasure.
  845. */
  846. ai->maybe_bad_peb_count += 1;
  847. case UBI_IO_BAD_HDR:
  848. if (ec_err)
  849. /*
  850. * Both headers are corrupted. There is a possibility
  851. * that this a valid UBI PEB which has corresponding
  852. * LEB, but the headers are corrupted. However, it is
  853. * impossible to distinguish it from a PEB which just
  854. * contains garbage because of a power cut during erase
  855. * operation. So we just schedule this PEB for erasure.
  856. *
  857. * Besides, in case of NOR flash, we deliberately
  858. * corrupt both headers because NOR flash erasure is
  859. * slow and can start from the end.
  860. */
  861. err = 0;
  862. else
  863. /*
  864. * The EC was OK, but the VID header is corrupted. We
  865. * have to check what is in the data area.
  866. */
  867. err = check_corruption(ubi, vidh, pnum);
  868. if (err < 0)
  869. return err;
  870. else if (!err)
  871. /* This corruption is caused by a power cut */
  872. #ifdef CONFIG_MTD_UBI_LOWPAGE_BACKUP
  873. err = add_to_list(ubi, ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN, ec, 1, &ai->waiting);
  874. #else
  875. err = add_to_list(ubi, ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN, ec, 1, &ai->erase);
  876. #endif
  877. else
  878. /* This is an unexpected corruption */
  879. err = add_corrupted(ai, pnum, ec);
  880. if (err)
  881. return err;
  882. goto adjust_mean_ec;
  883. case UBI_IO_FF_BITFLIPS:
  884. err = add_to_list(ubi, ai, pnum, UBI_UNKNOWN, UBI_UNKNOWN,
  885. ec, 1, &ai->erase);
  886. if (err)
  887. return err;
  888. goto adjust_mean_ec;
  889. case UBI_IO_FF:
  890. if (ec_err || bitflips)
  891. err = add_to_list(ubi, ai, pnum, UBI_UNKNOWN,
  892. UBI_UNKNOWN, ec, 1, &ai->erase);
  893. else
  894. err = add_to_list(ubi, ai, pnum, UBI_UNKNOWN,
  895. UBI_UNKNOWN, ec, 0, &ai->free);
  896. if (err)
  897. return err;
  898. goto adjust_mean_ec;
  899. default:
  900. ubi_err("'ubi_io_read_vid_hdr()' returned unknown code %d",
  901. err);
  902. return -EINVAL;
  903. }
  904. vol_id = be32_to_cpu(vidh->vol_id);
  905. if (vid)
  906. *vid = vol_id;
  907. if (sqnum)
  908. *sqnum = be64_to_cpu(vidh->sqnum);
  909. if (vol_id > UBI_MAX_VOLUMES && vol_id != UBI_LAYOUT_VOLUME_ID) {
  910. int lnum = be32_to_cpu(vidh->lnum);
  911. /* Unsupported internal volume */
  912. switch (vidh->compat) {
  913. case UBI_COMPAT_DELETE:
  914. if (vol_id != UBI_FM_SB_VOLUME_ID
  915. && vol_id != UBI_FM_DATA_VOLUME_ID) {
  916. ubi_msg("\"delete\" compatible internal volume %d:%d found, will remove it",
  917. vol_id, lnum);
  918. }
  919. err = add_to_list(ubi, ai, pnum, vol_id, lnum,
  920. ec, 1, &ai->erase);
  921. if (err)
  922. return err;
  923. return 0;
  924. case UBI_COMPAT_RO:
  925. ubi_msg("read-only compatible internal volume %d:%d found, switch to read-only mode",
  926. vol_id, lnum);
  927. ubi->ro_mode = 1;
  928. break;
  929. case UBI_COMPAT_PRESERVE:
  930. ubi_msg("\"preserve\" compatible internal volume %d:%d found",
  931. vol_id, lnum);
  932. err = add_to_list(ubi, ai, pnum, vol_id, lnum,
  933. ec, 0, &ai->alien);
  934. if (err)
  935. return err;
  936. return 0;
  937. case UBI_COMPAT_REJECT:
  938. ubi_err("incompatible internal volume %d:%d found",
  939. vol_id, lnum);
  940. return -EINVAL;
  941. }
  942. }
  943. if (ec_err)
  944. ubi_warn("valid VID header but corrupted EC header at PEB %d",
  945. pnum);
  946. err = ubi_add_to_av(ubi, ai, pnum, ec, vidh, bitflips);
  947. if (err)
  948. return err;
  949. adjust_mean_ec:
  950. if (!ec_err) {
  951. ai->ec_sum += ec;
  952. ai->ec_count += 1;
  953. if (ec > ai->max_ec)
  954. ai->max_ec = ec;
  955. if (ec < ai->min_ec)
  956. ai->min_ec = ec;
  957. }
  958. return 0;
  959. }
  960. /**
  961. * late_analysis - analyze the overall situation with PEB.
  962. * @ubi: UBI device description object
  963. * @ai: attaching information
  964. *
  965. * This is a helper function which takes a look what PEBs we have after we
  966. * gather information about all of them ("ai" is compete). It decides whether
  967. * the flash is empty and should be formatted of whether there are too many
  968. * corrupted PEBs and we should not attach this MTD device. Returns zero if we
  969. * should proceed with attaching the MTD device, and %-EINVAL if we should not.
  970. */
  971. static int late_analysis(struct ubi_device *ubi, struct ubi_attach_info *ai)
  972. {
  973. struct ubi_ainf_peb *aeb;
  974. int max_corr, peb_count;
  975. peb_count = ubi->peb_count - ai->bad_peb_count - ai->alien_peb_count;
  976. max_corr = peb_count / 20 ?: 8;
  977. /*
  978. * Few corrupted PEBs is not a problem and may be just a result of
  979. * unclean reboots. However, many of them may indicate some problems
  980. * with the flash HW or driver.
  981. */
  982. if (ai->corr_peb_count) {
  983. ubi_err("%d PEBs are corrupted and preserved",
  984. ai->corr_peb_count);
  985. pr_err("Corrupted PEBs are:");
  986. list_for_each_entry(aeb, &ai->corr, u.list)
  987. pr_cont(" %d", aeb->pnum);
  988. pr_cont("\n");
  989. /*
  990. * If too many PEBs are corrupted, we refuse attaching,
  991. * otherwise, only print a warning.
  992. */
  993. if (ai->corr_peb_count >= max_corr) {
  994. ubi_err("too many corrupted PEBs, refusing");
  995. return -EINVAL;
  996. }
  997. }
  998. if (ai->empty_peb_count + ai->maybe_bad_peb_count == peb_count) {
  999. /*
  1000. * All PEBs are empty, or almost all - a couple PEBs look like
  1001. * they may be bad PEBs which were not marked as bad yet.
  1002. *
  1003. * This piece of code basically tries to distinguish between
  1004. * the following situations:
  1005. *
  1006. * 1. Flash is empty, but there are few bad PEBs, which are not
  1007. * marked as bad so far, and which were read with error. We
  1008. * want to go ahead and format this flash. While formatting,
  1009. * the faulty PEBs will probably be marked as bad.
  1010. *
  1011. * 2. Flash contains non-UBI data and we do not want to format
  1012. * it and destroy possibly important information.
  1013. */
  1014. if (ai->maybe_bad_peb_count <= 2) {
  1015. ai->is_empty = 1;
  1016. ubi_msg("empty MTD device detected");
  1017. get_random_bytes(&ubi->image_seq,
  1018. sizeof(ubi->image_seq));
  1019. } else {
  1020. ubi_err("MTD device is not UBI-formatted and possibly contains non-UBI data - refusing it");
  1021. return -EINVAL;
  1022. }
  1023. }
  1024. return 0;
  1025. }
  1026. /**
  1027. * destroy_av - free volume attaching information.
  1028. * @av: volume attaching information
  1029. * @ai: attaching information
  1030. *
  1031. * This function destroys the volume attaching information.
  1032. */
  1033. static void destroy_av(struct ubi_attach_info *ai, struct ubi_ainf_volume *av)
  1034. {
  1035. struct ubi_ainf_peb *aeb;
  1036. struct rb_node *this = av->root.rb_node;
  1037. while (this) {
  1038. if (this->rb_left)
  1039. this = this->rb_left;
  1040. else if (this->rb_right)
  1041. this = this->rb_right;
  1042. else {
  1043. aeb = rb_entry(this, struct ubi_ainf_peb, u.rb);
  1044. this = rb_parent(this);
  1045. if (this) {
  1046. if (this->rb_left == &aeb->u.rb)
  1047. this->rb_left = NULL;
  1048. else
  1049. this->rb_right = NULL;
  1050. }
  1051. kmem_cache_free(ai->aeb_slab_cache, aeb);
  1052. }
  1053. }
  1054. kfree(av);
  1055. }
  1056. /**
  1057. * destroy_ai - destroy attaching information.
  1058. * @ai: attaching information
  1059. */
  1060. static void destroy_ai(struct ubi_attach_info *ai)
  1061. {
  1062. struct ubi_ainf_peb *aeb, *aeb_tmp;
  1063. struct ubi_ainf_volume *av;
  1064. struct rb_node *rb;
  1065. #ifdef CONFIG_MTD_UBI_LOWPAGE_BACKUP
  1066. list_for_each_entry_safe(aeb, aeb_tmp, &ai->waiting, u.list) {
  1067. list_del(&aeb->u.list);
  1068. kmem_cache_free(ai->aeb_slab_cache, aeb);
  1069. }
  1070. #endif
  1071. list_for_each_entry_safe(aeb, aeb_tmp, &ai->alien, u.list) {
  1072. list_del(&aeb->u.list);
  1073. kmem_cache_free(ai->aeb_slab_cache, aeb);
  1074. }
  1075. list_for_each_entry_safe(aeb, aeb_tmp, &ai->erase, u.list) {
  1076. list_del(&aeb->u.list);
  1077. kmem_cache_free(ai->aeb_slab_cache, aeb);
  1078. }
  1079. list_for_each_entry_safe(aeb, aeb_tmp, &ai->corr, u.list) {
  1080. list_del(&aeb->u.list);
  1081. kmem_cache_free(ai->aeb_slab_cache, aeb);
  1082. }
  1083. list_for_each_entry_safe(aeb, aeb_tmp, &ai->free, u.list) {
  1084. list_del(&aeb->u.list);
  1085. kmem_cache_free(ai->aeb_slab_cache, aeb);
  1086. }
  1087. /* Destroy the volume RB-tree */
  1088. rb = ai->volumes.rb_node;
  1089. while (rb) {
  1090. if (rb->rb_left)
  1091. rb = rb->rb_left;
  1092. else if (rb->rb_right)
  1093. rb = rb->rb_right;
  1094. else {
  1095. av = rb_entry(rb, struct ubi_ainf_volume, rb);
  1096. rb = rb_parent(rb);
  1097. if (rb) {
  1098. if (rb->rb_left == &av->rb)
  1099. rb->rb_left = NULL;
  1100. else
  1101. rb->rb_right = NULL;
  1102. }
  1103. destroy_av(ai, av);
  1104. }
  1105. }
  1106. if (ai->aeb_slab_cache)
  1107. kmem_cache_destroy(ai->aeb_slab_cache);
  1108. kfree(ai);
  1109. }
  1110. /**
  1111. * scan_all - scan entire MTD device.
  1112. * @ubi: UBI device description object
  1113. * @ai: attach info object
  1114. * @start: start scanning at this PEB
  1115. *
  1116. * This function does full scanning of an MTD device and returns complete
  1117. * information about it in form of a "struct ubi_attach_info" object. In case
  1118. * of failure, an error code is returned.
  1119. */
  1120. static int scan_all(struct ubi_device *ubi, struct ubi_attach_info *ai,
  1121. int start)
  1122. {
  1123. int err, pnum;
  1124. struct rb_node *rb1, *rb2;
  1125. struct ubi_ainf_volume *av;
  1126. struct ubi_ainf_peb *aeb;
  1127. err = -ENOMEM;
  1128. ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
  1129. if (!ech)
  1130. return err;
  1131. vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
  1132. if (!vidh)
  1133. goto out_ech;
  1134. for (pnum = start; pnum < ubi->peb_count; pnum++) {
  1135. cond_resched();
  1136. dbg_gen("process PEB %d", pnum);
  1137. err = scan_peb(ubi, ai, pnum, NULL, NULL);
  1138. if (err < 0)
  1139. goto out_vidh;
  1140. }
  1141. ubi_msg("scanning is finished");
  1142. /* Calculate mean erase counter */
  1143. if (ai->ec_count)
  1144. ai->mean_ec = div_u64(ai->ec_sum, ai->ec_count);
  1145. err = late_analysis(ubi, ai);
  1146. if (err)
  1147. goto out_vidh;
  1148. /*
  1149. * In case of unknown erase counter we use the mean erase counter
  1150. * value.
  1151. */
  1152. ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
  1153. ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
  1154. if (aeb->ec == UBI_UNKNOWN) {
  1155. aeb->ec = ai->mean_ec;
  1156. }
  1157. }
  1158. list_for_each_entry(aeb, &ai->free, u.list) {
  1159. if (aeb->ec == UBI_UNKNOWN) {
  1160. aeb->ec = ai->mean_ec;
  1161. }
  1162. }
  1163. list_for_each_entry(aeb, &ai->corr, u.list)
  1164. if (aeb->ec == UBI_UNKNOWN) {
  1165. aeb->ec = ai->mean_ec;
  1166. }
  1167. list_for_each_entry(aeb, &ai->erase, u.list)
  1168. if (aeb->ec == UBI_UNKNOWN) {
  1169. aeb->ec = ai->mean_ec;
  1170. }
  1171. err = self_check_ai(ubi, ai);
  1172. if (err)
  1173. goto out_vidh;
  1174. ubi_free_vid_hdr(ubi, vidh);
  1175. kfree(ech);
  1176. return 0;
  1177. out_vidh:
  1178. ubi_free_vid_hdr(ubi, vidh);
  1179. out_ech:
  1180. kfree(ech);
  1181. return err;
  1182. }
  1183. #ifdef CONFIG_MTD_UBI_FASTMAP
  1184. /**
  1185. * scan_fastmap - try to find a fastmap and attach from it.
  1186. * @ubi: UBI device description object
  1187. * @ai: attach info object
  1188. *
  1189. * Returns 0 on success, negative return values indicate an internal
  1190. * error.
  1191. * UBI_NO_FASTMAP denotes that no fastmap was found.
  1192. * UBI_BAD_FASTMAP denotes that the found fastmap was invalid.
  1193. */
  1194. static int scan_fast(struct ubi_device *ubi, struct ubi_attach_info *ai)
  1195. {
  1196. int err, pnum, fm_anchor = -1;
  1197. unsigned long long max_sqnum = 0;
  1198. err = -ENOMEM;
  1199. ech = kzalloc(ubi->ec_hdr_alsize, GFP_KERNEL);
  1200. if (!ech)
  1201. goto out;
  1202. vidh = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
  1203. if (!vidh)
  1204. goto out_ech;
  1205. for (pnum = 0; pnum < UBI_FM_MAX_START; pnum++) {
  1206. int vol_id = -1;
  1207. unsigned long long sqnum = -1;
  1208. cond_resched();
  1209. dbg_gen("process PEB %d", pnum);
  1210. err = scan_peb(ubi, ai, pnum, &vol_id, &sqnum);
  1211. if (err < 0)
  1212. goto out_vidh;
  1213. if (vol_id == UBI_FM_SB_VOLUME_ID && sqnum > max_sqnum) {
  1214. max_sqnum = sqnum;
  1215. fm_anchor = pnum;
  1216. }
  1217. }
  1218. ubi_free_vid_hdr(ubi, vidh);
  1219. kfree(ech);
  1220. if (fm_anchor < 0)
  1221. return UBI_NO_FASTMAP;
  1222. return ubi_scan_fastmap(ubi, ai, fm_anchor);
  1223. out_vidh:
  1224. ubi_free_vid_hdr(ubi, vidh);
  1225. out_ech:
  1226. kfree(ech);
  1227. out:
  1228. return err;
  1229. }
  1230. #endif
  1231. static struct ubi_attach_info *alloc_ai(const char *slab_name)
  1232. {
  1233. struct ubi_attach_info *ai;
  1234. ai = kzalloc(sizeof(struct ubi_attach_info), GFP_KERNEL);
  1235. if (!ai)
  1236. return ai;
  1237. INIT_LIST_HEAD(&ai->corr);
  1238. INIT_LIST_HEAD(&ai->free);
  1239. INIT_LIST_HEAD(&ai->erase);
  1240. INIT_LIST_HEAD(&ai->alien);
  1241. #ifdef CONFIG_MTD_UBI_LOWPAGE_BACKUP
  1242. INIT_LIST_HEAD(&ai->waiting);
  1243. #endif
  1244. ai->volumes = RB_ROOT;
  1245. ai->aeb_slab_cache = kmem_cache_create(slab_name,
  1246. sizeof(struct ubi_ainf_peb),
  1247. 0, 0, NULL);
  1248. if (!ai->aeb_slab_cache) {
  1249. kfree(ai);
  1250. ai = NULL;
  1251. }
  1252. return ai;
  1253. }
  1254. /**
  1255. * ubi_attach - attach an MTD device.
  1256. * @ubi: UBI device descriptor
  1257. * @force_scan: if set to non-zero attach by scanning
  1258. *
  1259. * This function returns zero in case of success and a negative error code in
  1260. * case of failure.
  1261. */
  1262. int ubi_attach(struct ubi_device *ubi, int force_scan)
  1263. {
  1264. int err;
  1265. struct ubi_attach_info *ai;
  1266. unsigned long long time = sched_clock();
  1267. ai = alloc_ai("ubi_aeb_slab_cache");
  1268. if (!ai)
  1269. return -ENOMEM;
  1270. #ifdef CONFIG_MTD_UBI_FASTMAP
  1271. /* On small flash devices we disable fastmap in any case. */
  1272. if ((int)mtd_div_by_eb(ubi->mtd->size, ubi->mtd) <= UBI_FM_MAX_START) {
  1273. ubi->fm_disabled = 1;
  1274. force_scan = 1;
  1275. }
  1276. if (force_scan)
  1277. err = scan_all(ubi, ai, 0);
  1278. else {
  1279. err = scan_fast(ubi, ai);
  1280. if (err > 0) {
  1281. if (err != UBI_NO_FASTMAP) {
  1282. destroy_ai(ai);
  1283. ai = alloc_ai("ubi_aeb_slab_cache2");
  1284. if (!ai)
  1285. return -ENOMEM;
  1286. err = scan_all(ubi, ai, 0);
  1287. } else {
  1288. err = scan_all(ubi, ai, UBI_FM_MAX_START);
  1289. }
  1290. }
  1291. }
  1292. #else
  1293. err = scan_all(ubi, ai, 0);
  1294. #endif
  1295. time = sched_clock() - time;
  1296. do_div(time, 1000000);
  1297. ubi_msg("scan done in %lld(ms)\n", time);
  1298. if (err)
  1299. goto out_ai;
  1300. ubi->bad_peb_count = ai->bad_peb_count;
  1301. ubi->good_peb_count = ubi->peb_count - ubi->bad_peb_count;
  1302. ubi->corr_peb_count = ai->corr_peb_count;
  1303. ubi->max_ec = ai->max_ec;
  1304. ubi->mean_ec = ai->mean_ec;
  1305. ubi->ec_sum = ai->ec_sum + ubi->mean_ec * (ubi->good_peb_count - ai->ec_count); /*MTK: calc ec_sum */
  1306. dbg_gen("max. sequence number: %llu", ai->max_sqnum);
  1307. #ifdef CONFIG_MTD_UBI_LOWPAGE_BACKUP
  1308. ubi->scanning = 1;
  1309. err = ubi_backup_init_scan(ubi, ai);
  1310. if (err)
  1311. goto out_ai;
  1312. ubi->scanning = 0;
  1313. #endif
  1314. err = ubi_read_volume_table(ubi, ai);
  1315. if (err)
  1316. goto out_ai;
  1317. time = sched_clock();
  1318. err = ubi_wl_init(ubi, ai);
  1319. if (err)
  1320. goto out_vtbl;
  1321. time = sched_clock() - time;
  1322. do_div(time, 1000000);
  1323. ubi_msg("ubi_wl_init_scan done in %lld(ms)\n", time);
  1324. err = ubi_eba_init(ubi, ai);
  1325. if (err)
  1326. goto out_wl;
  1327. #ifdef CONFIG_MTD_UBI_FASTMAP
  1328. if (ubi->fm && ubi_dbg_chk_gen(ubi)) {
  1329. struct ubi_attach_info *scan_ai;
  1330. scan_ai = alloc_ai("ubi_ckh_aeb_slab_cache");
  1331. if (!scan_ai) {
  1332. err = -ENOMEM;
  1333. goto out_wl;
  1334. }
  1335. err = scan_all(ubi, scan_ai, 0);
  1336. if (err) {
  1337. destroy_ai(scan_ai);
  1338. goto out_wl;
  1339. }
  1340. err = self_check_eba(ubi, ai, scan_ai);
  1341. destroy_ai(scan_ai);
  1342. if (err)
  1343. goto out_wl;
  1344. }
  1345. #endif
  1346. destroy_ai(ai);
  1347. return 0;
  1348. out_wl:
  1349. ubi_wl_close(ubi);
  1350. out_vtbl:
  1351. ubi_free_internal_volumes(ubi);
  1352. vfree(ubi->vtbl);
  1353. out_ai:
  1354. destroy_ai(ai);
  1355. return err;
  1356. }
  1357. /**
  1358. * self_check_ai - check the attaching information.
  1359. * @ubi: UBI device description object
  1360. * @ai: attaching information
  1361. *
  1362. * This function returns zero if the attaching information is all right, and a
  1363. * negative error code if not or if an error occurred.
  1364. */
  1365. static int self_check_ai(struct ubi_device *ubi, struct ubi_attach_info *ai)
  1366. {
  1367. int pnum, err, vols_found = 0;
  1368. struct rb_node *rb1, *rb2;
  1369. struct ubi_ainf_volume *av;
  1370. struct ubi_ainf_peb *aeb, *last_aeb;
  1371. uint8_t *buf;
  1372. int min_ec, max_ec;
  1373. if (!ubi_dbg_chk_gen(ubi))
  1374. return 0;
  1375. /*
  1376. * At first, check that attaching information is OK.
  1377. */
  1378. ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
  1379. int leb_count = 0;
  1380. cond_resched();
  1381. vols_found += 1;
  1382. if (ai->is_empty) {
  1383. ubi_err("bad is_empty flag");
  1384. goto bad_av;
  1385. }
  1386. if (av->vol_id < 0 || av->highest_lnum < 0 ||
  1387. av->leb_count < 0 || av->vol_type < 0 || av->used_ebs < 0 ||
  1388. av->data_pad < 0 || av->last_data_size < 0) {
  1389. ubi_err("negative values");
  1390. goto bad_av;
  1391. }
  1392. if (av->vol_id >= UBI_MAX_VOLUMES &&
  1393. av->vol_id < UBI_INTERNAL_VOL_START) {
  1394. ubi_err("bad vol_id");
  1395. goto bad_av;
  1396. }
  1397. if (av->vol_id > ai->highest_vol_id) {
  1398. ubi_err("highest_vol_id is %d, but vol_id %d is there",
  1399. ai->highest_vol_id, av->vol_id);
  1400. goto out;
  1401. }
  1402. if (av->vol_type != UBI_DYNAMIC_VOLUME &&
  1403. av->vol_type != UBI_STATIC_VOLUME) {
  1404. ubi_err("bad vol_type");
  1405. goto bad_av;
  1406. }
  1407. if (av->data_pad > ubi->leb_size / 2) {
  1408. ubi_err("bad data_pad");
  1409. goto bad_av;
  1410. }
  1411. last_aeb = NULL;
  1412. ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
  1413. cond_resched();
  1414. last_aeb = aeb;
  1415. leb_count += 1;
  1416. {
  1417. min_ec = ai->min_ec;
  1418. max_ec = ai->max_ec;
  1419. }
  1420. if (aeb->pnum < 0 || aeb->ec < 0) {
  1421. ubi_err("negative values");
  1422. goto bad_aeb;
  1423. }
  1424. if (aeb->ec < min_ec) {
  1425. ubi_err("bad ai->min_ec (%d), %d found",
  1426. ai->min_ec, aeb->ec);
  1427. goto bad_aeb;
  1428. }
  1429. if (aeb->ec > max_ec) {
  1430. ubi_err("bad ai->max_ec (%d), %d found",
  1431. ai->max_ec, aeb->ec);
  1432. goto bad_aeb;
  1433. }
  1434. if (aeb->pnum >= ubi->peb_count) {
  1435. ubi_err("too high PEB number %d, total PEBs %d",
  1436. aeb->pnum, ubi->peb_count);
  1437. goto bad_aeb;
  1438. }
  1439. if (av->vol_type == UBI_STATIC_VOLUME) {
  1440. if (aeb->lnum >= av->used_ebs) {
  1441. ubi_err("bad lnum or used_ebs");
  1442. goto bad_aeb;
  1443. }
  1444. } else {
  1445. if (av->used_ebs != 0) {
  1446. ubi_err("non-zero used_ebs");
  1447. goto bad_aeb;
  1448. }
  1449. }
  1450. if (aeb->lnum > av->highest_lnum) {
  1451. ubi_err("incorrect highest_lnum or lnum");
  1452. goto bad_aeb;
  1453. }
  1454. }
  1455. if (av->leb_count != leb_count) {
  1456. ubi_err("bad leb_count, %d objects in the tree",
  1457. leb_count);
  1458. goto bad_av;
  1459. }
  1460. if (!last_aeb)
  1461. continue;
  1462. aeb = last_aeb;
  1463. if (aeb->lnum != av->highest_lnum) {
  1464. ubi_err("bad highest_lnum");
  1465. goto bad_aeb;
  1466. }
  1467. }
  1468. if (vols_found != ai->vols_found) {
  1469. ubi_err("bad ai->vols_found %d, should be %d",
  1470. ai->vols_found, vols_found);
  1471. goto out;
  1472. }
  1473. /* Check that attaching information is correct */
  1474. ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb) {
  1475. last_aeb = NULL;
  1476. ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb) {
  1477. int vol_type;
  1478. cond_resched();
  1479. last_aeb = aeb;
  1480. err = ubi_io_read_vid_hdr(ubi, aeb->pnum, vidh, 1);
  1481. if (err && err != UBI_IO_BITFLIPS) {
  1482. ubi_err("VID header is not OK (%d)", err);
  1483. if (err > 0)
  1484. err = -EIO;
  1485. return err;
  1486. }
  1487. vol_type = vidh->vol_type == UBI_VID_DYNAMIC ?
  1488. UBI_DYNAMIC_VOLUME : UBI_STATIC_VOLUME;
  1489. if (av->vol_type != vol_type) {
  1490. ubi_err("bad vol_type");
  1491. goto bad_vid_hdr;
  1492. }
  1493. if (aeb->sqnum != be64_to_cpu(vidh->sqnum)) {
  1494. ubi_err("bad sqnum %llu", aeb->sqnum);
  1495. goto bad_vid_hdr;
  1496. }
  1497. if (av->vol_id != be32_to_cpu(vidh->vol_id)) {
  1498. ubi_err("bad vol_id %d", av->vol_id);
  1499. goto bad_vid_hdr;
  1500. }
  1501. if (av->compat != vidh->compat) {
  1502. ubi_err("bad compat %d", vidh->compat);
  1503. goto bad_vid_hdr;
  1504. }
  1505. if (aeb->lnum != be32_to_cpu(vidh->lnum)) {
  1506. ubi_err("bad lnum %d", aeb->lnum);
  1507. goto bad_vid_hdr;
  1508. }
  1509. if (av->used_ebs != be32_to_cpu(vidh->used_ebs)) {
  1510. ubi_err("bad used_ebs %d", av->used_ebs);
  1511. goto bad_vid_hdr;
  1512. }
  1513. if (av->data_pad != be32_to_cpu(vidh->data_pad)) {
  1514. ubi_err("bad data_pad %d", av->data_pad);
  1515. goto bad_vid_hdr;
  1516. }
  1517. }
  1518. if (!last_aeb)
  1519. continue;
  1520. if (av->highest_lnum != be32_to_cpu(vidh->lnum)) {
  1521. ubi_err("bad highest_lnum %d", av->highest_lnum);
  1522. goto bad_vid_hdr;
  1523. }
  1524. if (av->last_data_size != be32_to_cpu(vidh->data_size)) {
  1525. ubi_err("bad last_data_size %d", av->last_data_size);
  1526. goto bad_vid_hdr;
  1527. }
  1528. }
  1529. /*
  1530. * Make sure that all the physical eraseblocks are in one of the lists
  1531. * or trees.
  1532. */
  1533. buf = kzalloc(ubi->peb_count, GFP_KERNEL);
  1534. if (!buf)
  1535. return -ENOMEM;
  1536. for (pnum = 0; pnum < ubi->peb_count; pnum++) {
  1537. err = ubi_io_is_bad(ubi, pnum);
  1538. if (err < 0) {
  1539. kfree(buf);
  1540. return err;
  1541. } else if (err)
  1542. buf[pnum] = 1;
  1543. }
  1544. ubi_rb_for_each_entry(rb1, av, &ai->volumes, rb)
  1545. ubi_rb_for_each_entry(rb2, aeb, &av->root, u.rb)
  1546. buf[aeb->pnum] = 1;
  1547. list_for_each_entry(aeb, &ai->free, u.list)
  1548. buf[aeb->pnum] = 1;
  1549. list_for_each_entry(aeb, &ai->corr, u.list)
  1550. buf[aeb->pnum] = 1;
  1551. list_for_each_entry(aeb, &ai->erase, u.list)
  1552. buf[aeb->pnum] = 1;
  1553. list_for_each_entry(aeb, &ai->alien, u.list)
  1554. buf[aeb->pnum] = 1;
  1555. err = 0;
  1556. for (pnum = 0; pnum < ubi->peb_count; pnum++)
  1557. if (!buf[pnum]) {
  1558. ubi_err("PEB %d is not referred", pnum);
  1559. err = 1;
  1560. }
  1561. kfree(buf);
  1562. if (err)
  1563. goto out;
  1564. return 0;
  1565. bad_aeb:
  1566. ubi_err("bad attaching information about LEB %d", aeb->lnum);
  1567. ubi_dump_aeb(aeb, 0);
  1568. ubi_dump_av(av);
  1569. goto out;
  1570. bad_av:
  1571. ubi_err("bad attaching information about volume %d", av->vol_id);
  1572. ubi_dump_av(av);
  1573. goto out;
  1574. bad_vid_hdr:
  1575. ubi_err("bad attaching information about volume %d", av->vol_id);
  1576. ubi_dump_av(av);
  1577. ubi_dump_vid_hdr(vidh);
  1578. out:
  1579. dump_stack();
  1580. return -EINVAL;
  1581. }
  1582. #ifdef CONFIG_MTD_UBI_LOWPAGE_BACKUP
  1583. /**
  1584. * check_pattern - check if buffer contains only a certain byte pattern.
  1585. * @buf: buffer to check
  1586. * @patt: the pattern to check
  1587. * @size: buffer size in bytes
  1588. *
* This function returns %1 if there are only @patt bytes in @buf, and %0 if
  1590. * something else was also found.
  1591. */
/*
 * Where the victim PEB was found while scanning; blb_recovery_peb() uses
 * this to decide how the recovered PEB is re-linked into the attach info.
 */
enum {
	RECOVERY_NONE = 0,	/* no matching PEB found - nothing to recover */
	RECOVERY_FROM_VOLUME,	/* victim found in a volume's RB-tree */
	RECOVERY_FROM_CORR	/* victim found in the corr or waiting list */
};
  1597. static int check_pattern(const void *buf, uint8_t patt, int size)
  1598. {
  1599. int i;
  1600. for (i = 0; i < size; i++)
  1601. if (((const uint8_t *)buf)[i] != patt)
  1602. return 0;
  1603. return 1;
  1604. }
  1605. /**
  1606. * ubi_backup_search_empty - search first empty page in the block.
  1607. * @ubi: ubi structure
  1608. * @pnum: the pnum to search
  1609. *
  1610. * This function returns offset of first empty page in the block.
  1611. */
  1612. static int ubi_backup_search_empty(const struct ubi_device *ubi, int pnum)
  1613. {
  1614. int low, high, mid;
  1615. int first = ubi->peb_size;
  1616. int offset, err = 0;
  1617. low = blb_get_startpage();
  1618. high = ubi->peb_size / ubi->mtd->writesize - 1;
  1619. while (low <= high) {
  1620. mid = (low + high) / 2;
  1621. offset = mid * ubi->mtd->writesize;
  1622. err = ubi_io_read_oob(ubi, ubi->databuf, ubi->oobbuf, pnum, offset);
  1623. if (err == 0 && check_pattern(ubi->oobbuf, 0xFF, ubi->mtd->oobavail)
  1624. && check_pattern(ubi->databuf, 0xFF, ubi->mtd->writesize)) {
  1625. first = offset;
  1626. high = mid - 1;
  1627. } else {
  1628. low = mid + 1;
  1629. }
  1630. }
  1631. return first;
  1632. }
  1633. int blb_recovery_peb(struct ubi_device *ubi, struct ubi_attach_info *ai,
  1634. struct ubi_blb_spare *p_blb_spare, int pnum, int num,
  1635. int backup_pnum, struct ubi_ainf_peb *cad_peb)
  1636. {
  1637. struct ubi_ainf_volume *av;
  1638. int i, err, data_size, offset, tries = 0;
  1639. struct ubi_ainf_peb *old_seb, *new_seb = NULL;
  1640. struct rb_node *rb;
  1641. int recovery = RECOVERY_NONE;
  1642. int source_vol_id, source_lnum, source_pnum, source_page;
  1643. uint32_t crc;
  1644. struct ubi_vid_hdr *vid_hdr = NULL;
  1645. source_page = be16_to_cpu(p_blb_spare->page);
  1646. source_vol_id = be32_to_cpu(p_blb_spare->vol_id);
  1647. source_pnum = be16_to_cpu(p_blb_spare->pnum);
  1648. source_lnum = be16_to_cpu(p_blb_spare->lnum);
  1649. av = ubi_find_av(ai, source_vol_id);
  1650. if (!av) {
  1651. ubi_msg("volume id %d was not found", source_vol_id);
  1652. err = -EINVAL;
  1653. goto out_free;
  1654. }
  1655. /* check from volume */
  1656. ubi_rb_for_each_entry(rb, old_seb, &av->root, u.rb)
  1657. if (old_seb->pnum == source_pnum && old_seb->lnum == source_lnum) {
  1658. recovery = RECOVERY_FROM_VOLUME;
  1659. goto recovery;
  1660. }
  1661. list_for_each_entry(old_seb, &ai->corr, u.list)
  1662. if (old_seb->pnum == source_pnum) {
  1663. recovery = RECOVERY_FROM_CORR;
  1664. list_del(&old_seb->u.list);
  1665. goto recovery;
  1666. }
  1667. list_for_each_entry(old_seb, &ai->waiting, u.list)
  1668. if (old_seb->pnum == source_pnum) {
  1669. recovery = RECOVERY_FROM_CORR;
  1670. list_del(&old_seb->u.list);
  1671. goto recovery;
  1672. }
  1673. list_for_each_entry(old_seb, &ai->free, u.list)
  1674. if (old_seb->pnum == source_pnum) {
  1675. list_del(&old_seb->u.list);
  1676. ubi_msg("add corrept peb %d, ec %d from free to erase list", old_seb->pnum,
  1677. old_seb->ec);
  1678. err =
  1679. add_to_list(ubi, ai, old_seb->pnum, old_seb->vol_id, old_seb->lnum, old_seb->ec, 1,
  1680. &ai->erase);
  1681. if (err)
  1682. return err;
  1683. kmem_cache_free(ai->aeb_slab_cache, old_seb);
  1684. break;
  1685. }
  1686. list_for_each_entry(old_seb, &ai->alien, u.list)
  1687. if (old_seb->pnum == source_pnum) {
  1688. list_del(&old_seb->u.list);
  1689. ubi_msg("add corrept peb %d, ec %d from alien to erase list", old_seb->pnum,
  1690. old_seb->ec);
  1691. err =
  1692. add_to_list(ubi, ai, old_seb->pnum, old_seb->vol_id, old_seb->lnum, old_seb->ec, 1,
  1693. &ai->erase);
  1694. if (err)
  1695. return err;
  1696. kmem_cache_free(ai->aeb_slab_cache, old_seb);
  1697. break;
  1698. }
  1699. if (cad_peb != NULL)
  1700. kmem_cache_free(ai->aeb_slab_cache, cad_peb);
  1701. return 0;
  1702. recovery:
  1703. ubi_msg("recovery from %d", recovery);
  1704. data_size = ubi->leb_size - be32_to_cpu(av->data_pad);
  1705. #ifdef CONFIG_UBI_SHARE_BUFFER
  1706. mutex_lock(&ubi_buf_mutex);
  1707. #else
  1708. mutex_lock(&ubi->buf_mutex);
  1709. #endif
  1710. for (offset = 0; offset < data_size; offset += ubi->mtd->writesize) {
  1711. /* ubi_msg("read source(%d) from %d, %d bytes", old_seb->pnum, offset, ubi->mtd->writesize); */
  1712. err = ubi_io_read_data(ubi, (void *)(((char *)ubi->peb_buf) + offset),
  1713. old_seb->pnum, offset, ubi->mtd->writesize);
  1714. if (err < 0)
  1715. ubi_warn("error %d while reading data from PEB %d:0x%x", err, old_seb->pnum,
  1716. offset);
  1717. }
  1718. for (i = 0; i < num; i++) {
  1719. ubi_msg("read backup(%d) from %d", pnum,
  1720. ubi->next_offset[0] - (i + 1) * ubi->mtd->writesize);
  1721. err =
  1722. ubi_io_read_oob(ubi, ubi->databuf, ubi->oobbuf, pnum,
  1723. ubi->next_offset[0] - (i + 1) * ubi->mtd->writesize);
  1724. source_page = be16_to_cpu(p_blb_spare->page);
  1725. if (source_page >= ubi->leb_start / ubi->mtd->writesize) {
  1726. ubi_msg("copy backup page %d to offset 0x%x", source_page,
  1727. (source_page * ubi->mtd->writesize) - ubi->leb_start);
  1728. memcpy((void *)(((char *)ubi->peb_buf) +
  1729. (source_page * ubi->mtd->writesize) - ubi->leb_start),
  1730. (const void *)ubi->databuf, ubi->mtd->writesize);
  1731. }
  1732. }
  1733. data_size = ubi_calc_data_len(ubi, (char *)ubi->peb_buf, data_size);
  1734. ubi_msg("calc CRC data size %d", data_size);
  1735. crc = crc32(UBI_CRC32_INIT, (char *)ubi->peb_buf, data_size);
  1736. vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_KERNEL);
  1737. if (!vid_hdr) {
  1738. err = -ENOMEM;
  1739. goto out_free;
  1740. }
  1741. vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);
  1742. vid_hdr->vol_id = cpu_to_be32(source_vol_id);
  1743. vid_hdr->lnum = cpu_to_be32(source_lnum);
  1744. vid_hdr->compat = ubi_get_compat(ubi, source_vol_id);
  1745. vid_hdr->data_pad = cpu_to_be32(av->data_pad);
  1746. vid_hdr->used_ebs = 0;
  1747. if (av->used_ebs != 0)
  1748. ubi_msg("bad used_ebs 0x%x", av->used_ebs);
  1749. vid_hdr->vol_type = UBI_VID_DYNAMIC;
  1750. if (data_size > 0) {
  1751. vid_hdr->copy_flag = 1;
  1752. vid_hdr->data_size = cpu_to_be32(data_size);
  1753. vid_hdr->data_crc = cpu_to_be32(crc);
  1754. }
  1755. retry:
  1756. if (tries == 0 && cad_peb != NULL) {
  1757. new_seb = cad_peb;
  1758. } else {
  1759. new_seb = ubi_early_get_peb(ubi, ai);
  1760. if (IS_ERR(new_seb)) {
  1761. err = -EINVAL;
  1762. goto out_free;
  1763. }
  1764. if (backup_pnum == UBI_LEB_UNMAPPED) {
  1765. ubi_warn("no leb 1 for backup page 1 of recovery PEB");
  1766. } else if ((ubi->peb_size - ubi->next_offset[1]) < ubi->mtd->writesize) {
  1767. ubi_warn("no space to backup page 1 of recovery PEB");
  1768. } else {
  1769. struct ubi_blb_spare *blb_spare = (struct ubi_blb_spare *)ubi->oobbuf;
  1770. blb_spare->num = cpu_to_be16(1);
  1771. blb_spare->pnum = cpu_to_be16(new_seb->pnum);
  1772. blb_spare->lnum = cpu_to_be16(source_lnum);
  1773. blb_spare->vol_id = cpu_to_be32(source_vol_id);
  1774. blb_spare->page = cpu_to_be16(1);
  1775. blb_spare->sqnum = cpu_to_be64(++ai->max_sqnum);
  1776. crc = crc32(UBI_CRC32_INIT, blb_spare, sizeof(struct ubi_blb_spare) - 4);
  1777. blb_spare->crc = cpu_to_be32(crc);
  1778. sprintf(ubi->databuf, "VIDVIDVID");
  1779. err =
  1780. ubi_io_write_oob(ubi, ubi->databuf, ubi->oobbuf, backup_pnum,
  1781. ubi->next_offset[1]);
  1782. if (err)
  1783. ubi_err("ERROR: write backup page 1 of recovery PEB fail");
  1784. else
  1785. ubi_msg("backup[1] %d:%d to %d:%d, num %d", new_seb->pnum, 1,
  1786. backup_pnum, ubi->next_offset[1] / ubi->mtd->writesize, 1);
  1787. ubi->next_offset[1] += ubi->mtd->writesize;
  1788. }
  1789. }
  1790. ubi_msg("using peb %d to recovery", new_seb->pnum);
  1791. err = ubi_io_write_vid_hdr(ubi, new_seb->pnum, vid_hdr);
  1792. if (err)
  1793. goto write_error;
  1794. if (data_size > 0) {
  1795. err = ubi_io_write_data(ubi, ubi->peb_buf, new_seb->pnum, 0, data_size);
  1796. if (err)
  1797. goto write_error;
  1798. }
  1799. err =
  1800. add_to_list(ubi, ai, old_seb->pnum, old_seb->vol_id, old_seb->lnum, old_seb->ec, 1,
  1801. &ai->erase);
  1802. if (err)
  1803. goto out_free;
  1804. if (recovery == RECOVERY_FROM_VOLUME) {
  1805. old_seb->pnum = new_seb->pnum;
  1806. old_seb->ec = new_seb->ec;
  1807. old_seb->sqnum = vid_hdr->sqnum;
  1808. } else {
  1809. err = ubi_add_to_av(ubi, ai, new_seb->pnum, new_seb->ec, vid_hdr, 0);
  1810. if (err)
  1811. goto out_free;
  1812. }
  1813. kmem_cache_free(ai->aeb_slab_cache, new_seb);
  1814. ubi_free_vid_hdr(ubi, vid_hdr);
  1815. #ifdef CONFIG_UBI_SHARE_BUFFER
  1816. mutex_unlock(&ubi_buf_mutex);
  1817. #else
  1818. mutex_unlock(&ubi->buf_mutex);
  1819. #endif
  1820. return 0;
  1821. write_error:
  1822. if (err != -EIO || !ubi->bad_allowed) {
  1823. ubi_ro_mode(ubi);
  1824. kmem_cache_free(ai->aeb_slab_cache, new_seb);
  1825. goto out_free;
  1826. }
  1827. err = add_to_list(ubi, ai, new_seb->pnum, new_seb->vol_id, new_seb->lnum, new_seb->pnum, 1,
  1828. &ai->corr);
  1829. kmem_cache_free(ai->aeb_slab_cache, new_seb);
  1830. if (err || ++tries > UBI_IO_RETRIES) {
  1831. ubi_ro_mode(ubi);
  1832. goto out_free;
  1833. }
  1834. vid_hdr->sqnum = cpu_to_be64(++ai->max_sqnum);
  1835. ubi_msg("try another PEB");
  1836. goto retry;
  1837. out_free:
  1838. if (vid_hdr)
  1839. ubi_free_vid_hdr(ubi, vid_hdr);
  1840. #ifdef CONFIG_UBI_SHARE_BUFFER
  1841. mutex_unlock(&ubi_buf_mutex);
  1842. #else
  1843. mutex_unlock(&ubi->buf_mutex);
  1844. #endif
  1845. return err;
  1846. }
  1847. int ubi_backup_init_scan(struct ubi_device *ubi, struct ubi_attach_info *ai)
  1848. {
  1849. int i, j, err = 0;
  1850. struct ubi_vid_hdr *vid_hdr = NULL;
  1851. struct ubi_ainf_volume *av;
  1852. struct ubi_ainf_peb *seb, *backup_seb[2], *old_seb = NULL;/* , *new_seb; */
  1853. struct rb_node *rb;
  1854. struct ubi_blb_spare *p_blb_spare;
  1855. int pnum = 0;
  1856. int page_cnt;
  1857. int source_pnum = 0, source_lnum = 0, source_vol_id = 0, source_page = 0, num = 0;
  1858. int corrupt; /* , recovery, tries = 0; */
  1859. /* int data_size; */
  1860. uint32_t crc;
  1861. struct ubi_ainf_peb *seb_tmp;
  1862. struct ubi_ainf_peb *candidate_peb = NULL;
  1863. int high_page;
  1864. page_cnt = (1 << (ubi->mtd->erasesize_shift - ubi->mtd->writesize_shift));
  1865. ubi->databuf = vmalloc(ubi->mtd->writesize);
  1866. ubi->oobbuf = vmalloc(ubi->mtd->oobavail);
  1867. if (!ubi->databuf || !ubi->oobbuf) {
  1868. err = -ENOMEM;
  1869. goto out_free;
  1870. }
  1871. ubi->leb_scrub[0] = 0;
  1872. ubi->leb_scrub[1] = 0;
  1873. ubi->next_offset[0] = 0;
  1874. ubi->next_offset[1] = 0;
  1875. backup_seb[0] = NULL;
  1876. backup_seb[1] = NULL;
  1877. mutex_init(&ubi->blb_mutex);
  1878. av = ubi_find_av(ai, UBI_BACKUP_VOLUME_ID);
  1879. if (!av) {
  1880. ubi_msg("blb the backup volume was not found");
  1881. return 0;
  1882. }
  1883. ubi_msg("blb check backup volume(0x%x):%d", UBI_BACKUP_VOLUME_ID, av->vol_id);
  1884. p_blb_spare = (struct ubi_blb_spare *)ubi->oobbuf;
  1885. /* Get two PEBs of backup volume */
  1886. ubi_rb_for_each_entry(rb, seb, &av->root, u.rb) {
  1887. int lnum = seb->lnum;
  1888. ubi_assert(lnum < 2);
  1889. backup_seb[lnum] = seb;
  1890. ubi->next_offset[lnum] = ubi_backup_search_empty(ubi, seb->pnum);
  1891. }
  1892. /* check sqnum */
  1893. if (backup_seb[0] != NULL && backup_seb[1] != NULL) {
  1894. int peb0 = -1, peb1 = -1;
  1895. unsigned long long sqnum0 = 0, sqnum1 = 0;
  1896. pnum = backup_seb[0]->pnum;
  1897. ubi_msg("blb block %d, pnum %d next offset 0x%x(page %d)", 0, pnum,
  1898. ubi->next_offset[0], ubi->next_offset[0] / ubi->mtd->writesize);
  1899. err =
  1900. ubi_io_read_oob(ubi, NULL, ubi->oobbuf, pnum,
  1901. ubi->next_offset[0] - ubi->mtd->writesize);
  1902. if (err < 0) {
  1903. ubi_msg("blb this page of LEB0 was scrubbed or WL");
  1904. backup_seb[0] = NULL;
  1905. } else {
  1906. crc = crc32(UBI_CRC32_INIT, p_blb_spare, sizeof(struct ubi_blb_spare) - 4);
  1907. if (crc != be32_to_cpu(p_blb_spare->crc)) {
  1908. ubi_msg("blb this page of LEB0 crc error");
  1909. backup_seb[0] = NULL;
  1910. } else {
  1911. peb0 = be16_to_cpu(p_blb_spare->pnum);
  1912. sqnum0 = be64_to_cpu(p_blb_spare->sqnum);
  1913. if (ai->max_sqnum < sqnum0)
  1914. ai->max_sqnum = sqnum0;
  1915. }
  1916. }
  1917. pnum = backup_seb[1]->pnum;
  1918. ubi_msg("blb block %d, pnum %d next offset 0x%x(page %d)", 1, pnum,
  1919. ubi->next_offset[1], ubi->next_offset[1] / ubi->mtd->writesize);
  1920. err =
  1921. ubi_io_read_oob(ubi, NULL, ubi->oobbuf, pnum,
  1922. ubi->next_offset[1] - ubi->mtd->writesize);
  1923. if (err < 0) {
  1924. ubi_msg("blb this page of LEB1 was scrubbed or WL");
  1925. backup_seb[1] = NULL;
  1926. } else {
  1927. crc = crc32(UBI_CRC32_INIT, p_blb_spare, sizeof(struct ubi_blb_spare) - 4);
  1928. if (crc != be32_to_cpu(p_blb_spare->crc)) {
  1929. ubi_msg("blb this page of LEB0 crc error");
  1930. backup_seb[1] = NULL;
  1931. } else {
  1932. peb1 = be16_to_cpu(p_blb_spare->pnum);
  1933. sqnum1 = be64_to_cpu(p_blb_spare->sqnum);
  1934. if (ai->max_sqnum < sqnum1)
  1935. ai->max_sqnum = sqnum1;
  1936. }
  1937. }
  1938. ubi_msg("sqnum0 %llu , sqnum1 %llu", sqnum0, sqnum1);
  1939. if (peb0 == peb1 && peb0 != -1) {
  1940. ubi_msg("blb two record have the same peb %d", peb0);
  1941. if (sqnum1 > sqnum0) {
  1942. ubi_msg("blb LEB1 is new %d", peb0);
  1943. backup_seb[0] = NULL;
  1944. } else {
  1945. ubi_msg("blb LEB0 is new %d", peb0);
  1946. backup_seb[1] = NULL;
  1947. }
  1948. }
  1949. }
  1950. for (j = 1; j >= 0; j--) {
  1951. if (backup_seb[j] == NULL)
  1952. continue;
  1953. pnum = backup_seb[j]->pnum;
  1954. ubi_msg("blb block %d, pnum %d next offset 0x%x(page %d)", j, pnum,
  1955. ubi->next_offset[j], ubi->next_offset[j] / ubi->mtd->writesize);
  1956. err =
  1957. ubi_io_read_oob(ubi, ubi->databuf, ubi->oobbuf, pnum,
  1958. ubi->next_offset[j] - ubi->mtd->writesize);
  1959. if (err >= 0) {
  1960. source_page = be16_to_cpu(p_blb_spare->page);
  1961. num = be16_to_cpu(p_blb_spare->num);
  1962. source_vol_id = be32_to_cpu(p_blb_spare->vol_id);
  1963. source_pnum = be16_to_cpu(p_blb_spare->pnum);
  1964. source_lnum = be16_to_cpu(p_blb_spare->lnum);
  1965. crc = crc32(UBI_CRC32_INIT, p_blb_spare, sizeof(struct ubi_blb_spare) - 4);
  1966. if (crc != be32_to_cpu(p_blb_spare->crc)) {
  1967. ubi_msg("blb this page crc error");
  1968. continue;
  1969. } else {
  1970. ubi_msg("blb this page crc match");
  1971. }
  1972. } else {
  1973. ubi_msg("blb this page was scrubbed or WL");
  1974. ubi->leb_scrub[j] = 1;
  1975. continue;
  1976. }
  1977. ubi_msg("blb Spare Strut page: %X, num: %X, vol_id: %X, pnum: %X, lnum: %X",
  1978. p_blb_spare->page, p_blb_spare->num, p_blb_spare->vol_id,
  1979. p_blb_spare->pnum, p_blb_spare->lnum);
  1980. ubi_msg("blb backup @pnum %d, offset %d", pnum, ubi->next_offset[j]);
  1981. ubi_msg("blb backup source @pnum %d, lnum %d, vol_id %d, page %d, sq %d",
  1982. source_pnum, source_lnum, source_vol_id, source_page, num);
  1983. if (p_blb_spare->page == 0xFFFF && p_blb_spare->num == 0xFFFF &&
  1984. p_blb_spare->vol_id == 0xFFFFFFFF && p_blb_spare->pnum == 0xFFFF &&
  1985. p_blb_spare->lnum == 0xFFFF) {
  1986. ubi_msg("blb the backup volume was scrubbed or WL, no need to restore");
  1987. continue;
  1988. }
  1989. /* Check if source page corrupts, and recover */
  1990. corrupt = 0;
  1991. for (i = 0; i < num; i++) {
  1992. /* read backup page */
  1993. ubi_msg("blb check backup @pnum %d, offset 0x%x", pnum,
  1994. ubi->next_offset[j] - (i + 1) * ubi->mtd->writesize);
  1995. if (i > 0) {
  1996. err = ubi_io_read_oob(ubi, ubi->databuf, ubi->oobbuf, pnum,
  1997. ubi->next_offset[j] - (i +
  1998. 1) *
  1999. ubi->mtd->writesize);
  2000. if (err < 0) {
  2001. corrupt = 0;
  2002. ubi_msg("blb this page was scrubbed or WL");
  2003. ubi->leb_scrub[j] = 1;
  2004. break;
  2005. }
  2006. source_page = be16_to_cpu(p_blb_spare->page);
  2007. source_vol_id = be32_to_cpu(p_blb_spare->vol_id);
  2008. source_pnum = be16_to_cpu(p_blb_spare->pnum);
  2009. source_lnum = be16_to_cpu(p_blb_spare->lnum);
  2010. }
  2011. if (source_page == 1) {
  2012. char *buf = ubi->databuf;
  2013. ubi_msg("databuf %c%c%c%c%c%c%c%c%c", buf[0], buf[1], buf[2],
  2014. buf[3], buf[4], buf[5], buf[6], buf[7], buf[8]);
  2015. if (strncmp("VIDVIDVID", ubi->databuf, 9) == 0) {
  2016. int check_page = 2;
  2017. if (source_vol_id == UBI_BACKUP_VOLUME_ID)
  2018. check_page = blb_get_startpage();
  2019. ubi_msg("vid special case, checking page %d", check_page);
  2020. err = ubi_io_read_oob(ubi, ubi->databuf, NULL, source_pnum,
  2021. check_page * ubi->mtd->writesize);
  2022. if (err)
  2023. continue;
  2024. err = ubi_check_pattern(ubi->databuf, 0xFF,
  2025. ubi->mtd->writesize);
  2026. if (err == 1) {
  2027. ubi_msg("Page 2(%d) are all 0xFF", source_pnum);
  2028. corrupt = 2;
  2029. break;
  2030. }
  2031. continue;
  2032. }
  2033. }
  2034. /* read source page */
  2035. ubi_msg("check source @pnum %d, offset 0x%x", source_pnum,
  2036. source_page * ubi->mtd->writesize);
  2037. err = ubi_io_read_oob(ubi, ubi->databuf, NULL, source_pnum,
  2038. source_page * ubi->mtd->writesize);
  2039. ubi_msg("checked source @pnum %d, offset 0x%x, ret %d", source_pnum,
  2040. source_page * ubi->mtd->writesize, err);
  2041. if (err < 0 || err == UBI_IO_BITFLIPS) {
  2042. ubi_msg("source @pnum %d, offset 0x%x correct/bitflips =%d",
  2043. source_pnum, source_page * ubi->mtd->writesize, err);
  2044. corrupt = 1;
  2045. break;
  2046. }
  2047. /* read high page */
  2048. high_page = mtk_nand_paired_page_transfer(source_page, false);
  2049. ubi_msg("check high @pnum %d, offset 0x%x", source_pnum,
  2050. high_page * ubi->mtd->writesize);
  2051. err = ubi_io_read_oob(ubi, ubi->databuf, NULL, source_pnum,
  2052. high_page * ubi->mtd->writesize);
  2053. ubi_msg("checked high @pnum %d, offset 0x%x, ret %d", source_pnum,
  2054. high_page * ubi->mtd->writesize, err);
  2055. if (err < 0 || err == UBI_IO_BITFLIPS) {
  2056. ubi_msg("high @pnum %d, offset 0x%x correct/bitflips =%d",
  2057. source_pnum, high_page * ubi->mtd->writesize, err);
  2058. corrupt = 1;
  2059. break;
  2060. }
  2061. if (check_pattern(ubi->databuf, 0xFF, ubi->mtd->writesize) == 1) {
  2062. ubi_msg("high pare are empty");
  2063. av = ubi_find_av(ai, source_vol_id);
  2064. if (!av) {
  2065. ubi_msg("volume id %d was not found", source_vol_id);
  2066. ubi_msg("old_seb NULL");
  2067. corrupt = 1;
  2068. break;
  2069. }
  2070. ubi_rb_for_each_entry(rb, old_seb, &av->root, u.rb) {
  2071. if (old_seb->pnum == source_pnum) {
  2072. ubi_msg("old_seb peb %d", old_seb->pnum);
  2073. break;
  2074. }
  2075. }
  2076. if (old_seb != NULL && old_seb->pnum == source_pnum) {
  2077. ubi_msg("old seq %llu , blb seq %llu", old_seb->sqnum,
  2078. be64_to_cpu(p_blb_spare->sqnum));
  2079. if (old_seb->sqnum < be64_to_cpu(p_blb_spare->sqnum)) {
  2080. corrupt = 1;
  2081. break;
  2082. }
  2083. } else if (source_page == 1) {
  2084. ubi_msg("old_seb NULL");
  2085. corrupt = 1;
  2086. break;
  2087. }
  2088. }
  2089. ubi_msg("high pare has content");
  2090. }
  2091. if (corrupt == 1) {
  2092. int backup_pnum = UBI_LEB_UNMAPPED;
  2093. ubi_msg("corrupt %d", corrupt);
  2094. if (backup_seb[1] != NULL)
  2095. backup_pnum = backup_seb[1]->pnum;
  2096. blb_recovery_peb(ubi, ai, p_blb_spare, pnum, num, backup_pnum,
  2097. candidate_peb);
  2098. candidate_peb = NULL;
  2099. } else if (corrupt == 2) {
  2100. av = ubi_find_av(ai, source_vol_id);
  2101. if (!av) {
  2102. ubi_msg("volume id %d was not found", source_vol_id);
  2103. } else {
  2104. ubi_rb_for_each_entry(rb, old_seb, &av->root, u.rb) {
  2105. if (old_seb->pnum == source_pnum)
  2106. break;
  2107. }
  2108. if (old_seb != NULL && old_seb->pnum == source_pnum) {
  2109. rb_erase(&old_seb->u.rb, &av->root);
  2110. if (candidate_peb != NULL) {
  2111. ubi_msg("candidate peb %d doesn't be used, add to free list",
  2112. candidate_peb->pnum);
  2113. add_to_list(ubi, ai, candidate_peb->pnum, candidate_peb->vol_id,
  2114. candidate_peb->lnum, candidate_peb->ec, 1, &ai->free);
  2115. kmem_cache_free(ai->aeb_slab_cache, candidate_peb);
  2116. }
  2117. ubi_msg("candidate peb %d", old_seb->pnum);
  2118. candidate_peb = old_seb;
  2119. }
  2120. }
  2121. list_for_each_entry(old_seb, &ai->free, u.list)
  2122. if (old_seb->pnum == source_pnum) {
  2123. list_del(&old_seb->u.list);
  2124. ubi_msg("candidate peb %d", old_seb->pnum);
  2125. candidate_peb = old_seb;
  2126. break;
  2127. }
  2128. list_for_each_entry(old_seb, &ai->corr, u.list)
  2129. if (old_seb->pnum == source_pnum) {
  2130. list_del(&old_seb->u.list);
  2131. ubi_msg("candidate peb %d", old_seb->pnum);
  2132. candidate_peb = old_seb;
  2133. break;
  2134. }
  2135. if (candidate_peb != NULL) {
  2136. ubi_msg("erasing candidate peb %d", candidate_peb->pnum);
  2137. err =
  2138. early_erase_peb(ubi, ai, candidate_peb->pnum,
  2139. candidate_peb->ec + 1);
  2140. if (err) {
  2141. ubi_msg("erasing candidate peb %d fail %d",
  2142. candidate_peb->pnum, err);
  2143. add_to_list(ubi, ai, old_seb->pnum, old_seb->vol_id,
  2144. old_seb->lnum, old_seb->ec, 1, &ai->erase);
  2145. kmem_cache_free(ai->aeb_slab_cache, candidate_peb);
  2146. candidate_peb = NULL;
  2147. }
  2148. candidate_peb->ec++;
  2149. }
  2150. }
  2151. }
  2152. if (candidate_peb != NULL) {
  2153. ubi_msg("candidate peb %d doesn't be used, add to free list", candidate_peb->pnum);
  2154. add_to_list(ubi, ai, candidate_peb->pnum, candidate_peb->vol_id, candidate_peb->lnum,
  2155. candidate_peb->ec, 1, &ai->free);
  2156. kmem_cache_free(ai->aeb_slab_cache, candidate_peb);
  2157. }
  2158. list_for_each_entry_safe(old_seb, seb_tmp, &ai->waiting, u.list) {
  2159. list_del(&old_seb->u.list);
  2160. ubi_msg("move to erase from waiting: PEB %d, EC %d", old_seb->pnum, old_seb->ec);
  2161. err =
  2162. add_to_list(ubi, ai, old_seb->pnum, old_seb->vol_id, old_seb->lnum, old_seb->ec, 1,
  2163. &ai->erase);
  2164. kmem_cache_free(ai->aeb_slab_cache, old_seb);
  2165. }
  2166. return 0;
  2167. out_free:
  2168. if (ubi->databuf)
  2169. vfree(ubi->databuf);
  2170. if (ubi->oobbuf)
  2171. vfree(ubi->oobbuf);
  2172. if (vid_hdr)
  2173. ubi_free_vid_hdr(ubi, vid_hdr);
  2174. return err;
  2175. }
  2176. #endif