tnc_commit.c

/*
 * This file is part of UBIFS.
 *
 * Copyright (C) 2006-2008 Nokia Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 51
 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * Authors: Adrian Hunter
 *          Artem Bityutskiy (Битюцкий Артём)
 */

/* This file implements TNC functions for committing */

#include <linux/random.h>
#include "ubifs.h"

/**
 * make_idx_node - make an index node for fill-the-gaps method of TNC commit.
 * @c: UBIFS file-system description object
 * @idx: buffer in which to place new index node
 * @znode: znode from which to make new index node
 * @lnum: LEB number where new index node will be written
 * @offs: offset where new index node will be written
 * @len: length of new index node
 */
static int make_idx_node(struct ubifs_info *c, struct ubifs_idx_node *idx,
			 struct ubifs_znode *znode, int lnum, int offs, int len)
{
	struct ubifs_znode *zp;
	int i, err;

	/* Make index node */
	idx->ch.node_type = UBIFS_IDX_NODE;
	idx->child_cnt = cpu_to_le16(znode->child_cnt);
	idx->level = cpu_to_le16(znode->level);
	for (i = 0; i < znode->child_cnt; i++) {
		struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
		struct ubifs_zbranch *zbr = &znode->zbranch[i];

		key_write_idx(c, &zbr->key, &br->key);
		br->lnum = cpu_to_le32(zbr->lnum);
		br->offs = cpu_to_le32(zbr->offs);
		br->len = cpu_to_le32(zbr->len);
		if (!zbr->lnum || !zbr->len) {
			ubifs_err("bad ref in znode");
			ubifs_dump_znode(c, znode);
			if (zbr->znode)
				ubifs_dump_znode(c, zbr->znode);
		}
	}
	ubifs_prepare_node(c, idx, len, 0);

	znode->lnum = lnum;
	znode->offs = offs;
	znode->len = len;
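
	/*
	 * The znode has just been given a new position, so remember the old
	 * one (still referenced by the parent zbranch, or by the zroot): the
	 * index written by the previous commit must stay intact on flash
	 * until this commit completes, so that old location must not be
	 * reused as gap space by this commit.
	 */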
	err = insert_old_idx_znode(c, znode);

	/* Update the parent */
	zp = znode->parent;
	if (zp) {
		struct ubifs_zbranch *zbr;

		zbr = &zp->zbranch[znode->iip];
		zbr->lnum = lnum;
		zbr->offs = offs;
		zbr->len = len;
	} else {
		c->zroot.lnum = lnum;
		c->zroot.offs = offs;
		c->zroot.len = len;
	}
	c->calc_idx_sz += ALIGN(len, 8);

	atomic_long_dec(&c->dirty_zn_cnt);

	ubifs_assert(ubifs_zn_dirty(znode));
	ubifs_assert(ubifs_zn_cow(znode));

	/*
	 * Note, unlike 'write_index()' we do not add memory barriers here
	 * because this function is called with @c->tnc_mutex locked.
	 */
	__clear_bit(DIRTY_ZNODE, &znode->flags);
	__clear_bit(COW_ZNODE, &znode->flags);

	return err;
}
/**
 * fill_gap - make index nodes in gaps in dirty index LEBs.
 * @c: UBIFS file-system description object
 * @lnum: LEB number that gap appears in
 * @gap_start: offset of start of gap
 * @gap_end: offset of end of gap
 * @dirt: adds dirty space to this
 *
 * This function returns the number of index nodes written into the gap.
 */
static int fill_gap(struct ubifs_info *c, int lnum, int gap_start, int gap_end,
		    int *dirt)
{
	int len, gap_remains, gap_pos, written, pad_len;

	ubifs_assert((gap_start & 7) == 0);
	ubifs_assert((gap_end & 7) == 0);
	ubifs_assert(gap_end >= gap_start);

	gap_remains = gap_end - gap_start;
	if (!gap_remains)
		return 0;
	gap_pos = gap_start;
	written = 0;
	while (c->enext) {
		len = ubifs_idx_node_sz(c, c->enext->child_cnt);
		if (len < gap_remains) {
			struct ubifs_znode *znode = c->enext;
			const int alen = ALIGN(len, 8);
			int err;

			ubifs_assert(alen <= gap_remains);
			err = make_idx_node(c, c->ileb_buf + gap_pos, znode,
					    lnum, gap_pos, len);
			if (err)
				return err;
			gap_remains -= alen;
			gap_pos += alen;
			c->enext = znode->cnext;
			if (c->enext == c->cnext)
				c->enext = NULL;
			written += 1;
		} else
			break;
	}
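
	/*
	 * Whatever part of the gap could not be filled with index nodes is
	 * padded: up to the next min_io_size boundary when the gap runs to
	 * the end of the LEB (that becomes the LEB's new length), otherwise
	 * all the way to the end of the gap, so that the LEB contents still
	 * scan cleanly.
	 */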
	if (gap_end == c->leb_size) {
		c->ileb_len = ALIGN(gap_pos, c->min_io_size);
		/* Pad to end of min_io_size */
		pad_len = c->ileb_len - gap_pos;
	} else
		/* Pad to end of gap */
		pad_len = gap_remains;
	dbg_gc("LEB %d:%d to %d len %d nodes written %d wasted bytes %d",
	       lnum, gap_start, gap_end, gap_end - gap_start, written, pad_len);
	ubifs_pad(c, c->ileb_buf + gap_pos, pad_len);
	*dirt += pad_len;
	return written;
}
/**
 * find_old_idx - find an index node obsoleted since the last commit start.
 * @c: UBIFS file-system description object
 * @lnum: LEB number of obsoleted index node
 * @offs: offset of obsoleted index node
 *
 * Returns %1 if found and %0 otherwise.
 */
static int find_old_idx(struct ubifs_info *c, int lnum, int offs)
{
	struct ubifs_old_idx *o;
	struct rb_node *p;

	p = c->old_idx.rb_node;
	while (p) {
		o = rb_entry(p, struct ubifs_old_idx, rb);
		if (lnum < o->lnum)
			p = p->rb_left;
		else if (lnum > o->lnum)
			p = p->rb_right;
		else if (offs < o->offs)
			p = p->rb_left;
		else if (offs > o->offs)
			p = p->rb_right;
		else
			return 1;
	}
	return 0;
}

/**
 * is_idx_node_in_use - determine if an index node can be overwritten.
 * @c: UBIFS file-system description object
 * @key: key of index node
 * @level: index node level
 * @lnum: LEB number of index node
 * @offs: offset of index node
 *
 * If @key / @lnum / @offs identify an index node that was not part of the old
 * index, then this function returns %0 (obsolete). Else if the index node was
 * part of the old index but is now dirty %1 is returned, else if it is clean %2
 * is returned. A negative error code is returned on failure.
 */
static int is_idx_node_in_use(struct ubifs_info *c, union ubifs_key *key,
			      int level, int lnum, int offs)
{
	int ret;

	ret = is_idx_node_in_tnc(c, key, level, lnum, offs);
	if (ret < 0)
		return ret; /* Error code */
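
	/*
	 * A node that is no longer in the TNC may still have been part of the
	 * index at the last commit; in that case it was obsoleted since this
	 * commit started and is recorded in c->old_idx, so it is counted as
	 * dirty space rather than reused as a gap - the on-flash index from
	 * the last commit must remain intact until this commit completes.
	 */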
	if (ret == 0)
		if (find_old_idx(c, lnum, offs))
			return 1;
	return ret;
}

/**
 * layout_leb_in_gaps - layout index nodes using in-the-gaps method.
 * @c: UBIFS file-system description object
 * @p: return LEB number here
 *
 * This function lays out new index nodes for dirty znodes using the
 * in-the-gaps method of TNC commit. It merely puts the next znode into the
 * next gap, making no attempt to maximise the number of znodes that fit.
 *
 * This function returns the number of index nodes written into the gaps, or a
 * negative error code on failure.
 */
static int layout_leb_in_gaps(struct ubifs_info *c, int *p)
{
	struct ubifs_scan_leb *sleb;
	struct ubifs_scan_node *snod;
	int lnum, dirt = 0, gap_start, gap_end, err, written, tot_written;

	tot_written = 0;
	/* Get an index LEB with lots of obsolete index nodes */
	lnum = ubifs_find_dirty_idx_leb(c);
	if (lnum < 0)
		/*
		 * There also may be dirt in the index head that could be
		 * filled, however we do not check there at present.
		 */
		return lnum; /* Error code */
	*p = lnum;
	dbg_gc("LEB %d", lnum);
	/*
	 * Scan the index LEB. We use the generic scan for this even though
	 * it is more comprehensive and less efficient than is needed for this
	 * purpose.
	 */
#ifdef CONFIG_UBIFS_SHARE_BUFFER
	if (mutex_trylock(&ubifs_sbuf_mutex) == 0) {
		atomic_long_inc(&ubifs_sbuf_lock_count);
		ubifs_err("trylock fail count %ld\n",
			  atomic_long_read(&ubifs_sbuf_lock_count));
		mutex_lock(&ubifs_sbuf_mutex);
		ubifs_err("locked count %ld\n",
			  atomic_long_read(&ubifs_sbuf_lock_count));
	}
#endif
	sleb = ubifs_scan(c, lnum, 0, c->ileb_buf, 0);
	c->ileb_len = 0;
	if (IS_ERR(sleb)) {
#ifdef CONFIG_UBIFS_SHARE_BUFFER
		mutex_unlock(&ubifs_sbuf_mutex);
#endif
		return PTR_ERR(sleb);
	}
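
	/*
	 * Walk the scanned nodes; runs of obsolete index nodes between two
	 * surviving ones form the "gaps" that new index nodes are written
	 * into.
	 */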
	gap_start = 0;
	list_for_each_entry(snod, &sleb->nodes, list) {
		struct ubifs_idx_node *idx;
		int in_use, level;

		ubifs_assert(snod->type == UBIFS_IDX_NODE);
		idx = snod->node;
		key_read(c, ubifs_idx_key(c, idx), &snod->key);
		level = le16_to_cpu(idx->level);
		/* Determine if the index node is in use (not obsolete) */
		in_use = is_idx_node_in_use(c, &snod->key, level, lnum,
					    snod->offs);
		if (in_use < 0) {
			ubifs_scan_destroy(sleb);
#ifdef CONFIG_UBIFS_SHARE_BUFFER
			mutex_unlock(&ubifs_sbuf_mutex);
#endif
			return in_use; /* Error code */
		}
		if (in_use) {
			if (in_use == 1)
				dirt += ALIGN(snod->len, 8);
			/*
			 * The obsolete index nodes form gaps that can be
			 * overwritten. This gap has ended because we have
			 * found an index node that is still in use
			 * i.e. not obsolete
			 */
			gap_end = snod->offs;
			/* Try to fill gap */
			written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
			if (written < 0) {
				ubifs_scan_destroy(sleb);
#ifdef CONFIG_UBIFS_SHARE_BUFFER
				mutex_unlock(&ubifs_sbuf_mutex);
#endif
				return written; /* Error code */
			}
			tot_written += written;
			gap_start = ALIGN(snod->offs + snod->len, 8);
		}
	}
	ubifs_scan_destroy(sleb);
	c->ileb_len = c->leb_size;
	gap_end = c->leb_size;
	/* Try to fill gap */
	written = fill_gap(c, lnum, gap_start, gap_end, &dirt);
	if (written < 0) {
#ifdef CONFIG_UBIFS_SHARE_BUFFER
		mutex_unlock(&ubifs_sbuf_mutex);
#endif
		return written; /* Error code */
	}
	tot_written += written;
	if (tot_written == 0) {
		struct ubifs_lprops lp;

#ifdef CONFIG_UBIFS_SHARE_BUFFER
		mutex_unlock(&ubifs_sbuf_mutex);
#endif
		dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
		err = ubifs_read_one_lp(c, lnum, &lp);
		if (err)
			return err;
		if (lp.free == c->leb_size) {
			/*
			 * We must have snatched this LEB from the idx_gc list
			 * so we need to correct the free and dirty space.
			 */
			err = ubifs_change_one_lp(c, lnum,
						  c->leb_size - c->ileb_len,
						  dirt, 0, 0, 0);
			if (err)
				return err;
		}
		return 0;
	}
	err = ubifs_change_one_lp(c, lnum, c->leb_size - c->ileb_len, dirt,
				  0, 0, 0);
	if (err) {
#ifdef CONFIG_UBIFS_SHARE_BUFFER
		mutex_unlock(&ubifs_sbuf_mutex);
#endif
		return err;
	}
	err = ubifs_leb_change(c, lnum, c->ileb_buf, c->ileb_len);
#ifdef CONFIG_UBIFS_SHARE_BUFFER
	mutex_unlock(&ubifs_sbuf_mutex);
#endif
	if (err)
		return err;
	dbg_gc("LEB %d wrote %d index nodes", lnum, tot_written);
	return tot_written;
}
/**
 * get_leb_cnt - calculate the number of empty LEBs needed to commit.
 * @c: UBIFS file-system description object
 * @cnt: number of znodes to commit
 *
 * This function returns the number of empty LEBs needed to commit @cnt znodes
 * to the current index head. The number is not exact and may be more than
 * needed.
 */
static int get_leb_cnt(struct ubifs_info *c, int cnt)
{
	int d;

	/* Assume maximum index node size (i.e. overestimate space needed) */
	cnt -= (c->leb_size - c->ihead_offs) / c->max_idx_node_sz;
	if (cnt < 0)
		cnt = 0;
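	/*
	 * Illustrative numbers (not from any particular device): with a
	 * 128 KiB LEB, a 256-byte maximum index node size and 96 KiB already
	 * used in the index head, the head still has room for
	 * (128K - 96K) / 256 = 128 nodes; committing 1000 znodes would then
	 * need DIV_ROUND_UP(1000 - 128, 512) = 2 more empty LEBs. Real index
	 * nodes are usually smaller than the maximum, so this overestimates.
	 */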
	d = c->leb_size / c->max_idx_node_sz;
	return DIV_ROUND_UP(cnt, d);
}
/**
 * layout_in_gaps - in-the-gaps method of committing TNC.
 * @c: UBIFS file-system description object
 * @cnt: number of dirty znodes to commit.
 *
 * This function lays out new index nodes for dirty znodes using in-the-gaps
 * method of TNC commit.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_in_gaps(struct ubifs_info *c, int cnt)
{
	int err, leb_needed_cnt, written, *p;

	dbg_gc("%d znodes to write", cnt);

	c->gap_lebs = kmalloc(sizeof(int) * (c->lst.idx_lebs + 1), GFP_NOFS);
	if (!c->gap_lebs)
		return -ENOMEM;

	p = c->gap_lebs;
	do {
		ubifs_assert(p < c->gap_lebs + c->lst.idx_lebs);
		written = layout_leb_in_gaps(c, p);
		if (written < 0) {
			err = written;
			if (err != -ENOSPC) {
				kfree(c->gap_lebs);
				c->gap_lebs = NULL;
				return err;
			}
			if (!dbg_is_chk_index(c)) {
				/*
				 * Do not print scary warnings if the debugging
				 * option which forces in-the-gaps is enabled.
				 */
				ubifs_warn("out of space");
				ubifs_dump_budg(c, &c->bi);
				ubifs_dump_lprops(c);
			}
			/* Try to commit anyway */
			break;
		}
		p++;
		cnt -= written;
		leb_needed_cnt = get_leb_cnt(c, cnt);
		dbg_gc("%d znodes remaining, need %d LEBs, have %d", cnt,
		       leb_needed_cnt, c->ileb_cnt);
	} while (leb_needed_cnt > c->ileb_cnt);
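
	/* Terminate the array with -1 so return_gap_lebs() knows where to stop */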
	*p = -1;
	return 0;
}
/**
 * layout_in_empty_space - layout index nodes in empty space.
 * @c: UBIFS file-system description object
 *
 * This function lays out new index nodes for dirty znodes using empty LEBs.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_in_empty_space(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext, *zp;
	int lnum, offs, len, next_len, buf_len, buf_offs, used, avail;
	int wlen, blen, err;

	cnext = c->enext;
	if (!cnext)
		return 0;

	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	buf_len = ubifs_idx_node_sz(c, c->fanout);
	buf_len = ALIGN(buf_len, c->min_io_size);
	used = 0;
	avail = buf_len;

	/* Ensure there is enough room for first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size)
		lnum = -1;
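
	/*
	 * The loop below only assigns on-flash positions; nothing is written
	 * here. @buf_offs tracks where the current write buffer would start
	 * in the LEB, @used is how much of it has been handed out to index
	 * nodes, and @avail is what remains. write_index() later repeats this
	 * walk and does the actual I/O, so the two functions must stay in
	 * step.
	 */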
	while (1) {
		znode = cnext;

		len = ubifs_idx_node_sz(c, znode->child_cnt);

		/* Determine the index node position */
		if (lnum == -1) {
			if (c->ileb_nxt >= c->ileb_cnt) {
				ubifs_err("out of space");
				return -ENOSPC;
			}
			lnum = c->ilebs[c->ileb_nxt++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}

		offs = buf_offs + used;

		znode->lnum = lnum;
		znode->offs = offs;
		znode->len = len;

		/* Update the parent */
		zp = znode->parent;
		if (zp) {
			struct ubifs_zbranch *zbr;
			int i;

			i = znode->iip;
			zbr = &zp->zbranch[i];
			zbr->lnum = lnum;
			zbr->offs = offs;
			zbr->len = len;
		} else {
			c->zroot.lnum = lnum;
			c->zroot.offs = offs;
			c->zroot.len = len;
		}
		c->calc_idx_sz += ALIGN(len, 8);

		/*
		 * Once lprops is updated, we can decrease the dirty znode count
		 * but it is easier to just do it here.
		 */
		atomic_long_dec(&c->dirty_zn_cnt);

		/*
		 * Calculate the next index node length to see if there is
		 * enough room for it
		 */
		cnext = znode->cnext;
		if (cnext == c->cnext)
			next_len = 0;
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);

		if (next_len != 0 &&
		    buf_offs + used + next_len <= c->leb_size &&
		    avail > 0)
			continue;

		if (avail <= 0 && next_len &&
		    buf_offs + used + next_len <= c->leb_size)
			blen = buf_len;
		else
			blen = ALIGN(wlen, c->min_io_size);

		/* The buffer is full or there are no more znodes to do */
		buf_offs += blen;
		if (next_len) {
			if (buf_offs + next_len > c->leb_size) {
				err = ubifs_update_one_lp(c, lnum,
					c->leb_size - buf_offs, blen - used,
					0, 0);
				if (err)
					return err;
				lnum = -1;
			}
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
			continue;
		}
		err = ubifs_update_one_lp(c, lnum, c->leb_size - buf_offs,
					  blen - used, 0, 0);
		if (err)
			return err;
		break;
	}

	c->dbg->new_ihead_lnum = lnum;
	c->dbg->new_ihead_offs = buf_offs;

	return 0;
}
/**
 * layout_commit - determine positions of index nodes to commit.
 * @c: UBIFS file-system description object
 * @no_space: indicates that insufficient empty LEBs were allocated
 * @cnt: number of znodes to commit
 *
 * Calculate and update the positions of index nodes to commit. If there were
 * an insufficient number of empty LEBs allocated, then index nodes are placed
 * into the gaps created by obsolete index nodes in non-empty index LEBs. For
 * this purpose, an obsolete index node is one that was not in the index as at
 * the end of the last commit. To write "in-the-gaps" requires that those index
 * LEBs are updated atomically in-place.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int layout_commit(struct ubifs_info *c, int no_space, int cnt)
{
	int err;

	if (no_space) {
		err = layout_in_gaps(c, cnt);
		if (err)
			return err;
	}
	err = layout_in_empty_space(c);
	return err;
}

/**
 * find_first_dirty - find first dirty znode.
 * @znode: znode to begin searching from
 *
 * This function returns the first dirty znode to commit in the subtree rooted
 * at @znode, or %NULL if the subtree contains no dirty znodes.
 */
static struct ubifs_znode *find_first_dirty(struct ubifs_znode *znode)
{
	int i, cont;

	if (!znode)
		return NULL;

	while (1) {
		if (znode->level == 0) {
			if (ubifs_zn_dirty(znode))
				return znode;
			return NULL;
		}
		cont = 0;
		for (i = 0; i < znode->child_cnt; i++) {
			struct ubifs_zbranch *zbr = &znode->zbranch[i];

			if (zbr->znode && ubifs_zn_dirty(zbr->znode)) {
				znode = zbr->znode;
				cont = 1;
				break;
			}
		}
		if (!cont) {
			if (ubifs_zn_dirty(znode))
				return znode;
			return NULL;
		}
	}
}

/**
 * find_next_dirty - find next dirty znode.
 * @znode: znode to begin searching from
 *
 * This function returns the next znode to commit after @znode, or %NULL if
 * @znode is the root of the TNC.
 */
static struct ubifs_znode *find_next_dirty(struct ubifs_znode *znode)
{
	int n = znode->iip + 1;

	znode = znode->parent;
	if (!znode)
		return NULL;
	for (; n < znode->child_cnt; n++) {
		struct ubifs_zbranch *zbr = &znode->zbranch[n];

		if (zbr->znode && ubifs_zn_dirty(zbr->znode))
			return find_first_dirty(zbr->znode);
	}
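
	/*
	 * No dirty sibling subtree remains, so the parent itself comes next:
	 * whenever a znode is dirtied its ancestors are dirtied too, so the
	 * commit list is naturally built in child-before-parent order.
	 */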
	return znode;
}

/**
 * get_znodes_to_commit - create list of dirty znodes to commit.
 * @c: UBIFS file-system description object
 *
 * This function returns the number of znodes to commit.
 */
static int get_znodes_to_commit(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext;
	int cnt = 0;

	c->cnext = find_first_dirty(c->zroot.znode);
	znode = c->enext = c->cnext;
	if (!znode) {
		dbg_cmt("no znodes to commit");
		return 0;
	}
	cnt += 1;
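
	/*
	 * Link the dirty znodes through ->cnext in the order they will be
	 * committed; the last znode points back to the first (c->cnext), so
	 * the list is circular.
	 */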
	while (1) {
		ubifs_assert(!ubifs_zn_cow(znode));
		__set_bit(COW_ZNODE, &znode->flags);
		znode->alt = 0;
		cnext = find_next_dirty(znode);
		if (!cnext) {
			znode->cnext = c->cnext;
			break;
		}
		znode->cnext = cnext;
		znode = cnext;
		cnt += 1;
	}
	dbg_cmt("committing %d znodes", cnt);
	ubifs_assert(cnt == atomic_long_read(&c->dirty_zn_cnt));
	return cnt;
}

/**
 * alloc_idx_lebs - allocate empty LEBs to be used to commit.
 * @c: UBIFS file-system description object
 * @cnt: number of znodes to commit
 *
 * This function returns %-ENOSPC if it cannot allocate a sufficient number of
 * empty LEBs. %0 is returned on success, otherwise a negative error code
 * is returned.
 */
static int alloc_idx_lebs(struct ubifs_info *c, int cnt)
{
	int i, leb_cnt, lnum;

	c->ileb_cnt = 0;
	c->ileb_nxt = 0;
	leb_cnt = get_leb_cnt(c, cnt);
	dbg_cmt("need about %d empty LEBS for TNC commit", leb_cnt);
	if (!leb_cnt)
		return 0;
	c->ilebs = kmalloc(leb_cnt * sizeof(int), GFP_NOFS);
	if (!c->ilebs)
		return -ENOMEM;
	for (i = 0; i < leb_cnt; i++) {
		lnum = ubifs_find_free_leb_for_idx(c);
		if (lnum < 0)
			return lnum;
		c->ilebs[c->ileb_cnt++] = lnum;
		dbg_cmt("LEB %d", lnum);
	}
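
	/*
	 * When index checking is enabled, randomly (about one time in eight)
	 * pretend we ran out of space so that the in-the-gaps commit path
	 * gets exercised as well.
	 */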
	if (dbg_is_chk_index(c) && !(prandom_u32() & 7))
		return -ENOSPC;
	return 0;
}

/**
 * free_unused_idx_lebs - free unused LEBs that were allocated for the commit.
 * @c: UBIFS file-system description object
 *
 * It is possible that we allocate more empty LEBs for the commit than we need.
 * This function frees the surplus.
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int free_unused_idx_lebs(struct ubifs_info *c)
{
	int i, err = 0, lnum, er;

	for (i = c->ileb_nxt; i < c->ileb_cnt; i++) {
		lnum = c->ilebs[i];
		dbg_cmt("LEB %d", lnum);
		er = ubifs_change_one_lp(c, lnum, LPROPS_NC, LPROPS_NC, 0,
					 LPROPS_INDEX | LPROPS_TAKEN, 0);
		if (!err)
			err = er;
	}
	return err;
}

/**
 * free_idx_lebs - free unused LEBs after commit end.
 * @c: UBIFS file-system description object
 *
 * This function returns %0 on success and a negative error code on failure.
 */
static int free_idx_lebs(struct ubifs_info *c)
{
	int err;

	err = free_unused_idx_lebs(c);
	kfree(c->ilebs);
	c->ilebs = NULL;
	return err;
}

/**
 * ubifs_tnc_start_commit - start TNC commit.
 * @c: UBIFS file-system description object
 * @zroot: new index root position is returned here
 *
 * This function prepares the list of indexing nodes to commit and lays out
 * their positions on flash. If there is not enough free space it uses the
 * in-gap commit method. Returns zero in case of success and a negative error
 * code in case of failure.
 */
int ubifs_tnc_start_commit(struct ubifs_info *c, struct ubifs_zbranch *zroot)
{
	int err = 0, cnt;

	mutex_lock(&c->tnc_mutex);
	err = dbg_check_tnc(c, 1);
	if (err)
		goto out;
	cnt = get_znodes_to_commit(c);
	if (cnt != 0) {
		int no_space = 0;

		err = alloc_idx_lebs(c, cnt);
		if (err == -ENOSPC)
			no_space = 1;
		else if (err)
			goto out_free;
		err = layout_commit(c, no_space, cnt);
		if (err)
			goto out_free;
		ubifs_assert(atomic_long_read(&c->dirty_zn_cnt) == 0);
		err = free_unused_idx_lebs(c);
		if (err)
			goto out;
	}
	destroy_old_idx(c);
	memcpy(zroot, &c->zroot, sizeof(struct ubifs_zbranch));

	err = ubifs_save_dirty_idx_lnums(c);
	if (err)
		goto out;

	spin_lock(&c->space_lock);
	/*
	 * Although we have not finished committing yet, update size of the
	 * committed index ('c->bi.old_idx_sz') and zero out the index growth
	 * budget. It is OK to do this now, because we've reserved all the
	 * space which is needed to commit the index, and it is safe for the
	 * budgeting subsystem to assume the index is already committed,
	 * even though it is not.
	 */
	ubifs_assert(c->bi.min_idx_lebs == ubifs_calc_min_idx_lebs(c));
	c->bi.old_idx_sz = c->calc_idx_sz;
	c->bi.uncommitted_idx = 0;
	c->bi.min_idx_lebs = ubifs_calc_min_idx_lebs(c);
	spin_unlock(&c->space_lock);
	mutex_unlock(&c->tnc_mutex);

	dbg_cmt("number of index LEBs %d", c->lst.idx_lebs);
	dbg_cmt("size of index %llu", c->calc_idx_sz);
	return err;

out_free:
	free_idx_lebs(c);
out:
	mutex_unlock(&c->tnc_mutex);
	return err;
}
/**
 * write_index - write index nodes.
 * @c: UBIFS file-system description object
 *
 * This function writes the index nodes whose positions were laid out in the
 * layout_in_empty_space function.
 */
static int write_index(struct ubifs_info *c)
{
	struct ubifs_idx_node *idx;
	struct ubifs_znode *znode, *cnext;
	int i, lnum, offs, len, next_len, buf_len, buf_offs, used;
	int avail, wlen, err, lnum_pos = 0, blen, nxt_offs;

	cnext = c->enext;
	if (!cnext)
		return 0;

	/*
	 * Always write index nodes to the index head so that index nodes and
	 * other types of nodes are never mixed in the same erase block.
	 */
	lnum = c->ihead_lnum;
	buf_offs = c->ihead_offs;

	/* Allocate commit buffer */
	buf_len = ALIGN(c->max_idx_node_sz, c->min_io_size);
	used = 0;
	avail = buf_len;

	/* Ensure there is enough room for first write */
	next_len = ubifs_idx_node_sz(c, cnext->child_cnt);
	if (buf_offs + next_len > c->leb_size) {
		err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0, 0,
					  LPROPS_TAKEN);
		if (err)
			return err;
		lnum = -1;
	}

	while (1) {
		cond_resched();

		znode = cnext;
		idx = c->cbuf + used;

		/* Make index node */
		idx->ch.node_type = UBIFS_IDX_NODE;
		idx->child_cnt = cpu_to_le16(znode->child_cnt);
		idx->level = cpu_to_le16(znode->level);
		for (i = 0; i < znode->child_cnt; i++) {
			struct ubifs_branch *br = ubifs_idx_branch(c, idx, i);
			struct ubifs_zbranch *zbr = &znode->zbranch[i];

			key_write_idx(c, &zbr->key, &br->key);
			br->lnum = cpu_to_le32(zbr->lnum);
			br->offs = cpu_to_le32(zbr->offs);
			br->len = cpu_to_le32(zbr->len);
			if (!zbr->lnum || !zbr->len) {
				ubifs_err("bad ref in znode");
				ubifs_dump_znode(c, znode);
				if (zbr->znode)
					ubifs_dump_znode(c, zbr->znode);
			}
		}
		len = ubifs_idx_node_sz(c, znode->child_cnt);
		ubifs_prepare_node(c, idx, len, 0);

		/* Determine the index node position */
		if (lnum == -1) {
			lnum = c->ilebs[lnum_pos++];
			buf_offs = 0;
			used = 0;
			avail = buf_len;
		}
		offs = buf_offs + used;

		if (lnum != znode->lnum || offs != znode->offs ||
		    len != znode->len) {
			ubifs_err("inconsistent znode posn");
			return -EINVAL;
		}

		/* Grab some stuff from znode while we still can */
		cnext = znode->cnext;

		ubifs_assert(ubifs_zn_dirty(znode));
		ubifs_assert(ubifs_zn_cow(znode));

		/*
		 * It is important that other threads should see %DIRTY_ZNODE
		 * flag cleared before %COW_ZNODE. Specifically, it matters in
		 * the 'dirty_cow_znode()' function. This is the reason for the
		 * first barrier. Also, we want the bit changes to be seen to
		 * other threads ASAP, to avoid unnecessary copying, which is
		 * the reason for the second barrier.
		 */
		clear_bit(DIRTY_ZNODE, &znode->flags);
		smp_mb__before_atomic();
		clear_bit(COW_ZNODE, &znode->flags);
		smp_mb__after_atomic();

		/*
		 * We have marked the znode as clean but have not updated the
		 * @c->clean_zn_cnt counter. If this znode becomes dirty again
		 * before 'free_obsolete_znodes()' is called, then
		 * @c->clean_zn_cnt will be decremented before it gets
		 * incremented (resulting in 2 decrements for the same znode).
		 * This means that @c->clean_zn_cnt may become negative for a
		 * while.
		 *
		 * Q: why can't we just increment @c->clean_zn_cnt here?
		 * A: because we do not have the @c->tnc_mutex locked, and the
		 *    following code would be racy and buggy:
		 *
		 *    if (!ubifs_zn_obsolete(znode)) {
		 *            atomic_long_inc(&c->clean_zn_cnt);
		 *            atomic_long_inc(&ubifs_clean_zn_cnt);
		 *    }
		 *
		 * Thus, we just delay the @c->clean_zn_cnt update until we
		 * have the mutex locked.
		 */

		/* Do not access znode from this point on */

		/* Update buffer positions */
		wlen = used + len;
		used += ALIGN(len, 8);
		avail -= ALIGN(len, 8);

		/*
		 * Calculate the next index node length to see if there is
		 * enough room for it
		 */
		if (cnext == c->cnext)
			next_len = 0;
		else
			next_len = ubifs_idx_node_sz(c, cnext->child_cnt);

		nxt_offs = buf_offs + used + next_len;
		if (next_len && nxt_offs <= c->leb_size) {
			if (avail > 0)
				continue;
			else
				blen = buf_len;
		} else {
			wlen = ALIGN(wlen, 8);
			blen = ALIGN(wlen, c->min_io_size);
			ubifs_pad(c, c->cbuf + wlen, blen - wlen);
		}

		/* The buffer is full or there are no more znodes to do */
		err = ubifs_leb_write(c, lnum, c->cbuf, buf_offs, blen);
		if (err)
			return err;
		buf_offs += blen;
		if (next_len) {
			if (nxt_offs > c->leb_size) {
				err = ubifs_update_one_lp(c, lnum, LPROPS_NC, 0,
							  0, LPROPS_TAKEN);
				if (err)
					return err;
				lnum = -1;
			}
			used -= blen;
			if (used < 0)
				used = 0;
			avail = buf_len - used;
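			/*
			 * Index nodes that were prepared in the buffer beyond
			 * what was just written are moved to the front of the
			 * buffer so that they go out with the next write.
			 */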
			memmove(c->cbuf, c->cbuf + blen, used);
			continue;
		}
		break;
	}

	if (lnum != c->dbg->new_ihead_lnum ||
	    buf_offs != c->dbg->new_ihead_offs) {
		ubifs_err("inconsistent ihead");
		return -EINVAL;
	}

	c->ihead_lnum = lnum;
	c->ihead_offs = buf_offs;

	return 0;
}

/**
 * free_obsolete_znodes - free obsolete znodes.
 * @c: UBIFS file-system description object
 *
 * At the end of the commit, obsolete znodes are freed.
 */
static void free_obsolete_znodes(struct ubifs_info *c)
{
	struct ubifs_znode *znode, *cnext;

	cnext = c->cnext;
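	/* Walk the circular commit list built by get_znodes_to_commit() */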
	do {
		znode = cnext;
		cnext = znode->cnext;
		if (ubifs_zn_obsolete(znode))
			kfree(znode);
		else {
			znode->cnext = NULL;
			atomic_long_inc(&c->clean_zn_cnt);
			atomic_long_inc(&ubifs_clean_zn_cnt);
		}
	} while (cnext != c->cnext);
}

/**
 * return_gap_lebs - return LEBs used by the in-gap commit method.
 * @c: UBIFS file-system description object
 *
 * This function clears the "taken" flag for the LEBs which were used by the
 * "commit in-the-gaps" method.
 */
static int return_gap_lebs(struct ubifs_info *c)
{
	int *p, err;

	if (!c->gap_lebs)
		return 0;

	dbg_cmt("");
	for (p = c->gap_lebs; *p != -1; p++) {
		err = ubifs_change_one_lp(c, *p, LPROPS_NC, LPROPS_NC, 0,
					  LPROPS_TAKEN, 0);
		if (err)
			return err;
	}

	kfree(c->gap_lebs);
	c->gap_lebs = NULL;
	return 0;
}

/**
 * ubifs_tnc_end_commit - update the TNC for commit end.
 * @c: UBIFS file-system description object
 *
 * Write the dirty znodes.
 */
int ubifs_tnc_end_commit(struct ubifs_info *c)
{
	int err;

	if (!c->cnext)
		return 0;

	err = return_gap_lebs(c);
	if (err)
		return err;

	err = write_index(c);
	if (err)
		return err;

	mutex_lock(&c->tnc_mutex);

	dbg_cmt("TNC height is %d", c->zroot.znode->level + 1);

	free_obsolete_znodes(c);

	c->cnext = NULL;
	kfree(c->ilebs);
	c->ilebs = NULL;

	mutex_unlock(&c->tnc_mutex);

	return 0;
}