transaction.c

/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/writeback.h>
#include <linux/pagemap.h>
#include <linux/blkdev.h>
#include <linux/uuid.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "locking.h"
#include "tree-log.h"
#include "inode-map.h"
#include "volumes.h"
#include "dev-replace.h"
#include "qgroup.h"

#define BTRFS_ROOT_TRANS_TAG 0

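/*
 * For each transaction state, the mask of handle types that may no longer
 * join the transaction once it has reached that state. join_transaction()
 * returns -EBUSY when the running transaction's state maps to a mask that
 * contains the caller's handle type.
 */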
static unsigned int btrfs_blocked_trans_types[TRANS_STATE_MAX] = {
        [TRANS_STATE_RUNNING]           = 0U,
        [TRANS_STATE_BLOCKED]           = (__TRANS_USERSPACE |
                                           __TRANS_START),
        [TRANS_STATE_COMMIT_START]      = (__TRANS_USERSPACE |
                                           __TRANS_START |
                                           __TRANS_ATTACH),
        [TRANS_STATE_COMMIT_DOING]      = (__TRANS_USERSPACE |
                                           __TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN),
        [TRANS_STATE_UNBLOCKED]         = (__TRANS_USERSPACE |
                                           __TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
                                           __TRANS_JOIN_NOLOCK),
        [TRANS_STATE_COMPLETED]         = (__TRANS_USERSPACE |
                                           __TRANS_START |
                                           __TRANS_ATTACH |
                                           __TRANS_JOIN |
                                           __TRANS_JOIN_NOLOCK),
};

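/*
 * Drop a reference on a transaction and free it once the last reference is
 * gone. By that point the transaction must be off the global trans_list and
 * must not have any delayed ref heads left; leftover pending chunk mappings
 * are dropped here.
 */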
void btrfs_put_transaction(struct btrfs_transaction *transaction)
{
        WARN_ON(atomic_read(&transaction->use_count) == 0);
        if (atomic_dec_and_test(&transaction->use_count)) {
                BUG_ON(!list_empty(&transaction->list));
                WARN_ON(!RB_EMPTY_ROOT(&transaction->delayed_refs.href_root));
                while (!list_empty(&transaction->pending_chunks)) {
                        struct extent_map *em;

                        em = list_first_entry(&transaction->pending_chunks,
                                              struct extent_map, list);
                        list_del_init(&em->list);
                        free_extent_map(em);
                }
                kmem_cache_free(btrfs_transaction_cachep, transaction);
        }
}

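/*
 * At commit time, point each dirty root's commit_root at its current root
 * node so readers of the committed tree see the new state, dropping the
 * reference on the old commit root. Runs under commit_root_sem.
 */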
static noinline void switch_commit_roots(struct btrfs_transaction *trans,
                                         struct btrfs_fs_info *fs_info)
{
        struct btrfs_root *root, *tmp;

        down_write(&fs_info->commit_root_sem);
        list_for_each_entry_safe(root, tmp, &trans->switch_commits,
                                 dirty_list) {
                list_del_init(&root->dirty_list);
                free_extent_buffer(root->commit_root);
                root->commit_root = btrfs_root_node(root);
                if (is_fstree(root->objectid))
                        btrfs_unpin_free_ino(root);
        }
        up_write(&fs_info->commit_root_sem);
}

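/*
 * The "extwriter" counter tracks handles whose type is in TRANS_EXTWRITERS
 * (external writers, as opposed to internal joins). The commit path waits
 * for this counter to drain before it stops admitting new joiners.
 */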
static inline void extwriter_counter_inc(struct btrfs_transaction *trans,
                                         unsigned int type)
{
        if (type & TRANS_EXTWRITERS)
                atomic_inc(&trans->num_extwriters);
}

static inline void extwriter_counter_dec(struct btrfs_transaction *trans,
                                         unsigned int type)
{
        if (type & TRANS_EXTWRITERS)
                atomic_dec(&trans->num_extwriters);
}

static inline void extwriter_counter_init(struct btrfs_transaction *trans,
                                          unsigned int type)
{
        atomic_set(&trans->num_extwriters, ((type & TRANS_EXTWRITERS) ? 1 : 0));
}

static inline int extwriter_counter_read(struct btrfs_transaction *trans)
{
        return atomic_read(&trans->num_extwriters);
}

/*
 * either allocate a new transaction or hop into the existing one
 */
static noinline int join_transaction(struct btrfs_root *root, unsigned int type)
{
        struct btrfs_transaction *cur_trans;
        struct btrfs_fs_info *fs_info = root->fs_info;

        spin_lock(&fs_info->trans_lock);
loop:
        /* The file system has been taken offline. No new transactions. */
        if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                spin_unlock(&fs_info->trans_lock);
                return -EROFS;
        }

        cur_trans = fs_info->running_transaction;
        if (cur_trans) {
                if (cur_trans->aborted) {
                        spin_unlock(&fs_info->trans_lock);
                        return cur_trans->aborted;
                }
                if (btrfs_blocked_trans_types[cur_trans->state] & type) {
                        spin_unlock(&fs_info->trans_lock);
                        return -EBUSY;
                }
                atomic_inc(&cur_trans->use_count);
                atomic_inc(&cur_trans->num_writers);
                extwriter_counter_inc(cur_trans, type);
                spin_unlock(&fs_info->trans_lock);
                return 0;
        }
        spin_unlock(&fs_info->trans_lock);

        /*
         * If we are ATTACH, we just want to catch the current transaction,
         * and commit it. If there is no transaction, just return ENOENT.
         */
        if (type == TRANS_ATTACH)
                return -ENOENT;

        /*
         * JOIN_NOLOCK only happens during the transaction commit, so
         * it is impossible that ->running_transaction is NULL
         */
        BUG_ON(type == TRANS_JOIN_NOLOCK);

        cur_trans = kmem_cache_alloc(btrfs_transaction_cachep, GFP_NOFS);
        if (!cur_trans)
                return -ENOMEM;

        spin_lock(&fs_info->trans_lock);
        if (fs_info->running_transaction) {
                /*
                 * someone started a transaction after we unlocked. Make sure
                 * to redo the checks above
                 */
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                goto loop;
        } else if (test_bit(BTRFS_FS_STATE_ERROR, &fs_info->fs_state)) {
                spin_unlock(&fs_info->trans_lock);
                kmem_cache_free(btrfs_transaction_cachep, cur_trans);
                return -EROFS;
        }

        atomic_set(&cur_trans->num_writers, 1);
        extwriter_counter_init(cur_trans, type);
        init_waitqueue_head(&cur_trans->writer_wait);
        init_waitqueue_head(&cur_trans->commit_wait);
        cur_trans->state = TRANS_STATE_RUNNING;
        /*
         * One for this trans handle, one so it will live on until we
         * commit the transaction.
         */
        atomic_set(&cur_trans->use_count, 2);
        cur_trans->start_time = get_seconds();

        cur_trans->delayed_refs.href_root = RB_ROOT;
        atomic_set(&cur_trans->delayed_refs.num_entries, 0);
        cur_trans->delayed_refs.num_heads_ready = 0;
        cur_trans->delayed_refs.num_heads = 0;
        cur_trans->delayed_refs.flushing = 0;
        cur_trans->delayed_refs.run_delayed_start = 0;

        /*
         * although the tree mod log is per file system and not per transaction,
         * the log must never go across transaction boundaries.
         */
        smp_mb();
        if (!list_empty(&fs_info->tree_mod_seq_list))
                WARN(1, KERN_ERR "BTRFS: tree_mod_seq_list not empty when "
                        "creating a fresh transaction\n");
        if (!RB_EMPTY_ROOT(&fs_info->tree_mod_log))
                WARN(1, KERN_ERR "BTRFS: tree_mod_log rb tree not empty when "
                        "creating a fresh transaction\n");
        atomic64_set(&fs_info->tree_mod_seq, 0);

        spin_lock_init(&cur_trans->delayed_refs.lock);

        INIT_LIST_HEAD(&cur_trans->pending_snapshots);
        INIT_LIST_HEAD(&cur_trans->pending_chunks);
        INIT_LIST_HEAD(&cur_trans->switch_commits);
        INIT_LIST_HEAD(&cur_trans->pending_ordered);
        list_add_tail(&cur_trans->list, &fs_info->trans_list);
        extent_io_tree_init(&cur_trans->dirty_pages,
                            fs_info->btree_inode->i_mapping);
        fs_info->generation++;
        cur_trans->transid = fs_info->generation;
        fs_info->running_transaction = cur_trans;
        cur_trans->aborted = 0;
        spin_unlock(&fs_info->trans_lock);

        return 0;
}

/*
 * this does all the record keeping required to make sure that a reference
 * counted root is properly recorded in a given transaction. This is required
 * to make sure the old root from before we joined the transaction is deleted
 * when the transaction commits
 */
static int record_root_in_trans(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root)
{
        if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
            root->last_trans < trans->transid) {
                WARN_ON(root == root->fs_info->extent_root);
                WARN_ON(root->commit_root != root->node);

                /*
                 * see below for IN_TRANS_SETUP usage rules
                 * we have the reloc mutex held now, so there
                 * is only one writer in this function
                 */
                set_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);

                /* make sure readers find IN_TRANS_SETUP before
                 * they find our root->last_trans update
                 */
                smp_wmb();

                spin_lock(&root->fs_info->fs_roots_radix_lock);
                if (root->last_trans == trans->transid) {
                        spin_unlock(&root->fs_info->fs_roots_radix_lock);
                        return 0;
                }
                radix_tree_tag_set(&root->fs_info->fs_roots_radix,
                                   (unsigned long)root->root_key.objectid,
                                   BTRFS_ROOT_TRANS_TAG);
                spin_unlock(&root->fs_info->fs_roots_radix_lock);
                root->last_trans = trans->transid;

                /* this is pretty tricky. We don't want to
                 * take the relocation lock in btrfs_record_root_in_trans
                 * unless we're really doing the first setup for this root in
                 * this transaction.
                 *
                 * Normally we'd use root->last_trans as a flag to decide
                 * if we want to take the expensive mutex.
                 *
                 * But, we have to set root->last_trans before we
                 * init the relocation root, otherwise, we trip over warnings
                 * in ctree.c. The solution used here is to flag ourselves
                 * with root IN_TRANS_SETUP. When this is 1, we're still
                 * fixing up the reloc trees and everyone must wait.
                 *
                 * When this is zero, they can trust root->last_trans and fly
                 * through btrfs_record_root_in_trans without having to take the
                 * lock. smp_wmb() makes sure that all the writes above are
                 * done before we pop in the zero below
                 */
                btrfs_init_reloc_root(trans, root);
                smp_mb__before_atomic();
                clear_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state);
        }
        return 0;
}

int btrfs_record_root_in_trans(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state))
                return 0;

        /*
         * see record_root_in_trans for comments about IN_TRANS_SETUP usage
         * and barriers
         */
        smp_rmb();
        if (root->last_trans == trans->transid &&
            !test_bit(BTRFS_ROOT_IN_TRANS_SETUP, &root->state))
                return 0;

        mutex_lock(&root->fs_info->reloc_mutex);
        record_root_in_trans(trans, root);
        mutex_unlock(&root->fs_info->reloc_mutex);

        return 0;
}

static inline int is_transaction_blocked(struct btrfs_transaction *trans)
{
        return (trans->state >= TRANS_STATE_BLOCKED &&
                trans->state < TRANS_STATE_UNBLOCKED &&
                !trans->aborted);
}

/* wait for commit against the current transaction to become unblocked
 * when this is done, it is safe to start a new transaction, but the current
 * transaction might not be fully on disk.
 */
static void wait_current_trans(struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans;

        spin_lock(&root->fs_info->trans_lock);
        cur_trans = root->fs_info->running_transaction;
        if (cur_trans && is_transaction_blocked(cur_trans)) {
                atomic_inc(&cur_trans->use_count);
                spin_unlock(&root->fs_info->trans_lock);

                wait_event(root->fs_info->transaction_wait,
                           cur_trans->state >= TRANS_STATE_UNBLOCKED ||
                           cur_trans->aborted);
                btrfs_put_transaction(cur_trans);
        } else {
                spin_unlock(&root->fs_info->trans_lock);
        }
}

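/*
 * Decide whether a handle of @type should wait on a blocked transaction
 * before joining: never during log replay, always for TRANS_USERSPACE, and
 * for TRANS_START only when no ioctl-started transaction is currently open.
 */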
static int may_wait_transaction(struct btrfs_root *root, int type)
{
        if (root->fs_info->log_root_recovering)
                return 0;

        if (type == TRANS_USERSPACE)
                return 1;

        if (type == TRANS_START &&
            !atomic_read(&root->fs_info->open_ioctl_trans))
                return 1;

        return 0;
}

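/*
 * True when starting a transaction against this root may require creating a
 * relocation root, in which case start_transaction() reserves one extra
 * tree block for it.
 */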
static inline bool need_reserve_reloc_root(struct btrfs_root *root)
{
        if (!root->fs_info->reloc_ctl ||
            !test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
            root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
            root->reloc_root)
                return false;

        return true;
}

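/*
 * Common implementation behind all of the btrfs_*_transaction() start
 * helpers: reserve metadata space for @num_items tree operations (flushing
 * as permitted by @flush), then join or create the running transaction and
 * return a handle for it.
 *
 * A typical caller pairs the returned handle with btrfs_end_transaction(),
 * e.g. (illustrative sketch only):
 *
 *      trans = btrfs_start_transaction(root, 1);
 *      if (IS_ERR(trans))
 *              return PTR_ERR(trans);
 *      ... modify one tree item ...
 *      btrfs_end_transaction(trans, root);
 */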
static struct btrfs_trans_handle *
start_transaction(struct btrfs_root *root, u64 num_items, unsigned int type,
                  enum btrfs_reserve_flush_enum flush)
{
        struct btrfs_trans_handle *h;
        struct btrfs_transaction *cur_trans;
        u64 num_bytes = 0;
        u64 qgroup_reserved = 0;
        bool reloc_reserved = false;
        int ret;

        /* Send isn't supposed to start transactions. */
        ASSERT(current->journal_info != BTRFS_SEND_TRANS_STUB);

        if (test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state))
                return ERR_PTR(-EROFS);

        if (current->journal_info) {
                WARN_ON(type & TRANS_EXTWRITERS);
                h = current->journal_info;
                h->use_count++;
                WARN_ON(h->use_count > 2);
                h->orig_rsv = h->block_rsv;
                h->block_rsv = NULL;
                goto got_it;
        }

        /*
         * Do the reservation before we join the transaction so we can do all
         * the appropriate flushing if need be.
         */
        if (num_items > 0 && root != root->fs_info->chunk_root) {
                if (root->fs_info->quota_enabled &&
                    is_fstree(root->root_key.objectid)) {
                        qgroup_reserved = num_items * root->nodesize;
                        ret = btrfs_qgroup_reserve(root, qgroup_reserved);
                        if (ret)
                                return ERR_PTR(ret);
                }

                num_bytes = btrfs_calc_trans_metadata_size(root, num_items);
                /*
                 * Do the reservation for the relocation root creation
                 */
                if (need_reserve_reloc_root(root)) {
                        num_bytes += root->nodesize;
                        reloc_reserved = true;
                }

                ret = btrfs_block_rsv_add(root,
                                          &root->fs_info->trans_block_rsv,
                                          num_bytes, flush);
                if (ret)
                        goto reserve_fail;
        }
again:
        h = kmem_cache_alloc(btrfs_trans_handle_cachep, GFP_NOFS);
        if (!h) {
                ret = -ENOMEM;
                goto alloc_fail;
        }

        /*
         * If we are JOIN_NOLOCK we're already committing a transaction and
         * waiting on this guy, so we don't need to do the sb_start_intwrite
         * because we're already holding a ref. We need this because we could
         * have raced in and done an fsync() on a file which can kick a commit
         * and then we deadlock with somebody doing a freeze.
         *
         * If we are ATTACH, it means we just want to catch the current
         * transaction and commit it, so we needn't do sb_start_intwrite().
         */
        if (type & __TRANS_FREEZABLE)
                sb_start_intwrite(root->fs_info->sb);

        if (may_wait_transaction(root, type))
                wait_current_trans(root);

        do {
                ret = join_transaction(root, type);
                if (ret == -EBUSY) {
                        wait_current_trans(root);
                        if (unlikely(type == TRANS_ATTACH))
                                ret = -ENOENT;
                }
        } while (ret == -EBUSY);

        if (ret < 0) {
                /* We must get the transaction if we are JOIN_NOLOCK. */
                BUG_ON(type == TRANS_JOIN_NOLOCK);
                goto join_fail;
        }

        cur_trans = root->fs_info->running_transaction;

        h->transid = cur_trans->transid;
        h->transaction = cur_trans;
        h->blocks_used = 0;
        h->bytes_reserved = 0;
        h->root = root;
        h->delayed_ref_updates = 0;
        h->use_count = 1;
        h->adding_csums = 0;
        h->block_rsv = NULL;
        h->orig_rsv = NULL;
        h->aborted = 0;
        h->qgroup_reserved = 0;
        h->delayed_ref_elem.seq = 0;
        h->type = type;
        h->allocating_chunk = false;
        h->reloc_reserved = false;
        h->sync = false;
        INIT_LIST_HEAD(&h->qgroup_ref_list);
        INIT_LIST_HEAD(&h->new_bgs);
        INIT_LIST_HEAD(&h->ordered);

        smp_mb();
        if (cur_trans->state >= TRANS_STATE_BLOCKED &&
            may_wait_transaction(root, type)) {
                current->journal_info = h;
                btrfs_commit_transaction(h, root);
                goto again;
        }

        if (num_bytes) {
                trace_btrfs_space_reservation(root->fs_info, "transaction",
                                              h->transid, num_bytes, 1);
                h->block_rsv = &root->fs_info->trans_block_rsv;
                h->bytes_reserved = num_bytes;
                h->reloc_reserved = reloc_reserved;
        }
        h->qgroup_reserved = qgroup_reserved;

got_it:
        btrfs_record_root_in_trans(h, root);

        if (!current->journal_info && type != TRANS_USERSPACE)
                current->journal_info = h;
        return h;

join_fail:
        if (type & __TRANS_FREEZABLE)
                sb_end_intwrite(root->fs_info->sb);
        kmem_cache_free(btrfs_trans_handle_cachep, h);
alloc_fail:
        if (num_bytes)
                btrfs_block_rsv_release(root, &root->fs_info->trans_block_rsv,
                                        num_bytes);
reserve_fail:
        if (qgroup_reserved)
                btrfs_qgroup_free(root, qgroup_reserved);
        return ERR_PTR(ret);
}

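/*
 * Start a new transaction (or join the running one), reserving enough
 * metadata space for @num_items tree operations and flushing dirty data
 * aggressively if the reservation cannot be satisfied right away. The
 * wrappers below differ only in handle type and flush behavior.
 */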
struct btrfs_trans_handle *btrfs_start_transaction(struct btrfs_root *root,
                                                   int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_ALL);
}

struct btrfs_trans_handle *btrfs_start_transaction_lflush(
                                struct btrfs_root *root, int num_items)
{
        return start_transaction(root, num_items, TRANS_START,
                                 BTRFS_RESERVE_FLUSH_LIMIT);
}

struct btrfs_trans_handle *btrfs_join_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN, 0);
}

struct btrfs_trans_handle *btrfs_join_transaction_nolock(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_JOIN_NOLOCK, 0);
}

struct btrfs_trans_handle *btrfs_start_ioctl_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_USERSPACE, 0);
}

/*
 * btrfs_attach_transaction() - catch the running transaction
 *
 * It is used when we want to commit the current transaction, but
 * don't want to start a new one.
 *
 * Note: If this function returns -ENOENT, it just means there is no
 * running transaction. But it is possible that the inactive transaction
 * is still in memory, not fully on disk. If you hope there is no
 * inactive transaction in the fs when -ENOENT is returned, you should
 * invoke btrfs_attach_transaction_barrier().
 */
struct btrfs_trans_handle *btrfs_attach_transaction(struct btrfs_root *root)
{
        return start_transaction(root, 0, TRANS_ATTACH, 0);
}

/*
 * btrfs_attach_transaction_barrier() - catch the running transaction
 *
 * It is similar to the above function, the difference is that this one
 * will also wait for any inactive transaction to fully complete.
 */
struct btrfs_trans_handle *
btrfs_attach_transaction_barrier(struct btrfs_root *root)
{
        struct btrfs_trans_handle *trans;

        trans = start_transaction(root, 0, TRANS_ATTACH, 0);
        if (IS_ERR(trans) && PTR_ERR(trans) == -ENOENT)
                btrfs_wait_for_commit(root, 0);

        return trans;
}

/* wait for a transaction commit to be fully complete */
static noinline void wait_for_commit(struct btrfs_root *root,
                                     struct btrfs_transaction *commit)
{
        wait_event(commit->commit_wait, commit->state == TRANS_STATE_COMPLETED);
}

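/*
 * Wait for the transaction with id @transid to commit. With @transid == 0,
 * wait for the newest transaction that is already committing, if there is
 * one. Returns -EINVAL when @transid is newer than any transaction we know
 * of.
 */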
int btrfs_wait_for_commit(struct btrfs_root *root, u64 transid)
{
        struct btrfs_transaction *cur_trans = NULL, *t;
        int ret = 0;

        if (transid) {
                if (transid <= root->fs_info->last_trans_committed)
                        goto out;

                /* find specified transaction */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry(t, &root->fs_info->trans_list, list) {
                        if (t->transid == transid) {
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
                                ret = 0;
                                break;
                        }
                        if (t->transid > transid) {
                                ret = 0;
                                break;
                        }
                }
                spin_unlock(&root->fs_info->trans_lock);

                /*
                 * The specified transaction doesn't exist, or we
                 * raced with btrfs_commit_transaction
                 */
                if (!cur_trans) {
                        if (transid > root->fs_info->last_trans_committed)
                                ret = -EINVAL;
                        goto out;
                }
        } else {
                /* find newest transaction that is committing | committed */
                spin_lock(&root->fs_info->trans_lock);
                list_for_each_entry_reverse(t, &root->fs_info->trans_list,
                                            list) {
                        if (t->state >= TRANS_STATE_COMMIT_START) {
                                if (t->state == TRANS_STATE_COMPLETED)
                                        break;
                                cur_trans = t;
                                atomic_inc(&cur_trans->use_count);
                                break;
                        }
                }
                spin_unlock(&root->fs_info->trans_lock);
                if (!cur_trans)
                        goto out;  /* nothing committing|committed */
        }

        wait_for_commit(root, cur_trans);
        btrfs_put_transaction(cur_trans);
out:
        return ret;
}

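/*
 * Throttle the caller by waiting for a blocked transaction to become
 * unblocked, unless a userspace ioctl transaction currently holds the fs
 * open.
 */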
void btrfs_throttle(struct btrfs_root *root)
{
        if (!atomic_read(&root->fs_info->open_ioctl_trans))
                wait_current_trans(root);
}

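/*
 * Heuristic behind btrfs_should_end_transaction(): end the transaction
 * early when global metadata space is exhausted and delayed refs need room,
 * or when the global block reserve is running low.
 */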
static int should_end_transaction(struct btrfs_trans_handle *trans,
                                  struct btrfs_root *root)
{
        if (root->fs_info->global_block_rsv.space_info->full &&
            btrfs_check_space_for_delayed_refs(trans, root))
                return 1;

        return !!btrfs_block_rsv_check(root, &root->fs_info->global_block_rsv, 5);
}

int btrfs_should_end_transaction(struct btrfs_trans_handle *trans,
                                 struct btrfs_root *root)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        int updates;
        int err;

        smp_mb();
        if (cur_trans->state >= TRANS_STATE_BLOCKED ||
            cur_trans->delayed_refs.flushing)
                return 1;

        updates = trans->delayed_ref_updates;
        trans->delayed_ref_updates = 0;
        if (updates) {
                err = btrfs_run_delayed_refs(trans, root, updates);
                if (err) /* Error code will also eval true */
                        return err;
        }

        return should_end_transaction(trans, root);
}

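/*
 * Tear down a transaction handle: release its metadata reservation, run or
 * kick off pending delayed refs, move the running transaction to the
 * blocked state if it should end, and drop the handle's references. With
 * @throttle set, the caller may end up driving the commit itself.
 */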
static int __btrfs_end_transaction(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root, int throttle)
{
        struct btrfs_transaction *cur_trans = trans->transaction;
        struct btrfs_fs_info *info = root->fs_info;
        unsigned long cur = trans->delayed_ref_updates;
        int lock = (trans->type != TRANS_JOIN_NOLOCK);
        int err = 0;
        int must_run_delayed_refs = 0;

        if (trans->use_count > 1) {
                trans->use_count--;
                trans->block_rsv = trans->orig_rsv;
                return 0;
        }

        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;

        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, root);

        if (!list_empty(&trans->ordered)) {
                spin_lock(&info->trans_lock);
                list_splice(&trans->ordered, &cur_trans->pending_ordered);
                spin_unlock(&info->trans_lock);
        }

        trans->delayed_ref_updates = 0;
        if (!trans->sync) {
                must_run_delayed_refs =
                        btrfs_should_throttle_delayed_refs(trans, root);
                cur = max_t(unsigned long, cur, 32);

                /*
                 * don't make the caller wait if they are from a NOLOCK
                 * or ATTACH transaction, it will deadlock with commit
                 */
                if (must_run_delayed_refs == 1 &&
                    (trans->type & (__TRANS_JOIN_NOLOCK | __TRANS_ATTACH)))
                        must_run_delayed_refs = 2;
        }

        if (trans->qgroup_reserved) {
                /*
                 * the same root has to be passed here between start_transaction
                 * and end_transaction. Subvolume quota depends on this.
                 */
                btrfs_qgroup_free(trans->root, trans->qgroup_reserved);
                trans->qgroup_reserved = 0;
        }

        btrfs_trans_release_metadata(trans, root);
        trans->block_rsv = NULL;

        if (!list_empty(&trans->new_bgs))
                btrfs_create_pending_block_groups(trans, root);

        if (lock && !atomic_read(&root->fs_info->open_ioctl_trans) &&
            should_end_transaction(trans, root) &&
            ACCESS_ONCE(cur_trans->state) == TRANS_STATE_RUNNING) {
                spin_lock(&info->trans_lock);
                if (cur_trans->state == TRANS_STATE_RUNNING)
                        cur_trans->state = TRANS_STATE_BLOCKED;
                spin_unlock(&info->trans_lock);
        }

        if (lock && ACCESS_ONCE(cur_trans->state) == TRANS_STATE_BLOCKED) {
                if (throttle)
                        return btrfs_commit_transaction(trans, root);
                else
                        wake_up_process(info->transaction_kthread);
        }

        if (trans->type & __TRANS_FREEZABLE)
                sb_end_intwrite(root->fs_info->sb);

        WARN_ON(cur_trans != info->running_transaction);
        WARN_ON(atomic_read(&cur_trans->num_writers) < 1);
        atomic_dec(&cur_trans->num_writers);
        extwriter_counter_dec(cur_trans, trans->type);

        smp_mb();
        if (waitqueue_active(&cur_trans->writer_wait))
                wake_up(&cur_trans->writer_wait);
        btrfs_put_transaction(cur_trans);

        if (current->journal_info == trans)
                current->journal_info = NULL;

        if (throttle)
                btrfs_run_delayed_iputs(root);

        if (trans->aborted ||
            test_bit(BTRFS_FS_STATE_ERROR, &root->fs_info->fs_state)) {
                wake_up_process(info->transaction_kthread);
                err = -EIO;
        }
        assert_qgroups_uptodate(trans);

        kmem_cache_free(btrfs_trans_handle_cachep, trans);
        if (must_run_delayed_refs) {
                btrfs_async_run_delayed_refs(root, cur,
                                             must_run_delayed_refs == 1);
        }
        return err;
}

int btrfs_end_transaction(struct btrfs_trans_handle *trans,
                          struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 0);
}

int btrfs_end_transaction_throttle(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root)
{
        return __btrfs_end_transaction(trans, root, 1);
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are sent to disk but does not wait on them
 */
int btrfs_write_marked_extents(struct btrfs_root *root,
                               struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      mark, &cached_state)) {
                convert_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
                                   mark, &cached_state, GFP_NOFS);
                cached_state = NULL;
                err = filemap_fdatawrite_range(mapping, start, end);
                if (err)
                        werr = err;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;
        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are on disk for transaction or log commit. We wait
 * on all the pages and clear them from the dirty pages state tree
 */
int btrfs_wait_marked_extents(struct btrfs_root *root,
                              struct extent_io_tree *dirty_pages, int mark)
{
        int err = 0;
        int werr = 0;
        struct address_space *mapping = root->fs_info->btree_inode->i_mapping;
        struct extent_state *cached_state = NULL;
        u64 start = 0;
        u64 end;
        struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
        bool errors = false;

        while (!find_first_extent_bit(dirty_pages, start, &start, &end,
                                      EXTENT_NEED_WAIT, &cached_state)) {
                clear_extent_bit(dirty_pages, start, end, EXTENT_NEED_WAIT,
                                 0, 0, &cached_state, GFP_NOFS);
                err = filemap_fdatawait_range(mapping, start, end);
                if (err)
                        werr = err;
                cond_resched();
                start = end + 1;
        }
        if (err)
                werr = err;

        if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
                if ((mark & EXTENT_DIRTY) &&
                    test_and_clear_bit(BTRFS_INODE_BTREE_LOG1_ERR,
                                       &btree_ino->runtime_flags))
                        errors = true;

                if ((mark & EXTENT_NEW) &&
                    test_and_clear_bit(BTRFS_INODE_BTREE_LOG2_ERR,
                                       &btree_ino->runtime_flags))
                        errors = true;
        } else {
                if (test_and_clear_bit(BTRFS_INODE_BTREE_ERR,
                                       &btree_ino->runtime_flags))
                        errors = true;
        }

        if (errors && !werr)
                werr = -EIO;

        return werr;
}

/*
 * when btree blocks are allocated, they have some corresponding bits set for
 * them in one of two extent_io trees. This is used to make sure all of
 * those extents are on disk for transaction or log commit
 */
static int btrfs_write_and_wait_marked_extents(struct btrfs_root *root,
                                struct extent_io_tree *dirty_pages, int mark)
{
        int ret;
        int ret2;
        struct blk_plug plug;

        blk_start_plug(&plug);
        ret = btrfs_write_marked_extents(root, dirty_pages, mark);
        blk_finish_plug(&plug);
        ret2 = btrfs_wait_marked_extents(root, dirty_pages, mark);

        if (ret)
                return ret;
        if (ret2)
                return ret2;
        return 0;
}

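/*
 * Write back and wait on all btree blocks dirtied by @trans. Without a
 * transaction, fall back to flushing the whole btree inode mapping.
 */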
int btrfs_write_and_wait_transaction(struct btrfs_trans_handle *trans,
                                     struct btrfs_root *root)
{
        if (!trans || !trans->transaction) {
                struct inode *btree_inode;
                btree_inode = root->fs_info->btree_inode;
                return filemap_write_and_wait(btree_inode->i_mapping);
        }
        return btrfs_write_and_wait_marked_extents(root,
                                           &trans->transaction->dirty_pages,
                                           EXTENT_DIRTY);
}

/*
 * this is used to update the root pointer in the tree of tree roots.
 *
 * But, in the case of the extent allocation tree, updating the root
 * pointer may allocate blocks which may change the root of the extent
 * allocation tree.
 *
 * So, this loops and repeats and makes sure the cowonly root didn't
 * change while the root pointer was being updated in the metadata.
 */
static int update_cowonly_root(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root)
{
        int ret;
        u64 old_root_bytenr;
        u64 old_root_used;
        struct btrfs_root *tree_root = root->fs_info->tree_root;

        old_root_used = btrfs_root_used(&root->root_item);
        btrfs_write_dirty_block_groups(trans, root);

        while (1) {
                old_root_bytenr = btrfs_root_bytenr(&root->root_item);
                if (old_root_bytenr == root->node->start &&
                    old_root_used == btrfs_root_used(&root->root_item))
                        break;

                btrfs_set_root_node(&root->root_item, root->node);
                ret = btrfs_update_root(trans, tree_root,
                                        &root->root_key,
                                        &root->root_item);
                if (ret)
                        return ret;

                old_root_used = btrfs_root_used(&root->root_item);
                ret = btrfs_write_dirty_block_groups(trans, root);
                if (ret)
                        return ret;
        }

        return 0;
}

/*
 * update all the cowonly tree roots on disk
 *
 * The error handling in this function may not be obvious. Any of the
 * failures will cause the file system to go offline. We still need
 * to clean up the delayed refs.
 */
static noinline int commit_cowonly_roots(struct btrfs_trans_handle *trans,
                                         struct btrfs_root *root)
{
        struct btrfs_fs_info *fs_info = root->fs_info;
        struct list_head *next;
        struct extent_buffer *eb;
        int ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;

        eb = btrfs_lock_root_node(fs_info->tree_root);
        ret = btrfs_cow_block(trans, fs_info->tree_root, eb, NULL,
                              0, &eb);
        btrfs_tree_unlock(eb);
        free_extent_buffer(eb);

        if (ret)
                return ret;

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;

        ret = btrfs_run_dev_stats(trans, root->fs_info);
        if (ret)
                return ret;
        ret = btrfs_run_dev_replace(trans, root->fs_info);
        if (ret)
                return ret;
        ret = btrfs_run_qgroups(trans, root->fs_info);
        if (ret)
                return ret;

        /* run_qgroups might have added some more refs */
        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret)
                return ret;

        while (!list_empty(&fs_info->dirty_cowonly_roots)) {
                next = fs_info->dirty_cowonly_roots.next;
                list_del_init(next);
                root = list_entry(next, struct btrfs_root, dirty_list);

                if (root != fs_info->extent_root)
                        list_add_tail(&root->dirty_list,
                                      &trans->transaction->switch_commits);
                ret = update_cowonly_root(trans, root);
                if (ret)
                        return ret;
        }

        list_add_tail(&fs_info->extent_root->dirty_list,
                      &trans->transaction->switch_commits);
        btrfs_after_dev_replace_commit(fs_info);

        return 0;
}

/*
 * dead roots are old snapshots that need to be deleted. This allocates
 * a dirty root struct and adds it into the list of dead roots that need to
 * be deleted
 */
void btrfs_add_dead_root(struct btrfs_root *root)
{
        spin_lock(&root->fs_info->trans_lock);
        if (list_empty(&root->root_list))
                list_add_tail(&root->root_list, &root->fs_info->dead_roots);
        spin_unlock(&root->fs_info->trans_lock);
}

/*
 * write out all the dirty fs-tree (subvolume) roots and update their root
 * items in the tree of tree roots
 */
static noinline int commit_fs_roots(struct btrfs_trans_handle *trans,
                                    struct btrfs_root *root)
{
        struct btrfs_root *gang[8];
        struct btrfs_fs_info *fs_info = root->fs_info;
        int i;
        int ret;
        int err = 0;

        spin_lock(&fs_info->fs_roots_radix_lock);
        while (1) {
                ret = radix_tree_gang_lookup_tag(&fs_info->fs_roots_radix,
                                                 (void **)gang, 0,
                                                 ARRAY_SIZE(gang),
                                                 BTRFS_ROOT_TRANS_TAG);
                if (ret == 0)
                        break;
                for (i = 0; i < ret; i++) {
                        root = gang[i];
                        radix_tree_tag_clear(&fs_info->fs_roots_radix,
                                        (unsigned long)root->root_key.objectid,
                                        BTRFS_ROOT_TRANS_TAG);
                        spin_unlock(&fs_info->fs_roots_radix_lock);

                        btrfs_free_log(trans, root);
                        btrfs_update_reloc_root(trans, root);
                        btrfs_orphan_commit_root(trans, root);

                        btrfs_save_ino_cache(root, trans);

                        /* see comments in should_cow_block() */
                        clear_bit(BTRFS_ROOT_FORCE_COW, &root->state);
                        smp_mb__after_atomic();

                        if (root->commit_root != root->node) {
                                list_add_tail(&root->dirty_list,
                                        &trans->transaction->switch_commits);
                                btrfs_set_root_node(&root->root_item,
                                                    root->node);
                        }

                        err = btrfs_update_root(trans, fs_info->tree_root,
                                                &root->root_key,
                                                &root->root_item);
                        spin_lock(&fs_info->fs_roots_radix_lock);
                        if (err)
                                break;
                }
        }
        spin_unlock(&fs_info->fs_roots_radix_lock);
        return err;
}

/*
 * defrag a given btree.
 * Every leaf in the btree is read and defragged.
 */
int btrfs_defrag_root(struct btrfs_root *root)
{
        struct btrfs_fs_info *info = root->fs_info;
        struct btrfs_trans_handle *trans;
        int ret;

        if (test_and_set_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state))
                return 0;

        while (1) {
                trans = btrfs_start_transaction(root, 0);
                if (IS_ERR(trans))
                        return PTR_ERR(trans);

                ret = btrfs_defrag_leaves(trans, root);

                btrfs_end_transaction(trans, root);
                btrfs_btree_balance_dirty(info->tree_root);
                cond_resched();

                if (btrfs_fs_closing(root->fs_info) || ret != -EAGAIN)
                        break;

                if (btrfs_defrag_cancelled(root->fs_info)) {
                        pr_debug("BTRFS: defrag_root cancelled\n");
                        ret = -EAGAIN;
                        break;
                }
        }
        clear_bit(BTRFS_ROOT_DEFRAG_RUNNING, &root->state);
        return ret;
}

/*
 * new snapshots need to be created at a very specific time in the
 * transaction commit. This does the actual creation.
 *
 * Note:
 * If an error that may affect the commit of the current transaction
 * happens, we should return the error number. If the error only affects
 * the creation of the pending snapshots, just return 0.
 */
static noinline int create_pending_snapshot(struct btrfs_trans_handle *trans,
                                   struct btrfs_fs_info *fs_info,
                                   struct btrfs_pending_snapshot *pending)
{
        struct btrfs_key key;
        struct btrfs_root_item *new_root_item;
        struct btrfs_root *tree_root = fs_info->tree_root;
        struct btrfs_root *root = pending->root;
        struct btrfs_root *parent_root;
        struct btrfs_block_rsv *rsv;
        struct inode *parent_inode;
        struct btrfs_path *path;
        struct btrfs_dir_item *dir_item;
        struct dentry *dentry;
        struct extent_buffer *tmp;
        struct extent_buffer *old;
        struct timespec cur_time = CURRENT_TIME;
        int ret = 0;
        u64 to_reserve = 0;
        u64 index = 0;
        u64 objectid;
        u64 root_flags;
        uuid_le new_uuid;

        path = btrfs_alloc_path();
        if (!path) {
                pending->error = -ENOMEM;
                return 0;
        }

        new_root_item = kmalloc(sizeof(*new_root_item), GFP_NOFS);
        if (!new_root_item) {
                pending->error = -ENOMEM;
                goto root_item_alloc_fail;
        }

        pending->error = btrfs_find_free_objectid(tree_root, &objectid);
        if (pending->error)
                goto no_free_objectid;

        btrfs_reloc_pre_snapshot(trans, pending, &to_reserve);

        if (to_reserve > 0) {
                pending->error = btrfs_block_rsv_add(root,
                                                     &pending->block_rsv,
                                                     to_reserve,
                                                     BTRFS_RESERVE_NO_FLUSH);
                if (pending->error)
                        goto no_free_objectid;
        }

        key.objectid = objectid;
        key.offset = (u64)-1;
        key.type = BTRFS_ROOT_ITEM_KEY;

        rsv = trans->block_rsv;
        trans->block_rsv = &pending->block_rsv;
        trans->bytes_reserved = trans->block_rsv->reserved;

        dentry = pending->dentry;
        parent_inode = pending->dir;
        parent_root = BTRFS_I(parent_inode)->root;
        record_root_in_trans(trans, parent_root);

        /*
         * insert the directory item
         */
        ret = btrfs_set_inode_index(parent_inode, &index);
        BUG_ON(ret); /* -ENOMEM */

        /* check if there is a file/dir which has the same name. */
        dir_item = btrfs_lookup_dir_item(NULL, parent_root, path,
                                         btrfs_ino(parent_inode),
                                         dentry->d_name.name,
                                         dentry->d_name.len, 0);
        if (dir_item != NULL && !IS_ERR(dir_item)) {
                pending->error = -EEXIST;
                goto dir_item_existed;
        } else if (IS_ERR(dir_item)) {
                ret = PTR_ERR(dir_item);
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }
        btrfs_release_path(path);

        /*
         * pull in the delayed directory update
         * and the delayed inode item
         * otherwise we corrupt the FS during
         * snapshot
         */
        ret = btrfs_run_delayed_items(trans, root);
        if (ret) {      /* Transaction aborted */
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        record_root_in_trans(trans, root);
        btrfs_set_root_last_snapshot(&root->root_item, trans->transid);
        memcpy(new_root_item, &root->root_item, sizeof(*new_root_item));
        btrfs_check_and_init_root_item(new_root_item);

        root_flags = btrfs_root_flags(new_root_item);
        if (pending->readonly)
                root_flags |= BTRFS_ROOT_SUBVOL_RDONLY;
        else
                root_flags &= ~BTRFS_ROOT_SUBVOL_RDONLY;
        btrfs_set_root_flags(new_root_item, root_flags);

        btrfs_set_root_generation_v2(new_root_item,
                        trans->transid);
        uuid_le_gen(&new_uuid);
        memcpy(new_root_item->uuid, new_uuid.b, BTRFS_UUID_SIZE);
        memcpy(new_root_item->parent_uuid, root->root_item.uuid,
                        BTRFS_UUID_SIZE);
        if (!(root_flags & BTRFS_ROOT_SUBVOL_RDONLY)) {
                memset(new_root_item->received_uuid, 0,
                       sizeof(new_root_item->received_uuid));
                memset(&new_root_item->stime, 0, sizeof(new_root_item->stime));
                memset(&new_root_item->rtime, 0, sizeof(new_root_item->rtime));
                btrfs_set_root_stransid(new_root_item, 0);
                btrfs_set_root_rtransid(new_root_item, 0);
        }
        btrfs_set_stack_timespec_sec(&new_root_item->otime, cur_time.tv_sec);
        btrfs_set_stack_timespec_nsec(&new_root_item->otime, cur_time.tv_nsec);
        btrfs_set_root_otransid(new_root_item, trans->transid);

        old = btrfs_lock_root_node(root);
        ret = btrfs_cow_block(trans, root, old, NULL, 0, &old);
        if (ret) {
                btrfs_tree_unlock(old);
                free_extent_buffer(old);
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        btrfs_set_lock_blocking(old);

        ret = btrfs_copy_root(trans, root, old, &tmp, objectid);
        /* clean up in any case */
        btrfs_tree_unlock(old);
        free_extent_buffer(old);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        /*
         * We need to flush delayed refs in order to make sure all of our quota
         * operations have been done before we call btrfs_qgroup_inherit.
         */
        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        ret = btrfs_qgroup_inherit(trans, fs_info,
                                   root->root_key.objectid,
                                   objectid, pending->inherit);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        /* see comments in should_cow_block() */
        set_bit(BTRFS_ROOT_FORCE_COW, &root->state);
        smp_wmb();

        btrfs_set_root_node(new_root_item, tmp);
        /* record when the snapshot was created in key.offset */
        key.offset = trans->transid;
        ret = btrfs_insert_root(trans, tree_root, &key, new_root_item);
        btrfs_tree_unlock(tmp);
        free_extent_buffer(tmp);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        /*
         * insert root back/forward references
         */
        ret = btrfs_add_root_ref(trans, tree_root, objectid,
                                 parent_root->root_key.objectid,
                                 btrfs_ino(parent_inode), index,
                                 dentry->d_name.name, dentry->d_name.len);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        key.offset = (u64)-1;
        pending->snap = btrfs_read_fs_root_no_name(root->fs_info, &key);
        if (IS_ERR(pending->snap)) {
                ret = PTR_ERR(pending->snap);
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        ret = btrfs_reloc_post_snapshot(trans, pending);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        ret = btrfs_insert_dir_item(trans, parent_root,
                                    dentry->d_name.name, dentry->d_name.len,
                                    parent_inode, &key,
                                    BTRFS_FT_DIR, index);
        /* We checked the name at the beginning, so a duplicate is impossible. */
        BUG_ON(ret == -EEXIST || ret == -EOVERFLOW);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }

        btrfs_i_size_write(parent_inode, parent_inode->i_size +
                                         dentry->d_name.len * 2);
        parent_inode->i_mtime = parent_inode->i_ctime = CURRENT_TIME;
        ret = btrfs_update_inode_fallback(trans, parent_root, parent_inode);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }
        ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root, new_uuid.b,
                                  BTRFS_UUID_KEY_SUBVOL, objectid);
        if (ret) {
                btrfs_abort_transaction(trans, root, ret);
                goto fail;
        }
        if (!btrfs_is_empty_uuid(new_root_item->received_uuid)) {
                ret = btrfs_uuid_tree_add(trans, fs_info->uuid_root,
                                          new_root_item->received_uuid,
                                          BTRFS_UUID_KEY_RECEIVED_SUBVOL,
                                          objectid);
                if (ret && ret != -EEXIST) {
                        btrfs_abort_transaction(trans, root, ret);
                        goto fail;
                }
        }
fail:
        pending->error = ret;
dir_item_existed:
        trans->block_rsv = rsv;
        trans->bytes_reserved = 0;
no_free_objectid:
        kfree(new_root_item);
root_item_alloc_fail:
        btrfs_free_path(path);
        return ret;
}

/*
 * create all the snapshots we've scheduled for creation
 */
static noinline int create_pending_snapshots(struct btrfs_trans_handle *trans,
                                             struct btrfs_fs_info *fs_info)
{
        struct btrfs_pending_snapshot *pending, *next;
        struct list_head *head = &trans->transaction->pending_snapshots;
        int ret = 0;

        list_for_each_entry_safe(pending, next, head, list) {
                list_del(&pending->list);
                ret = create_pending_snapshot(trans, fs_info, pending);
                if (ret)
                        break;
        }
        return ret;
}

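/*
 * Copy the freshly committed tree root and chunk root pointers (and their
 * generations and levels) into the in-memory copy of the superblock, so the
 * next superblock write publishes them.
 */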
static void update_super_roots(struct btrfs_root *root)
{
        struct btrfs_root_item *root_item;
        struct btrfs_super_block *super;

        super = root->fs_info->super_copy;

        root_item = &root->fs_info->chunk_root->root_item;
        super->chunk_root = root_item->bytenr;
        super->chunk_root_generation = root_item->generation;
        super->chunk_root_level = root_item->level;

        root_item = &root->fs_info->tree_root->root_item;
        super->root = root_item->bytenr;
        super->generation = root_item->generation;
        super->root_level = root_item->level;
        if (btrfs_test_opt(root, SPACE_CACHE))
                super->cache_generation = root_item->generation;
        if (root->fs_info->update_uuid_tree_gen)
                super->uuid_tree_generation = root_item->generation;
}

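/*
 * Report whether the running transaction has entered the commit path
 * (COMMIT_START or later); btrfs_transaction_blocked() below is the
 * analogous check for the blocked window of the commit.
 */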
int btrfs_transaction_in_commit(struct btrfs_fs_info *info)
{
        struct btrfs_transaction *trans;
        int ret = 0;

        spin_lock(&info->trans_lock);
        trans = info->running_transaction;
        if (trans)
                ret = (trans->state >= TRANS_STATE_COMMIT_START);
        spin_unlock(&info->trans_lock);
        return ret;
}

int btrfs_transaction_blocked(struct btrfs_fs_info *info)
{
        struct btrfs_transaction *trans;
        int ret = 0;

        spin_lock(&info->trans_lock);
        trans = info->running_transaction;
        if (trans)
                ret = is_transaction_blocked(trans);
        spin_unlock(&info->trans_lock);
        return ret;
}

/*
 * wait for the current transaction commit to start and block subsequent
 * transaction joins
 */
static void wait_current_trans_commit_start(struct btrfs_root *root,
                                            struct btrfs_transaction *trans)
{
        wait_event(root->fs_info->transaction_blocked_wait,
                   trans->state >= TRANS_STATE_COMMIT_START ||
                   trans->aborted);
}

/*
 * wait for the current transaction to start and then become unblocked.
 * caller holds ref.
 */
static void wait_current_trans_commit_start_and_unblock(struct btrfs_root *root,
                                         struct btrfs_transaction *trans)
{
        wait_event(root->fs_info->transaction_wait,
                   trans->state >= TRANS_STATE_UNBLOCKED ||
                   trans->aborted);
}

/*
 * commit transactions asynchronously. once btrfs_commit_transaction_async
 * returns, any subsequent transaction will not be allowed to join.
 */
struct btrfs_async_commit {
        struct btrfs_trans_handle *newtrans;
        struct btrfs_root *root;
        struct work_struct work;
};

static void do_async_commit(struct work_struct *work)
{
        struct btrfs_async_commit *ac =
                container_of(work, struct btrfs_async_commit, work);

        /*
         * We've got freeze protection passed with the transaction.
         * Tell lockdep about it.
         */
        if (ac->newtrans->type & __TRANS_FREEZABLE)
                rwsem_acquire_read(
                     &ac->root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
                     0, 1, _THIS_IP_);

        current->journal_info = ac->newtrans;

        btrfs_commit_transaction(ac->newtrans, ac->root);
        kfree(ac);
}


int btrfs_commit_transaction_async(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   int wait_for_unblock)
{
	struct btrfs_async_commit *ac;
	struct btrfs_transaction *cur_trans;

	ac = kmalloc(sizeof(*ac), GFP_NOFS);
	if (!ac)
		return -ENOMEM;

	INIT_WORK(&ac->work, do_async_commit);
	ac->root = root;
	ac->newtrans = btrfs_join_transaction(root);
	if (IS_ERR(ac->newtrans)) {
		int err = PTR_ERR(ac->newtrans);
		kfree(ac);
		return err;
	}

	/* take transaction reference */
	cur_trans = trans->transaction;
	atomic_inc(&cur_trans->use_count);

	btrfs_end_transaction(trans, root);

	/*
	 * Tell lockdep we've released the freeze rwsem, since the
	 * async commit thread will be the one to unlock it.
	 */
	if (ac->newtrans->type & __TRANS_FREEZABLE)
		rwsem_release(
			&root->fs_info->sb->s_writers.lock_map[SB_FREEZE_FS-1],
			1, _THIS_IP_);

	schedule_work(&ac->work);

	/* wait for transaction to start and unblock */
	if (wait_for_unblock)
		wait_current_trans_commit_start_and_unblock(root, cur_trans);
	else
		wait_current_trans_commit_start(root, cur_trans);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	btrfs_put_transaction(cur_trans);
	return 0;
}
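
/*
 * Illustrative sketch, not part of the original file: a caller such as the
 * snapshot ioctl path typically hands its handle to the async machinery
 * and falls back to a synchronous commit on failure, roughly:
 *
 *	ret = btrfs_commit_transaction_async(trans, root, 1);
 *	if (ret)
 *		ret = btrfs_commit_transaction(trans, root);
 *
 * With wait_for_unblock == 1 the call returns once the commit reaches
 * TRANS_STATE_UNBLOCKED, i.e. new transactions may join again; it does not
 * wait for the super block to hit disk. Note the lockdep hand-off: the
 * freeze protection released above is re-acquired by the worker in
 * do_async_commit().
 */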

static void cleanup_transaction(struct btrfs_trans_handle *trans,
				struct btrfs_root *root, int err)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	DEFINE_WAIT(wait);

	WARN_ON(trans->use_count > 1);

	btrfs_abort_transaction(trans, root, err);

	spin_lock(&root->fs_info->trans_lock);

	/*
	 * If the transaction has already been removed from the list, it
	 * committed successfully, so this cleanup path must never be
	 * reached for it.
	 */
	BUG_ON(list_empty(&cur_trans->list));

	list_del_init(&cur_trans->list);
	if (cur_trans == root->fs_info->running_transaction) {
		cur_trans->state = TRANS_STATE_COMMIT_DOING;
		spin_unlock(&root->fs_info->trans_lock);
		wait_event(cur_trans->writer_wait,
			   atomic_read(&cur_trans->num_writers) == 1);

		spin_lock(&root->fs_info->trans_lock);
	}
	spin_unlock(&root->fs_info->trans_lock);

	btrfs_cleanup_one_transaction(trans->transaction, root);

	spin_lock(&root->fs_info->trans_lock);
	if (cur_trans == root->fs_info->running_transaction)
		root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->trans_lock);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);

	/* two puts: one for this handle, one for the list_del above */
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	trace_btrfs_transaction_commit(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;
	btrfs_scrub_cancel(root->fs_info);

	kmem_cache_free(btrfs_trans_handle_cachep, trans);
}

static inline int btrfs_start_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
		return btrfs_start_delalloc_roots(fs_info, 1, -1);
	return 0;
}

static inline void btrfs_wait_delalloc_flush(struct btrfs_fs_info *fs_info)
{
	if (btrfs_test_opt(fs_info->tree_root, FLUSHONCOMMIT))
		btrfs_wait_ordered_roots(fs_info, -1);
}
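
/*
 * Illustrative note, not part of the original file: the two helpers above
 * only do work when the filesystem is mounted with the flushoncommit
 * option, e.g.
 *
 *	mount -o flushoncommit /dev/sdX /mnt
 *
 * in which case every commit flushes and waits on all dirty data first.
 * Without it they are no-ops, and only the ordered extents on the
 * transaction's pending_ordered list (see below) are waited on.
 */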

static inline void
btrfs_wait_pending_ordered(struct btrfs_transaction *cur_trans,
			   struct btrfs_fs_info *fs_info)
{
	struct btrfs_ordered_extent *ordered;

	spin_lock(&fs_info->trans_lock);
	while (!list_empty(&cur_trans->pending_ordered)) {
		ordered = list_first_entry(&cur_trans->pending_ordered,
					   struct btrfs_ordered_extent,
					   trans_list);
		list_del_init(&ordered->trans_list);

		/* drop trans_lock while we sleep; the wait may block */
		spin_unlock(&fs_info->trans_lock);
		wait_event(ordered->wait, test_bit(BTRFS_ORDERED_COMPLETE,
						   &ordered->flags));
		btrfs_put_ordered_extent(ordered);
		spin_lock(&fs_info->trans_lock);
	}
	spin_unlock(&fs_info->trans_lock);
}

int btrfs_commit_transaction(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root)
{
	struct btrfs_transaction *cur_trans = trans->transaction;
	struct btrfs_transaction *prev_trans = NULL;
	struct btrfs_inode *btree_ino = BTRFS_I(root->fs_info->btree_inode);
	int ret;

	/* Stop the commit early if ->aborted is set */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		btrfs_end_transaction(trans, root);
		return ret;
	}

	/*
	 * make a pass through all the delayed refs we have so far;
	 * any running procs may add more while we are here
	 */
	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}

	cur_trans = trans->transaction;

	/*
	 * set the flushing flag so procs in this transaction have to
	 * start sending their work down.
	 */
	cur_trans->delayed_refs.flushing = 1;
	smp_wmb();

	if (!list_empty(&trans->new_bgs))
		btrfs_create_pending_block_groups(trans, root);

	ret = btrfs_run_delayed_refs(trans, root, 0);
	if (ret) {
		btrfs_end_transaction(trans, root);
		return ret;
	}

	spin_lock(&root->fs_info->trans_lock);
	list_splice(&trans->ordered, &cur_trans->pending_ordered);
	if (cur_trans->state >= TRANS_STATE_COMMIT_START) {
		spin_unlock(&root->fs_info->trans_lock);
		atomic_inc(&cur_trans->use_count);
		ret = btrfs_end_transaction(trans, root);

		wait_for_commit(root, cur_trans);

		btrfs_put_transaction(cur_trans);

		return ret;
	}

	cur_trans->state = TRANS_STATE_COMMIT_START;
	wake_up(&root->fs_info->transaction_blocked_wait);

	if (cur_trans->list.prev != &root->fs_info->trans_list) {
		prev_trans = list_entry(cur_trans->list.prev,
					struct btrfs_transaction, list);
		if (prev_trans->state != TRANS_STATE_COMPLETED) {
			atomic_inc(&prev_trans->use_count);
			spin_unlock(&root->fs_info->trans_lock);

			wait_for_commit(root, prev_trans);

			btrfs_put_transaction(prev_trans);
		} else {
			spin_unlock(&root->fs_info->trans_lock);
		}
	} else {
		spin_unlock(&root->fs_info->trans_lock);
	}

	extwriter_counter_dec(cur_trans, trans->type);

	ret = btrfs_start_delalloc_flush(root->fs_info);
	if (ret)
		goto cleanup_transaction;

	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		goto cleanup_transaction;

	wait_event(cur_trans->writer_wait,
		   extwriter_counter_read(cur_trans) == 0);

	/* some pending work might have been added after the previous flush */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret)
		goto cleanup_transaction;

	btrfs_wait_delalloc_flush(root->fs_info);

	btrfs_wait_pending_ordered(cur_trans, root->fs_info);

	btrfs_scrub_pause(root);

	/*
	 * Ok, now we need to make sure to block out any other joins while we
	 * commit the transaction. We could have started a join before setting
	 * COMMIT_DOING, so make sure to wait for num_writers to drop to 1
	 * again.
	 */
	spin_lock(&root->fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_COMMIT_DOING;
	spin_unlock(&root->fs_info->trans_lock);
	wait_event(cur_trans->writer_wait,
		   atomic_read(&cur_trans->num_writers) == 1);

	/* ->aborted might be set after the previous check, so check it */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		goto scrub_continue;
	}

	/*
	 * the reloc mutex makes sure that we stop
	 * the balancing code from coming in and moving
	 * extents around in the middle of the commit
	 */
	mutex_lock(&root->fs_info->reloc_mutex);

	/*
	 * We needn't worry about the delayed items because we will
	 * deal with them in create_pending_snapshot(), which is the
	 * core function of the snapshot creation.
	 */
	ret = create_pending_snapshots(trans, root->fs_info);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * We insert the dir indexes of the snapshots and update the inode
	 * of the snapshots' parents after the snapshot creation, so there
	 * are some delayed items which are not dealt with. Now deal with
	 * them.
	 *
	 * We needn't worry that this operation will corrupt the snapshots,
	 * because all the trees which are snapshotted will be forced to COW
	 * their nodes and leaves.
	 */
	ret = btrfs_run_delayed_items(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	ret = btrfs_run_delayed_refs(trans, root, (unsigned long)-1);
	if (ret) {
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * make sure none of the code above managed to slip in a
	 * delayed item
	 */
	btrfs_assert_delayed_root_empty(root);

	WARN_ON(cur_trans != trans->transaction);

	/*
	 * commit_fs_roots() and commit_cowonly_roots() below are responsible
	 * for getting the various roots consistent with each other. Every
	 * pointer in the tree of tree roots has to point to the most up to
	 * date root for every subvolume and other tree. So, we have to keep
	 * the tree logging code from jumping in and changing any of the
	 * trees.
	 *
	 * At this point in the commit, there can't be any tree-log writers,
	 * but a little lower down we drop the trans mutex and let new people
	 * in. By holding the tree_log_mutex from now until after the super
	 * is written, we avoid races with the tree-log code.
	 */
	mutex_lock(&root->fs_info->tree_log_mutex);

	ret = commit_fs_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * Since the transaction is done, we should set the inode map cache
	 * flag before any other transaction can come along.
	 */
	if (btrfs_test_opt(root, CHANGE_INODE_CACHE))
		btrfs_set_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);
	else
		btrfs_clear_opt(root->fs_info->mount_opt, INODE_MAP_CACHE);

	/*
	 * commit_fs_roots() gets rid of all the tree log roots; it is now
	 * safe to free the root of the tree of log roots.
	 */
	btrfs_free_log_root_tree(trans, root->fs_info);

	ret = commit_cowonly_roots(trans, root);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	/*
	 * The tasks which save the space cache and inode cache may also
	 * update ->aborted, check it.
	 */
	if (unlikely(ACCESS_ONCE(cur_trans->aborted))) {
		ret = cur_trans->aborted;
		mutex_unlock(&root->fs_info->tree_log_mutex);
		mutex_unlock(&root->fs_info->reloc_mutex);
		goto scrub_continue;
	}

	btrfs_prepare_extent_commit(trans, root);

	cur_trans = root->fs_info->running_transaction;

	btrfs_set_root_node(&root->fs_info->tree_root->root_item,
			    root->fs_info->tree_root->node);
	list_add_tail(&root->fs_info->tree_root->dirty_list,
		      &cur_trans->switch_commits);

	btrfs_set_root_node(&root->fs_info->chunk_root->root_item,
			    root->fs_info->chunk_root->node);
	list_add_tail(&root->fs_info->chunk_root->dirty_list,
		      &cur_trans->switch_commits);

	switch_commit_roots(cur_trans, root->fs_info);

	assert_qgroups_uptodate(trans);
	update_super_roots(root);

	btrfs_set_super_log_root(root->fs_info->super_copy, 0);
	btrfs_set_super_log_root_level(root->fs_info->super_copy, 0);
	memcpy(root->fs_info->super_for_commit, root->fs_info->super_copy,
	       sizeof(*root->fs_info->super_copy));

	btrfs_update_commit_device_size(root->fs_info);
	btrfs_update_commit_device_bytes_used(root, cur_trans);

	clear_bit(BTRFS_INODE_BTREE_LOG1_ERR, &btree_ino->runtime_flags);
	clear_bit(BTRFS_INODE_BTREE_LOG2_ERR, &btree_ino->runtime_flags);

	spin_lock(&root->fs_info->trans_lock);
	cur_trans->state = TRANS_STATE_UNBLOCKED;
	root->fs_info->running_transaction = NULL;
	spin_unlock(&root->fs_info->trans_lock);
	mutex_unlock(&root->fs_info->reloc_mutex);

	wake_up(&root->fs_info->transaction_wait);

	ret = btrfs_write_and_wait_transaction(trans, root);
	if (ret) {
		btrfs_error(root->fs_info, ret,
			    "Error while writing out transaction");
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	ret = write_ctree_super(trans, root, 0);
	if (ret) {
		mutex_unlock(&root->fs_info->tree_log_mutex);
		goto scrub_continue;
	}

	/*
	 * the super is written, we can safely allow the tree-loggers
	 * to go about their business
	 */
	mutex_unlock(&root->fs_info->tree_log_mutex);

	btrfs_finish_extent_commit(trans, root);

	root->fs_info->last_trans_committed = cur_trans->transid;
	/*
	 * We needn't acquire the lock here because there is no other task
	 * which can change it.
	 */
	cur_trans->state = TRANS_STATE_COMPLETED;
	wake_up(&cur_trans->commit_wait);

	spin_lock(&root->fs_info->trans_lock);
	list_del_init(&cur_trans->list);
	spin_unlock(&root->fs_info->trans_lock);

	/* two puts: one for this handle, one for the list_del above */
	btrfs_put_transaction(cur_trans);
	btrfs_put_transaction(cur_trans);

	if (trans->type & __TRANS_FREEZABLE)
		sb_end_intwrite(root->fs_info->sb);

	trace_btrfs_transaction_commit(root);

	btrfs_scrub_continue(root);

	if (current->journal_info == trans)
		current->journal_info = NULL;

	kmem_cache_free(btrfs_trans_handle_cachep, trans);

	if (current != root->fs_info->transaction_kthread)
		btrfs_run_delayed_iputs(root);

	return ret;

scrub_continue:
	btrfs_scrub_continue(root);
cleanup_transaction:
	btrfs_trans_release_metadata(trans, root);
	trans->block_rsv = NULL;
	if (trans->qgroup_reserved) {
		btrfs_qgroup_free(root, trans->qgroup_reserved);
		trans->qgroup_reserved = 0;
	}
	btrfs_warn(root->fs_info, "Skipping commit of aborted transaction.");
	if (current->journal_info == trans)
		current->journal_info = NULL;
	cleanup_transaction(trans, root, ret);

	return ret;
}
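
/*
 * Illustrative sketch, not part of the original file: the usual shape of
 * a caller is
 *
 *	trans = btrfs_start_transaction(root, num_items);
 *	if (IS_ERR(trans))
 *		return PTR_ERR(trans);
 *	... modify trees ...
 *	ret = btrfs_commit_transaction(trans, root);
 *
 * btrfs_commit_transaction() always consumes the handle: on success it
 * frees it just before returning, and on error cleanup_transaction()
 * frees it, so the caller must not touch trans afterwards.
 */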

/*
 * Return 0 if there are no more dead roots to process at the time of the
 * call, and 1 if there may be more, in which case the caller should call
 * this function again. Errors from btrfs_drop_snapshot() are folded into
 * the 0 case, so the cleaner stops for now and retries on a later pass.
 *
 * A return of 1 means there are certainly more snapshots to delete, but if
 * a new one appears while we are processing, we may still return 0. We
 * don't mind, because btrfs_commit_super will poke the cleaner thread and
 * it will process it a few seconds later.
 */
int btrfs_clean_one_deleted_snapshot(struct btrfs_root *root)
{
	int ret;
	struct btrfs_fs_info *fs_info = root->fs_info;

	spin_lock(&fs_info->trans_lock);
	if (list_empty(&fs_info->dead_roots)) {
		spin_unlock(&fs_info->trans_lock);
		return 0;
	}
	root = list_first_entry(&fs_info->dead_roots,
				struct btrfs_root, root_list);
	list_del_init(&root->root_list);
	spin_unlock(&fs_info->trans_lock);

	pr_debug("BTRFS: cleaner removing %llu\n", root->objectid);

	btrfs_kill_all_delayed_nodes(root);

	if (btrfs_header_backref_rev(root->node) <
	    BTRFS_MIXED_BACKREF_REV)
		ret = btrfs_drop_snapshot(root, NULL, 0, 0);
	else
		ret = btrfs_drop_snapshot(root, NULL, 1, 0);

	/* errors are swallowed here; see the comment above */
	return (ret < 0) ? 0 : 1;
}
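
/*
 * Illustrative sketch, not part of the original file: the cleaner thread
 * is expected to keep calling this until there is nothing left to do,
 * roughly:
 *
 *	while (btrfs_clean_one_deleted_snapshot(root) == 1)
 *		cond_resched();
 *
 * The cond_resched() is only for illustration; the real caller interleaves
 * other housekeeping checks between iterations.
 */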