/*
 * fs/f2fs/data.c
 *
 * Copyright (c) 2012 Samsung Electronics Co., Ltd.
 *             http://www.samsung.com/
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/fs.h>
#include <linux/f2fs_fs.h>
#include <linux/buffer_head.h>
#include <linux/mpage.h>
#include <linux/aio.h>
#include <linux/writeback.h>
#include <linux/backing-dev.h>
#include <linux/blkdev.h>
#include <linux/bio.h>
#include <linux/prefetch.h>

#include "f2fs.h"
#include "node.h"
#include "segment.h"
#include <trace/events/f2fs.h>
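
/*
 * Read completion callback: mark each page in the bio up-to-date on
 * success (or clear it and flag an error), then unlock it for any
 * waiting readers.
 */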
static void f2fs_read_end_io(struct bio *bio, int err)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (!err) {
			SetPageUptodate(page);
		} else {
			ClearPageUptodate(page);
			SetPageError(page);
		}
		unlock_page(page);
	}
	bio_put(bio);
}
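
/*
 * Write completion callback: on error, redirty the page and stop further
 * checkpointing; always end writeback, signal a META_FLUSH submitter
 * waiting on sbi->wait_io, and wake up the checkpoint waiter once no
 * pages remain under writeback.
 */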
static void f2fs_write_end_io(struct bio *bio, int err)
{
	struct f2fs_sb_info *sbi = bio->bi_private;
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment_all(bvec, bio, i) {
		struct page *page = bvec->bv_page;

		if (unlikely(err)) {
			set_page_dirty(page);
			set_bit(AS_EIO, &page->mapping->flags);
			f2fs_stop_checkpoint(sbi);
		}
		end_page_writeback(page);
		dec_page_count(sbi, F2FS_WRITEBACK);
	}

	if (sbi->wait_io) {
		complete(sbi->wait_io);
		sbi->wait_io = NULL;
	}

	if (!get_pages(sbi, F2FS_WRITEBACK) &&
			!list_empty(&sbi->cp_wait.task_list))
		wake_up(&sbi->cp_wait);

	bio_put(bio);
}

/*
 * Low-level block read/write IO operations.
 */
static struct bio *__bio_alloc(struct f2fs_sb_info *sbi, block_t blk_addr,
				int npages, bool is_read)
{
	struct bio *bio;

	/* No failure on bio allocation */
	bio = bio_alloc(GFP_NOIO, npages);

	bio->bi_bdev = sbi->sb->s_bdev;
	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(blk_addr);
	bio->bi_end_io = is_read ? f2fs_read_end_io : f2fs_write_end_io;
	bio->bi_private = sbi;

	return bio;
}

static void __submit_merged_bio(struct f2fs_bio_info *io)
{
	struct f2fs_io_info *fio = &io->fio;
	int rw;

	if (!io->bio)
		return;

	rw = fio->rw;

	if (is_read_io(rw)) {
		trace_f2fs_submit_read_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		submit_bio(rw, io->bio);
	} else {
		trace_f2fs_submit_write_bio(io->sbi->sb, rw,
						fio->type, io->bio);
		/*
		 * META_FLUSH comes only from the checkpoint procedure, so we
		 * must wait for this metadata bio to preserve FS consistency.
		 */
		if (fio->type == META_FLUSH) {
			DECLARE_COMPLETION_ONSTACK(wait);
			io->sbi->wait_io = &wait;
			submit_bio(rw, io->bio);
			wait_for_completion(&wait);
		} else {
			submit_bio(rw, io->bio);
		}
	}

	io->bio = NULL;
}
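
/*
 * Flush the bio currently being merged for the given page type. During a
 * checkpoint, META is upgraded to META_FLUSH so the write is issued with
 * flush/FUA semantics (FUA is dropped when the nobarrier option is set).
 */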
void f2fs_submit_merged_bio(struct f2fs_sb_info *sbi,
				enum page_type type, int rw)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(type);
	struct f2fs_bio_info *io;

	io = is_read_io(rw) ? &sbi->read_io : &sbi->write_io[btype];

	down_write(&io->io_rwsem);

	/* change META to META_FLUSH in the checkpoint procedure */
	if (type >= META_FLUSH) {
		io->fio.type = META_FLUSH;
		if (test_opt(sbi, NOBARRIER))
			io->fio.rw = WRITE_FLUSH | REQ_META | REQ_PRIO;
		else
			io->fio.rw = WRITE_FLUSH_FUA | REQ_META | REQ_PRIO;
	}
	__submit_merged_bio(io);
	up_write(&io->io_rwsem);
}

/*
 * Fill the locked page with data located at the given block address,
 * and return it unlocked.
 */
int f2fs_submit_page_bio(struct f2fs_sb_info *sbi, struct page *page,
					block_t blk_addr, int rw)
{
	struct bio *bio;

	trace_f2fs_submit_page_bio(page, blk_addr, rw);

	/* Allocate a new bio */
	bio = __bio_alloc(sbi, blk_addr, 1, is_read_io(rw));

	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
		bio_put(bio);
		f2fs_put_page(page, 1);
		return -EFAULT;
	}

	submit_bio(rw, bio);
	return 0;
}
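
/*
 * Add a page to the per-type merged bio: consecutive block addresses with
 * the same rw flags are batched into one bio, which is submitted whenever
 * the pattern breaks or the bio is full.
 */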
void f2fs_submit_page_mbio(struct f2fs_sb_info *sbi, struct page *page,
			block_t blk_addr, struct f2fs_io_info *fio)
{
	enum page_type btype = PAGE_TYPE_OF_BIO(fio->type);
	struct f2fs_bio_info *io;
	bool is_read = is_read_io(fio->rw);

	io = is_read ? &sbi->read_io : &sbi->write_io[btype];

	verify_block_addr(sbi, blk_addr);

	down_write(&io->io_rwsem);

	if (!is_read)
		inc_page_count(sbi, F2FS_WRITEBACK);

	if (io->bio && (io->last_block_in_bio != blk_addr - 1 ||
						io->fio.rw != fio->rw))
		__submit_merged_bio(io);
alloc_new:
	if (io->bio == NULL) {
		int bio_blocks = MAX_BIO_BLOCKS(sbi);

		io->bio = __bio_alloc(sbi, blk_addr, bio_blocks, is_read);
		io->fio = *fio;
	}

	if (bio_add_page(io->bio, page, PAGE_CACHE_SIZE, 0) <
							PAGE_CACHE_SIZE) {
		__submit_merged_bio(io);
		goto alloc_new;
	}

	io->last_block_in_bio = blk_addr;

	up_write(&io->io_rwsem);
	trace_f2fs_submit_page_mbio(page, fio->rw, fio->type, blk_addr);
}

/*
 * Lock ordering for the change of data block address:
 * ->data_page
 *  ->node_page
 *    update block addresses in the node page
 */
static void __set_data_blkaddr(struct dnode_of_data *dn, block_t new_addr)
{
	struct f2fs_node *rn;
	__le32 *addr_array;
	struct page *node_page = dn->node_page;
	unsigned int ofs_in_node = dn->ofs_in_node;

	f2fs_wait_on_page_writeback(node_page, NODE);

	rn = F2FS_NODE(node_page);

	/* Get physical address of data block */
	addr_array = blkaddr_in_node(rn);
	addr_array[ofs_in_node] = cpu_to_le32(new_addr);
	set_page_dirty(node_page);
}
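
/*
 * Reserve one new block at the dnode position: charge it against the
 * inode's valid block count and record NEW_ADDR in the node page so a
 * real block is allocated when the page is written out.
 */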
int reserve_new_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	trace_f2fs_reserve_new_block(dn->inode, dn->nid, dn->ofs_in_node);

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;
	mark_inode_dirty(dn->inode);
	sync_inode_page(dn);
	return 0;
}
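
/*
 * Look up the dnode for @index, allocating node pages as needed, and
 * reserve a data block there if none has been assigned yet.
 */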
int f2fs_reserve_block(struct dnode_of_data *dn, pgoff_t index)
{
	bool need_put = dn->inode_page ? false : true;
	int err;

	/* if inode_page exists, index should be zero */
	f2fs_bug_on(F2FS_I_SB(dn->inode), !need_put && index);

	err = get_dnode_of_data(dn, index, ALLOC_NODE);
	if (err)
		return err;

	if (dn->data_blkaddr == NULL_ADDR)
		err = reserve_new_block(dn);
	if (err || need_put)
		f2fs_put_dnode(dn);
	return err;
}
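
/*
 * Try to satisfy a block lookup from the per-inode extent cache: on a hit,
 * map bh_result to as many consecutive blocks as the cached extent covers
 * and return 1; otherwise return 0 to fall back to the node page walk.
 */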
static int check_extent_cache(struct inode *inode, pgoff_t pgofs,
					struct buffer_head *bh_result)
{
	struct f2fs_inode_info *fi = F2FS_I(inode);
	pgoff_t start_fofs, end_fofs;
	block_t start_blkaddr;

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return 0;

	read_lock(&fi->ext.ext_lock);
	if (fi->ext.len == 0) {
		read_unlock(&fi->ext.ext_lock);
		return 0;
	}

	stat_inc_total_hit(inode->i_sb);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;

	if (pgofs >= start_fofs && pgofs <= end_fofs) {
		unsigned int blkbits = inode->i_sb->s_blocksize_bits;
		size_t count;

		clear_buffer_new(bh_result);
		map_bh(bh_result, inode->i_sb,
				start_blkaddr + pgofs - start_fofs);
		count = end_fofs - pgofs + 1;
		if (count < (UINT_MAX >> blkbits))
			bh_result->b_size = (count << blkbits);
		else
			bh_result->b_size = UINT_MAX;

		stat_inc_read_hit(inode->i_sb);
		read_unlock(&fi->ext.ext_lock);
		return 1;
	}
	read_unlock(&fi->ext.ext_lock);
	return 0;
}
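
/*
 * Record a new on-disk address at the dnode position and keep the single
 * cached extent coherent: extend it on front/back merges, split or shrink
 * it on overwrites, and drop it entirely once it becomes too fragmented.
 */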
void update_extent_cache(block_t blk_addr, struct dnode_of_data *dn)
{
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	pgoff_t fofs, start_fofs, end_fofs;
	block_t start_blkaddr, end_blkaddr;
	int need_update = true;

	f2fs_bug_on(F2FS_I_SB(dn->inode), blk_addr == NEW_ADDR);
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;

	/* Update the page address in the parent node */
	__set_data_blkaddr(dn, blk_addr);

	if (is_inode_flag_set(fi, FI_NO_EXTENT))
		return;

	write_lock(&fi->ext.ext_lock);

	start_fofs = fi->ext.fofs;
	end_fofs = fi->ext.fofs + fi->ext.len - 1;
	start_blkaddr = fi->ext.blk_addr;
	end_blkaddr = fi->ext.blk_addr + fi->ext.len - 1;

	/* Drop and initialize the matched extent */
	if (fi->ext.len == 1 && fofs == start_fofs)
		fi->ext.len = 0;

	/* Initial extent */
	if (fi->ext.len == 0) {
		if (blk_addr != NULL_ADDR) {
			fi->ext.fofs = fofs;
			fi->ext.blk_addr = blk_addr;
			fi->ext.len = 1;
		}
		goto end_update;
	}

	/* Front merge */
	if (fofs == start_fofs - 1 && blk_addr == start_blkaddr - 1) {
		fi->ext.fofs--;
		fi->ext.blk_addr--;
		fi->ext.len++;
		goto end_update;
	}

	/* Back merge */
	if (fofs == end_fofs + 1 && blk_addr == end_blkaddr + 1) {
		fi->ext.len++;
		goto end_update;
	}

	/* Split the existing extent */
	if (fi->ext.len > 1 &&
		fofs >= start_fofs && fofs <= end_fofs) {
		if ((end_fofs - fofs) < (fi->ext.len >> 1)) {
			fi->ext.len = fofs - start_fofs;
		} else {
			fi->ext.fofs = fofs + 1;
			fi->ext.blk_addr = start_blkaddr +
					fofs - start_fofs + 1;
			fi->ext.len -= fofs - start_fofs + 1;
		}
	} else {
		need_update = false;
	}

	/* Finally, if the extent is very fragmented, let's drop the cache. */
	if (fi->ext.len < F2FS_MIN_EXTENT_LEN) {
		fi->ext.len = 0;
		set_inode_flag(fi, FI_NO_EXTENT);
		need_update = true;
	}
end_update:
	write_unlock(&fi->ext.ext_lock);
	if (need_update)
		sync_inode_page(dn);
	return;
}
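
/*
 * Find the data page for @index without keeping it locked: return the
 * cached page when it is already up to date, otherwise look up its block
 * address and issue a read (synchronous or readahead, depending on @sync).
 */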
struct page *find_data_page(struct inode *inode, pgoff_t index, bool sync)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;

	page = find_get_page(mapping, index);
	if (page && PageUptodate(page))
		return page;
	f2fs_put_page(page, 0);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err)
		return ERR_PTR(err);
	f2fs_put_dnode(&dn);

	if (dn.data_blkaddr == NULL_ADDR)
		return ERR_PTR(-ENOENT);

	/* After fallocate(), there is no cached page, but its blkaddr is NEW_ADDR */
	if (unlikely(dn.data_blkaddr == NEW_ADDR))
		return ERR_PTR(-EINVAL);

	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	if (PageUptodate(page)) {
		unlock_page(page);
		return page;
	}

	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page, dn.data_blkaddr,
					sync ? READ_SYNC : READA);
	if (err)
		return ERR_PTR(err);

	if (sync) {
		wait_on_page_locked(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 0);
			return ERR_PTR(-EIO);
		}
	}
	return page;
}

/*
 * If it tries to access a hole, return an error.
 * The callers (functions in dir.c and GC) need to be able to tell
 * whether this page exists or not.
 */
struct page *get_lock_data_page(struct inode *inode, pgoff_t index)
{
	struct address_space *mapping = inode->i_mapping;
	struct dnode_of_data dn;
	struct page *page;
	int err;
repeat:
	page = grab_cache_page(mapping, index);
	if (!page)
		return ERR_PTR(-ENOMEM);

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, index, LOOKUP_NODE);
	if (err) {
		f2fs_put_page(page, 1);
		return ERR_PTR(err);
	}
	f2fs_put_dnode(&dn);

	if (unlikely(dn.data_blkaddr == NULL_ADDR)) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-ENOENT);
	}

	if (PageUptodate(page))
		return page;

	/*
	 * A new dentry page is allocated but not able to be written, since its
	 * new inode page couldn't be allocated due to -ENOSPC.
	 * In such a case, its blkaddr can remain as NEW_ADDR.
	 * See f2fs_add_link -> get_new_data_page -> init_inode_metadata.
	 */
	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
		return page;
	}

	err = f2fs_submit_page_bio(F2FS_I_SB(inode), page,
					dn.data_blkaddr, READ_SYNC);
	if (err)
		return ERR_PTR(err);

	lock_page(page);
	if (unlikely(!PageUptodate(page))) {
		f2fs_put_page(page, 1);
		return ERR_PTR(-EIO);
	}
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}
	return page;
}

/*
 * Caller ensures that this data page is never allocated.
 * A new zero-filled data page is allocated in the page cache.
 *
 * Also, the caller should grab and release a rwsem by calling f2fs_lock_op()
 * and f2fs_unlock_op().
 * Note that ipage is set only by make_empty_dir.
 */
struct page *get_new_data_page(struct inode *inode,
		struct page *ipage, pgoff_t index, bool new_i_size)
{
	struct address_space *mapping = inode->i_mapping;
	struct page *page;
	struct dnode_of_data dn;
	int err;

	set_new_dnode(&dn, inode, ipage, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	if (err)
		return ERR_PTR(err);
repeat:
	page = grab_cache_page(mapping, index);
	if (!page) {
		err = -ENOMEM;
		goto put_err;
	}

	if (PageUptodate(page))
		return page;

	if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
		SetPageUptodate(page);
	} else {
		err = f2fs_submit_page_bio(F2FS_I_SB(inode), page,
						dn.data_blkaddr, READ_SYNC);
		if (err)
			goto put_err;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto put_err;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}

	if (new_i_size &&
		i_size_read(inode) < ((index + 1) << PAGE_CACHE_SHIFT)) {
		i_size_write(inode, ((index + 1) << PAGE_CACHE_SHIFT));
		/* Only the directory inode sets new_i_size */
		set_inode_flag(F2FS_I(inode), FI_UPDATE_DIR);
	}
	return page;

put_err:
	f2fs_put_dnode(&dn);
	return ERR_PTR(err);
}
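
/*
 * Allocate an on-disk block at the dnode position (used on the direct IO
 * path): charge the block, pick a new address from the warm data log, and
 * grow i_size if the write extends the file.
 */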
static int __allocate_data_block(struct dnode_of_data *dn)
{
	struct f2fs_sb_info *sbi = F2FS_I_SB(dn->inode);
	struct f2fs_inode_info *fi = F2FS_I(dn->inode);
	struct f2fs_summary sum;
	block_t new_blkaddr;
	struct node_info ni;
	pgoff_t fofs;
	int type;

	if (unlikely(is_inode_flag_set(F2FS_I(dn->inode), FI_NO_ALLOC)))
		return -EPERM;
	if (unlikely(!inc_valid_block_count(sbi, dn->inode, 1)))
		return -ENOSPC;

	__set_data_blkaddr(dn, NEW_ADDR);
	dn->data_blkaddr = NEW_ADDR;

	get_node_info(sbi, dn->nid, &ni);
	set_summary(&sum, dn->nid, dn->ofs_in_node, ni.version);

	type = CURSEG_WARM_DATA;

	allocate_data_block(sbi, NULL, NULL_ADDR, &new_blkaddr, &sum, type);

	/* direct IO bypasses the extent cache to maximize performance */
	set_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);
	update_extent_cache(new_blkaddr, dn);
	clear_inode_flag(F2FS_I(dn->inode), FI_NO_EXTENT);

	/* update i_size */
	fofs = start_bidx_of_node(ofs_of_node(dn->node_page), fi) +
							dn->ofs_in_node;
	if (i_size_read(dn->inode) < ((fofs + 1) << PAGE_CACHE_SHIFT))
		i_size_write(dn->inode, ((fofs + 1) << PAGE_CACHE_SHIFT));

	dn->data_blkaddr = new_blkaddr;
	return 0;
}

/*
 * get_data_block() now supports readahead/bmap/rw direct_IO with a mapped bh.
 * If original data blocks are allocated, then give them to blockdev.
 * Otherwise,
 *     a. preallocate requested block addresses
 *     b. do not use extent cache for better performance
 *     c. give the block addresses to blockdev
 */
static int __get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create, bool fiemap)
{
	unsigned int blkbits = inode->i_sb->s_blocksize_bits;
	unsigned maxblocks = bh_result->b_size >> blkbits;
	struct dnode_of_data dn;
	int mode = create ? ALLOC_NODE : LOOKUP_NODE_RA;
	pgoff_t pgofs, end_offset;
	int err = 0, ofs = 1;
	bool allocated = false;

	/* Get the page offset from the block offset (iblock) */
	pgofs = (pgoff_t)(iblock >> (PAGE_CACHE_SHIFT - blkbits));

	if (check_extent_cache(inode, pgofs, bh_result))
		goto out;

	if (create) {
		f2fs_balance_fs(F2FS_I_SB(inode));
		f2fs_lock_op(F2FS_I_SB(inode));
	}

	/* When reading holes, we need its node page */
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, pgofs, mode);
	if (err) {
		if (err == -ENOENT)
			err = 0;
		goto unlock_out;
	}
	if (dn.data_blkaddr == NEW_ADDR && !fiemap)
		goto put_out;

	if (dn.data_blkaddr != NULL_ADDR) {
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else if (create) {
		err = __allocate_data_block(&dn);
		if (err)
			goto put_out;
		allocated = true;
		map_bh(bh_result, inode->i_sb, dn.data_blkaddr);
	} else {
		goto put_out;
	}

	end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	bh_result->b_size = (((size_t)1) << blkbits);
	dn.ofs_in_node++;
	pgofs++;

get_next:
	if (dn.ofs_in_node >= end_offset) {
		if (allocated)
			sync_inode_page(&dn);
		allocated = false;
		f2fs_put_dnode(&dn);

		set_new_dnode(&dn, inode, NULL, NULL, 0);
		err = get_dnode_of_data(&dn, pgofs, mode);
		if (err) {
			if (err == -ENOENT)
				err = 0;
			goto unlock_out;
		}
		if (dn.data_blkaddr == NEW_ADDR && !fiemap)
			goto put_out;

		end_offset = ADDRS_PER_PAGE(dn.node_page, F2FS_I(inode));
	}

	if (maxblocks > (bh_result->b_size >> blkbits)) {
		block_t blkaddr = datablock_addr(dn.node_page, dn.ofs_in_node);
		if (blkaddr == NULL_ADDR && create) {
			err = __allocate_data_block(&dn);
			if (err)
				goto sync_out;
			allocated = true;
			blkaddr = dn.data_blkaddr;
		}
		/* Give more consecutive addresses for the readahead */
		if (blkaddr == (bh_result->b_blocknr + ofs)) {
			ofs++;
			dn.ofs_in_node++;
			pgofs++;
			bh_result->b_size += (((size_t)1) << blkbits);
			goto get_next;
		}
	}
sync_out:
	if (allocated)
		sync_inode_page(&dn);
put_out:
	f2fs_put_dnode(&dn);
unlock_out:
	if (create)
		f2fs_unlock_op(F2FS_I_SB(inode));
out:
	trace_f2fs_get_data_block(inode, iblock, bh_result, err);
	return err;
}

static int get_data_block(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, false);
}

static int get_data_block_fiemap(struct inode *inode, sector_t iblock,
			struct buffer_head *bh_result, int create)
{
	return __get_data_block(inode, iblock, bh_result, create, true);
}

int f2fs_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo,
		u64 start, u64 len)
{
	return generic_block_fiemap(inode, fieinfo,
				start, len, get_data_block_fiemap);
}

static int f2fs_read_data_page(struct file *file, struct page *page)
{
	struct inode *inode = page->mapping->host;
	int ret;

	trace_f2fs_readpage(page, DATA);

	/* If the file has inline data, try to read it directly */
	if (f2fs_has_inline_data(inode))
		ret = f2fs_read_inline_data(inode, page);
	else
		ret = mpage_readpage(page, get_data_block);

	return ret;
}

static int f2fs_read_data_pages(struct file *file,
			struct address_space *mapping,
			struct list_head *pages, unsigned nr_pages)
{
	struct inode *inode = file->f_mapping->host;

	/* If the file has inline data, skip readpages */
	if (f2fs_has_inline_data(inode))
		return 0;

	return mpage_readpages(mapping, pages, nr_pages, get_data_block);
}
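
/*
 * Write one data page to disk: overwrite in place when SSR makes that
 * cheaper, otherwise allocate a new block and update the extent cache
 * (the usual out-of-place, log-structured path).
 */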
int do_write_data_page(struct page *page, struct f2fs_io_info *fio)
{
	struct inode *inode = page->mapping->host;
	block_t old_blkaddr, new_blkaddr;
	struct dnode_of_data dn;
	int err = 0;

	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = get_dnode_of_data(&dn, page->index, LOOKUP_NODE);
	if (err)
		return err;

	old_blkaddr = dn.data_blkaddr;

	/* This page is already truncated */
	if (old_blkaddr == NULL_ADDR)
		goto out_writepage;

	set_page_writeback(page);

	/*
	 * If the current allocation needs SSR, it is better to do in-place
	 * writes for the updated data.
	 */
	if (unlikely(old_blkaddr != NEW_ADDR &&
			!is_cold_data(page) &&
			need_inplace_update(inode))) {
		rewrite_data_page(page, old_blkaddr, fio);
		set_inode_flag(F2FS_I(inode), FI_UPDATE_WRITE);
	} else {
		write_data_page(page, &dn, &new_blkaddr, fio);
		update_extent_cache(new_blkaddr, &dn);
		set_inode_flag(F2FS_I(inode), FI_APPEND_WRITE);
	}
out_writepage:
	f2fs_put_dnode(&dn);
	return err;
}
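
/*
 * ->writepage: zero the tail of a partial last page, route dentry pages
 * through the checkpoint path, and redirty the page (returning
 * AOP_WRITEPAGE_ACTIVATE) when the write cannot proceed right now.
 */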
static int f2fs_write_data_page(struct page *page,
					struct writeback_control *wbc)
{
	struct inode *inode = page->mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	loff_t i_size = i_size_read(inode);
	const pgoff_t end_index = ((unsigned long long) i_size)
							>> PAGE_CACHE_SHIFT;
	unsigned offset = 0;
	bool need_balance_fs = false;
	int err = 0;
	struct f2fs_io_info fio = {
		.type = DATA,
		.rw = (wbc->sync_mode == WB_SYNC_ALL) ? WRITE_SYNC : WRITE,
	};

	trace_f2fs_writepage(page, DATA);

	if (page->index < end_index)
		goto write;

	/*
	 * If the offset is beyond the end of the file, this page does not
	 * have to be written to disk.
	 */
	offset = i_size & (PAGE_CACHE_SIZE - 1);
	if ((page->index >= end_index + 1) || !offset)
		goto out;

	zero_user_segment(page, offset, PAGE_CACHE_SIZE);
write:
	if (unlikely(sbi->por_doing))
		goto redirty_out;

	/* Dentry blocks are controlled by checkpoint */
	if (S_ISDIR(inode->i_mode)) {
		if (unlikely(f2fs_cp_error(sbi)))
			goto redirty_out;
		err = do_write_data_page(page, &fio);
		goto done;
	}

	/* we should bypass data pages to keep the kworker jobs going */
	if (unlikely(f2fs_cp_error(sbi))) {
		SetPageError(page);
		unlock_page(page);
		goto out;
	}

	if (!wbc->for_reclaim)
		need_balance_fs = true;
	else if (has_not_enough_free_secs(sbi, 0))
		goto redirty_out;

	f2fs_lock_op(sbi);
	if (f2fs_has_inline_data(inode) || f2fs_may_inline(inode))
		err = f2fs_write_inline_data(inode, page, offset);
	else
		err = do_write_data_page(page, &fio);
	f2fs_unlock_op(sbi);
done:
	if (err && err != -ENOENT)
		goto redirty_out;

	clear_cold_data(page);
out:
	inode_dec_dirty_pages(inode);
	unlock_page(page);
	if (need_balance_fs)
		f2fs_balance_fs(sbi);
	if (wbc->for_reclaim)
		f2fs_submit_merged_bio(sbi, DATA, WRITE);
	return 0;

redirty_out:
	redirty_page_for_writepage(wbc, page);
	return AOP_WRITEPAGE_ACTIVATE;
}

static int __f2fs_writepage(struct page *page, struct writeback_control *wbc,
			void *data)
{
	struct address_space *mapping = data;
	int ret = mapping->a_ops->writepage(page, wbc);
	mapping_set_error(mapping, ret);
	return ret;
}
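
/*
 * ->writepages: skip lightly dirtied directories under non-urgent
 * writeback, serialize regular-file writeback through sbi->writepages
 * (one writer at a time keeps the merged bios sequential), then flush
 * the merged DATA bio.
 */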
static int f2fs_write_data_pages(struct address_space *mapping,
			    struct writeback_control *wbc)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	bool locked = false;
	int ret;
	long diff;

	trace_f2fs_writepages(mapping->host, wbc, DATA);

	/* deal with chardevs and other special files */
	if (!mapping->a_ops->writepage)
		return 0;

	if (S_ISDIR(inode->i_mode) && wbc->sync_mode == WB_SYNC_NONE &&
			get_dirty_pages(inode) < nr_pages_to_skip(sbi, DATA) &&
			available_free_memory(sbi, DIRTY_DENTS))
		goto skip_write;

	diff = nr_pages_to_write(sbi, DATA, wbc);

	if (!S_ISDIR(inode->i_mode)) {
		mutex_lock(&sbi->writepages);
		locked = true;
	}
	ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
	if (locked)
		mutex_unlock(&sbi->writepages);

	f2fs_submit_merged_bio(sbi, DATA, WRITE);

	remove_dirty_dir_inode(inode);

	wbc->nr_to_write = max((long)0, wbc->nr_to_write - diff);
	return ret;

skip_write:
	wbc->pages_skipped += get_dirty_pages(inode);
	return 0;
}

static void f2fs_write_failed(struct address_space *mapping, loff_t to)
{
	struct inode *inode = mapping->host;

	if (to > inode->i_size) {
		truncate_pagecache(inode, inode->i_size);
		truncate_blocks(inode, inode->i_size, true);
	}
}
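
/*
 * ->write_begin: reserve the target block, then bring the page up to date
 * (zero it, read inline data, or read the on-disk block) so the caller can
 * copy user data into it; on failure, truncate anything reserved.
 */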
static int f2fs_write_begin(struct file *file, struct address_space *mapping,
		loff_t pos, unsigned len, unsigned flags,
		struct page **pagep, void **fsdata)
{
	struct inode *inode = mapping->host;
	struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
	struct page *page;
	pgoff_t index = ((unsigned long long) pos) >> PAGE_CACHE_SHIFT;
	struct dnode_of_data dn;
	int err = 0;

	trace_f2fs_write_begin(inode, pos, len, flags);

	f2fs_balance_fs(sbi);
repeat:
	err = f2fs_convert_inline_data(inode, pos + len, NULL);
	if (err)
		goto fail;

	page = grab_cache_page_write_begin(mapping, index, flags);
	if (!page) {
		err = -ENOMEM;
		goto fail;
	}

	/* to avoid latency during memory pressure */
	unlock_page(page);

	*pagep = page;

	if (f2fs_has_inline_data(inode) && (pos + len) <= MAX_INLINE_DATA)
		goto inline_data;

	f2fs_lock_op(sbi);
	set_new_dnode(&dn, inode, NULL, NULL, 0);
	err = f2fs_reserve_block(&dn, index);
	f2fs_unlock_op(sbi);
	if (err) {
		f2fs_put_page(page, 0);
		goto fail;
	}
inline_data:
	lock_page(page);
	if (unlikely(page->mapping != mapping)) {
		f2fs_put_page(page, 1);
		goto repeat;
	}

	f2fs_wait_on_page_writeback(page, DATA);

	if ((len == PAGE_CACHE_SIZE) || PageUptodate(page))
		return 0;

	if ((pos & PAGE_CACHE_MASK) >= i_size_read(inode)) {
		unsigned start = pos & (PAGE_CACHE_SIZE - 1);
		unsigned end = start + len;

		/* Reading beyond i_size is simple: memset to zero */
		zero_user_segments(page, 0, start, end, PAGE_CACHE_SIZE);
		goto out;
	}

	if (f2fs_has_inline_data(inode)) {
		err = f2fs_read_inline_data(inode, page);
		if (err) {
			page_cache_release(page);
			goto fail;
		}
	} else if (dn.data_blkaddr == NEW_ADDR) {
		zero_user_segment(page, 0, PAGE_CACHE_SIZE);
	} else {
		err = f2fs_submit_page_bio(sbi, page, dn.data_blkaddr,
					   READ_SYNC);
		if (err)
			goto fail;

		lock_page(page);
		if (unlikely(!PageUptodate(page))) {
			f2fs_put_page(page, 1);
			err = -EIO;
			goto fail;
		}
		if (unlikely(page->mapping != mapping)) {
			f2fs_put_page(page, 1);
			goto repeat;
		}
	}
out:
	SetPageUptodate(page);
	clear_cold_data(page);
	return 0;

fail:
	f2fs_write_failed(mapping, pos + len);
	return err;
}
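
/*
 * ->write_end: register the page for atomic/volatile files instead of
 * dirtying it directly, and push i_size forward when the copy extended
 * the file.
 */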
static int f2fs_write_end(struct file *file,
			struct address_space *mapping,
			loff_t pos, unsigned len, unsigned copied,
			struct page *page, void *fsdata)
{
	struct inode *inode = page->mapping->host;

	trace_f2fs_write_end(inode, pos, len, copied);

	if (f2fs_is_atomic_file(inode) || f2fs_is_volatile_file(inode))
		register_inmem_page(inode, page);
	else
		set_page_dirty(page);

	if (pos + copied > i_size_read(inode)) {
		i_size_write(inode, pos + copied);
		mark_inode_dirty(inode);
		update_inode_page(inode);
	}

	f2fs_put_page(page, 1);
	return copied;
}

static int check_direct_IO(struct inode *inode, int rw,
		struct iov_iter *iter, loff_t offset)
{
	unsigned blocksize_mask = inode->i_sb->s_blocksize - 1;

	if (rw == READ)
		return 0;

	if (offset & blocksize_mask)
		return -EINVAL;

	if (iov_iter_alignment(iter) & blocksize_mask)
		return -EINVAL;

	return 0;
}
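
/*
 * ->direct_IO: fall back to buffered IO for inline files or misaligned
 * writes (by returning 0), otherwise hand the request to
 * blockdev_direct_IO() with get_data_block as the block mapper.
 */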
static ssize_t f2fs_direct_IO(int rw, struct kiocb *iocb,
		struct iov_iter *iter, loff_t offset)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	struct inode *inode = mapping->host;
	size_t count = iov_iter_count(iter);
	int err;

	/* Let buffer I/O handle the inline data case. */
	if (f2fs_has_inline_data(inode))
		return 0;

	if (check_direct_IO(inode, rw, iter, offset))
		return 0;

	trace_f2fs_direct_IO_enter(inode, offset, count, rw);

	err = blockdev_direct_IO(rw, iocb, inode, iter, offset, get_data_block);
	if (err < 0 && (rw & WRITE))
		f2fs_write_failed(mapping, offset + count);

	trace_f2fs_direct_IO_exit(inode, offset, count, rw, err);

	return err;
}

static void f2fs_invalidate_data_page(struct page *page, unsigned int offset,
				      unsigned int length)
{
	struct inode *inode = page->mapping->host;

	if (offset % PAGE_CACHE_SIZE || length != PAGE_CACHE_SIZE)
		return;

	if (PageDirty(page))
		inode_dec_dirty_pages(inode);
	ClearPagePrivate(page);
}

static int f2fs_release_data_page(struct page *page, gfp_t wait)
{
	ClearPagePrivate(page);
	return 1;
}
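
/*
 * ->set_page_dirty: mark both the page and its inode dirty, and account
 * the newly dirtied page so writeback and checkpoint can track it.
 */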
static int f2fs_set_data_page_dirty(struct page *page)
{
	struct address_space *mapping = page->mapping;
	struct inode *inode = mapping->host;

	trace_f2fs_set_page_dirty(page, DATA);

	SetPageUptodate(page);

	mark_inode_dirty(inode);

	if (!PageDirty(page)) {
		__set_page_dirty_nobuffers(page);
		update_dirty_page(inode, page);
		return 1;
	}
	return 0;
}

static sector_t f2fs_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;

	if (f2fs_has_inline_data(inode))
		return 0;

	return generic_block_bmap(mapping, block, get_data_block);
}

const struct address_space_operations f2fs_dblock_aops = {
	.readpage	= f2fs_read_data_page,
	.readpages	= f2fs_read_data_pages,
	.writepage	= f2fs_write_data_page,
	.writepages	= f2fs_write_data_pages,
	.write_begin	= f2fs_write_begin,
	.write_end	= f2fs_write_end,
	.set_page_dirty	= f2fs_set_data_page_dirty,
	.invalidatepage	= f2fs_invalidate_data_page,
	.releasepage	= f2fs_release_data_page,
	.direct_IO	= f2fs_direct_IO,
	.bmap		= f2fs_bmap,
};