/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */
#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"
#include "nfs4trace.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT	(120 * HZ)
/* Locking:
 *
 * pnfs_spinlock:
 *	protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);
/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}
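
/*
 * Like find_pnfs_driver_locked(), but takes pnfs_spinlock itself and,
 * on a hit, pins the driver module with try_module_get() so it cannot
 * be unloaded while in use.
 */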
static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}
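
/*
 * Detach the current layout driver from @nfss: give the driver a chance
 * to clean up via ->clear_layoutdriver, drop this server's contribution
 * to the client's MDS count (purging the deviceid cache when it reaches
 * zero), and release the module reference taken in find_pnfs_driver().
 */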
void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}
/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
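
/*
 * Register a layout driver in pnfs_modules_tbl. The driver must supply
 * a non-zero id plus alloc_lseg/free_lseg hooks; a duplicate id is
 * rejected with -EINVAL.
 */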
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);

	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}
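
/*
 * Per-iomode "layoutget failed" bits. Setting a fail bit takes an extra
 * reference on the header so it survives until the bit is cleared; the
 * bit is aged out after PNFS_LAYOUTGET_RETRY_TIMEOUT so that failed
 * layoutgets are eventually retried.
 */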
static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		atomic_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
		iomode == IOMODE_RW ? "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}
static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lo->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	atomic_dec(&lo->plh_refcount);
	if (list_empty(&lo->plh_segs))
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	lo = lseg->pls_layout;
	inode = lo->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

static void pnfs_free_lseg_async_work(struct work_struct *work)
{
	struct pnfs_layout_segment *lseg;
	struct pnfs_layout_hdr *lo;

	lseg = container_of(work, struct pnfs_layout_segment, pls_work);
	lo = lseg->pls_layout;

	pnfs_free_lseg(lseg);
	pnfs_put_layout_hdr(lo);
}

static void pnfs_free_lseg_async(struct pnfs_layout_segment *lseg)
{
	INIT_WORK(&lseg->pls_work, pnfs_free_lseg_async_work);
	schedule_work(&lseg->pls_work);
}

void
pnfs_put_lseg_locked(struct pnfs_layout_segment *lseg)
{
	if (!lseg)
		return;

	assert_spin_locked(&lseg->pls_layout->plh_inode->i_lock);

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	if (atomic_dec_and_test(&lseg->pls_refcount)) {
		struct pnfs_layout_hdr *lo = lseg->pls_layout;

		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		pnfs_free_lseg_async(lseg);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg_locked);
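
/* Saturating end-of-range: e.g. end_offset(10, NFS4_MAX_UINT64) would
 * overflow u64, so it clamps to NFS4_MAX_UINT64 ("to EOF") instead.
 */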
static u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}

/*
 * do l1 and l2 intersect?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static bool
pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
		    const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}

static bool
should_free_lseg(const struct pnfs_layout_range *lseg_range,
		 const struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
}
static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!atomic_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}

/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}
/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
		pnfs_get_layout_hdr(lo);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		inode = igrab(lo->plh_inode);
		if (inode == NULL)
			continue;
		list_del_init(&lo->plh_layouts);
		if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
			continue;
		rcu_read_unlock();
		spin_unlock(&clp->cl_lock);
		iput(inode);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}
static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;

		pnfs_layoutcommit_inode(inode, false);

		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		if (is_bulk_recall)
			set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
		if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
			ret = -EAGAIN;
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		pnfs_put_layout_hdr(lo);
		iput(inode);
	}
	return ret;
}
int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
				server,
				&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
					server,
					&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}
/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
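/*
 * For example, s1 = 1 vs s2 = 0xfffffffe: the u32 difference wraps to 3,
 * and (s32)3 > 0, so s1 is correctly treated as newer across wraparound.
 */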
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)(s1 - s2) > 0;
}
/* update lo->plh_stateid with new if is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq, new_barrier;
	int empty = list_empty(&lo->plh_segs);

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			new_barrier = be32_to_cpu(new->seqid);
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.
			 */
			new_barrier = newseq - atomic_read(&lo->plh_outstanding);
		}
		if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
			lo->plh_barrier = new_barrier;
	}
}

static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, int lget)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, 1)) {
		status = -EAGAIN;
	} else if (!nfs4_valid_open_stateid(open_state)) {
		status = -EBADF;
	} else if (list_empty(&lo->plh_segs) ||
		   test_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			nfs4_stateid_copy(dst, &open_state->stateid);
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		nfs4_stateid_copy(dst, &lo->plh_stateid);
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}
/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg;

	dprintk("--> %s\n", __func__);

	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	lgp->args.minlength = PAGE_CACHE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range = *range;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->gfp_flags = gfp_flags;
	lgp->cred = lo->plh_lc_cred;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	lseg = nfs4_proc_layoutget(lgp, gfp_flags);
	if (IS_ERR(lseg)) {
		switch (PTR_ERR(lseg)) {
		case -ENOMEM:
		case -ERESTARTSYS:
			break;
		default:
			/* remember that LAYOUTGET failed and suspend trying */
			pnfs_layout_io_set_failed(lo, range->iomode);
		}
		return NULL;
	}

	return lseg;
}
static void pnfs_clear_layoutcommit(struct inode *inode,
		struct list_head *head)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg, *tmp;

	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return;
	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			continue;
		pnfs_lseg_dec_and_remove_zero(lseg, head);
	}
}
/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	struct nfs4_layoutreturn *lrp;
	nfs4_stateid stateid;
	int status = 0, empty;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	empty = list_empty(&lo->plh_segs);
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);

	if (NFS_SERVER(ino)->pnfs_curr_ld->return_range) {
		struct pnfs_layout_range range = {
			.iomode		= IOMODE_ANY,
			.offset		= 0,
			.length		= NFS4_MAX_UINT64,
		};
		NFS_SERVER(ino)->pnfs_curr_ld->return_range(lo, &range);
	}

	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (empty) {
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out;
	}

	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
	lo->plh_block_lgets++;
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);

	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		lo->plh_block_lgets--;
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = lo->plh_lc_cred;

	status = nfs4_proc_layoutreturn(lrp);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);
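
/*
 * Flush and commit dirty data, then return the layout: layoutgets are
 * blocked for the duration so the layout cannot change between the
 * LAYOUTCOMMIT and the LAYOUTRETURN.
 */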
int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}
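
/*
 * Return-on-close: if this layout was marked NFS_LAYOUT_ROC, invalidate
 * all NFS_LSEG_ROC segments now so the layout can be returned as part
 * of CLOSE processing. Returns true iff any such segment was found.
 */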
bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	if (atomic_dec_and_test(&lo->plh_refcount)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&ino->i_lock);
		pnfs_free_layout_hdr(lo);
	} else
		spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg;
	u32 current_seqid;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
			found = true;
			goto out;
		}
	lo = nfsi->layout;
	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

	/* Since close does not return a layout stateid for use as
	 * a barrier, we choose the worst-case barrier.
	 */
	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
out:
	spin_unlock(&ino->i_lock);
	return found;
}
/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
	   const struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}

static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (pnfs_lseg_range_cmp(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}
static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->cred);
	return lo;
}
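
/*
 * Find the inode's layout header, or allocate one if none exists yet.
 * Called with i_lock held; the lock is dropped around the allocation,
 * so on re-acquisition another thread may have won the race, in which
 * case the freshly allocated header is discarded.
 */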
static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout != NULL)
		goto out_existing;
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
		nfsi->layout = new;
		return new;
	} else if (new != NULL)
		pnfs_free_layout_hdr(new);
out_existing:
	pnfs_get_layout_hdr(nfsi->layout);
	return nfsi->layout;
}
/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
		 const struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !pnfs_lseg_range_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return pnfs_lseg_range_contained(ls_range, &range1);
}
/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    pnfs_lseg_range_match(&lseg->pls_range, range)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
		if (lseg->pls_range.offset > range->offset)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}
/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server.  If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server.  If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}

	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}
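
/*
 * For example, if the server gave only THRESHOLD_RD with rd_sz = 1M,
 * a READ of a 4k file has fsize < rd_sz, so the function returns true
 * and the read is sent to the MDS rather than over pNFS.
 */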
/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		goto out;

	if (pnfs_within_mdsthreshold(ctx, ino, iomode))
		goto out;

	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, &arg);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);

	first = list_empty(&lo->plh_layouts) ? true : false;
	spin_unlock(&ino->i_lock);

	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
	pnfs_put_layout_hdr(lo);
out:
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
			"(%s, offset: %llu, length: %llu)\n",
			__func__, ino->i_sb->s_id,
			(unsigned long long)NFS_FILEID(ino),
			lseg == NULL ? "not found" : "found",
			iomode == IOMODE_RW ? "read/write" : "read-only",
			(unsigned long long)pos,
			(unsigned long long)count);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);
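
/*
 * Handle a LAYOUTGET reply: hand the opaque layout to the layout driver
 * via ->alloc_lseg, then, under i_lock, decide whether to keep the new
 * segment or "forget" it (bulk recall in progress, layoutgets blocked,
 * or a stale stateid sequence).
 */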
struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	LIST_HEAD(free_me);
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
		       __func__, status);
		goto out;
	}

	init_lseg(lo, lseg);
	lseg->pls_range = res->range;

	spin_lock(&ino->i_lock);
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}

	if (nfs4_stateid_match_other(&lo->plh_stateid, &res->stateid)) {
		/* existing state ID, make sure the sequence number matches. */
		if (pnfs_layout_stateid_blocked(lo, &res->stateid)) {
			dprintk("%s forget reply due to sequence\n", __func__);
			goto out_forget_reply;
		}
		pnfs_set_layout_stateid(lo, &res->stateid, false);
	} else {
		/*
		 * We got an entirely new state ID.  Mark all segments for the
		 * inode invalid, and don't bother validating the stateid
		 * sequence number.
		 */
		pnfs_mark_matching_lsegs_invalid(lo, &free_me, NULL);

		nfs4_stateid_copy(&lo->plh_stateid, &res->stateid);
		lo->plh_barrier = be32_to_cpu(res->stateid.seqid);
	}

	clear_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);

	pnfs_get_lseg(lseg);
	pnfs_layout_insert_lseg(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&free_me);
	return lseg;
out:
	return ERR_PTR(status);

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size = req->wb_bytes;

	if (pgio->pg_lseg == NULL) {
		if (pgio->pg_dreq == NULL)
			rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
		else
			rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   req_offset(req),
						   rd_size,
						   IOMODE_READ,
						   GFP_KERNEL);
	}
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	if (pgio->pg_lseg == NULL)
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   req->wb_context,
						   req_offset(req),
						   wb_size,
						   IOMODE_RW,
						   GFP_NOFS);
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);
/*
 * Return 0 if @req cannot be coalesced into @pgio, otherwise return the number
 * of bytes (maximum @req->wb_bytes) that can be coalesced.
 */
size_t
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		     struct nfs_page *req)
{
	unsigned int size;
	u64 seg_end, req_start, seg_left;

	size = nfs_generic_pg_test(pgio, prev, req);
	if (!size)
		return 0;

	/*
	 * 'size' contains the number of bytes left in the current page (up
	 * to the original size asked for in @req->wb_bytes).
	 *
	 * Calculate how many bytes are left in the layout segment
	 * and if there are fewer bytes than 'size', return that instead.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 *
	 */
	if (pgio->pg_lseg) {
		seg_end = end_offset(pgio->pg_lseg->pls_range.offset,
				     pgio->pg_lseg->pls_range.length);
		req_start = req_offset(req);
		WARN_ON_ONCE(req_start > seg_end);
		/* start of request is past the last byte of this segment */
		if (req_start >= seg_end)
			return 0;

		/* adjust 'size' iff there are fewer bytes left in the
		 * segment than what nfs_generic_pg_test returned */
		seg_left = seg_end - req_start;
		if (seg_left < size)
			size = (unsigned int)seg_left;
	}

	return size;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
int pnfs_write_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, hdr->inode, FLUSH_STABLE, true,
			      hdr->completion_ops);
	set_bit(NFS_CONTEXT_RESEND_WRITES, &hdr->args.context->flags);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
static void pnfs_ld_handle_write_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_write_done_resend_to_mds(hdr);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_pgio_header *hdr)
{
	trace_nfs4_pnfs_write(hdr, hdr->pnfs_error);
	if (!hdr->pnfs_error) {
		pnfs_set_layoutcommit(hdr);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	} else
		pnfs_ld_handle_write_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_pgio_header *hdr)
{
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_write_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
	hdr->release(hdr);
}
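
/*
 * Hand a write off to the layout driver's ->write_pagelist. A return of
 * PNFS_NOT_ATTEMPTED makes the caller fall back to writing through the
 * MDS (see pnfs_write_through_mds above).
 */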
static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_pgio_header *hdr,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg,
		       int how)
{
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, hdr->args.count, hdr->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(hdr, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}
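
/*
 * Hand the coalesced requests in @hdr to the layout driver, falling
 * back to the MDS when the driver does not attempt the I/O. The
 * descriptor's lseg reference is consumed here.
 */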
static void
pnfs_do_write(struct nfs_pageio_descriptor *desc,
	      struct nfs_pgio_header *hdr, int how)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	desc->pg_lseg = NULL;
	trypnfs = pnfs_try_to_write_data(hdr, call_ops, lseg, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED)
		pnfs_write_through_mds(desc, hdr);
	pnfs_put_lseg(lseg);
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_writehdr_free);
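
/*
 * Flush the pageio descriptor's write list: allocate a header that
 * takes its own reference to the layout segment, coalesce the requests
 * into it, and send them via pnfs_do_write(). On failure the
 * descriptor's lseg reference is dropped.
 */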
int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return -ENOMEM;
	}
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_write(desc, hdr, desc->pg_ioflags);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
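
/*
 * Read-side counterpart of pnfs_write_done_resend_to_mds(): resend all
 * requests attached to @hdr through the MDS.
 */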
int pnfs_read_done_resend_to_mds(struct nfs_pgio_header *hdr)
{
	struct nfs_pageio_descriptor pgio;

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, hdr->inode, true, hdr->completion_ops);
	return nfs_pageio_resend(&pgio, hdr);
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
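
/*
 * A read failed in the layout driver; mirror the write-side error
 * handling: optionally return the layout, then resend through the MDS
 * unless a redo is already pending.
 */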
static void pnfs_ld_handle_read_error(struct nfs_pgio_header *hdr)
{
	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		hdr->task.tk_status = pnfs_read_done_resend_to_mds(hdr);
}

/*
 * Called by non-RPC-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_pgio_header *hdr)
{
	trace_nfs4_pnfs_read(hdr, hdr->pnfs_error);
	if (likely(!hdr->pnfs_error)) {
		__nfs4_read_done_cb(hdr);
		hdr->mds_ops->rpc_call_done(&hdr->task, hdr);
	} else
		pnfs_ld_handle_read_error(hdr);
	hdr->mds_ops->rpc_release(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);
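
/*
 * The layout driver declined the read: splice the requests back onto
 * the descriptor and reset it for re-coalescing through the MDS.
 */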
static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		      struct nfs_pgio_header *hdr)
{
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_read_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_pgio_data_destroy(hdr);
	hdr->release(hdr);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_pgio_header *hdr,
		      const struct rpc_call_ops *call_ops,
		      struct pnfs_layout_segment *lseg)
{
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, hdr->args.count, hdr->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(hdr);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}
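
/*
 * Read-side counterpart of pnfs_do_write(): try the layout driver
 * first, fall back to the MDS if it does not attempt the I/O, and
 * consume the descriptor's lseg reference.
 */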
static void
pnfs_do_read(struct nfs_pageio_descriptor *desc, struct nfs_pgio_header *hdr)
{
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;
	enum pnfs_try_status trypnfs;

	desc->pg_lseg = NULL;
	trypnfs = pnfs_try_to_read_data(hdr, call_ops, lseg);
	if (trypnfs == PNFS_NOT_ATTEMPTED)
		pnfs_read_through_mds(desc, hdr);
	pnfs_put_lseg(lseg);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_pgio_header_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_readhdr_free);
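
/*
 * Flush the pageio descriptor's read list; mirrors
 * pnfs_generic_pg_writepages() above.
 */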
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_pgio_header *hdr;
	int ret;

	hdr = nfs_pgio_header_alloc(desc->pg_rw_ops);
	if (!hdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		ret = -ENOMEM;
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return ret;
	}
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	ret = nfs_generic_pgio(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_read(desc, hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
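
/*
 * Release the LAYOUTCOMMITTING bit and wake any waiters; the memory
 * barrier orders the bit clear before the wake-up.
 */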
static void pnfs_clear_layoutcommitting(struct inode *inode)
{
	unsigned long *bitlock = &NFS_I(inode)->flags;

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_atomic();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}
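
/*
 * Drop the segment references taken in pnfs_set_layoutcommit() and
 * allow the next LAYOUTCOMMIT to proceed.
 */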
static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	pnfs_clear_layoutcommitting(inode);
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);
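
/*
 * Record that this write extends the range a future LAYOUTCOMMIT must
 * cover: flag the inode and the layout segment, raise the last write
 * byte (plh_lwb) if needed, and mark the inode dirty so a LAYOUTCOMMIT
 * is sent later.
 */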
void
pnfs_set_layoutcommit(struct nfs_pgio_header *hdr)
{
	struct inode *inode = hdr->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos = hdr->mds_offset + hdr->res.count;
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(hdr->lseg);
	}
	if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, hdr->lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a no-op because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);
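
/*
 * Commit-path counterpart of pnfs_set_layoutcommit(); here the last
 * write byte comes from @data->lwb rather than from the pgio header.
 */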
void pnfs_commit_set_layoutcommit(struct nfs_commit_data *data)
{
	struct inode *inode = data->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &data->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(data->lseg);
	}
	if (data->lwb > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = data->lwb;
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, data->lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a no-op because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_commit_set_layoutcommit);
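
/*
 * Let the layout driver clean up after a LAYOUTCOMMIT, then drop the
 * segment references that were gathered for it.
 */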
void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(inode)->pnfs_curr_ld;
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status;

	if (!pnfs_layoutcommit_outstanding(inode))
		return 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	status = -EAGAIN;
	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync)
			goto out;
		status = wait_on_bit_lock_action(&nfsi->flags,
				NFS_INO_LAYOUTCOMMITTING,
				nfs_wait_bit_killable,
				TASK_KILLABLE);
		if (status)
			goto out;
	}

	status = -ENOMEM;
	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data)
		goto clear_layoutcommitting;

	status = 0;
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_unlock;

	INIT_LIST_HEAD(&data->lseg_list);
	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;
	nfsi->layout->plh_lwb = 0;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	if (ld->prepare_layoutcommit) {
		status = ld->prepare_layoutcommit(&data->args);
		if (status) {
			/* restore the saved last write byte; plh_lwb was
			 * zeroed above and may have been raised again by
			 * a concurrent writer, so keep the larger value */
			spin_lock(&inode->i_lock);
			if (end_pos > nfsi->layout->plh_lwb)
				nfsi->layout->plh_lwb = end_pos;
			spin_unlock(&inode->i_lock);
			put_rpccred(data->cred);
			set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags);
			goto clear_layoutcommitting;
		}
	}

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_unlock:
	spin_unlock(&inode->i_lock);
	kfree(data);
clear_layoutcommitting:
	pnfs_clear_layoutcommitting(inode);
	goto out;
}
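
/*
 * Allocate a zeroed nfs4_threshold for mdsthreshold attributes; the
 * caller owns the returned structure.
 */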
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}