/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement. The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);

	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (!test_and_set_bit(fail_bit, &lo->plh_flags))
		atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		atomic_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ? "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}
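
/*
 * Worked example (illustrative, assuming HZ == 1000): the retry window
 * PNFS_LAYOUTGET_RETRY_TIMEOUT is then 120000 jiffies (120 s). If a
 * layoutget failed at jiffies == 5000, a test at jiffies == 50000 finds
 * the timestamp inside [end - 120000, end], so the fail bit stays set
 * and I/O keeps going through the MDS. Once jiffies passes 125000,
 * time_in_range() no longer matches, the fail bit is cleared, and
 * layoutgets may be retried.
 */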
static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lo->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	atomic_dec(&lo->plh_refcount);
	if (list_empty(&lo->plh_segs))
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	lo = lseg->pls_layout;
	inode = lo->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

static u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}
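
/*
 * Worked example: end_offset(10, NFS4_MAX_UINT64) wraps around to 9,
 * which is smaller than the start, so the sum saturates and the
 * function returns NFS4_MAX_UINT64 ("to end of file"). Without this
 * clamp, a huge length would silently produce a tiny, bogus range end.
 */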
/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static bool
pnfs_lseg_range_contained(const struct pnfs_layout_range *l1,
		 const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}
/*
 * do l1 and l2 intersect?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static bool
pnfs_lseg_range_intersecting(const struct pnfs_layout_range *l1,
		    const struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}
static bool
should_free_lseg(const struct pnfs_layout_range *lseg_range,
		 const struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       pnfs_lseg_range_intersecting(lseg_range, recall_range);
}

static bool pnfs_lseg_dec_and_remove_zero(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	if (!atomic_dec_and_test(&lseg->pls_refcount))
		return false;
	pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
	list_add(&lseg->pls_list, tmp_list);
	return true;
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list. It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (pnfs_lseg_dec_and_remove_zero(lseg, tmp_list))
			rv = 1;
	}
	return rv;
}

/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
		pnfs_get_layout_hdr(lo);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

static bool
pnfs_layout_add_bulk_destroy_list(struct inode *inode,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo;
	bool ret = false;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo != NULL && list_empty(&lo->plh_bulk_destroy)) {
		pnfs_get_layout_hdr(lo);
		list_add(&lo->plh_bulk_destroy, layout_list);
		ret = true;
	}
	spin_unlock(&inode->i_lock);
	return ret;
}

/* Caller must hold rcu_read_lock and clp->cl_lock */
static int
pnfs_layout_bulk_destroy_byserver_locked(struct nfs_client *clp,
		struct nfs_server *server,
		struct list_head *layout_list)
{
	struct pnfs_layout_hdr *lo, *next;
	struct inode *inode;

	list_for_each_entry_safe(lo, next, &server->layouts, plh_layouts) {
		inode = igrab(lo->plh_inode);
		if (inode == NULL)
			continue;
		list_del_init(&lo->plh_layouts);
		if (pnfs_layout_add_bulk_destroy_list(inode, layout_list))
			continue;
		rcu_read_unlock();
		spin_unlock(&clp->cl_lock);
		iput(inode);
		spin_lock(&clp->cl_lock);
		rcu_read_lock();
		return -EAGAIN;
	}
	return 0;
}

static int
pnfs_layout_free_bulk_destroy_list(struct list_head *layout_list,
		bool is_bulk_recall)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;
	struct pnfs_layout_range range = {
		.iomode = IOMODE_ANY,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(lseg_list);
	int ret = 0;

	while (!list_empty(layout_list)) {
		lo = list_entry(layout_list->next, struct pnfs_layout_hdr,
				plh_bulk_destroy);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		inode = lo->plh_inode;
		spin_lock(&inode->i_lock);
		list_del_init(&lo->plh_bulk_destroy);
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		if (is_bulk_recall)
			set_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
		if (pnfs_mark_matching_lsegs_invalid(lo, &lseg_list, &range))
			ret = -EAGAIN;
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&lseg_list);
		pnfs_put_layout_hdr(lo);
		iput(inode);
	}
	return ret;
}

int
pnfs_destroy_layouts_byfsid(struct nfs_client *clp,
		struct nfs_fsid *fsid,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (memcmp(&server->fsid, fsid, sizeof(*fsid)) != 0)
			continue;
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
				server,
				&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}

int
pnfs_destroy_layouts_byclid(struct nfs_client *clp,
		bool is_recall)
{
	struct nfs_server *server;
	LIST_HEAD(layout_list);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
restart:
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (pnfs_layout_bulk_destroy_byserver_locked(clp,
					server,
					&layout_list) != 0)
			goto restart;
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	if (list_empty(&layout_list))
		return 0;
	return pnfs_layout_free_bulk_destroy_list(&layout_list, is_recall);
}
/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	pnfs_destroy_layouts_byclid(clp, false);
}

/*
 * Compare 2 layout stateid sequence ids, to see which is newer,
 * taking into account wraparound issues.
 */
static bool pnfs_seqid_is_newer(u32 s1, u32 s2)
{
	return (s32)s1 - (s32)s2 > 0;
}
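
/*
 * Worked example of the wraparound handling: with s1 = 0x00000001 and
 * s2 = 0xffffffff, the signed difference is 1 - (-1) = 2 > 0, so s1 is
 * correctly treated as newer even though it is numerically smaller.
 * A plain unsigned compare (s1 > s2) would get this case wrong.
 */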
/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq, new_barrier;
	int empty = list_empty(&lo->plh_segs);

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if (empty || pnfs_seqid_is_newer(newseq, oldseq)) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			new_barrier = be32_to_cpu(new->seqid);
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.
			 */
			new_barrier = newseq - atomic_read(&lo->plh_outstanding);
		}
		if (empty || pnfs_seqid_is_newer(new_barrier, lo->plh_barrier))
			lo->plh_barrier = new_barrier;
	}
}
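
/*
 * Example of the barrier arithmetic above: if the new seqid is 100 and
 * two layoutgets are still outstanding, the barrier becomes 98. A reply
 * carrying a stateid with seqid <= 98 is then rejected as stale by
 * pnfs_layout_stateid_blocked() below, while the replies for the two
 * in-flight layoutgets (which must carry newer seqids) still pass.
 */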
static bool
pnfs_layout_stateid_blocked(const struct pnfs_layout_hdr *lo,
		const nfs4_stateid *stateid)
{
	u32 seqid = be32_to_cpu(stateid->seqid);

	return !pnfs_seqid_is_newer(seqid, lo->plh_barrier);
}

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(const struct pnfs_layout_hdr *lo, int lget)
{
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, 1)) {
		status = -EAGAIN;
	} else if (!nfs4_valid_open_stateid(open_state)) {
		status = -EBADF;
	} else if (list_empty(&lo->plh_segs)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			nfs4_stateid_copy(dst, &open_state->stateid);
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		nfs4_stateid_copy(dst, &lo->plh_stateid);
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg;

	dprintk("--> %s\n", __func__);

	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	lgp->args.minlength = PAGE_CACHE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range = *range;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->gfp_flags = gfp_flags;
	lgp->cred = lo->plh_lc_cred;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	lseg = nfs4_proc_layoutget(lgp, gfp_flags);
	if (IS_ERR(lseg)) {
		switch (PTR_ERR(lseg)) {
		case -ENOMEM:
		case -ERESTARTSYS:
			break;
		default:
			/* remember that LAYOUTGET failed and suspend trying */
			pnfs_layout_io_set_failed(lo, range->iomode);
		}
		return NULL;
	}

	return lseg;
}

static void pnfs_clear_layoutcommit(struct inode *inode,
		struct list_head *head)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg, *tmp;

	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return;
	list_for_each_entry_safe(lseg, tmp, &nfsi->layout->plh_segs, pls_list) {
		if (!test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			continue;
		pnfs_lseg_dec_and_remove_zero(lseg, head);
	}
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	struct nfs4_layoutreturn *lrp;
	nfs4_stateid stateid;
	int status = 0, empty;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	empty = list_empty(&lo->plh_segs);
	pnfs_clear_layoutcommit(ino, &tmp_list);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (empty) {
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out;
	}
	lo->plh_block_lgets++;
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);

	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		spin_lock(&ino->i_lock);
		lo->plh_block_lgets--;
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;
	lrp->cred = lo->plh_lc_cred;

	status = nfs4_proc_layoutreturn(lrp);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);

int
pnfs_commit_and_return_layout(struct inode *inode)
{
	struct pnfs_layout_hdr *lo;
	int ret;

	spin_lock(&inode->i_lock);
	lo = NFS_I(inode)->layout;
	if (lo == NULL) {
		spin_unlock(&inode->i_lock);
		return 0;
	}
	pnfs_get_layout_hdr(lo);
	/* Block new layoutgets and read/write to ds */
	lo->plh_block_lgets++;
	spin_unlock(&inode->i_lock);
	filemap_fdatawait(inode->i_mapping);
	ret = pnfs_layoutcommit_inode(inode, true);
	if (ret == 0)
		ret = _pnfs_return_layout(inode);
	spin_lock(&inode->i_lock);
	lo->plh_block_lgets--;
	spin_unlock(&inode->i_lock);
	pnfs_put_layout_hdr(lo);
	return ret;
}

bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	if (atomic_dec_and_test(&lo->plh_refcount)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&ino->i_lock);
		pnfs_free_layout_hdr(lo);
	} else
		spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (pnfs_seqid_is_newer(barrier, lo->plh_barrier))
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg;
	u32 current_seqid;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
			found = true;
			goto out;
		}
	lo = nfsi->layout;
	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

	/* Since close does not return a layout stateid for use as
	 * a barrier, we choose the worst-case barrier.
	 */
	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
out:
	spin_unlock(&ino->i_lock);
	return found;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
pnfs_lseg_range_cmp(const struct pnfs_layout_range *l1,
	   const struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
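
/*
 * Example ordering: for two segments with identical offset and length,
 * cmp(RW, READ) is 0 - 1 = -1, so the insertion loop below places the
 * IOMODE_RW segment before the IOMODE_READ one. pnfs_find_lseg(), which
 * walks plh_segs in order, therefore prefers RW layouts, as the comment
 * above intends.
 */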
static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (pnfs_lseg_range_cmp(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_destroy);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->cred);
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout != NULL)
		goto out_existing;
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL)) {	/* Won the race? */
		nfsi->layout = new;
		return new;
	} else if (new != NULL)
		pnfs_free_layout_hdr(new);
out_existing:
	pnfs_get_layout_hdr(nfsi->layout);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static bool
pnfs_lseg_range_match(const struct pnfs_layout_range *ls_range,
		 const struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !pnfs_lseg_range_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return pnfs_lseg_range_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    pnfs_lseg_range_match(&lseg->pls_range, range)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
		if (lseg->pls_range.offset > range->offset)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server. If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server. If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}
	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}
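
/*
 * Worked example (illustrative values): suppose the OPEN reply set
 * rd_sz = 1 MB and rd_io_sz = 64 KB, with both bits present in t->bm.
 * A READ against a 512 KB file with 16 KB of cumulative read_io finds
 * size == true and io == true, so the function returns true and the
 * I/O goes to the MDS. Once the file grows past 1 MB or read_io
 * reaches 64 KB, one of the two tests fails and I/O switches to the
 * pNFS data servers, matching the RFC 5661 guidance quoted above.
 */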
/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		goto out;

	if (pnfs_within_mdsthreshold(ctx, ino, iomode))
		goto out;

	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, &arg);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);

	first = list_empty(&lo->plh_layouts) ? true : false;
	spin_unlock(&ino->i_lock);

	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
	pnfs_put_layout_hdr(lo);
out:
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
			"(%s, offset: %llu, length: %llu)\n",
			__func__, ino->i_sb->s_id,
			(unsigned long long)NFS_FILEID(ino),
			lseg == NULL ? "not found" : "found",
			iomode==IOMODE_RW ? "read/write" : "read-only",
			(unsigned long long)pos,
			(unsigned long long)count);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);

struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
		       __func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, 1) ||
	    pnfs_layout_stateid_blocked(lo, &res->stateid)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);

	init_lseg(lo, lseg);
	lseg->pls_range = res->range;

	pnfs_get_lseg(lseg);
	pnfs_layout_insert_lseg(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	spin_unlock(&ino->i_lock);
	return lseg;
out:
	return ERR_PTR(status);

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}

void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	u64 rd_size = req->wb_bytes;

	WARN_ON_ONCE(pgio->pg_lseg != NULL);

	if (req->wb_offset != req->wb_pgbase) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}

	if (pgio->pg_dreq == NULL)
		rd_size = i_size_read(pgio->pg_inode) - req_offset(req);
	else
		rd_size = nfs_dreq_bytes_left(pgio->pg_dreq);

	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   rd_size,
					   IOMODE_READ,
					   GFP_KERNEL);
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio,
			   struct nfs_page *req, u64 wb_size)
{
	WARN_ON_ONCE(pgio->pg_lseg != NULL);

	if (req->wb_offset != req->wb_pgbase) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}

	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   wb_size,
					   IOMODE_RW,
					   GFP_NOFS);
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

void
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
		      const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (ld == NULL)
		nfs_pageio_init_read(pgio, inode, compl_ops);
	else
		nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops, server->rsize, 0);
}

void
pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
		       int ioflags,
		       const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (ld == NULL)
		nfs_pageio_init_write(pgio, inode, ioflags, compl_ops);
	else
		nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops, server->wsize, ioflags);
}

bool
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		     struct nfs_page *req)
{
	if (pgio->pg_lseg == NULL)
		return nfs_generic_pg_test(pgio, prev, req);

	/*
	 * Test if a nfs_page is fully contained in the pnfs_layout_range.
	 * Note that this test makes several assumptions:
	 * - that the previous nfs_page in the struct nfs_pageio_descriptor
	 *   is known to lie within the range.
	 * - that the nfs_page being tested is known to be contiguous with the
	 *   previous nfs_page.
	 * - Layout ranges are page aligned, so we only have to test the
	 *   start offset of the request.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 *
	 */
	return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
					    pgio->pg_lseg->pls_range.length);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
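
/*
 * Example: for a layout range covering offset 0, length 1048576 (1 MB),
 * end_offset() yields 1048576, the first byte outside the range. A
 * request starting in the last page of that range passes the test
 * above, while one starting at exactly offset 1048576 fails it and
 * forces a new coalescing boundary.
 */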
int pnfs_write_done_resend_to_mds(struct inode *inode,
				struct list_head *head,
				const struct nfs_pgio_completion_ops *compl_ops,
				struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor pgio;
	LIST_HEAD(failed);

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, compl_ops);
	pgio.pg_dreq = dreq;
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(&pgio, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(&pgio);

	if (!list_empty(&failed)) {
		/* For some reason our attempt to resend pages failed.
		 * Mark the overall send request as having failed, and
		 * let nfs_writeback_release_full deal with the error.
		 */
  1324. list_move(&failed, head);
  1325. return -EIO;
  1326. }
  1327. return 0;
  1328. }
  1329. EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
  1330. static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
  1331. {
  1332. struct nfs_pgio_header *hdr = data->header;
  1333. dprintk("pnfs write error = %d\n", hdr->pnfs_error);
  1334. if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
  1335. PNFS_LAYOUTRET_ON_ERROR) {
  1336. pnfs_return_layout(hdr->inode);
  1337. }
  1338. if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
  1339. data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
  1340. &hdr->pages,
  1341. hdr->completion_ops,
  1342. hdr->dreq);
  1343. }
  1344. /*
  1345. * Called by non rpc-based layout drivers
  1346. */
  1347. void pnfs_ld_write_done(struct nfs_write_data *data)
  1348. {
  1349. struct nfs_pgio_header *hdr = data->header;
  1350. if (!hdr->pnfs_error) {
  1351. pnfs_set_layoutcommit(data);
  1352. hdr->mds_ops->rpc_call_done(&data->task, data);
  1353. } else
  1354. pnfs_ld_handle_write_error(data);
  1355. hdr->mds_ops->rpc_release(data);
  1356. }
  1357. EXPORT_SYMBOL_GPL(pnfs_ld_write_done);
  1358. static void
  1359. pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
  1360. struct nfs_write_data *data)
  1361. {
  1362. struct nfs_pgio_header *hdr = data->header;
  1363. if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
  1364. list_splice_tail_init(&hdr->pages, &desc->pg_list);
  1365. nfs_pageio_reset_write_mds(desc);
  1366. desc->pg_recoalesce = 1;
  1367. }
  1368. nfs_writedata_release(data);
  1369. }
  1370. static enum pnfs_try_status
  1371. pnfs_try_to_write_data(struct nfs_write_data *wdata,
  1372. const struct rpc_call_ops *call_ops,
  1373. struct pnfs_layout_segment *lseg,
  1374. int how)
  1375. {
  1376. struct nfs_pgio_header *hdr = wdata->header;
  1377. struct inode *inode = hdr->inode;
  1378. enum pnfs_try_status trypnfs;
  1379. struct nfs_server *nfss = NFS_SERVER(inode);
  1380. hdr->mds_ops = call_ops;
  1381. dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
  1382. inode->i_ino, wdata->args.count, wdata->args.offset, how);
  1383. trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
  1384. if (trypnfs != PNFS_NOT_ATTEMPTED)
  1385. nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
  1386. dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
  1387. return trypnfs;
  1388. }
static void
pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;

	desc->pg_lseg = NULL;
	while (!list_empty(head)) {
		enum pnfs_try_status trypnfs;

		data = list_first_entry(head, struct nfs_write_data, list);
		list_del_init(&data->list);

		trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
		if (trypnfs == PNFS_NOT_ATTEMPTED)
			pnfs_write_through_mds(desc, data);
	}
	pnfs_put_lseg(lseg);
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_writehdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_writehdr_free);

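/*
 * Coalesce the descriptor's pages into write requests and issue them via
 * pnfs_do_multiple_writes().  The extra reference on the header is dropped
 * at the end, so ->completion() runs only once the flush has been
 * dispatched and all I/O has released its references.
 */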
int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_write_header *whdr;
	struct nfs_pgio_header *hdr;
	int ret;

	whdr = nfs_writehdr_alloc();
	if (!whdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return -ENOMEM;
	}
	hdr = &whdr->header;
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_flush(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);

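/*
 * Resend the given read requests through the MDS.  Requests that cannot
 * be queued are moved back onto @head and the resend fails with -EIO.
 */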
int pnfs_read_done_resend_to_mds(struct inode *inode,
				struct list_head *head,
				const struct nfs_pgio_completion_ops *compl_ops,
				struct nfs_direct_req *dreq)
{
	struct nfs_pageio_descriptor pgio;
	LIST_HEAD(failed);

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, inode, compl_ops);
	pgio.pg_dreq = dreq;
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(&pgio, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(&pgio);

	if (!list_empty(&failed)) {
		list_move(&failed, head);
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

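/*
 * Handle an error reported by the layout driver on a pNFS read: optionally
 * return the layout (PNFS_LAYOUTRET_ON_ERROR), then resend the pages
 * through the MDS, guarded by NFS_IOHDR_REDO so it happens only once.
 */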
static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
							&hdr->pages,
							hdr->completion_ops,
							hdr->dreq);
}

/*
 * Called by non-RPC-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_read_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (likely(!hdr->pnfs_error)) {
		__nfs4_read_done_cb(data);
		hdr->mds_ops->rpc_call_done(&data->task, data);
	} else
		pnfs_ld_handle_read_error(data);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

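/*
 * The layout driver declined this read.  Requeue the pages on the
 * descriptor and reset it so they are re-coalesced and read through
 * the MDS instead.
 */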
static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		      struct nfs_read_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_read_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_readdata_release(data);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
		      const struct rpc_call_ops *call_ops,
		      struct pnfs_layout_segment *lseg)
{
	struct nfs_pgio_header *hdr = rdata->header;
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

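/*
 * Hand each queued nfs_read_data to the layout driver in turn; anything
 * the driver does not attempt falls back to being read through the MDS.
 */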
static void
pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *head)
{
	struct nfs_read_data *data;
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;

	desc->pg_lseg = NULL;
	while (!list_empty(head)) {
		enum pnfs_try_status trypnfs;

		data = list_first_entry(head, struct nfs_read_data, list);
		list_del_init(&data->list);

		trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
		if (trypnfs == PNFS_NOT_ATTEMPTED)
			pnfs_read_through_mds(desc, data);
	}
	pnfs_put_lseg(lseg);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_readhdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_readhdr_free);

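/*
 * Coalesce the descriptor's pages into read requests and issue them via
 * pnfs_do_multiple_reads().  Mirrors pnfs_generic_pg_writepages() above.
 */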
int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_read_header *rhdr;
	struct nfs_pgio_header *hdr;
	int ret;

	rhdr = nfs_readhdr_alloc();
	if (!rhdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return -ENOMEM;
	}
	hdr = &rhdr->header;
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_pagein(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_multiple_reads(desc, &hdr->rpc_list);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_and_clear_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

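/*
 * Drop the lseg references taken in pnfs_set_layoutcommit() and let any
 * waiter start a new LAYOUTCOMMIT.
 */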
static void pnfs_list_write_lseg_done(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg, *tmp;
	unsigned long *bitlock = &NFS_I(inode)->flags;

	/* Matched by references in pnfs_set_layoutcommit */
	list_for_each_entry_safe(lseg, tmp, listp, pls_lc_list) {
		list_del_init(&lseg->pls_lc_list);
		pnfs_put_lseg(lseg);
	}

	clear_bit_unlock(NFS_INO_LAYOUTCOMMITTING, bitlock);
	smp_mb__after_clear_bit();
	wake_up_bit(bitlock, NFS_INO_LAYOUTCOMMITTING);
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

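/*
 * Record that this inode needs a LAYOUTCOMMIT: pin the lseg the write
 * used, track the last write byte in plh_lwb, and mark the inode dirty
 * so that a later sync of the inode can send the LAYOUTCOMMIT.
 */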
void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
	struct nfs_pgio_header *hdr = wdata->header;
	struct inode *inode = hdr->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos = wdata->mds_offset + wdata->res.count;
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(hdr->lseg);
	}
	if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, hdr->lseg, nfsi->layout->plh_lwb);

	/* If pnfs_layoutcommit_inode() runs between the i_lock release above
	 * and the mark_inode_dirty_sync() below, the layoutcommit it triggers
	 * will be a no-op because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
	pnfs_list_write_lseg_done(data->args.inode, &data->lseg_list);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status = 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return 0;

	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data) {
		status = -ENOMEM;
		goto out;
	}

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_free;

	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync) {
			status = -EAGAIN;
			goto out_free;
		}
		status = wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
					  nfs_wait_bit_killable, TASK_KILLABLE);
		if (status)
			goto out_free;
	}

	INIT_LIST_HEAD(&data->lseg_list);
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		clear_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags);
		spin_unlock(&inode->i_lock);
		wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING);
		goto out_free;
	}

	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;
	nfsi->layout->plh_lwb = 0;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_free:
	kfree(data);
	goto out;
}

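/*
 * Illustrative sketch (not part of this file): the expected caller is the
 * NFSv4 ->write_inode path, which would flush a pending LAYOUTCOMMIT
 * roughly like this, with "sync" derived from the writeback mode:
 *
 *	if (ret == 0)
 *		ret = pnfs_layoutcommit_inode(inode,
 *				wbc->sync_mode == WB_SYNC_ALL);
 */

/* Allocate a zeroed nfs4_threshold for mdsthreshold hints; NULL on failure. */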
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}