/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement. The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
#define PNFS_LAYOUTGET_RETRY_TIMEOUT (120*HZ)

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);
/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	if (local != NULL && !try_module_get(local->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		local = NULL;
	}
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		if (nfss->pnfs_curr_ld->clear_layoutdriver)
			nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		/* Decrement the MDS count. Purge the deviceid cache if zero */
		if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
			nfs4_deviceid_purge_client(nfss->nfs_client);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
		      u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
			__func__, id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver
	    && ld_type->set_layoutdriver(server, mntfh)) {
		printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
			"driver %u.\n", __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	/* Bump the MDS count */
	atomic_inc(&server->nfs_client->cl_mds_count);

	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "NFS: %s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
			__func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);
/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
pnfs_get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
	return ld->alloc_layout_hdr(ino, gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_server *server = NFS_SERVER(lo->plh_inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (!list_empty(&lo->plh_layouts)) {
		struct nfs_client *clp = server->nfs_client;

		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	put_rpccred(lo->plh_lc_cred);
	return ld->free_layout_hdr(lo);
}

static void
pnfs_detach_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct nfs_inode *nfsi = NFS_I(lo->plh_inode);

	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	nfsi->layout = NULL;
	/* Reset MDS Threshold I/O counters */
	nfsi->write_io = 0;
	nfsi->read_io = 0;
}

void
pnfs_put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
		pnfs_free_layout_hdr(lo);
	}
}

static int
pnfs_iomode_to_fail_bit(u32 iomode)
{
	return iomode == IOMODE_RW ?
		NFS_LAYOUT_RW_FAILED : NFS_LAYOUT_RO_FAILED;
}

static void
pnfs_layout_set_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	lo->plh_retry_timestamp = jiffies;
	if (test_and_set_bit(fail_bit, &lo->plh_flags))
		atomic_inc(&lo->plh_refcount);
}

static void
pnfs_layout_clear_fail_bit(struct pnfs_layout_hdr *lo, int fail_bit)
{
	if (test_and_clear_bit(fail_bit, &lo->plh_flags))
		atomic_dec(&lo->plh_refcount);
}

static void
pnfs_layout_io_set_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct inode *inode = lo->plh_inode;
	struct pnfs_layout_range range = {
		.iomode = iomode,
		.offset = 0,
		.length = NFS4_MAX_UINT64,
	};
	LIST_HEAD(head);

	spin_lock(&inode->i_lock);
	pnfs_layout_set_fail_bit(lo, pnfs_iomode_to_fail_bit(iomode));
	pnfs_mark_matching_lsegs_invalid(lo, &head, &range);
	spin_unlock(&inode->i_lock);
	pnfs_free_lseg_list(&head);
	dprintk("%s Setting layout IOMODE_%s fail bit\n", __func__,
			iomode == IOMODE_RW ? "RW" : "READ");
}

static bool
pnfs_layout_io_test_failed(struct pnfs_layout_hdr *lo, u32 iomode)
{
	unsigned long start, end;
	int fail_bit = pnfs_iomode_to_fail_bit(iomode);

	if (test_bit(fail_bit, &lo->plh_flags) == 0)
		return false;
	end = jiffies;
	start = end - PNFS_LAYOUTGET_RETRY_TIMEOUT;
	if (!time_in_range(lo->plh_retry_timestamp, start, end)) {
		/* It is time to retry the failed layoutgets */
		pnfs_layout_clear_fail_bit(lo, fail_bit);
		return false;
	}
	return true;
}
static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	INIT_LIST_HEAD(&lseg->pls_lc_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void pnfs_free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
}

static void
pnfs_layout_remove_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lo->plh_inode;

	WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	/* Matched by pnfs_get_layout_hdr in pnfs_layout_insert_lseg */
	atomic_dec(&lo->plh_refcount);
	if (list_empty(&lo->plh_segs))
		clear_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags);
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
pnfs_put_lseg(struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_hdr *lo;
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	lo = lseg->pls_layout;
	inode = lo->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		pnfs_get_layout_hdr(lo);
		pnfs_layout_remove_lseg(lo, lseg);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg(lseg);
		pnfs_put_layout_hdr(lo);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_lseg);

static inline u64
end_offset(u64 start, u64 len)
{
	u64 end;

	end = start + len;
	return end >= start ? end : NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
	u64 end;

	BUG_ON(!len);
	end = start + len;
	return end > start ? end - 1 : NFS4_MAX_UINT64;
}
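
/* Worked example of the two range helpers above: end_offset() returns
 * the first byte past the range, so end_offset(4096, 4096) == 8192,
 * while last_byte_offset(4096, 4096) == 8191. If start + len wraps
 * past 2^64 - 1 (end falls below start), both clamp to NFS4_MAX_UINT64,
 * which the range code treats as "through end of file".
 */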
/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static inline int
lo_seg_contained(struct pnfs_layout_range *l1,
		 struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (start1 <= start2) && (end1 >= end2);
}

/*
 * do l1 and l2 intersect?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static inline int
lo_seg_intersecting(struct pnfs_layout_range *l1,
		    struct pnfs_layout_range *l2)
{
	u64 start1 = l1->offset;
	u64 end1 = end_offset(start1, l1->length);
	u64 start2 = l2->offset;
	u64 end2 = end_offset(start2, l2->length);

	return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
	       (end2 == NFS4_MAX_UINT64 || end2 > start1);
}
static bool
should_free_lseg(struct pnfs_layout_range *lseg_range,
		 struct pnfs_layout_range *recall_range)
{
	return (recall_range->iomode == IOMODE_ANY ||
		lseg_range->iomode == recall_range->iomode) &&
	       lo_seg_intersecting(lseg_range, recall_range);
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
		struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list. It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (atomic_dec_and_test(&lseg->pls_refcount)) {
			pnfs_layout_remove_lseg(lseg->pls_layout, lseg);
			list_add(&lseg->pls_list, tmp_list);
			rv = 1;
		}
	}
	return rv;
}

/* Returns the count of matching invalid lsegs remaining in the list
 * after the call.
 */
int
pnfs_mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    struct pnfs_layout_range *recall_range)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs))
		return 0;
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (!recall_range ||
		    should_free_lseg(&lseg->pls_range, recall_range)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}
/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;

	if (list_empty(free_me))
		return;

	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		pnfs_free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
		pnfs_get_layout_hdr(lo);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RO_FAILED);
		pnfs_layout_clear_fail_bit(lo, NFS_LAYOUT_RW_FAILED);
		spin_unlock(&nfsi->vfs_inode.i_lock);
		pnfs_free_lseg_list(&tmp_list);
		pnfs_put_layout_hdr(lo);
	} else
		spin_unlock(&nfsi->vfs_inode.i_lock);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);
/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	struct nfs_server *server;
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	nfs4_deviceid_mark_client_invalid(clp);
	nfs4_deviceid_purge_client(clp);

	spin_lock(&clp->cl_lock);
	rcu_read_lock();
	list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
		if (!list_empty(&server->layouts))
			list_splice_init(&server->layouts, &tmp_list);
	}
	rcu_read_unlock();
	spin_unlock(&clp->cl_lock);

	while (!list_empty(&tmp_list)) {
		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
				plh_layouts);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		list_del_init(&lo->plh_layouts);
		pnfs_destroy_layout(NFS_I(lo->plh_inode));
	}
}
/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq;

	oldseq = be32_to_cpu(lo->plh_stateid.seqid);
	newseq = be32_to_cpu(new->seqid);
	if ((int)(newseq - oldseq) > 0) {
		nfs4_stateid_copy(&lo->plh_stateid, new);
		if (update_barrier) {
			u32 new_barrier = be32_to_cpu(new->seqid);

			if ((int)(new_barrier - lo->plh_barrier))
				lo->plh_barrier = new_barrier;
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids. It needs to be
			 * within 2**31 to count as "behind", so if it
			 * gets too near that limit, give us a little leeway
			 * and bring it to within 2**30.
			 * NOTE - and yes, this is all unsigned arithmetic.
			 */
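			/* Worked example of the clamp below: with
			 * newseq = 0x70000000 and plh_barrier = 0x1,
			 * newseq - plh_barrier = 0x6fffffff, which
			 * exceeds 3 << 29 (0x60000000), so the barrier
			 * is pulled up to newseq - (1 << 30), i.e.
			 * 0x30000000, exactly 2**30 behind newseq.
			 */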
			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
				lo->plh_barrier = newseq - (1 << 30);
		}
	}
}
/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
			int lget)
{
	if ((stateid) &&
	    (int)(lo->plh_barrier - be32_to_cpu(stateid->seqid)) >= 0)
		return true;
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
		status = -EAGAIN;
	} else if (list_empty(&lo->plh_segs)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			nfs4_stateid_copy(dst, &open_state->stateid);
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		nfs4_stateid_copy(dst, &lo->plh_stateid);
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}
/*
 * Get layout from server.
 *    For now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	   struct nfs_open_context *ctx,
	   struct pnfs_layout_range *range,
	   gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg;

	dprintk("--> %s\n", __func__);

	BUG_ON(ctx == NULL);
	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	lgp->args.minlength = PAGE_CACHE_SIZE;
	if (lgp->args.minlength > range->length)
		lgp->args.minlength = range->length;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range = *range;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->gfp_flags = gfp_flags;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	lseg = nfs4_proc_layoutget(lgp, gfp_flags);
	if (IS_ERR(lseg)) {
		switch (PTR_ERR(lseg)) {
		case -ENOMEM:
		case -ERESTARTSYS:
			break;
		default:
			/* remember that LAYOUTGET failed and suspend trying */
			pnfs_layout_io_set_failed(lo, range->iomode);
		}
		return NULL;
	}

	return lseg;
}
/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
	struct pnfs_layout_hdr *lo = NULL;
	struct nfs_inode *nfsi = NFS_I(ino);
	LIST_HEAD(tmp_list);
	struct nfs4_layoutreturn *lrp;
	nfs4_stateid stateid;
	int status = 0, empty;

	dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

	spin_lock(&ino->i_lock);
	lo = nfsi->layout;
	if (!lo) {
		spin_unlock(&ino->i_lock);
		dprintk("NFS: %s no layout to return\n", __func__);
		goto out;
	}
	stateid = nfsi->layout->plh_stateid;
	/* Reference matched in nfs4_layoutreturn_release */
	pnfs_get_layout_hdr(lo);
	empty = list_empty(&lo->plh_segs);
	pnfs_mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
	/* Don't send a LAYOUTRETURN if list was initially empty */
	if (empty) {
		spin_unlock(&ino->i_lock);
		pnfs_put_layout_hdr(lo);
		dprintk("NFS: %s no layout segments to return\n", __func__);
		goto out;
	}
	lo->plh_block_lgets++;
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);

	WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));

	lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
	if (unlikely(lrp == NULL)) {
		status = -ENOMEM;
		pnfs_layout_io_set_failed(lo, IOMODE_RW);
		pnfs_layout_io_set_failed(lo, IOMODE_READ);
		pnfs_put_layout_hdr(lo);
		goto out;
	}

	lrp->args.stateid = stateid;
	lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
	lrp->args.inode = ino;
	lrp->args.layout = lo;
	lrp->clp = NFS_SERVER(ino)->nfs_client;

	status = nfs4_proc_layoutreturn(lrp);
out:
	dprintk("<-- %s status: %d\n", __func__, status);
	return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);
bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	pnfs_get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	if (atomic_dec_and_test(&lo->plh_refcount)) {
		pnfs_detach_layout_hdr(lo);
		spin_unlock(&ino->i_lock);
		pnfs_free_layout_hdr(lo);
	} else
		spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if ((int)(barrier - lo->plh_barrier) > 0)
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}
bool pnfs_roc_drain(struct inode *ino, u32 *barrier, struct rpc_task *task)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg;
	u32 current_seqid;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			rpc_sleep_on(&NFS_SERVER(ino)->roc_rpcwaitq, task, NULL);
			found = true;
			goto out;
		}
	lo = nfsi->layout;
	current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

	/* Since close does not return a layout stateid for use as
	 * a barrier, we choose the worst-case barrier.
	 */
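	/* For example, with current_seqid == 7 and two LAYOUTGETs
	 * still outstanding, each outstanding reply may bump the
	 * seqid once, so the barrier below becomes 7 + 2 = 9 to
	 * cover every stateid the server may already have issued.
	 */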
	*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
out:
	spin_unlock(&ino->i_lock);
	return found;
}
/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(struct pnfs_layout_range *l1,
	   struct pnfs_layout_range *l2)
{
	s64 d;

	/* high offset > low offset */
	d = l1->offset - l2->offset;
	if (d)
		return d;

	/* short length > long length */
	d = l2->length - l1->length;
	if (d)
		return d;

	/* read > read/write */
	return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}
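
/* The resulting cache order is ascending offset, then descending
 * length, then RW before READ. E.g. for two whole-file segments at
 * offset 0, the RW lseg sorts ahead of the READ lseg, and both sort
 * ahead of any lseg starting at a higher offset.
 */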
static void
pnfs_layout_insert_lseg(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		goto out;
	}
	list_add_tail(&lseg->pls_list, &lo->plh_segs);
	dprintk("%s: inserted lseg %p "
		"iomode %d offset %llu length %llu at tail\n",
		__func__, lseg, lseg->pls_range.iomode,
		lseg->pls_range.offset, lseg->pls_range.length);
out:
	pnfs_get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
		      struct nfs_open_context *ctx,
		      gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_recall);
	lo->plh_inode = ino;
	lo->plh_lc_cred = get_rpccred(ctx->state->owner->so_cred);
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
		       struct nfs_open_context *ctx,
		       gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	if (nfsi->layout) {
		pnfs_get_layout_hdr(nfsi->layout);
		return nfsi->layout;
	}
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL))	/* Won the race? */
		nfsi->layout = new;
	else
		pnfs_free_layout_hdr(new);
	return nfsi->layout;
}
/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static int
is_matching_lseg(struct pnfs_layout_range *ls_range,
		 struct pnfs_layout_range *range)
{
	struct pnfs_layout_range range1;

	if ((range->iomode == IOMODE_RW &&
	     ls_range->iomode != IOMODE_RW) ||
	    !lo_seg_intersecting(ls_range, range))
		return 0;

	/* range1 covers only the first byte in the range */
	range1 = *range;
	range1.length = 1;
	return lo_seg_contained(ls_range, &range1);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
		struct pnfs_layout_range *range)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    is_matching_lseg(&lseg->pls_range, range)) {
			ret = pnfs_get_lseg(lseg);
			break;
		}
		if (lseg->pls_range.offset > range->offset)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}
/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server. If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server. If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
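/* Concretely (values are illustrative): with THRESHOLD_RD and
 * THRESHOLD_RD_IO both set in t->bm, rd_sz = 32768 and rd_io_sz =
 * 65536, a READ of a 4096-byte file whose cumulative nfsi->read_io is
 * still below 65536 falls under both thresholds, so the function below
 * returns true and the I/O goes to the MDS rather than over pNFS.
 */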
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
				     struct inode *ino, int iomode)
{
	struct nfs4_threshold *t = ctx->mdsthreshold;
	struct nfs_inode *nfsi = NFS_I(ino);
	loff_t fsize = i_size_read(ino);
	bool size = false, size_set = false, io = false, io_set = false, ret = false;

	if (t == NULL)
		return ret;

	dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
		__func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

	switch (iomode) {
	case IOMODE_READ:
		if (t->bm & THRESHOLD_RD) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->rd_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_RD_IO) {
			dprintk("%s nfsi->read_io %llu\n", __func__,
				nfsi->read_io);
			io_set = true;
			if (nfsi->read_io < t->rd_io_sz)
				io = true;
		}
		break;
	case IOMODE_RW:
		if (t->bm & THRESHOLD_WR) {
			dprintk("%s fsize %llu\n", __func__, fsize);
			size_set = true;
			if (fsize < t->wr_sz)
				size = true;
		}
		if (t->bm & THRESHOLD_WR_IO) {
			dprintk("%s nfsi->write_io %llu\n", __func__,
				nfsi->write_io);
			io_set = true;
			if (nfsi->write_io < t->wr_io_sz)
				io = true;
		}
		break;
	}

	if (size_set && io_set) {
		if (size && io)
			ret = true;
	} else if (size || io)
		ret = true;

	dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
	return ret;
}
/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   loff_t pos,
		   u64 count,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct pnfs_layout_range arg = {
		.iomode = iomode,
		.offset = pos,
		.length = count,
	};
	unsigned pg_offset;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs_client *clp = server->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first = false;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		goto out;

	if (pnfs_within_mdsthreshold(ctx, ino, iomode))
		goto out;

	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
	if (lo == NULL) {
		spin_unlock(&ino->i_lock);
		goto out;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (pnfs_layout_io_test_failed(lo, iomode))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, &arg);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, NULL, 0))
		goto out_unlock;
	atomic_inc(&lo->plh_outstanding);

	if (list_empty(&lo->plh_segs))
		first = true;

	spin_unlock(&ino->i_lock);
	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		BUG_ON(!list_empty(&lo->plh_layouts));
		list_add_tail(&lo->plh_layouts, &server->layouts);
		spin_unlock(&clp->cl_lock);
	}

	pg_offset = arg.offset & ~PAGE_CACHE_MASK;
	if (pg_offset) {
		arg.offset -= pg_offset;
		arg.length += pg_offset;
	}
	if (arg.length != NFS4_MAX_UINT64)
		arg.length = PAGE_CACHE_ALIGN(arg.length);

	lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
	atomic_dec(&lo->plh_outstanding);
out_put_layout_hdr:
	pnfs_put_layout_hdr(lo);
out:
	dprintk("%s: inode %s/%llu pNFS layout segment %s for "
			"(%s, offset: %llu, length: %llu)\n",
			__func__, ino->i_sb->s_id,
			(unsigned long long)NFS_FILEID(ino),
			lseg == NULL ? "not found" : "found",
			iomode == IOMODE_RW ? "read/write" : "read-only",
			(unsigned long long)pos,
			(unsigned long long)count);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out_put_layout_hdr;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);
struct pnfs_layout_segment *
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
		       __func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}

	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	pnfs_get_lseg(lseg);
	pnfs_layout_insert_lseg(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);
	spin_unlock(&ino->i_lock);
	return lseg;
out:
	return ERR_PTR(status);

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}
void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	BUG_ON(pgio->pg_lseg != NULL);

	if (req->wb_offset != req->wb_pgbase) {
		nfs_pageio_reset_read_mds(pgio);
		return;
	}
	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   req->wb_bytes,
					   IOMODE_READ,
					   GFP_KERNEL);
	/* If no lseg, fall back to read through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
	BUG_ON(pgio->pg_lseg != NULL);

	if (req->wb_offset != req->wb_pgbase) {
		nfs_pageio_reset_write_mds(pgio);
		return;
	}
	pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
					   req->wb_context,
					   req_offset(req),
					   req->wb_bytes,
					   IOMODE_RW,
					   GFP_NOFS);
	/* If no lseg, fall back to write through mds */
	if (pgio->pg_lseg == NULL)
		nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

void
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
		      const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (ld == NULL)
		nfs_pageio_init_read(pgio, inode, compl_ops);
	else
		nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops, server->rsize, 0);
}

void
pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
		       int ioflags,
		       const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_server *server = NFS_SERVER(inode);
	struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

	if (ld == NULL)
		nfs_pageio_init_write(pgio, inode, ioflags, compl_ops);
	else
		nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops, server->wsize, ioflags);
}

bool
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
		     struct nfs_page *req)
{
	if (pgio->pg_lseg == NULL)
		return nfs_generic_pg_test(pgio, prev, req);

	/*
	 * Test if a nfs_page is fully contained in the pnfs_layout_range.
	 * Note that this test makes several assumptions:
	 * - that the previous nfs_page in the struct nfs_pageio_descriptor
	 *   is known to lie within the range.
	 * - that the nfs_page being tested is known to be contiguous with the
	 *   previous nfs_page.
	 * - Layout ranges are page aligned, so we only have to test the
	 *   start offset of the request.
	 *
	 * Please also note that 'end_offset' is actually the offset of the
	 * first byte that lies outside the pnfs_layout_range. FIXME?
	 *
	 */
	return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
					    pgio->pg_lseg->pls_range.length);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);
int pnfs_write_done_resend_to_mds(struct inode *inode,
				struct list_head *head,
				const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_pageio_descriptor pgio;
	LIST_HEAD(failed);

	/* Resend all requests through the MDS */
	nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, compl_ops);
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(&pgio, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(&pgio);

	if (!list_empty(&failed)) {
		/* For some reason our attempt to resend pages failed.
		 * Mark the overall send request as having failed, and let
		 * nfs_writeback_release_full deal with the error.
		 */
		list_move(&failed, head);
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);
static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	dprintk("pnfs write error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
							&hdr->pages,
							hdr->completion_ops);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_write_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (!hdr->pnfs_error) {
		pnfs_set_layoutcommit(data);
		hdr->mds_ops->rpc_call_done(&data->task, data);
	} else
		pnfs_ld_handle_write_error(data);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_write_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_write_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_writedata_release(data);
}

static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_write_data *wdata,
			const struct rpc_call_ops *call_ops,
			struct pnfs_layout_segment *lseg,
			int how)
{
	struct nfs_pgio_header *hdr = wdata->header;
	struct inode *inode = hdr->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	hdr->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, wdata->args.count, wdata->args.offset, how);
	trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *head, int how)
{
	struct nfs_write_data *data;
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;

	desc->pg_lseg = NULL;
	while (!list_empty(head)) {
		enum pnfs_try_status trypnfs;

		data = list_first_entry(head, struct nfs_write_data, list);
		list_del_init(&data->list);

		trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
		if (trypnfs == PNFS_NOT_ATTEMPTED)
			pnfs_write_through_mds(desc, data);
	}
	pnfs_put_lseg(lseg);
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_writehdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_writehdr_free);

int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_write_header *whdr;
	struct nfs_pgio_header *hdr;
	int ret;

	whdr = nfs_writehdr_alloc();
	if (!whdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return -ENOMEM;
	}
	hdr = &whdr->header;
	nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_flush(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);
int pnfs_read_done_resend_to_mds(struct inode *inode,
				struct list_head *head,
				const struct nfs_pgio_completion_ops *compl_ops)
{
	struct nfs_pageio_descriptor pgio;
	LIST_HEAD(failed);

	/* Resend all requests through the MDS */
	nfs_pageio_init_read(&pgio, inode, compl_ops);
	while (!list_empty(head)) {
		struct nfs_page *req = nfs_list_entry(head->next);

		nfs_list_remove_request(req);
		if (!nfs_pageio_add_request(&pgio, req))
			nfs_list_add_request(req, &failed);
	}
	nfs_pageio_complete(&pgio);

	if (!list_empty(&failed)) {
		list_move(&failed, head);
		return -EIO;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);
static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	dprintk("pnfs read error = %d\n", hdr->pnfs_error);
	if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
	    PNFS_LAYOUTRET_ON_ERROR) {
		clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
		pnfs_return_layout(hdr->inode);
	}
	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
		data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
							&hdr->pages,
							hdr->completion_ops);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_read_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (likely(!hdr->pnfs_error)) {
		__nfs4_read_done_cb(data);
		hdr->mds_ops->rpc_call_done(&data->task, data);
	} else
		pnfs_ld_handle_read_error(data);
	hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
		struct nfs_read_data *data)
{
	struct nfs_pgio_header *hdr = data->header;

	if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
		list_splice_tail_init(&hdr->pages, &desc->pg_list);
		nfs_pageio_reset_read_mds(desc);
		desc->pg_recoalesce = 1;
	}
	nfs_readdata_release(data);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
		       const struct rpc_call_ops *call_ops,
		       struct pnfs_layout_segment *lseg)
{
	struct nfs_pgio_header *hdr = rdata->header;
	struct inode *inode = hdr->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	hdr->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
	if (trypnfs != PNFS_NOT_ATTEMPTED)
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

static void
pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *head)
{
	struct nfs_read_data *data;
	const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
	struct pnfs_layout_segment *lseg = desc->pg_lseg;

	desc->pg_lseg = NULL;
	while (!list_empty(head)) {
		enum pnfs_try_status trypnfs;

		data = list_first_entry(head, struct nfs_read_data, list);
		list_del_init(&data->list);

		trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
		if (trypnfs == PNFS_NOT_ATTEMPTED)
			pnfs_read_through_mds(desc, data);
	}
	pnfs_put_lseg(lseg);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
	pnfs_put_lseg(hdr->lseg);
	nfs_readhdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_readhdr_free);

int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
	struct nfs_read_header *rhdr;
	struct nfs_pgio_header *hdr;
	int ret;

	rhdr = nfs_readhdr_alloc();
	if (!rhdr) {
		desc->pg_completion_ops->error_cleanup(&desc->pg_list);
		ret = -ENOMEM;
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
		return ret;
	}
	hdr = &rhdr->header;
	nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
	hdr->lseg = pnfs_get_lseg(desc->pg_lseg);
	atomic_inc(&hdr->refcnt);
	ret = nfs_generic_pagein(desc, hdr);
	if (ret != 0) {
		pnfs_put_lseg(desc->pg_lseg);
		desc->pg_lseg = NULL;
	} else
		pnfs_do_multiple_reads(desc, &hdr->rpc_list);
	if (atomic_dec_and_test(&hdr->refcnt))
		hdr->completion_ops->completion(hdr);
	return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);
/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
	struct pnfs_layout_segment *lseg;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
		if (lseg->pls_range.iomode == IOMODE_RW &&
		    test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
			list_add(&lseg->pls_lc_list, listp);
	}
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
	pnfs_layout_io_set_failed(lseg->pls_layout, lseg->pls_range.iomode);
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
	struct nfs_pgio_header *hdr = wdata->header;
	struct inode *inode = hdr->inode;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos = wdata->mds_offset + wdata->res.count;
	bool mark_as_dirty = false;

	spin_lock(&inode->i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, inode->i_ino);
	}
	if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
		/* references matched in nfs4_layoutcommit_release */
		pnfs_get_lseg(hdr->lseg);
	}
	if (end_pos > nfsi->layout->plh_lwb)
		nfsi->layout->plh_lwb = end_pos;
	spin_unlock(&inode->i_lock);
	dprintk("%s: lseg %p end_pos %llu\n",
		__func__, hdr->lseg, nfsi->layout->plh_lwb);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
	struct nfs_server *nfss = NFS_SERVER(data->args.inode);

	if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
		nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
}
/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	loff_t end_pos;
	int status = 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return 0;

	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data) {
		status = -ENOMEM;
		goto out;
	}

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		goto out_free;

	if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
		if (!sync) {
			status = -EAGAIN;
			goto out_free;
		}
		status = wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
					nfs_wait_bit_killable, TASK_KILLABLE);
		if (status)
			goto out_free;
	}

	INIT_LIST_HEAD(&data->lseg_list);
	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		clear_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags);
		spin_unlock(&inode->i_lock);
		wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING);
		goto out_free;
	}

	pnfs_list_write_lseg(inode, &data->lseg_list);

	end_pos = nfsi->layout->plh_lwb;
	nfsi->layout->plh_lwb = 0;

	nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	status = nfs4_proc_layoutcommit(data, sync);
out:
	if (status)
		mark_inode_dirty_sync(inode);
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
out_free:
	kfree(data);
	goto out;
}
struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
	struct nfs4_threshold *thp;

	thp = kzalloc(sizeof(*thp), GFP_NOFS);
	if (!thp) {
		dprintk("%s mdsthreshold allocation failed\n", __func__);
		return NULL;
	}
	return thp;
}