/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement. The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include <linux/nfs_page.h>
#include <linux/module.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
                if (local->id == id)
                        goto out;
        local = NULL;
out:
        dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
        return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        spin_lock(&pnfs_spinlock);
        local = find_pnfs_driver_locked(id);
        if (local != NULL && !try_module_get(local->owner)) {
                dprintk("%s: Could not grab reference on module\n", __func__);
                local = NULL;
        }
        spin_unlock(&pnfs_spinlock);
        return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
        if (nfss->pnfs_curr_ld) {
                if (nfss->pnfs_curr_ld->clear_layoutdriver)
                        nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
                /* Decrement the MDS count. Purge the deviceid cache if zero */
                if (atomic_dec_and_test(&nfss->nfs_client->cl_mds_count))
                        nfs4_deviceid_purge_client(nfss->nfs_client);
                module_put(nfss->pnfs_curr_ld->owner);
        }
        nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, const struct nfs_fh *mntfh,
                      u32 id)
{
        struct pnfs_layoutdriver_type *ld_type = NULL;

        if (id == 0)
                goto out_no_driver;
        if (!(server->nfs_client->cl_exchange_flags &
                 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
                printk(KERN_ERR "NFS: %s: id %u cl_exchange_flags 0x%x\n",
                        __func__, id, server->nfs_client->cl_exchange_flags);
                goto out_no_driver;
        }
        ld_type = find_pnfs_driver(id);
        if (!ld_type) {
                request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
                ld_type = find_pnfs_driver(id);
                if (!ld_type) {
                        dprintk("%s: No pNFS module found for %u.\n",
                                __func__, id);
                        goto out_no_driver;
                }
        }
        server->pnfs_curr_ld = ld_type;
        if (ld_type->set_layoutdriver
            && ld_type->set_layoutdriver(server, mntfh)) {
                printk(KERN_ERR "NFS: %s: Error initializing pNFS layout "
                        "driver %u.\n", __func__, id);
                module_put(ld_type->owner);
                goto out_no_driver;
        }
        /* Bump the MDS count */
        atomic_inc(&server->nfs_client->cl_mds_count);

        dprintk("%s: pNFS module for %u set\n", __func__, id);
        return;

out_no_driver:
        dprintk("%s: Using NFSv4 I/O\n", __func__);
        server->pnfs_curr_ld = NULL;
}

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        int status = -EINVAL;
        struct pnfs_layoutdriver_type *tmp;

        if (ld_type->id == 0) {
                printk(KERN_ERR "NFS: %s id 0 is reserved\n", __func__);
                return status;
        }
        if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
                printk(KERN_ERR "NFS: %s Layout driver must provide "
                       "alloc_lseg and free_lseg.\n", __func__);
                return status;
        }

        spin_lock(&pnfs_spinlock);
        tmp = find_pnfs_driver_locked(ld_type->id);
        if (!tmp) {
                list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
                status = 0;
                dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
                        ld_type->name);
        } else {
                printk(KERN_ERR "NFS: %s Module with id %d already loaded!\n",
                        __func__, ld_type->id);
        }
        spin_unlock(&pnfs_spinlock);

        return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
        spin_lock(&pnfs_spinlock);
        list_del(&ld_type->pnfs_tblid);
        spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

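/*
 * Example (illustrative only; the mylayout_* names are hypothetical): a
 * layout driver registers itself from its module init hook. The ops
 * structure must carry a non-zero id plus alloc_lseg and free_lseg, or
 * pnfs_register_layoutdriver() rejects it with -EINVAL.
 *
 *      static struct pnfs_layoutdriver_type mylayout_type = {
 *              .id          = LAYOUT_NFSV4_1_FILES, /* type implemented */
 *              .name        = "LAYOUT_NFSV4_1_FILES",
 *              .owner       = THIS_MODULE,
 *              .alloc_lseg  = mylayout_alloc_lseg,
 *              .free_lseg   = mylayout_free_lseg,
 *      };
 *
 *      static int __init mylayout_init(void)
 *      {
 *              return pnfs_register_layoutdriver(&mylayout_type);
 *      }
 *
 *      static void __exit mylayout_exit(void)
 *      {
 *              pnfs_unregister_layoutdriver(&mylayout_type);
 *      }
 */
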
/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
get_layout_hdr(struct pnfs_layout_hdr *lo)
{
        atomic_inc(&lo->plh_refcount);
}

static struct pnfs_layout_hdr *
pnfs_alloc_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(ino)->pnfs_curr_ld;
        return ld->alloc_layout_hdr ? ld->alloc_layout_hdr(ino, gfp_flags) :
                kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
}

static void
pnfs_free_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct pnfs_layoutdriver_type *ld = NFS_SERVER(lo->plh_inode)->pnfs_curr_ld;
        put_rpccred(lo->plh_lc_cred);
        return ld->alloc_layout_hdr ? ld->free_layout_hdr(lo) : kfree(lo);
}

static void
destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
        dprintk("%s: freeing layout cache %p\n", __func__, lo);
        BUG_ON(!list_empty(&lo->plh_layouts));
        NFS_I(lo->plh_inode)->layout = NULL;
        pnfs_free_layout_hdr(lo);
}

static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
        if (atomic_dec_and_test(&lo->plh_refcount))
                destroy_layout_hdr(lo);
}

void
put_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct inode *inode = lo->plh_inode;

        if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
                destroy_layout_hdr(lo);
                spin_unlock(&inode->i_lock);
        }
}

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
        INIT_LIST_HEAD(&lseg->pls_list);
        INIT_LIST_HEAD(&lseg->pls_lc_list);
        atomic_set(&lseg->pls_refcount, 1);
        smp_mb();
        set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
        lseg->pls_layout = lo;
}

static void free_lseg(struct pnfs_layout_segment *lseg)
{
        struct inode *ino = lseg->pls_layout->plh_inode;

        NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
        /* Matched by get_layout_hdr in pnfs_insert_layout */
        put_layout_hdr(NFS_I(ino)->layout);
}

static void
put_lseg_common(struct pnfs_layout_segment *lseg)
{
        struct inode *inode = lseg->pls_layout->plh_inode;

        WARN_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
        list_del_init(&lseg->pls_list);
        if (list_empty(&lseg->pls_layout->plh_segs)) {
                set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
                /* Matched by initial refcount set in alloc_init_layout_hdr */
                put_layout_hdr_locked(lseg->pls_layout);
        }
        rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
put_lseg(struct pnfs_layout_segment *lseg)
{
        struct inode *inode;

        if (!lseg)
                return;

        dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
                atomic_read(&lseg->pls_refcount),
                test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
        inode = lseg->pls_layout->plh_inode;
        if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
                LIST_HEAD(free_me);

                put_lseg_common(lseg);
                list_add(&lseg->pls_list, &free_me);
                spin_unlock(&inode->i_lock);
                pnfs_free_lseg_list(&free_me);
        }
}
EXPORT_SYMBOL_GPL(put_lseg);

static inline u64
end_offset(u64 start, u64 len)
{
        u64 end;

        end = start + len;
        return end >= start ? end : NFS4_MAX_UINT64;
}

/* last octet in a range */
static inline u64
last_byte_offset(u64 start, u64 len)
{
        u64 end;

        BUG_ON(!len);
        end = start + len;
        return end > start ? end - 1 : NFS4_MAX_UINT64;
}

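/*
 * Worked example (illustrative): the sums above deliberately rely on
 * unsigned overflow. With start = 4096 and len = NFS4_MAX_UINT64
 * ("length to EOF"), start + len wraps to 4095, which is less than
 * start, so end_offset() clamps the result to NFS4_MAX_UINT64 and the
 * range is treated as extending to end of file. For an ordinary range
 * such as start = 4096, len = 8192, end_offset() returns 12288 (the
 * first byte past the range) and last_byte_offset() returns 12287.
 */
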
/*
 * is l2 fully contained in l1?
 *   start1                             end1
 *   [----------------------------------)
 *           start2           end2
 *           [----------------)
 */
static inline int
lo_seg_contained(struct pnfs_layout_range *l1,
                 struct pnfs_layout_range *l2)
{
        u64 start1 = l1->offset;
        u64 end1 = end_offset(start1, l1->length);
        u64 start2 = l2->offset;
        u64 end2 = end_offset(start2, l2->length);

        return (start1 <= start2) && (end1 >= end2);
}

/*
 * are l1 and l2 intersecting?
 *   start1                             end1
 *   [----------------------------------)
 *                              start2           end2
 *                              [----------------)
 */
static inline int
lo_seg_intersecting(struct pnfs_layout_range *l1,
                    struct pnfs_layout_range *l2)
{
        u64 start1 = l1->offset;
        u64 end1 = end_offset(start1, l1->length);
        u64 start2 = l2->offset;
        u64 end2 = end_offset(start2, l2->length);

        return (end1 == NFS4_MAX_UINT64 || end1 > start2) &&
               (end2 == NFS4_MAX_UINT64 || end2 > start1);
}

static bool
should_free_lseg(struct pnfs_layout_range *lseg_range,
                 struct pnfs_layout_range *recall_range)
{
        return (recall_range->iomode == IOMODE_ANY ||
                lseg_range->iomode == recall_range->iomode) &&
               lo_seg_intersecting(lseg_range, recall_range);
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
                             struct list_head *tmp_list)
{
        int rv = 0;

        if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
                /* Remove the reference keeping the lseg in the
                 * list.  It will now be removed when all
                 * outstanding io is finished.
                 */
                dprintk("%s: lseg %p ref %d\n", __func__, lseg,
                        atomic_read(&lseg->pls_refcount));
                if (atomic_dec_and_test(&lseg->pls_refcount)) {
                        put_lseg_common(lseg);
                        list_add(&lseg->pls_list, tmp_list);
                        rv = 1;
                }
        }
        return rv;
}

/* Returns the number of matching invalid lsegs remaining in the list
 * after the call.
 */
int
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
                            struct list_head *tmp_list,
                            struct pnfs_layout_range *recall_range)
{
        struct pnfs_layout_segment *lseg, *next;
        int invalid = 0, removed = 0;

        dprintk("%s:Begin lo %p\n", __func__, lo);

        if (list_empty(&lo->plh_segs)) {
                /* Reset MDS Threshold I/O counters */
                NFS_I(lo->plh_inode)->write_io = 0;
                NFS_I(lo->plh_inode)->read_io = 0;
                if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
                        put_layout_hdr_locked(lo);
                return 0;
        }
        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
                if (!recall_range ||
                    should_free_lseg(&lseg->pls_range, recall_range)) {
                        dprintk("%s: freeing lseg %p iomode %d "
                                "offset %llu length %llu\n", __func__,
                                lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
                                lseg->pls_range.length);
                        invalid++;
                        removed += mark_lseg_invalid(lseg, tmp_list);
                }
        dprintk("%s:Return %i\n", __func__, invalid - removed);
        return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
        struct pnfs_layout_segment *lseg, *tmp;
        struct pnfs_layout_hdr *lo;

        if (list_empty(free_me))
                return;

        lo = list_first_entry(free_me, struct pnfs_layout_segment,
                              pls_list)->pls_layout;

        if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
                struct nfs_client *clp;

                clp = NFS_SERVER(lo->plh_inode)->nfs_client;
                spin_lock(&clp->cl_lock);
                list_del_init(&lo->plh_layouts);
                spin_unlock(&clp->cl_lock);
        }
        list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
                list_del(&lseg->pls_list);
                free_lseg(lseg);
        }
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
        struct pnfs_layout_hdr *lo;
        LIST_HEAD(tmp_list);

        spin_lock(&nfsi->vfs_inode.i_lock);
        lo = nfsi->layout;
        if (lo) {
                lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
                mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
        }
        spin_unlock(&nfsi->vfs_inode.i_lock);
        pnfs_free_lseg_list(&tmp_list);
}
EXPORT_SYMBOL_GPL(pnfs_destroy_layout);

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
        struct nfs_server *server;
        struct pnfs_layout_hdr *lo;
        LIST_HEAD(tmp_list);

        nfs4_deviceid_mark_client_invalid(clp);
        nfs4_deviceid_purge_client(clp);

        spin_lock(&clp->cl_lock);
        rcu_read_lock();
        list_for_each_entry_rcu(server, &clp->cl_superblocks, client_link) {
                if (!list_empty(&server->layouts))
                        list_splice_init(&server->layouts, &tmp_list);
        }
        rcu_read_unlock();
        spin_unlock(&clp->cl_lock);

        while (!list_empty(&tmp_list)) {
                lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
                                plh_layouts);
                dprintk("%s freeing layout for inode %lu\n", __func__,
                        lo->plh_inode->i_ino);
                list_del_init(&lo->plh_layouts);
                pnfs_destroy_layout(NFS_I(lo->plh_inode));
        }
}

/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
                        bool update_barrier)
{
        u32 oldseq, newseq;

        oldseq = be32_to_cpu(lo->plh_stateid.seqid);
        newseq = be32_to_cpu(new->seqid);
        if ((int)(newseq - oldseq) > 0) {
                nfs4_stateid_copy(&lo->plh_stateid, new);
                if (update_barrier) {
                        u32 new_barrier = be32_to_cpu(new->seqid);

                        if ((int)(new_barrier - lo->plh_barrier))
                                lo->plh_barrier = new_barrier;
                } else {
                        /* Because of wraparound, we want to keep the barrier
                         * "close" to the current seqids. It needs to be
                         * within 2**31 to count as "behind", so if it
                         * gets too near that limit, give us a little leeway
                         * and bring it to within 2**30.
                         * NOTE - and yes, this is all unsigned arithmetic.
                         */
                        if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
                                lo->plh_barrier = newseq - (1 << 30);
                }
        }
}

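/*
 * Worked example (illustrative): seqid comparisons here use serial-number
 * arithmetic on u32, so wraparound is handled. With oldseq = 0xfffffffe
 * and newseq = 0x00000003, newseq - oldseq = 5 and (int)5 > 0, so the new
 * stateid is accepted as more recent even though it is numerically
 * smaller. The barrier test in pnfs_layoutgets_blocked() below works the
 * same way: a seqid only counts as "behind" the barrier while it is
 * within 2**31 below it, which is why the barrier is dragged along to
 * stay within 2**30 of the current seqid.
 */
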
/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
                        int lget)
{
        if ((stateid) &&
            (int)(lo->plh_barrier - be32_to_cpu(stateid->seqid)) >= 0)
                return true;
        return lo->plh_block_lgets ||
                test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
                test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
                (list_empty(&lo->plh_segs) &&
                 (atomic_read(&lo->plh_outstanding) > lget));
}

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
                              struct nfs4_state *open_state)
{
        int status = 0;

        dprintk("--> %s\n", __func__);
        spin_lock(&lo->plh_inode->i_lock);
        if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
                status = -EAGAIN;
        } else if (list_empty(&lo->plh_segs)) {
                int seq;

                do {
                        seq = read_seqbegin(&open_state->seqlock);
                        nfs4_stateid_copy(dst, &open_state->stateid);
                } while (read_seqretry(&open_state->seqlock, seq));
        } else
                nfs4_stateid_copy(dst, &lo->plh_stateid);
        spin_unlock(&lo->plh_inode->i_lock);
        dprintk("<-- %s\n", __func__);
        return status;
}

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset:   0
 *    arg->length:   all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
               struct nfs_open_context *ctx,
               struct pnfs_layout_range *range,
               gfp_t gfp_flags)
{
        struct inode *ino = lo->plh_inode;
        struct nfs_server *server = NFS_SERVER(ino);
        struct nfs4_layoutget *lgp;
        struct pnfs_layout_segment *lseg = NULL;
        struct page **pages = NULL;
        int i;
        u32 max_resp_sz, max_pages;

        dprintk("--> %s\n", __func__);

        BUG_ON(ctx == NULL);
        lgp = kzalloc(sizeof(*lgp), gfp_flags);
        if (lgp == NULL)
                return NULL;

        /* allocate pages for xdr post processing */
        max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
        max_pages = nfs_page_array_len(0, max_resp_sz);

        pages = kcalloc(max_pages, sizeof(struct page *), gfp_flags);
        if (!pages)
                goto out_err_free;

        for (i = 0; i < max_pages; i++) {
                pages[i] = alloc_page(gfp_flags);
                if (!pages[i])
                        goto out_err_free;
        }

        lgp->args.minlength = PAGE_CACHE_SIZE;
        if (lgp->args.minlength > range->length)
                lgp->args.minlength = range->length;
        lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
        lgp->args.range = *range;
        lgp->args.type = server->pnfs_curr_ld->id;
        lgp->args.inode = ino;
        lgp->args.ctx = get_nfs_open_context(ctx);
        lgp->args.layout.pages = pages;
        lgp->args.layout.pglen = max_pages * PAGE_SIZE;
        lgp->lsegpp = &lseg;
        lgp->gfp_flags = gfp_flags;

        /* Synchronously retrieve layout information from server and
         * store in lseg.
         */
        nfs4_proc_layoutget(lgp);
        if (!lseg) {
                /* remember that LAYOUTGET failed and suspend trying */
                set_bit(lo_fail_bit(range->iomode), &lo->plh_flags);
        }

        /* free xdr pages */
        for (i = 0; i < max_pages; i++)
                __free_page(pages[i]);
        kfree(pages);

        return lseg;

out_err_free:
        /* free any allocated xdr pages, lgp as it's not used */
        if (pages) {
                for (i = 0; i < max_pages; i++) {
                        if (!pages[i])
                                break;
                        __free_page(pages[i]);
                }
                kfree(pages);
        }
        kfree(lgp);
        return NULL;
}

/*
 * Initiates a LAYOUTRETURN(FILE), and removes the pnfs_layout_hdr
 * when the layout segment list is empty.
 *
 * Note that a pnfs_layout_hdr can exist with an empty layout segment
 * list when LAYOUTGET has failed, or when LAYOUTGET succeeded, but the
 * deviceid is marked invalid.
 */
int
_pnfs_return_layout(struct inode *ino)
{
        struct pnfs_layout_hdr *lo = NULL;
        struct nfs_inode *nfsi = NFS_I(ino);
        LIST_HEAD(tmp_list);
        struct nfs4_layoutreturn *lrp;
        nfs4_stateid stateid;
        int status = 0, empty;

        dprintk("NFS: %s for inode %lu\n", __func__, ino->i_ino);

        spin_lock(&ino->i_lock);
        lo = nfsi->layout;
        if (!lo || pnfs_test_layout_returned(lo)) {
                spin_unlock(&ino->i_lock);
                dprintk("NFS: %s no layout to return\n", __func__);
                goto out;
        }
        stateid = nfsi->layout->plh_stateid;
        /* Reference matched in nfs4_layoutreturn_release */
        get_layout_hdr(lo);
        empty = list_empty(&lo->plh_segs);
        mark_matching_lsegs_invalid(lo, &tmp_list, NULL);
        /* Don't send a LAYOUTRETURN if list was initially empty */
        if (empty) {
                spin_unlock(&ino->i_lock);
                put_layout_hdr(lo);
                dprintk("NFS: %s no layout segments to return\n", __func__);
                goto out;
        }
        lo->plh_block_lgets++;
        pnfs_mark_layout_returned(lo);
        spin_unlock(&ino->i_lock);

        pnfs_free_lseg_list(&tmp_list);

        WARN_ON(test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags));

        lrp = kzalloc(sizeof(*lrp), GFP_KERNEL);
        if (unlikely(lrp == NULL)) {
                status = -ENOMEM;
                set_bit(NFS_LAYOUT_RW_FAILED, &lo->plh_flags);
                set_bit(NFS_LAYOUT_RO_FAILED, &lo->plh_flags);
                pnfs_clear_layout_returned(lo);
                put_layout_hdr(lo);
                goto out;
        }

        lrp->args.stateid = stateid;
        lrp->args.layout_type = NFS_SERVER(ino)->pnfs_curr_ld->id;
        lrp->args.inode = ino;
        lrp->args.layout = lo;
        lrp->clp = NFS_SERVER(ino)->nfs_client;

        status = nfs4_proc_layoutreturn(lrp);
out:
        dprintk("<-- %s status: %d\n", __func__, status);
        return status;
}
EXPORT_SYMBOL_GPL(_pnfs_return_layout);

bool pnfs_roc(struct inode *ino)
{
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg, *tmp;
        LIST_HEAD(tmp_list);
        bool found = false;

        spin_lock(&ino->i_lock);
        lo = NFS_I(ino)->layout;
        if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
            test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
                goto out_nolayout;
        list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
                if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
                        mark_lseg_invalid(lseg, &tmp_list);
                        found = true;
                }
        if (!found)
                goto out_nolayout;
        lo->plh_block_lgets++;
        get_layout_hdr(lo); /* matched in pnfs_roc_release */
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&tmp_list);
        return true;

out_nolayout:
        spin_unlock(&ino->i_lock);
        return false;
}

void pnfs_roc_release(struct inode *ino)
{
        struct pnfs_layout_hdr *lo;

        spin_lock(&ino->i_lock);
        lo = NFS_I(ino)->layout;
        lo->plh_block_lgets--;
        put_layout_hdr_locked(lo);
        spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
        struct pnfs_layout_hdr *lo;

        spin_lock(&ino->i_lock);
        lo = NFS_I(ino)->layout;
        if ((int)(barrier - lo->plh_barrier) > 0)
                lo->plh_barrier = barrier;
        spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_segment *lseg;
        bool found = false;

        spin_lock(&ino->i_lock);
        list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
                if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
                        found = true;
                        break;
                }
        if (!found) {
                struct pnfs_layout_hdr *lo = nfsi->layout;
                u32 current_seqid = be32_to_cpu(lo->plh_stateid.seqid);

                /* Since close does not return a layout stateid for use as
                 * a barrier, we choose the worst-case barrier.
                 */
                *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
        }
        spin_unlock(&ino->i_lock);
        return found;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(struct pnfs_layout_range *l1,
           struct pnfs_layout_range *l2)
{
        s64 d;

        /* high offset > low offset */
        d = l1->offset - l2->offset;
        if (d)
                return d;

        /* short length > long length */
        d = l2->length - l1->length;
        if (d)
                return d;

        /* read > read/write */
        return (int)(l1->iomode == IOMODE_READ) - (int)(l2->iomode == IOMODE_READ);
}

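/*
 * Worked example (illustrative): a positive return means l1 sorts after
 * l2. For two segments with equal offset and length, one IOMODE_RW and
 * one IOMODE_READ, cmp_layout(rw, read) = 0 - 1 = -1, so the RW segment
 * sorts first and is seen first by pnfs_find_lseg(). At equal offsets
 * the longer segment sorts first; otherwise the lower offset wins.
 */
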
static void
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
                   struct pnfs_layout_segment *lseg)
{
        struct pnfs_layout_segment *lp;

        dprintk("%s:Begin\n", __func__);

        assert_spin_locked(&lo->plh_inode->i_lock);
        list_for_each_entry(lp, &lo->plh_segs, pls_list) {
                if (cmp_layout(&lseg->pls_range, &lp->pls_range) > 0)
                        continue;
                list_add_tail(&lseg->pls_list, &lp->pls_list);
                dprintk("%s: inserted lseg %p "
                        "iomode %d offset %llu length %llu before "
                        "lp %p iomode %d offset %llu length %llu\n",
                        __func__, lseg, lseg->pls_range.iomode,
                        lseg->pls_range.offset, lseg->pls_range.length,
                        lp, lp->pls_range.iomode, lp->pls_range.offset,
                        lp->pls_range.length);
                goto out;
        }
        list_add_tail(&lseg->pls_list, &lo->plh_segs);
        dprintk("%s: inserted lseg %p "
                "iomode %d offset %llu length %llu at tail\n",
                __func__, lseg, lseg->pls_range.iomode,
                lseg->pls_range.offset, lseg->pls_range.length);
out:
        get_layout_hdr(lo);

        dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino,
                      struct nfs_open_context *ctx,
                      gfp_t gfp_flags)
{
        struct pnfs_layout_hdr *lo;

        lo = pnfs_alloc_layout_hdr(ino, gfp_flags);
        if (!lo)
                return NULL;
        atomic_set(&lo->plh_refcount, 1);
        INIT_LIST_HEAD(&lo->plh_layouts);
        INIT_LIST_HEAD(&lo->plh_segs);
        INIT_LIST_HEAD(&lo->plh_bulk_recall);
        lo->plh_inode = ino;
        lo->plh_lc_cred = get_rpccred(ctx->state->owner->so_cred);
        return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino,
                       struct nfs_open_context *ctx,
                       gfp_t gfp_flags)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_hdr *new = NULL;

        dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

        assert_spin_locked(&ino->i_lock);
        if (nfsi->layout) {
                if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
                        return NULL;
                else
                        return nfsi->layout;
        }
        spin_unlock(&ino->i_lock);
        new = alloc_init_layout_hdr(ino, ctx, gfp_flags);
        spin_lock(&ino->i_lock);

        if (likely(nfsi->layout == NULL))       /* Won the race? */
                nfsi->layout = new;
        else
                pnfs_free_layout_hdr(new);
        return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode   lseg    match
 * -----    -----   -----
 * ANY      READ    true
 * ANY      RW      true
 * RW       READ    false
 * RW       RW      true
 * READ     READ    true
 * READ     RW      true
 */
static int
is_matching_lseg(struct pnfs_layout_range *ls_range,
                 struct pnfs_layout_range *range)
{
        struct pnfs_layout_range range1;

        if ((range->iomode == IOMODE_RW &&
             ls_range->iomode != IOMODE_RW) ||
            !lo_seg_intersecting(ls_range, range))
                return 0;

        /* range1 covers only the first byte in the range */
        range1 = *range;
        range1.length = 1;
        return lo_seg_contained(ls_range, &range1);
}

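/*
 * Worked example (illustrative): a cached READ lseg covering [0, 1 MiB)
 * is asked to satisfy a READ of [512 KiB, 2 MiB). The iomodes are
 * compatible and the ranges intersect, so the decision falls to the
 * first-byte test: range1 = [512 KiB, 512 KiB + 1) is contained in the
 * lseg, so the match succeeds; I/O beyond 1 MiB is left to a later
 * lookup. An IOMODE_RW request against the same READ lseg fails the
 * iomode check no matter how the ranges line up.
 */
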
/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo,
               struct pnfs_layout_range *range)
{
        struct pnfs_layout_segment *lseg, *ret = NULL;

        dprintk("%s:Begin\n", __func__);

        assert_spin_locked(&lo->plh_inode->i_lock);
        list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
                if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
                    is_matching_lseg(&lseg->pls_range, range)) {
                        ret = get_lseg(lseg);
                        break;
                }
                if (lseg->pls_range.offset > range->offset)
                        break;
        }

        dprintk("%s:Return lseg %p ref %d\n",
                __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
        return ret;
}

/*
 * Use mdsthreshold hints set at each OPEN to determine if I/O should go
 * to the MDS or over pNFS
 *
 * The nfs_inode read_io and write_io fields are cumulative counters reset
 * when there are no layout segments. Note that in pnfs_update_layout iomode
 * is set to IOMODE_READ for a READ request, and set to IOMODE_RW for a
 * WRITE request.
 *
 * A return of true means use MDS I/O.
 *
 * From rfc 5661:
 * If a file's size is smaller than the file size threshold, data accesses
 * SHOULD be sent to the metadata server. If an I/O request has a length that
 * is below the I/O size threshold, the I/O SHOULD be sent to the metadata
 * server. If both file size and I/O size are provided, the client SHOULD
 * reach or exceed both thresholds before sending its read or write
 * requests to the data server.
 */
static bool pnfs_within_mdsthreshold(struct nfs_open_context *ctx,
                                     struct inode *ino, int iomode)
{
        struct nfs4_threshold *t = ctx->mdsthreshold;
        struct nfs_inode *nfsi = NFS_I(ino);
        loff_t fsize = i_size_read(ino);
        bool size = false, size_set = false, io = false, io_set = false, ret = false;

        if (t == NULL)
                return ret;

        dprintk("%s bm=0x%x rd_sz=%llu wr_sz=%llu rd_io=%llu wr_io=%llu\n",
                __func__, t->bm, t->rd_sz, t->wr_sz, t->rd_io_sz, t->wr_io_sz);

        switch (iomode) {
        case IOMODE_READ:
                if (t->bm & THRESHOLD_RD) {
                        dprintk("%s fsize %llu\n", __func__, fsize);
                        size_set = true;
                        if (fsize < t->rd_sz)
                                size = true;
                }
                if (t->bm & THRESHOLD_RD_IO) {
                        dprintk("%s nfsi->read_io %llu\n", __func__,
                                nfsi->read_io);
                        io_set = true;
                        if (nfsi->read_io < t->rd_io_sz)
                                io = true;
                }
                break;
        case IOMODE_RW:
                if (t->bm & THRESHOLD_WR) {
                        dprintk("%s fsize %llu\n", __func__, fsize);
                        size_set = true;
                        if (fsize < t->wr_sz)
                                size = true;
                }
                if (t->bm & THRESHOLD_WR_IO) {
                        dprintk("%s nfsi->write_io %llu\n", __func__,
                                nfsi->write_io);
                        io_set = true;
                        if (nfsi->write_io < t->wr_io_sz)
                                io = true;
                }
                break;
        }

        if (size_set && io_set) {
                if (size && io)
                        ret = true;
        } else if (size || io)
                ret = true;

        dprintk("<-- %s size %d io %d ret %d\n", __func__, size, io, ret);
        return ret;
}

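/*
 * Worked example (illustrative): suppose the server's hint sets both
 * read thresholds, rd_sz = 64 KiB and rd_io_sz = 32 KiB. For a READ of
 * a 4 KiB file (fsize < rd_sz, so size = true) issued after 40 KiB of
 * cumulative reads (nfsi->read_io >= rd_io_sz, so io = false), both
 * thresholds are set but only one is met, so the function returns false
 * and the I/O goes over pNFS. Had only THRESHOLD_RD been set, size alone
 * would steer the I/O to the MDS.
 */
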
/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
                   struct nfs_open_context *ctx,
                   loff_t pos,
                   u64 count,
                   enum pnfs_iomode iomode,
                   gfp_t gfp_flags)
{
        struct pnfs_layout_range arg = {
                .iomode = iomode,
                .offset = pos,
                .length = count,
        };
        unsigned pg_offset;
        struct nfs_inode *nfsi = NFS_I(ino);
        struct nfs_server *server = NFS_SERVER(ino);
        struct nfs_client *clp = server->nfs_client;
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg = NULL;
        bool first = false;

        if (!pnfs_enabled_sb(NFS_SERVER(ino)))
                return NULL;

        if (pnfs_within_mdsthreshold(ctx, ino, iomode))
                return NULL;

        spin_lock(&ino->i_lock);
        lo = pnfs_find_alloc_layout(ino, ctx, gfp_flags);
        if (lo == NULL) {
                dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
                goto out_unlock;
        }

        /* Do we even need to bother with this? */
        if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
                dprintk("%s matches recall, use MDS\n", __func__);
                goto out_unlock;
        }

        /* if LAYOUTGET already failed once we don't try again */
        if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
                goto out_unlock;

        /* Check to see if the layout for the given range already exists */
        lseg = pnfs_find_lseg(lo, &arg);
        if (lseg)
                goto out_unlock;

        if (pnfs_layoutgets_blocked(lo, NULL, 0))
                goto out_unlock;
        atomic_inc(&lo->plh_outstanding);

        get_layout_hdr(lo);
        if (list_empty(&lo->plh_segs))
                first = true;

        /* Enable LAYOUTRETURNs */
        pnfs_clear_layout_returned(lo);

        spin_unlock(&ino->i_lock);
        if (first) {
                /* The lo must be on the clp list if there is any
                 * chance of a CB_LAYOUTRECALL(FILE) coming in.
                 */
                spin_lock(&clp->cl_lock);
                BUG_ON(!list_empty(&lo->plh_layouts));
                list_add_tail(&lo->plh_layouts, &server->layouts);
                spin_unlock(&clp->cl_lock);
        }

        pg_offset = arg.offset & ~PAGE_CACHE_MASK;
        if (pg_offset) {
                arg.offset -= pg_offset;
                arg.length += pg_offset;
        }
        if (arg.length != NFS4_MAX_UINT64)
                arg.length = PAGE_CACHE_ALIGN(arg.length);

        lseg = send_layoutget(lo, ctx, &arg, gfp_flags);
        if (!lseg && first) {
                spin_lock(&clp->cl_lock);
                list_del_init(&lo->plh_layouts);
                spin_unlock(&clp->cl_lock);
        }
        atomic_dec(&lo->plh_outstanding);
        put_layout_hdr(lo);
out:
        dprintk("%s end, state 0x%lx lseg %p\n", __func__,
                nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
        return lseg;
out_unlock:
        spin_unlock(&ino->i_lock);
        goto out;
}
EXPORT_SYMBOL_GPL(pnfs_update_layout);

int
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
        struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
        struct nfs4_layoutget_res *res = &lgp->res;
        struct pnfs_layout_segment *lseg;
        struct inode *ino = lo->plh_inode;
        int status = 0;

        /* Inject layout blob into I/O device driver */
        lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
        if (!lseg || IS_ERR(lseg)) {
                if (!lseg)
                        status = -ENOMEM;
                else
                        status = PTR_ERR(lseg);
                dprintk("%s: Could not allocate layout: error %d\n",
                       __func__, status);
                goto out;
        }

        spin_lock(&ino->i_lock);
        if (test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
                dprintk("%s forget reply due to recall\n", __func__);
                goto out_forget_reply;
        }

        if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
                dprintk("%s forget reply due to state\n", __func__);
                goto out_forget_reply;
        }
        init_lseg(lo, lseg);
        lseg->pls_range = res->range;
        *lgp->lsegpp = get_lseg(lseg);
        pnfs_insert_layout(lo, lseg);

        if (res->return_on_close) {
                set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
                set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
        }

        /* Done processing layoutget. Set the layout stateid */
        pnfs_set_layout_stateid(lo, &res->stateid, false);
        spin_unlock(&ino->i_lock);
out:
        return status;

out_forget_reply:
        spin_unlock(&ino->i_lock);
        lseg->pls_layout = lo;
        NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
        goto out;
}

void
pnfs_generic_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
        BUG_ON(pgio->pg_lseg != NULL);

        if (req->wb_offset != req->wb_pgbase) {
                nfs_pageio_reset_read_mds(pgio);
                return;
        }

        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                           req->wb_context,
                                           req_offset(req),
                                           req->wb_bytes,
                                           IOMODE_READ,
                                           GFP_KERNEL);
        /* If no lseg, fall back to read through mds */
        if (pgio->pg_lseg == NULL)
                nfs_pageio_reset_read_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_read);

void
pnfs_generic_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs_page *req)
{
        BUG_ON(pgio->pg_lseg != NULL);

        if (req->wb_offset != req->wb_pgbase) {
                nfs_pageio_reset_write_mds(pgio);
                return;
        }

        pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
                                           req->wb_context,
                                           req_offset(req),
                                           req->wb_bytes,
                                           IOMODE_RW,
                                           GFP_NOFS);
        /* If no lseg, fall back to write through mds */
        if (pgio->pg_lseg == NULL)
                nfs_pageio_reset_write_mds(pgio);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_init_write);

void
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode,
                      const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

        if (ld == NULL)
                nfs_pageio_init_read(pgio, inode, compl_ops);
        else
                nfs_pageio_init(pgio, inode, ld->pg_read_ops, compl_ops, server->rsize, 0);
}

void
pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode,
                       int ioflags,
                       const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_server *server = NFS_SERVER(inode);
        struct pnfs_layoutdriver_type *ld = server->pnfs_curr_ld;

        if (ld == NULL)
                nfs_pageio_init_write(pgio, inode, ioflags, compl_ops);
        else
                nfs_pageio_init(pgio, inode, ld->pg_write_ops, compl_ops, server->wsize, ioflags);
}

bool
pnfs_generic_pg_test(struct nfs_pageio_descriptor *pgio, struct nfs_page *prev,
                     struct nfs_page *req)
{
        if (pgio->pg_lseg == NULL)
                return nfs_generic_pg_test(pgio, prev, req);

        /*
         * Test if a nfs_page is fully contained in the pnfs_layout_range.
         * Note that this test makes several assumptions:
         * - that the previous nfs_page in the struct nfs_pageio_descriptor
         *   is known to lie within the range.
         * - that the nfs_page being tested is known to be contiguous with the
         *   previous nfs_page.
         * - Layout ranges are page aligned, so we only have to test the
         *   start offset of the request.
         *
         * Please also note that 'end_offset' is actually the offset of the
         * first byte that lies outside the pnfs_layout_range. FIXME?
         *
         */
        return req_offset(req) < end_offset(pgio->pg_lseg->pls_range.offset,
                                            pgio->pg_lseg->pls_range.length);
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_test);

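/*
 * Worked example (illustrative): with a cached lseg covering offset 0,
 * length 1 MiB, end_offset() above evaluates to 1 MiB (exclusive). A
 * request at offset 1 MiB - 4 KiB passes the test and may be coalesced
 * into the current descriptor; a request at exactly 1 MiB fails it, so
 * the accumulated I/O is sent out and the next request typically starts
 * a fresh descriptor (and with it a new layout segment lookup).
 */
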
int pnfs_write_done_resend_to_mds(struct inode *inode,
                                  struct list_head *head,
                                  const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_pageio_descriptor pgio;
        LIST_HEAD(failed);

        /* Resend all requests through the MDS */
        nfs_pageio_init_write(&pgio, inode, FLUSH_STABLE, compl_ops);
        while (!list_empty(head)) {
                struct nfs_page *req = nfs_list_entry(head->next);

                nfs_list_remove_request(req);
                if (!nfs_pageio_add_request(&pgio, req))
                        nfs_list_add_request(req, &failed);
        }
        nfs_pageio_complete(&pgio);

        if (!list_empty(&failed)) {
                /* For some reason our attempt to resend pages through the
                 * MDS failed. Mark the overall send request as having failed,
                 * and let nfs_writeback_release_full deal with the error.
                 */
                list_move(&failed, head);
                return -EIO;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(pnfs_write_done_resend_to_mds);

static void pnfs_ld_handle_write_error(struct nfs_write_data *data)
{
        struct nfs_pgio_header *hdr = data->header;

        dprintk("pnfs write error = %d\n", hdr->pnfs_error);
        if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
            PNFS_LAYOUTRET_ON_ERROR) {
                clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
                pnfs_return_layout(hdr->inode);
        }
        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
                data->task.tk_status = pnfs_write_done_resend_to_mds(hdr->inode,
                                                        &hdr->pages,
                                                        hdr->completion_ops);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_write_done(struct nfs_write_data *data)
{
        struct nfs_pgio_header *hdr = data->header;

        if (!hdr->pnfs_error) {
                pnfs_set_layoutcommit(data);
                hdr->mds_ops->rpc_call_done(&data->task, data);
        } else
                pnfs_ld_handle_write_error(data);
        hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_write_done);

static void
pnfs_write_through_mds(struct nfs_pageio_descriptor *desc,
                struct nfs_write_data *data)
{
        struct nfs_pgio_header *hdr = data->header;

        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                list_splice_tail_init(&hdr->pages, &desc->pg_list);
                nfs_pageio_reset_write_mds(desc);
                desc->pg_recoalesce = 1;
        }
        nfs_writedata_release(data);
}

static enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_write_data *wdata,
                        const struct rpc_call_ops *call_ops,
                        struct pnfs_layout_segment *lseg,
                        int how)
{
        struct nfs_pgio_header *hdr = wdata->header;
        struct inode *inode = hdr->inode;
        enum pnfs_try_status trypnfs;
        struct nfs_server *nfss = NFS_SERVER(inode);

        hdr->mds_ops = call_ops;

        dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
                inode->i_ino, wdata->args.count, wdata->args.offset, how);
        trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
        if (trypnfs != PNFS_NOT_ATTEMPTED)
                nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);
        dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
        return trypnfs;
}

static void
pnfs_do_multiple_writes(struct nfs_pageio_descriptor *desc, struct list_head *head, int how)
{
        struct nfs_write_data *data;
        const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
        struct pnfs_layout_segment *lseg = desc->pg_lseg;

        desc->pg_lseg = NULL;
        while (!list_empty(head)) {
                enum pnfs_try_status trypnfs;

                data = list_first_entry(head, struct nfs_write_data, list);
                list_del_init(&data->list);

                trypnfs = pnfs_try_to_write_data(data, call_ops, lseg, how);
                if (trypnfs == PNFS_NOT_ATTEMPTED)
                        pnfs_write_through_mds(desc, data);
        }
        put_lseg(lseg);
}

static void pnfs_writehdr_free(struct nfs_pgio_header *hdr)
{
        put_lseg(hdr->lseg);
        nfs_writehdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_writehdr_free);

int
pnfs_generic_pg_writepages(struct nfs_pageio_descriptor *desc)
{
        struct nfs_write_header *whdr;
        struct nfs_pgio_header *hdr;
        int ret;

        whdr = nfs_writehdr_alloc();
        if (!whdr) {
                desc->pg_completion_ops->error_cleanup(&desc->pg_list);
                put_lseg(desc->pg_lseg);
                desc->pg_lseg = NULL;
                return -ENOMEM;
        }
        hdr = &whdr->header;
        nfs_pgheader_init(desc, hdr, pnfs_writehdr_free);
        hdr->lseg = get_lseg(desc->pg_lseg);
        atomic_inc(&hdr->refcnt);
        ret = nfs_generic_flush(desc, hdr);
        if (ret != 0) {
                put_lseg(desc->pg_lseg);
                desc->pg_lseg = NULL;
        } else
                pnfs_do_multiple_writes(desc, &hdr->rpc_list, desc->pg_ioflags);
        if (atomic_dec_and_test(&hdr->refcnt))
                hdr->completion_ops->completion(hdr);
        return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_writepages);

int pnfs_read_done_resend_to_mds(struct inode *inode,
                                 struct list_head *head,
                                 const struct nfs_pgio_completion_ops *compl_ops)
{
        struct nfs_pageio_descriptor pgio;
        LIST_HEAD(failed);

        /* Resend all requests through the MDS */
        nfs_pageio_init_read(&pgio, inode, compl_ops);
        while (!list_empty(head)) {
                struct nfs_page *req = nfs_list_entry(head->next);

                nfs_list_remove_request(req);
                if (!nfs_pageio_add_request(&pgio, req))
                        nfs_list_add_request(req, &failed);
        }
        nfs_pageio_complete(&pgio);

        if (!list_empty(&failed)) {
                list_move(&failed, head);
                return -EIO;
        }
        return 0;
}
EXPORT_SYMBOL_GPL(pnfs_read_done_resend_to_mds);

static void pnfs_ld_handle_read_error(struct nfs_read_data *data)
{
        struct nfs_pgio_header *hdr = data->header;

        dprintk("pnfs read error = %d\n", hdr->pnfs_error);
        if (NFS_SERVER(hdr->inode)->pnfs_curr_ld->flags &
            PNFS_LAYOUTRET_ON_ERROR) {
                clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(hdr->inode)->flags);
                pnfs_return_layout(hdr->inode);
        }
        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags))
                data->task.tk_status = pnfs_read_done_resend_to_mds(hdr->inode,
                                                        &hdr->pages,
                                                        hdr->completion_ops);
}

/*
 * Called by non rpc-based layout drivers
 */
void pnfs_ld_read_done(struct nfs_read_data *data)
{
        struct nfs_pgio_header *hdr = data->header;

        if (likely(!hdr->pnfs_error)) {
                __nfs4_read_done_cb(data);
                hdr->mds_ops->rpc_call_done(&data->task, data);
        } else
                pnfs_ld_handle_read_error(data);
        hdr->mds_ops->rpc_release(data);
}
EXPORT_SYMBOL_GPL(pnfs_ld_read_done);

static void
pnfs_read_through_mds(struct nfs_pageio_descriptor *desc,
                struct nfs_read_data *data)
{
        struct nfs_pgio_header *hdr = data->header;

        if (!test_and_set_bit(NFS_IOHDR_REDO, &hdr->flags)) {
                list_splice_tail_init(&hdr->pages, &desc->pg_list);
                nfs_pageio_reset_read_mds(desc);
                desc->pg_recoalesce = 1;
        }
        nfs_readdata_release(data);
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
static enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
                       const struct rpc_call_ops *call_ops,
                       struct pnfs_layout_segment *lseg)
{
        struct nfs_pgio_header *hdr = rdata->header;
        struct inode *inode = hdr->inode;
        struct nfs_server *nfss = NFS_SERVER(inode);
        enum pnfs_try_status trypnfs;

        hdr->mds_ops = call_ops;

        dprintk("%s: Reading ino:%lu %u@%llu\n",
                __func__, inode->i_ino, rdata->args.count, rdata->args.offset);

        trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
        if (trypnfs != PNFS_NOT_ATTEMPTED)
                nfs_inc_stats(inode, NFSIOS_PNFS_READ);
        dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
        return trypnfs;
}

static void
pnfs_do_multiple_reads(struct nfs_pageio_descriptor *desc, struct list_head *head)
{
        struct nfs_read_data *data;
        const struct rpc_call_ops *call_ops = desc->pg_rpc_callops;
        struct pnfs_layout_segment *lseg = desc->pg_lseg;

        desc->pg_lseg = NULL;
        while (!list_empty(head)) {
                enum pnfs_try_status trypnfs;

                data = list_first_entry(head, struct nfs_read_data, list);
                list_del_init(&data->list);

                trypnfs = pnfs_try_to_read_data(data, call_ops, lseg);
                if (trypnfs == PNFS_NOT_ATTEMPTED)
                        pnfs_read_through_mds(desc, data);
        }
        put_lseg(lseg);
}

static void pnfs_readhdr_free(struct nfs_pgio_header *hdr)
{
        put_lseg(hdr->lseg);
        nfs_readhdr_free(hdr);
}
EXPORT_SYMBOL_GPL(pnfs_readhdr_free);

int
pnfs_generic_pg_readpages(struct nfs_pageio_descriptor *desc)
{
        struct nfs_read_header *rhdr;
        struct nfs_pgio_header *hdr;
        int ret;

        rhdr = nfs_readhdr_alloc();
        if (!rhdr) {
                desc->pg_completion_ops->error_cleanup(&desc->pg_list);
                ret = -ENOMEM;
                put_lseg(desc->pg_lseg);
                desc->pg_lseg = NULL;
                return ret;
        }
        hdr = &rhdr->header;
        nfs_pgheader_init(desc, hdr, pnfs_readhdr_free);
        hdr->lseg = get_lseg(desc->pg_lseg);
        atomic_inc(&hdr->refcnt);
        ret = nfs_generic_pagein(desc, hdr);
        if (ret != 0) {
                put_lseg(desc->pg_lseg);
                desc->pg_lseg = NULL;
        } else
                pnfs_do_multiple_reads(desc, &hdr->rpc_list);
        if (atomic_dec_and_test(&hdr->refcnt))
                hdr->completion_ops->completion(hdr);
        return ret;
}
EXPORT_SYMBOL_GPL(pnfs_generic_pg_readpages);

/*
 * There can be multiple RW segments.
 */
static void pnfs_list_write_lseg(struct inode *inode, struct list_head *listp)
{
        struct pnfs_layout_segment *lseg;

        list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list) {
                if (lseg->pls_range.iomode == IOMODE_RW &&
                    test_bit(NFS_LSEG_LAYOUTCOMMIT, &lseg->pls_flags))
                        list_add(&lseg->pls_lc_list, listp);
        }
}

void pnfs_set_lo_fail(struct pnfs_layout_segment *lseg)
{
        if (lseg->pls_range.iomode == IOMODE_RW) {
                dprintk("%s Setting layout IOMODE_RW fail bit\n", __func__);
                set_bit(lo_fail_bit(IOMODE_RW), &lseg->pls_layout->plh_flags);
        } else {
                dprintk("%s Setting layout IOMODE_READ fail bit\n", __func__);
                set_bit(lo_fail_bit(IOMODE_READ), &lseg->pls_layout->plh_flags);
        }
}
EXPORT_SYMBOL_GPL(pnfs_set_lo_fail);

void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
        struct nfs_pgio_header *hdr = wdata->header;
        struct inode *inode = hdr->inode;
        struct nfs_inode *nfsi = NFS_I(inode);
        loff_t end_pos = wdata->mds_offset + wdata->res.count;
        bool mark_as_dirty = false;

        spin_lock(&inode->i_lock);
        if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
                mark_as_dirty = true;
                dprintk("%s: Set layoutcommit for inode %lu ",
                        __func__, inode->i_ino);
        }
        if (!test_and_set_bit(NFS_LSEG_LAYOUTCOMMIT, &hdr->lseg->pls_flags)) {
                /* references matched in nfs4_layoutcommit_release */
                get_lseg(hdr->lseg);
        }
        if (end_pos > nfsi->layout->plh_lwb)
                nfsi->layout->plh_lwb = end_pos;
        spin_unlock(&inode->i_lock);
        dprintk("%s: lseg %p end_pos %llu\n",
                __func__, hdr->lseg, nfsi->layout->plh_lwb);

        /* if pnfs_layoutcommit_inode() runs between inode locks, the next one
         * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
        if (mark_as_dirty)
                mark_inode_dirty_sync(inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

void pnfs_cleanup_layoutcommit(struct nfs4_layoutcommit_data *data)
{
        struct nfs_server *nfss = NFS_SERVER(data->args.inode);

        if (nfss->pnfs_curr_ld->cleanup_layoutcommit)
                nfss->pnfs_curr_ld->cleanup_layoutcommit(data);
}

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
        struct nfs4_layoutcommit_data *data;
        struct nfs_inode *nfsi = NFS_I(inode);
        loff_t end_pos;
        int status = 0;

        dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

        if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
                return 0;

        /* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
        data = kzalloc(sizeof(*data), GFP_NOFS);
        if (!data) {
                status = -ENOMEM;
                goto out;
        }

        if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
                goto out_free;

        if (test_and_set_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags)) {
                if (!sync) {
                        status = -EAGAIN;
                        goto out_free;
                }
                status = wait_on_bit_lock(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING,
                                        nfs_wait_bit_killable, TASK_KILLABLE);
                if (status)
                        goto out_free;
        }

        INIT_LIST_HEAD(&data->lseg_list);
        spin_lock(&inode->i_lock);
        if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
                clear_bit(NFS_INO_LAYOUTCOMMITTING, &nfsi->flags);
                spin_unlock(&inode->i_lock);
                wake_up_bit(&nfsi->flags, NFS_INO_LAYOUTCOMMITTING);
                goto out_free;
        }

        pnfs_list_write_lseg(inode, &data->lseg_list);

        end_pos = nfsi->layout->plh_lwb;
        nfsi->layout->plh_lwb = 0;

        nfs4_stateid_copy(&data->args.stateid, &nfsi->layout->plh_stateid);
        spin_unlock(&inode->i_lock);

        data->args.inode = inode;
        data->cred = get_rpccred(nfsi->layout->plh_lc_cred);
        nfs_fattr_init(&data->fattr);
        data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
        data->res.fattr = &data->fattr;
        data->args.lastbytewritten = end_pos - 1;
        data->res.server = NFS_SERVER(inode);

        status = nfs4_proc_layoutcommit(data, sync);
out:
        if (status)
                mark_inode_dirty_sync(inode);
        dprintk("<-- %s status %d\n", __func__, status);
        return status;
out_free:
        kfree(data);
        goto out;
}

struct nfs4_threshold *pnfs_mdsthreshold_alloc(void)
{
        struct nfs4_threshold *thp;

        thp = kzalloc(sizeof(*thp), GFP_NOFS);
        if (!thp) {
                dprintk("%s mdsthreshold allocation failed\n", __func__);
                return NULL;
        }
        return thp;
}