pnfs.c

/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include "internal.h"
#include "pnfs.h"
#include "iostat.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld)
		module_put(nfss->pnfs_curr_ld->owner);
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
	      (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
		       id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	if (!try_module_get(ld_type->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		goto out_no_driver;
	}
	server->pnfs_curr_ld = ld_type;
	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
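
/*
 * Illustrative sketch only (the example_* names are placeholders): a
 * layout driver module typically declares a pnfs_layoutdriver_type and
 * registers it from its module init routine, e.g.
 *
 *	static struct pnfs_layoutdriver_type example_layout_type = {
 *		.id		= LAYOUT_NFSV4_1_FILES,
 *		.name		= "LAYOUT_NFSV4_1_FILES",
 *		.owner		= THIS_MODULE,
 *		.alloc_lseg	= example_alloc_lseg,
 *		.free_lseg	= example_free_lseg,
 *	};
 *
 *	static int __init example_init(void)
 *	{
 *		return pnfs_register_layoutdriver(&example_layout_type);
 *	}
 */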
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "%s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "%s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "%s Module with id %d already loaded!\n",
		       __func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
get_layout_hdr(struct pnfs_layout_hdr *lo)
{
	atomic_inc(&lo->plh_refcount);
}

static void
destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
	dprintk("%s: freeing layout cache %p\n", __func__, lo);
	BUG_ON(!list_empty(&lo->plh_layouts));
	NFS_I(lo->plh_inode)->layout = NULL;
	kfree(lo);
}

static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	if (atomic_dec_and_test(&lo->plh_refcount))
		destroy_layout_hdr(lo);
}

void
put_layout_hdr(struct pnfs_layout_hdr *lo)
{
	struct inode *inode = lo->plh_inode;

	if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
		destroy_layout_hdr(lo);
		spin_unlock(&inode->i_lock);
	}
}

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->pls_list);
	atomic_set(&lseg->pls_refcount, 1);
	smp_mb();
	set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
	lseg->pls_layout = lo;
}

static void free_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *ino = lseg->pls_layout->plh_inode;

	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	/* Matched by get_layout_hdr in pnfs_insert_layout */
	put_layout_hdr(NFS_I(ino)->layout);
}
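
/* Caller must hold the inode's i_lock. */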
static void
put_lseg_common(struct pnfs_layout_segment *lseg)
{
	struct inode *inode = lseg->pls_layout->plh_inode;

	BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	list_del_init(&lseg->pls_list);
	if (list_empty(&lseg->pls_layout->plh_segs)) {
		set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
		/* Matched by initial refcount set in alloc_init_layout_hdr */
		put_layout_hdr_locked(lseg->pls_layout);
	}
	rpc_wake_up(&NFS_SERVER(inode)->roc_rpcwaitq);
}

void
put_lseg(struct pnfs_layout_segment *lseg)
{
	struct inode *inode;

	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
		atomic_read(&lseg->pls_refcount),
		test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
	inode = lseg->pls_layout->plh_inode;
	if (atomic_dec_and_lock(&lseg->pls_refcount, &inode->i_lock)) {
		LIST_HEAD(free_me);

		put_lseg_common(lseg);
		list_add(&lseg->pls_list, &free_me);
		spin_unlock(&inode->i_lock);
		pnfs_free_lseg_list(&free_me);
	}
}
EXPORT_SYMBOL_GPL(put_lseg);

static bool
should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
{
	return (recall_iomode == IOMODE_ANY ||
		lseg_iomode == recall_iomode);
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
			     struct list_head *tmp_list)
{
	int rv = 0;

	if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
		/* Remove the reference keeping the lseg in the
		 * list.  It will now be removed when all
		 * outstanding io is finished.
		 */
		dprintk("%s: lseg %p ref %d\n", __func__, lseg,
			atomic_read(&lseg->pls_refcount));
		if (atomic_dec_and_test(&lseg->pls_refcount)) {
			put_lseg_common(lseg);
			list_add(&lseg->pls_list, tmp_list);
			rv = 1;
		}
	}
	return rv;
}

/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
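/* Caller must hold the inode's i_lock. */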
int
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
			    struct list_head *tmp_list,
			    u32 iomode)
{
	struct pnfs_layout_segment *lseg, *next;
	int invalid = 0, removed = 0;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	if (list_empty(&lo->plh_segs)) {
		if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
			put_layout_hdr_locked(lo);
		return 0;
	}
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
			dprintk("%s: freeing lseg %p iomode %d "
				"offset %llu length %llu\n", __func__,
				lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
				lseg->pls_range.length);
			invalid++;
			removed += mark_lseg_invalid(lseg, tmp_list);
		}
	dprintk("%s:Return %i\n", __func__, invalid - removed);
	return invalid - removed;
}

/* note free_me must contain lsegs from a single layout_hdr */
void
pnfs_free_lseg_list(struct list_head *free_me)
{
	struct pnfs_layout_segment *lseg, *tmp;
	struct pnfs_layout_hdr *lo;

	if (list_empty(free_me))
		return;

	lo = list_first_entry(free_me, struct pnfs_layout_segment,
			      pls_list)->pls_layout;

	if (test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags)) {
		struct nfs_client *clp;

		clp = NFS_SERVER(lo->plh_inode)->nfs_client;
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
		list_del(&lseg->pls_list);
		free_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
		mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
	}
	spin_unlock(&nfsi->vfs_inode.i_lock);
	pnfs_free_lseg_list(&tmp_list);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&clp->cl_lock);
	list_splice_init(&clp->cl_layouts, &tmp_list);
	spin_unlock(&clp->cl_lock);
	while (!list_empty(&tmp_list)) {
		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
				plh_layouts);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->plh_inode->i_ino);
		list_del_init(&lo->plh_layouts);
		pnfs_destroy_layout(NFS_I(lo->plh_inode));
	}
}

/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
			bool update_barrier)
{
	u32 oldseq, newseq;

	oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
	newseq = be32_to_cpu(new->stateid.seqid);
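	/* The (int) cast gives a serial-number style comparison, so the
	 * "newer than" test below keeps working across seqid wraparound.
	 */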
	if ((int)(newseq - oldseq) > 0) {
		memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
		if (update_barrier) {
			u32 new_barrier = be32_to_cpu(new->stateid.seqid);

			if ((int)(new_barrier - lo->plh_barrier))
				lo->plh_barrier = new_barrier;
		} else {
			/* Because of wraparound, we want to keep the barrier
			 * "close" to the current seqids.  It needs to be
			 * within 2**31 to count as "behind", so if it
			 * gets too near that limit, give us a little leeway
			 * and bring it to within 2**30.
			 * NOTE - and yes, this is all unsigned arithmetic.
			 */
			if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
				lo->plh_barrier = newseq - (1 << 30);
		}
	}
}

/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
			int lget)
{
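	/* A stateid whose seqid is at or behind plh_barrier is stale
	 * (the reply may have raced with a layout recall), so report
	 * layoutgets as blocked and let the caller forget it.
	 */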
	if ((stateid) &&
	    (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
		return true;
	return lo->plh_block_lgets ||
		test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
		test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
		(list_empty(&lo->plh_segs) &&
		 (atomic_read(&lo->plh_outstanding) > lget));
}

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			      struct nfs4_state *open_state)
{
	int status = 0;

	dprintk("--> %s\n", __func__);
	spin_lock(&lo->plh_inode->i_lock);
	if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
		status = -EAGAIN;
	} else if (list_empty(&lo->plh_segs)) {
		int seq;

		do {
			seq = read_seqbegin(&open_state->seqlock);
			memcpy(dst->data, open_state->stateid.data,
			       sizeof(open_state->stateid.data));
		} while (read_seqretry(&open_state->seqlock, seq));
	} else
		memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
	spin_unlock(&lo->plh_inode->i_lock);
	dprintk("<-- %s\n", __func__);
	return status;
}

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	       struct nfs_open_context *ctx,
	       u32 iomode,
	       gfp_t gfp_flags)
{
	struct inode *ino = lo->plh_inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg = NULL;
	struct page **pages = NULL;
	int i;
	u32 max_resp_sz, max_pages;

	dprintk("--> %s\n", __func__);

	BUG_ON(ctx == NULL);
	lgp = kzalloc(sizeof(*lgp), gfp_flags);
	if (lgp == NULL)
		return NULL;

	/* allocate pages for xdr post processing */
	max_resp_sz = server->nfs_client->cl_session->fc_attrs.max_resp_sz;
	max_pages = max_resp_sz >> PAGE_SHIFT;

	pages = kzalloc(max_pages * sizeof(struct page *), gfp_flags);
	if (!pages)
		goto out_err_free;

	for (i = 0; i < max_pages; i++) {
		pages[i] = alloc_page(gfp_flags);
		if (!pages[i])
			goto out_err_free;
	}

	lgp->args.minlength = NFS4_MAX_UINT64;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range.iomode = iomode;
	lgp->args.range.offset = 0;
	lgp->args.range.length = NFS4_MAX_UINT64;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->args.layout.pages = pages;
	lgp->args.layout.pglen = max_pages * PAGE_SIZE;
	lgp->lsegpp = &lseg;
	lgp->gfp_flags = gfp_flags;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	nfs4_proc_layoutget(lgp);
	if (!lseg) {
		/* remember that LAYOUTGET failed and suspend trying */
		set_bit(lo_fail_bit(iomode), &lo->plh_flags);
	}

	/* free xdr pages */
	for (i = 0; i < max_pages; i++)
		__free_page(pages[i]);
	kfree(pages);

	return lseg;

out_err_free:
	/* free any allocated xdr pages, lgp as it's not used */
	if (pages) {
		for (i = 0; i < max_pages; i++) {
			if (!pages[i])
				break;
			__free_page(pages[i]);
		}
		kfree(pages);
	}
	kfree(lgp);
	return NULL;
}
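
/*
 * Return-on-close handling: if this layout is marked return-on-close,
 * invalidate every ROC segment so the layout can be returned as part of
 * CLOSE processing.  Returns true if any such segment was found.
 */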
bool pnfs_roc(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg, *tmp;
	LIST_HEAD(tmp_list);
	bool found = false;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
		goto out_nolayout;
	list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			mark_lseg_invalid(lseg, &tmp_list);
			found = true;
		}
	if (!found)
		goto out_nolayout;
	lo->plh_block_lgets++;
	get_layout_hdr(lo); /* matched in pnfs_roc_release */
	spin_unlock(&ino->i_lock);
	pnfs_free_lseg_list(&tmp_list);
	return true;

out_nolayout:
	spin_unlock(&ino->i_lock);
	return false;
}

void pnfs_roc_release(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	lo->plh_block_lgets--;
	put_layout_hdr_locked(lo);
	spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
	struct pnfs_layout_hdr *lo;

	spin_lock(&ino->i_lock);
	lo = NFS_I(ino)->layout;
	if ((int)(barrier - lo->plh_barrier) > 0)
		lo->plh_barrier = barrier;
	spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_segment *lseg;
	bool found = false;

	spin_lock(&ino->i_lock);
	list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
		if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
			found = true;
			break;
		}
	if (!found) {
		struct pnfs_layout_hdr *lo = nfsi->layout;
		u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);

		/* Since close does not return a layout stateid for use as
		 * a barrier, we choose the worst-case barrier.
		 */
		*barrier = current_seqid + atomic_read(&lo->plh_outstanding);
	}
	spin_unlock(&ino->i_lock);
	return found;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(u32 iomode1, u32 iomode2)
{
	/* read > read/write */
	return (int)(iomode2 == IOMODE_READ) - (int)(iomode1 == IOMODE_READ);
}
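
/*
 * Insert lseg into the layout's segment list, keeping RW segments ahead
 * of READ segments (see cmp_layout()).  Caller must hold the inode's
 * i_lock.  Takes a reference on the layout header, matched by the
 * put_layout_hdr() in free_lseg().
 */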
static void
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;
	int found = 0;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lp, &lo->plh_segs, pls_list) {
		if (cmp_layout(lp->pls_range.iomode, lseg->pls_range.iomode) > 0)
			continue;
		list_add_tail(&lseg->pls_list, &lp->pls_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length,
			lp, lp->pls_range.iomode, lp->pls_range.offset,
			lp->pls_range.length);
		found = 1;
		break;
	}
	if (!found) {
		list_add_tail(&lseg->pls_list, &lo->plh_segs);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu at tail\n",
			__func__, lseg, lseg->pls_range.iomode,
			lseg->pls_range.offset, lseg->pls_range.length);
	}
	get_layout_hdr(lo);

	dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino, gfp_t gfp_flags)
{
	struct pnfs_layout_hdr *lo;

	lo = kzalloc(sizeof(struct pnfs_layout_hdr), gfp_flags);
	if (!lo)
		return NULL;
	atomic_set(&lo->plh_refcount, 1);
	INIT_LIST_HEAD(&lo->plh_layouts);
	INIT_LIST_HEAD(&lo->plh_segs);
	INIT_LIST_HEAD(&lo->plh_bulk_recall);
	lo->plh_inode = ino;
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino, gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	assert_spin_locked(&ino->i_lock);
	if (nfsi->layout) {
		if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
			return NULL;
		else
			return nfsi->layout;
	}
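	/* No layout header yet: drop i_lock so the allocation can block,
	 * then recheck under the lock in case another thread installed
	 * one in the meantime.
	 */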
	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino, gfp_flags);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL))	/* Won the race? */
		nfsi->layout = new;
	else
		kfree(new);
	return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode	lseg	match
 * -----	-----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static int
is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
{
	return (iomode != IOMODE_RW || lseg->pls_range.iomode == IOMODE_RW);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->plh_inode->i_lock);
	list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
		if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
		    is_matching_lseg(lseg, iomode)) {
			ret = get_lseg(lseg);
			break;
		}
		if (cmp_layout(iomode, lseg->pls_range.iomode) > 0)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
	return ret;
}

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   enum pnfs_iomode iomode,
		   gfp_t gfp_flags)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;
	bool first = false;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		return NULL;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino, gfp_flags);
	if (lo == NULL) {
		dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
		goto out_unlock;
	}

	/* Do we even need to bother with this? */
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s matches recall, use MDS\n", __func__);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
		goto out_unlock;

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_find_lseg(lo, iomode);
	if (lseg)
		goto out_unlock;

	if (pnfs_layoutgets_blocked(lo, NULL, 0))
		goto out_unlock;
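	/* Count this LAYOUTGET as outstanding so pnfs_layoutgets_blocked()
	 * and pnfs_roc_drain() can account for it.
	 */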
	atomic_inc(&lo->plh_outstanding);

	get_layout_hdr(lo);
	if (list_empty(&lo->plh_segs))
		first = true;
	spin_unlock(&ino->i_lock);
	if (first) {
		/* The lo must be on the clp list if there is any
		 * chance of a CB_LAYOUTRECALL(FILE) coming in.
		 */
		spin_lock(&clp->cl_lock);
		BUG_ON(!list_empty(&lo->plh_layouts));
		list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
		spin_unlock(&clp->cl_lock);
	}

	lseg = send_layoutget(lo, ctx, iomode, gfp_flags);
	if (!lseg && first) {
		spin_lock(&clp->cl_lock);
		list_del_init(&lo->plh_layouts);
		spin_unlock(&clp->cl_lock);
	}
	atomic_dec(&lo->plh_outstanding);
	put_layout_hdr(lo);
out:
	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
		nfsi->layout ? nfsi->layout->plh_flags : -1, lseg);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out;
}
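
/*
 * Process a LAYOUTGET reply: turn the returned layout blob into a layout
 * segment via the layout driver and insert it into the layout cache,
 * unless a recall or a stale stateid forces us to forget the reply.
 */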
int
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->plh_inode;
	struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
	int status = 0;

	/* Verify we got what we asked for.
	 * Note that because the xdr parsing only accepts a single
	 * element array, this can fail even if the server is behaving
	 * correctly.
	 */
	if (lgp->args.range.iomode > res->range.iomode ||
	    res->range.offset != 0 ||
	    res->range.length != NFS4_MAX_UINT64) {
		status = -EINVAL;
		goto out;
	}
	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res, lgp->gfp_flags);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
			__func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
	    test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
		dprintk("%s forget reply due to recall\n", __func__);
		goto out_forget_reply;
	}

	if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
		dprintk("%s forget reply due to state\n", __func__);
		goto out_forget_reply;
	}
	init_lseg(lo, lseg);
	lseg->pls_range = res->range;
	*lgp->lsegpp = get_lseg(lseg);
	pnfs_insert_layout(lo, lseg);

	if (res->return_on_close) {
		set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
		set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
	}

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid, false);
	spin_unlock(&ino->i_lock);
out:
	return status;

out_forget_reply:
	spin_unlock(&ino->i_lock);
	lseg->pls_layout = lo;
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	goto out;
}
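
/* pg_test callback used while coalescing read requests: on the first
 * request of a series, try to obtain a READ layout segment, then defer
 * to the layout driver's own pg_test.
 */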
static int pnfs_read_pg_test(struct nfs_pageio_descriptor *pgio,
			     struct nfs_page *prev,
			     struct nfs_page *req)
{
	if (pgio->pg_count == prev->wb_bytes) {
		/* This is first coalesce call for a series of nfs_pages */
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   prev->wb_context,
						   IOMODE_READ,
						   GFP_KERNEL);
	}
	return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
}

void
pnfs_pageio_init_read(struct nfs_pageio_descriptor *pgio, struct inode *inode)
{
	struct pnfs_layoutdriver_type *ld;

	ld = NFS_SERVER(inode)->pnfs_curr_ld;
	pgio->pg_test = (ld && ld->pg_test) ? pnfs_read_pg_test : NULL;
}

static int pnfs_write_pg_test(struct nfs_pageio_descriptor *pgio,
			      struct nfs_page *prev,
			      struct nfs_page *req)
{
	if (pgio->pg_count == prev->wb_bytes) {
		/* This is first coalesce call for a series of nfs_pages */
		pgio->pg_lseg = pnfs_update_layout(pgio->pg_inode,
						   prev->wb_context,
						   IOMODE_RW,
						   GFP_NOFS);
	}
	return NFS_SERVER(pgio->pg_inode)->pnfs_curr_ld->pg_test(pgio, prev, req);
}

void
pnfs_pageio_init_write(struct nfs_pageio_descriptor *pgio, struct inode *inode)
{
	struct pnfs_layoutdriver_type *ld;

	ld = NFS_SERVER(inode)->pnfs_curr_ld;
	pgio->pg_test = (ld && ld->pg_test) ? pnfs_write_pg_test : NULL;
}

enum pnfs_try_status
pnfs_try_to_write_data(struct nfs_write_data *wdata,
			const struct rpc_call_ops *call_ops, int how)
{
	struct inode *inode = wdata->inode;
	enum pnfs_try_status trypnfs;
	struct nfs_server *nfss = NFS_SERVER(inode);

	wdata->mds_ops = call_ops;

	dprintk("%s: Writing ino:%lu %u@%llu (how %d)\n", __func__,
		inode->i_ino, wdata->args.count, wdata->args.offset, how);

	trypnfs = nfss->pnfs_curr_ld->write_pagelist(wdata, how);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(wdata->lseg);
		wdata->lseg = NULL;
	} else
		nfs_inc_stats(inode, NFSIOS_PNFS_WRITE);

	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

/*
 * Call the appropriate parallel I/O subsystem read function.
 */
enum pnfs_try_status
pnfs_try_to_read_data(struct nfs_read_data *rdata,
		      const struct rpc_call_ops *call_ops)
{
	struct inode *inode = rdata->inode;
	struct nfs_server *nfss = NFS_SERVER(inode);
	enum pnfs_try_status trypnfs;

	rdata->mds_ops = call_ops;

	dprintk("%s: Reading ino:%lu %u@%llu\n",
		__func__, inode->i_ino, rdata->args.count, rdata->args.offset);

	trypnfs = nfss->pnfs_curr_ld->read_pagelist(rdata);
	if (trypnfs == PNFS_NOT_ATTEMPTED) {
		put_lseg(rdata->lseg);
		rdata->lseg = NULL;
	} else {
		nfs_inc_stats(inode, NFSIOS_PNFS_READ);
	}
	dprintk("%s End (trypnfs:%d)\n", __func__, trypnfs);
	return trypnfs;
}

/*
 * Currently there is only one (whole file) write lseg.
 */
static struct pnfs_layout_segment *pnfs_list_write_lseg(struct inode *inode)
{
	struct pnfs_layout_segment *lseg, *rv = NULL;

	list_for_each_entry(lseg, &NFS_I(inode)->layout->plh_segs, pls_list)
		if (lseg->pls_range.iomode == IOMODE_RW)
			rv = lseg;
	return rv;
}
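
/*
 * Record a successful pNFS write so that a LAYOUTCOMMIT covering it is
 * sent later: track the furthest byte written in pls_end_pos and mark
 * the inode dirty the first time, so the commit is eventually sent.
 */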
void
pnfs_set_layoutcommit(struct nfs_write_data *wdata)
{
	struct nfs_inode *nfsi = NFS_I(wdata->inode);
	loff_t end_pos = wdata->args.offset + wdata->res.count;
	bool mark_as_dirty = false;

	spin_lock(&nfsi->vfs_inode.i_lock);
	if (!test_and_set_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		/* references matched in nfs4_layoutcommit_release */
		get_lseg(wdata->lseg);
		wdata->lseg->pls_lc_cred =
			get_rpccred(wdata->args.context->state->owner->so_cred);
		mark_as_dirty = true;
		dprintk("%s: Set layoutcommit for inode %lu ",
			__func__, wdata->inode->i_ino);
	}
	if (end_pos > wdata->lseg->pls_end_pos)
		wdata->lseg->pls_end_pos = end_pos;
	spin_unlock(&nfsi->vfs_inode.i_lock);

	/* if pnfs_layoutcommit_inode() runs between inode locks, the next one
	 * will be a noop because NFS_INO_LAYOUTCOMMIT will not be set */
	if (mark_as_dirty)
		mark_inode_dirty_sync(wdata->inode);
}
EXPORT_SYMBOL_GPL(pnfs_set_layoutcommit);

/*
 * For the LAYOUT4_NFSV4_1_FILES layout type, NFS_DATA_SYNC WRITEs and
 * NFS_UNSTABLE WRITEs with a COMMIT to data servers must store enough
 * data to disk to allow the server to recover the data if it crashes.
 * LAYOUTCOMMIT is only needed when the NFL4_UFLG_COMMIT_THRU_MDS flag
 * is off, and a COMMIT is sent to a data server, or
 * if WRITEs to a data server return NFS_DATA_SYNC.
 */
int
pnfs_layoutcommit_inode(struct inode *inode, bool sync)
{
	struct nfs4_layoutcommit_data *data;
	struct nfs_inode *nfsi = NFS_I(inode);
	struct pnfs_layout_segment *lseg;
	struct rpc_cred *cred;
	loff_t end_pos;
	int status = 0;

	dprintk("--> %s inode %lu\n", __func__, inode->i_ino);

	if (!test_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags))
		return 0;

	/* Note kzalloc ensures data->res.seq_res.sr_slot == NULL */
	data = kzalloc(sizeof(*data), GFP_NOFS);
	if (!data) {
		mark_inode_dirty_sync(inode);
		status = -ENOMEM;
		goto out;
	}

	spin_lock(&inode->i_lock);
	if (!test_and_clear_bit(NFS_INO_LAYOUTCOMMIT, &nfsi->flags)) {
		spin_unlock(&inode->i_lock);
		kfree(data);
		goto out;
	}
	/*
	 * Currently only one (whole file) write lseg which is referenced
	 * in pnfs_set_layoutcommit and will be found.
	 */
	lseg = pnfs_list_write_lseg(inode);

	end_pos = lseg->pls_end_pos;
	cred = lseg->pls_lc_cred;
	lseg->pls_end_pos = 0;
	lseg->pls_lc_cred = NULL;

	memcpy(&data->args.stateid.data, nfsi->layout->plh_stateid.data,
		sizeof(nfsi->layout->plh_stateid.data));
	spin_unlock(&inode->i_lock);

	data->args.inode = inode;
	data->lseg = lseg;
	data->cred = cred;
	nfs_fattr_init(&data->fattr);
	data->args.bitmask = NFS_SERVER(inode)->cache_consistency_bitmask;
	data->res.fattr = &data->fattr;
	data->args.lastbytewritten = end_pos - 1;
	data->res.server = NFS_SERVER(inode);

	status = nfs4_proc_layoutcommit(data, sync);
out:
	dprintk("<-- %s status %d\n", __func__, status);
	return status;
}