pnfs.c

/*
 *  pNFS functions to call and manage layout drivers.
 *
 *  Copyright (c) 2002 [year of first publication]
 *  The Regents of the University of Michigan
 *  All Rights Reserved
 *
 *  Dean Hildebrand <dhildebz@umich.edu>
 *
 *  Permission is granted to use, copy, create derivative works, and
 *  redistribute this software and such derivative works for any purpose,
 *  so long as the name of the University of Michigan is not used in
 *  any advertising or publicity pertaining to the use or distribution
 *  of this software without specific, written prior authorization. If
 *  the above copyright notice or any other identification of the
 *  University of Michigan is included in any copy of any portion of
 *  this software, then the disclaimer below must also be included.
 *
 *  This software is provided as is, without representation or warranty
 *  of any kind either express or implied, including without limitation
 *  the implied warranties of merchantability, fitness for a particular
 *  purpose, or noninfringement.  The Regents of the University of
 *  Michigan shall not be liable for any damages, including special,
 *  indirect, incidental, or consequential damages, with respect to any
 *  claim arising out of or in connection with the use of the software,
 *  even if it has been or is hereafter advised of the possibility of
 *  such damages.
 */

#include <linux/nfs_fs.h>
#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY         NFSDBG_PNFS

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
                if (local->id == id)
                        goto out;
        local = NULL;
out:
        dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
        return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
        struct pnfs_layoutdriver_type *local;

        spin_lock(&pnfs_spinlock);
        local = find_pnfs_driver_locked(id);
        spin_unlock(&pnfs_spinlock);
        return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
        if (nfss->pnfs_curr_ld) {
                nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
                module_put(nfss->pnfs_curr_ld->owner);
        }
        nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
{
        struct pnfs_layoutdriver_type *ld_type = NULL;

        if (id == 0)
                goto out_no_driver;
        if (!(server->nfs_client->cl_exchange_flags &
              (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
                printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
                       id, server->nfs_client->cl_exchange_flags);
                goto out_no_driver;
        }
        ld_type = find_pnfs_driver(id);
        if (!ld_type) {
                request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
                ld_type = find_pnfs_driver(id);
                if (!ld_type) {
                        dprintk("%s: No pNFS module found for %u.\n",
                                __func__, id);
                        goto out_no_driver;
                }
        }
        if (!try_module_get(ld_type->owner)) {
                dprintk("%s: Could not grab reference on module\n", __func__);
                goto out_no_driver;
        }
        server->pnfs_curr_ld = ld_type;
        if (ld_type->set_layoutdriver(server)) {
                printk(KERN_ERR
                       "%s: Error initializing mount point for layout driver %u.\n",
                       __func__, id);
                module_put(ld_type->owner);
                goto out_no_driver;
        }
        dprintk("%s: pNFS module for %u set\n", __func__, id);
        return;

out_no_driver:
        dprintk("%s: Using NFSv4 I/O\n", __func__);
        server->pnfs_curr_ld = NULL;
}

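/*
 * Note (illustrative, not from this file): request_module() above asks for
 * a module named "<LAYOUT_NFSV4_1_MODULE_PREFIX>-<id>".  Assuming the
 * prefix expands to "nfs-layouttype4", a driver registering layout type 1
 * is only auto-loaded if it declares a matching alias, e.g.
 *
 *      MODULE_ALIAS("nfs-layouttype4-1");
 */
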
int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        int status = -EINVAL;
        struct pnfs_layoutdriver_type *tmp;

        if (ld_type->id == 0) {
                printk(KERN_ERR "%s id 0 is reserved\n", __func__);
                return status;
        }
        if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
                printk(KERN_ERR "%s Layout driver must provide "
                       "alloc_lseg and free_lseg.\n", __func__);
                return status;
        }

        spin_lock(&pnfs_spinlock);
        tmp = find_pnfs_driver_locked(ld_type->id);
        if (!tmp) {
                list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
                status = 0;
                dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
                        ld_type->name);
        } else {
                printk(KERN_ERR "%s Module with id %d already loaded!\n",
                       __func__, ld_type->id);
        }
        spin_unlock(&pnfs_spinlock);
        return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
        dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
        spin_lock(&pnfs_spinlock);
        list_del(&ld_type->pnfs_tblid);
        spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

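/*
 * Registration sketch (illustrative, not part of this file): a minimal
 * layout driver module fills in the fields this file dereferences (id,
 * name, owner, the lseg and mount callbacks) and registers at load time.
 * The "mylayout" callback names are hypothetical placeholders.
 *
 *      static struct pnfs_layoutdriver_type mylayout_type = {
 *              .id                  = LAYOUT_NFSV4_1_FILES,
 *              .name                = "mylayout",
 *              .owner               = THIS_MODULE,
 *              .set_layoutdriver    = mylayout_set_layoutdriver,
 *              .clear_layoutdriver  = mylayout_clear_layoutdriver,
 *              .alloc_lseg          = mylayout_alloc_lseg,
 *              .free_lseg           = mylayout_free_lseg,
 *      };
 *
 *      static int __init mylayout_init(void)
 *      {
 *              return pnfs_register_layoutdriver(&mylayout_type);
 *      }
 *
 *      static void __exit mylayout_exit(void)
 *      {
 *              pnfs_unregister_layoutdriver(&mylayout_type);
 *      }
 */
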
/*
 * pNFS client layout cache
 */

/* Need to hold i_lock if caller does not already hold reference */
void
get_layout_hdr(struct pnfs_layout_hdr *lo)
{
        atomic_inc(&lo->plh_refcount);
}

static void
destroy_layout_hdr(struct pnfs_layout_hdr *lo)
{
        dprintk("%s: freeing layout cache %p\n", __func__, lo);
        BUG_ON(!list_empty(&lo->plh_layouts));
        NFS_I(lo->plh_inode)->layout = NULL;
        kfree(lo);
}

static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
        if (atomic_dec_and_test(&lo->plh_refcount))
                destroy_layout_hdr(lo);
}

void
put_layout_hdr(struct pnfs_layout_hdr *lo)
{
        struct inode *inode = lo->plh_inode;

        if (atomic_dec_and_lock(&lo->plh_refcount, &inode->i_lock)) {
                destroy_layout_hdr(lo);
                spin_unlock(&inode->i_lock);
        }
}

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
        INIT_LIST_HEAD(&lseg->pls_list);
        atomic_set(&lseg->pls_refcount, 1);
        smp_mb();
        set_bit(NFS_LSEG_VALID, &lseg->pls_flags);
        lseg->pls_layout = lo;
}

static void free_lseg(struct pnfs_layout_segment *lseg)
{
        struct inode *ino = lseg->pls_layout->plh_inode;

        NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
        /* Matched by get_layout_hdr in pnfs_insert_layout */
        put_layout_hdr(NFS_I(ino)->layout);
}

/* The use of tmp_list is necessary because pnfs_curr_ld->free_lseg
 * could sleep, so it must be called outside of the lock.
 * Returns 1 if the object was removed, otherwise returns 0.
 */
static int
put_lseg_locked(struct pnfs_layout_segment *lseg,
                struct list_head *tmp_list)
{
        dprintk("%s: lseg %p ref %d valid %d\n", __func__, lseg,
                atomic_read(&lseg->pls_refcount),
                test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
        if (atomic_dec_and_test(&lseg->pls_refcount)) {
                struct inode *ino = lseg->pls_layout->plh_inode;

                BUG_ON(test_bit(NFS_LSEG_VALID, &lseg->pls_flags));
                list_del(&lseg->pls_list);
                if (list_empty(&lseg->pls_layout->plh_segs)) {
                        struct nfs_client *clp;

                        clp = NFS_SERVER(ino)->nfs_client;
                        spin_lock(&clp->cl_lock);
                        /* List does not take a reference, so no need for put here */
                        list_del_init(&lseg->pls_layout->plh_layouts);
                        spin_unlock(&clp->cl_lock);
                        set_bit(NFS_LAYOUT_DESTROYED, &lseg->pls_layout->plh_flags);
                        /* Matched by initial refcount set in alloc_init_layout_hdr */
                        put_layout_hdr_locked(lseg->pls_layout);
                }
                rpc_wake_up(&NFS_SERVER(ino)->roc_rpcwaitq);
                list_add(&lseg->pls_list, tmp_list);
                return 1;
        }
        return 0;
}

static bool
should_free_lseg(u32 lseg_iomode, u32 recall_iomode)
{
        return (recall_iomode == IOMODE_ANY ||
                lseg_iomode == recall_iomode);
}

/* Returns 1 if lseg is removed from list, 0 otherwise */
static int mark_lseg_invalid(struct pnfs_layout_segment *lseg,
                             struct list_head *tmp_list)
{
        int rv = 0;

        if (test_and_clear_bit(NFS_LSEG_VALID, &lseg->pls_flags)) {
                /* Remove the reference keeping the lseg in the
                 * list.  It will now be removed when all
                 * outstanding io is finished.
                 */
                rv = put_lseg_locked(lseg, tmp_list);
        }
        return rv;
}

/* Returns count of number of matching invalid lsegs remaining in list
 * after call.
 */
int
mark_matching_lsegs_invalid(struct pnfs_layout_hdr *lo,
                            struct list_head *tmp_list,
                            u32 iomode)
{
        struct pnfs_layout_segment *lseg, *next;
        int invalid = 0, removed = 0;

        dprintk("%s:Begin lo %p\n", __func__, lo);

        if (list_empty(&lo->plh_segs)) {
                if (!test_and_set_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags))
                        put_layout_hdr_locked(lo);
                return 0;
        }
        list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
                if (should_free_lseg(lseg->pls_range.iomode, iomode)) {
                        dprintk("%s: freeing lseg %p iomode %d "
                                "offset %llu length %llu\n", __func__,
                                lseg, lseg->pls_range.iomode, lseg->pls_range.offset,
                                lseg->pls_range.length);
                        invalid++;
                        removed += mark_lseg_invalid(lseg, tmp_list);
                }
        dprintk("%s:Return %i\n", __func__, invalid - removed);
        return invalid - removed;
}

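/*
 * Worked example (illustrative): if a recall with IOMODE_ANY matches three
 * lsegs and two of them drop their last reference here (moving onto
 * tmp_list), then invalid == 3, removed == 2 and the function returns 1:
 * one matching lseg is still pinned by outstanding I/O, and the caller
 * must wait for that segment to drain before the recall can complete.
 */
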
void
pnfs_free_lseg_list(struct list_head *free_me)
{
        struct pnfs_layout_segment *lseg, *tmp;

        list_for_each_entry_safe(lseg, tmp, free_me, pls_list) {
                list_del(&lseg->pls_list);
                free_lseg(lseg);
        }
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
        struct pnfs_layout_hdr *lo;
        LIST_HEAD(tmp_list);

        spin_lock(&nfsi->vfs_inode.i_lock);
        lo = nfsi->layout;
        if (lo) {
                lo->plh_block_lgets++; /* permanently block new LAYOUTGETs */
                mark_matching_lsegs_invalid(lo, &tmp_list, IOMODE_ANY);
        }
        spin_unlock(&nfsi->vfs_inode.i_lock);
        pnfs_free_lseg_list(&tmp_list);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
        struct pnfs_layout_hdr *lo;
        LIST_HEAD(tmp_list);

        spin_lock(&clp->cl_lock);
        list_splice_init(&clp->cl_layouts, &tmp_list);
        spin_unlock(&clp->cl_lock);

        while (!list_empty(&tmp_list)) {
                lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
                                plh_layouts);
                dprintk("%s freeing layout for inode %lu\n", __func__,
                        lo->plh_inode->i_ino);
                pnfs_destroy_layout(NFS_I(lo->plh_inode));
        }
}

/* update lo->plh_stateid with new if it is more recent */
void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo, const nfs4_stateid *new,
                        bool update_barrier)
{
        u32 oldseq, newseq;

        oldseq = be32_to_cpu(lo->plh_stateid.stateid.seqid);
        newseq = be32_to_cpu(new->stateid.seqid);
        if ((int)(newseq - oldseq) > 0) {
                memcpy(&lo->plh_stateid, &new->stateid, sizeof(new->stateid));
                if (update_barrier) {
                        u32 new_barrier = be32_to_cpu(new->stateid.seqid);

                        if ((int)(new_barrier - lo->plh_barrier) > 0)
                                lo->plh_barrier = new_barrier;
                } else {
                        /* Because of wraparound, we want to keep the barrier
                         * "close" to the current seqids.  It needs to be
                         * within 2**31 to count as "behind", so if it
                         * gets too near that limit, give us a little leeway
                         * and bring it to within 2**30.
                         * NOTE - and yes, this is all unsigned arithmetic.
                         */
                        if (unlikely((newseq - lo->plh_barrier) > (3 << 29)))
                                lo->plh_barrier = newseq - (1 << 30);
                }
        }
}

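/*
 * Worked example (illustrative, unsigned 32-bit arithmetic): suppose the
 * seqids have wrapped so that newseq == 0x00000005 while plh_barrier is
 * still 0xa0000000.  Then newseq - plh_barrier == 0x60000005, which
 * exceeds 3 << 29 (0x60000000), so the barrier is pulled forward to
 * newseq - (1 << 30) == 0xc0000005.  After that,
 * (int)(newseq - plh_barrier) == 0x40000000 > 0, i.e. the barrier once
 * again counts as "behind" the current seqid.
 */
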
/* lget is set to 1 if called from inside send_layoutget call chain */
static bool
pnfs_layoutgets_blocked(struct pnfs_layout_hdr *lo, nfs4_stateid *stateid,
                        int lget)
{
        if ((stateid) &&
            (int)(lo->plh_barrier - be32_to_cpu(stateid->stateid.seqid)) >= 0)
                return true;
        return lo->plh_block_lgets ||
                test_bit(NFS_LAYOUT_DESTROYED, &lo->plh_flags) ||
                test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags) ||
                (list_empty(&lo->plh_segs) &&
                 (atomic_read(&lo->plh_outstanding) > lget));
}

int
pnfs_choose_layoutget_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
                              struct nfs4_state *open_state)
{
        int status = 0;

        dprintk("--> %s\n", __func__);
        spin_lock(&lo->plh_inode->i_lock);
        if (pnfs_layoutgets_blocked(lo, NULL, 1)) {
                status = -EAGAIN;
        } else if (list_empty(&lo->plh_segs)) {
                int seq;

                do {
                        seq = read_seqbegin(&open_state->seqlock);
                        memcpy(dst->data, open_state->stateid.data,
                               sizeof(open_state->stateid.data));
                } while (read_seqretry(&open_state->seqlock, seq));
        } else
                memcpy(dst->data, lo->plh_stateid.data, sizeof(lo->plh_stateid.data));
        spin_unlock(&lo->plh_inode->i_lock);
        dprintk("<-- %s\n", __func__);
        return status;
}

/*
 * Get layout from server.
 *    for now, assume that whole file layouts are requested.
 *    arg->offset: 0
 *    arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
               struct nfs_open_context *ctx,
               u32 iomode)
{
        struct inode *ino = lo->plh_inode;
        struct nfs_server *server = NFS_SERVER(ino);
        struct nfs4_layoutget *lgp;
        struct pnfs_layout_segment *lseg = NULL;

        dprintk("--> %s\n", __func__);

        BUG_ON(ctx == NULL);
        lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
        if (lgp == NULL)
                return NULL;
        lgp->args.minlength = NFS4_MAX_UINT64;
        lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
        lgp->args.range.iomode = iomode;
        lgp->args.range.offset = 0;
        lgp->args.range.length = NFS4_MAX_UINT64;
        lgp->args.type = server->pnfs_curr_ld->id;
        lgp->args.inode = ino;
        lgp->args.ctx = get_nfs_open_context(ctx);
        lgp->lsegpp = &lseg;

        /* Synchronously retrieve layout information from server and
         * store in lseg.
         */
        nfs4_proc_layoutget(lgp);
        if (!lseg) {
                /* remember that LAYOUTGET failed and suspend trying */
                set_bit(lo_fail_bit(iomode), &lo->plh_flags);
        }
        return lseg;
}

bool pnfs_roc(struct inode *ino)
{
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg, *tmp;
        LIST_HEAD(tmp_list);
        bool found = false;

        spin_lock(&ino->i_lock);
        lo = NFS_I(ino)->layout;
        if (!lo || !test_and_clear_bit(NFS_LAYOUT_ROC, &lo->plh_flags) ||
            test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags))
                goto out_nolayout;
        list_for_each_entry_safe(lseg, tmp, &lo->plh_segs, pls_list)
                if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
                        mark_lseg_invalid(lseg, &tmp_list);
                        found = true;
                }
        if (!found)
                goto out_nolayout;
        lo->plh_block_lgets++;
        get_layout_hdr(lo); /* matched in pnfs_roc_release */
        spin_unlock(&ino->i_lock);
        pnfs_free_lseg_list(&tmp_list);
        return true;

out_nolayout:
        spin_unlock(&ino->i_lock);
        return false;
}

void pnfs_roc_release(struct inode *ino)
{
        struct pnfs_layout_hdr *lo;

        spin_lock(&ino->i_lock);
        lo = NFS_I(ino)->layout;
        lo->plh_block_lgets--;
        put_layout_hdr_locked(lo);
        spin_unlock(&ino->i_lock);
}

void pnfs_roc_set_barrier(struct inode *ino, u32 barrier)
{
        struct pnfs_layout_hdr *lo;

        spin_lock(&ino->i_lock);
        lo = NFS_I(ino)->layout;
        if ((int)(barrier - lo->plh_barrier) > 0)
                lo->plh_barrier = barrier;
        spin_unlock(&ino->i_lock);
}

bool pnfs_roc_drain(struct inode *ino, u32 *barrier)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_segment *lseg;
        bool found = false;

        spin_lock(&ino->i_lock);
        list_for_each_entry(lseg, &nfsi->layout->plh_segs, pls_list)
                if (test_bit(NFS_LSEG_ROC, &lseg->pls_flags)) {
                        found = true;
                        break;
                }
        if (!found) {
                struct pnfs_layout_hdr *lo = nfsi->layout;
                u32 current_seqid = be32_to_cpu(lo->plh_stateid.stateid.seqid);

                /* Since close does not return a layout stateid for use as
                 * a barrier, we choose the worst-case barrier.
                 */
                *barrier = current_seqid + atomic_read(&lo->plh_outstanding);
        }
        spin_unlock(&ino->i_lock);
        return found;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(u32 iomode1, u32 iomode2)
{
        /* read > read/write */
        return (int)(iomode2 == IOMODE_READ) - (int)(iomode1 == IOMODE_READ);
}

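/*
 * Worked example (illustrative): cmp_layout(IOMODE_RW, IOMODE_READ)
 * evaluates to 1 - 0 = 1, so in pnfs_insert_layout() below a new READ
 * lseg keeps walking past every RW entry (the "continue" branch) and is
 * inserted after them.  This is what keeps RW segments at the head of
 * plh_segs, so RW layouts are preferentially found first.
 */
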
static void
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
                   struct pnfs_layout_segment *lseg)
{
        struct pnfs_layout_segment *lp;
        int found = 0;

        dprintk("%s:Begin\n", __func__);

        assert_spin_locked(&lo->plh_inode->i_lock);
        list_for_each_entry(lp, &lo->plh_segs, pls_list) {
                if (cmp_layout(lp->pls_range.iomode, lseg->pls_range.iomode) > 0)
                        continue;
                list_add_tail(&lseg->pls_list, &lp->pls_list);
                dprintk("%s: inserted lseg %p "
                        "iomode %d offset %llu length %llu before "
                        "lp %p iomode %d offset %llu length %llu\n",
                        __func__, lseg, lseg->pls_range.iomode,
                        lseg->pls_range.offset, lseg->pls_range.length,
                        lp, lp->pls_range.iomode, lp->pls_range.offset,
                        lp->pls_range.length);
                found = 1;
                break;
        }
        if (!found) {
                list_add_tail(&lseg->pls_list, &lo->plh_segs);
                dprintk("%s: inserted lseg %p "
                        "iomode %d offset %llu length %llu at tail\n",
                        __func__, lseg, lseg->pls_range.iomode,
                        lseg->pls_range.offset, lseg->pls_range.length);
        }
        get_layout_hdr(lo);

        dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino)
{
        struct pnfs_layout_hdr *lo;

        lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
        if (!lo)
                return NULL;
        atomic_set(&lo->plh_refcount, 1);
        INIT_LIST_HEAD(&lo->plh_layouts);
        INIT_LIST_HEAD(&lo->plh_segs);
        INIT_LIST_HEAD(&lo->plh_bulk_recall);
        lo->plh_inode = ino;
        return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct pnfs_layout_hdr *new = NULL;

        dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

        assert_spin_locked(&ino->i_lock);
        if (nfsi->layout) {
                if (test_bit(NFS_LAYOUT_DESTROYED, &nfsi->layout->plh_flags))
                        return NULL;
                else
                        return nfsi->layout;
        }
        spin_unlock(&ino->i_lock);
        new = alloc_init_layout_hdr(ino);
        spin_lock(&ino->i_lock);

        if (likely(nfsi->layout == NULL))       /* Won the race? */
                nfsi->layout = new;
        else
                kfree(new);
        return nfsi->layout;
}

/*
 * iomode matching rules:
 * iomode       lseg    match
 * -----        -----   -----
 * ANY          READ    true
 * ANY          RW      true
 * RW           READ    false
 * RW           RW      true
 * READ         READ    true
 * READ         RW      true
 */
static int
is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
{
        return (iomode != IOMODE_RW || lseg->pls_range.iomode == IOMODE_RW);
}

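/*
 * Checking the table above against the expression (illustrative): only
 * the RW/READ row can fail, since (iomode != IOMODE_RW) is already true
 * for ANY and READ requests.  E.g. for a READ lseg and IOMODE_RW the
 * expression is (false || false) == false, while for an RW lseg and
 * IOMODE_READ it is (true || ...) == true.
 */
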
/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_find_lseg(struct pnfs_layout_hdr *lo, u32 iomode)
{
        struct pnfs_layout_segment *lseg, *ret = NULL;

        dprintk("%s:Begin\n", __func__);

        assert_spin_locked(&lo->plh_inode->i_lock);
        list_for_each_entry(lseg, &lo->plh_segs, pls_list) {
                if (test_bit(NFS_LSEG_VALID, &lseg->pls_flags) &&
                    is_matching_lseg(lseg, iomode)) {
                        ret = lseg;
                        break;
                }
                if (cmp_layout(iomode, lseg->pls_range.iomode) > 0)
                        break;
        }

        dprintk("%s:Return lseg %p ref %d\n",
                __func__, ret, ret ? atomic_read(&ret->pls_refcount) : 0);
        return ret;
}

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
                   struct nfs_open_context *ctx,
                   enum pnfs_iomode iomode)
{
        struct nfs_inode *nfsi = NFS_I(ino);
        struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
        struct pnfs_layout_hdr *lo;
        struct pnfs_layout_segment *lseg = NULL;

        if (!pnfs_enabled_sb(NFS_SERVER(ino)))
                return NULL;
        spin_lock(&ino->i_lock);
        lo = pnfs_find_alloc_layout(ino);
        if (lo == NULL) {
                dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
                goto out_unlock;
        }

        /* Do we even need to bother with this? */
        if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
            test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
                dprintk("%s matches recall, use MDS\n", __func__);
                goto out_unlock;
        }

        /* Check to see if the layout for the given range already exists */
        lseg = pnfs_find_lseg(lo, iomode);
        if (lseg)
                goto out_unlock;

        /* if LAYOUTGET already failed once we don't try again */
        if (test_bit(lo_fail_bit(iomode), &nfsi->layout->plh_flags))
                goto out_unlock;

        if (pnfs_layoutgets_blocked(lo, NULL, 0))
                goto out_unlock;
        atomic_inc(&lo->plh_outstanding);

        get_layout_hdr(lo);
        if (list_empty(&lo->plh_segs)) {
                /* The lo must be on the clp list if there is any
                 * chance of a CB_LAYOUTRECALL(FILE) coming in.
                 */
                spin_lock(&clp->cl_lock);
                BUG_ON(!list_empty(&lo->plh_layouts));
                list_add_tail(&lo->plh_layouts, &clp->cl_layouts);
                spin_unlock(&clp->cl_lock);
        }
        spin_unlock(&ino->i_lock);

        lseg = send_layoutget(lo, ctx, iomode);
        if (!lseg) {
                spin_lock(&ino->i_lock);
                if (list_empty(&lo->plh_segs)) {
                        spin_lock(&clp->cl_lock);
                        list_del_init(&lo->plh_layouts);
                        spin_unlock(&clp->cl_lock);
                }
                spin_unlock(&ino->i_lock);
        }
        atomic_dec(&lo->plh_outstanding);
        put_layout_hdr(lo);
out:
        dprintk("%s end, state 0x%lx lseg %p\n", __func__,
                nfsi->layout->plh_flags, lseg);
        return lseg;
out_unlock:
        spin_unlock(&ino->i_lock);
        goto out;
}

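/*
 * Usage sketch (illustrative, not part of this file): an I/O path would
 * typically look up a segment here before deciding between pNFS and MDS
 * I/O, along the lines of
 *
 *      lseg = pnfs_update_layout(inode, ctx, IOMODE_READ);
 *      if (lseg == NULL)
 *              fall back to regular NFSv4 I/O through the MDS;
 *      else
 *              drive the I/O through NFS_SERVER(inode)->pnfs_curr_ld,
 *              dropping the segment reference when the I/O completes.
 */
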
int
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
        struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
        struct nfs4_layoutget_res *res = &lgp->res;
        struct pnfs_layout_segment *lseg;
        struct inode *ino = lo->plh_inode;
        struct nfs_client *clp = NFS_SERVER(ino)->nfs_client;
        int status = 0;

        /* Verify we got what we asked for.
         * Note that because the xdr parsing only accepts a single
         * element array, this can fail even if the server is behaving
         * correctly.
         */
        if (lgp->args.range.iomode > res->range.iomode ||
            res->range.offset != 0 ||
            res->range.length != NFS4_MAX_UINT64) {
                status = -EINVAL;
                goto out;
        }
        /* Inject layout blob into I/O device driver */
        lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
        if (!lseg || IS_ERR(lseg)) {
                if (!lseg)
                        status = -ENOMEM;
                else
                        status = PTR_ERR(lseg);
                dprintk("%s: Could not allocate layout: error %d\n",
                        __func__, status);
                goto out;
        }

        spin_lock(&ino->i_lock);
        if (test_bit(NFS4CLNT_LAYOUTRECALL, &clp->cl_state) ||
            test_bit(NFS_LAYOUT_BULK_RECALL, &lo->plh_flags)) {
                dprintk("%s forget reply due to recall\n", __func__);
                goto out_forget_reply;
        }

        if (pnfs_layoutgets_blocked(lo, &res->stateid, 1)) {
                dprintk("%s forget reply due to state\n", __func__);
                goto out_forget_reply;
        }
        init_lseg(lo, lseg);
        lseg->pls_range = res->range;
        *lgp->lsegpp = lseg;
        pnfs_insert_layout(lo, lseg);

        if (res->return_on_close) {
                set_bit(NFS_LSEG_ROC, &lseg->pls_flags);
                set_bit(NFS_LAYOUT_ROC, &lo->plh_flags);
        }

        /* Done processing layoutget. Set the layout stateid */
        pnfs_set_layout_stateid(lo, &res->stateid, false);
        spin_unlock(&ino->i_lock);
out:
        return status;

out_forget_reply:
        spin_unlock(&ino->i_lock);
        lseg->pls_layout = lo;
        NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
        goto out;
}

/*
 * Device ID cache. Currently supports one layout type per struct nfs_client.
 * Add layout type to the lookup key to expand to support multiple types.
 */
int
pnfs_alloc_init_deviceid_cache(struct nfs_client *clp,
                               void (*free_callback)(struct pnfs_deviceid_node *))
{
        struct pnfs_deviceid_cache *c;

        c = kzalloc(sizeof(struct pnfs_deviceid_cache), GFP_KERNEL);
        if (!c)
                return -ENOMEM;
        spin_lock(&clp->cl_lock);
        if (clp->cl_devid_cache != NULL) {
                atomic_inc(&clp->cl_devid_cache->dc_ref);
                dprintk("%s [kref [%d]]\n", __func__,
                        atomic_read(&clp->cl_devid_cache->dc_ref));
                kfree(c);
        } else {
                /* kzalloc initializes hlists */
                spin_lock_init(&c->dc_lock);
                atomic_set(&c->dc_ref, 1);
                c->dc_free_callback = free_callback;
                clp->cl_devid_cache = c;
                dprintk("%s [new]\n", __func__);
        }
        spin_unlock(&clp->cl_lock);
        return 0;
}
EXPORT_SYMBOL_GPL(pnfs_alloc_init_deviceid_cache);

/*
 * Called from pnfs_layoutdriver_type->free_lseg
 * last layout segment reference frees deviceid
 */
void
pnfs_put_deviceid(struct pnfs_deviceid_cache *c,
                  struct pnfs_deviceid_node *devid)
{
        struct nfs4_deviceid *id = &devid->de_id;
        struct pnfs_deviceid_node *d;
        struct hlist_node *n;
        long h = nfs4_deviceid_hash(id);

        dprintk("%s [%d]\n", __func__, atomic_read(&devid->de_ref));
        if (!atomic_dec_and_lock(&devid->de_ref, &c->dc_lock))
                return;

        hlist_for_each_entry_rcu(d, n, &c->dc_deviceids[h], de_node)
                if (!memcmp(&d->de_id, id, sizeof(*id))) {
                        hlist_del_rcu(&d->de_node);
                        spin_unlock(&c->dc_lock);
                        synchronize_rcu();
                        c->dc_free_callback(devid);
                        return;
                }
        spin_unlock(&c->dc_lock);
        /* Why wasn't it found in the list? */
        BUG();
}
EXPORT_SYMBOL_GPL(pnfs_put_deviceid);

/* Find and reference a deviceid */
struct pnfs_deviceid_node *
pnfs_find_get_deviceid(struct pnfs_deviceid_cache *c, struct nfs4_deviceid *id)
{
        struct pnfs_deviceid_node *d;
        struct hlist_node *n;
        long hash = nfs4_deviceid_hash(id);

        dprintk("--> %s hash %ld\n", __func__, hash);
        rcu_read_lock();
        hlist_for_each_entry_rcu(d, n, &c->dc_deviceids[hash], de_node) {
                if (!memcmp(&d->de_id, id, sizeof(*id))) {
                        if (!atomic_inc_not_zero(&d->de_ref))
                                goto fail;
                        rcu_read_unlock();
                        return d;
                }
        }
fail:
        rcu_read_unlock();
        return NULL;
}
EXPORT_SYMBOL_GPL(pnfs_find_get_deviceid);

/*
 * Add a deviceid to the cache.
 * GETDEVICEINFOs for same deviceid can race. If deviceid is found, discard new
 */
struct pnfs_deviceid_node *
pnfs_add_deviceid(struct pnfs_deviceid_cache *c, struct pnfs_deviceid_node *new)
{
        struct pnfs_deviceid_node *d;
        long hash = nfs4_deviceid_hash(&new->de_id);

        dprintk("--> %s hash %ld\n", __func__, hash);
        spin_lock(&c->dc_lock);
        d = pnfs_find_get_deviceid(c, &new->de_id);
        if (d) {
                spin_unlock(&c->dc_lock);
                dprintk("%s [discard]\n", __func__);
                c->dc_free_callback(new);
                return d;
        }
        INIT_HLIST_NODE(&new->de_node);
        atomic_set(&new->de_ref, 1);
        hlist_add_head_rcu(&new->de_node, &c->dc_deviceids[hash]);
        spin_unlock(&c->dc_lock);
        dprintk("%s [new]\n", __func__);
        return new;
}
EXPORT_SYMBOL_GPL(pnfs_add_deviceid);

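/*
 * Usage sketch (illustrative, not part of this file): a layout driver
 * resolving a deviceid would probe the cache first and only add a freshly
 * fetched node on a miss; pnfs_add_deviceid() resolves the GETDEVICEINFO
 * race by handing back whichever node won.  mylayout_getdeviceinfo() is a
 * hypothetical helper that issues GETDEVICEINFO and allocates the node.
 *
 *      d = pnfs_find_get_deviceid(clp->cl_devid_cache, id);
 *      if (!d) {
 *              new = mylayout_getdeviceinfo(server, id);
 *              if (new)
 *                      d = pnfs_add_deviceid(clp->cl_devid_cache, new);
 *      }
 *      ...
 *      pnfs_put_deviceid(clp->cl_devid_cache, d);  (from ->free_lseg)
 */
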
void
pnfs_put_deviceid_cache(struct nfs_client *clp)
{
        struct pnfs_deviceid_cache *local = clp->cl_devid_cache;

        dprintk("--> %s ({%d})\n", __func__, atomic_read(&local->dc_ref));
        if (atomic_dec_and_lock(&local->dc_ref, &clp->cl_lock)) {
                int i;
                /* Verify cache is empty */
                for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++)
                        BUG_ON(!hlist_empty(&local->dc_deviceids[i]));
                clp->cl_devid_cache = NULL;
                spin_unlock(&clp->cl_lock);
                kfree(local);
        }
}
EXPORT_SYMBOL_GPL(pnfs_put_deviceid_cache);