/*
 * pNFS functions to call and manage layout drivers.
 *
 * Copyright (c) 2002 [year of first publication]
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement. The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */

#include <linux/nfs_fs.h>
#include "internal.h"
#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS

/* Locking:
 *
 * pnfs_spinlock:
 *      protects pnfs_modules_tbl.
 */
static DEFINE_SPINLOCK(pnfs_spinlock);

/*
 * pnfs_modules_tbl holds all pnfs modules
 */
static LIST_HEAD(pnfs_modules_tbl);

/* Return the registered pnfs layout driver module matching given id */
static struct pnfs_layoutdriver_type *
find_pnfs_driver_locked(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	list_for_each_entry(local, &pnfs_modules_tbl, pnfs_tblid)
		if (local->id == id)
			goto out;
	local = NULL;
out:
	dprintk("%s: Searching for id %u, found %p\n", __func__, id, local);
	return local;
}

static struct pnfs_layoutdriver_type *
find_pnfs_driver(u32 id)
{
	struct pnfs_layoutdriver_type *local;

	spin_lock(&pnfs_spinlock);
	local = find_pnfs_driver_locked(id);
	spin_unlock(&pnfs_spinlock);
	return local;
}

void
unset_pnfs_layoutdriver(struct nfs_server *nfss)
{
	if (nfss->pnfs_curr_ld) {
		nfss->pnfs_curr_ld->clear_layoutdriver(nfss);
		module_put(nfss->pnfs_curr_ld->owner);
	}
	nfss->pnfs_curr_ld = NULL;
}

/*
 * Try to set the server's pnfs module to the pnfs layout type specified by id.
 * Currently only one pNFS layout driver per filesystem is supported.
 *
 * @id layout type. Zero (illegal layout type) indicates pNFS not in use.
 */
void
set_pnfs_layoutdriver(struct nfs_server *server, u32 id)
{
	struct pnfs_layoutdriver_type *ld_type = NULL;

	if (id == 0)
		goto out_no_driver;
	if (!(server->nfs_client->cl_exchange_flags &
		 (EXCHGID4_FLAG_USE_NON_PNFS | EXCHGID4_FLAG_USE_PNFS_MDS))) {
		printk(KERN_ERR "%s: id %u cl_exchange_flags 0x%x\n", __func__,
		       id, server->nfs_client->cl_exchange_flags);
		goto out_no_driver;
	}
	ld_type = find_pnfs_driver(id);
	if (!ld_type) {
		request_module("%s-%u", LAYOUT_NFSV4_1_MODULE_PREFIX, id);
		ld_type = find_pnfs_driver(id);
		if (!ld_type) {
			dprintk("%s: No pNFS module found for %u.\n",
				__func__, id);
			goto out_no_driver;
		}
	}
	if (!try_module_get(ld_type->owner)) {
		dprintk("%s: Could not grab reference on module\n", __func__);
		goto out_no_driver;
	}
	server->pnfs_curr_ld = ld_type;
	if (ld_type->set_layoutdriver(server)) {
		printk(KERN_ERR
		       "%s: Error initializing mount point for layout driver %u.\n",
		       __func__, id);
		module_put(ld_type->owner);
		goto out_no_driver;
	}
	dprintk("%s: pNFS module for %u set\n", __func__, id);
	return;

out_no_driver:
	dprintk("%s: Using NFSv4 I/O\n", __func__);
	server->pnfs_curr_ld = NULL;
}
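
/*
 * Note on autoloading (a sketch of the convention, not enforced here):
 * LAYOUT_NFSV4_1_MODULE_PREFIX expands to "nfs-layouttype4", so for the
 * RFC 5661 files layout (layout type 1) the request_module() call above
 * asks for "nfs-layouttype4-1".  A layout driver makes itself
 * autoloadable by declaring a matching alias, e.g.
 * MODULE_ALIAS("nfs-layouttype4-1").
 */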

int
pnfs_register_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	int status = -EINVAL;
	struct pnfs_layoutdriver_type *tmp;

	if (ld_type->id == 0) {
		printk(KERN_ERR "%s id 0 is reserved\n", __func__);
		return status;
	}
	if (!ld_type->alloc_lseg || !ld_type->free_lseg) {
		printk(KERN_ERR "%s Layout driver must provide "
		       "alloc_lseg and free_lseg.\n", __func__);
		return status;
	}

	spin_lock(&pnfs_spinlock);
	tmp = find_pnfs_driver_locked(ld_type->id);
	if (!tmp) {
		list_add(&ld_type->pnfs_tblid, &pnfs_modules_tbl);
		status = 0;
		dprintk("%s Registering id:%u name:%s\n", __func__, ld_type->id,
			ld_type->name);
	} else {
		printk(KERN_ERR "%s Module with id %u already loaded!\n",
		       __func__, ld_type->id);
	}
	spin_unlock(&pnfs_spinlock);

	return status;
}
EXPORT_SYMBOL_GPL(pnfs_register_layoutdriver);
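
/*
 * A minimal sketch of how a layout driver registers itself, assuming a
 * hypothetical "example" driver (the callback fields are the ones this
 * file dereferences; everything named example_* is illustrative):
 *
 *	static struct pnfs_layoutdriver_type examplelayout_type = {
 *		.id			= LAYOUT_NFSV4_1_FILES,
 *		.name			= "LAYOUT_EXAMPLE",
 *		.owner			= THIS_MODULE,
 *		.set_layoutdriver	= example_set_layoutdriver,
 *		.clear_layoutdriver	= example_clear_layoutdriver,
 *		.alloc_lseg		= example_alloc_lseg,
 *		.free_lseg		= example_free_lseg,
 *	};
 *
 *	static int __init examplelayout_init(void)
 *	{
 *		return pnfs_register_layoutdriver(&examplelayout_type);
 *	}
 *
 *	static void __exit examplelayout_exit(void)
 *	{
 *		pnfs_unregister_layoutdriver(&examplelayout_type);
 *	}
 *
 * set_layoutdriver and clear_layoutdriver are included because
 * set_pnfs_layoutdriver() and unset_pnfs_layoutdriver() above call them
 * unconditionally.
 */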

void
pnfs_unregister_layoutdriver(struct pnfs_layoutdriver_type *ld_type)
{
	dprintk("%s Deregistering id:%u\n", __func__, ld_type->id);
	spin_lock(&pnfs_spinlock);
	list_del(&ld_type->pnfs_tblid);
	spin_unlock(&pnfs_spinlock);
}
EXPORT_SYMBOL_GPL(pnfs_unregister_layoutdriver);

/*
 * pNFS client layout cache
 */

static void
get_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	assert_spin_locked(&lo->inode->i_lock);
	lo->refcount++;
}

static void
put_layout_hdr_locked(struct pnfs_layout_hdr *lo)
{
	assert_spin_locked(&lo->inode->i_lock);
	BUG_ON(lo->refcount == 0);

	lo->refcount--;
	if (!lo->refcount) {
		dprintk("%s: freeing layout cache %p\n", __func__, lo);
		BUG_ON(!list_empty(&lo->layouts));
		NFS_I(lo->inode)->layout = NULL;
		kfree(lo);
	}
}

void
put_layout_hdr(struct inode *inode)
{
	spin_lock(&inode->i_lock);
	put_layout_hdr_locked(NFS_I(inode)->layout);
	spin_unlock(&inode->i_lock);
}

static void
init_lseg(struct pnfs_layout_hdr *lo, struct pnfs_layout_segment *lseg)
{
	INIT_LIST_HEAD(&lseg->fi_list);
	kref_init(&lseg->kref);
	lseg->layout = lo;
}

/* Called without i_lock held, as the free_lseg call may sleep */
static void
destroy_lseg(struct kref *kref)
{
	struct pnfs_layout_segment *lseg =
		container_of(kref, struct pnfs_layout_segment, kref);
	struct inode *ino = lseg->layout->inode;

	dprintk("--> %s\n", __func__);
	NFS_SERVER(ino)->pnfs_curr_ld->free_lseg(lseg);
	/* Matched by get_layout_hdr_locked in pnfs_insert_layout */
	put_layout_hdr(ino);
}

static void
put_lseg(struct pnfs_layout_segment *lseg)
{
	if (!lseg)
		return;

	dprintk("%s: lseg %p ref %d\n", __func__, lseg,
		atomic_read(&lseg->kref.refcount));
	kref_put(&lseg->kref, destroy_lseg);
}

static void
pnfs_clear_lseg_list(struct pnfs_layout_hdr *lo, struct list_head *tmp_list)
{
	struct pnfs_layout_segment *lseg, *next;
	struct nfs_client *clp;

	dprintk("%s:Begin lo %p\n", __func__, lo);

	assert_spin_locked(&lo->inode->i_lock);
	list_for_each_entry_safe(lseg, next, &lo->segs, fi_list) {
		dprintk("%s: freeing lseg %p\n", __func__, lseg);
		list_move(&lseg->fi_list, tmp_list);
	}
	clp = NFS_SERVER(lo->inode)->nfs_client;
	spin_lock(&clp->cl_lock);
	/* List does not take a reference, so no need for put here */
	list_del_init(&lo->layouts);
	spin_unlock(&clp->cl_lock);
	write_seqlock(&lo->seqlock);
	clear_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
	write_sequnlock(&lo->seqlock);

	dprintk("%s:Return\n", __func__);
}

static void
pnfs_free_lseg_list(struct list_head *tmp_list)
{
	struct pnfs_layout_segment *lseg;

	while (!list_empty(tmp_list)) {
		lseg = list_entry(tmp_list->next, struct pnfs_layout_segment,
				  fi_list);
		dprintk("%s calling put_lseg on %p\n", __func__, lseg);
		list_del(&lseg->fi_list);
		put_lseg(lseg);
	}
}

void
pnfs_destroy_layout(struct nfs_inode *nfsi)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&nfsi->vfs_inode.i_lock);
	lo = nfsi->layout;
	if (lo) {
		pnfs_clear_lseg_list(lo, &tmp_list);
		/* Matched by refcount set to 1 in alloc_init_layout_hdr */
		put_layout_hdr_locked(lo);
	}
	spin_unlock(&nfsi->vfs_inode.i_lock);
	pnfs_free_lseg_list(&tmp_list);
}

/*
 * Called by the state manager to remove all layouts established under an
 * expired lease.
 */
void
pnfs_destroy_all_layouts(struct nfs_client *clp)
{
	struct pnfs_layout_hdr *lo;
	LIST_HEAD(tmp_list);

	spin_lock(&clp->cl_lock);
	list_splice_init(&clp->cl_layouts, &tmp_list);
	spin_unlock(&clp->cl_lock);

	while (!list_empty(&tmp_list)) {
		lo = list_entry(tmp_list.next, struct pnfs_layout_hdr,
				layouts);
		dprintk("%s freeing layout for inode %lu\n", __func__,
			lo->inode->i_ino);
		pnfs_destroy_layout(NFS_I(lo->inode));
	}
}

/* update lo->stateid with new if it is more recent
 *
 * lo->stateid could be the open stateid, in which case we just use what
 * was given.
 */
static void
pnfs_set_layout_stateid(struct pnfs_layout_hdr *lo,
			const nfs4_stateid *new)
{
	nfs4_stateid *old = &lo->stateid;
	bool overwrite = false;

	write_seqlock(&lo->seqlock);
	if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state) ||
	    memcmp(old->stateid.other, new->stateid.other,
		   sizeof(new->stateid.other)))
		overwrite = true;
	else {
		u32 oldseq, newseq;

		oldseq = be32_to_cpu(old->stateid.seqid);
		newseq = be32_to_cpu(new->stateid.seqid);
		if ((int)(newseq - oldseq) > 0)
			overwrite = true;
	}
	if (overwrite)
		memcpy(&old->stateid, &new->stateid, sizeof(new->stateid));
	write_sequnlock(&lo->seqlock);
}
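
/*
 * Worked example of the signed-difference test above: it is serial
 * number arithmetic, so seqid wraparound is handled.  With
 * oldseq == 0xffffffff and newseq == 0, (int)(newseq - oldseq) == 1 > 0,
 * so the incoming stateid is still recognized as more recent and is
 * allowed to overwrite the cached one.
 */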

static void
pnfs_layout_from_open_stateid(struct pnfs_layout_hdr *lo,
			      struct nfs4_state *state)
{
	int seq;

	dprintk("--> %s\n", __func__);
	write_seqlock(&lo->seqlock);
	do {
		seq = read_seqbegin(&state->seqlock);
		memcpy(lo->stateid.data, state->stateid.data,
		       sizeof(state->stateid.data));
	} while (read_seqretry(&state->seqlock, seq));
	set_bit(NFS_LAYOUT_STATEID_SET, &lo->state);
	write_sequnlock(&lo->seqlock);
	dprintk("<-- %s\n", __func__);
}

void
pnfs_get_layout_stateid(nfs4_stateid *dst, struct pnfs_layout_hdr *lo,
			struct nfs4_state *open_state)
{
	int seq;

	dprintk("--> %s\n", __func__);
	do {
		seq = read_seqbegin(&lo->seqlock);
		if (!test_bit(NFS_LAYOUT_STATEID_SET, &lo->state)) {
			/* This will trigger retry of the read */
			pnfs_layout_from_open_stateid(lo, open_state);
		} else
			memcpy(dst->data, lo->stateid.data,
			       sizeof(lo->stateid.data));
	} while (read_seqretry(&lo->seqlock, seq));
	dprintk("<-- %s\n", __func__);
}

/*
 * Get layout from server.
 * For now, assume that whole file layouts are requested.
 * arg->offset: 0
 * arg->length: all ones
 */
static struct pnfs_layout_segment *
send_layoutget(struct pnfs_layout_hdr *lo,
	       struct nfs_open_context *ctx,
	       u32 iomode)
{
	struct inode *ino = lo->inode;
	struct nfs_server *server = NFS_SERVER(ino);
	struct nfs4_layoutget *lgp;
	struct pnfs_layout_segment *lseg = NULL;

	dprintk("--> %s\n", __func__);

	BUG_ON(ctx == NULL);
	lgp = kzalloc(sizeof(*lgp), GFP_KERNEL);
	if (lgp == NULL) {
		put_layout_hdr(lo->inode);
		return NULL;
	}
	lgp->args.minlength = NFS4_MAX_UINT64;
	lgp->args.maxcount = PNFS_LAYOUT_MAXSIZE;
	lgp->args.range.iomode = iomode;
	lgp->args.range.offset = 0;
	lgp->args.range.length = NFS4_MAX_UINT64;
	lgp->args.type = server->pnfs_curr_ld->id;
	lgp->args.inode = ino;
	lgp->args.ctx = get_nfs_open_context(ctx);
	lgp->lsegpp = &lseg;

	/* Synchronously retrieve layout information from server and
	 * store in lseg.
	 */
	nfs4_proc_layoutget(lgp);
	if (!lseg) {
		/* remember that LAYOUTGET failed and suspend trying */
		set_bit(lo_fail_bit(iomode), &lo->state);
	}
	return lseg;
}

/*
 * Compare two layout segments for sorting into layout cache.
 * We want to preferentially return RW over RO layouts, so ensure those
 * are seen first.
 */
static s64
cmp_layout(u32 iomode1, u32 iomode2)
{
	/* read > read/write */
	return (int)(iomode2 == IOMODE_READ) - (int)(iomode1 == IOMODE_READ);
}
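
/*
 * Worked example: with an RW lseg already in the list and a new READ
 * lseg arriving, pnfs_insert_layout() below evaluates
 * cmp_layout(IOMODE_RW, IOMODE_READ) = 1 - 0 = 1 > 0 and keeps walking,
 * so the READ lseg lands after the RW one and RW layouts stay at the
 * head of lo->segs.
 */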

static void
pnfs_insert_layout(struct pnfs_layout_hdr *lo,
		   struct pnfs_layout_segment *lseg)
{
	struct pnfs_layout_segment *lp;
	int found = 0;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->inode->i_lock);
	if (list_empty(&lo->segs)) {
		struct nfs_client *clp = NFS_SERVER(lo->inode)->nfs_client;

		spin_lock(&clp->cl_lock);
		BUG_ON(!list_empty(&lo->layouts));
		list_add_tail(&lo->layouts, &clp->cl_layouts);
		spin_unlock(&clp->cl_lock);
	}
	list_for_each_entry(lp, &lo->segs, fi_list) {
		if (cmp_layout(lp->range.iomode, lseg->range.iomode) > 0)
			continue;
		list_add_tail(&lseg->fi_list, &lp->fi_list);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu before "
			"lp %p iomode %d offset %llu length %llu\n",
			__func__, lseg, lseg->range.iomode,
			lseg->range.offset, lseg->range.length,
			lp, lp->range.iomode, lp->range.offset,
			lp->range.length);
		found = 1;
		break;
	}
	if (!found) {
		list_add_tail(&lseg->fi_list, &lo->segs);
		dprintk("%s: inserted lseg %p "
			"iomode %d offset %llu length %llu at tail\n",
			__func__, lseg, lseg->range.iomode,
			lseg->range.offset, lseg->range.length);
	}
	get_layout_hdr_locked(lo);

	dprintk("%s:Return\n", __func__);
}

static struct pnfs_layout_hdr *
alloc_init_layout_hdr(struct inode *ino)
{
	struct pnfs_layout_hdr *lo;

	lo = kzalloc(sizeof(struct pnfs_layout_hdr), GFP_KERNEL);
	if (!lo)
		return NULL;
	lo->refcount = 1;
	INIT_LIST_HEAD(&lo->layouts);
	INIT_LIST_HEAD(&lo->segs);
	seqlock_init(&lo->seqlock);
	lo->inode = ino;
	return lo;
}

static struct pnfs_layout_hdr *
pnfs_find_alloc_layout(struct inode *ino)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *new = NULL;

	dprintk("%s Begin ino=%p layout=%p\n", __func__, ino, nfsi->layout);

	assert_spin_locked(&ino->i_lock);
	if (nfsi->layout)
		return nfsi->layout;

	spin_unlock(&ino->i_lock);
	new = alloc_init_layout_hdr(ino);
	spin_lock(&ino->i_lock);

	if (likely(nfsi->layout == NULL))	/* Won the race? */
		nfsi->layout = new;
	else
		kfree(new);
	return nfsi->layout;
}
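
/*
 * Note the pattern above: i_lock is dropped across the sleeping
 * GFP_KERNEL allocation in alloc_init_layout_hdr() and retaken
 * afterwards, so two tasks can race to allocate the header; the loser
 * simply kfree()s its copy and both callers return whichever header won
 * the race.
 */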

/*
 * iomode matching rules:
 * iomode	lseg	match
 * ------	----	-----
 * ANY		READ	true
 * ANY		RW	true
 * RW		READ	false
 * RW		RW	true
 * READ		READ	true
 * READ		RW	true
 */
static int
is_matching_lseg(struct pnfs_layout_segment *lseg, u32 iomode)
{
	return (iomode != IOMODE_RW || lseg->range.iomode == IOMODE_RW);
}

/*
 * lookup range in layout
 */
static struct pnfs_layout_segment *
pnfs_has_layout(struct pnfs_layout_hdr *lo, u32 iomode)
{
	struct pnfs_layout_segment *lseg, *ret = NULL;

	dprintk("%s:Begin\n", __func__);

	assert_spin_locked(&lo->inode->i_lock);
	list_for_each_entry(lseg, &lo->segs, fi_list) {
		if (is_matching_lseg(lseg, iomode)) {
			ret = lseg;
			break;
		}
		if (cmp_layout(iomode, lseg->range.iomode) > 0)
			break;
	}

	dprintk("%s:Return lseg %p ref %d\n",
		__func__, ret, ret ? atomic_read(&ret->kref.refcount) : 0);
	return ret;
}

/*
 * Layout segment is retrieved from the server if not cached.
 * The appropriate layout segment is referenced and returned to the caller.
 */
struct pnfs_layout_segment *
pnfs_update_layout(struct inode *ino,
		   struct nfs_open_context *ctx,
		   enum pnfs_iomode iomode)
{
	struct nfs_inode *nfsi = NFS_I(ino);
	struct pnfs_layout_hdr *lo;
	struct pnfs_layout_segment *lseg = NULL;

	if (!pnfs_enabled_sb(NFS_SERVER(ino)))
		return NULL;
	spin_lock(&ino->i_lock);
	lo = pnfs_find_alloc_layout(ino);
	if (lo == NULL) {
		dprintk("%s ERROR: can't get pnfs_layout_hdr\n", __func__);
		goto out_unlock;
	}

	/* Check to see if the layout for the given range already exists */
	lseg = pnfs_has_layout(lo, iomode);
	if (lseg) {
		dprintk("%s: Using cached lseg %p for iomode %d\n",
			__func__, lseg, iomode);
		goto out_unlock;
	}

	/* if LAYOUTGET already failed once we don't try again */
	if (test_bit(lo_fail_bit(iomode), &nfsi->layout->state))
		goto out_unlock;

	get_layout_hdr_locked(lo); /* Matched in nfs4_layoutget_release */
	spin_unlock(&ino->i_lock);

	lseg = send_layoutget(lo, ctx, iomode);
out:
	dprintk("%s end, state 0x%lx lseg %p\n", __func__,
		nfsi->layout->state, lseg);
	return lseg;
out_unlock:
	spin_unlock(&ino->i_lock);
	goto out;
}
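
/*
 * A minimal sketch of a caller, assuming a hypothetical read path
 * (do_read_through_mds() and do_pnfs_read() are illustrative; only
 * pnfs_update_layout() is real):
 *
 *	struct pnfs_layout_segment *lseg;
 *
 *	lseg = pnfs_update_layout(inode, ctx, IOMODE_READ);
 *	if (!lseg)
 *		return do_read_through_mds(inode, ctx);
 *	return do_pnfs_read(inode, ctx, lseg);
 *
 * A NULL return simply means "no pNFS layout for this range"; the
 * caller is expected to fall back to ordinary NFSv4 I/O through the MDS.
 */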

int
pnfs_layout_process(struct nfs4_layoutget *lgp)
{
	struct pnfs_layout_hdr *lo = NFS_I(lgp->args.inode)->layout;
	struct nfs4_layoutget_res *res = &lgp->res;
	struct pnfs_layout_segment *lseg;
	struct inode *ino = lo->inode;
	int status = 0;

	/* Inject layout blob into I/O device driver */
	lseg = NFS_SERVER(ino)->pnfs_curr_ld->alloc_lseg(lo, res);
	if (!lseg || IS_ERR(lseg)) {
		if (!lseg)
			status = -ENOMEM;
		else
			status = PTR_ERR(lseg);
		dprintk("%s: Could not allocate layout: error %d\n",
			__func__, status);
		goto out;
	}

	spin_lock(&ino->i_lock);
	init_lseg(lo, lseg);
	lseg->range = res->range;
	*lgp->lsegpp = lseg;
	pnfs_insert_layout(lo, lseg);

	/* Done processing layoutget. Set the layout stateid */
	pnfs_set_layout_stateid(lo, &res->stateid);
	spin_unlock(&ino->i_lock);
out:
	return status;
}

/*
 * Device ID cache. Currently supports one layout type per struct nfs_client.
 * Add layout type to the lookup key to expand to support multiple types.
 */
int
pnfs_alloc_init_deviceid_cache(struct nfs_client *clp,
			       void (*free_callback)(struct pnfs_deviceid_node *))
{
	struct pnfs_deviceid_cache *c;

	c = kzalloc(sizeof(struct pnfs_deviceid_cache), GFP_KERNEL);
	if (!c)
		return -ENOMEM;
	spin_lock(&clp->cl_lock);
	if (clp->cl_devid_cache != NULL) {
		atomic_inc(&clp->cl_devid_cache->dc_ref);
		dprintk("%s [kref [%d]]\n", __func__,
			atomic_read(&clp->cl_devid_cache->dc_ref));
		kfree(c);
	} else {
		/* kzalloc initializes hlists */
		spin_lock_init(&c->dc_lock);
		atomic_set(&c->dc_ref, 1);
		c->dc_free_callback = free_callback;
		clp->cl_devid_cache = c;
		dprintk("%s [new]\n", __func__);
	}
	spin_unlock(&clp->cl_lock);
	return 0;
}
EXPORT_SYMBOL_GPL(pnfs_alloc_init_deviceid_cache);

/*
 * Called from pnfs_layoutdriver_type->free_lseg;
 * the last layout segment reference frees the deviceid.
 */
void
pnfs_put_deviceid(struct pnfs_deviceid_cache *c,
		  struct pnfs_deviceid_node *devid)
{
	struct nfs4_deviceid *id = &devid->de_id;
	struct pnfs_deviceid_node *d;
	struct hlist_node *n;
	long h = nfs4_deviceid_hash(id);

	dprintk("%s [%d]\n", __func__, atomic_read(&devid->de_ref));
	if (!atomic_dec_and_lock(&devid->de_ref, &c->dc_lock))
		return;

	hlist_for_each_entry_rcu(d, n, &c->dc_deviceids[h], de_node)
		if (!memcmp(&d->de_id, id, sizeof(*id))) {
			hlist_del_rcu(&d->de_node);
			spin_unlock(&c->dc_lock);
			synchronize_rcu();
			c->dc_free_callback(devid);
			return;
		}
	spin_unlock(&c->dc_lock);
	/* Why wasn't it found in the list? */
	BUG();
}
EXPORT_SYMBOL_GPL(pnfs_put_deviceid);

/* Find and reference a deviceid */
struct pnfs_deviceid_node *
pnfs_find_get_deviceid(struct pnfs_deviceid_cache *c, struct nfs4_deviceid *id)
{
	struct pnfs_deviceid_node *d;
	struct hlist_node *n;
	long hash = nfs4_deviceid_hash(id);

	dprintk("--> %s hash %ld\n", __func__, hash);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, n, &c->dc_deviceids[hash], de_node) {
		if (!memcmp(&d->de_id, id, sizeof(*id))) {
			/* A zero refcount means the node is being freed */
			if (!atomic_inc_not_zero(&d->de_ref))
				goto fail;
			rcu_read_unlock();
			return d;
		}
	}
fail:
	rcu_read_unlock();
	return NULL;
}
EXPORT_SYMBOL_GPL(pnfs_find_get_deviceid);

/*
 * Add a deviceid to the cache.
 * GETDEVICEINFOs for the same deviceid can race; if the deviceid is
 * already present, the new node is discarded.
 */
struct pnfs_deviceid_node *
pnfs_add_deviceid(struct pnfs_deviceid_cache *c, struct pnfs_deviceid_node *new)
{
	struct pnfs_deviceid_node *d;
	long hash = nfs4_deviceid_hash(&new->de_id);

	dprintk("--> %s hash %ld\n", __func__, hash);
	spin_lock(&c->dc_lock);
	d = pnfs_find_get_deviceid(c, &new->de_id);
	if (d) {
		spin_unlock(&c->dc_lock);
		dprintk("%s [discard]\n", __func__);
		c->dc_free_callback(new);
		return d;
	}
	INIT_HLIST_NODE(&new->de_node);
	atomic_set(&new->de_ref, 1);
	hlist_add_head_rcu(&new->de_node, &c->dc_deviceids[hash]);
	spin_unlock(&c->dc_lock);
	dprintk("%s [new]\n", __func__);
	return new;
}
EXPORT_SYMBOL_GPL(pnfs_add_deviceid);
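
/*
 * A minimal sketch of expected layout-driver usage, assuming a
 * hypothetical example_getdeviceinfo() that sends GETDEVICEINFO and
 * decodes the reply into a node (only the pnfs_* calls are the
 * functions above): look the deviceid up first, and only on a cache
 * miss fetch and decode it, then offer the new node to the cache, which
 * resolves racing lookups by keeping the copy inserted first.
 *
 *	d = pnfs_find_get_deviceid(clp->cl_devid_cache, id);
 *	if (d == NULL) {
 *		struct pnfs_deviceid_node *new;
 *
 *		new = example_getdeviceinfo(server, id);
 *		if (new == NULL)
 *			return NULL;
 *		d = pnfs_add_deviceid(clp->cl_devid_cache, new);
 *	}
 *	return d;
 */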

void
pnfs_put_deviceid_cache(struct nfs_client *clp)
{
	struct pnfs_deviceid_cache *local = clp->cl_devid_cache;

	dprintk("--> %s cl_devid_cache %p\n", __func__, clp->cl_devid_cache);
	if (atomic_dec_and_lock(&local->dc_ref, &clp->cl_lock)) {
		int i;
		/* Verify cache is empty */
		for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i++)
			BUG_ON(!hlist_empty(&local->dc_deviceids[i]));
		clp->cl_devid_cache = NULL;
		spin_unlock(&clp->cl_lock);
		kfree(local);
	}
}
EXPORT_SYMBOL_GPL(pnfs_put_deviceid_cache);