pnfs_dev.c
/*
 * Device operations for the pnfs client.
 *
 * Copyright (c) 2002
 * The Regents of the University of Michigan
 * All Rights Reserved
 *
 * Dean Hildebrand <dhildebz@umich.edu>
 * Garth Goodson   <Garth.Goodson@netapp.com>
 *
 * Permission is granted to use, copy, create derivative works, and
 * redistribute this software and such derivative works for any purpose,
 * so long as the name of the University of Michigan is not used in
 * any advertising or publicity pertaining to the use or distribution
 * of this software without specific, written prior authorization. If
 * the above copyright notice or any other identification of the
 * University of Michigan is included in any copy of any portion of
 * this software, then the disclaimer below must also be included.
 *
 * This software is provided as is, without representation or warranty
 * of any kind either express or implied, including without limitation
 * the implied warranties of merchantability, fitness for a particular
 * purpose, or noninfringement.  The Regents of the University of
 * Michigan shall not be liable for any damages, including special,
 * indirect, incidental, or consequential damages, with respect to any
 * claim arising out of or in connection with the use of the software,
 * even if it has been or is hereafter advised of the possibility of
 * such damages.
 */

#include "pnfs.h"

#define NFSDBG_FACILITY		NFSDBG_PNFS
/*
 * Device ID RCU cache. A device ID is unique per server and layout type.
 */
#define NFS4_DEVICE_ID_HASH_BITS	5
#define NFS4_DEVICE_ID_HASH_SIZE	(1 << NFS4_DEVICE_ID_HASH_BITS)
#define NFS4_DEVICE_ID_HASH_MASK	(NFS4_DEVICE_ID_HASH_SIZE - 1)

static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
static DEFINE_SPINLOCK(nfs4_deviceid_lock);

void
nfs4_print_deviceid(const struct nfs4_deviceid *id)
{
	u32 *p = (u32 *)id;

	dprintk("%s: device id= [%x%x%x%x]\n", __func__,
		p[0], p[1], p[2], p[3]);
}
EXPORT_SYMBOL_GPL(nfs4_print_deviceid);

static inline u32
nfs4_deviceid_hash(const struct nfs4_deviceid *id)
{
	unsigned char *cptr = (unsigned char *)id->data;
	unsigned int nbytes = NFS4_DEVICEID4_SIZE;
	u32 x = 0;

	while (nbytes--) {
		x *= 37;
		x += *cptr++;
	}
	return x & NFS4_DEVICE_ID_HASH_MASK;
}
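
/*
 * Illustrative note (not in the original source): nfs4_deviceid_hash() folds
 * the NFS4_DEVICEID4_SIZE (16) opaque bytes of a deviceid with a base-37
 * rolling hash and keeps only the low NFS4_DEVICE_ID_HASH_BITS bits, so every
 * id maps to one of 32 hlist buckets.  For a hypothetical id whose bytes are
 * all 0x01, the loop computes
 *
 *	x = ((...((0 * 37 + 1) * 37 + 1)...) * 37 + 1)	(16 iterations)
 *
 * and the bucket index is x & NFS4_DEVICE_ID_HASH_MASK, i.e. x % 32.
 * Collisions are expected and are resolved by the per-bucket list walk in
 * _lookup_deviceid() below.
 */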

static struct nfs4_deviceid_node *
_lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
		 const struct nfs_client *clp, const struct nfs4_deviceid *id,
		 long hash)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;

	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
		if (d->ld == ld && d->nfs_client == clp &&
		    !memcmp(&d->deviceid, id, sizeof(*id))) {
			if (atomic_read(&d->ref))
				return d;
			else
				continue;
		}
	return NULL;
}

/*
 * Lookup a deviceid in cache and take a reference on it if found
 *
 * @ld  layout driver the deviceid belongs to
 * @clp nfs_client associated with deviceid
 * @id  deviceid to look up
 */
struct nfs4_deviceid_node *
_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
		   const struct nfs_client *clp, const struct nfs4_deviceid *id,
		   long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, hash);
	if (d != NULL)
		atomic_inc(&d->ref);
	rcu_read_unlock();
	return d;
}

struct nfs4_deviceid_node *
nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
		       const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	return _find_get_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
}
EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);
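
/*
 * Illustrative usage (not part of the original file): a layout driver that
 * has decoded a deviceid from a layout would typically probe the cache before
 * going over the wire with GETDEVICEINFO.  "server", "devid" and
 * "use_device()" are hypothetical names for this sketch; only the nfs4_*
 * calls are APIs defined in this file.
 *
 *	struct nfs4_deviceid_node *node;
 *
 *	node = nfs4_find_get_deviceid(server->pnfs_curr_ld,
 *				      server->nfs_client, &devid);
 *	if (node) {
 *		use_device(node);		(cache hit, ref already taken)
 *		nfs4_put_deviceid_node(node);	(drop our reference)
 *	} else {
 *		(cache miss: fetch the device description and insert it,
 *		 see nfs4_insert_deviceid_node() below)
 *	}
 */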

/*
 * Remove a deviceid from cache
 *
 * @ld  layout driver the deviceid belongs to
 * @clp nfs_client associated with deviceid
 * @id  the deviceid to unhash
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
		     const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	spin_unlock(&nfs4_deviceid_lock);
	synchronize_rcu();

	/* balance the initial ref set in nfs4_init_deviceid_node */
	if (atomic_dec_and_test(&d->ref))
		d->ld->free_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);
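
/*
 * Illustrative usage (not part of the original file): a layout driver that
 * considers a cached device description stale (for example after repeated
 * I/O errors against its data servers) can evict it so that the next layout
 * forces a fresh GETDEVICEINFO.  "node" is a hypothetical pointer to the
 * driver's nfs4_deviceid_node:
 *
 *	nfs4_delete_deviceid(node->ld, node->nfs_client, &node->deviceid);
 *
 * The call is a no-op if the id is no longer hashed in the cache.
 */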

void
nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
			const struct pnfs_layoutdriver_type *ld,
			const struct nfs_client *nfs_client,
			const struct nfs4_deviceid *id)
{
	INIT_HLIST_NODE(&d->node);
	INIT_HLIST_NODE(&d->tmpnode);
	d->ld = ld;
	d->nfs_client = nfs_client;
	d->deviceid = *id;
	atomic_set(&d->ref, 1);
}
EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);

/*
 * Uniquely initialize and insert a deviceid node into cache
 *
 * @new new deviceid node
 * Note that the caller must set up the following members (normally done
 * via nfs4_init_deviceid_node()):
 * new->ld
 * new->nfs_client
 * new->deviceid
 *
 * @ret the inserted node if no matching entry was found, otherwise the
 *	existing entry; in either case a reference is taken for the caller.
 */
struct nfs4_deviceid_node *
nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new)
{
	struct nfs4_deviceid_node *d;
	long hash;

	spin_lock(&nfs4_deviceid_lock);
	hash = nfs4_deviceid_hash(&new->deviceid);
	d = _find_get_deviceid(new->ld, new->nfs_client, &new->deviceid, hash);
	if (d) {
		spin_unlock(&nfs4_deviceid_lock);
		return d;
	}

	hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
	spin_unlock(&nfs4_deviceid_lock);
	atomic_inc(&new->ref);

	return new;
}
EXPORT_SYMBOL_GPL(nfs4_insert_deviceid_node);
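
/*
 * Illustrative sketch (not part of the original file) of the insert path a
 * layout driver would follow on a cache miss.  "struct my_device",
 * "my_decode_getdeviceinfo()", "my_free_device()", "server", "devid" and
 * "pdev" are hypothetical names, and error handling is omitted; only the
 * nfs4_* helpers are APIs defined in this file.
 *
 *	struct my_device *dev = kzalloc(sizeof(*dev), GFP_NOFS);
 *	struct nfs4_deviceid_node *node;
 *
 *	nfs4_init_deviceid_node(&dev->id_node, server->pnfs_curr_ld,
 *				server->nfs_client, &devid);
 *	my_decode_getdeviceinfo(dev, pdev);	(fill in driver-private data)
 *
 *	node = nfs4_insert_deviceid_node(&dev->id_node);
 *	if (node != &dev->id_node) {
 *		(someone inserted the same id first: free ours and use
 *		 the node already in the cache)
 *		my_free_device(dev);
 *	}
 *	...
 *	nfs4_put_deviceid_node(node);		(drop the caller's reference)
 */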

/*
 * Dereference a deviceid node and delete it when its reference count drops
 * to zero.
 *
 * @d deviceid node to put
 *
 * return true iff the node was deleted
 * Note that the test for d->ref == 0 is sufficient to establish that the
 * node is no longer hashed in the global device id cache.
 */
bool
nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
{
	if (!atomic_dec_and_test(&d->ref))
		return false;
	d->ld->free_deviceid_node(d);
	return true;
}
EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);
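
/*
 * Reference-count lifecycle, traced from the code above (an illustrative
 * note, not in the original source):
 *
 *	nfs4_init_deviceid_node()	ref = 1    cache's reference
 *	nfs4_insert_deviceid_node()	ref = 2    caller's reference
 *	nfs4_put_deviceid_node()	ref = 1    caller is done
 *	nfs4_delete_deviceid() or
 *	_deviceid_purge_client()	ref = 0    node unhashed and freed
 *
 * Because the cache's reference is dropped only after the node has been
 * unhashed and after synchronize_rcu(), observing ref == 0 implies the node
 * is no longer reachable through nfs4_deviceid_cache[].
 */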

static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			hlist_add_head(&d->tmpnode, &tmp);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);

	if (hlist_empty(&tmp))
		return;

	synchronize_rcu();
	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		if (atomic_dec_and_test(&d->ref))
			d->ld->free_deviceid_node(d);
	}
}

/*
 * Purge all cached deviceid nodes that belong to @clp.  Does nothing unless
 * the client is actually using a pNFS metadata server.
 */
void
nfs4_deviceid_purge_client(const struct nfs_client *clp)
{
	long h;

	if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
		return;
	for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
		_deviceid_purge_client(clp, h);
}