/* fs/nfs/pnfs_dev.c */
  1. /*
  2. * Device operations for the pnfs client.
  3. *
  4. * Copyright (c) 2002
  5. * The Regents of the University of Michigan
  6. * All Rights Reserved
  7. *
  8. * Dean Hildebrand <dhildebz@umich.edu>
  9. * Garth Goodson <Garth.Goodson@netapp.com>
  10. *
  11. * Permission is granted to use, copy, create derivative works, and
  12. * redistribute this software and such derivative works for any purpose,
  13. * so long as the name of the University of Michigan is not used in
  14. * any advertising or publicity pertaining to the use or distribution
  15. * of this software without specific, written prior authorization. If
  16. * the above copyright notice or any other identification of the
  17. * University of Michigan is included in any copy of any portion of
  18. * this software, then the disclaimer below must also be included.
  19. *
  20. * This software is provided as is, without representation or warranty
  21. * of any kind either express or implied, including without limitation
  22. * the implied warranties of merchantability, fitness for a particular
  23. * purpose, or noninfringement. The Regents of the University of
  24. * Michigan shall not be liable for any damages, including special,
  25. * indirect, incidental, or consequential damages, with respect to any
  26. * claim arising out of or in connection with the use of the software,
  27. * even if it has been or is hereafter advised of the possibility of
  28. * such damages.
  29. */
  30. #include "pnfs.h"
  31. #define NFSDBG_FACILITY NFSDBG_PNFS
  32. /*
  33. * Device ID RCU cache. A device ID is unique per server and layout type.
  34. */
  35. #define NFS4_DEVICE_ID_HASH_BITS 5
  36. #define NFS4_DEVICE_ID_HASH_SIZE (1 << NFS4_DEVICE_ID_HASH_BITS)
  37. #define NFS4_DEVICE_ID_HASH_MASK (NFS4_DEVICE_ID_HASH_SIZE - 1)
  38. static struct hlist_head nfs4_deviceid_cache[NFS4_DEVICE_ID_HASH_SIZE];
  39. static DEFINE_SPINLOCK(nfs4_deviceid_lock);
  40. void
  41. nfs4_print_deviceid(const struct nfs4_deviceid *id)
  42. {
  43. u32 *p = (u32 *)id;
  44. dprintk("%s: device id= [%x%x%x%x]\n", __func__,
  45. p[0], p[1], p[2], p[3]);
  46. }
  47. EXPORT_SYMBOL_GPL(nfs4_print_deviceid);
  48. static inline u32
  49. nfs4_deviceid_hash(const struct nfs4_deviceid *id)
  50. {
  51. unsigned char *cptr = (unsigned char *)id->data;
  52. unsigned int nbytes = NFS4_DEVICEID4_SIZE;
  53. u32 x = 0;
  54. while (nbytes--) {
  55. x *= 37;
  56. x += *cptr++;
  57. }
  58. return x & NFS4_DEVICE_ID_HASH_MASK;
  59. }
  60. static struct nfs4_deviceid_node *
  61. _lookup_deviceid(const struct pnfs_layoutdriver_type *ld,
  62. const struct nfs_client *clp, const struct nfs4_deviceid *id,
  63. long hash)
  64. {
  65. struct nfs4_deviceid_node *d;
  66. struct hlist_node *n;
  67. hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
  68. if (d->ld == ld && d->nfs_client == clp &&
  69. !memcmp(&d->deviceid, id, sizeof(*id))) {
  70. if (atomic_read(&d->ref))
  71. return d;
  72. else
  73. continue;
  74. }
  75. return NULL;
  76. }
  77. /*
  78. * Lookup a deviceid in cache and get a reference count on it if found
  79. *
  80. * @clp nfs_client associated with deviceid
  81. * @id deviceid to look up
  82. */
  83. struct nfs4_deviceid_node *
  84. _find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
  85. const struct nfs_client *clp, const struct nfs4_deviceid *id,
  86. long hash)
  87. {
  88. struct nfs4_deviceid_node *d;
  89. rcu_read_lock();
  90. d = _lookup_deviceid(ld, clp, id, hash);
  91. if (d != NULL)
  92. atomic_inc(&d->ref);
  93. rcu_read_unlock();
  94. return d;
  95. }
  96. struct nfs4_deviceid_node *
  97. nfs4_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
  98. const struct nfs_client *clp, const struct nfs4_deviceid *id)
  99. {
  100. return _find_get_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
  101. }
  102. EXPORT_SYMBOL_GPL(nfs4_find_get_deviceid);
  103. /*
  104. * Remove a deviceid from cache
  105. *
  106. * @clp nfs_client associated with deviceid
  107. * @id the deviceid to unhash
  108. *
* If a matching node is found it is unhashed, an RCU grace period is
* observed, and the cache's reference is dropped (freeing the node if
* that was the last reference). No return value.
  110. */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
		     const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	/* spinlock serializes hash-table writers; the RCU read section
	 * covers the bucket walk in _lookup_deviceid() */
	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	spin_unlock(&nfs4_deviceid_lock);
	/* wait out RCU readers that may still see the unhashed node
	 * before the reference that keeps it alive can be dropped */
	synchronize_rcu();
	/* balance the initial ref set in pnfs_insert_deviceid */
	if (atomic_dec_and_test(&d->ref))
		d->ld->free_deviceid_node(d);
}
EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);
  131. EXPORT_SYMBOL_GPL(nfs4_delete_deviceid);
  132. void
  133. nfs4_init_deviceid_node(struct nfs4_deviceid_node *d,
  134. const struct pnfs_layoutdriver_type *ld,
  135. const struct nfs_client *nfs_client,
  136. const struct nfs4_deviceid *id)
  137. {
  138. INIT_HLIST_NODE(&d->node);
  139. INIT_HLIST_NODE(&d->tmpnode);
  140. d->ld = ld;
  141. d->nfs_client = nfs_client;
  142. d->flags = 0;
  143. d->deviceid = *id;
  144. atomic_set(&d->ref, 1);
  145. }
  146. EXPORT_SYMBOL_GPL(nfs4_init_deviceid_node);
  147. /*
  148. * Uniquely initialize and insert a deviceid node into cache
  149. *
  150. * @new new deviceid node
  151. * Note that the caller must set up the following members:
  152. * new->ld
  153. * new->nfs_client
  154. * new->deviceid
  155. *
* @ret @new if no matching entry existed (now inserted); otherwise the
* already-cached matching node, with a reference taken on it.
  157. */
  158. struct nfs4_deviceid_node *
  159. nfs4_insert_deviceid_node(struct nfs4_deviceid_node *new)
  160. {
  161. struct nfs4_deviceid_node *d;
  162. long hash;
  163. spin_lock(&nfs4_deviceid_lock);
  164. hash = nfs4_deviceid_hash(&new->deviceid);
  165. d = _find_get_deviceid(new->ld, new->nfs_client, &new->deviceid, hash);
  166. if (d) {
  167. spin_unlock(&nfs4_deviceid_lock);
  168. return d;
  169. }
  170. hlist_add_head_rcu(&new->node, &nfs4_deviceid_cache[hash]);
  171. spin_unlock(&nfs4_deviceid_lock);
  172. atomic_inc(&new->ref);
  173. return new;
  174. }
  175. EXPORT_SYMBOL_GPL(nfs4_insert_deviceid_node);
  176. /*
  177. * Dereference a deviceid node and delete it when its reference count drops
  178. * to zero.
  179. *
  180. * @d deviceid node to put
  181. *
  182. * return true iff the node was deleted
* Note that the test for d->ref reaching zero is sufficient to establish
* that the node is no longer hashed in the global device id cache.
  185. */
  186. bool
  187. nfs4_put_deviceid_node(struct nfs4_deviceid_node *d)
  188. {
  189. if (!atomic_dec_and_test(&d->ref))
  190. return false;
  191. d->ld->free_deviceid_node(d);
  192. return true;
  193. }
  194. EXPORT_SYMBOL_GPL(nfs4_put_deviceid_node);
/*
 * Remove every live deviceid node in bucket @hash that belongs to @clp.
 * Two phases: unhash under the lock onto a private list, wait for RCU
 * readers to drain, then drop the cache's reference on each node.
 */
static void
_deviceid_purge_client(const struct nfs_client *clp, long hash)
{
	struct nfs4_deviceid_node *d;
	struct hlist_node *n;
	HLIST_HEAD(tmp);

	spin_lock(&nfs4_deviceid_lock);
	rcu_read_lock();
	hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[hash], node)
		if (d->nfs_client == clp && atomic_read(&d->ref)) {
			hlist_del_init_rcu(&d->node);
			/* park on the private list via the spare tmpnode link */
			hlist_add_head(&d->tmpnode, &tmp);
		}
	rcu_read_unlock();
	spin_unlock(&nfs4_deviceid_lock);
	if (hlist_empty(&tmp))
		return;
	/* let concurrent RCU lookups of the unhashed nodes finish */
	synchronize_rcu();
	while (!hlist_empty(&tmp)) {
		d = hlist_entry(tmp.first, struct nfs4_deviceid_node, tmpnode);
		hlist_del(&d->tmpnode);
		/* drop the cache's reference; free on last put */
		if (atomic_dec_and_test(&d->ref))
			d->ld->free_deviceid_node(d);
	}
}
  220. void
  221. nfs4_deviceid_purge_client(const struct nfs_client *clp)
  222. {
  223. long h;
  224. if (!(clp->cl_exchange_flags & EXCHGID4_FLAG_USE_PNFS_MDS))
  225. return;
  226. for (h = 0; h < NFS4_DEVICE_ID_HASH_SIZE; h++)
  227. _deviceid_purge_client(clp, h);
  228. }
  229. /*
  230. * Stop use of all deviceids associated with an nfs_client
  231. */
  232. void
  233. nfs4_deviceid_mark_client_invalid(struct nfs_client *clp)
  234. {
  235. struct nfs4_deviceid_node *d;
  236. struct hlist_node *n;
  237. int i;
  238. rcu_read_lock();
  239. for (i = 0; i < NFS4_DEVICE_ID_HASH_SIZE; i ++){
  240. hlist_for_each_entry_rcu(d, n, &nfs4_deviceid_cache[i], node)
  241. if (d->nfs_client == clp)
  242. set_bit(NFS_DEVICEID_INVALID, &d->flags);
  243. }
  244. rcu_read_unlock();
  245. }