/*
 * linux/fs/lockd/host.c
 *
 * Management for NLM peer hosts. The nlm_host struct is shared
 * between client and server implementation. The only reason to
 * do so is to reduce code bloat.
 *
 * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
 */
#include <linux/types.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/in.h>
#include <linux/sunrpc/clnt.h>
#include <linux/sunrpc/svc.h>
#include <linux/lockd/lockd.h>
#include <linux/lockd/sm_inter.h>
#include <linux/mutex.h>

#define NLMDBG_FACILITY		NLMDBG_HOSTCACHE
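
/*
 * Sizing and timing parameters for the NLM host cache.
 */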
#define NLM_HOST_MAX		64
#define NLM_HOST_NRHASH		32
#define NLM_ADDRHASH(addr)	(ntohl(addr) & (NLM_HOST_NRHASH-1))
#define NLM_HOST_REBIND		(60 * HZ)
#define NLM_HOST_EXPIRE		((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
#define NLM_HOST_COLLECT	((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ)
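
/*
 * Hash table of known NLM peers. Insertions, lookups and garbage
 * collection are serialized by nlm_host_mutex; next_gc holds the
 * time (in jiffies) at which the next collection pass is due.
 */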
static struct nlm_host *	nlm_hosts[NLM_HOST_NRHASH];
static unsigned long		next_gc;
static int			nrhosts;
static DEFINE_MUTEX(nlm_host_mutex);

static void			nlm_gc_hosts(void);

/*
 * Find an NLM server handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
		    const char *hostname, int hostname_len)
{
	return nlm_lookup_host(0, sin, proto, version,
			       hostname, hostname_len);
}

/*
 * Find an NLM client handle in the cache. If there is none, create it.
 */
struct nlm_host *
nlmsvc_lookup_host(struct svc_rqst *rqstp,
		   const char *hostname, int hostname_len)
{
	return nlm_lookup_host(1, &rqstp->rq_addr,
			       rqstp->rq_prot, rqstp->rq_vers,
			       hostname, hostname_len);
}

/*
 * Common host lookup routine for server & client
 */
struct nlm_host *
nlm_lookup_host(int server, const struct sockaddr_in *sin,
		int proto, int version,
		const char *hostname,
		int hostname_len)
{
	struct nlm_host	*host, **hp;
	u32		addr;
	int		hash;

	dprintk("lockd: nlm_lookup_host(%u.%u.%u.%u, p=%d, v=%d, my role=%s, name=%.*s)\n",
			NIPQUAD(sin->sin_addr.s_addr), proto, version,
			server? "server" : "client",
			hostname_len,
			hostname? hostname : "<none>");

	hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

	/* Lock hash table */
	mutex_lock(&nlm_host_mutex);

	if (time_after_eq(jiffies, next_gc))
		nlm_gc_hosts();

	for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
		if (host->h_proto != proto)
			continue;
		if (host->h_version != version)
			continue;
		if (host->h_server != server)
			continue;

		if (nlm_cmp_addr(&host->h_addr, sin)) {
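			/*
			 * Move the entry to the front of the hash chain
			 * so that frequently used hosts are found first
			 * on subsequent lookups.
			 */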
			if (hp != nlm_hosts + hash) {
				*hp = host->h_next;
				host->h_next = nlm_hosts[hash];
				nlm_hosts[hash] = host;
			}
			nlm_get_host(host);
			mutex_unlock(&nlm_host_mutex);
			return host;
		}
	}

	/* Ooops, no host found, create it */
	dprintk("lockd: creating host entry\n");

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host)
		goto nohost;

	addr = sin->sin_addr.s_addr;
	sprintf(host->h_name, "%u.%u.%u.%u", NIPQUAD(addr));

	host->h_addr       = *sin;
	host->h_addr.sin_port = 0;	/* ouch! */
	host->h_version    = version;
	host->h_proto      = proto;
	host->h_rpcclnt    = NULL;
	mutex_init(&host->h_mutex);
	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	host->h_expires    = jiffies + NLM_HOST_EXPIRE;
	atomic_set(&host->h_count, 1);
	init_waitqueue_head(&host->h_gracewait);
	init_rwsem(&host->h_rwsem);
	host->h_state      = 0;			/* pseudo NSM state */
	host->h_nsmstate   = 0;			/* real NSM state */
	host->h_server     = server;
	host->h_next       = nlm_hosts[hash];
	nlm_hosts[hash]    = host;
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);
	INIT_LIST_HEAD(&host->h_granted);
	INIT_LIST_HEAD(&host->h_reclaim);
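
	/*
	 * If the table has grown past NLM_HOST_MAX, force a garbage
	 * collection pass on the next lookup by making next_gc expire
	 * immediately.
	 */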
	if (++nrhosts > NLM_HOST_MAX)
		next_gc = 0;

nohost:
	mutex_unlock(&nlm_host_mutex);
	return host;
}
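
/*
 * Find a client host (i.e. an nlm_host created on the server side)
 * that has not been marked killed, take a reference to it and
 * return it. Returns NULL if no such host exists.
 */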
struct nlm_host *
nlm_find_client(void)
{
	/* find a nlm_host for a client for which h_killed == 0.
	 * and return it
	 */
	int hash;

	mutex_lock(&nlm_host_mutex);
	for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
		struct nlm_host *host, **hp;
		for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
			if (host->h_server &&
			    host->h_killed == 0) {
				nlm_get_host(host);
				mutex_unlock(&nlm_host_mutex);
				return host;
			}
		}
	}
	mutex_unlock(&nlm_host_mutex);
	return NULL;
}

/*
 * Create the NLM RPC client for an NLM peer
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;

	dprintk("lockd: nlm_bind_host(%08x)\n",
			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));

	/* Lock host handle */
	mutex_lock(&host->h_mutex);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		if (time_after_eq(jiffies, host->h_nextrebind)) {
			rpc_force_rebind(clnt);
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
					host->h_nextrebind - jiffies);
		}
	} else {
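		/*
		 * No RPC client yet: create one, with retransmit
		 * timeouts derived from the lockd timeout and with
		 * automatic (re)binding to the peer's NLM service.
		 */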
		unsigned long increment = nlmsvc_timeout * HZ;
		struct rpc_timeout timeparms = {
			.to_initval	= increment,
			.to_increment	= increment,
			.to_maxval	= increment * 6UL,
			.to_retries	= 5U,
		};
		struct rpc_create_args args = {
			.protocol	= host->h_proto,
			.address	= (struct sockaddr *)&host->h_addr,
			.addrsize	= sizeof(host->h_addr),
			.timeout	= &timeparms,
			.servername	= host->h_name,
			.program	= &nlm_program,
			.version	= host->h_version,
			.authflavor	= RPC_AUTH_UNIX,
			.flags		= (RPC_CLNT_CREATE_HARDRTRY |
					   RPC_CLNT_CREATE_AUTOBIND),
		};

		clnt = rpc_create(&args);
		if (!IS_ERR(clnt))
			host->h_rpcclnt = clnt;
		else {
			printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
			clnt = NULL;
		}
	}

	mutex_unlock(&host->h_mutex);
	return clnt;
}

/*
 * Force a portmap lookup of the remote lockd port
 */
void
nlm_rebind_host(struct nlm_host *host)
{
	dprintk("lockd: rebind host %s\n", host->h_name);
	if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
		rpc_force_rebind(host->h_rpcclnt);
		host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	}
}

/*
 * Increment NLM host count
 */
struct nlm_host * nlm_get_host(struct nlm_host *host)
{
	if (host) {
		dprintk("lockd: get host %s\n", host->h_name);
		atomic_inc(&host->h_count);
		host->h_expires = jiffies + NLM_HOST_EXPIRE;
	}
	return host;
}

/*
 * Release NLM host after use
 */
void nlm_release_host(struct nlm_host *host)
{
	if (host != NULL) {
		dprintk("lockd: release host %s\n", host->h_name);
		BUG_ON(atomic_read(&host->h_count) < 0);
		if (atomic_dec_and_test(&host->h_count)) {
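			/*
			 * The last reference is gone, but the host is
			 * not freed here; nlm_gc_hosts() reclaims the
			 * entry once it has also expired.
			 */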
			BUG_ON(!list_empty(&host->h_lockowners));
			BUG_ON(!list_empty(&host->h_granted));
			BUG_ON(!list_empty(&host->h_reclaim));
		}
	}
}

/*
 * We were notified that the host indicated by address &sin
 * has rebooted.
 * Release all resources held by that peer.
 */
void nlm_host_rebooted(const struct sockaddr_in *sin, const struct nlm_reboot *argp)
{
	struct nlm_host *host;
	int server;

	/* Obtain the host pointer for this NFS server and try to
	 * reclaim all locks we hold on this server.
	 */
	server = (argp->proto & 1)? 1 : 0;
	host = nlm_lookup_host(server, sin, argp->proto >> 1, argp->vers,
			       argp->mon, argp->len);
	if (host == NULL)
		return;

	if (server == 0) {
		/* We are client, he's the server: try to reclaim all locks. */
		nlmclnt_recovery(host, argp->state);
	} else {
		/* He's the client, we're the server: delete all locks held by the client */
		nlmsvc_free_host_resources(host);
	}
	nlm_release_host(host);
}

/*
 * Shut down the hosts module.
 * Note that this routine is called only at server shutdown time.
 */
void
nlm_shutdown_hosts(void)
{
	struct nlm_host	*host;
	int		i;

	dprintk("lockd: shutting down host module\n");
	mutex_lock(&nlm_host_mutex);

	/* First, make all hosts eligible for gc */
	dprintk("lockd: nuking all hosts...\n");
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_expires = jiffies - 1;
	}

	/* Then, perform a garbage collection pass */
	nlm_gc_hosts();
	mutex_unlock(&nlm_host_mutex);

	/* complain if any hosts are left */
	if (nrhosts) {
		printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
		dprintk("lockd: %d hosts left:\n", nrhosts);
		for (i = 0; i < NLM_HOST_NRHASH; i++) {
			for (host = nlm_hosts[i]; host; host = host->h_next) {
				dprintk("       %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
			}
		}
	}
}

/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 */
static void
nlm_gc_hosts(void)
{
	struct nlm_host	**q, *host;
	struct rpc_clnt	*clnt;
	int		i;

	dprintk("lockd: host garbage collection\n");
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();
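
	/*
	 * Sweep: free every host that is unreferenced, was not marked
	 * above and has passed its expiry time, unhashing it first.
	 */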
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		q = &nlm_hosts[i];
		while ((host = *q) != NULL) {
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				q = &host->h_next;
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			*q = host->h_next;
			/*
			 * Unmonitor unless host was invalidated (i.e. lockd restarted)
			 */
			nsm_unmonitor(host);
			if ((clnt = host->h_rpcclnt) != NULL) {
				if (atomic_read(&clnt->cl_users)) {
					printk(KERN_WARNING
						"lockd: active RPC handle\n");
					clnt->cl_dead = 1;
				} else {
					rpc_destroy_client(host->h_rpcclnt);
				}
			}
			kfree(host);
			nrhosts--;
		}
	}

	next_gc = jiffies + NLM_HOST_COLLECT;
}