host.c 13 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520
  1. /*
  2. * linux/fs/lockd/host.c
  3. *
  4. * Management for NLM peer hosts. The nlm_host struct is shared
  5. * between client and server implementation. The only reason to
  6. * do so is to reduce code bloat.
  7. *
  8. * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de>
  9. */
  10. #include <linux/types.h>
  11. #include <linux/sched.h>
  12. #include <linux/slab.h>
  13. #include <linux/in.h>
  14. #include <linux/sunrpc/clnt.h>
  15. #include <linux/sunrpc/svc.h>
  16. #include <linux/lockd/lockd.h>
  17. #include <linux/lockd/sm_inter.h>
  18. #include <linux/mutex.h>
/* Debug facility tag for this file's dprintk() output. */
#define NLMDBG_FACILITY NLMDBG_HOSTCACHE
/* Soft cap on cached hosts; crossing it forces a GC pass on the next lookup. */
#define NLM_HOST_MAX 64
/* Number of hash buckets; power of two so NLM_ADDRHASH can mask. */
#define NLM_HOST_NRHASH 32
/* Hash an IPv4 address (network byte order) into a bucket index. */
#define NLM_ADDRHASH(addr) (ntohl(addr) & (NLM_HOST_NRHASH-1))
/* Interval after which an RPC client re-queries the peer's portmapper. */
#define NLM_HOST_REBIND (60 * HZ)
/* Host lifetime from last use.  NOTE(review): lifetime is *longer* (300s vs
 * 120s) when over NLM_HOST_MAX -- looks inverted, but matches COLLECT below;
 * confirm intent before changing. */
#define NLM_HOST_EXPIRE ((nrhosts > NLM_HOST_MAX)? 300 * HZ : 120 * HZ)
/* Delay until the next scheduled GC pass, set at the end of nlm_gc_hosts(). */
#define NLM_HOST_COLLECT ((nrhosts > NLM_HOST_MAX)? 120 * HZ : 60 * HZ)

/* Hash table of nlm_host entries, keyed by peer IP address. */
static struct nlm_host * nlm_hosts[NLM_HOST_NRHASH];
static unsigned long next_gc;	/* jiffies time of the next GC pass */
static int nrhosts;		/* total hosts currently in the table */
static DEFINE_MUTEX(nlm_host_mutex);	/* protects nlm_hosts[], nrhosts, next_gc */

static void nlm_gc_hosts(void);
static struct nsm_handle * __nsm_find(const struct sockaddr_in *,
					const char *, int, int);
  33. /*
  34. * Find an NLM server handle in the cache. If there is none, create it.
  35. */
  36. struct nlm_host *
  37. nlmclnt_lookup_host(const struct sockaddr_in *sin, int proto, int version,
  38. const char *hostname, int hostname_len)
  39. {
  40. return nlm_lookup_host(0, sin, proto, version,
  41. hostname, hostname_len);
  42. }
  43. /*
  44. * Find an NLM client handle in the cache. If there is none, create it.
  45. */
  46. struct nlm_host *
  47. nlmsvc_lookup_host(struct svc_rqst *rqstp,
  48. const char *hostname, int hostname_len)
  49. {
  50. return nlm_lookup_host(1, &rqstp->rq_addr,
  51. rqstp->rq_prot, rqstp->rq_vers,
  52. hostname, hostname_len);
  53. }
/*
 * Common host lookup routine for server & client
 *
 * Look up an nlm_host matching (address, protocol, version, role) in the
 * hash table, creating one if no match exists.  Returns the host with an
 * extra reference held (via nlm_get_host / initial h_count of 1), or
 * NULL on allocation failure.
 */
struct nlm_host *
nlm_lookup_host(int server, const struct sockaddr_in *sin,
					int proto, int version,
					const char *hostname,
					int hostname_len)
{
	struct nlm_host	*host, **hp;
	struct nsm_handle *nsm = NULL;
	int		hash;

	dprintk("lockd: nlm_lookup_host(%u.%u.%u.%u, p=%d, v=%d, my role=%s, name=%.*s)\n",
			NIPQUAD(sin->sin_addr.s_addr), proto, version,
			server? "server" : "client",
			hostname_len,
			hostname? hostname : "<none>");

	hash = NLM_ADDRHASH(sin->sin_addr.s_addr);

	/* Lock hash table */
	mutex_lock(&nlm_host_mutex);

	/* Opportunistic garbage collection when the scheduled time is up. */
	if (time_after_eq(jiffies, next_gc))
		nlm_gc_hosts();

	/* We may keep several nlm_host objects for a peer, because each
	 * nlm_host is identified by
	 * (address, protocol, version, server/client)
	 * We could probably simplify this a little by putting all those
	 * different NLM rpc_clients into one single nlm_host object.
	 * This would allow us to have one nlm_host per address.
	 */
	for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
		if (!nlm_cmp_addr(&host->h_addr, sin))
			continue;

		/* See if we have an NSM handle for this client.
		 * Taken from any address match (even if proto/version/role
		 * differ) so a new host created below can reuse it instead
		 * of calling nsm_find(). */
		if (!nsm && (nsm = host->h_nsmhandle) != 0)
			atomic_inc(&nsm->sm_count);

		if (host->h_proto != proto)
			continue;
		if (host->h_version != version)
			continue;
		if (host->h_server != server)
			continue;

		/* Full match: move it to the front of its hash chain. */
		if (hp != nlm_hosts + hash) {
			*hp = host->h_next;
			host->h_next = nlm_hosts[hash];
			nlm_hosts[hash] = host;
		}
		nlm_get_host(host);
		goto out;
	}

	/* Sadly, the host isn't in our hash table yet. See if
	 * we have an NSM handle for it. If not, create one.
	 */
	if (!nsm && !(nsm = nsm_find(sin, hostname, hostname_len)))
		goto out;	/* host is NULL here: the scan loop exhausted */

	host = kzalloc(sizeof(*host), GFP_KERNEL);
	if (!host) {
		nsm_release(nsm);	/* drop the reference taken above */
		goto out;		/* host is NULL: report failure */
	}
	host->h_name	   = nsm->sm_name;
	host->h_addr       = *sin;
	host->h_addr.sin_port = 0;	/* ouch! */
	host->h_version    = version;
	host->h_proto      = proto;
	host->h_rpcclnt    = NULL;
	mutex_init(&host->h_mutex);
	host->h_nextrebind = jiffies + NLM_HOST_REBIND;
	host->h_expires    = jiffies + NLM_HOST_EXPIRE;
	atomic_set(&host->h_count, 1);	/* caller's reference */
	init_waitqueue_head(&host->h_gracewait);
	init_rwsem(&host->h_rwsem);
	host->h_state      = 0;		/* pseudo NSM state */
	host->h_nsmstate   = 0;		/* real NSM state */
	host->h_nsmhandle  = nsm;	/* ownership of nsm ref moves to host */
	host->h_server     = server;
	host->h_next       = nlm_hosts[hash];
	nlm_hosts[hash]    = host;
	INIT_LIST_HEAD(&host->h_lockowners);
	spin_lock_init(&host->h_lock);
	INIT_LIST_HEAD(&host->h_granted);
	INIT_LIST_HEAD(&host->h_reclaim);

	/* Over the soft limit: force a GC pass on the very next lookup. */
	if (++nrhosts > NLM_HOST_MAX)
		next_gc = 0;

out:
	mutex_unlock(&nlm_host_mutex);
	return host;
}
  141. struct nlm_host *
  142. nlm_find_client(void)
  143. {
  144. /* find a nlm_host for a client for which h_killed == 0.
  145. * and return it
  146. */
  147. int hash;
  148. mutex_lock(&nlm_host_mutex);
  149. for (hash = 0 ; hash < NLM_HOST_NRHASH; hash++) {
  150. struct nlm_host *host, **hp;
  151. for (hp = &nlm_hosts[hash]; (host = *hp) != 0; hp = &host->h_next) {
  152. if (host->h_server &&
  153. host->h_killed == 0) {
  154. nlm_get_host(host);
  155. mutex_unlock(&nlm_host_mutex);
  156. return host;
  157. }
  158. }
  159. }
  160. mutex_unlock(&nlm_host_mutex);
  161. return NULL;
  162. }
/*
 * Create the NLM RPC client for an NLM peer
 *
 * Returns the host's cached rpc_clnt, creating it on first use, or NULL
 * if creation fails.  Serialized per-host by h_mutex.  A cached client
 * past its rebind time is forced to redo the portmap lookup.
 */
struct rpc_clnt *
nlm_bind_host(struct nlm_host *host)
{
	struct rpc_clnt	*clnt;

	dprintk("lockd: nlm_bind_host(%08x)\n",
			(unsigned)ntohl(host->h_addr.sin_addr.s_addr));

	/* Lock host handle */
	mutex_lock(&host->h_mutex);

	/* If we've already created an RPC client, check whether
	 * RPC rebind is required
	 */
	if ((clnt = host->h_rpcclnt) != NULL) {
		if (time_after_eq(jiffies, host->h_nextrebind)) {
			rpc_force_rebind(clnt);
			host->h_nextrebind = jiffies + NLM_HOST_REBIND;
			dprintk("lockd: next rebind in %ld jiffies\n",
					host->h_nextrebind - jiffies);
		}
	} else {
		/* Retry timeout grows linearly by `increment` per retry,
		 * capped at six times the base value. */
		unsigned long increment = nlmsvc_timeout * HZ;
		struct rpc_timeout timeparms = {
			.to_initval	= increment,
			.to_increment	= increment,
			.to_maxval	= increment * 6UL,
			.to_retries	= 5U,
		};
		struct rpc_create_args args = {
			.protocol	= host->h_proto,
			.address	= (struct sockaddr *)&host->h_addr,
			.addrsize	= sizeof(host->h_addr),
			.timeout	= &timeparms,
			.servername	= host->h_name,
			.program	= &nlm_program,
			.version	= host->h_version,
			.authflavor	= RPC_AUTH_UNIX,
			.flags		= (RPC_CLNT_CREATE_HARDRTRY |
					   RPC_CLNT_CREATE_AUTOBIND),
		};

		clnt = rpc_create(&args);
		if (!IS_ERR(clnt))
			host->h_rpcclnt = clnt;
		else {
			/* Normalize the error pointer to NULL for callers. */
			printk("lockd: couldn't create RPC handle for %s\n", host->h_name);
			clnt = NULL;
		}
	}

	mutex_unlock(&host->h_mutex);
	return clnt;
}
  215. /*
  216. * Force a portmap lookup of the remote lockd port
  217. */
  218. void
  219. nlm_rebind_host(struct nlm_host *host)
  220. {
  221. dprintk("lockd: rebind host %s\n", host->h_name);
  222. if (host->h_rpcclnt && time_after_eq(jiffies, host->h_nextrebind)) {
  223. rpc_force_rebind(host->h_rpcclnt);
  224. host->h_nextrebind = jiffies + NLM_HOST_REBIND;
  225. }
  226. }
  227. /*
  228. * Increment NLM host count
  229. */
  230. struct nlm_host * nlm_get_host(struct nlm_host *host)
  231. {
  232. if (host) {
  233. dprintk("lockd: get host %s\n", host->h_name);
  234. atomic_inc(&host->h_count);
  235. host->h_expires = jiffies + NLM_HOST_EXPIRE;
  236. }
  237. return host;
  238. }
  239. /*
  240. * Release NLM host after use
  241. */
  242. void nlm_release_host(struct nlm_host *host)
  243. {
  244. if (host != NULL) {
  245. dprintk("lockd: release host %s\n", host->h_name);
  246. BUG_ON(atomic_read(&host->h_count) < 0);
  247. if (atomic_dec_and_test(&host->h_count)) {
  248. BUG_ON(!list_empty(&host->h_lockowners));
  249. BUG_ON(!list_empty(&host->h_granted));
  250. BUG_ON(!list_empty(&host->h_reclaim));
  251. }
  252. }
  253. }
  254. /*
  255. * We were notified that the host indicated by address &sin
  256. * has rebooted.
  257. * Release all resources held by that peer.
  258. */
  259. void nlm_host_rebooted(const struct sockaddr_in *sin,
  260. const char *hostname, int hostname_len,
  261. u32 new_state)
  262. {
  263. struct nsm_handle *nsm;
  264. struct nlm_host *host, **hp;
  265. int hash;
  266. dprintk("lockd: nlm_host_rebooted(%s, %u.%u.%u.%u)\n",
  267. hostname, NIPQUAD(sin->sin_addr));
  268. /* Find the NSM handle for this peer */
  269. if (!(nsm = __nsm_find(sin, hostname, hostname_len, 0)))
  270. return;
  271. /* When reclaiming locks on this peer, make sure that
  272. * we set up a new notification */
  273. nsm->sm_monitored = 0;
  274. /* Mark all hosts tied to this NSM state as having rebooted.
  275. * We run the loop repeatedly, because we drop the host table
  276. * lock for this.
  277. * To avoid processing a host several times, we match the nsmstate.
  278. */
  279. again: mutex_lock(&nlm_host_mutex);
  280. for (hash = 0; hash < NLM_HOST_NRHASH; hash++) {
  281. for (hp = &nlm_hosts[hash]; (host = *hp); hp = &host->h_next) {
  282. if (host->h_nsmhandle == nsm
  283. && host->h_nsmstate != new_state) {
  284. host->h_nsmstate = new_state;
  285. host->h_state++;
  286. nlm_get_host(host);
  287. mutex_unlock(&nlm_host_mutex);
  288. if (host->h_server) {
  289. /* We're server for this guy, just ditch
  290. * all the locks he held. */
  291. nlmsvc_free_host_resources(host);
  292. } else {
  293. /* He's the server, initiate lock recovery. */
  294. nlmclnt_recovery(host);
  295. }
  296. nlm_release_host(host);
  297. goto again;
  298. }
  299. }
  300. }
  301. mutex_unlock(&nlm_host_mutex);
  302. }
  303. /*
  304. * Shut down the hosts module.
  305. * Note that this routine is called only at server shutdown time.
  306. */
  307. void
  308. nlm_shutdown_hosts(void)
  309. {
  310. struct nlm_host *host;
  311. int i;
  312. dprintk("lockd: shutting down host module\n");
  313. mutex_lock(&nlm_host_mutex);
  314. /* First, make all hosts eligible for gc */
  315. dprintk("lockd: nuking all hosts...\n");
  316. for (i = 0; i < NLM_HOST_NRHASH; i++) {
  317. for (host = nlm_hosts[i]; host; host = host->h_next)
  318. host->h_expires = jiffies - 1;
  319. }
  320. /* Then, perform a garbage collection pass */
  321. nlm_gc_hosts();
  322. mutex_unlock(&nlm_host_mutex);
  323. /* complain if any hosts are left */
  324. if (nrhosts) {
  325. printk(KERN_WARNING "lockd: couldn't shutdown host module!\n");
  326. dprintk("lockd: %d hosts left:\n", nrhosts);
  327. for (i = 0; i < NLM_HOST_NRHASH; i++) {
  328. for (host = nlm_hosts[i]; host; host = host->h_next) {
  329. dprintk(" %s (cnt %d use %d exp %ld)\n",
  330. host->h_name, atomic_read(&host->h_count),
  331. host->h_inuse, host->h_expires);
  332. }
  333. }
  334. }
  335. }
/*
 * Garbage collect any unused NLM hosts.
 * This GC combines reference counting for async operations with
 * mark & sweep for resources held by remote clients.
 *
 * Caller must hold nlm_host_mutex (true for both call sites:
 * nlm_lookup_host and nlm_shutdown_hosts).
 */
static void
nlm_gc_hosts(void)
{
	struct nlm_host	**q, *host;
	struct rpc_clnt	*clnt;
	int		i;

	dprintk("lockd: host garbage collection\n");
	/* Clear all in-use marks ... */
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		for (host = nlm_hosts[i]; host; host = host->h_next)
			host->h_inuse = 0;
	}

	/* Mark all hosts that hold locks, blocks or shares */
	nlmsvc_mark_resources();

	/* ... then sweep: free every host that is unreferenced,
	 * unmarked, and past its expiry time. */
	for (i = 0; i < NLM_HOST_NRHASH; i++) {
		q = &nlm_hosts[i];
		while ((host = *q) != NULL) {
			if (atomic_read(&host->h_count) || host->h_inuse
			 || time_before(jiffies, host->h_expires)) {
				dprintk("nlm_gc_hosts skipping %s (cnt %d use %d exp %ld)\n",
					host->h_name, atomic_read(&host->h_count),
					host->h_inuse, host->h_expires);
				q = &host->h_next;
				continue;
			}
			dprintk("lockd: delete host %s\n", host->h_name);
			*q = host->h_next;	/* unlink from the hash chain */

			/*
			 * Unmonitor unless host was invalidated (i.e. lockd restarted)
			 */
			nsm_unmonitor(host);

			if ((clnt = host->h_rpcclnt) != NULL) {
				if (atomic_read(&clnt->cl_users)) {
					/* RPC calls still outstanding: mark
					 * the client dead and let the last
					 * user tear it down. */
					printk(KERN_WARNING
						"lockd: active RPC handle\n");
					clnt->cl_dead = 1;
				} else {
					rpc_destroy_client(host->h_rpcclnt);
				}
			}
			kfree(host);
			nrhosts--;
		}
	}

	/* Schedule the next pass. */
	next_gc = jiffies + NLM_HOST_COLLECT;
}
/*
 * Manage NSM handles
 */
static LIST_HEAD(nsm_handles);	/* all known nsm_handle objects */
static DECLARE_MUTEX(nsm_sema);	/* binary semaphore guarding nsm_handles */
  391. static struct nsm_handle *
  392. __nsm_find(const struct sockaddr_in *sin,
  393. const char *hostname, int hostname_len,
  394. int create)
  395. {
  396. struct nsm_handle *nsm = NULL;
  397. struct list_head *pos;
  398. if (!sin)
  399. return NULL;
  400. if (hostname && memchr(hostname, '/', hostname_len) != NULL) {
  401. if (printk_ratelimit()) {
  402. printk(KERN_WARNING "Invalid hostname \"%.*s\" "
  403. "in NFS lock request\n",
  404. hostname_len, hostname);
  405. }
  406. return NULL;
  407. }
  408. down(&nsm_sema);
  409. list_for_each(pos, &nsm_handles) {
  410. nsm = list_entry(pos, struct nsm_handle, sm_link);
  411. if (!nlm_cmp_addr(&nsm->sm_addr, sin))
  412. continue;
  413. atomic_inc(&nsm->sm_count);
  414. goto out;
  415. }
  416. if (!create) {
  417. nsm = NULL;
  418. goto out;
  419. }
  420. nsm = kzalloc(sizeof(*nsm) + hostname_len + 1, GFP_KERNEL);
  421. if (nsm != NULL) {
  422. nsm->sm_addr = *sin;
  423. nsm->sm_name = (char *) (nsm + 1);
  424. memcpy(nsm->sm_name, hostname, hostname_len);
  425. nsm->sm_name[hostname_len] = '\0';
  426. atomic_set(&nsm->sm_count, 1);
  427. list_add(&nsm->sm_link, &nsm_handles);
  428. }
  429. out: up(&nsm_sema);
  430. return nsm;
  431. }
  432. struct nsm_handle *
  433. nsm_find(const struct sockaddr_in *sin, const char *hostname, int hostname_len)
  434. {
  435. return __nsm_find(sin, hostname, hostname_len, 1);
  436. }
  437. /*
  438. * Release an NSM handle
  439. */
  440. void
  441. nsm_release(struct nsm_handle *nsm)
  442. {
  443. if (!nsm)
  444. return;
  445. if (atomic_dec_and_test(&nsm->sm_count)) {
  446. down(&nsm_sema);
  447. if (atomic_read(&nsm->sm_count) == 0) {
  448. list_del(&nsm->sm_link);
  449. kfree(nsm);
  450. }
  451. up(&nsm_sema);
  452. }
  453. }