/*
 *  fs/nfs/nfs4state.c
 *
 *  Client-side support for the NFSv4 state model.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  Implementation of the NFSv4 state model.  For the time being,
 *  this is minimal, but will be made much more complex in a
 *  subsequent patch.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);

static void nfs4_recover_state(void *);
void
init_nfsv4_state(struct nfs_server *server)
{
	server->nfs4_state = NULL;
	INIT_LIST_HEAD(&server->nfs4_siblings);
}

void
destroy_nfsv4_state(struct nfs_server *server)
{
	if (server->mnt_path) {
		kfree(server->mnt_path);
		server->mnt_path = NULL;
	}
	if (server->nfs4_state) {
		nfs4_put_client(server->nfs4_state);
		server->nfs4_state = NULL;
	}
}
/*
 * nfs4_get_client(): returns an empty client structure
 * nfs4_put_client(): drops reference to client structure
 *
 * Since these are allocated/deallocated very rarely, we don't
 * bother putting them in a slab cache...
 */
static struct nfs4_client *
nfs4_alloc_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	if (nfs_callback_up() < 0)
		return NULL;
	if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
		nfs_callback_down();
		return NULL;
	}
	memset(clp, 0, sizeof(*clp));
	memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
	init_rwsem(&clp->cl_sem);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_state_owners);
	INIT_LIST_HEAD(&clp->cl_unused);
	spin_lock_init(&clp->cl_lock);
	atomic_set(&clp->cl_count, 1);
	INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
	INIT_LIST_HEAD(&clp->cl_superblocks);
	init_waitqueue_head(&clp->cl_waitq);
	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
	clp->cl_rpcclient = ERR_PTR(-EINVAL);
	clp->cl_boot_time = CURRENT_TIME;
	clp->cl_state = 1 << NFS4CLNT_OK;
	return clp;
}

static void
nfs4_free_client(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;

	while (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next,
				struct nfs4_state_owner,
				so_list);
		list_del(&sp->so_list);
		kfree(sp);
	}
	BUG_ON(!list_empty(&clp->cl_state_owners));
	if (clp->cl_cred)
		put_rpccred(clp->cl_cred);
	nfs_idmap_delete(clp);
	if (!IS_ERR(clp->cl_rpcclient))
		rpc_shutdown_client(clp->cl_rpcclient);
	kfree(clp);
	nfs_callback_down();
}
static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
		if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
			atomic_inc(&clp->cl_count);
			return clp;
		}
	}
	return NULL;
}

struct nfs4_client *nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	spin_lock(&state_spinlock);
	clp = __nfs4_find_client(addr);
	spin_unlock(&state_spinlock);
	return clp;
}

struct nfs4_client *
nfs4_get_client(struct in_addr *addr)
{
	struct nfs4_client *clp, *new = NULL;

	spin_lock(&state_spinlock);
	for (;;) {
		clp = __nfs4_find_client(addr);
		if (clp != NULL)
			break;
		clp = new;
		if (clp != NULL) {
			list_add(&clp->cl_servers, &nfs4_clientid_list);
			new = NULL;
			break;
		}
		spin_unlock(&state_spinlock);
		new = nfs4_alloc_client(addr);
		spin_lock(&state_spinlock);
		if (new == NULL)
			break;
	}
	spin_unlock(&state_spinlock);
	if (new)
		nfs4_free_client(new);
	return clp;
}

void
nfs4_put_client(struct nfs4_client *clp)
{
	if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
		return;
	list_del(&clp->cl_servers);
	spin_unlock(&state_spinlock);
	BUG_ON(!list_empty(&clp->cl_superblocks));
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	nfs4_kill_renewd(clp);
	nfs4_free_client(clp);
}
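
/*
 * Illustrative sketch, not part of the original file: how a caller is
 * expected to pair nfs4_get_client() with nfs4_put_client().  The
 * helper below is hypothetical and compiled out; error handling is
 * reduced to the NULL check.
 */
#if 0
static int example_client_lookup(struct in_addr *addr)
{
	struct nfs4_client *clp;

	clp = nfs4_get_client(addr);	/* takes a reference, may allocate */
	if (clp == NULL)
		return -ENOMEM;
	/* ... use clp; per-client state is protected by clp->cl_sem ... */
	nfs4_put_client(clp);		/* drops the reference taken above */
	return 0;
}
#endif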
static int __nfs4_init_client(struct nfs4_client *clp)
{
	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);
	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}

int nfs4_init_client(struct nfs4_client *clp)
{
	return nfs4_map_errors(__nfs4_init_client(clp));
}

u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
	return clp->cl_lockowner_id++;
}
static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp = NULL;

	if (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
		atomic_inc(&sp->so_count);
		sp->so_cred = cred;
		list_move(&sp->so_list, &clp->cl_state_owners);
		clp->cl_nunused--;
	}
	return sp;
}

static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp, *res = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (sp->so_cred != cred)
			continue;
		atomic_inc(&sp->so_count);
		/* Move to the head of the list */
		list_move(&sp->so_list, &clp->cl_state_owners);
		res = sp;
		break;
	}
	return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return NULL;
	INIT_LIST_HEAD(&sp->so_states);
	INIT_LIST_HEAD(&sp->so_delegations);
	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
	sp->so_seqid.sequence = &sp->so_sequence;
	spin_lock_init(&sp->so_sequence.lock);
	INIT_LIST_HEAD(&sp->so_sequence.list);
	atomic_set(&sp->so_count, 1);
	return sp;
}

void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;

	spin_lock(&clp->cl_lock);
	list_del_init(&sp->so_list);
	spin_unlock(&clp->cl_lock);
}
/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 *       with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs4_client *clp = server->nfs4_state;
	struct nfs4_state_owner *sp, *new;

	get_rpccred(cred);
	new = nfs4_alloc_state_owner();
	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(clp, cred);
	if (sp == NULL)
		sp = nfs4_client_grab_unused(clp, cred);
	if (sp == NULL && new != NULL) {
		list_add(&new->so_list, &clp->cl_state_owners);
		new->so_client = clp;
		new->so_id = nfs4_alloc_lockowner_id(clp);
		new->so_cred = cred;
		sp = new;
		new = NULL;
	}
	spin_unlock(&clp->cl_lock);
	kfree(new);
	if (sp != NULL)
		return sp;
	put_rpccred(cred);
	return NULL;
}

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
		goto out_free;
	if (list_empty(&sp->so_list))
		goto out_free;
	list_move(&sp->so_list, &clp->cl_unused);
	clp->cl_nunused++;
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	cred = NULL;
	return;
out_free:
	list_del(&sp->so_list);
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	kfree(sp);
}
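
/*
 * Illustrative sketch, not part of the original file: how the OPEN path
 * might pair nfs4_get_state_owner() with nfs4_put_state_owner().  Per
 * the comments above, both calls assume the caller holds clp->cl_sem;
 * the helper below is hypothetical and compiled out.
 */
#if 0
static int example_state_owner_usage(struct nfs_server *server,
		struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp;

	sp = nfs4_get_state_owner(server, cred);	/* refs sp and cred */
	if (sp == NULL)
		return -ENOMEM;
	/* ... perform the OPEN using sp->so_id and sp->so_seqid ... */
	nfs4_put_state_owner(sp);	/* may recycle sp onto cl_unused */
	return 0;
}
#endif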
static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	state->state = 0;
	state->nreaders = 0;
	state->nwriters = 0;
	state->flags = 0;
	memset(state->stateid.data, 0, sizeof(state->stateid.data));
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	return state;
}

static struct nfs4_state *
__nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	mode &= (FMODE_READ|FMODE_WRITE);
	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		if (state->owner->so_cred != cred)
			continue;
		if ((mode & FMODE_READ) != 0 && state->nreaders == 0)
			continue;
		if ((mode & FMODE_WRITE) != 0 && state->nwriters == 0)
			continue;
		if ((state->state & mode) != mode)
			continue;
		atomic_inc(&state->count);
		if (mode & FMODE_READ)
			state->nreaders++;
		if (mode & FMODE_WRITE)
			state->nwriters++;
		return state;
	}
	return NULL;
}
static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		/* Is this in the process of being freed? */
		if (state->nreaders == 0 && state->nwriters == 0)
			continue;
		if (state->owner == owner) {
			atomic_inc(&state->count);
			return state;
		}
	}
	return NULL;
}

struct nfs4_state *
nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
	struct nfs4_state *state;

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state(inode, cred, mode);
	spin_unlock(&inode->i_lock);
	return state;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}

struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
	} else {
		spin_unlock(&inode->i_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}
/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &inode->i_lock))
		return;
	if (!list_empty(&state->inode_states))
		list_del(&state->inode_states);
	spin_unlock(&inode->i_lock);
	list_del(&state->open_states);
	iput(inode);
	BUG_ON(state->state != 0);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Beware! Caller must be holding no references to clp->cl_sem!
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;
	struct nfs4_client *clp = owner->so_client;
	int newstate;

	atomic_inc(&owner->so_count);
	down_read(&clp->cl_sem);
	/* Protect against nfs4_find_state() */
	spin_lock(&inode->i_lock);
	if (mode & FMODE_READ)
		state->nreaders--;
	if (mode & FMODE_WRITE)
		state->nwriters--;
	if (state->nwriters == 0) {
		if (state->nreaders == 0)
			list_del_init(&state->inode_states);
		/* See reclaim code */
		list_move_tail(&state->open_states, &owner->so_states);
	}
	spin_unlock(&inode->i_lock);
	newstate = 0;
	if (state->state != 0) {
		if (state->nreaders)
			newstate |= FMODE_READ;
		if (state->nwriters)
			newstate |= FMODE_WRITE;
		if (state->state == newstate)
			goto out;
		if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
			state->state = newstate;
			goto out;
		}
		if (nfs4_do_close(inode, state, newstate) == 0)
			return;
	}
out:
	nfs4_put_open_state(state);
	nfs4_put_state_owner(owner);
	up_read(&clp->cl_sem);
}
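
/*
 * Illustrative sketch, not part of the original file: the reference
 * dance a read opener might perform - look up (or share) the open
 * state, then drop it with nfs4_close_state(), which also issues
 * CLOSE or OPEN_DOWNGRADE when the share mode changes.  The helper
 * below is hypothetical and compiled out.
 */
#if 0
static void example_open_close(struct inode *inode, struct rpc_cred *cred)
{
	struct nfs4_state *state;

	state = nfs4_find_state(inode, cred, FMODE_READ);
	if (state == NULL)
		return;
	/* ... issue READs using the stateid carried in 'state' ... */
	nfs4_close_state(state, FMODE_READ);	/* drops reader count + ref */
}
#endif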
/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *pos;

	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner != fl_owner)
			continue;
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}

/*
 * Allocate a new lock_state for the given fl_owner, seeding it with a
 * fresh lockowner id taken from the client.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs4_client *clp = state->owner->so_client;

	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	lsp->ls_seqid.sequence = &state->owner->so_sequence;
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	spin_lock(&clp->cl_lock);
	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for (;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			new->ls_state = state;
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	kfree(new);
	return lsp;
}
/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
static void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	kfree(lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};

int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	lsp = nfs4_get_lock_state(state, fl->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}
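
/*
 * Illustrative sketch, not part of the original file: the locking path
 * is expected to call nfs4_set_lock_state() once per file_lock before
 * sending LOCK; afterwards nfs4_fl_lock_ops keeps the lock_state's
 * refcount in step with copies and frees of the file_lock.  The helper
 * below is hypothetical and compiled out.
 */
#if 0
static int example_lock_setup(struct nfs4_state *state, struct file_lock *fl)
{
	int status;

	status = nfs4_set_lock_state(state, fl);
	if (status != 0)
		return status;	/* -ENOMEM: no lock_state could be allocated */
	/* ... send LOCK; fl->fl_u.nfs4_fl.owner now carries the lock_state ... */
	return 0;
}
#endif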
/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;

	memcpy(dst, &state->stateid, sizeof(*dst));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;
	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}
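
/*
 * Illustrative sketch, not part of the original file: READ/WRITE setup
 * uses nfs4_copy_stateid() to pick the most specific stateid available -
 * the lock stateid when the caller holds an initialized byte-range lock,
 * the open stateid otherwise.  Hypothetical helper, compiled out.
 */
#if 0
static void example_setup_read_stateid(struct nfs4_state *state,
		fl_owner_t owner, nfs4_stateid *dst)
{
	/* Falls back to state->stateid if no initialized lock state exists */
	nfs4_copy_stateid(dst, state, owner);
}
#endif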
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
	struct rpc_sequence *sequence = counter->sequence;
	struct nfs_seqid *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new != NULL) {
		new->sequence = counter;
		new->task = NULL;
		spin_lock(&sequence->lock);
		list_add_tail(&new->list, &sequence->list);
		spin_unlock(&sequence->lock);
	}
	return new;
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	struct rpc_task *next = NULL;

	spin_lock(&sequence->lock);
	list_del(&seqid->list);
	if (!list_empty(&sequence->list)) {
		next = list_entry(sequence->list.next, struct nfs_seqid, list)->task;
		if (next)
			rpc_wake_up_task(next);
	}
	spin_unlock(&sequence->lock);
	kfree(seqid);
}
/*
 * Called with clp->cl_sem held.
 *
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	switch (status) {
		case 0:
			break;
		case -NFS4ERR_BAD_SEQID:
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_STALE_STATEID:
		case -NFS4ERR_BAD_STATEID:
		case -NFS4ERR_BADXDR:
		case -NFS4ERR_RESOURCE:
		case -NFS4ERR_NOFILEHANDLE:
			/* Non-seqid mutating errors */
			return;
	}
	/*
	 * Note: no locking needed as we are guaranteed to be first
	 * on the sequence list
	 */
	seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	if (status == -NFS4ERR_BAD_SEQID) {
		struct nfs4_state_owner *sp = container_of(seqid->sequence,
				struct nfs4_state_owner, so_seqid);
		nfs4_drop_state_owner(sp);
	}
	nfs_increment_seqid(status, seqid);
}

/*
 * Called with clp->cl_sem held.
 *
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	nfs_increment_seqid(status, seqid);
}

int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	spin_lock(&sequence->lock);
	if (sequence->list.next != &seqid->list) {
		seqid->task = task;
		rpc_sleep_on(&sequence->wait, task, NULL, NULL);
		status = -EAGAIN;
	}
	spin_unlock(&sequence->lock);
	return status;
}
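
/*
 * Illustrative sketch, not part of the original file: the lifecycle of
 * an nfs_seqid around a seqid-mutating RPC such as OPEN.  The RPC itself
 * is elided and 'status' stands for its result; concurrent users of the
 * same state_owner would additionally serialize via
 * nfs_wait_on_sequence().  Hypothetical helper, compiled out.
 */
#if 0
static int example_open_seqid_cycle(struct nfs4_state_owner *sp, int status)
{
	struct nfs_seqid *seqid;

	seqid = nfs_alloc_seqid(&sp->so_seqid);	/* joins sequence->list */
	if (seqid == NULL)
		return -ENOMEM;
	/* ... run the OPEN RPC, obtaining 'status' ... */
	nfs_increment_open_seqid(status, seqid);	/* no-op for
							 * non-seqid-mutating
							 * errors */
	nfs_free_seqid(seqid);	/* wakes the next waiter on the sequence */
	return status;
}
#endif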
static int reclaimer(void *);

struct reclaimer_args {
	struct nfs4_client *clp;
	struct completion complete;
};

/*
 * State recovery routine
 */
static void
nfs4_recover_state(void *data)
{
	struct nfs4_client *clp = (struct nfs4_client *)data;
	struct reclaimer_args args = {
		.clp = clp,
	};

	might_sleep();
	init_completion(&args.complete);

	if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
		goto out_failed_clear;
	wait_for_completion(&args.complete);
	return;
out_failed_clear:
	set_bit(NFS4CLNT_OK, &clp->cl_state);
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * Schedule a state recovery attempt
 */
void
nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
	if (!clp)
		return;
	if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
		schedule_work(&clp->cl_recoverd);
}
static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & FL_POSIX))
			continue;
		if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
				/* fall through */
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/* kill_proc(fl->fl_owner, SIGLOST, 1); */
				break;
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}
static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (state->state == 0)
			continue;
		status = ops->recover_open(sp, state);
		if (status >= 0) {
			status = nfs4_reclaim_locks(ops, state);
			if (status < 0)
				goto out_err;
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
					printk(KERN_WARNING "%s: Lock reclaim failed!\n",
							__FUNCTION__);
			}
			continue;
		}
		switch (status) {
			default:
				printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
						__FUNCTION__, status);
				/* fall through */
			case -ENOENT:
			case -NFS4ERR_RECLAIM_BAD:
			case -NFS4ERR_RECLAIM_CONFLICT:
				/*
				 * Open state on this file cannot be recovered.
				 * All we can do is revert to using the zero stateid.
				 */
				memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
				/* Mark the file as being 'closed' */
				state->state = 0;
				break;
			case -NFS4ERR_EXPIRED:
			case -NFS4ERR_NO_GRACE:
			case -NFS4ERR_STALE_CLIENTID:
				goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}
static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;

	/* Reset all sequence ids to zero */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		sp->so_seqid.counter = 0;
		sp->so_seqid.flags = 0;
		list_for_each_entry(state, &sp->so_states, open_states) {
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				lock->ls_seqid.counter = 0;
				lock->ls_seqid.flags = 0;
				lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
			}
		}
	}
}
static int reclaimer(void *ptr)
{
	struct reclaimer_args *args = (struct reclaimer_args *)ptr;
	struct nfs4_client *clp = args->clp;
	struct nfs4_state_owner *sp;
	struct nfs4_state_recovery_ops *ops;
	int status = 0;

	daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
	allow_signal(SIGKILL);

	atomic_inc(&clp->cl_count);
	complete(&args->complete);

	/* Ensure exclusive access to NFSv4 state */
	lock_kernel();
	down_write(&clp->cl_sem);
	/* Are there any NFS mounts out there? */
	if (list_empty(&clp->cl_superblocks))
		goto out;
restart_loop:
	status = nfs4_proc_renew(clp);
	switch (status) {
		case 0:
		case -NFS4ERR_CB_PATH_DOWN:
			goto out;
		case -NFS4ERR_STALE_CLIENTID:
		case -NFS4ERR_LEASE_MOVED:
			ops = &nfs4_reboot_recovery_ops;
			break;
		default:
			ops = &nfs4_network_partition_recovery_ops;
	}
	nfs4_state_mark_reclaim(clp);
	status = __nfs4_init_client(clp);
	if (status)
		goto out_error;
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	/* Note: list is protected by exclusive lock on cl->cl_sem */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		status = nfs4_reclaim_open_state(ops, sp);
		if (status < 0) {
			if (status == -NFS4ERR_NO_GRACE) {
				ops = &nfs4_network_partition_recovery_ops;
				status = nfs4_reclaim_open_state(ops, sp);
			}
			if (status == -NFS4ERR_STALE_CLIENTID)
				goto restart_loop;
			if (status == -NFS4ERR_EXPIRED)
				goto restart_loop;
		}
	}
	nfs_delegation_reap_unclaimed(clp);
out:
	set_bit(NFS4CLNT_OK, &clp->cl_state);
	up_write(&clp->cl_sem);
	unlock_kernel();
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	if (status == -NFS4ERR_CB_PATH_DOWN)
		nfs_handle_cb_pathdown(clp);
	nfs4_put_client(clp);
	return 0;
out_error:
	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
			NIPQUAD(clp->cl_addr.s_addr), -status);
	goto out;
}
/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */