/*
 *  fs/nfs/nfs4state.c
 *
 *  Client-side support for the NFSv4 state model.
 *
 *  Copyright (c) 2002 The Regents of the University of Michigan.
 *  All rights reserved.
 *
 *  Kendrick Smith <kmsmith@umich.edu>
 *
 *  Redistribution and use in source and binary forms, with or without
 *  modification, are permitted provided that the following conditions
 *  are met:
 *
 *  1. Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *  2. Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in the
 *     documentation and/or other materials provided with the distribution.
 *  3. Neither the name of the University nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 *  THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 *  WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 *  MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 *  DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 *  FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *  CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *  SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 *  BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 *  LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 *  NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 *  SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *  Implementation of the NFSv4 state model.  For the time being,
 *  this is minimal, but will be made much more complex in a
 *  subsequent patch.
 */

#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/kthread.h>
#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"

#define OPENOWNER_POOL_SIZE	8

const nfs4_stateid zero_stateid;

static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);

void
init_nfsv4_state(struct nfs_server *server)
{
	server->nfs4_state = NULL;
	INIT_LIST_HEAD(&server->nfs4_siblings);
}

void
destroy_nfsv4_state(struct nfs_server *server)
{
	kfree(server->mnt_path);
	server->mnt_path = NULL;
	if (server->nfs4_state) {
		nfs4_put_client(server->nfs4_state);
		server->nfs4_state = NULL;
	}
}

/*
 * nfs4_get_client(): returns an empty client structure
 * nfs4_put_client(): drops reference to client structure
 *
 * Since these are allocated/deallocated very rarely, we don't
 * bother putting them in a slab cache...
 */
static struct nfs4_client *
nfs4_alloc_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	if (nfs_callback_up() < 0)
		return NULL;
	clp = kzalloc(sizeof(*clp), GFP_KERNEL);
	if (clp == NULL) {
		nfs_callback_down();
		return NULL;
	}
	memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
	init_rwsem(&clp->cl_sem);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_state_owners);
	INIT_LIST_HEAD(&clp->cl_unused);
	spin_lock_init(&clp->cl_lock);
	atomic_set(&clp->cl_count, 1);
	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
	INIT_LIST_HEAD(&clp->cl_superblocks);
	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
	clp->cl_rpcclient = ERR_PTR(-EINVAL);
	clp->cl_boot_time = CURRENT_TIME;
	clp->cl_state = 0;
	return clp;
}

static void
nfs4_free_client(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;

	while (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next,
				struct nfs4_state_owner,
				so_list);
		list_del(&sp->so_list);
		kfree(sp);
	}
	BUG_ON(!list_empty(&clp->cl_state_owners));
	if (clp->cl_cred)
		put_rpccred(clp->cl_cred);
	nfs_idmap_delete(clp);
	if (!IS_ERR(clp->cl_rpcclient))
		rpc_shutdown_client(clp->cl_rpcclient);
	kfree(clp);
	nfs_callback_down();
}

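/*
 * Find an nfs4_client that matches the given address and take a
 * reference to it.  Caller must hold state_spinlock.
 */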
static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
		if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
			atomic_inc(&clp->cl_count);
			return clp;
		}
	}
	return NULL;
}

struct nfs4_client *nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	spin_lock(&state_spinlock);
	clp = __nfs4_find_client(addr);
	spin_unlock(&state_spinlock);
	return clp;
}

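/*
 * Look up a client by address, allocating a fresh one if none exists.
 * The spinlock is dropped while allocating, so loop and re-check the
 * list before inserting the new entry.
 */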
struct nfs4_client *
nfs4_get_client(struct in_addr *addr)
{
	struct nfs4_client *clp, *new = NULL;

	spin_lock(&state_spinlock);
	for (;;) {
		clp = __nfs4_find_client(addr);
		if (clp != NULL)
			break;
		clp = new;
		if (clp != NULL) {
			list_add(&clp->cl_servers, &nfs4_clientid_list);
			new = NULL;
			break;
		}
		spin_unlock(&state_spinlock);
		new = nfs4_alloc_client(addr);
		spin_lock(&state_spinlock);
		if (new == NULL)
			break;
	}
	spin_unlock(&state_spinlock);
	if (new)
		nfs4_free_client(new);
	return clp;
}

void
nfs4_put_client(struct nfs4_client *clp)
{
	if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
		return;
	list_del(&clp->cl_servers);
	spin_unlock(&state_spinlock);
	BUG_ON(!list_empty(&clp->cl_superblocks));
	rpc_wake_up(&clp->cl_rpcwaitq);
	nfs4_kill_renewd(clp);
	nfs4_free_client(clp);
}

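/*
 * Establish the lease: SETCLIENTID followed by SETCLIENTID_CONFIRM,
 * then schedule the RENEW timer.
 */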
static int __nfs4_init_client(struct nfs4_client *clp)
{
	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);

	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}

int nfs4_init_client(struct nfs4_client *clp)
{
	return nfs4_map_errors(__nfs4_init_client(clp));
}

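/* Hand out a unique owner id.  Callers serialize via clp->cl_lock. */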
u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
	return clp->cl_lockowner_id++;
}

static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp = NULL;

	if (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
		atomic_inc(&sp->so_count);
		sp->so_cred = cred;
		list_move(&sp->so_list, &clp->cl_state_owners);
		clp->cl_nunused--;
	}
	return sp;
}

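/*
 * Pick a credential to use for lease renewal: the first state owner
 * that still has open state attached to it.
 */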
struct rpc_cred *nfs4_get_renew_cred(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;
	struct rpc_cred *cred = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (list_empty(&sp->so_states))
			continue;
		cred = get_rpccred(sp->so_cred);
		break;
	}
	return cred;
}

static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp, *res = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (sp->so_cred != cred)
			continue;
		atomic_inc(&sp->so_count);
		/* Move to the head of the list */
		list_move(&sp->so_list, &clp->cl_state_owners);
		res = sp;
		break;
	}
	return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return NULL;
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	INIT_LIST_HEAD(&sp->so_delegations);
	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
	sp->so_seqid.sequence = &sp->so_sequence;
	spin_lock_init(&sp->so_sequence.lock);
	INIT_LIST_HEAD(&sp->so_sequence.list);
	atomic_set(&sp->so_count, 1);
	return sp;
}

void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;

	spin_lock(&clp->cl_lock);
	list_del_init(&sp->so_list);
	spin_unlock(&clp->cl_lock);
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 * with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs4_client *clp = server->nfs4_state;
	struct nfs4_state_owner *sp, *new;

	get_rpccred(cred);
	new = nfs4_alloc_state_owner();
	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(clp, cred);
	if (sp == NULL)
		sp = nfs4_client_grab_unused(clp, cred);
	if (sp == NULL && new != NULL) {
		list_add(&new->so_list, &clp->cl_state_owners);
		new->so_client = clp;
		new->so_id = nfs4_alloc_lockowner_id(clp);
		new->so_cred = cred;
		sp = new;
		new = NULL;
	}
	spin_unlock(&clp->cl_lock);
	kfree(new);
	if (sp != NULL)
		return sp;
	put_rpccred(cred);
	return NULL;
}

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
		goto out_free;
	if (list_empty(&sp->so_list))
		goto out_free;
	list_move(&sp->so_list, &clp->cl_unused);
	clp->cl_nunused++;
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	cred = NULL;
	return;
out_free:
	list_del(&sp->so_list);
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	kfree(sp);
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	return state;
}

void
nfs4_state_set_mode_locked(struct nfs4_state *state, mode_t mode)
{
	if (state->state == mode)
		return;
	/* NB! List reordering - see the reclaim code for why.  */
	if ((mode & FMODE_WRITE) != (state->state & FMODE_WRITE)) {
		if (mode & FMODE_WRITE)
			list_move(&state->open_states, &state->owner->so_states);
		else
			list_move_tail(&state->open_states, &state->owner->so_states);
	}
	if (mode == 0)
		list_del_init(&state->inode_states);
	state->state = mode;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		/* Is this in the process of being freed? */
		if (state->state == 0)
			continue;
		if (state->owner == owner) {
			atomic_inc(&state->count);
			return state;
		}
	}
	return NULL;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}

struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}

/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	if (!list_empty(&state->inode_states))
		list_del(&state->inode_states);
	list_del(&state->open_states);
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	iput(inode);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Close the current file.
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;
	int oldstate, newstate = 0;

	atomic_inc(&owner->so_count);
	/* Protect against nfs4_find_state() */
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	switch (mode & (FMODE_READ | FMODE_WRITE)) {
	case FMODE_READ:
		state->n_rdonly--;
		break;
	case FMODE_WRITE:
		state->n_wronly--;
		break;
	case FMODE_READ|FMODE_WRITE:
		state->n_rdwr--;
	}
	oldstate = newstate = state->state;
	if (state->n_rdwr == 0) {
		if (state->n_rdonly == 0)
			newstate &= ~FMODE_READ;
		if (state->n_wronly == 0)
			newstate &= ~FMODE_WRITE;
	}
	if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
		nfs4_state_set_mode_locked(state, newstate);
		oldstate = newstate;
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	if (oldstate != newstate && nfs4_do_close(inode, state) == 0)
		return;
	nfs4_put_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *pos;

	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner != fl_owner)
			continue;
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}

/*
 * Allocate and initialize a new lock_state structure for the
 * given open state and lock owner.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs4_client *clp = state->owner->so_client;

	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	lsp->ls_seqid.sequence = &state->owner->so_sequence;
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	spin_lock(&clp->cl_lock);
	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for (;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			new->ls_state = state;
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	kfree(new);
	return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	kfree(lsp);
}

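/*
 * file_lock_operations callbacks: keep the nfs4_lock_state refcount
 * in step with the VFS as locks are copied and released.
 */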
static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};

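/*
 * Bind an nfs4_lock_state to the file_lock, so that read/write
 * requests can later look up the stateid for this byte-range lock.
 */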
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	lsp = nfs4_get_lock_state(state, fl->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;

	memcpy(dst, &state->stateid, sizeof(*dst));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;
	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}

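/*
 * Allocate a seqid and queue it on the owner's sequence list.  The
 * list order decides which request may use the seqid next - see
 * nfs_wait_on_sequence() below.
 */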
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
	struct rpc_sequence *sequence = counter->sequence;
	struct nfs_seqid *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new != NULL) {
		new->sequence = counter;
		spin_lock(&sequence->lock);
		list_add_tail(&new->list, &sequence->list);
		spin_unlock(&sequence->lock);
	}
	return new;
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;

	spin_lock(&sequence->lock);
	list_del(&seqid->list);
	spin_unlock(&sequence->lock);
	rpc_wake_up(&sequence->wait);
	kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see the comments in nfs_fs.h:seqid_mutating_error()
 */
static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	switch (status) {
	case 0:
		break;
	case -NFS4ERR_BAD_SEQID:
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_BADXDR:
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_NOFILEHANDLE:
		/* Non-seqid mutating errors */
		return;
	}
	/*
	 * Note: no locking needed as we are guaranteed to be first
	 * on the sequence list
	 */
	seqid->sequence->counter++;
}

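/*
 * On NFS4ERR_BAD_SEQID the server no longer recognizes this open
 * owner, so drop it from the client's list before bumping the seqid.
 */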
void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	if (status == -NFS4ERR_BAD_SEQID) {
		struct nfs4_state_owner *sp = container_of(seqid->sequence,
				struct nfs4_state_owner, so_seqid);
		nfs4_drop_state_owner(sp);
	}
	nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see the comments in nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	nfs_increment_seqid(status, seqid);
}

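/*
 * Serialize seqid-mutating operations: only the request at the head
 * of the sequence list may proceed; all others sleep on the wait
 * queue until nfs_free_seqid() wakes them.
 */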
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	if (sequence->list.next == &seqid->list)
		goto out;
	spin_lock(&sequence->lock);
	if (sequence->list.next != &seqid->list) {
		rpc_sleep_on(&sequence->wait, task, NULL, NULL);
		status = -EAGAIN;
	}
	spin_unlock(&sequence->lock);
out:
	return status;
}

static int reclaimer(void *);

static inline void nfs4_clear_recover_bit(struct nfs4_client *clp)
{
	smp_mb__before_clear_bit();
	clear_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state);
	smp_mb__after_clear_bit();
	wake_up_bit(&clp->cl_state, NFS4CLNT_STATE_RECOVER);
	rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * State recovery routine
 */
static void nfs4_recover_state(struct nfs4_client *clp)
{
	struct task_struct *task;

	__module_get(THIS_MODULE);
	atomic_inc(&clp->cl_count);
	task = kthread_run(reclaimer, clp, "%u.%u.%u.%u-reclaim",
			NIPQUAD(clp->cl_addr));
	if (!IS_ERR(task))
		return;
	nfs4_clear_recover_bit(clp);
	nfs4_put_client(clp);
	module_put(THIS_MODULE);
}

/*
 * Schedule a state recovery attempt
 */
void nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
	if (!clp)
		return;
	if (test_and_set_bit(NFS4CLNT_STATE_RECOVER, &clp->cl_state) == 0)
		nfs4_recover_state(clp);
}

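/*
 * Re-establish all byte-range locks attached to the given open state
 * using the supplied recovery ops.
 */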
static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	for (fl = inode->i_flock; fl != NULL; fl = fl->fl_next) {
		if (!(fl->fl_flags & (FL_POSIX|FL_FLOCK)))
			continue;
		if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
		default:
			printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
					__FUNCTION__, status);
		case -NFS4ERR_EXPIRED:
		case -NFS4ERR_NO_GRACE:
		case -NFS4ERR_RECLAIM_BAD:
		case -NFS4ERR_RECLAIM_CONFLICT:
			/* kill_proc(fl->fl_pid, SIGLOST, 1); */
			break;
		case -NFS4ERR_STALE_CLIENTID:
			goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (state->state == 0)
			continue;
		status = ops->recover_open(sp, state);
		if (status >= 0) {
			status = nfs4_reclaim_locks(ops, state);
			if (status < 0)
				goto out_err;
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
					printk("%s: Lock reclaim failed!\n",
							__FUNCTION__);
			}
			continue;
		}
		switch (status) {
		default:
			printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
					__FUNCTION__, status);
		case -ENOENT:
		case -NFS4ERR_RECLAIM_BAD:
		case -NFS4ERR_RECLAIM_CONFLICT:
			/*
			 * Open state on this file cannot be recovered
			 * All we can do is revert to using the zero stateid.
			 */
			memset(state->stateid.data, 0,
					sizeof(state->stateid.data));
			/* Mark the file as being 'closed' */
			state->state = 0;
			break;
		case -NFS4ERR_EXPIRED:
		case -NFS4ERR_NO_GRACE:
		case -NFS4ERR_STALE_CLIENTID:
			goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;

	/* Reset all sequence ids to zero */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		sp->so_seqid.counter = 0;
		sp->so_seqid.flags = 0;
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				lock->ls_seqid.counter = 0;
				lock->ls_seqid.flags = 0;
				lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
			}
		}
		spin_unlock(&sp->so_lock);
	}
}

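/*
 * The state recovery thread: renew the lease and, if that fails, run
 * reboot recovery (reclaims within the server's grace period) or
 * network partition recovery as appropriate.
 */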
static int reclaimer(void *ptr)
{
	struct nfs4_client *clp = ptr;
	struct nfs4_state_owner *sp;
	struct nfs4_state_recovery_ops *ops;
	int status = 0;

	allow_signal(SIGKILL);

	/* Ensure exclusive access to NFSv4 state */
	lock_kernel();
	down_write(&clp->cl_sem);
	/* Are there any NFS mounts out there? */
	if (list_empty(&clp->cl_superblocks))
		goto out;
restart_loop:
	status = nfs4_proc_renew(clp, clp->cl_cred);
	switch (status) {
	case 0:
	case -NFS4ERR_CB_PATH_DOWN:
		goto out;
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_LEASE_MOVED:
		ops = &nfs4_reboot_recovery_ops;
		break;
	default:
		ops = &nfs4_network_partition_recovery_ops;
	}
	nfs4_state_mark_reclaim(clp);
	status = __nfs4_init_client(clp);
	if (status)
		goto out_error;
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	/* Note: list is protected by exclusive lock on clp->cl_sem */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		status = nfs4_reclaim_open_state(ops, sp);
		if (status < 0) {
			if (status == -NFS4ERR_NO_GRACE) {
				ops = &nfs4_network_partition_recovery_ops;
				status = nfs4_reclaim_open_state(ops, sp);
			}
			if (status == -NFS4ERR_STALE_CLIENTID)
				goto restart_loop;
			if (status == -NFS4ERR_EXPIRED)
				goto restart_loop;
		}
	}
	nfs_delegation_reap_unclaimed(clp);
out:
	up_write(&clp->cl_sem);
	unlock_kernel();
	if (status == -NFS4ERR_CB_PATH_DOWN)
		nfs_handle_cb_pathdown(clp);
	nfs4_clear_recover_bit(clp);
	nfs4_put_client(clp);
	module_put_and_exit(0);
	return 0;
out_error:
	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
			NIPQUAD(clp->cl_addr.s_addr), -status);
	goto out;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */