
/*
 * fs/nfs/nfs4state.c
 *
 * Client-side NFSv4 state management.
 *
 * Copyright (c) 2002 The Regents of the University of Michigan.
 * All rights reserved.
 *
 * Kendrick Smith <kmsmith@umich.edu>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
 * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * Implementation of the NFSv4 state model.  For the time being,
 * this is minimal, but will be made much more complex in a
 * subsequent patch.
 */
#include <linux/config.h>
#include <linux/slab.h>
#include <linux/smp_lock.h>
#include <linux/nfs_fs.h>
#include <linux/nfs_idmap.h>
#include <linux/workqueue.h>
#include <linux/bitops.h>

#include "nfs4_fs.h"
#include "callback.h"
#include "delegation.h"

#define OPENOWNER_POOL_SIZE 8

const nfs4_stateid zero_stateid;

static DEFINE_SPINLOCK(state_spinlock);
static LIST_HEAD(nfs4_clientid_list);

static void nfs4_recover_state(void *);
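
/*
 * Initialise the NFSv4 state for an nfs_server: no nfs4_client is
 * attached yet, and the superblock is not on any client's list.
 */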
void
init_nfsv4_state(struct nfs_server *server)
{
	server->nfs4_state = NULL;
	INIT_LIST_HEAD(&server->nfs4_siblings);
}
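
/*
 * Release the NFSv4 state held by an nfs_server: free the mount path
 * and drop our reference to the nfs4_client.
 */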
void
destroy_nfsv4_state(struct nfs_server *server)
{
	if (server->mnt_path) {
		kfree(server->mnt_path);
		server->mnt_path = NULL;
	}
	if (server->nfs4_state) {
		nfs4_put_client(server->nfs4_state);
		server->nfs4_state = NULL;
	}
}

/*
 * nfs4_get_client(): returns an empty client structure
 * nfs4_put_client(): drops reference to client structure
 *
 * Since these are allocated/deallocated very rarely, we don't
 * bother putting them in a slab cache...
 */
static struct nfs4_client *
nfs4_alloc_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	if (nfs_callback_up() < 0)
		return NULL;
	if ((clp = kmalloc(sizeof(*clp), GFP_KERNEL)) == NULL) {
		nfs_callback_down();
		return NULL;
	}
	memset(clp, 0, sizeof(*clp));
	memcpy(&clp->cl_addr, addr, sizeof(clp->cl_addr));
	init_rwsem(&clp->cl_sem);
	INIT_LIST_HEAD(&clp->cl_delegations);
	INIT_LIST_HEAD(&clp->cl_state_owners);
	INIT_LIST_HEAD(&clp->cl_unused);
	spin_lock_init(&clp->cl_lock);
	atomic_set(&clp->cl_count, 1);
	INIT_WORK(&clp->cl_recoverd, nfs4_recover_state, clp);
	INIT_WORK(&clp->cl_renewd, nfs4_renew_state, clp);
	INIT_LIST_HEAD(&clp->cl_superblocks);
	init_waitqueue_head(&clp->cl_waitq);
	rpc_init_wait_queue(&clp->cl_rpcwaitq, "NFS4 client");
	clp->cl_rpcclient = ERR_PTR(-EINVAL);
	clp->cl_boot_time = CURRENT_TIME;
	clp->cl_state = 1 << NFS4CLNT_OK;
	return clp;
}
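
/*
 * Free a client structure: release any pooled state owners, drop the
 * credential, idmap and RPC client, and bring down the callback
 * service that nfs4_alloc_client() started.
 */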
static void
nfs4_free_client(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;

	while (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next,
				struct nfs4_state_owner,
				so_list);
		list_del(&sp->so_list);
		kfree(sp);
	}
	BUG_ON(!list_empty(&clp->cl_state_owners));
	if (clp->cl_cred)
		put_rpccred(clp->cl_cred);
	nfs_idmap_delete(clp);
	if (!IS_ERR(clp->cl_rpcclient))
		rpc_shutdown_client(clp->cl_rpcclient);
	kfree(clp);
	nfs_callback_down();
}
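
/*
 * Look up an nfs4_client by server IP address.  The __ variant expects
 * state_spinlock to be held; both return the client with an extra
 * reference taken on success, or NULL if no match was found.
 */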
static struct nfs4_client *__nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	list_for_each_entry(clp, &nfs4_clientid_list, cl_servers) {
		if (memcmp(&clp->cl_addr, addr, sizeof(clp->cl_addr)) == 0) {
			atomic_inc(&clp->cl_count);
			return clp;
		}
	}
	return NULL;
}

struct nfs4_client *nfs4_find_client(struct in_addr *addr)
{
	struct nfs4_client *clp;

	spin_lock(&state_spinlock);
	clp = __nfs4_find_client(addr);
	spin_unlock(&state_spinlock);
	return clp;
}
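
/*
 * Find the nfs4_client for the given address, creating one if it does
 * not yet exist.  The allocation is done with state_spinlock dropped,
 * so the lookup is retried before a newly allocated client is added
 * to the global list.
 */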
struct nfs4_client *
nfs4_get_client(struct in_addr *addr)
{
	struct nfs4_client *clp, *new = NULL;

	spin_lock(&state_spinlock);
	for (;;) {
		clp = __nfs4_find_client(addr);
		if (clp != NULL)
			break;
		clp = new;
		if (clp != NULL) {
			list_add(&clp->cl_servers, &nfs4_clientid_list);
			new = NULL;
			break;
		}
		spin_unlock(&state_spinlock);
		new = nfs4_alloc_client(addr);
		spin_lock(&state_spinlock);
		if (new == NULL)
			break;
	}
	spin_unlock(&state_spinlock);
	if (new)
		nfs4_free_client(new);
	return clp;
}

void
nfs4_put_client(struct nfs4_client *clp)
{
	if (!atomic_dec_and_lock(&clp->cl_count, &state_spinlock))
		return;
	list_del(&clp->cl_servers);
	spin_unlock(&state_spinlock);
	BUG_ON(!list_empty(&clp->cl_superblocks));
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	nfs4_kill_renewd(clp);
	nfs4_free_client(clp);
}
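
/*
 * Establish our clientid with the server via SETCLIENTID and
 * SETCLIENTID_CONFIRM, then schedule lease renewal.
 */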
static int __nfs4_init_client(struct nfs4_client *clp)
{
	int status = nfs4_proc_setclientid(clp, NFS4_CALLBACK, nfs_callback_tcpport);

	if (status == 0)
		status = nfs4_proc_setclientid_confirm(clp);
	if (status == 0)
		nfs4_schedule_state_renewal(clp);
	return status;
}

int nfs4_init_client(struct nfs4_client *clp)
{
	return nfs4_map_errors(__nfs4_init_client(clp));
}

u32
nfs4_alloc_lockowner_id(struct nfs4_client *clp)
{
	return clp->cl_lockowner_id++;
}

static struct nfs4_state_owner *
nfs4_client_grab_unused(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp = NULL;

	if (!list_empty(&clp->cl_unused)) {
		sp = list_entry(clp->cl_unused.next, struct nfs4_state_owner, so_list);
		atomic_inc(&sp->so_count);
		sp->so_cred = cred;
		list_move(&sp->so_list, &clp->cl_state_owners);
		clp->cl_nunused--;
	}
	return sp;
}

static struct nfs4_state_owner *
nfs4_find_state_owner(struct nfs4_client *clp, struct rpc_cred *cred)
{
	struct nfs4_state_owner *sp, *res = NULL;

	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		if (sp->so_cred != cred)
			continue;
		atomic_inc(&sp->so_count);
		/* Move to the head of the list */
		list_move(&sp->so_list, &clp->cl_state_owners);
		res = sp;
		break;
	}
	return res;
}

/*
 * nfs4_alloc_state_owner(): this is called on the OPEN or CREATE path to
 * create a new state_owner.
 */
static struct nfs4_state_owner *
nfs4_alloc_state_owner(void)
{
	struct nfs4_state_owner *sp;

	sp = kzalloc(sizeof(*sp), GFP_KERNEL);
	if (!sp)
		return NULL;
	spin_lock_init(&sp->so_lock);
	INIT_LIST_HEAD(&sp->so_states);
	INIT_LIST_HEAD(&sp->so_delegations);
	rpc_init_wait_queue(&sp->so_sequence.wait, "Seqid_waitqueue");
	sp->so_seqid.sequence = &sp->so_sequence;
	spin_lock_init(&sp->so_sequence.lock);
	INIT_LIST_HEAD(&sp->so_sequence.list);
	atomic_set(&sp->so_count, 1);
	return sp;
}
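
/*
 * Unhash a state owner from its client so that it can no longer be
 * found or reused; used when the server rejects the owner's seqid.
 */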
void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;

	spin_lock(&clp->cl_lock);
	list_del_init(&sp->so_list);
	spin_unlock(&clp->cl_lock);
}

/*
 * Note: must be called with clp->cl_sem held in order to prevent races
 * with reboot recovery!
 */
struct nfs4_state_owner *nfs4_get_state_owner(struct nfs_server *server, struct rpc_cred *cred)
{
	struct nfs4_client *clp = server->nfs4_state;
	struct nfs4_state_owner *sp, *new;

	get_rpccred(cred);
	new = nfs4_alloc_state_owner();
	spin_lock(&clp->cl_lock);
	sp = nfs4_find_state_owner(clp, cred);
	if (sp == NULL)
		sp = nfs4_client_grab_unused(clp, cred);
	if (sp == NULL && new != NULL) {
		list_add(&new->so_list, &clp->cl_state_owners);
		new->so_client = clp;
		new->so_id = nfs4_alloc_lockowner_id(clp);
		new->so_cred = cred;
		sp = new;
		new = NULL;
	}
	spin_unlock(&clp->cl_lock);
	if (new)
		kfree(new);
	if (sp != NULL)
		return sp;
	put_rpccred(cred);
	return NULL;
}

/*
 * Must be called with clp->cl_sem held in order to avoid races
 * with state recovery...
 */
void nfs4_put_state_owner(struct nfs4_state_owner *sp)
{
	struct nfs4_client *clp = sp->so_client;
	struct rpc_cred *cred = sp->so_cred;

	if (!atomic_dec_and_lock(&sp->so_count, &clp->cl_lock))
		return;
	if (clp->cl_nunused >= OPENOWNER_POOL_SIZE)
		goto out_free;
	if (list_empty(&sp->so_list))
		goto out_free;
	list_move(&sp->so_list, &clp->cl_unused);
	clp->cl_nunused++;
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	cred = NULL;
	return;
out_free:
	list_del(&sp->so_list);
	spin_unlock(&clp->cl_lock);
	put_rpccred(cred);
	kfree(sp);
}

static struct nfs4_state *
nfs4_alloc_open_state(void)
{
	struct nfs4_state *state;

	state = kmalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return NULL;
	state->state = 0;
	state->nreaders = 0;
	state->nwriters = 0;
	state->flags = 0;
	memset(state->stateid.data, 0, sizeof(state->stateid.data));
	atomic_set(&state->count, 1);
	INIT_LIST_HEAD(&state->lock_states);
	spin_lock_init(&state->state_lock);
	return state;
}

static struct nfs4_state *
__nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	mode &= (FMODE_READ|FMODE_WRITE);
	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		if (state->owner->so_cred != cred)
			continue;
		if ((mode & FMODE_READ) != 0 && state->nreaders == 0)
			continue;
		if ((mode & FMODE_WRITE) != 0 && state->nwriters == 0)
			continue;
		if ((state->state & mode) != mode)
			continue;
		atomic_inc(&state->count);
		if (mode & FMODE_READ)
			state->nreaders++;
		if (mode & FMODE_WRITE)
			state->nwriters++;
		return state;
	}
	return NULL;
}

static struct nfs4_state *
__nfs4_find_state_byowner(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs_inode *nfsi = NFS_I(inode);
	struct nfs4_state *state;

	list_for_each_entry(state, &nfsi->open_states, inode_states) {
		/* Is this in the process of being freed? */
		if (state->nreaders == 0 && state->nwriters == 0)
			continue;
		if (state->owner == owner) {
			atomic_inc(&state->count);
			return state;
		}
	}
	return NULL;
}

struct nfs4_state *
nfs4_find_state(struct inode *inode, struct rpc_cred *cred, mode_t mode)
{
	struct nfs4_state *state;

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state(inode, cred, mode);
	spin_unlock(&inode->i_lock);
	return state;
}

static void
nfs4_free_open_state(struct nfs4_state *state)
{
	kfree(state);
}
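
/*
 * Find the open state for this (inode, owner) pair, or create and
 * insert a new one.  The second lookup under the locks guards against
 * races while the candidate state is being allocated.
 */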
struct nfs4_state *
nfs4_get_open_state(struct inode *inode, struct nfs4_state_owner *owner)
{
	struct nfs4_state *state, *new;
	struct nfs_inode *nfsi = NFS_I(inode);

	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	spin_unlock(&inode->i_lock);
	if (state)
		goto out;
	new = nfs4_alloc_open_state();
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	state = __nfs4_find_state_byowner(inode, owner);
	if (state == NULL && new != NULL) {
		state = new;
		state->owner = owner;
		atomic_inc(&owner->so_count);
		list_add(&state->inode_states, &nfsi->open_states);
		state->inode = igrab(inode);
		spin_unlock(&inode->i_lock);
		/* Note: The reclaim code dictates that we add stateless
		 * and read-only stateids to the end of the list */
		list_add_tail(&state->open_states, &owner->so_states);
		spin_unlock(&owner->so_lock);
	} else {
		spin_unlock(&inode->i_lock);
		spin_unlock(&owner->so_lock);
		if (new)
			nfs4_free_open_state(new);
	}
out:
	return state;
}

/*
 * Beware! Caller must be holding exactly one
 * reference to clp->cl_sem!
 */
void nfs4_put_open_state(struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;

	if (!atomic_dec_and_lock(&state->count, &owner->so_lock))
		return;
	spin_lock(&inode->i_lock);
	if (!list_empty(&state->inode_states))
		list_del(&state->inode_states);
	list_del(&state->open_states);
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	iput(inode);
	BUG_ON(state->state != 0);
	nfs4_free_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Close the current file.
 */
void nfs4_close_state(struct nfs4_state *state, mode_t mode)
{
	struct inode *inode = state->inode;
	struct nfs4_state_owner *owner = state->owner;
	int newstate;

	atomic_inc(&owner->so_count);
	/* Protect against nfs4_find_state() */
	spin_lock(&owner->so_lock);
	spin_lock(&inode->i_lock);
	if (mode & FMODE_READ)
		state->nreaders--;
	if (mode & FMODE_WRITE)
		state->nwriters--;
	if (state->nwriters == 0) {
		if (state->nreaders == 0)
			list_del_init(&state->inode_states);
		/* See reclaim code */
		list_move_tail(&state->open_states, &owner->so_states);
	}
	spin_unlock(&inode->i_lock);
	spin_unlock(&owner->so_lock);
	newstate = 0;
	if (state->state != 0) {
		if (state->nreaders)
			newstate |= FMODE_READ;
		if (state->nwriters)
			newstate |= FMODE_WRITE;
		if (state->state == newstate)
			goto out;
		if (test_bit(NFS_DELEGATED_STATE, &state->flags)) {
			state->state = newstate;
			goto out;
		}
		if (nfs4_do_close(inode, state, newstate) == 0)
			return;
	}
out:
	nfs4_put_open_state(state);
	nfs4_put_state_owner(owner);
}

/*
 * Search the state->lock_states for an existing lock_owner
 * that is compatible with current->files
 */
static struct nfs4_lock_state *
__nfs4_find_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *pos;

	list_for_each_entry(pos, &state->lock_states, ls_locks) {
		if (pos->ls_owner != fl_owner)
			continue;
		atomic_inc(&pos->ls_count);
		return pos;
	}
	return NULL;
}

/*
 * Allocate and initialise a new lock_state for the given open state
 * and lock owner.
 */
static struct nfs4_lock_state *nfs4_alloc_lock_state(struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;
	struct nfs4_client *clp = state->owner->so_client;

	lsp = kzalloc(sizeof(*lsp), GFP_KERNEL);
	if (lsp == NULL)
		return NULL;
	lsp->ls_seqid.sequence = &state->owner->so_sequence;
	atomic_set(&lsp->ls_count, 1);
	lsp->ls_owner = fl_owner;
	spin_lock(&clp->cl_lock);
	lsp->ls_id = nfs4_alloc_lockowner_id(clp);
	spin_unlock(&clp->cl_lock);
	INIT_LIST_HEAD(&lsp->ls_locks);
	return lsp;
}

/*
 * Return a compatible lock_state. If no initialized lock_state structure
 * exists, return an uninitialized one.
 *
 * The caller must be holding clp->cl_sem
 */
static struct nfs4_lock_state *nfs4_get_lock_state(struct nfs4_state *state, fl_owner_t owner)
{
	struct nfs4_lock_state *lsp, *new = NULL;

	for (;;) {
		spin_lock(&state->state_lock);
		lsp = __nfs4_find_lock_state(state, owner);
		if (lsp != NULL)
			break;
		if (new != NULL) {
			new->ls_state = state;
			list_add(&new->ls_locks, &state->lock_states);
			set_bit(LK_STATE_IN_USE, &state->flags);
			lsp = new;
			new = NULL;
			break;
		}
		spin_unlock(&state->state_lock);
		new = nfs4_alloc_lock_state(state, owner);
		if (new == NULL)
			return NULL;
	}
	spin_unlock(&state->state_lock);
	kfree(new);
	return lsp;
}

/*
 * Release reference to lock_state, and free it if we see that
 * it is no longer in use
 */
void nfs4_put_lock_state(struct nfs4_lock_state *lsp)
{
	struct nfs4_state *state;

	if (lsp == NULL)
		return;
	state = lsp->ls_state;
	if (!atomic_dec_and_lock(&lsp->ls_count, &state->state_lock))
		return;
	list_del(&lsp->ls_locks);
	if (list_empty(&state->lock_states))
		clear_bit(LK_STATE_IN_USE, &state->flags);
	spin_unlock(&state->state_lock);
	kfree(lsp);
}

static void nfs4_fl_copy_lock(struct file_lock *dst, struct file_lock *src)
{
	struct nfs4_lock_state *lsp = src->fl_u.nfs4_fl.owner;

	dst->fl_u.nfs4_fl.owner = lsp;
	atomic_inc(&lsp->ls_count);
}

static void nfs4_fl_release_lock(struct file_lock *fl)
{
	nfs4_put_lock_state(fl->fl_u.nfs4_fl.owner);
}

static struct file_lock_operations nfs4_fl_lock_ops = {
	.fl_copy_lock = nfs4_fl_copy_lock,
	.fl_release_private = nfs4_fl_release_lock,
};
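
/*
 * Attach a lock state to a POSIX file_lock the first time we see it,
 * so that the lock owner and its stateid follow the lock around.
 */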
int nfs4_set_lock_state(struct nfs4_state *state, struct file_lock *fl)
{
	struct nfs4_lock_state *lsp;

	if (fl->fl_ops != NULL)
		return 0;
	lsp = nfs4_get_lock_state(state, fl->fl_owner);
	if (lsp == NULL)
		return -ENOMEM;
	fl->fl_u.nfs4_fl.owner = lsp;
	fl->fl_ops = &nfs4_fl_lock_ops;
	return 0;
}

/*
 * Byte-range lock aware utility to initialize the stateid of read/write
 * requests.
 */
void nfs4_copy_stateid(nfs4_stateid *dst, struct nfs4_state *state, fl_owner_t fl_owner)
{
	struct nfs4_lock_state *lsp;

	memcpy(dst, &state->stateid, sizeof(*dst));
	if (test_bit(LK_STATE_IN_USE, &state->flags) == 0)
		return;
	spin_lock(&state->state_lock);
	lsp = __nfs4_find_lock_state(state, fl_owner);
	if (lsp != NULL && (lsp->ls_flags & NFS_LOCK_INITIALIZED) != 0)
		memcpy(dst, &lsp->ls_stateid, sizeof(*dst));
	spin_unlock(&state->state_lock);
	nfs4_put_lock_state(lsp);
}
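
/*
 * nfs_alloc_seqid() and nfs_free_seqid() bracket a single seqid-mutating
 * operation: allocation ties the request to its seqid counter, while
 * freeing removes it from the sequence queue and wakes the next waiter.
 */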
struct nfs_seqid *nfs_alloc_seqid(struct nfs_seqid_counter *counter)
{
	struct nfs_seqid *new;

	new = kmalloc(sizeof(*new), GFP_KERNEL);
	if (new != NULL) {
		new->sequence = counter;
		INIT_LIST_HEAD(&new->list);
	}
	return new;
}

void nfs_free_seqid(struct nfs_seqid *seqid)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;

	if (!list_empty(&seqid->list)) {
		spin_lock(&sequence->lock);
		list_del(&seqid->list);
		spin_unlock(&sequence->lock);
	}
	rpc_wake_up_next(&sequence->wait);
	kfree(seqid);
}

/*
 * Increment the seqid if the OPEN/OPEN_DOWNGRADE/CLOSE succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
static inline void nfs_increment_seqid(int status, struct nfs_seqid *seqid)
{
	switch (status) {
	case 0:
		break;
	case -NFS4ERR_BAD_SEQID:
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_STALE_STATEID:
	case -NFS4ERR_BAD_STATEID:
	case -NFS4ERR_BADXDR:
	case -NFS4ERR_RESOURCE:
	case -NFS4ERR_NOFILEHANDLE:
		/* Non-seqid mutating errors */
		return;
	};
	/*
	 * Note: no locking needed as we are guaranteed to be first
	 * on the sequence list
	 */
	seqid->sequence->counter++;
}

void nfs_increment_open_seqid(int status, struct nfs_seqid *seqid)
{
	if (status == -NFS4ERR_BAD_SEQID) {
		struct nfs4_state_owner *sp = container_of(seqid->sequence,
				struct nfs4_state_owner, so_seqid);
		nfs4_drop_state_owner(sp);
	}
	return nfs_increment_seqid(status, seqid);
}

/*
 * Increment the seqid if the LOCK/LOCKU succeeded, or
 * failed with a seqid incrementing error -
 * see comments nfs_fs.h:seqid_mutating_error()
 */
void nfs_increment_lock_seqid(int status, struct nfs_seqid *seqid)
{
	return nfs_increment_seqid(status, seqid);
}
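
/*
 * Serialise seqid-mutating operations: if another request already owns
 * this sequence, put the RPC task to sleep and return -EAGAIN so that
 * the caller waits its turn.
 */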
int nfs_wait_on_sequence(struct nfs_seqid *seqid, struct rpc_task *task)
{
	struct rpc_sequence *sequence = seqid->sequence->sequence;
	int status = 0;

	if (sequence->list.next == &seqid->list)
		goto out;
	spin_lock(&sequence->lock);
	if (!list_empty(&sequence->list)) {
		rpc_sleep_on(&sequence->wait, task, NULL, NULL);
		status = -EAGAIN;
	} else
		list_add(&seqid->list, &sequence->list);
	spin_unlock(&sequence->lock);
out:
	return status;
}

static int reclaimer(void *);

struct reclaimer_args {
	struct nfs4_client *clp;
	struct completion complete;
};

/*
 * State recovery routine
 */
void
nfs4_recover_state(void *data)
{
	struct nfs4_client *clp = (struct nfs4_client *)data;
	struct reclaimer_args args = {
		.clp = clp,
	};

	might_sleep();
	init_completion(&args.complete);

	if (kernel_thread(reclaimer, &args, CLONE_KERNEL) < 0)
		goto out_failed_clear;
	wait_for_completion(&args.complete);
	return;
out_failed_clear:
	set_bit(NFS4CLNT_OK, &clp->cl_state);
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
}

/*
 * Schedule a state recovery attempt
 */
void
nfs4_schedule_state_recovery(struct nfs4_client *clp)
{
	if (!clp)
		return;
	if (test_and_clear_bit(NFS4CLNT_OK, &clp->cl_state))
		schedule_work(&clp->cl_recoverd);
}
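
/*
 * Reclaim all POSIX locks on the inode that belong to this open state,
 * using the supplied recovery operations.
 */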
static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_state *state)
{
	struct inode *inode = state->inode;
	struct file_lock *fl;
	int status = 0;

	for (fl = inode->i_flock; fl != 0; fl = fl->fl_next) {
		if (!(fl->fl_flags & FL_POSIX))
			continue;
		if (((struct nfs_open_context *)fl->fl_file->private_data)->state != state)
			continue;
		status = ops->recover_lock(state, fl);
		if (status >= 0)
			continue;
		switch (status) {
		default:
			printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
					__FUNCTION__, status);
		case -NFS4ERR_EXPIRED:
		case -NFS4ERR_NO_GRACE:
		case -NFS4ERR_RECLAIM_BAD:
		case -NFS4ERR_RECLAIM_CONFLICT:
			/* kill_proc(fl->fl_owner, SIGLOST, 1); */
			break;
		case -NFS4ERR_STALE_CLIENTID:
			goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}

static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct nfs4_state_owner *sp)
{
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;
	int status = 0;

	/* Note: we rely on the sp->so_states list being ordered
	 * so that we always reclaim open(O_RDWR) and/or open(O_WRITE)
	 * states first.
	 * This is needed to ensure that the server won't give us any
	 * read delegations that we have to return if, say, we are
	 * recovering after a network partition or a reboot from a
	 * server that doesn't support a grace period.
	 */
	list_for_each_entry(state, &sp->so_states, open_states) {
		if (state->state == 0)
			continue;
		status = ops->recover_open(sp, state);
		if (status >= 0) {
			status = nfs4_reclaim_locks(ops, state);
			if (status < 0)
				goto out_err;
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
					printk("%s: Lock reclaim failed!\n",
							__FUNCTION__);
			}
			continue;
		}
		switch (status) {
		default:
			printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
					__FUNCTION__, status);
		case -ENOENT:
		case -NFS4ERR_RECLAIM_BAD:
		case -NFS4ERR_RECLAIM_CONFLICT:
			/*
			 * Open state on this file cannot be recovered
			 * All we can do is revert to using the zero stateid.
			 */
			memset(state->stateid.data, 0,
				sizeof(state->stateid.data));
			/* Mark the file as being 'closed' */
			state->state = 0;
			break;
		case -NFS4ERR_EXPIRED:
		case -NFS4ERR_NO_GRACE:
		case -NFS4ERR_STALE_CLIENTID:
			goto out_err;
		}
	}
	return 0;
out_err:
	return status;
}
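
/*
 * Prepare for reclaim: reset every open owner and lock owner seqid to
 * zero and mark all lock stateids as needing to be re-established.
 */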
static void nfs4_state_mark_reclaim(struct nfs4_client *clp)
{
	struct nfs4_state_owner *sp;
	struct nfs4_state *state;
	struct nfs4_lock_state *lock;

	/* Reset all sequence ids to zero */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		sp->so_seqid.counter = 0;
		sp->so_seqid.flags = 0;
		spin_lock(&sp->so_lock);
		list_for_each_entry(state, &sp->so_states, open_states) {
			list_for_each_entry(lock, &state->lock_states, ls_locks) {
				lock->ls_seqid.counter = 0;
				lock->ls_seqid.flags = 0;
				lock->ls_flags &= ~NFS_LOCK_INITIALIZED;
			}
		}
		spin_unlock(&sp->so_lock);
	}
}
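
/*
 * The state recovery thread: renew the lease to find out why recovery
 * was triggered, re-establish the clientid, then walk every state
 * owner reclaiming its opens and locks, and finally reap any
 * delegations that could not be reclaimed.
 */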
static int reclaimer(void *ptr)
{
	struct reclaimer_args *args = (struct reclaimer_args *)ptr;
	struct nfs4_client *clp = args->clp;
	struct nfs4_state_owner *sp;
	struct nfs4_state_recovery_ops *ops;
	int status = 0;

	daemonize("%u.%u.%u.%u-reclaim", NIPQUAD(clp->cl_addr));
	allow_signal(SIGKILL);

	atomic_inc(&clp->cl_count);
	complete(&args->complete);

	/* Ensure exclusive access to NFSv4 state */
	lock_kernel();
	down_write(&clp->cl_sem);
	/* Are there any NFS mounts out there? */
	if (list_empty(&clp->cl_superblocks))
		goto out;
restart_loop:
	status = nfs4_proc_renew(clp);
	switch (status) {
	case 0:
	case -NFS4ERR_CB_PATH_DOWN:
		goto out;
	case -NFS4ERR_STALE_CLIENTID:
	case -NFS4ERR_LEASE_MOVED:
		ops = &nfs4_reboot_recovery_ops;
		break;
	default:
		ops = &nfs4_network_partition_recovery_ops;
	};
	nfs4_state_mark_reclaim(clp);
	status = __nfs4_init_client(clp);
	if (status)
		goto out_error;
	/* Mark all delegations for reclaim */
	nfs_delegation_mark_reclaim(clp);
	/* Note: list is protected by exclusive lock on cl->cl_sem */
	list_for_each_entry(sp, &clp->cl_state_owners, so_list) {
		status = nfs4_reclaim_open_state(ops, sp);
		if (status < 0) {
			if (status == -NFS4ERR_NO_GRACE) {
				ops = &nfs4_network_partition_recovery_ops;
				status = nfs4_reclaim_open_state(ops, sp);
			}
			if (status == -NFS4ERR_STALE_CLIENTID)
				goto restart_loop;
			if (status == -NFS4ERR_EXPIRED)
				goto restart_loop;
		}
	}
	nfs_delegation_reap_unclaimed(clp);
out:
	set_bit(NFS4CLNT_OK, &clp->cl_state);
	up_write(&clp->cl_sem);
	unlock_kernel();
	wake_up_all(&clp->cl_waitq);
	rpc_wake_up(&clp->cl_rpcwaitq);
	if (status == -NFS4ERR_CB_PATH_DOWN)
		nfs_handle_cb_pathdown(clp);
	nfs4_put_client(clp);
	return 0;
out_error:
	printk(KERN_WARNING "Error: state recovery failed on NFSv4 server %u.%u.%u.%u with error %d\n",
			NIPQUAD(clp->cl_addr.s_addr), -status);
	goto out;
}

/*
 * Local variables:
 *  c-basic-offset: 8
 * End:
 */