callback.c

/*
 * Copyright (c) 2002, 2007 Red Hat, Inc. All rights reserved.
 *
 * This software may be freely redistributed under the terms of the
 * GNU General Public License.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
 *          David Howells <dhowells@redhat.com>
 *
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/circ_buf.h>
#include "internal.h"

unsigned afs_vnode_update_timeout = 10;

#define afs_breakring_space(server) \
        CIRC_SPACE((server)->cb_break_head, (server)->cb_break_tail, \
                   ARRAY_SIZE((server)->cb_break))
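
/* The break ring index is advanced by masking with ARRAY_SIZE() - 1 in
 * afs_do_give_up_callback(), so cb_break[] is assumed to hold a
 * power-of-two number of slots, as CIRC_SPACE() also requires.
 */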

//static void afs_callback_updater(struct work_struct *);

static struct workqueue_struct *afs_callback_update_worker;

/*
 * allow the fileserver to request callback state (re-)initialisation
 */
void afs_init_callback_state(struct afs_server *server)
{
        struct afs_vnode *vnode;

        _enter("{%p}", server);

        spin_lock(&server->cb_lock);

        /* kill all the promises on record from this server */
        while (!RB_EMPTY_ROOT(&server->cb_promises)) {
                vnode = rb_entry(server->cb_promises.rb_node,
                                 struct afs_vnode, cb_promise);
                _debug("UNPROMISE { vid=%x:%u uq=%u}",
                       vnode->fid.vid, vnode->fid.vnode, vnode->fid.unique);
                rb_erase(&vnode->cb_promise, &server->cb_promises);
                vnode->cb_promised = false;
        }

        spin_unlock(&server->cb_lock);
        _leave("");
}

/*
 * handle the data invalidation side of a callback being broken
 */
void afs_broken_callback_work(struct work_struct *work)
{
        struct afs_vnode *vnode =
                container_of(work, struct afs_vnode, cb_broken_work);

        _enter("");

        if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
                return;

        /* we're only interested in dealing with a broken callback on *this*
         * vnode and only if no-one else has dealt with it yet */
        if (!mutex_trylock(&vnode->validate_lock))
                return; /* someone else is dealing with it */
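
        /* if the callback is still marked broken, refetch the vnode's status
         * from the server and discard the cached data if it has changed */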
        if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
                if (S_ISDIR(vnode->vfs_inode.i_mode))
                        afs_clear_permits(vnode);

                if (afs_vnode_fetch_status(vnode, NULL, NULL) < 0)
                        goto out;

                if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
                        goto out;

                /* if the vnode's data version number changed then its
                 * contents are different */
                if (test_and_clear_bit(AFS_VNODE_ZAP_DATA, &vnode->flags))
                        afs_zap_data(vnode);
        }

out:
        mutex_unlock(&vnode->validate_lock);

        /* avoid the potential race whereby the mutex_trylock() in this
         * function happens again between the clear_bit() and the
         * mutex_unlock() */
        if (test_bit(AFS_VNODE_CB_BROKEN, &vnode->flags)) {
                _debug("requeue");
                queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
        }
        _leave("");
}

/*
 * actually break a callback
 */
static void afs_break_callback(struct afs_server *server,
                               struct afs_vnode *vnode)
{
        _enter("");

        set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
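
        /* cb_promised is rechecked under server->cb_lock below; the unlocked
         * test here just avoids taking the locks when there is plainly no
         * promise to discard */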
        if (vnode->cb_promised) {
                spin_lock(&vnode->lock);

                _debug("break callback");

                spin_lock(&server->cb_lock);
                if (vnode->cb_promised) {
                        rb_erase(&vnode->cb_promise, &server->cb_promises);
                        vnode->cb_promised = false;
                }
                spin_unlock(&server->cb_lock);

                queue_work(afs_callback_update_worker, &vnode->cb_broken_work);
                spin_unlock(&vnode->lock);
        }
}

/*
 * allow the fileserver to explicitly break one callback
 * - happens when
 *   - the backing file is changed
 *   - a lock is released
 */
static void afs_break_one_callback(struct afs_server *server,
                                   struct afs_fid *fid)
{
        struct afs_vnode *vnode;
        struct rb_node *p;

        _debug("find");
        spin_lock(&server->fs_lock);
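
        /* search the server's tree of vnodes, which is keyed on
         * { vid, vnode, unique } */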
        p = server->fs_vnodes.rb_node;
        while (p) {
                vnode = rb_entry(p, struct afs_vnode, server_rb);
                if (fid->vid < vnode->fid.vid)
                        p = p->rb_left;
                else if (fid->vid > vnode->fid.vid)
                        p = p->rb_right;
                else if (fid->vnode < vnode->fid.vnode)
                        p = p->rb_left;
                else if (fid->vnode > vnode->fid.vnode)
                        p = p->rb_right;
                else if (fid->unique < vnode->fid.unique)
                        p = p->rb_left;
                else if (fid->unique > vnode->fid.unique)
                        p = p->rb_right;
                else
                        goto found;
        }

        /* not found so we just ignore it (it may have moved to another
         * server) */
not_available:
        _debug("not avail");
        spin_unlock(&server->fs_lock);
        _leave("");
        return;

found:
        _debug("found");
        ASSERTCMP(server, ==, vnode->server);

        if (!igrab(AFS_VNODE_TO_I(vnode)))
                goto not_available;
        spin_unlock(&server->fs_lock);

        afs_break_callback(server, vnode);
        iput(&vnode->vfs_inode);
        _leave("");
}

/*
 * allow the fileserver to break callback promises
 */
void afs_break_callbacks(struct afs_server *server, size_t count,
                         struct afs_callback callbacks[])
{
        _enter("%p,%zu,", server, count);

        ASSERT(server != NULL);
        ASSERTCMP(count, <=, AFSCBMAX);

        for (; count > 0; callbacks++, count--) {
                _debug("- Fid { vl=%08x n=%u u=%u } CB { v=%u x=%u t=%u }",
                       callbacks->fid.vid,
                       callbacks->fid.vnode,
                       callbacks->fid.unique,
                       callbacks->version,
                       callbacks->expiry,
                       callbacks->type
                       );
                afs_break_one_callback(server, &callbacks->fid);
        }

        _leave("");
        return;
}

/*
 * record the callback for breaking
 * - the caller must hold server->cb_lock
 */
static void afs_do_give_up_callback(struct afs_server *server,
                                    struct afs_vnode *vnode)
{
        struct afs_callback *cb;

        _enter("%p,%p", server, vnode);

        cb = &server->cb_break[server->cb_break_head];
        cb->fid = vnode->fid;
        cb->version = vnode->cb_version;
        cb->expiry = vnode->cb_expiry;
        cb->type = vnode->cb_type;
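
        /* make sure the record is fully written before the head index moves
         * on and the entry becomes visible to the consumer of the ring */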
        smp_wmb();
        server->cb_break_head =
                (server->cb_break_head + 1) &
                (ARRAY_SIZE(server->cb_break) - 1);

        /* defer the breaking of callbacks to try and collect as many as
         * possible to ship in one operation */
        switch (atomic_inc_return(&server->cb_break_n)) {
        case 1 ... AFSCBMAX - 1:
                queue_delayed_work(afs_callback_update_worker,
                                   &server->cb_break_work, HZ * 2);
                break;
        case AFSCBMAX:
                afs_flush_callback_breaks(server);
                break;
        default:
                break;
        }

        ASSERT(server->cb_promises.rb_node != NULL);
        rb_erase(&vnode->cb_promise, &server->cb_promises);
        vnode->cb_promised = false;
        _leave("");
}

/*
 * discard the callback on a deleted item
 */
void afs_discard_callback_on_delete(struct afs_vnode *vnode)
{
        struct afs_server *server = vnode->server;

        _enter("%d", vnode->cb_promised);

        if (!vnode->cb_promised) {
                _leave(" [not promised]");
                return;
        }

        ASSERT(server != NULL);

        spin_lock(&server->cb_lock);
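
        /* recheck the promise under the lock in case it was broken or given
         * up in the meantime */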
        if (vnode->cb_promised) {
                ASSERT(server->cb_promises.rb_node != NULL);
                rb_erase(&vnode->cb_promise, &server->cb_promises);
                vnode->cb_promised = false;
        }
        spin_unlock(&server->cb_lock);
        _leave("");
}

/*
 * give up the callback registered for a vnode on the file server when the
 * inode is being cleared
 */
void afs_give_up_callback(struct afs_vnode *vnode)
{
        struct afs_server *server = vnode->server;
        DECLARE_WAITQUEUE(myself, current);

        _enter("%d", vnode->cb_promised);

        _debug("GIVE UP INODE %p", &vnode->vfs_inode);

        if (!vnode->cb_promised) {
                _leave(" [not promised]");
                return;
        }

        ASSERT(server != NULL);

        spin_lock(&server->cb_lock);
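
        /* wait for space to appear in the break ring if it is currently
         * full */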
        if (vnode->cb_promised && afs_breakring_space(server) == 0) {
                add_wait_queue(&server->cb_break_waitq, &myself);
                for (;;) {
                        set_current_state(TASK_UNINTERRUPTIBLE);
                        if (!vnode->cb_promised ||
                            afs_breakring_space(server) != 0)
                                break;
                        spin_unlock(&server->cb_lock);
                        schedule();
                        spin_lock(&server->cb_lock);
                }
                remove_wait_queue(&server->cb_break_waitq, &myself);
                __set_current_state(TASK_RUNNING);
        }

        /* of course, it's always possible for the server to break this
         * vnode's callback first... */
        if (vnode->cb_promised)
                afs_do_give_up_callback(server, vnode);

        spin_unlock(&server->cb_lock);
        _leave("");
}

/*
 * dispatch a deferred give up callbacks operation
 */
void afs_dispatch_give_up_callbacks(struct work_struct *work)
{
        struct afs_server *server =
                container_of(work, struct afs_server, cb_break_work.work);

        _enter("");

        /* tell the fileserver to discard the callback promises it has
         * - in the event of ENOMEM or some other error, we just forget that we
         *   had callbacks entirely, and the server will call us later to break
         *   them
         */
        afs_fs_give_up_callbacks(server, &afs_async_call);
}

/*
 * flush the outstanding callback breaks on a server
 */
void afs_flush_callback_breaks(struct afs_server *server)
{
        cancel_delayed_work(&server->cb_break_work);
        queue_delayed_work(afs_callback_update_worker,
                           &server->cb_break_work, 0);
}

#if 0
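/*
 * Note: this updater is compiled out.  It refers to identifiers (vl, vldb,
 * afs_vnode_update_worker, the AFS_VL_* states) that appear to belong to the
 * volume location update code, so it would not build if simply enabled.
 */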
/*
 * update a bunch of callbacks
 */
static void afs_callback_updater(struct work_struct *work)
{
        struct afs_server *server;
        struct afs_vnode *vnode, *xvnode;
        time_t now;
        long timeout;
        int ret;

        server = container_of(work, struct afs_server, updater);

        _enter("");

        now = get_seconds();

        /* find the first vnode to update */
        spin_lock(&server->cb_lock);
        for (;;) {
                if (RB_EMPTY_ROOT(&server->cb_promises)) {
                        spin_unlock(&server->cb_lock);
                        _leave(" [nothing]");
                        return;
                }

                vnode = rb_entry(rb_first(&server->cb_promises),
                                 struct afs_vnode, cb_promise);
                if (atomic_read(&vnode->usage) > 0)
                        break;
                rb_erase(&vnode->cb_promise, &server->cb_promises);
                vnode->cb_promised = false;
        }

        timeout = vnode->update_at - now;
        if (timeout > 0) {
                queue_delayed_work(afs_vnode_update_worker,
                                   &afs_vnode_update, timeout * HZ);
                spin_unlock(&server->cb_lock);
                _leave(" [nothing]");
                return;
        }

        list_del_init(&vnode->update);
        atomic_inc(&vnode->usage);
        spin_unlock(&server->cb_lock);

        /* we can now perform the update */
        _debug("update %s", vnode->vldb.name);
        vnode->state = AFS_VL_UPDATING;
        vnode->upd_rej_cnt = 0;
        vnode->upd_busy_cnt = 0;

        ret = afs_vnode_update_record(vl, &vldb);
        switch (ret) {
        case 0:
                afs_vnode_apply_update(vl, &vldb);
                vnode->state = AFS_VL_UPDATING;
                break;
        case -ENOMEDIUM:
                vnode->state = AFS_VL_VOLUME_DELETED;
                break;
        default:
                vnode->state = AFS_VL_UNCERTAIN;
                break;
        }

        /* and then reschedule */
        _debug("reschedule");
        vnode->update_at = get_seconds() + afs_vnode_update_timeout;

        spin_lock(&server->cb_lock);

        if (!list_empty(&server->cb_promises)) {
                /* next update in 10 minutes, but wait at least 1 second more
                 * than the newest record already queued so that we don't spam
                 * the VL server suddenly with lots of requests
                 */
                xvnode = list_entry(server->cb_promises.prev,
                                    struct afs_vnode, update);
                if (vnode->update_at <= xvnode->update_at)
                        vnode->update_at = xvnode->update_at + 1;
                xvnode = list_entry(server->cb_promises.next,
                                    struct afs_vnode, update);
                timeout = xvnode->update_at - now;
                if (timeout < 0)
                        timeout = 0;
        } else {
                timeout = afs_vnode_update_timeout;
        }

        list_add_tail(&vnode->update, &server->cb_promises);

        _debug("timeout %ld", timeout);
        queue_delayed_work(afs_vnode_update_worker,
                           &afs_vnode_update, timeout * HZ);
        spin_unlock(&server->cb_lock);
        afs_put_vnode(vl);
}
#endif

/*
 * initialise the callback update process
 */
int __init afs_callback_update_init(void)
{
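        /* a single-threaded workqueue runs the queued work items one at a
         * time */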
        afs_callback_update_worker =
                create_singlethread_workqueue("kafs_callbackd");
        return afs_callback_update_worker ? 0 : -ENOMEM;
}

/*
 * shut down the callback update process
 */
void afs_callback_update_kill(void)
{
        destroy_workqueue(afs_callback_update_worker);
}