operation.c

/* FS-Cache worker operation management routines
 *
 * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * See Documentation/filesystems/caching/operations.txt
 */

#define FSCACHE_DEBUG_LEVEL OPERATION
#include <linux/module.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

atomic_t fscache_op_debug_id;
EXPORT_SYMBOL(fscache_op_debug_id);

/**
 * fscache_enqueue_operation - Enqueue an operation for processing
 * @op: The operation to enqueue
 *
 * Enqueue an operation for processing by the FS-Cache thread pool.
 *
 * This will get its own ref on the object.
 */
void fscache_enqueue_operation(struct fscache_operation *op)
{
	_enter("{OBJ%x OP%x,%u}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	fscache_set_op_state(op, "EnQ");

	ASSERT(list_empty(&op->pend_link));
	ASSERT(op->processor != NULL);
	ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
	ASSERTCMP(atomic_read(&op->usage), >, 0);

	fscache_stat(&fscache_n_op_enqueue);
	switch (op->flags & FSCACHE_OP_TYPE) {
	case FSCACHE_OP_FAST:
		_debug("queue fast");
		atomic_inc(&op->usage);
		if (!schedule_work(&op->fast_work))
			fscache_put_operation(op);
		break;
	case FSCACHE_OP_SLOW:
		_debug("queue slow");
		slow_work_enqueue(&op->slow_work);
		break;
	case FSCACHE_OP_MYTHREAD:
		_debug("queue for caller's attention");
		break;
	default:
		printk(KERN_ERR "FS-Cache: Unexpected op type %lx\n",
		       op->flags);
		BUG();
		break;
	}
}
EXPORT_SYMBOL(fscache_enqueue_operation);
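
/*
 * Example (an illustrative sketch, not code from this file): a caller
 * wanting an op processed by the thread pool initialises the op, marks it
 * FSCACHE_OP_SLOW and submits it, then drops its own initial ref; the
 * pending queue and the thread pool take refs of their own.
 * my_op_release() and my_op_processor() are hypothetical callbacks:
 *
 *	struct fscache_operation *op;
 *
 *	op = kzalloc(sizeof(*op), GFP_KERNEL);
 *	if (!op)
 *		return -ENOMEM;
 *
 *	fscache_operation_init(op, my_op_release);
 *	fscache_operation_init_slow(op, my_op_processor);
 *	op->flags = FSCACHE_OP_SLOW;
 *
 *	if (fscache_submit_op(object, op) < 0) {
 *		fscache_put_operation(op);
 *		return -ENOBUFS;
 *	}
 *
 *	fscache_put_operation(op);
 *	return 0;
 */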

/*
 * start an op running
 * - caller must hold object->lock
 */
static void fscache_run_op(struct fscache_object *object,
			   struct fscache_operation *op)
{
	fscache_set_op_state(op, "Run");

	object->n_in_progress++;
	if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
		wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
	if (op->processor)
		fscache_enqueue_operation(op);
	fscache_stat(&fscache_n_op_run);
}

/*
 * submit an exclusive operation for an object
 * - other ops are excluded from running simultaneously with this one
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_exclusive_op(struct fscache_object *object,
				struct fscache_operation *op)
{
	int ret;

	_enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);

	fscache_set_op_state(op, "SubmitX");

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	ret = -ENOBUFS;
	if (fscache_object_is_active(object)) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */

		if (object->n_ops > 1) {
			/* n_ops was just incremented for this op, so anything
			 * beyond 1 means other ops are in flight and this one
			 * must take its turn on the pending queue */
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_in_progress, ==, 0);
			fscache_run_op(object, op);
		}

		/* need to issue a new write op after this */
		clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_CREATING) {
		op->object = object;
		object->n_ops++;
		object->n_exclusive++;	/* reads and writes must wait */
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else {
		/* not allowed to submit ops in any other state */
		BUG();
	}

	spin_unlock(&object->lock);
	return ret;
}
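
/*
 * Illustrative sketch (modelled on how the attribute-change op uses this
 * function elsewhere in FS-Cache; names simplified and hypothetical):
 *
 *	fscache_operation_init(op, NULL);
 *	fscache_operation_init_slow(op, my_attr_changed_processor);
 *	op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
 *
 *	if (fscache_submit_exclusive_op(object, op) < 0) {
 *		fscache_put_operation(op);
 *		return -ENOBUFS;
 *	}
 *
 * Once accepted, n_exclusive has been raised, so fscache_start_operations()
 * holds all other ops back until this one completes.  Note that in this
 * version a submission against an object in any unexpected state hits BUG()
 * rather than returning -ENOBUFS.
 */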

/*
 * report an unexpected submission
 */
static void fscache_report_unexpected_submission(struct fscache_object *object,
						 struct fscache_operation *op,
						 unsigned long ostate)
{
	static bool once_only;
	struct fscache_operation *p;
	unsigned n;

	if (once_only)
		return;
	once_only = true;

	kdebug("unexpected submission OP%x [OBJ%x %s]",
	       op->debug_id, object->debug_id,
	       fscache_object_states[object->state]);
	kdebug("objstate=%s [%s]",
	       fscache_object_states[object->state],
	       fscache_object_states[ostate]);
	kdebug("objflags=%lx", object->flags);
	kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
	kdebug("ops=%u inp=%u exc=%u",
	       object->n_ops, object->n_in_progress, object->n_exclusive);

	if (!list_empty(&object->pending_ops)) {
		n = 0;
		list_for_each_entry(p, &object->pending_ops, pend_link) {
			ASSERTCMP(p->object, ==, object);
			kdebug("%p %p", p->processor, p->release);
			n++;
		}
		kdebug("n=%u", n);
	}

	dump_stack();
}

/*
 * submit an operation for an object
 * - ops may be submitted only in the following object states:
 *   - during object creation (write ops may be submitted)
 *   - whilst the object is active
 *   - after an I/O error incurred in one of the two above states (op rejected)
 * - this gets any extra refs it needs on an op
 */
int fscache_submit_op(struct fscache_object *object,
		      struct fscache_operation *op)
{
	unsigned long ostate;
	int ret;

	_enter("{OBJ%x OP%x},{%u}",
	       object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	fscache_set_op_state(op, "Submit");

	spin_lock(&object->lock);
	ASSERTCMP(object->n_ops, >=, object->n_in_progress);
	ASSERTCMP(object->n_ops, >=, object->n_exclusive);
	ASSERT(list_empty(&op->pend_link));

	/* take a snapshot of the state to report against if the submission
	 * turns out to be unexpected */
	ostate = object->state;
	smp_rmb();

	if (fscache_object_is_active(object)) {
		op->object = object;
		object->n_ops++;

		if (object->n_exclusive > 0) {
			/* an exclusive op is in progress or queued ahead of
			 * us, so we must wait */
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
		} else if (!list_empty(&object->pending_ops)) {
			atomic_inc(&op->usage);
			list_add_tail(&op->pend_link, &object->pending_ops);
			fscache_stat(&fscache_n_op_pend);
			fscache_start_operations(object);
		} else {
			ASSERTCMP(object->n_exclusive, ==, 0);
			fscache_run_op(object, op);
		}
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_CREATING) {
		op->object = object;
		object->n_ops++;
		atomic_inc(&op->usage);
		list_add_tail(&op->pend_link, &object->pending_ops);
		fscache_stat(&fscache_n_op_pend);
		ret = 0;
	} else if (object->state == FSCACHE_OBJECT_DYING ||
		   object->state == FSCACHE_OBJECT_LC_DYING ||
		   object->state == FSCACHE_OBJECT_WITHDRAWING) {
		fscache_stat(&fscache_n_op_rejected);
		ret = -ENOBUFS;
	} else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
		fscache_report_unexpected_submission(object, op, ostate);
		ASSERT(!fscache_object_is_active(object));
		ret = -ENOBUFS;
	} else {
		ret = -ENOBUFS;
	}

	spin_unlock(&object->lock);
	return ret;
}
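
/*
 * Illustrative sketch (modelled on the retrieval ops elsewhere in FS-Cache;
 * details simplified): an FSCACHE_OP_MYTHREAD op is processed by the
 * submitting thread rather than by the pool.  The submitter marks the op as
 * waiting, submits it, then sleeps until fscache_run_op() clears
 * FSCACHE_OP_WAITING:
 *
 *	op->flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
 *
 *	if (fscache_submit_op(object, op) < 0)
 *		goto nobufs;
 *
 *	if (test_bit(FSCACHE_OP_WAITING, &op->flags))
 *		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
 *			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
 *
 * On waking, the op counts as in progress and the caller does the I/O in
 * its own context.
 */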

/*
 * queue an object for withdrawal on error, aborting all following asynchronous
 * operations
 */
void fscache_abort_object(struct fscache_object *object)
{
	_enter("{OBJ%x}", object->debug_id);

	fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
}

/*
 * jump start the operation processing on an object
 * - caller must hold object->lock
 */
void fscache_start_operations(struct fscache_object *object)
{
	struct fscache_operation *op;
	bool stop = false;

	while (!list_empty(&object->pending_ops) && !stop) {
		op = list_entry(object->pending_ops.next,
				struct fscache_operation, pend_link);

		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			/* an exclusive op may not start whilst anything else
			 * is in progress, and nothing may start after it */
			if (object->n_in_progress > 0)
				break;
			stop = true;
		}

		list_del_init(&op->pend_link);
		fscache_run_op(object, op);

		/* the pending queue was holding a ref on the op */
		fscache_put_operation(op);
	}

	ASSERTCMP(object->n_in_progress, <=, object->n_ops);

	_debug("woke %d ops on OBJ%x",
	       object->n_in_progress, object->debug_id);
}
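
/*
 * Worked example (illustrative): suppose the pending queue holds ops A and
 * B (ordinary) then X (exclusive), and nothing is in progress.  The loop
 * starts A and B, then sees X is exclusive with n_in_progress == 2 and
 * breaks.  When A and B complete and the final put re-enters this function,
 * X is started with stop set to true, so nothing queued behind X can
 * overtake it.
 */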

/*
 * cancel an operation that's pending on an object
 */
int fscache_cancel_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", op->object->debug_id, op->debug_id);

	spin_lock(&object->lock);

	ret = -EBUSY;
	if (!list_empty(&op->pend_link)) {
		fscache_stat(&fscache_n_op_cancelled);
		list_del_init(&op->pend_link);
		object->n_ops--;
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags))
			object->n_exclusive--;
		if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
			wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
		fscache_put_operation(op);
		ret = 0;
	}

	spin_unlock(&object->lock);
	_leave(" = %d", ret);
	return ret;
}
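
/*
 * Illustrative sketch (again modelled on the retrieval code; simplified):
 * cancellation pairs with the MYTHREAD wait pattern above.  If the wait for
 * FSCACHE_OP_WAITING is interrupted, the submitter tries to pull the op
 * back off the pending queue:
 *
 *	if (wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
 *			fscache_wait_bit_interruptible,
 *			TASK_INTERRUPTIBLE) < 0) {
 *		ret = fscache_cancel_op(op);
 *		if (ret == 0)
 *			return -ERESTARTSYS;
 *
 *		wait_on_bit(&op->flags, FSCACHE_OP_WAITING,
 *			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
 *	}
 *
 * -EBUSY from fscache_cancel_op() means the op has already left the pending
 * queue and is going to run, so the only option is to wait for it
 * uninterruptibly after all.
 */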

/*
 * release an operation
 * - queues pending ops if this is the last in-progress op
 */
void fscache_put_operation(struct fscache_operation *op)
{
	struct fscache_object *object;
	struct fscache_cache *cache;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERTCMP(atomic_read(&op->usage), >, 0);

	if (!atomic_dec_and_test(&op->usage))
		return;

	fscache_set_op_state(op, "Put");

	_debug("PUT OP");
	if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
		BUG();

	fscache_stat(&fscache_n_op_release);

	if (op->release) {
		op->release(op);
		op->release = NULL;
	}

	object = op->object;

	if (test_bit(FSCACHE_OP_DEC_READ_CNT, &op->flags))
		atomic_dec(&object->n_reads);

	/* now... we may get called with the object spinlock held, so we
	 * complete the cleanup here only if we can immediately acquire the
	 * lock, and defer it otherwise */
	if (!spin_trylock(&object->lock)) {
		_debug("defer put");
		fscache_stat(&fscache_n_op_deferred_release);

		cache = object->cache;
		spin_lock(&cache->op_gc_list_lock);
		/* the now-idle pend_link is reused to chain the op onto the
		 * garbage collector's list */
		list_add_tail(&op->pend_link, &cache->op_gc_list);
		spin_unlock(&cache->op_gc_list_lock);
		schedule_work(&cache->op_gc);
		_leave(" [defer]");
		return;
	}

	if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
		ASSERTCMP(object->n_exclusive, >, 0);
		object->n_exclusive--;
	}

	ASSERTCMP(object->n_in_progress, >, 0);
	object->n_in_progress--;
	if (object->n_in_progress == 0)
		fscache_start_operations(object);

	ASSERTCMP(object->n_ops, >, 0);
	object->n_ops--;
	if (object->n_ops == 0)
		fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

	spin_unlock(&object->lock);
	kfree(op);
	_leave(" [done]");
}
EXPORT_SYMBOL(fscache_put_operation);
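
/*
 * Note on the ref counting visible above: op->usage is typically held by
 * (a) the submitter (the initial ref from operation initialisation), (b)
 * the object's pending queue whilst the op is queued, and (c) the thread
 * pool whilst the op is queued or executing there.  The final put tears
 * down the object's accounting and frees the op, which is why the deferred
 * path through op_gc_list exists for when the object lock is already held.
 */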

/*
 * garbage collect operations that have had their release deferred
 */
void fscache_operation_gc(struct work_struct *work)
{
	struct fscache_operation *op;
	struct fscache_object *object;
	struct fscache_cache *cache =
		container_of(work, struct fscache_cache, op_gc);
	int count = 0;

	_enter("");

	do {
		spin_lock(&cache->op_gc_list_lock);
		if (list_empty(&cache->op_gc_list)) {
			spin_unlock(&cache->op_gc_list_lock);
			break;
		}

		op = list_entry(cache->op_gc_list.next,
				struct fscache_operation, pend_link);
		list_del(&op->pend_link);
		spin_unlock(&cache->op_gc_list_lock);

		object = op->object;

		_debug("GC DEFERRED REL OBJ%x OP%x",
		       object->debug_id, op->debug_id);
		fscache_stat(&fscache_n_op_gc);

		ASSERTCMP(atomic_read(&op->usage), ==, 0);

		spin_lock(&object->lock);
		if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
			ASSERTCMP(object->n_exclusive, >, 0);
			object->n_exclusive--;
		}

		ASSERTCMP(object->n_in_progress, >, 0);
		object->n_in_progress--;
		if (object->n_in_progress == 0)
			fscache_start_operations(object);

		ASSERTCMP(object->n_ops, >, 0);
		object->n_ops--;
		if (object->n_ops == 0)
			fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);

		spin_unlock(&object->lock);
		kfree(op);

	} while (count++ < 20);	/* only process a small batch per invocation */

	if (!list_empty(&cache->op_gc_list))
		schedule_work(&cache->op_gc);

	_leave("");
}

/*
 * allow the slow work item processor to get a ref on an operation
 */
static int fscache_op_get_ref(struct slow_work *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, slow_work);

	atomic_inc(&op->usage);
	return 0;
}

/*
 * allow the slow work item processor to discard a ref on an operation
 */
static void fscache_op_put_ref(struct slow_work *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, slow_work);

	fscache_put_operation(op);
}

/*
 * execute an operation using the slow thread pool to provide processing context
 * - the caller holds a ref to this object, so we don't need to hold one
 */
static void fscache_op_execute(struct slow_work *work)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, slow_work);
	unsigned long start;

	_enter("{OBJ%x OP%x,%d}",
	       op->object->debug_id, op->debug_id, atomic_read(&op->usage));

	ASSERT(op->processor != NULL);
	start = jiffies;
	op->processor(op);
	fscache_hist(fscache_ops_histogram, start);

	_leave("");
}

/*
 * describe an operation for slow-work debugging
 */
#ifdef CONFIG_SLOW_WORK_DEBUG
static void fscache_op_desc(struct slow_work *work, struct seq_file *m)
{
	struct fscache_operation *op =
		container_of(work, struct fscache_operation, slow_work);

	seq_printf(m, "FSC: OBJ%x OP%x: %s/%s fl=%lx",
		   op->object->debug_id, op->debug_id,
		   op->name, op->state, op->flags);
}
#endif

const struct slow_work_ops fscache_op_slow_work_ops = {
	.owner		= THIS_MODULE,
	.get_ref	= fscache_op_get_ref,
	.put_ref	= fscache_op_put_ref,
	.execute	= fscache_op_execute,
#ifdef CONFIG_SLOW_WORK_DEBUG
	.desc		= fscache_op_desc,
#endif
};
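
/*
 * How this table is wired up (a sketch based on the slow-work API of this
 * era, not code from this file): fscache_operation_init_slow() points the
 * op's embedded slow_work item at fscache_op_slow_work_ops, roughly:
 *
 *	op->processor = processor;
 *	slow_work_init(&op->slow_work, &fscache_op_slow_work_ops);
 *
 * slow_work_enqueue() in fscache_enqueue_operation() then has the slow-work
 * facility call .get_ref when queueing, .execute on a pool thread and
 * finally .put_ref, which is what allows the op to be released safely once
 * it has run.
 */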