page.c
/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
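
/*
 * Illustrative sketch (not part of this file): a netfs typically pairs
 * fscache_wait_on_page_write() with fscache_uncache_page() when tearing a
 * page down, e.g. from its ->invalidatepage() handler.  The function below
 * and the helper my_fs_cookie() are hypothetical.
 */
#if 0
static void my_fs_invalidatepage(struct page *page, unsigned long offset)
{
	struct fscache_cookie *cookie = my_fs_cookie(page->mapping->host);

	if (PageFsCache(page)) {
		/* once any store in flight has finished, drop the mark */
		fscache_wait_on_page_write(cookie, page);
		fscache_uncache_page(cookie, page);
	}
}
#endif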

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

try_again:
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		page_cache_release(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* We will wait here if we're allowed to, but that could deadlock the
	 * allocator as the work threads writing to the cache may all end up
	 * sleeping on memory allocation, so we may need to impose a timeout
	 * too. */
	if (!(gfp & __GFP_WAIT) || !(gfp & __GFP_FS)) {
		fscache_stat(&fscache_n_store_vmscan_busy);
		return false;
	}

	fscache_stat(&fscache_n_store_vmscan_wait);
	__fscache_wait_on_page_write(cookie, page);
	gfp &= ~__GFP_WAIT;
	goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
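
/*
 * Illustrative sketch (not part of this file): ->releasepage() is the usual
 * caller of fscache_maybe_release_page().  The function below is
 * hypothetical; my_fs_cookie() stands in for however the netfs finds the
 * cookie for an inode.
 */
#if 0
static int my_fs_releasepage(struct page *page, gfp_t gfp)
{
	struct fscache_cookie *cookie = my_fs_cookie(page->mapping->host);

	/* returns false if the cache is still busy with the page, in which
	 * case the VM must not release it yet */
	if (!fscache_maybe_release_page(cookie, page, gfp))
		return 0;
	return 1;
}
#endif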

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
	}
	spin_unlock(&object->lock);
	if (xpage)
		page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object) &&
	    fscache_use_cookie(object)) {
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		fscache_unuse_cookie(object);
		if (ret < 0)
			fscache_abort_object(object);
	}

	fscache_op_complete(op, true);
	_leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, fscache_attr_changed_op, NULL);
	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs:
	spin_unlock(&cookie->lock);
	kfree(op);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
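
/*
 * Illustrative sketch (not part of this file): a netfs calls
 * fscache_attr_changed() once an attribute affecting the cache object -
 * typically the file size - has changed.  All my_fs_* names below are
 * hypothetical.
 */
#if 0
static int my_fs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int ret;

	ret = my_fs_do_setattr(inode, attr);
	if (ret == 0 && (attr->ia_valid & ATTR_SIZE))
		/* let the cache resize its backing store to match */
		fscache_attr_changed(my_fs_cookie(inode));
	return ret;
}
#endif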

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	ASSERTCMP(atomic_read(&op->n_pages), ==, 0);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->op.object->cookie, op->context);

	_leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct fscache_cookie *cookie,
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
	atomic_inc(&cookie->n_active);
	op->op.flags = FSCACHE_OP_MYTHREAD |
		(1UL << FSCACHE_OP_WAITING) |
		(1UL << FSCACHE_OP_UNUSE_COOKIE);
	op->mapping = mapping;
	op->end_io_func = end_io_func;
	op->context = context;
	op->start_time = jiffies;
	INIT_LIST_HEAD(&op->to_do);
	return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}

/*
 * Handle cancellation of a pending retrieval op
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	atomic_set(&op->n_pages, 0);
}

/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
						 struct fscache_retrieval *op,
						 atomic_t *stat_op_waits,
						 atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
		goto check_if_dead;

	_debug(">>> WT");
	fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) != 0) {
		ret = fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
		fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [cancelled]");
		return -ENOBUFS;
	}
	if (unlikely(fscache_object_is_dead(object))) {
		pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
		fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
		fscache_stat(stat_object_dead);
		return -ENOBUFS;
	}
	return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping,
				     end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	atomic_set(&op->n_pages, 1);

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERT(test_bit(FSCACHE_OBJECT_IS_LOOKED_UP, &object->flags));

	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	atomic_dec(&cookie->n_active);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
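
/*
 * Illustrative sketch (not part of this file): a netfs ->readpage() tries
 * the cache first and falls back to the server on -ENOBUFS/-ENODATA.  The
 * my_fs_* names are hypothetical.
 */
#if 0
static void my_fs_read_complete(struct page *page, void *context, int error)
{
	/* runs when the cache finishes the read it dispatched */
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}

static int my_fs_readpage(struct file *file, struct page *page)
{
	struct fscache_cookie *cookie = my_fs_cookie(page->mapping->host);
	int ret;

	ret = fscache_read_or_alloc_page(cookie, page, my_fs_read_complete,
					 NULL, GFP_KERNEL);
	if (ret == 0)
		return 0;	/* read dispatched; completion unlocks page */

	/* -ENOBUFS, -ENODATA etc.: read from the server instead */
	return my_fs_read_from_server(file, page);
}
#endif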

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *		  pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *		  the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, *nr_pages);

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	atomic_dec(&cookie->n_active);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
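
/*
 * Illustrative sketch (not part of this file): in ->readpages() the cache
 * consumes whatever it can; pages it dispatches reads for are removed from
 * the list and nr_pages is reduced, leaving the remainder for the server.
 * The my_fs_* names (including the completion handler from the previous
 * sketch) are hypothetical.
 */
#if 0
static int my_fs_readpages(struct file *file, struct address_space *mapping,
			   struct list_head *pages, unsigned nr_pages)
{
	struct fscache_cookie *cookie = my_fs_cookie(mapping->host);
	int ret;

	ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
					  my_fs_read_complete, NULL,
					  GFP_KERNEL);
	if (ret == 0 && nr_pages == 0)
		return 0;	/* everything came from the cache */

	/* read the pages still on the list from the server */
	return my_fs_read_pages_from_server(file, mapping, pages, nr_pages);
}
#endif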

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(cookie, page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	atomic_set(&op->n_pages, 1);

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	atomic_dec(&cookie->n_active);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object)) {
		/* If we get here, then the on-disk cache object likely no
		 * longer exists, so we should just cancel this write
		 * operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, false);
		_leave(" [inactive]");
		return;
	}

	if (!cookie) {
		/* If we get here, then the cookie belonging to the object was
		 * detached, probably by the cookie being withdrawn due to
		 * memory pressure, which means that the pages from which we
		 * might write to the cache no longer exist - therefore, we
		 * can just cancel this write operation.
		 */
		spin_unlock(&object->lock);
		fscache_op_complete(&op->op, false);
		_leave(" [cancel] op{f=%lx s=%u} obj{s=%s f=%lx}",
		       _op->flags, _op->state, object->state->short_name,
		       object->flags);
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);
	if (page->index > op->store_limit) {
		fscache_stat(&fscache_n_store_pages_over_limit);
		goto superseded;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	fscache_end_page_write(object, page);
	if (ret < 0) {
		fscache_abort_object(object);
		fscache_op_complete(&op->op, true);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	fscache_op_complete(&op->op, true);
	_leave("");
}

/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
	struct page *page;
	void *results[16];
	int n, i;

	_enter("");

	for (;;) {
		spin_lock(&cookie->stores_lock);
		n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
					       ARRAY_SIZE(results),
					       FSCACHE_COOKIE_PENDING_TAG);
		if (n == 0) {
			spin_unlock(&cookie->stores_lock);
			break;
		}

		for (i = n - 1; i >= 0; i--) {
			page = results[i];
			radix_tree_delete(&cookie->stores, page->index);
		}

		spin_unlock(&cookie->stores_lock);

		for (i = n - 1; i >= 0; i--)
			page_cache_release(results[i]);
	}

	_leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_write_op,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC |
		(1 << FSCACHE_OP_WAITING) |
		(1 << FSCACHE_OP_UNUSE_COOKIE);

	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);
	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	page_cache_get(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	atomic_inc(&cookie->n_active);
	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	atomic_dec(&cookie->n_active);
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);
	page_cache_release(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	kfree(op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
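
/*
 * Illustrative sketch (not part of this file): after reading a page from
 * the server, a netfs can push it into the cache; on failure the page must
 * be unmarked again with fscache_uncache_page().  The my_fs_* name is
 * hypothetical.
 */
#if 0
static void my_fs_write_to_fscache(struct fscache_cookie *cookie,
				   struct page *page)
{
	int ret;

	ret = fscache_write_page(cookie, page, GFP_KERNEL);
	if (ret != 0)
		/* the page was marked by fscache_mark_page_cached() during
		 * the earlier read/alloc; undo that mark on failure */
		fscache_uncache_page(cookie, page);
}
#endif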

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
	struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
	atomic_inc(&fscache_n_marks);
#endif

	_debug("- mark %p{%lx}", page, page->index);
	if (TestSetPageFsCache(page)) {
		static bool once_only;
		if (!once_only) {
			once_only = true;
			printk(KERN_WARNING "FS-Cache:"
			       " Cookie type %s marked page %lx"
			       " multiple times\n",
			       cookie->def->name, page->index);
		}
	}

	if (cookie->def->mark_page_cached)
		cookie->def->mark_page_cached(cookie->netfs_data,
					      op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	unsigned long loop;

	for (loop = 0; loop < pagevec->nr; loop++)
		fscache_mark_page_cached(op, pagevec->pages[loop]);

	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
				       struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t next;
	int i;

	_enter("%p,%p", cookie, inode);

	if (!mapping || mapping->nrpages == 0) {
		_leave(" [no pages]");
		return;
	}

	pagevec_init(&pvec, 0);
	next = 0;
	do {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			next = page->index;
			if (PageFsCache(page)) {
				__fscache_wait_on_page_write(cookie, page);
				__fscache_uncache_page(cookie, page);
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (++next);

	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
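
/*
 * Illustrative sketch (not part of this file): a netfs can call
 * fscache_uncache_all_inode_pages() while evicting an inode, before the
 * pages vanish and the cookie is relinquished.  my_fs_evict_inode() and
 * my_fs_cookie() are hypothetical.
 */
#if 0
static void my_fs_evict_inode(struct inode *inode)
{
	struct fscache_cookie *cookie = my_fs_cookie(inode);

	/* strip any residual PG_fscache marks while the pages are still
	 * reachable through the mapping */
	fscache_uncache_all_inode_pages(cookie, inode);

	truncate_inode_pages(&inode->i_data, 0);
	clear_inode(inode);
	fscache_relinquish_cookie(cookie, 0);
}
#endif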