/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
        void *val;

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        rcu_read_unlock();

        return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

        wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
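
/*
 * Usage sketch (illustrative, not part of the original file): a netfs that
 * needs a page to be stable before dropping it might pair the
 * fscache_wait_on_page_write() wrapper with fscache_uncache_page(), e.g.
 * in an ->invalidatepage() handler.  netfs_i_cookie() is a hypothetical
 * helper returning the inode's cookie:
 *
 *      static void netfs_invalidate_page(struct page *page)
 *      {
 *              struct fscache_cookie *cookie =
 *                      netfs_i_cookie(page->mapping->host);
 *
 *              fscache_wait_on_page_write(cookie, page);
 *              fscache_uncache_page(cookie, page);
 *      }
 */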

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct page *xpage;
        void *val;

        _enter("%p,%p,%x", cookie, page, gfp);

try_again:
        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        if (!val) {
                rcu_read_unlock();
                fscache_stat(&fscache_n_store_vmscan_not_storing);
                __fscache_uncache_page(cookie, page);
                return true;
        }

        /* see if the page is actually undergoing storage - if so we can't get
         * rid of it till the cache has finished with it */
        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                rcu_read_unlock();
                goto page_busy;
        }

        /* the page is pending storage, so we attempt to cancel the store and
         * discard the store request so that the page can be reclaimed */
        spin_lock(&cookie->stores_lock);
        rcu_read_unlock();

        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                /* the page started to undergo storage whilst we were looking,
                 * so now we can only wait or return */
                spin_unlock(&cookie->stores_lock);
                goto page_busy;
        }

        xpage = radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);

        if (xpage) {
                fscache_stat(&fscache_n_store_vmscan_cancelled);
                fscache_stat(&fscache_n_store_radix_deletes);
                ASSERTCMP(xpage, ==, page);
        } else {
                fscache_stat(&fscache_n_store_vmscan_gone);
        }

        wake_up_bit(&cookie->flags, 0);
        if (xpage)
                page_cache_release(xpage);
        __fscache_uncache_page(cookie, page);
        return true;

page_busy:
        /* We will wait here if we're allowed to, but that could deadlock the
         * allocator as the work threads writing to the cache may all end up
         * sleeping on memory allocation, so we may need to impose a timeout
         * too. */
        if (!(gfp & __GFP_WAIT)) {
                fscache_stat(&fscache_n_store_vmscan_busy);
                return false;
        }

        fscache_stat(&fscache_n_store_vmscan_wait);
        __fscache_wait_on_page_write(cookie, page);
        gfp &= ~__GFP_WAIT;     /* wait only once; the retry must not block */
        goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
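
/*
 * Usage sketch (illustrative): a netfs ->releasepage() would typically defer
 * to this function, via the fscache_maybe_release_page() wrapper, to decide
 * whether the VM may reclaim a cached page.  netfs_i_cookie() is a
 * hypothetical helper:
 *
 *      static int netfs_release_page(struct page *page, gfp_t gfp)
 *      {
 *              if (PageFsCache(page)) {
 *                      struct fscache_cookie *cookie =
 *                              netfs_i_cookie(page->mapping->host);
 *
 *                      if (!fscache_maybe_release_page(cookie, page, gfp))
 *                              return 0;       // cache still busy with it
 *              }
 *              return 1;                       // page may be released
 *      }
 */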

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
                                   struct page *page)
{
        struct fscache_cookie *cookie;
        struct page *xpage = NULL;

        spin_lock(&object->lock);
        cookie = object->cookie;
        if (cookie) {
                /* delete the page from the tree if it is now no longer
                 * pending */
                spin_lock(&cookie->stores_lock);
                radix_tree_tag_clear(&cookie->stores, page->index,
                                     FSCACHE_COOKIE_STORING_TAG);
                if (!radix_tree_tag_get(&cookie->stores, page->index,
                                        FSCACHE_COOKIE_PENDING_TAG)) {
                        fscache_stat(&fscache_n_store_radix_deletes);
                        xpage = radix_tree_delete(&cookie->stores, page->index);
                }
                spin_unlock(&cookie->stores_lock);
                wake_up_bit(&cookie->flags, 0);
        }
        spin_unlock(&object->lock);
        if (xpage)
                page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
        struct fscache_object *object = op->object;
        int ret;

        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

        fscache_stat(&fscache_n_attr_changed_calls);

        if (fscache_object_is_active(object)) {
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
                if (ret < 0)
                        fscache_abort_object(object);
        }

        fscache_op_complete(op, true);
        _leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;

        _enter("%p", cookie);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

        fscache_stat(&fscache_n_attr_changed);

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                fscache_stat(&fscache_n_attr_changed_nomem);
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        fscache_operation_init(op, fscache_attr_changed_op, NULL);
        op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;

nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
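
/*
 * Usage sketch (illustrative): a netfs would call the fscache_attr_changed()
 * wrapper after an operation that alters the attributes the cache cares
 * about, typically the file size, e.g. from its ->setattr() path:
 *
 *      if (attr->ia_valid & ATTR_SIZE)
 *              fscache_attr_changed(netfs_i_cookie(inode));
 *
 * (netfs_i_cookie() is a hypothetical helper returning the inode's cookie.)
 */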

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        _enter("{OP%x}", op->op.debug_id);

        ASSERTCMP(op->n_pages, ==, 0);

        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
                fscache_put_context(op->op.object->cookie, op->context);

        _leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
        struct address_space *mapping,
        fscache_rw_complete_t end_io_func,
        void *context)
{
        struct fscache_retrieval *op;

        /* allocate a retrieval operation and attempt to submit it */
        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op) {
                fscache_stat(&fscache_n_retrievals_nomem);
                return NULL;
        }

        fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
        op->op.flags    = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
        op->mapping     = mapping;
        op->end_io_func = end_io_func;
        op->context     = context;
        op->start_time  = jiffies;
        INIT_LIST_HEAD(&op->to_do);
        return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
        unsigned long jif;

        _enter("");

        if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
                _leave(" = 0 [imm]");
                return 0;
        }

        fscache_stat(&fscache_n_retrievals_wait);

        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                fscache_stat(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
                return -ERESTARTSYS;
        }

        ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

        smp_rmb();
        fscache_hist(fscache_retrieval_delay_histogram, jif);
        _leave(" = 0 [dly]");
        return 0;
}

/*
 * Handle cancellation of a pending retrieval op
 */
static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        op->n_pages = 0;
}

/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
                                                 struct fscache_retrieval *op,
                                                 atomic_t *stat_op_waits,
                                                 atomic_t *stat_object_dead)
{
        int ret;

        if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
                goto check_if_dead;

        _debug(">>> WT");
        fscache_stat(stat_op_waits);
        if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                ret = fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
                if (ret == 0)
                        return -ERESTARTSYS;

                /* it's been removed from the pending queue by another party,
                 * so we should get to run shortly */
                wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
        }
        _debug("<<< GO");

check_if_dead:
        if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
                fscache_stat(stat_object_dead);
                _leave(" = -ENOBUFS [cancelled]");
                return -ENOBUFS;
        }
        if (unlikely(fscache_object_is_dead(object))) {
                pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
                fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
                fscache_stat(stat_object_dead);
                return -ENOBUFS;
        }
        return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM      - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS     - no backing object available in which to cache the block
 *   -ENODATA     - no data available in the backing object for this block
 *   0            - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                                 struct page *page,
                                 fscache_rw_complete_t end_io_func,
                                 void *context,
                                 gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
        op->n_pages = 1;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_page);
                ret = object->cache->ops->allocate_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_page);
                if (ret == 0)
                        ret = -ENODATA;
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_page);
                ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
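
/*
 * Usage sketch (illustrative): a netfs ->readpage() commonly tries the cache
 * first through the fscache_read_or_alloc_page() wrapper and falls back to a
 * server read on a cache miss.  The netfs_* names are hypothetical:
 *
 *      static int netfs_readpage_from_fscache(struct inode *inode,
 *                                             struct page *page)
 *      {
 *              int ret;
 *
 *              ret = fscache_read_or_alloc_page(netfs_i_cookie(inode), page,
 *                                               netfs_readpage_complete, NULL,
 *                                               GFP_KERNEL);
 *              switch (ret) {
 *              case 0:         // read dispatched; the callback runs later
 *                      return 0;
 *              case -ENOBUFS:  // no usable cache block
 *              case -ENODATA:  // cache miss
 *                      return 1;       // caller reads from the server
 *              default:
 *                      return ret;
 *              }
 *      }
 */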

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM      - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS     - no backing object or space available in which to cache any
 *                  pages not being read
 *   -ENODATA     - no data available in the backing object for some or all of
 *                  the pages
 *   0            - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned *nr_pages,
                                  fscache_rw_complete_t end_io_func,
                                  void *context,
                                  gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,,%d,,,", cookie, *nr_pages);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(*nr_pages, >, 0);
        ASSERT(!list_empty(pages));

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;
        op->n_pages = *nr_pages;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        atomic_inc(&object->n_reads);
        __set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock_dec;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_pages);
                ret = object->cache->ops->allocate_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_pages);
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_pages);
                ret = object->cache->ops->read_or_alloc_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock_dec:
        atomic_dec(&object->n_reads);
nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
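
/*
 * Usage sketch (illustrative): ->readpages() looks much like the single-page
 * case above, but passes the whole page list; any page for which a read is
 * dispatched is unlinked from *pages and subtracted from *nr_pages, so the
 * netfs then fetches only the remainder from the server:
 *
 *      ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
 *                                        netfs_readpage_complete, NULL,
 *                                        GFP_KERNEL);
 *      if (ret == 0 && nr_pages == 0)
 *              return 0;       // the cache is handling every page
 *      // otherwise read the pages still on the list from the server
 */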

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM      - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS     - no backing object available in which to cache the block
 *   0            - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_allocs);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;
        op->n_pages = 1;

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_alloc_ops);

        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_alloc_op_waits),
                __fscache_stat(&fscache_n_allocs_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        fscache_stat(&fscache_n_cop_allocate_page);
        ret = object->cache->ops->allocate_page(op, page, gfp);
        fscache_stat_d(&fscache_n_cop_allocate_page);

error:
        if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_allocs_intr);
        else if (ret < 0)
                fscache_stat(&fscache_n_allocs_nobufs);
        else
                fscache_stat(&fscache_n_allocs_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
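
/*
 * Usage note (illustrative): fscache_alloc_page() reserves a block without
 * reading it; a netfs would typically follow a successful allocation with
 * fscache_write_page() once the page holds valid data, e.g.:
 *
 *      if (fscache_alloc_page(cookie, page, GFP_KERNEL) == 0)
 *              fscache_write_page(cookie, page, GFP_KERNEL);
 */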

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
        _enter("{OP%x}", _op->debug_id);
}
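
/*
 * Note on the store cycle (a summary inferred from the code below): pages
 * enter cookie->stores tagged PENDING in __fscache_write_page().  Each pass
 * of fscache_write_op() picks one PENDING page, retags it STORING and hands
 * it to the backend; fscache_end_page_write() then clears STORING and, if
 * the page hasn't been re-marked PENDING meanwhile, removes it from the
 * tree and wakes anyone waiting in __fscache_wait_on_page_write().
 */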

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
        struct fscache_cookie *cookie;
        struct page *page;
        unsigned n;
        void *results[1];
        int ret;

        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

        spin_lock(&object->lock);
        cookie = object->cookie;

        if (!fscache_object_is_active(object)) {
                /* If we get here, then the on-disk cache object likely no
                 * longer exists, so we should just cancel this write
                 * operation.
                 */
                spin_unlock(&object->lock);
                fscache_op_complete(&op->op, false);
                _leave(" [inactive]");
                return;
        }

        if (!cookie) {
                /* If we get here, then the cookie belonging to the object was
                 * detached, probably by the cookie being withdrawn due to
                 * memory pressure, which means that the pages we might write
                 * to the cache from no longer exist - therefore, we can just
                 * cancel this write operation.
                 */
                spin_unlock(&object->lock);
                fscache_op_complete(&op->op, false);
                _leave(" [cancel] op{f=%lx s=%u} obj{s=%u f=%lx}",
                       _op->flags, _op->state, object->state, object->flags);
                return;
        }

        spin_lock(&cookie->stores_lock);

        fscache_stat(&fscache_n_store_calls);

        /* find a page to store */
        page = NULL;
        n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
                                       FSCACHE_COOKIE_PENDING_TAG);
        if (n != 1)
                goto superseded;
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
        if (page->index > op->store_limit) {
                fscache_stat(&fscache_n_store_pages_over_limit);
                goto superseded;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_STORING_TAG);
        radix_tree_tag_clear(&cookie->stores, page->index,
                             FSCACHE_COOKIE_PENDING_TAG);

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        fscache_stat(&fscache_n_store_pages);
        fscache_stat(&fscache_n_cop_write_page);
        ret = object->cache->ops->write_page(op, page);
        fscache_stat_d(&fscache_n_cop_write_page);
        fscache_end_page_write(object, page);
        if (ret < 0) {
                fscache_abort_object(object);
                fscache_op_complete(&op->op, true);
        } else {
                fscache_enqueue_operation(&op->op);
        }

        _leave("");
        return;

superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
        spin_unlock(&cookie->stores_lock);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
        fscache_op_complete(&op->op, true);
        _leave("");
}

/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
        struct page *page;
        void *results[16];
        int n, i;

        _enter("");

        while (spin_lock(&cookie->stores_lock),
               n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
                                              ARRAY_SIZE(results),
                                              FSCACHE_COOKIE_PENDING_TAG),
               n > 0) {
                for (i = n - 1; i >= 0; i--) {
                        page = results[i];
                        radix_tree_delete(&cookie->stores, page->index);
                }

                spin_unlock(&cookie->stores_lock);

                for (i = n - 1; i >= 0; i--)
                        page_cache_release(results[i]);
        }

        spin_unlock(&cookie->stores_lock);
        _leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM  - out of memory, nothing done
 *   -ENOBUFS - no backing object available in which to cache the page
 *   0        - dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *      (a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *          fill op)
 *
 *      (b) writes deferred till post-creation (mark page for writing and
 *          return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *      (a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *      (b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_storage *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%x,", cookie, (u32) page->flags);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERT(PageFsCache(page));

        fscache_stat(&fscache_n_stores);

        if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
                _leave(" = -ENOBUFS [invalidating]");
                return -ENOBUFS;
        }

        op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
        if (!op)
                goto nomem;

        fscache_operation_init(&op->op, fscache_write_op,
                               fscache_release_write_op);
        op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);

        ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
                goto nomem_free;

        ret = -ENOBUFS;
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto nobufs;

        /* add the page to the pending-storage radix tree on the backing
         * object */
        spin_lock(&object->lock);
        spin_lock(&cookie->stores_lock);

        _debug("store limit %llx", (unsigned long long) object->store_limit);

        ret = radix_tree_insert(&cookie->stores, page->index, page);
        if (ret < 0) {
                if (ret == -EEXIST)
                        goto already_queued;
                _debug("insert failed %d", ret);
                goto nobufs_unlock_obj;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_PENDING_TAG);
        page_cache_get(page);

        /* we only want one writer at a time, but we do need to queue new
         * writers after exclusive ops */
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;

        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;

        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_stat(&fscache_n_store_ops);
        fscache_stat(&fscache_n_stores_ok);

        /* the work queue now carries its own ref on the object */
        fscache_put_operation(&op->op);
        _leave(" = 0");
        return 0;

already_queued:
        fscache_stat(&fscache_n_stores_again);
already_pending:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;

submit_failed:
        spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;

nobufs_unlock_obj:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;

nomem_free:
        kfree(op);
nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
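
/*
 * Usage sketch (illustrative): having filled a page from the server, a netfs
 * pushes it into the cache with the fscache_write_page() wrapper and drops
 * the mark if the write can't be queued.  Modelled on typical netfs code;
 * netfs_i_cookie() is hypothetical:
 *
 *      static void netfs_readpage_to_fscache(struct inode *inode,
 *                                            struct page *page)
 *      {
 *              int ret;
 *
 *              ret = fscache_write_page(netfs_i_cookie(inode), page,
 *                                       GFP_KERNEL);
 *              if (ret != 0)
 *                      fscache_uncache_page(netfs_i_cookie(inode), page);
 *      }
 */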

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
        struct fscache_object *object;

        _enter(",%p", page);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        fscache_stat(&fscache_n_uncaches);

        /* cache withdrawal may beat us to it */
        if (!PageFsCache(page))
                goto done;

        /* get the object */
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects)) {
                ClearPageFsCache(page);
                goto done_unlock;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        /* there might now be stuff on disk we could read */
        clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* only invoke the cache backend if we managed to mark the page
         * uncached here; this deals with synchronisation vs withdrawal */
        if (TestClearPageFsCache(page) &&
            object->cache->ops->uncache_page) {
                /* the cache backend releases the cookie lock */
                fscache_stat(&fscache_n_cop_uncache_page);
                object->cache->ops->uncache_page(object, page);
                fscache_stat_d(&fscache_n_cop_uncache_page);
                goto done;
        }

done_unlock:
        spin_unlock(&cookie->lock);
done:
        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
        struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
        atomic_inc(&fscache_n_marks);
#endif

        _debug("- mark %p{%lx}", page, page->index);
        if (TestSetPageFsCache(page)) {
                static bool once_only;
                if (!once_only) {
                        once_only = true;
                        printk(KERN_WARNING "FS-Cache:"
                               " Cookie type %s marked page %lx"
                               " multiple times\n",
                               cookie->def->name, page->index);
                }
        }

        if (cookie->def->mark_page_cached)
                cookie->def->mark_page_cached(cookie->netfs_data,
                                              op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
                               struct pagevec *pagevec)
{
        unsigned long loop;

        for (loop = 0; loop < pagevec->nr; loop++)
                fscache_mark_page_cached(op, pagevec->pages[loop]);

        pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
                                       struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;
        struct pagevec pvec;
        pgoff_t next;
        int i;

        _enter("%p,%p", cookie, inode);

        if (!mapping || mapping->nrpages == 0) {
                _leave(" [no pages]");
                return;
        }

        pagevec_init(&pvec, 0);
        next = 0;
        do {
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
                        break;
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        next = page->index;
                        if (PageFsCache(page)) {
                                __fscache_wait_on_page_write(cookie, page);
                                __fscache_uncache_page(cookie, page);
                        }
                }
                pagevec_release(&pvec);
                cond_resched();
        } while (++next);

        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
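
/*
 * Usage sketch (illustrative): a netfs would typically call the
 * fscache_uncache_all_inode_pages() wrapper from its ->evict_inode() path,
 * before relinquishing the cookie, so that no page is still marked when the
 * inode goes away:
 *
 *      fscache_uncache_all_inode_pages(netfs_i_cookie(inode), inode);
 *      fscache_relinquish_cookie(netfs_i_cookie(inode), 0);
 */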