page.c

/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
        void *val;

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        rcu_read_unlock();

        return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

        wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);

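/*
 * Usage sketch: a netfs that needs a page to be stable before reusing it
 * would typically go through the wrappers in include/linux/fscache.h,
 * roughly:
 *
 *      if (fscache_check_page_write(cookie, page))
 *              fscache_wait_on_page_write(cookie, page);
 *
 * where "cookie" stands for whatever data cookie the calling filesystem
 * attached to the file; this is an illustration only, not a prescription.
 */
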
/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct page *xpage;
        void *val;

        _enter("%p,%p,%x", cookie, page, gfp);

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        if (!val) {
                rcu_read_unlock();
                fscache_stat(&fscache_n_store_vmscan_not_storing);
                __fscache_uncache_page(cookie, page);
                return true;
        }

        /* see if the page is actually undergoing storage - if so we can't get
         * rid of it till the cache has finished with it */
        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                rcu_read_unlock();
                goto page_busy;
        }

        /* the page is pending storage, so we attempt to cancel the store and
         * discard the store request so that the page can be reclaimed */
        spin_lock(&cookie->stores_lock);
        rcu_read_unlock();

        if (radix_tree_tag_get(&cookie->stores, page->index,
                               FSCACHE_COOKIE_STORING_TAG)) {
                /* the page started to undergo storage whilst we were looking,
                 * so now we can only wait or return */
                spin_unlock(&cookie->stores_lock);
                goto page_busy;
        }

        xpage = radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);

        if (xpage) {
                fscache_stat(&fscache_n_store_vmscan_cancelled);
                fscache_stat(&fscache_n_store_radix_deletes);
                ASSERTCMP(xpage, ==, page);
        } else {
                fscache_stat(&fscache_n_store_vmscan_gone);
        }

        wake_up_bit(&cookie->flags, 0);
        if (xpage)
                page_cache_release(xpage);
        __fscache_uncache_page(cookie, page);
        return true;

page_busy:
        /* we might want to wait here, but that could deadlock the allocator as
         * the work threads writing to the cache may all end up sleeping
         * on memory allocation */
        fscache_stat(&fscache_n_store_vmscan_busy);
        return false;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);

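/*
 * Usage sketch: a netfs ->releasepage() would normally let fscache decide
 * whether a cached page may go, via the fscache_maybe_release_page() wrapper.
 * The example_*() names below are hypothetical, for illustration only:
 *
 *      static int example_releasepage(struct page *page, gfp_t gfp)
 *      {
 *              struct fscache_cookie *cookie = example_cookie_for(page);
 *
 *              if (PageFsCache(page) &&
 *                  !fscache_maybe_release_page(cookie, page, gfp))
 *                      return 0;       // still busy in the cache
 *              return 1;
 *      }
 */
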
/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
                                   struct page *page)
{
        struct fscache_cookie *cookie;
        struct page *xpage = NULL;

        spin_lock(&object->lock);
        cookie = object->cookie;
        if (cookie) {
                /* delete the page from the tree if it is now no longer
                 * pending */
                spin_lock(&cookie->stores_lock);
                radix_tree_tag_clear(&cookie->stores, page->index,
                                     FSCACHE_COOKIE_STORING_TAG);
                if (!radix_tree_tag_get(&cookie->stores, page->index,
                                        FSCACHE_COOKIE_PENDING_TAG)) {
                        fscache_stat(&fscache_n_store_radix_deletes);
                        xpage = radix_tree_delete(&cookie->stores, page->index);
                }
                spin_unlock(&cookie->stores_lock);
                wake_up_bit(&cookie->flags, 0);
        }
        spin_unlock(&object->lock);
        if (xpage)
                page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
        struct fscache_object *object = op->object;
        int ret;

        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

        fscache_stat(&fscache_n_attr_changed_calls);

        if (fscache_object_is_active(object)) {
                fscache_set_op_state(op, "CallFS");
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
                fscache_set_op_state(op, "Done");
                if (ret < 0)
                        fscache_abort_object(object);
        }

        _leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;

        _enter("%p", cookie);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

        fscache_stat(&fscache_n_attr_changed);

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                fscache_stat(&fscache_n_attr_changed_nomem);
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        fscache_operation_init(op, fscache_attr_changed_op, NULL);
        op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);
        fscache_set_op_name(op, "Attr");

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;

nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);

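/*
 * Usage sketch: a netfs would call the fscache_attr_changed() wrapper after
 * an object's attributes change, for instance once a truncate has been
 * committed to the server.  "example_inode" is a hypothetical netfs inode
 * carrying an fscache cookie pointer:
 *
 *      if (example_inode->fscache)
 *              fscache_attr_changed(example_inode->fscache);
 *
 * Callers commonly ignore the -ENOMEM/-ENOBUFS return since caching is
 * best-effort.
 */
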
/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        _enter("{OP%x}", op->op.debug_id);

        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
                fscache_put_context(op->op.object->cookie, op->context);

        _leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
        struct address_space *mapping,
        fscache_rw_complete_t end_io_func,
        void *context)
{
        struct fscache_retrieval *op;

        /* allocate a retrieval operation and attempt to submit it */
        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op) {
                fscache_stat(&fscache_n_retrievals_nomem);
                return NULL;
        }

        fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
        op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
        op->mapping = mapping;
        op->end_io_func = end_io_func;
        op->context = context;
        op->start_time = jiffies;
        INIT_LIST_HEAD(&op->to_do);
        fscache_set_op_name(&op->op, "Retr");
        return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
        unsigned long jif;

        _enter("");

        if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
                _leave(" = 0 [imm]");
                return 0;
        }

        fscache_stat(&fscache_n_retrievals_wait);

        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                fscache_stat(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
                return -ERESTARTSYS;
        }

        ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

        smp_rmb();
        fscache_hist(fscache_retrieval_delay_histogram, jif);
        _leave(" = 0 [dly]");
        return 0;
}

/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
                                                 struct fscache_retrieval *op,
                                                 atomic_t *stat_op_waits,
                                                 atomic_t *stat_object_dead)
{
        int ret;

        if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
                goto check_if_dead;

        _debug(">>> WT");
        fscache_stat(stat_op_waits);
        if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) < 0) {
                ret = fscache_cancel_op(&op->op);
                if (ret == 0)
                        return -ERESTARTSYS;

                /* it's been removed from the pending queue by another party,
                 * so we should get to run shortly */
                wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
        }
        _debug("<<< GO");

check_if_dead:
        if (unlikely(fscache_object_is_dead(object))) {
                fscache_stat(stat_object_dead);
                return -ENOBUFS;
        }
        return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM      - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS     - no backing object available in which to cache the block
 *   -ENODATA     - no data available in the backing object for this block
 *   0            - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                                 struct page *page,
                                 fscache_rw_complete_t end_io_func,
                                 void *context,
                                 gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
        fscache_set_op_name(&op->op, "RetrRA1");

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

        atomic_inc(&object->n_reads);
        set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_page);
                ret = object->cache->ops->allocate_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_page);
                if (ret == 0)
                        ret = -ENODATA;
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_page);
                ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);

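/*
 * Usage sketch: a netfs ->readpage() would typically try the cache first
 * through the fscache_read_or_alloc_page() wrapper and fall back to a server
 * read on -ENODATA/-ENOBUFS.  example_read_complete() and
 * example_read_from_server() are hypothetical names for illustration:
 *
 *      ret = fscache_read_or_alloc_page(cookie, page,
 *                                       example_read_complete,
 *                                       NULL, GFP_KERNEL);
 *      if (ret == 0)
 *              return 0;       // read dispatched; completion handles the page
 *      if (ret == -ENODATA || ret == -ENOBUFS)
 *              return example_read_from_server(page);
 *      return ret;
 */
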
/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM      - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS     - no backing object or space available in which to cache any
 *                  pages not being read
 *   -ENODATA     - no data available in the backing object for some or all of
 *                  the pages
 *   0            - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned *nr_pages,
                                  fscache_rw_complete_t end_io_func,
                                  void *context,
                                  gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,,%d,,,", cookie, *nr_pages);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(*nr_pages, >, 0);
        ASSERT(!list_empty(pages));

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;
        fscache_set_op_name(&op->op, "RetrRAN");

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        atomic_inc(&object->n_reads);
        set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_retrieval_op_waits),
                __fscache_stat(&fscache_n_retrievals_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_pages);
                ret = object->cache->ops->allocate_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_pages);
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_pages);
                ret = object->cache->ops->read_or_alloc_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);

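/*
 * Usage sketch: in a netfs ->readpages() implementation, the usual pattern is
 * to offer the whole list here and then hand whatever remains in "pages" and
 * "nr_pages" (the pages the cache could not service) to the server read path.
 * The example_*() names are hypothetical:
 *
 *      ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
 *                                        example_read_complete, NULL,
 *                                        GFP_KERNEL);
 *      if (ret == 0 && nr_pages == 0)
 *              return 0;       // everything was satisfied from the cache
 *      return example_readpages_from_server(mapping, pages, nr_pages);
 */
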
/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM      - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS     - no backing object available in which to cache the block
 *   0            - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_allocs);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;
        fscache_set_op_name(&op->op, "RetrAL1");

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_alloc_ops);

        ret = fscache_wait_for_retrieval_activation(
                object, op,
                __fscache_stat(&fscache_n_alloc_op_waits),
                __fscache_stat(&fscache_n_allocs_object_dead));
        if (ret < 0)
                goto error;

        /* ask the cache to honour the operation */
        fscache_stat(&fscache_n_cop_allocate_page);
        ret = object->cache->ops->allocate_page(op, page, gfp);
        fscache_stat_d(&fscache_n_cop_allocate_page);

error:
        if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_allocs_intr);
        else if (ret < 0)
                fscache_stat(&fscache_n_allocs_nobufs);
        else
                fscache_stat(&fscache_n_allocs_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
        _enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
        struct fscache_cookie *cookie;
        struct page *page;
        unsigned n;
        void *results[1];
        int ret;

        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

        fscache_set_op_state(&op->op, "GetPage");

        spin_lock(&object->lock);
        cookie = object->cookie;

        if (!fscache_object_is_active(object) || !cookie) {
                spin_unlock(&object->lock);
                _leave("");
                return;
        }

        spin_lock(&cookie->stores_lock);

        fscache_stat(&fscache_n_store_calls);

        /* find a page to store */
        page = NULL;
        n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
                                       FSCACHE_COOKIE_PENDING_TAG);
        if (n != 1)
                goto superseded;
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
        if (page->index > op->store_limit) {
                fscache_stat(&fscache_n_store_pages_over_limit);
                goto superseded;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_STORING_TAG);
        radix_tree_tag_clear(&cookie->stores, page->index,
                             FSCACHE_COOKIE_PENDING_TAG);

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        fscache_set_op_state(&op->op, "Store");
        fscache_stat(&fscache_n_store_pages);
        fscache_stat(&fscache_n_cop_write_page);
        ret = object->cache->ops->write_page(op, page);
        fscache_stat_d(&fscache_n_cop_write_page);
        fscache_set_op_state(&op->op, "EndWrite");
        fscache_end_page_write(object, page);
        if (ret < 0) {
                fscache_set_op_state(&op->op, "Abort");
                fscache_abort_object(object);
        } else {
                fscache_enqueue_operation(&op->op);
        }

        _leave("");
        return;

superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
        spin_unlock(&cookie->stores_lock);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
        _leave("");
}

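/*
 * Note that fscache_write_op() stores exactly one pending page per
 * invocation: on a successful write it re-enqueues its own operation via
 * fscache_enqueue_operation(), so the work queue keeps calling back until
 * the gang lookup finds no page tagged FSCACHE_COOKIE_PENDING_TAG and the
 * superseded path clears FSCACHE_OBJECT_PENDING_WRITE.
 */
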
/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM  - out of memory, nothing done
 *   -ENOBUFS - no backing object available in which to cache the page
 *   0        - store queued - the page will be written to the cache in the
 *              background
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *      (a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *          fill op)
 *
 *      (b) writes deferred till post-creation (mark page for writing and
 *          return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *      (a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *      (b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_storage *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%x,", cookie, (u32) page->flags);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERT(PageFsCache(page));

        fscache_stat(&fscache_n_stores);

        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op)
                goto nomem;

        fscache_operation_init(&op->op, fscache_write_op,
                               fscache_release_write_op);
        op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);
        fscache_set_op_name(&op->op, "Write1");

        ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
                goto nomem_free;

        ret = -ENOBUFS;
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto nobufs;

        /* add the page to the pending-storage radix tree on the backing
         * object */
        spin_lock(&object->lock);
        spin_lock(&cookie->stores_lock);

        _debug("store limit %llx", (unsigned long long) object->store_limit);

        ret = radix_tree_insert(&cookie->stores, page->index, page);
        if (ret < 0) {
                if (ret == -EEXIST)
                        goto already_queued;
                _debug("insert failed %d", ret);
                goto nobufs_unlock_obj;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_PENDING_TAG);
        page_cache_get(page);

        /* we only want one writer at a time, but we do need to queue new
         * writers after exclusive ops */
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;

        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;

        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_stat(&fscache_n_store_ops);
        fscache_stat(&fscache_n_stores_ok);

        /* the work queue now carries its own ref on the object */
        fscache_put_operation(&op->op);
        _leave(" = 0");
        return 0;

already_queued:
        fscache_stat(&fscache_n_stores_again);
already_pending:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;

submit_failed:
        spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;

nobufs_unlock_obj:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;

nomem_free:
        kfree(op);
nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);

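/*
 * Usage sketch: a netfs would normally call the fscache_write_page() wrapper
 * once a page's data is ready to be copied to the cache (for example, after
 * it has been read from the server), uncaching the page on failure so that
 * PG_fscache is not left set with no store pending.  "example_inode" is a
 * hypothetical netfs inode carrying an fscache cookie pointer:
 *
 *      if (example_inode->fscache) {
 *              ret = fscache_write_page(example_inode->fscache, page,
 *                                       GFP_KERNEL);
 *              if (ret != 0)
 *                      fscache_uncache_page(example_inode->fscache, page);
 *      }
 */
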
/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
        struct fscache_object *object;

        _enter(",%p", page);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        fscache_stat(&fscache_n_uncaches);

        /* cache withdrawal may beat us to it */
        if (!PageFsCache(page))
                goto done;

        /* get the object */
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects)) {
                ClearPageFsCache(page);
                goto done_unlock;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        /* there might now be stuff on disk we could read */
        clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* only invoke the cache backend if we managed to mark the page
         * uncached here; this deals with synchronisation vs withdrawal */
        if (TestClearPageFsCache(page) &&
            object->cache->ops->uncache_page) {
                /* the cache backend releases the cookie lock */
                fscache_stat(&fscache_n_cop_uncache_page);
                object->cache->ops->uncache_page(object, page);
                fscache_stat_d(&fscache_n_cop_uncache_page);
                goto done;
        }

done_unlock:
        spin_unlock(&cookie->lock);
done:
        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
                               struct pagevec *pagevec)
{
        struct fscache_cookie *cookie = op->op.object->cookie;
        unsigned long loop;

#ifdef CONFIG_FSCACHE_STATS
        atomic_add(pagevec->nr, &fscache_n_marks);
#endif

        for (loop = 0; loop < pagevec->nr; loop++) {
                struct page *page = pagevec->pages[loop];

                _debug("- mark %p{%lx}", page, page->index);
                if (TestSetPageFsCache(page)) {
                        static bool once_only;
                        if (!once_only) {
                                once_only = true;
                                printk(KERN_WARNING "FS-Cache:"
                                       " Cookie type %s marked page %lx"
                                       " multiple times\n",
                                       cookie->def->name, page->index);
                        }
                }
        }

        if (cookie->def->mark_pages_cached)
                cookie->def->mark_pages_cached(cookie->netfs_data,
                                               op->mapping, pagevec);
        pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

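/*
 * Usage sketch: this helper is aimed at cache backends rather than netfs
 * code.  A backend's read_or_alloc_pages() implementation would typically
 * batch up the pages it has decided to back and mark them, roughly:
 *
 *      pagevec_init(&pagevec, 0);
 *      ...
 *      if (!pagevec_add(&pagevec, page))
 *              fscache_mark_pages_cached(op, &pagevec);
 *      ...
 *      if (pagevec_count(&pagevec) > 0)
 *              fscache_mark_pages_cached(op, &pagevec);
 *
 * The exact batching is up to the backend; the point is that every page
 * reported back to the netfs as cached ends up with PG_fscache set.
 */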