/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
        void *val;

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        rcu_read_unlock();

        return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

        wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
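
/* Example (illustrative sketch, not part of this file): a netfs would normally
 * use the fscache_check_page_write()/fscache_wait_on_page_write() wrappers
 * from linux/fscache.h when it must not release or reuse a page that may still
 * be in flight to the cache.  The helper and field names below are
 * hypothetical:
 *
 *	void example_wait_for_cache_write(struct example_inode *ei,
 *					  struct page *page)
 *	{
 *		if (fscache_check_page_write(ei->cookie, page))
 *			fscache_wait_on_page_write(ei->cookie, page);
 *	}
 */
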
/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *page)
{
        struct page *xpage;

        spin_lock(&cookie->lock);
        xpage = radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->lock);
        ASSERT(xpage != NULL);

        wake_up_bit(&cookie->flags, 0);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
        struct fscache_object *object = op->object;
        int ret;

        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

        fscache_stat(&fscache_n_attr_changed_calls);

        if (fscache_object_is_active(object)) {
                fscache_set_op_state(op, "CallFS");
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
                fscache_set_op_state(op, "Done");
                if (ret < 0)
                        fscache_abort_object(object);
        }

        _leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;

        _enter("%p", cookie);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

        fscache_stat(&fscache_n_attr_changed);

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                fscache_stat(&fscache_n_attr_changed_nomem);
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        fscache_operation_init(op, NULL);
        fscache_operation_init_slow(op, fscache_attr_changed_op);
        op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
        fscache_set_op_name(op, "Attr");

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;

nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
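
/* Example (illustrative sketch): after changing an inode's size or other
 * attributes, a netfs would normally notify the cache through the
 * fscache_attr_changed() wrapper in linux/fscache.h rather than calling
 * __fscache_attr_changed() directly; -ENOBUFS simply means there is no cache
 * object to update.  Names below are hypothetical:
 *
 *	int example_attrs_changed(struct example_inode *ei)
 *	{
 *		return fscache_attr_changed(ei->cookie);
 *	}
 */
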
/*
 * handle secondary execution given to a retrieval op on behalf of the
 * cache
 */
static void fscache_retrieval_work(struct work_struct *work)
{
        struct fscache_retrieval *op =
                container_of(work, struct fscache_retrieval, op.fast_work);
        unsigned long start;

        _enter("{OP%x}", op->op.debug_id);

        start = jiffies;
        op->op.processor(&op->op);
        fscache_hist(fscache_ops_histogram, start);
        fscache_put_operation(&op->op);
}

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        _enter("{OP%x}", op->op.debug_id);

        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
                fscache_put_context(op->op.object->cookie, op->context);

        _leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
        struct address_space *mapping,
        fscache_rw_complete_t end_io_func,
        void *context)
{
        struct fscache_retrieval *op;

        /* allocate a retrieval operation and attempt to submit it */
        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op) {
                fscache_stat(&fscache_n_retrievals_nomem);
                return NULL;
        }

        fscache_operation_init(&op->op, fscache_release_retrieval_op);
        op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
        op->mapping = mapping;
        op->end_io_func = end_io_func;
        op->context = context;
        op->start_time = jiffies;
        INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
        INIT_LIST_HEAD(&op->to_do);
        fscache_set_op_name(&op->op, "Retr");
        return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
        unsigned long jif;

        _enter("");

        if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
                _leave(" = 0 [imm]");
                return 0;
        }

        fscache_stat(&fscache_n_retrievals_wait);

        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                fscache_stat(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
                return -ERESTARTSYS;
        }

        ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

        smp_rmb();
        fscache_hist(fscache_retrieval_delay_histogram, jif);
        _leave(" = 0 [dly]");
        return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS	- interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                                 struct page *page,
                                 fscache_rw_complete_t end_io_func,
                                 void *context,
                                 gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
        fscache_set_op_name(&op->op, "RetrRA1");

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

        atomic_inc(&object->n_reads);
        set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
                _debug(">>> WT");
                fscache_stat(&fscache_n_retrieval_op_waits);
                wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
                _debug("<<< GO");
        }

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_page);
                ret = object->cache->ops->allocate_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_page);
                if (ret == 0)
                        ret = -ENODATA;
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_page);
                ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
        }

        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
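
/* Example (illustrative sketch): a netfs ->readpage() path would normally go
 * through the fscache_read_or_alloc_page() wrapper in linux/fscache.h and fall
 * back to reading from the server on -ENOBUFS or -ENODATA.  The completion
 * callback and field names below are hypothetical:
 *
 *	int example_readpage(struct example_inode *ei, struct page *page)
 *	{
 *		int ret;
 *
 *		ret = fscache_read_or_alloc_page(ei->cookie, page,
 *						 example_read_complete, ei,
 *						 GFP_KERNEL);
 *		if (ret == 0)
 *			return 0;
 *		if (ret == -ENOBUFS || ret == -ENODATA)
 *			return example_read_from_server(ei, page);
 *		return ret;
 *	}
 *
 * A return of 0 means a read was dispatched and example_read_complete() will
 * be called for the page; -ENOBUFS/-ENODATA mean the data must come from the
 * server; -ENOMEM and -ERESTARTSYS are passed back to the caller.
 */
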
/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS	- interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *		  pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *		  the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned *nr_pages,
                                  fscache_rw_complete_t end_io_func,
                                  void *context,
                                  gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,,%d,,,", cookie, *nr_pages);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(*nr_pages, >, 0);
        ASSERT(!list_empty(pages));

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;
        fscache_set_op_name(&op->op, "RetrRAN");

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        atomic_inc(&object->n_reads);
        set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
                _debug(">>> WT");
                fscache_stat(&fscache_n_retrieval_op_waits);
                wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
                _debug("<<< GO");
        }

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_pages);
                ret = object->cache->ops->allocate_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_pages);
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_pages);
                ret = object->cache->ops->read_or_alloc_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
        }

        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
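
/* Example (illustrative sketch): a netfs ->readpages() path would normally
 * call the fscache_read_or_alloc_pages() wrapper; pages the cache takes on
 * are removed from the list and *nr_pages is reduced, so whatever remains is
 * read from the server.  Names are hypothetical:
 *
 *	int example_readpages(struct example_inode *ei,
 *			      struct address_space *mapping,
 *			      struct list_head *pages, unsigned nr_pages)
 *	{
 *		int ret;
 *
 *		ret = fscache_read_or_alloc_pages(ei->cookie, mapping, pages,
 *						  &nr_pages,
 *						  example_read_complete, ei,
 *						  GFP_KERNEL);
 *		if (ret == 0 && nr_pages == 0)
 *			return 0;
 *		return example_read_list_from_server(ei, mapping, pages,
 *						     nr_pages);
 *	}
 */
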
/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS	- interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_allocs);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;
        fscache_set_op_name(&op->op, "RetrAL1");

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_alloc_ops);

        if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
                _debug(">>> WT");
                fscache_stat(&fscache_n_alloc_op_waits);
                wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                            fscache_wait_bit, TASK_UNINTERRUPTIBLE);
                _debug("<<< GO");
        }

        /* ask the cache to honour the operation */
        fscache_stat(&fscache_n_cop_allocate_page);
        ret = object->cache->ops->allocate_page(op, page, gfp);
        fscache_stat_d(&fscache_n_cop_allocate_page);

        if (ret < 0)
                fscache_stat(&fscache_n_allocs_nobufs);
        else
                fscache_stat(&fscache_n_allocs_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
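
/* Example (illustrative sketch): a netfs that is about to fill a page with
 * data it already holds (rather than data retrieved from the cache) can ask
 * the cache to reserve a block for it via the fscache_alloc_page() wrapper and
 * later store the page with fscache_write_page().  -ENOBUFS is treated as
 * "carry on uncached".  Names are hypothetical:
 *
 *	int example_prepare_page_for_caching(struct example_inode *ei,
 *					     struct page *page)
 *	{
 *		int ret;
 *
 *		ret = fscache_alloc_page(ei->cookie, page, GFP_KERNEL);
 *		if (ret == -ENOBUFS)
 *			return 0;
 *		return ret;
 *	}
 */
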
/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
        _enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
        struct fscache_cookie *cookie = object->cookie;
        struct page *page;
        unsigned n;
        void *results[1];
        int ret;

        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

        fscache_set_op_state(&op->op, "GetPage");

        spin_lock(&cookie->lock);
        spin_lock(&object->lock);

        if (!fscache_object_is_active(object)) {
                spin_unlock(&object->lock);
                spin_unlock(&cookie->lock);
                _leave("");
                return;
        }

        fscache_stat(&fscache_n_store_calls);

        /* find a page to store */
        page = NULL;
        n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
                                       FSCACHE_COOKIE_PENDING_TAG);
        if (n != 1)
                goto superseded;
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
        if (page->index > op->store_limit)
                goto superseded;

        radix_tree_tag_clear(&cookie->stores, page->index,
                             FSCACHE_COOKIE_PENDING_TAG);

        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);

        if (page) {
                fscache_set_op_state(&op->op, "Store");
                fscache_stat(&fscache_n_cop_write_page);
                ret = object->cache->ops->write_page(op, page);
                fscache_stat_d(&fscache_n_cop_write_page);
                fscache_set_op_state(&op->op, "EndWrite");
                fscache_end_page_write(cookie, page);
                page_cache_release(page);
                if (ret < 0) {
                        fscache_set_op_state(&op->op, "Abort");
                        fscache_abort_object(object);
                } else {
                        fscache_enqueue_operation(&op->op);
                }
        }

        _leave("");
        return;

superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        _leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *	    fill op)
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_storage *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%x,", cookie, (u32) page->flags);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERT(PageFsCache(page));

        fscache_stat(&fscache_n_stores);

        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op)
                goto nomem;

        fscache_operation_init(&op->op, fscache_release_write_op);
        fscache_operation_init_slow(&op->op, fscache_write_op);
        op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
        fscache_set_op_name(&op->op, "Write1");

        ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
                goto nomem_free;

        ret = -ENOBUFS;
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto nobufs;

        /* add the page to the pending-storage radix tree on the backing
         * object */
        spin_lock(&object->lock);

        _debug("store limit %llx", (unsigned long long) object->store_limit);

        ret = radix_tree_insert(&cookie->stores, page->index, page);
        if (ret < 0) {
                if (ret == -EEXIST)
                        goto already_queued;
                _debug("insert failed %d", ret);
                goto nobufs_unlock_obj;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_PENDING_TAG);
        page_cache_get(page);

        /* we only want one writer at a time, but we do need to queue new
         * writers after exclusive ops */
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;

        spin_unlock(&object->lock);

        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;

        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;

        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_stat(&fscache_n_store_ops);
        fscache_stat(&fscache_n_stores_ok);

        /* the slow work queue now carries its own ref on the object */
        fscache_put_operation(&op->op);
        _leave(" = 0");
        return 0;

already_queued:
        fscache_stat(&fscache_n_stores_again);
already_pending:
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;

submit_failed:
        radix_tree_delete(&cookie->stores, page->index);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;

nobufs_unlock_obj:
        spin_unlock(&object->lock);
nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;

nomem_free:
        kfree(op);
nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
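
/* Example (illustrative sketch): a netfs that has just obtained a page from
 * the server would normally store it through the fscache_write_page() wrapper
 * and, if the store cannot even be queued, uncache the page again so the cache
 * forgets about it.  A failed store is not fatal, it just means the page will
 * not be cached.  Names are hypothetical:
 *
 *	void example_store_page(struct example_inode *ei, struct page *page)
 *	{
 *		int ret;
 *
 *		ret = fscache_write_page(ei->cookie, page, GFP_KERNEL);
 *		if (ret != 0)
 *			fscache_uncache_page(ei->cookie, page);
 *	}
 */
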
/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
        struct fscache_object *object;

        _enter(",%p", page);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        fscache_stat(&fscache_n_uncaches);

        /* cache withdrawal may beat us to it */
        if (!PageFsCache(page))
                goto done;

        /* get the object */
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects)) {
                ClearPageFsCache(page);
                goto done_unlock;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        /* there might now be stuff on disk we could read */
        clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* only invoke the cache backend if we managed to mark the page
         * uncached here; this deals with synchronisation vs withdrawal */
        if (TestClearPageFsCache(page) &&
            object->cache->ops->uncache_page) {
                /* the cache backend releases the cookie lock */
                fscache_stat(&fscache_n_cop_uncache_page);
                object->cache->ops->uncache_page(object, page);
                fscache_stat_d(&fscache_n_cop_uncache_page);
                goto done;
        }

done_unlock:
        spin_unlock(&cookie->lock);
done:
        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
                               struct pagevec *pagevec)
{
        struct fscache_cookie *cookie = op->op.object->cookie;
        unsigned long loop;

#ifdef CONFIG_FSCACHE_STATS
        atomic_add(pagevec->nr, &fscache_n_marks);
#endif

        for (loop = 0; loop < pagevec->nr; loop++) {
                struct page *page = pagevec->pages[loop];

                _debug("- mark %p{%lx}", page, page->index);
                if (TestSetPageFsCache(page)) {
                        static bool once_only;
                        if (!once_only) {
                                once_only = true;
                                printk(KERN_WARNING "FS-Cache:"
                                       " Cookie type %s marked page %lx"
                                       " multiple times\n",
                                       cookie->def->name, page->index);
                        }
                }
        }

        if (cookie->def->mark_pages_cached)
                cookie->def->mark_pages_cached(cookie->netfs_data,
                                               op->mapping, pagevec);
        pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);
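
/* Example (illustrative sketch): fscache_mark_pages_cached() is called by the
 * cache backend, not by the netfs.  A backend's read_or_alloc_page()
 * implementation might gather the pages it has taken responsibility for into a
 * pagevec and mark them before returning.  Names are hypothetical:
 *
 *	static void example_mark_page_cached(struct fscache_retrieval *op,
 *					     struct page *page)
 *	{
 *		struct pagevec pagevec;
 *
 *		pagevec_init(&pagevec, 0);
 *		pagevec_add(&pagevec, page);
 *		fscache_mark_pages_cached(op, &pagevec);
 *	}
 */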