/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_cookie *cookie, struct page *page)
{
	struct page *xpage;

	spin_lock(&cookie->lock);
	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->lock);
	ASSERT(xpage != NULL);

	wake_up_bit(&cookie->flags, 0);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object) &&
	    object->cache->ops->attr_changed(object) < 0)
		fscache_abort_object(object);

	_leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, NULL);
	fscache_operation_init_slow(op, fscache_attr_changed_op);
	op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs:
	spin_unlock(&cookie->lock);
	kfree(op);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
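
/*
 * Example (illustrative only, not part of the original file): a netfs would
 * normally reach __fscache_attr_changed() through the fscache_attr_changed()
 * wrapper from linux/fscache.h once an inode's attributes (typically its
 * size) have changed, e.g. from its setattr path.  The mynetfs_* names below
 * are hypothetical:
 *
 *	struct fscache_cookie *cookie = mynetfs_i(inode)->fscache;
 *
 *	if (fscache_attr_changed(cookie) < 0)
 *		(the cache object could not be updated; -ENOBUFS just means
 *		 there is currently no backing object to resize)
 */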

/*
 * handle secondary execution given to a retrieval op on behalf of the
 * cache
 */
static void fscache_retrieval_work(struct work_struct *work)
{
	struct fscache_retrieval *op =
		container_of(work, struct fscache_retrieval, op.fast_work);
	unsigned long start;

	_enter("{OP%x}", op->op.debug_id);

	start = jiffies;
	op->op.processor(&op->op);
	fscache_hist(fscache_ops_histogram, start);
	fscache_put_operation(&op->op);
}

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->op.object->cookie, op->context);

	_leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, fscache_release_retrieval_op);
	op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
	op->mapping = mapping;
	op->end_io_func = end_io_func;
	op->context = context;
	op->start_time = jiffies;
	INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
	INIT_LIST_HEAD(&op->to_do);
	return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
		_debug(">>> WT");
		fscache_stat(&fscache_n_retrieval_op_waits);
		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
		_debug("<<< GO");
	}

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		ret = object->cache->ops->allocate_page(op, page, gfp);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
	}

	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
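
/*
 * Example (illustrative only, not part of the original file): a netfs
 * readpage() implementation would usually go through the
 * fscache_read_or_alloc_page() wrapper from linux/fscache.h, falling back to
 * a server read when the cache has no data.  The mynetfs_* names are
 * hypothetical:
 *
 *	ret = fscache_read_or_alloc_page(cookie, page,
 *					 mynetfs_readpage_from_cache_done,
 *					 ctx, GFP_KERNEL);
 *	if (ret == 0)
 *		return 0;	(read dispatched; end_io_func completes the page)
 *	if (ret == -ENOBUFS || ret == -ENODATA)
 *		return mynetfs_readpage_from_server(page);
 *	return ret;		(-ENOMEM or -ERESTARTSYS)
 */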

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *		  pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *		  the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	fscache_pages_retrieval_func_t func;
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
		_debug(">>> WT");
		fscache_stat(&fscache_n_retrieval_op_waits);
		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
		_debug("<<< GO");
	}

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags))
		func = object->cache->ops->allocate_pages;
	else
		func = object->cache->ops->read_or_alloc_pages;
	ret = func(op, pages, nr_pages, gfp);

	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
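
/*
 * Example (illustrative only, not part of the original file): a netfs
 * readpages() implementation might use the fscache_read_or_alloc_pages()
 * wrapper; pages that could be dispatched to the cache are unlinked from the
 * list and *nr_pages is reduced, so whatever is left must still be fetched
 * from the server.  Names with a mynetfs_ prefix are hypothetical:
 *
 *	ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
 *					  mynetfs_readpage_from_cache_done,
 *					  ctx, mapping_gfp_mask(mapping));
 *	if (ret == 0 && nr_pages == 0)
 *		return 0;	(everything is coming from the cache)
 *	return mynetfs_readpages_from_server(mapping, pages, nr_pages);
 */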

/*
 * allocate a block in the cache in which to store a page
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
		_debug(">>> WT");
		fscache_stat(&fscache_n_alloc_op_waits);
		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
		_debug("<<< GO");
	}

	/* ask the cache to honour the operation */
	ret = object->cache->ops->allocate_page(op, page, gfp);

	if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
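
/*
 * Example (illustrative only, not part of the original file): the
 * fscache_alloc_page() wrapper would typically be used when a page is about
 * to be filled by some means other than a read from the cache (e.g. it is
 * about to be written in its entirety), so that a backing block is reserved
 * for a later fscache_write_page():
 *
 *	ret = fscache_alloc_page(cookie, page, GFP_KERNEL);
 *	if (ret == 0)
 *		(block reserved; the backend will normally have marked the
 *		 page PG_fscache, so remember to uncache it later)
 *	else if (ret == -ENOBUFS)
 *		(no cache available; just skip caching this page)
 */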

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie = object->cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

	spin_lock(&cookie->lock);
	spin_lock(&object->lock);

	if (!fscache_object_is_active(object)) {
		spin_unlock(&object->lock);
		spin_unlock(&cookie->lock);
		_leave("");
		return;
	}

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);
	if (page->index > op->store_limit)
		goto superseded;

	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);

	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);

	if (page) {
		ret = object->cache->ops->write_page(op, page);
		fscache_end_page_write(cookie, page);
		page_cache_release(page);
		if (ret < 0)
			fscache_abort_object(object);
		else
			fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	_leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write; the page will be stored in the background
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *	    fill op)
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_release_write_op);
	fscache_operation_init_slow(&op->op, fscache_write_op);
	op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);

	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	page_cache_get(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&object->lock);

	op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the slow work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	radix_tree_delete(&cookie->stores, page->index);
	page_cache_release(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	kfree(op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
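
/*
 * Example (illustrative only, not part of the original file): once a netfs
 * has a page's data (for instance in its read-from-server completion), it can
 * hand the page to the cache through the fscache_write_page() wrapper; the
 * documented convention is to uncache the page again if the store cannot be
 * queued:
 *
 *	if (PageFsCache(page) &&
 *	    fscache_write_page(cookie, page, GFP_KERNEL) != 0)
 *		fscache_uncache_page(cookie, page);
 */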

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		object->cache->ops->uncache_page(object, page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);
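
/*
 * Example (illustrative only, not part of the original file): the usual place
 * to call the fscache_uncache_page() wrapper is the netfs's releasepage() and
 * invalidatepage() handlers, after making sure any store to the cache has
 * finished.  A hypothetical sketch (mynetfs_* names are assumptions):
 *
 *	static int mynetfs_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		struct fscache_cookie *cookie = mynetfs_page_cookie(page);
 *
 *		if (PageFsCache(page)) {
 *			fscache_wait_on_page_write(cookie, page);
 *			fscache_uncache_page(cookie, page);
 *		}
 *		return 1;
 *	}
 */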

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	struct fscache_cookie *cookie = op->op.object->cookie;
	unsigned long loop;

#ifdef CONFIG_FSCACHE_STATS
	atomic_add(pagevec->nr, &fscache_n_marks);
#endif

	for (loop = 0; loop < pagevec->nr; loop++) {
		struct page *page = pagevec->pages[loop];

		_debug("- mark %p{%lx}", page, page->index);
		if (TestSetPageFsCache(page)) {
			static bool once_only;
			if (!once_only) {
				once_only = true;
				printk(KERN_WARNING "FS-Cache:"
				       " Cookie type %s marked page %lx"
				       " multiple times\n",
				       cookie->def->name, page->index);
			}
		}
	}

	if (cookie->def->mark_pages_cached)
		cookie->def->mark_pages_cached(cookie->netfs_data,
					       op->mapping, pagevec);
	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);
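
/*
 * Example (illustrative only, not part of the original file):
 * fscache_mark_pages_cached() is meant to be called by the cache backend
 * (e.g. cachefiles), not by the netfs, for the pages it has just read or
 * allocated backing space for; since the pagevec is reinitialised on return,
 * it can be refilled and flushed in batches:
 *
 *	struct pagevec pagevec;
 *
 *	pagevec_init(&pagevec, 0);
 *	list_for_each_entry(page, list, lru) {
 *		if (!pagevec_add(&pagevec, page))
 *			fscache_mark_pages_cached(op, &pagevec);
 *	}
 *	if (pagevec_count(&pagevec) > 0)
 *		fscache_mark_pages_cached(op, &pagevec);
 */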