/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
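
/*
 * Note: bit 0 of cookie->flags is used above purely as a wait address -
 * storers call wake_up_bit(&cookie->flags, 0) when a write completes or is
 * cancelled (see fscache_end_page_write() and __fscache_maybe_release_page()
 * below); the waiter re-tests the stores radix tree rather than the bit
 * value itself.
 */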

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		page_cache_release(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* we might want to wait here, but that could deadlock the allocator as
	 * the work threads writing to the cache may all end up sleeping
	 * on memory allocation */
	fscache_stat(&fscache_n_store_vmscan_busy);
	return false;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
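
/*
 * Illustrative sketch, not part of this file: a netfs would normally reach
 * __fscache_maybe_release_page() through the fscache_maybe_release_page()
 * wrapper from its address_space ->releasepage() op.  Names prefixed
 * "example_" are hypothetical:
 *
 *	static int example_releasepage(struct page *page, gfp_t gfp)
 *	{
 *		struct fscache_cookie *cookie =
 *			example_inode_cookie(page->mapping->host);
 *
 *		if (PageFsCache(page) &&
 *		    !fscache_maybe_release_page(cookie, page, gfp))
 *			return 0;	<- cache still needs the page
 *		return 1;		<- page may be released
 *	}
 */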

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
	}
	spin_unlock(&object->lock);
	if (xpage)
		page_cache_release(xpage);
}
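
/*
 * Note: in fscache_end_page_write() above, if PENDING_TAG is still set once
 * STORING_TAG has been cleared, another store for the same page was queued
 * while this one was in flight, so the page is deliberately left in the
 * stores tree for the next pass of fscache_write_op().
 */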

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		if (ret < 0)
			fscache_abort_object(object);
	}

	fscache_op_complete(op);
	_leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, fscache_attr_changed_op, NULL);
	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs:
	spin_unlock(&cookie->lock);
	kfree(op);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
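
/*
 * Illustrative note, not from this file: the netfs-facing wrapper for the
 * above is fscache_attr_changed(), which a netfs would typically call after
 * an operation that changes the file size (e.g. a successful setattr or
 * truncate) so that the cache can adjust its backing store to match.
 */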

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	ASSERTCMP(op->n_pages, ==, 0);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->op.object->cookie, op->context);

	_leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
	op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
	op->mapping = mapping;
	op->end_io_func = end_io_func;
	op->context = context;
	op->start_time = jiffies;
	INIT_LIST_HEAD(&op->to_do);
	return op;
}
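
/*
 * Note: fscache_operation_init() above starts the op with a single
 * reference; fscache_put_retrieval() drops it, and once the count reaches
 * zero fscache_release_retrieval_op() runs to release the netfs read
 * context pinned by the callers below.
 */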

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}

/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
						 struct fscache_retrieval *op,
						 atomic_t *stat_op_waits,
						 atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
		goto check_if_dead;

	_debug(">>> WT");
	fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) < 0) {
		ret = fscache_cancel_op(&op->op);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
		fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [cancelled]");
		return -ENOBUFS;
	}
	if (unlikely(fscache_object_is_dead(object))) {
		pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
		fscache_cancel_op(&op->op);
		fscache_stat(stat_object_dead);
		return -ENOBUFS;
	}
	return 0;
}
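
/*
 * Note: FSCACHE_OP_WAITING is set by fscache_alloc_retrieval() and is
 * cleared (with a wake-up) when the operation is started; an interrupted
 * waiter above first tries fscache_cancel_op(), and only if the op has
 * already been taken off the pending queue does it fall back to an
 * uninterruptible wait, since the op is then about to run anyway.
 */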

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	op->n_pages = 1;

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
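
/*
 * Illustrative sketch, not part of this file: a netfs ->readpage() would
 * typically try the cache first through the fscache_read_or_alloc_page()
 * wrapper and fall back to fetching from the server itself.  The
 * "example_" names are hypothetical:
 *
 *	ret = fscache_read_or_alloc_page(cookie, page,
 *					 example_read_complete, NULL,
 *					 GFP_KERNEL);
 *	switch (ret) {
 *	case 0:			<- read dispatched; end_io_func will run
 *		return 0;
 *	case -ENOBUFS:		<- no cache space available
 *	case -ENODATA:		<- block allocated but no data yet
 *	default:
 *		return example_read_from_server(page);
 *	}
 */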

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *		  pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *		  the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	op->n_pages = *nr_pages;

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
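
/*
 * Note: as the header comment above says, pages for which a read is
 * dispatched are unhooked from *pages and subtracted from *nr_pages by the
 * cache backend, so on a partial result the netfs can simply hand whatever
 * remains on the list to its own server-read path.
 */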

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	op->n_pages = 1;

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
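
/*
 * Illustrative note, not from this file: the fscache_alloc_page() wrapper
 * suits the case where the netfs is about to write a complete page and has
 * no use for any old data - it only needs a block reserved into which
 * fscache_write_page() can later store the new contents.
 */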

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object) || !cookie) {
		spin_unlock(&object->lock);
		_leave("");
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);
	if (page->index > op->store_limit) {
		fscache_stat(&fscache_n_store_pages_over_limit);
		goto superseded;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	fscache_end_page_write(object, page);
	if (ret < 0) {
		fscache_abort_object(object);
		fscache_op_complete(&op->op);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	fscache_op_complete(&op->op);
	_leave("");
}

/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
	struct page *page;
	void *results[16];
	int n, i;

	_enter("");

	while (spin_lock(&cookie->stores_lock),
	       n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
					      ARRAY_SIZE(results),
					      FSCACHE_COOKIE_PENDING_TAG),
	       n > 0) {
		for (i = n - 1; i >= 0; i--) {
			page = results[i];
			radix_tree_delete(&cookie->stores, page->index);
		}

		spin_unlock(&cookie->stores_lock);

		for (i = n - 1; i >= 0; i--)
			page_cache_release(results[i]);
	}

	spin_unlock(&cookie->stores_lock);
	_leave("");
}
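
/*
 * Note: the comma-expression while loop above re-takes stores_lock on every
 * pass, unhooks up to 16 pending pages from the radix tree, then drops the
 * lock before releasing the page references; the loop condition exits with
 * the lock still held, hence the final unlock.
 */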

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *	    fill op)
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_write_op,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);

	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);
	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	page_cache_get(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);
	page_cache_release(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	kfree(op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
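
/*
 * Illustrative sketch, not part of this file: once a page carries
 * PG_fscache (from fscache_read_or_alloc_page() or fscache_alloc_page()),
 * the netfs would typically push new data to the cache with the
 * fscache_write_page() wrapper and uncache the page if that fails:
 *
 *	ret = fscache_write_page(cookie, page, GFP_KERNEL);
 *	if (ret != 0)
 *		fscache_uncache_page(cookie, page);
 */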

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
	struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
	atomic_inc(&fscache_n_marks);
#endif

	_debug("- mark %p{%lx}", page, page->index);
	if (TestSetPageFsCache(page)) {
		static bool once_only;
		if (!once_only) {
			once_only = true;
			printk(KERN_WARNING "FS-Cache:"
			       " Cookie type %s marked page %lx"
			       " multiple times\n",
			       cookie->def->name, page->index);
		}
	}

	if (cookie->def->mark_page_cached)
		cookie->def->mark_page_cached(cookie->netfs_data,
					      op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	unsigned long loop;

	for (loop = 0; loop < pagevec->nr; loop++)
		fscache_mark_page_cached(op, pagevec->pages[loop]);

	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
				       struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t next;
	int i;

	_enter("%p,%p", cookie, inode);

	if (!mapping || mapping->nrpages == 0) {
		_leave(" [no pages]");
		return;
	}

	pagevec_init(&pvec, 0);
	next = 0;
	do {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			next = page->index;
			if (PageFsCache(page)) {
				__fscache_wait_on_page_write(cookie, page);
				__fscache_uncache_page(cookie, page);
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (++next);

	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);