/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
	void *val;

	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	rcu_read_unlock();

	return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
	wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

	wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
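
/*
 * Illustrative sketch (not part of this file): before discarding a netfs
 * page, e.g. from an ->invalidatepage() handler, a netfs can use the
 * wrappers above to wait out any store in flight and then drop the
 * cache's mark.  mynetfs_i() is a hypothetical helper returning the
 * inode's cookie.
 */
#if 0
static void mynetfs_invalidate_page(struct page *page, unsigned long offset)
{
	struct fscache_cookie *cookie = mynetfs_i(page->mapping->host)->fscache;

	if (PageFsCache(page)) {
		/* once no store is in flight, remove PG_fscache */
		fscache_wait_on_page_write(cookie, page);
		fscache_uncache_page(cookie, page);
	}
}
#endif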

/*
 * decide whether a page can be released, possibly by cancelling a store to it
 * - we're allowed to sleep if __GFP_WAIT is flagged
 */
bool __fscache_maybe_release_page(struct fscache_cookie *cookie,
				  struct page *page,
				  gfp_t gfp)
{
	struct page *xpage;
	void *val;

	_enter("%p,%p,%x", cookie, page, gfp);

try_again:
	rcu_read_lock();
	val = radix_tree_lookup(&cookie->stores, page->index);
	if (!val) {
		rcu_read_unlock();
		fscache_stat(&fscache_n_store_vmscan_not_storing);
		__fscache_uncache_page(cookie, page);
		return true;
	}

	/* see if the page is actually undergoing storage - if so we can't get
	 * rid of it till the cache has finished with it */
	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		rcu_read_unlock();
		goto page_busy;
	}

	/* the page is pending storage, so we attempt to cancel the store and
	 * discard the store request so that the page can be reclaimed */
	spin_lock(&cookie->stores_lock);
	rcu_read_unlock();

	if (radix_tree_tag_get(&cookie->stores, page->index,
			       FSCACHE_COOKIE_STORING_TAG)) {
		/* the page started to undergo storage whilst we were looking,
		 * so now we can only wait or return */
		spin_unlock(&cookie->stores_lock);
		goto page_busy;
	}

	xpage = radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);

	if (xpage) {
		fscache_stat(&fscache_n_store_vmscan_cancelled);
		fscache_stat(&fscache_n_store_radix_deletes);
		ASSERTCMP(xpage, ==, page);
	} else {
		fscache_stat(&fscache_n_store_vmscan_gone);
	}

	wake_up_bit(&cookie->flags, 0);
	if (xpage)
		page_cache_release(xpage);
	__fscache_uncache_page(cookie, page);
	return true;

page_busy:
	/* We will wait here if we're allowed to, but that could deadlock the
	 * allocator as the work threads writing to the cache may all end up
	 * sleeping on memory allocation, so we may need to impose a timeout
	 * too. */
	if (!(gfp & __GFP_WAIT)) {
		fscache_stat(&fscache_n_store_vmscan_busy);
		return false;
	}

	fscache_stat(&fscache_n_store_vmscan_wait);
	__fscache_wait_on_page_write(cookie, page);
	gfp &= ~__GFP_WAIT;
	goto try_again;
}
EXPORT_SYMBOL(__fscache_maybe_release_page);
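
/*
 * Illustrative sketch (not part of this file): a netfs ->releasepage()
 * typically defers to the cache so a pending store can be cancelled or
 * waited for, following the pattern used by NFS.  mynetfs_i() is a
 * hypothetical helper returning the inode's cookie; the wrapper checks
 * PageFsCache() and cookie validity itself.
 */
#if 0
static int mynetfs_release_page(struct page *page, gfp_t gfp)
{
	struct fscache_cookie *cookie = mynetfs_i(page->mapping->host)->fscache;

	/* false means the cache is still busy with the page */
	if (!fscache_maybe_release_page(cookie, page, gfp))
		return 0;
	return 1;
}
#endif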

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
				   struct page *page)
{
	struct fscache_cookie *cookie;
	struct page *xpage = NULL;

	spin_lock(&object->lock);
	cookie = object->cookie;
	if (cookie) {
		/* delete the page from the tree if it is now no longer
		 * pending */
		spin_lock(&cookie->stores_lock);
		radix_tree_tag_clear(&cookie->stores, page->index,
				     FSCACHE_COOKIE_STORING_TAG);
		if (!radix_tree_tag_get(&cookie->stores, page->index,
					FSCACHE_COOKIE_PENDING_TAG)) {
			fscache_stat(&fscache_n_store_radix_deletes);
			xpage = radix_tree_delete(&cookie->stores, page->index);
		}
		spin_unlock(&cookie->stores_lock);
		wake_up_bit(&cookie->flags, 0);
	}
	spin_unlock(&object->lock);
	if (xpage)
		page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
	struct fscache_object *object = op->object;
	int ret;

	_enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

	fscache_stat(&fscache_n_attr_changed_calls);

	if (fscache_object_is_active(object)) {
		fscache_stat(&fscache_n_cop_attr_changed);
		ret = object->cache->ops->attr_changed(object);
		fscache_stat_d(&fscache_n_cop_attr_changed);
		if (ret < 0)
			fscache_abort_object(object);
	}

	fscache_op_complete(op);
	_leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
	struct fscache_operation *op;
	struct fscache_object *object;

	_enter("%p", cookie);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

	fscache_stat(&fscache_n_attr_changed);

	op = kzalloc(sizeof(*op), GFP_KERNEL);
	if (!op) {
		fscache_stat(&fscache_n_attr_changed_nomem);
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}

	fscache_operation_init(op, fscache_attr_changed_op, NULL);
	op->flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_EXCLUSIVE);

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_exclusive_op(object, op) < 0)
		goto nobufs;
	spin_unlock(&cookie->lock);
	fscache_stat(&fscache_n_attr_changed_ok);
	fscache_put_operation(op);
	_leave(" = 0");
	return 0;

nobufs:
	spin_unlock(&cookie->lock);
	kfree(op);
	fscache_stat(&fscache_n_attr_changed_nobufs);
	_leave(" = %d", -ENOBUFS);
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
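
/*
 * Illustrative sketch (not part of this file): a netfs would typically
 * call the fscache_attr_changed() wrapper after an inode's attributes
 * (notably i_size) change, e.g. at the end of its ->setattr(), so the
 * cache can resize or reserve storage.  mynetfs_i() is a hypothetical
 * helper returning the inode's cookie.
 */
#if 0
static void mynetfs_setattr_done(struct inode *inode)
{
	/* tell the cache the object's attributes have changed */
	if (fscache_attr_changed(mynetfs_i(inode)->fscache) < 0)
		pr_debug("cache could not be updated\n");
}
#endif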

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
	struct fscache_retrieval *op =
		container_of(_op, struct fscache_retrieval, op);

	_enter("{OP%x}", op->op.debug_id);

	ASSERTCMP(op->n_pages, ==, 0);

	fscache_hist(fscache_retrieval_histogram, op->start_time);
	if (op->context)
		fscache_put_context(op->op.object->cookie, op->context);

	_leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
	struct address_space *mapping,
	fscache_rw_complete_t end_io_func,
	void *context)
{
	struct fscache_retrieval *op;

	/* allocate a retrieval operation and attempt to submit it */
	op = kzalloc(sizeof(*op), GFP_NOIO);
	if (!op) {
		fscache_stat(&fscache_n_retrievals_nomem);
		return NULL;
	}

	fscache_operation_init(&op->op, NULL, fscache_release_retrieval_op);
	op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
	op->mapping = mapping;
	op->end_io_func = end_io_func;
	op->context = context;
	op->start_time = jiffies;
	INIT_LIST_HEAD(&op->to_do);
	return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
	unsigned long jif;

	_enter("");

	if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
		_leave(" = 0 [imm]");
		return 0;
	}

	fscache_stat(&fscache_n_retrievals_wait);

	jif = jiffies;
	if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) != 0) {
		fscache_stat(&fscache_n_retrievals_intr);
		_leave(" = -ERESTARTSYS");
		return -ERESTARTSYS;
	}

	ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

	smp_rmb();
	fscache_hist(fscache_retrieval_delay_histogram, jif);
	_leave(" = 0 [dly]");
	return 0;
}

/*
 * wait for an object to become active (or dead)
 */
static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
						 struct fscache_retrieval *op,
						 atomic_t *stat_op_waits,
						 atomic_t *stat_object_dead)
{
	int ret;

	if (!test_bit(FSCACHE_OP_WAITING, &op->op.flags))
		goto check_if_dead;

	_debug(">>> WT");
	fscache_stat(stat_op_waits);
	if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			fscache_wait_bit_interruptible,
			TASK_INTERRUPTIBLE) < 0) {
		ret = fscache_cancel_op(&op->op);
		if (ret == 0)
			return -ERESTARTSYS;

		/* it's been removed from the pending queue by another party,
		 * so we should get to run shortly */
		wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
			    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
	}
	_debug("<<< GO");

check_if_dead:
	if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
		fscache_stat(stat_object_dead);
		_leave(" = -ENOBUFS [cancelled]");
		return -ENOBUFS;
	}
	if (unlikely(fscache_object_is_dead(object))) {
		pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
		fscache_cancel_op(&op->op);
		fscache_stat(stat_object_dead);
		return -ENOBUFS;
	}
	return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   -ENODATA	- no data available in the backing object for this block
 *   0		- dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
				 struct page *page,
				 fscache_rw_complete_t end_io_func,
				 void *context,
				 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
	if (!op) {
		_leave(" = -ENOMEM");
		return -ENOMEM;
	}
	op->n_pages = 1;

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_page);
		ret = object->cache->ops->allocate_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_page);
		if (ret == 0)
			ret = -ENODATA;
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_page);
		ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
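
/*
 * Illustrative sketch (not part of this file): a netfs ->readpage() can
 * try the cache first and only fall back to a server read on -ENODATA or
 * -ENOBUFS; the completion routine runs when the cache I/O finishes.
 * Modelled on the NFS pattern; the mynetfs_*() names are hypothetical.
 */
#if 0
static void mynetfs_readpage_from_cache_done(struct page *page,
					     void *context, int error)
{
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}

static int mynetfs_readpage(struct file *file, struct page *page)
{
	struct fscache_cookie *cookie = mynetfs_i(page->mapping->host)->fscache;
	int ret;

	ret = fscache_read_or_alloc_page(cookie, page,
					 mynetfs_readpage_from_cache_done,
					 NULL, GFP_KERNEL);
	switch (ret) {
	case 0:		/* read dispatched; callback will unlock the page */
		return 0;
	case -ENOBUFS:	/* no cache space: read from the server */
	case -ENODATA:	/* cache has no data yet: read from the server */
		return mynetfs_readpage_from_server(file, page);
	default:
		return ret;
	}
}
#endif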

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM	- out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS	- no backing object or space available in which to cache any
 *		  pages not being read
 *   -ENODATA	- no data available in the backing object for some or all of
 *		  the pages
 *   0		- dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
				  struct address_space *mapping,
				  struct list_head *pages,
				  unsigned *nr_pages,
				  fscache_rw_complete_t end_io_func,
				  void *context,
				  gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,,%d,,,", cookie, *nr_pages);

	fscache_stat(&fscache_n_retrievals);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(*nr_pages, >, 0);
	ASSERT(!list_empty(pages));

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(mapping, end_io_func, context);
	if (!op)
		return -ENOMEM;
	op->n_pages = *nr_pages;

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	atomic_inc(&object->n_reads);
	__set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock_dec;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_retrieval_ops);

	/* pin the netfs read context in case we need to do the actual netfs
	 * read because we've encountered a cache read failure */
	fscache_get_context(object->cookie, op->context);

	/* we wait for the operation to become active, and then process it
	 * *here*, in this thread, and not in the thread pool */
	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_retrieval_op_waits),
		__fscache_stat(&fscache_n_retrievals_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
		fscache_stat(&fscache_n_cop_allocate_pages);
		ret = object->cache->ops->allocate_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_allocate_pages);
	} else {
		fscache_stat(&fscache_n_cop_read_or_alloc_pages);
		ret = object->cache->ops->read_or_alloc_pages(
			op, pages, nr_pages, gfp);
		fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
	}

error:
	if (ret == -ENOMEM)
		fscache_stat(&fscache_n_retrievals_nomem);
	else if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_retrievals_intr);
	else if (ret == -ENODATA)
		fscache_stat(&fscache_n_retrievals_nodata);
	else if (ret < 0)
		fscache_stat(&fscache_n_retrievals_nobufs);
	else
		fscache_stat(&fscache_n_retrievals_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock_dec:
	atomic_dec(&object->n_reads);
nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_retrievals_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
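
/*
 * Illustrative sketch (not part of this file): ->readpages() works the
 * same way, but pages the cache takes on are removed from the list and
 * deducted from nr_pages, so only the remainder need go to the server.
 * Reuses the hypothetical completion routine from the previous sketch;
 * mynetfs_*() names are hypothetical.
 */
#if 0
static int mynetfs_readpages(struct file *file, struct address_space *mapping,
			     struct list_head *pages, unsigned nr_pages)
{
	struct fscache_cookie *cookie = mynetfs_i(mapping->host)->fscache;
	int ret;

	ret = fscache_read_or_alloc_pages(cookie, mapping, pages, &nr_pages,
					  mynetfs_readpage_from_cache_done,
					  NULL, GFP_KERNEL);
	if (ret == 0 && nr_pages == 0)
		return 0;	/* the cache took everything */

	/* read whatever the cache didn't take from the server */
	return mynetfs_readpages_from_server(file, mapping, pages, nr_pages);
}
#endif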

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM	- out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS	- no backing object available in which to cache the block
 *   0		- block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_retrieval *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%p,,,", cookie, page);

	fscache_stat(&fscache_n_allocs);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	if (fscache_wait_for_deferred_lookup(cookie) < 0)
		return -ERESTARTSYS;

	op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
	if (!op)
		return -ENOMEM;
	op->n_pages = 1;

	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs_unlock;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	if (fscache_submit_op(object, &op->op) < 0)
		goto nobufs_unlock;
	spin_unlock(&cookie->lock);

	fscache_stat(&fscache_n_alloc_ops);

	ret = fscache_wait_for_retrieval_activation(
		object, op,
		__fscache_stat(&fscache_n_alloc_op_waits),
		__fscache_stat(&fscache_n_allocs_object_dead));
	if (ret < 0)
		goto error;

	/* ask the cache to honour the operation */
	fscache_stat(&fscache_n_cop_allocate_page);
	ret = object->cache->ops->allocate_page(op, page, gfp);
	fscache_stat_d(&fscache_n_cop_allocate_page);

error:
	if (ret == -ERESTARTSYS)
		fscache_stat(&fscache_n_allocs_intr);
	else if (ret < 0)
		fscache_stat(&fscache_n_allocs_nobufs);
	else
		fscache_stat(&fscache_n_allocs_ok);

	fscache_put_retrieval(op);
	_leave(" = %d", ret);
	return ret;

nobufs_unlock:
	spin_unlock(&cookie->lock);
	kfree(op);
nobufs:
	fscache_stat(&fscache_n_allocs_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
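
/*
 * Illustrative sketch (not part of this file): one possible use of the
 * fscache_alloc_page() wrapper is to reserve a block for a page the netfs
 * has filled itself, then queue the data for storage; on success the page
 * is marked PG_fscache, which __fscache_write_page() requires.
 * mynetfs_i() is a hypothetical helper.
 */
#if 0
static void mynetfs_cache_locally_filled_page(struct inode *inode,
					      struct page *page)
{
	struct fscache_cookie *cookie = mynetfs_i(inode)->fscache;

	/* reserve a block, then hand the page over; undo the mark if the
	 * store can't be queued */
	if (fscache_alloc_page(cookie, page, GFP_KERNEL) == 0 &&
	    fscache_write_page(cookie, page, GFP_KERNEL) < 0)
		fscache_uncache_page(cookie, page);
}
#endif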

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
	_enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
	struct fscache_storage *op =
		container_of(_op, struct fscache_storage, op);
	struct fscache_object *object = op->op.object;
	struct fscache_cookie *cookie;
	struct page *page;
	unsigned n;
	void *results[1];
	int ret;

	_enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

	spin_lock(&object->lock);
	cookie = object->cookie;

	if (!fscache_object_is_active(object) || !cookie) {
		spin_unlock(&object->lock);
		_leave("");
		return;
	}

	spin_lock(&cookie->stores_lock);

	fscache_stat(&fscache_n_store_calls);

	/* find a page to store */
	page = NULL;
	n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
				       FSCACHE_COOKIE_PENDING_TAG);
	if (n != 1)
		goto superseded;
	page = results[0];
	_debug("gang %d [%lx]", n, page->index);
	if (page->index > op->store_limit) {
		fscache_stat(&fscache_n_store_pages_over_limit);
		goto superseded;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_STORING_TAG);
	radix_tree_tag_clear(&cookie->stores, page->index,
			     FSCACHE_COOKIE_PENDING_TAG);

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	fscache_stat(&fscache_n_store_pages);
	fscache_stat(&fscache_n_cop_write_page);
	ret = object->cache->ops->write_page(op, page);
	fscache_stat_d(&fscache_n_cop_write_page);
	fscache_end_page_write(object, page);
	if (ret < 0) {
		fscache_abort_object(object);
		fscache_op_complete(&op->op);
	} else {
		fscache_enqueue_operation(&op->op);
	}

	_leave("");
	return;

superseded:
	/* this writer is going away and there aren't any more things to
	 * write */
	_debug("cease");
	spin_unlock(&cookie->stores_lock);
	clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
	spin_unlock(&object->lock);
	fscache_op_complete(&op->op);
	_leave("");
}

/*
 * Clear the pages pending writing for invalidation
 */
void fscache_invalidate_writes(struct fscache_cookie *cookie)
{
	struct page *page;
	void *results[16];
	int n, i;

	_enter("");

	while (spin_lock(&cookie->stores_lock),
	       n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0,
					      ARRAY_SIZE(results),
					      FSCACHE_COOKIE_PENDING_TAG),
	       n > 0) {
		for (i = n - 1; i >= 0; i--) {
			page = results[i];
			radix_tree_delete(&cookie->stores, page->index);
		}

		spin_unlock(&cookie->stores_lock);

		for (i = n - 1; i >= 0; i--)
			page_cache_release(results[i]);
	}

	spin_unlock(&cookie->stores_lock);
	_leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM	- out of memory, nothing done
 *   -ENOBUFS	- no backing object available in which to cache the page
 *   0		- dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *	(a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *	    fill op)
 *
 *	(b) writes deferred till post-creation (mark page for writing and
 *	    return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *	(a) fill point not yet reached this page (mark page for writing and
 *	    return)
 *
 *	(b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
			 struct page *page,
			 gfp_t gfp)
{
	struct fscache_storage *op;
	struct fscache_object *object;
	int ret;

	_enter("%p,%x,", cookie, (u32) page->flags);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERT(PageFsCache(page));

	fscache_stat(&fscache_n_stores);

	if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
		_leave(" = -ENOBUFS [invalidating]");
		return -ENOBUFS;
	}

	op = kzalloc(sizeof(*op), GFP_NOIO | __GFP_NOMEMALLOC | __GFP_NORETRY);
	if (!op)
		goto nomem;

	fscache_operation_init(&op->op, fscache_write_op,
			       fscache_release_write_op);
	op->op.flags = FSCACHE_OP_ASYNC | (1 << FSCACHE_OP_WAITING);

	ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
	if (ret < 0)
		goto nomem_free;

	ret = -ENOBUFS;
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects))
		goto nobufs;
	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);
	if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
		goto nobufs;

	/* add the page to the pending-storage radix tree on the backing
	 * object */
	spin_lock(&object->lock);
	spin_lock(&cookie->stores_lock);

	_debug("store limit %llx", (unsigned long long) object->store_limit);

	ret = radix_tree_insert(&cookie->stores, page->index, page);
	if (ret < 0) {
		if (ret == -EEXIST)
			goto already_queued;
		_debug("insert failed %d", ret);
		goto nobufs_unlock_obj;
	}

	radix_tree_tag_set(&cookie->stores, page->index,
			   FSCACHE_COOKIE_PENDING_TAG);
	page_cache_get(page);

	/* we only want one writer at a time, but we do need to queue new
	 * writers after exclusive ops */
	if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
		goto already_pending;

	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);

	op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
	op->store_limit = object->store_limit;

	if (fscache_submit_op(object, &op->op) < 0)
		goto submit_failed;

	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	fscache_stat(&fscache_n_store_ops);
	fscache_stat(&fscache_n_stores_ok);

	/* the work queue now carries its own ref on the object */
	fscache_put_operation(&op->op);
	_leave(" = 0");
	return 0;

already_queued:
	fscache_stat(&fscache_n_stores_again);
already_pending:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_ok);
	_leave(" = 0");
	return 0;

submit_failed:
	spin_lock(&cookie->stores_lock);
	radix_tree_delete(&cookie->stores, page->index);
	spin_unlock(&cookie->stores_lock);
	page_cache_release(page);
	ret = -ENOBUFS;
	goto nobufs;

nobufs_unlock_obj:
	spin_unlock(&cookie->stores_lock);
	spin_unlock(&object->lock);
nobufs:
	spin_unlock(&cookie->lock);
	radix_tree_preload_end();
	kfree(op);
	fscache_stat(&fscache_n_stores_nobufs);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;

nomem_free:
	kfree(op);
nomem:
	fscache_stat(&fscache_n_stores_oom);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
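
/*
 * Illustrative sketch (not part of this file): after a server read
 * completes, the netfs hands the now-uptodate page to the cache; the
 * -ENODATA path of fscache_read_or_alloc_page() will have left the page
 * marked PG_fscache.  Modelled on the NFS pattern; mynetfs_i() is a
 * hypothetical helper.
 */
#if 0
static void mynetfs_readpage_to_cache(struct inode *inode, struct page *page)
{
	struct fscache_cookie *cookie = mynetfs_i(inode)->fscache;

	if (PageFsCache(page) &&
	    fscache_write_page(cookie, page, GFP_KERNEL) != 0)
		/* couldn't queue the store: drop the cache's mark */
		fscache_uncache_page(cookie, page);
}
#endif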

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
	struct fscache_object *object;

	_enter(",%p", page);

	ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
	ASSERTCMP(page, !=, NULL);

	fscache_stat(&fscache_n_uncaches);

	/* cache withdrawal may beat us to it */
	if (!PageFsCache(page))
		goto done;

	/* get the object */
	spin_lock(&cookie->lock);

	if (hlist_empty(&cookie->backing_objects)) {
		ClearPageFsCache(page);
		goto done_unlock;
	}

	object = hlist_entry(cookie->backing_objects.first,
			     struct fscache_object, cookie_link);

	/* there might now be stuff on disk we could read */
	clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

	/* only invoke the cache backend if we managed to mark the page
	 * uncached here; this deals with synchronisation vs withdrawal */
	if (TestClearPageFsCache(page) &&
	    object->cache->ops->uncache_page) {
		/* the cache backend releases the cookie lock */
		fscache_stat(&fscache_n_cop_uncache_page);
		object->cache->ops->uncache_page(object, page);
		fscache_stat_d(&fscache_n_cop_uncache_page);
		goto done;
	}

done_unlock:
	spin_unlock(&cookie->lock);
done:
	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_page_cached - Mark a page as being cached
 * @op: The retrieval op pages are being marked for
 * @page: The page to be marked
 *
 * Mark a netfs page as being cached.  After this is called, the netfs
 * must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
{
	struct fscache_cookie *cookie = op->op.object->cookie;

#ifdef CONFIG_FSCACHE_STATS
	atomic_inc(&fscache_n_marks);
#endif

	_debug("- mark %p{%lx}", page, page->index);
	if (TestSetPageFsCache(page)) {
		static bool once_only;
		if (!once_only) {
			once_only = true;
			printk(KERN_WARNING "FS-Cache:"
			       " Cookie type %s marked page %lx"
			       " multiple times\n",
			       cookie->def->name, page->index);
		}
	}

	if (cookie->def->mark_page_cached)
		cookie->def->mark_page_cached(cookie->netfs_data,
					      op->mapping, page);
}
EXPORT_SYMBOL(fscache_mark_page_cached);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
			       struct pagevec *pagevec)
{
	unsigned long loop;

	for (loop = 0; loop < pagevec->nr; loop++)
		fscache_mark_page_cached(op, pagevec->pages[loop]);

	pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);

/*
 * Uncache all the pages in an inode that are marked PG_fscache, assuming them
 * to be associated with the given cookie.
 */
void __fscache_uncache_all_inode_pages(struct fscache_cookie *cookie,
				       struct inode *inode)
{
	struct address_space *mapping = inode->i_mapping;
	struct pagevec pvec;
	pgoff_t next;
	int i;

	_enter("%p,%p", cookie, inode);

	if (!mapping || mapping->nrpages == 0) {
		_leave(" [no pages]");
		return;
	}

	pagevec_init(&pvec, 0);
	next = 0;
	do {
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE))
			break;
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			next = page->index;
			if (PageFsCache(page)) {
				__fscache_wait_on_page_write(cookie, page);
				__fscache_uncache_page(cookie, page);
			}
		}
		pagevec_release(&pvec);
		cond_resched();
	} while (++next);

	_leave("");
}
EXPORT_SYMBOL(__fscache_uncache_all_inode_pages);
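
/*
 * Illustrative sketch (not part of this file): on inode eviction a netfs
 * can sweep all PG_fscache pages in one call, while the pages are still
 * in the mapping, before relinquishing the cookie.  Abbreviated - a real
 * ->evict_inode() has more to do; mynetfs_i() is a hypothetical helper.
 */
#if 0
static void mynetfs_evict_inode(struct inode *inode)
{
	struct fscache_cookie *cookie = mynetfs_i(inode)->fscache;

	/* sweep marked pages before truncation removes them */
	fscache_uncache_all_inode_pages(cookie, inode);
	truncate_inode_pages(&inode->i_data, 0);
	fscache_relinquish_cookie(cookie, 0);
}
#endif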