page.c

/* Cache page management and data I/O routines
 *
 * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define FSCACHE_DEBUG_LEVEL PAGE
#include <linux/module.h>
#include <linux/fscache-cache.h>
#include <linux/buffer_head.h>
#include <linux/pagevec.h>
#include "internal.h"
/*
 * check to see if a page is being written to the cache
 */
bool __fscache_check_page_write(struct fscache_cookie *cookie, struct page *page)
{
        void *val;

        rcu_read_lock();
        val = radix_tree_lookup(&cookie->stores, page->index);
        rcu_read_unlock();

        return val != NULL;
}
EXPORT_SYMBOL(__fscache_check_page_write);

/*
 * wait for a page to finish being written to the cache
 */
void __fscache_wait_on_page_write(struct fscache_cookie *cookie, struct page *page)
{
        wait_queue_head_t *wq = bit_waitqueue(&cookie->flags, 0);

        wait_event(*wq, !__fscache_check_page_write(cookie, page));
}
EXPORT_SYMBOL(__fscache_wait_on_page_write);
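
/*
 * Example (illustrative sketch): a netfs typically waits for any in-flight
 * store to the cache to finish before letting a page go, e.g. from its
 * releasepage() handler.  fscache_wait_on_page_write() and
 * fscache_uncache_page() are the linux/fscache.h wrappers for routines in
 * this file; the example_* names are hypothetical netfs helpers.
 *
 *        int example_releasepage(struct page *page, gfp_t gfp)
 *        {
 *                struct fscache_cookie *cookie =
 *                        example_cookie(page->mapping->host);
 *
 *                if (PageFsCache(page)) {
 *                        fscache_wait_on_page_write(cookie, page);
 *                        fscache_uncache_page(cookie, page);
 *                }
 *                return 1;
 *        }
 */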

/*
 * note that a page has finished being written to the cache
 */
static void fscache_end_page_write(struct fscache_object *object,
                                   struct page *page)
{
        struct fscache_cookie *cookie;
        struct page *xpage = NULL;

        spin_lock(&object->lock);
        cookie = object->cookie;
        if (cookie) {
                /* delete the page from the tree if it is now no longer
                 * pending */
                spin_lock(&cookie->stores_lock);
                if (!radix_tree_tag_get(&cookie->stores, page->index,
                                        FSCACHE_COOKIE_PENDING_TAG)) {
                        fscache_stat(&fscache_n_store_radix_deletes);
                        xpage = radix_tree_delete(&cookie->stores, page->index);
                }
                spin_unlock(&cookie->stores_lock);
                wake_up_bit(&cookie->flags, 0);
        }
        spin_unlock(&object->lock);
        if (xpage)
                page_cache_release(xpage);
}

/*
 * actually apply the changed attributes to a cache object
 */
static void fscache_attr_changed_op(struct fscache_operation *op)
{
        struct fscache_object *object = op->object;
        int ret;

        _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);

        fscache_stat(&fscache_n_attr_changed_calls);

        if (fscache_object_is_active(object)) {
                fscache_set_op_state(op, "CallFS");
                fscache_stat(&fscache_n_cop_attr_changed);
                ret = object->cache->ops->attr_changed(object);
                fscache_stat_d(&fscache_n_cop_attr_changed);
                fscache_set_op_state(op, "Done");
                if (ret < 0)
                        fscache_abort_object(object);
        }

        _leave("");
}

/*
 * notification that the attributes on an object have changed
 */
int __fscache_attr_changed(struct fscache_cookie *cookie)
{
        struct fscache_operation *op;
        struct fscache_object *object;

        _enter("%p", cookie);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);

        fscache_stat(&fscache_n_attr_changed);

        op = kzalloc(sizeof(*op), GFP_KERNEL);
        if (!op) {
                fscache_stat(&fscache_n_attr_changed_nomem);
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }

        fscache_operation_init(op, NULL);
        fscache_operation_init_slow(op, fscache_attr_changed_op);
        op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
        fscache_set_op_name(op, "Attr");

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_exclusive_op(object, op) < 0)
                goto nobufs;
        spin_unlock(&cookie->lock);
        fscache_stat(&fscache_n_attr_changed_ok);
        fscache_put_operation(op);
        _leave(" = 0");
        return 0;

nobufs:
        spin_unlock(&cookie->lock);
        kfree(op);
        fscache_stat(&fscache_n_attr_changed_nobufs);
        _leave(" = %d", -ENOBUFS);
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_attr_changed);
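
/*
 * Example (illustrative sketch): a netfs usually calls the
 * fscache_attr_changed() wrapper from linux/fscache.h when an inode's
 * attributes (typically its size) change, so the backing cache object can be
 * resized to match.  The example_cookie() helper is hypothetical.
 *
 *        if (attr->ia_valid & ATTR_SIZE)
 *                fscache_attr_changed(example_cookie(inode));
 */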

/*
 * handle secondary execution given to a retrieval op on behalf of the
 * cache
 */
static void fscache_retrieval_work(struct work_struct *work)
{
        struct fscache_retrieval *op =
                container_of(work, struct fscache_retrieval, op.fast_work);
        unsigned long start;

        _enter("{OP%x}", op->op.debug_id);

        start = jiffies;
        op->op.processor(&op->op);
        fscache_hist(fscache_ops_histogram, start);
        fscache_put_operation(&op->op);
}

/*
 * release a retrieval op reference
 */
static void fscache_release_retrieval_op(struct fscache_operation *_op)
{
        struct fscache_retrieval *op =
                container_of(_op, struct fscache_retrieval, op);

        _enter("{OP%x}", op->op.debug_id);

        fscache_hist(fscache_retrieval_histogram, op->start_time);
        if (op->context)
                fscache_put_context(op->op.object->cookie, op->context);

        _leave("");
}

/*
 * allocate a retrieval op
 */
static struct fscache_retrieval *fscache_alloc_retrieval(
        struct address_space *mapping,
        fscache_rw_complete_t end_io_func,
        void *context)
{
        struct fscache_retrieval *op;

        /* allocate a retrieval operation and attempt to submit it */
        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op) {
                fscache_stat(&fscache_n_retrievals_nomem);
                return NULL;
        }

        fscache_operation_init(&op->op, fscache_release_retrieval_op);
        op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
        op->mapping = mapping;
        op->end_io_func = end_io_func;
        op->context = context;
        op->start_time = jiffies;
        INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
        INIT_LIST_HEAD(&op->to_do);
        fscache_set_op_name(&op->op, "Retr");
        return op;
}

/*
 * wait for a deferred lookup to complete
 */
static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
{
        unsigned long jif;

        _enter("");

        if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
                _leave(" = 0 [imm]");
                return 0;
        }

        fscache_stat(&fscache_n_retrievals_wait);

        jif = jiffies;
        if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
                        fscache_wait_bit_interruptible,
                        TASK_INTERRUPTIBLE) != 0) {
                fscache_stat(&fscache_n_retrievals_intr);
                _leave(" = -ERESTARTSYS");
                return -ERESTARTSYS;
        }

        ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));

        smp_rmb();
        fscache_hist(fscache_retrieval_delay_histogram, jif);
        _leave(" = 0 [dly]");
        return 0;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - we return:
 *   -ENOMEM      - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS     - no backing object available in which to cache the block
 *   -ENODATA     - no data available in the backing object for this block
 *   0            - dispatched a read - it'll call end_io_func() when finished
 */
int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
                                 struct page *page,
                                 fscache_rw_complete_t end_io_func,
                                 void *context,
                                 gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
        if (!op) {
                _leave(" = -ENOMEM");
                return -ENOMEM;
        }
        fscache_set_op_name(&op->op, "RetrRA1");

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);

        atomic_inc(&object->n_reads);
        set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
                _debug(">>> WT");
                fscache_stat(&fscache_n_retrieval_op_waits);
                if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                                fscache_wait_bit_interruptible,
                                TASK_INTERRUPTIBLE) < 0) {
                        ret = fscache_cancel_op(&op->op);
                        if (ret == 0) {
                                ret = -ERESTARTSYS;
                                goto error;
                        }

                        /* it's been removed from the pending queue by another
                         * party, so we should get to run shortly */
                        wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                                    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
                }
                _debug("<<< GO");
        }

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_page);
                ret = object->cache->ops->allocate_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_page);
                if (ret == 0)
                        ret = -ENODATA;
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_page);
                ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_page);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_page);
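
/*
 * Example (illustrative sketch): how a netfs readpage() path might try the
 * cache first through the fscache_read_or_alloc_page() wrapper from
 * linux/fscache.h.  A return of 0 means the read was dispatched and the
 * completion callback will run later; -ENODATA or -ENOBUFS means the page
 * must be fetched from the server instead.  The example_* names are
 * hypothetical.
 *
 *        static void example_read_complete(struct page *page, void *context,
 *                                          int error)
 *        {
 *                if (!error)
 *                        SetPageUptodate(page);
 *                unlock_page(page);
 *        }
 *
 *        int example_readpage_from_cache(struct inode *inode, struct page *page)
 *        {
 *                int ret;
 *
 *                ret = fscache_read_or_alloc_page(example_cookie(inode), page,
 *                                                 example_read_complete,
 *                                                 NULL, GFP_KERNEL);
 *                switch (ret) {
 *                case 0:              // read dispatched to the cache
 *                        return 0;
 *                case -ENOBUFS:       // no cache space - read from the server
 *                case -ENODATA:       // block not yet cached - read from the server
 *                default:
 *                        return example_read_from_server(inode, page);
 *                }
 *        }
 */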

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 * - we return:
 *   -ENOMEM      - out of memory, some pages may be being read
 *   -ERESTARTSYS - interrupted, some pages may be being read
 *   -ENOBUFS     - no backing object or space available in which to cache any
 *                  pages not being read
 *   -ENODATA     - no data available in the backing object for some or all of
 *                  the pages
 *   0            - dispatched a read on all pages
 *
 * end_io_func() will be called for each page read from the cache as it
 * finishes being read
 *
 * any pages for which a read is dispatched will be removed from pages and
 * nr_pages
 */
int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
                                  struct address_space *mapping,
                                  struct list_head *pages,
                                  unsigned *nr_pages,
                                  fscache_rw_complete_t end_io_func,
                                  void *context,
                                  gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,,%d,,,", cookie, *nr_pages);

        fscache_stat(&fscache_n_retrievals);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(*nr_pages, >, 0);
        ASSERT(!list_empty(pages));

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(mapping, end_io_func, context);
        if (!op)
                return -ENOMEM;
        fscache_set_op_name(&op->op, "RetrRAN");

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        atomic_inc(&object->n_reads);
        set_bit(FSCACHE_OP_DEC_READ_CNT, &op->op.flags);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_retrieval_ops);

        /* pin the netfs read context in case we need to do the actual netfs
         * read because we've encountered a cache read failure */
        fscache_get_context(object->cookie, op->context);

        /* we wait for the operation to become active, and then process it
         * *here*, in this thread, and not in the thread pool */
        if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
                _debug(">>> WT");
                fscache_stat(&fscache_n_retrieval_op_waits);
                if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                                fscache_wait_bit_interruptible,
                                TASK_INTERRUPTIBLE) < 0) {
                        ret = fscache_cancel_op(&op->op);
                        if (ret == 0) {
                                ret = -ERESTARTSYS;
                                goto error;
                        }

                        /* it's been removed from the pending queue by another
                         * party, so we should get to run shortly */
                        wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                                    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
                }
                _debug("<<< GO");
        }

        /* ask the cache to honour the operation */
        if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
                fscache_stat(&fscache_n_cop_allocate_pages);
                ret = object->cache->ops->allocate_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_allocate_pages);
        } else {
                fscache_stat(&fscache_n_cop_read_or_alloc_pages);
                ret = object->cache->ops->read_or_alloc_pages(
                        op, pages, nr_pages, gfp);
                fscache_stat_d(&fscache_n_cop_read_or_alloc_pages);
        }

error:
        if (ret == -ENOMEM)
                fscache_stat(&fscache_n_retrievals_nomem);
        else if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_retrievals_intr);
        else if (ret == -ENODATA)
                fscache_stat(&fscache_n_retrievals_nodata);
        else if (ret < 0)
                fscache_stat(&fscache_n_retrievals_nobufs);
        else
                fscache_stat(&fscache_n_retrievals_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_retrievals_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
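
/*
 * Example (illustrative sketch): a netfs readpages() path can hand its whole
 * page list to the cache through the fscache_read_or_alloc_pages() wrapper
 * from linux/fscache.h.  Pages for which reads are dispatched are removed
 * from the list and deducted from *nr_pages; anything left over must still
 * be fetched from the server.  The example_* names are hypothetical.
 *
 *        ret = fscache_read_or_alloc_pages(example_cookie(inode), mapping,
 *                                          pages, &nr_pages,
 *                                          example_read_complete,
 *                                          NULL, GFP_KERNEL);
 *        switch (ret) {
 *        case 0:          // all pages dispatched to the cache
 *                return 0;
 *        default:         // read whatever remains from the server
 *                return example_read_list_from_server(inode, mapping,
 *                                                     pages, nr_pages);
 *        }
 */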

/*
 * allocate a block in the cache on which to store a page
 * - we return:
 *   -ENOMEM      - out of memory, nothing done
 *   -ERESTARTSYS - interrupted
 *   -ENOBUFS     - no backing object available in which to cache the block
 *   0            - block allocated
 */
int __fscache_alloc_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_retrieval *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%p,,,", cookie, page);

        fscache_stat(&fscache_n_allocs);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        if (fscache_wait_for_deferred_lookup(cookie) < 0)
                return -ERESTARTSYS;

        op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
        if (!op)
                return -ENOMEM;
        fscache_set_op_name(&op->op, "RetrAL1");

        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs_unlock;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        if (fscache_submit_op(object, &op->op) < 0)
                goto nobufs_unlock;
        spin_unlock(&cookie->lock);

        fscache_stat(&fscache_n_alloc_ops);

        if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
                _debug(">>> WT");
                fscache_stat(&fscache_n_alloc_op_waits);
                if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                                fscache_wait_bit_interruptible,
                                TASK_INTERRUPTIBLE) < 0) {
                        ret = fscache_cancel_op(&op->op);
                        if (ret == 0) {
                                ret = -ERESTARTSYS;
                                goto error;
                        }

                        /* it's been removed from the pending queue by another
                         * party, so we should get to run shortly */
                        wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
                                    fscache_wait_bit, TASK_UNINTERRUPTIBLE);
                }
                _debug("<<< GO");
        }

        /* ask the cache to honour the operation */
        fscache_stat(&fscache_n_cop_allocate_page);
        ret = object->cache->ops->allocate_page(op, page, gfp);
        fscache_stat_d(&fscache_n_cop_allocate_page);

error:
        if (ret == -ERESTARTSYS)
                fscache_stat(&fscache_n_allocs_intr);
        else if (ret < 0)
                fscache_stat(&fscache_n_allocs_nobufs);
        else
                fscache_stat(&fscache_n_allocs_ok);

        fscache_put_retrieval(op);
        _leave(" = %d", ret);
        return ret;

nobufs_unlock:
        spin_unlock(&cookie->lock);
        kfree(op);
nobufs:
        fscache_stat(&fscache_n_allocs_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;
}
EXPORT_SYMBOL(__fscache_alloc_page);
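
/*
 * Example (illustrative sketch): before storing a page it obtained without
 * the cache's help, a netfs may reserve backing space with the
 * fscache_alloc_page() wrapper from linux/fscache.h and then hand the data
 * over with fscache_write_page().  The example_cookie() helper is
 * hypothetical.
 *
 *        if (fscache_alloc_page(example_cookie(inode), page, GFP_KERNEL) == 0)
 *                fscache_write_page(example_cookie(inode), page, GFP_KERNEL);
 */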

/*
 * release a write op reference
 */
static void fscache_release_write_op(struct fscache_operation *_op)
{
        _enter("{OP%x}", _op->debug_id);
}

/*
 * perform the background storage of a page into the cache
 */
static void fscache_write_op(struct fscache_operation *_op)
{
        struct fscache_storage *op =
                container_of(_op, struct fscache_storage, op);
        struct fscache_object *object = op->op.object;
        struct fscache_cookie *cookie;
        struct page *page;
        unsigned n;
        void *results[1];
        int ret;

        _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));

        fscache_set_op_state(&op->op, "GetPage");

        spin_lock(&object->lock);
        cookie = object->cookie;

        if (!fscache_object_is_active(object) || !cookie) {
                spin_unlock(&object->lock);
                _leave("");
                return;
        }

        spin_lock(&cookie->stores_lock);

        fscache_stat(&fscache_n_store_calls);

        /* find a page to store */
        page = NULL;
        n = radix_tree_gang_lookup_tag(&cookie->stores, results, 0, 1,
                                       FSCACHE_COOKIE_PENDING_TAG);
        if (n != 1)
                goto superseded;
        page = results[0];
        _debug("gang %d [%lx]", n, page->index);
        if (page->index > op->store_limit) {
                fscache_stat(&fscache_n_store_pages_over_limit);
                goto superseded;
        }

        radix_tree_tag_clear(&cookie->stores, page->index,
                             FSCACHE_COOKIE_PENDING_TAG);

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        if (page) {
                fscache_set_op_state(&op->op, "Store");
                fscache_stat(&fscache_n_store_pages);
                fscache_stat(&fscache_n_cop_write_page);
                ret = object->cache->ops->write_page(op, page);
                fscache_stat_d(&fscache_n_cop_write_page);
                fscache_set_op_state(&op->op, "EndWrite");
                fscache_end_page_write(object, page);
                if (ret < 0) {
                        fscache_set_op_state(&op->op, "Abort");
                        fscache_abort_object(object);
                } else {
                        fscache_enqueue_operation(&op->op);
                }
        }

        _leave("");
        return;

superseded:
        /* this writer is going away and there aren't any more things to
         * write */
        _debug("cease");
        spin_unlock(&cookie->stores_lock);
        clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
        spin_unlock(&object->lock);
        _leave("");
}

/*
 * request a page be stored in the cache
 * - returns:
 *   -ENOMEM      - out of memory, nothing done
 *   -ENOBUFS     - no backing object available in which to cache the page
 *   0            - dispatched a write - it'll call end_io_func() when finished
 *
 * if the cookie still has a backing object at this point, that object can be
 * in one of a few states with respect to storage processing:
 *
 *  (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
 *      set)
 *
 *      (a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
 *          fill op)
 *
 *      (b) writes deferred till post-creation (mark page for writing and
 *          return immediately)
 *
 *  (2) negative lookup, object created, initial fill being made from netfs
 *      (FSCACHE_COOKIE_INITIAL_FILL is set)
 *
 *      (a) fill point not yet reached this page (mark page for writing and
 *          return)
 *
 *      (b) fill point passed this page (queue op to store this page)
 *
 *  (3) object extant (queue op to store this page)
 *
 * any other state is invalid
 */
int __fscache_write_page(struct fscache_cookie *cookie,
                         struct page *page,
                         gfp_t gfp)
{
        struct fscache_storage *op;
        struct fscache_object *object;
        int ret;

        _enter("%p,%x,", cookie, (u32) page->flags);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERT(PageFsCache(page));

        fscache_stat(&fscache_n_stores);

        op = kzalloc(sizeof(*op), GFP_NOIO);
        if (!op)
                goto nomem;

        fscache_operation_init(&op->op, fscache_release_write_op);
        fscache_operation_init_slow(&op->op, fscache_write_op);
        op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
        fscache_set_op_name(&op->op, "Write1");

        ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
        if (ret < 0)
                goto nomem_free;

        ret = -ENOBUFS;
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects))
                goto nobufs;
        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);
        if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
                goto nobufs;

        /* add the page to the pending-storage radix tree on the backing
         * object */
        spin_lock(&object->lock);
        spin_lock(&cookie->stores_lock);

        _debug("store limit %llx", (unsigned long long) object->store_limit);

        ret = radix_tree_insert(&cookie->stores, page->index, page);
        if (ret < 0) {
                if (ret == -EEXIST)
                        goto already_queued;
                _debug("insert failed %d", ret);
                goto nobufs_unlock_obj;
        }

        radix_tree_tag_set(&cookie->stores, page->index,
                           FSCACHE_COOKIE_PENDING_TAG);
        page_cache_get(page);

        /* we only want one writer at a time, but we do need to queue new
         * writers after exclusive ops */
        if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
                goto already_pending;

        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);

        op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
        op->store_limit = object->store_limit;

        if (fscache_submit_op(object, &op->op) < 0)
                goto submit_failed;

        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        fscache_stat(&fscache_n_store_ops);
        fscache_stat(&fscache_n_stores_ok);

        /* the slow work queue now carries its own ref on the object */
        fscache_put_operation(&op->op);
        _leave(" = 0");
        return 0;

already_queued:
        fscache_stat(&fscache_n_stores_again);
already_pending:
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_ok);
        _leave(" = 0");
        return 0;

submit_failed:
        spin_lock(&cookie->stores_lock);
        radix_tree_delete(&cookie->stores, page->index);
        spin_unlock(&cookie->stores_lock);
        page_cache_release(page);
        ret = -ENOBUFS;
        goto nobufs;

nobufs_unlock_obj:
        /* drop the stores lock taken above as well as the object lock */
        spin_unlock(&cookie->stores_lock);
        spin_unlock(&object->lock);
nobufs:
        spin_unlock(&cookie->lock);
        radix_tree_preload_end();
        kfree(op);
        fscache_stat(&fscache_n_stores_nobufs);
        _leave(" = -ENOBUFS");
        return -ENOBUFS;

nomem_free:
        kfree(op);
nomem:
        fscache_stat(&fscache_n_stores_oom);
        _leave(" = -ENOMEM");
        return -ENOMEM;
}
EXPORT_SYMBOL(__fscache_write_page);
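
/*
 * Example (illustrative sketch): once a netfs has read a page from the
 * server, it can copy it into the cache with the fscache_write_page()
 * wrapper from linux/fscache.h, backing out with fscache_uncache_page() if
 * the store cannot be queued.  The example_* names are hypothetical.
 *
 *        void example_readpage_to_cache(struct inode *inode, struct page *page)
 *        {
 *                int ret;
 *
 *                ret = fscache_write_page(example_cookie(inode), page,
 *                                         GFP_KERNEL);
 *                if (ret != 0)
 *                        fscache_uncache_page(example_cookie(inode), page);
 *        }
 */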

/*
 * remove a page from the cache
 */
void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
{
        struct fscache_object *object;

        _enter(",%p", page);

        ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
        ASSERTCMP(page, !=, NULL);

        fscache_stat(&fscache_n_uncaches);

        /* cache withdrawal may beat us to it */
        if (!PageFsCache(page))
                goto done;

        /* get the object */
        spin_lock(&cookie->lock);

        if (hlist_empty(&cookie->backing_objects)) {
                ClearPageFsCache(page);
                goto done_unlock;
        }

        object = hlist_entry(cookie->backing_objects.first,
                             struct fscache_object, cookie_link);

        /* there might now be stuff on disk we could read */
        clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);

        /* only invoke the cache backend if we managed to mark the page
         * uncached here; this deals with synchronisation vs withdrawal */
        if (TestClearPageFsCache(page) &&
            object->cache->ops->uncache_page) {
                /* the cache backend releases the cookie lock */
                fscache_stat(&fscache_n_cop_uncache_page);
                object->cache->ops->uncache_page(object, page);
                fscache_stat_d(&fscache_n_cop_uncache_page);
                goto done;
        }

done_unlock:
        spin_unlock(&cookie->lock);
done:
        _leave("");
}
EXPORT_SYMBOL(__fscache_uncache_page);

/**
 * fscache_mark_pages_cached - Mark pages as being cached
 * @op: The retrieval op pages are being marked for
 * @pagevec: The pages to be marked
 *
 * Mark a bunch of netfs pages as being cached.  After this is called,
 * the netfs must call fscache_uncache_page() to remove the mark.
 */
void fscache_mark_pages_cached(struct fscache_retrieval *op,
                               struct pagevec *pagevec)
{
        struct fscache_cookie *cookie = op->op.object->cookie;
        unsigned long loop;

#ifdef CONFIG_FSCACHE_STATS
        atomic_add(pagevec->nr, &fscache_n_marks);
#endif

        for (loop = 0; loop < pagevec->nr; loop++) {
                struct page *page = pagevec->pages[loop];

                _debug("- mark %p{%lx}", page, page->index);
                if (TestSetPageFsCache(page)) {
                        static bool once_only;
                        if (!once_only) {
                                once_only = true;
                                printk(KERN_WARNING "FS-Cache:"
                                       " Cookie type %s marked page %lx"
                                       " multiple times\n",
                                       cookie->def->name, page->index);
                        }
                }
        }

        if (cookie->def->mark_pages_cached)
                cookie->def->mark_pages_cached(cookie->netfs_data,
                                               op->mapping, pagevec);
        pagevec_reinit(pagevec);
}
EXPORT_SYMBOL(fscache_mark_pages_cached);
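
/*
 * Example (illustrative sketch): a cache backend implementing
 * read_or_alloc_pages() gathers the pages it has found or allocated into a
 * pagevec and marks them in one go; fscache_mark_pages_cached() reinitialises
 * the pagevec, so it can be refilled and flushed repeatedly.  The
 * example_backend_page_is_cached() helper is hypothetical.
 *
 *        struct pagevec pagevec;
 *        struct page *page;
 *
 *        pagevec_init(&pagevec, 0);
 *        list_for_each_entry(page, pages, lru) {
 *                if (example_backend_page_is_cached(object, page) &&
 *                    !pagevec_add(&pagevec, page))
 *                        fscache_mark_pages_cached(op, &pagevec);
 *        }
 *        if (pagevec_count(&pagevec) > 0)
 *                fscache_mark_pages_cached(op, &pagevec);
 */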