/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */
#include <linux/mount.h>
#include <linux/slab.h>
#include <linux/file.h>
#include "internal.h"

/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct wait_bit_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->flags, key->bit_nr);

	if (key->flags != &page->flags ||
	    key->bit_nr != PG_locked)
		return 0;
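	/* note that page wait queues are hashed, so this waiter can be called
	 * for wake-ups on other pages and other bits that share the same
	 * queue; the check above accepts only PG_locked being cleared on our
	 * backing page */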

	_debug("--- monitor %p %lx ---", page, page->flags);

	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, not uptodate and not erroneous? */
		_debug("page probably truncated");
	}

	/* remove from the waitqueue */
	list_del(&wait->task_list);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(monitor->op);

	object = container_of(monitor->op->op.object,
			      struct cachefiles_object, fscache);

	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &monitor->op->to_do);
	spin_unlock(&object->work_lock);

	fscache_enqueue_retrieval(monitor->op);
	return 0;
}

/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = object->backer->d_inode->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       object->backer->d_inode->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);

	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto unlock_discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	struct pagevec pagevec;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", object->backer->d_inode->i_ino);

	pagevec_init(&pagevec, 0);

	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

	recheck:
		if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);

			fscache_mark_page_cached(monitor->op,
						 monitor->netfs_page);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		page_cache_release(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		page_cache_release(monitor->netfs_page);
		fscache_retrieval_complete(op, 1);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}

/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage,
					    struct pagevec *pagevec)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	pagevec_reinit(pagevec);

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

	/* attempt to get hold of the backing page */
	bmapping = object->backer->d_inode->i_mapping;
	newpage = NULL;

	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = __page_cache_alloc(cachefiles_gfp |
						     __GFP_COLD);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache(newpage, bmapping,
					netpage->index, cachefiles_gfp);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to add it
	 * to the LRU list and start it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

	page_cache_get(backpage);
	pagevec_add(pagevec, backpage);
	__pagevec_lru_add_file(pagevec);

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	page_cache_get(monitor->netfs_page);
	page_cache_get(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
	monitor = NULL;
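	/* from this point the monitor belongs to the waitqueue/copier path:
	 * cachefiles_read_waiter queues it and cachefiles_read_copier frees
	 * it, so we must not touch it again here */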

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		page_cache_release(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	fscache_mark_page_cached(op, netpage);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);
	fscache_retrieval_complete(op, 1);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		page_cache_release(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto out;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	fscache_retrieval_complete(op, 1);
	ret = -ENOBUFS;
	goto out;

nomem_page:
	page_cache_release(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOMEM");
	return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct inode *inode;
	sector_t block0, block;
	unsigned shift;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		goto enobufs;

	inode = object->backer->d_inode;
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	if (inode->i_sb->s_blocksize > PAGE_SIZE)
		goto enobufs;

	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
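	/* e.g. with 4KB pages on a filesystem using 1KB blocks, shift is
	 * 12 - 10 = 2, so page index N maps to filesystem block N << 2 in
	 * the bmap probe below */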

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	pagevec_init(&pagevec, 0);

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough for this as it doesn't indicate errors, but it's all we've
	 *   got for the moment
	 */
	block0 = page->index;
	block0 <<= shift;

	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
	_debug("%llx -> %llx",
	       (unsigned long long) block0,
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page,
						       &pagevec);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		fscache_mark_page_cached(op, page);
		fscache_retrieval_complete(op, 1);
		ret = -ENODATA;
	} else {
		goto enobufs;
	}

	_leave(" = %d", ret);
	return ret;

enobufs:
	fscache_retrieval_complete(op, 1);
	_leave(" = -ENOBUFS");
	return -ENOBUFS;
}

/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = object->backer->d_inode->i_mapping;
	struct pagevec lru_pvec;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	pagevec_init(&lru_pvec, 0);

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));

		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), cachefiles_gfp);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = __page_cache_alloc(cachefiles_gfp |
							     __GFP_COLD);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache(newpage, bmapping,
						netpage->index, cachefiles_gfp);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need to add it
		 * to the LRU list and start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

		page_cache_get(backpage);
		if (!pagevec_add(&lru_pvec, backpage))
			__pagevec_lru_add_file(&lru_pvec);

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

		ret = add_to_page_cache(netpage, op->mapping, netpage->index,
					cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				page_cache_release(netpage);
				continue;
			}
			goto nomem;
		}

		page_cache_get(netpage);
		if (!pagevec_add(&lru_pvec, netpage))
			__pagevec_lru_add_file(&lru_pvec);

		/* install a monitor */
		page_cache_get(netpage);
		monitor->netfs_page = netpage;

		page_cache_get(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
		monitor = NULL;

		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have to
		 * ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

		page_cache_release(backpage);
		backpage = NULL;

		page_cache_release(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor erroneous,
		 * so we need to attempt to read it again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);
	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache(netpage, op->mapping, netpage->index,
					cachefiles_gfp);
		if (ret < 0) {
			if (ret == -EEXIST) {
				page_cache_release(netpage);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		page_cache_release(backpage);
		backpage = NULL;

		fscache_mark_page_cached(op, netpage);

		page_cache_get(netpage);
		if (!pagevec_add(&lru_pvec, netpage))
			__pagevec_lru_add_file(&lru_pvec);

		/* the netpage is unlocked and marked up to date here */
		fscache_end_io(op, netpage, 0);
		fscache_retrieval_complete(op, 1);
		page_cache_release(netpage);
		netpage = NULL;
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	pagevec_lru_add_file(&lru_pvec);

	if (newpage)
		page_cache_release(newpage);
	if (netpage)
		page_cache_release(netpage);
	if (backpage)
		page_cache_release(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		page_cache_release(netpage);
		fscache_retrieval_complete(op, 1);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto out;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto out;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
	goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		goto all_enobufs;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = object->backer->d_inode;
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	if (inode->i_sb->s_blocksize > PAGE_SIZE)
		goto all_enobufs;

	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec, 0);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_ASYNC;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;

	ret = space ? -ENODATA : -ENOBUFS;
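	/* the default verdict set above covers pages left without a backing
	 * block: -ENODATA (when there is space) asks the netfs to fetch the
	 * data itself so it can be stored later, whereas -ENOBUFS means the
	 * cache can't help at all for lack of space */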
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block0, block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough for this as it doesn't indicate errors, but
		 *   it's all we've got for the moment
		 */
		block0 = page->index;
		block0 <<= shift;

		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
						      block0);
		_debug("%llx -> %llx",
		       (unsigned long long) block0,
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			fscache_retrieval_complete(op, 1);
			ret = -ENODATA;
		} else {
			fscache_retrieval_complete(op, 1);
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;

all_enobufs:
	fscache_retrieval_complete(op, *nr_pages);
	return -ENOBUFS;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);

	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0)
		fscache_mark_page_cached(op, page);
	else
		ret = -ENOBUFS;

	fscache_retrieval_complete(op, 1);
	_leave(" = %d", ret);
	return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 *   - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec, 0);

		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	fscache_retrieval_complete(op, *nr_pages);
	_leave(" = %d", ret);
	return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	mm_segment_t old_fs;
	struct file *file;
	struct path path;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret;

	ASSERT(op != NULL);
	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(S_ISREG(object->backer->d_inode->i_mode));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	path.mnt = cache->mnt;
	path.dentry = object->backer;
	file = dentry_open(&path, O_RDWR | O_LARGEFILE, cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
	} else {
		ret = -EIO;
		if (file->f_op->write) {
			pos = (loff_t) page->index << PAGE_SHIFT;

			/* we mustn't write more data than we have, so we have
			 * to beware of a partial page at EOF */
			eof = object->fscache.store_limit_l;
			len = PAGE_SIZE;
			if (eof & ~PAGE_MASK) {
				ASSERTCMP(pos, <, eof);
				if (eof - pos < PAGE_SIZE) {
					_debug("cut short %llx to %llx",
					       pos, eof);
					len = eof - pos;
					ASSERTCMP(pos + len, ==, eof);
				}
			}
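			/* e.g. with 4KB pages and store_limit_l == 0x6234,
			 * the check above cuts the write for the page at pos
			 * 0x6000 short to 0x234 bytes instead of a full page */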

			data = kmap(page);
			old_fs = get_fs();
			set_fs(KERNEL_DS);
			ret = file->f_op->write(
				file, (const void __user *) data, len, &pos);
			set_fs(old_fs);
			kunmap(page);
			if (ret != len)
				ret = -EIO;
		}
		fput(file);
	}

	if (ret < 0) {
		if (ret == -EIO)
			cachefiles_io_error_obj(
				object, "Write page to backing file failed");
		ret = -ENOBUFS;
	}

	_leave(" = %d", ret);
	return ret;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;

	object = container_of(_object, struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lu}", object, page->index);
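	/* the caller (__fscache_uncache_page) holds the cookie lock and
	 * expects this op to release it; as no per-page backing state is
	 * kept, dropping the lock is all there is to do */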
	spin_unlock(&object->fscache.cookie->lock);
}