/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/mount.h>
#include <linux/file.h>
#include "internal.h"
/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
				  int sync, void *_key)
{
	struct cachefiles_one_read *monitor =
		container_of(wait, struct cachefiles_one_read, monitor);
	struct cachefiles_object *object;
	struct wait_bit_key *key = _key;
	struct page *page = wait->private;

	ASSERT(key);

	_enter("{%lu},%u,%d,{%p,%u}",
	       monitor->netfs_page->index, mode, sync,
	       key->flags, key->bit_nr);

	if (key->flags != &page->flags ||
	    key->bit_nr != PG_locked)
		return 0;

	_debug("--- monitor %p %lx ---", page, page->flags);
	if (!PageUptodate(page) && !PageError(page)) {
		/* unlocked, but neither up to date nor erroneous? */
		_debug("page probably truncated");
	}
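	/* note that truncation isn't dealt with here: the copier thread will
	 * see that the backing page is neither up to date nor in error and
	 * reissue the read (see cachefiles_read_reissue()) */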
	/* remove from the waitqueue */
	list_del(&wait->task_list);

	/* move onto the action list and queue for FS-Cache thread pool */
	ASSERT(monitor->op);

	object = container_of(monitor->op->op.object,
			      struct cachefiles_object, fscache);
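	/* this callback can run in hard-IRQ context (a page being unlocked on
	 * I/O completion) with the waitqueue lock held and interrupts already
	 * disabled, which is presumably why a plain spin_lock() suffices here
	 * whilst other users of work_lock take it with spin_lock_irq() */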
	spin_lock(&object->work_lock);
	list_add_tail(&monitor->op_link, &monitor->op->to_do);
	spin_unlock(&object->work_lock);

	fscache_enqueue_retrieval(monitor->op);
	return 0;
}
/*
 * handle a probably truncated page
 * - check to see if the page is still relevant and reissue the read if
 *   possible
 * - return -EIO on error, -ENODATA if the page is gone, -EINPROGRESS if we
 *   must wait again and 0 if successful
 */
static int cachefiles_read_reissue(struct cachefiles_object *object,
				   struct cachefiles_one_read *monitor)
{
	struct address_space *bmapping = object->backer->d_inode->i_mapping;
	struct page *backpage = monitor->back_page, *backpage2;
	int ret;

	_enter("{ino=%lx},{%lx,%lx}",
	       object->backer->d_inode->i_ino,
	       backpage->index, backpage->flags);

	/* skip if the page was truncated away completely */
	if (backpage->mapping != bmapping) {
		_leave(" = -ENODATA [mapping]");
		return -ENODATA;
	}

	backpage2 = find_get_page(bmapping, backpage->index);
	if (!backpage2) {
		_leave(" = -ENODATA [gone]");
		return -ENODATA;
	}

	if (backpage != backpage2) {
		put_page(backpage2);
		_leave(" = -ENODATA [different]");
		return -ENODATA;
	}

	/* the page is still there and we already have a ref on it, so we don't
	 * need a second */
	put_page(backpage2);

	INIT_LIST_HEAD(&monitor->op_link);
	add_page_wait_queue(backpage, &monitor->monitor);

	if (trylock_page(backpage)) {
		ret = -EIO;
		if (PageError(backpage))
			goto unlock_discard;
		ret = 0;
		if (PageUptodate(backpage))
			goto unlock_discard;

		_debug("reissue read");
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto unlock_discard;
	}

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}

	/* it'll reappear on the todo list */
	_leave(" = -EINPROGRESS");
	return -EINPROGRESS;

unlock_discard:
	unlock_page(backpage);
	spin_lock_irq(&object->work_lock);
	list_del(&monitor->op_link);
	spin_unlock_irq(&object->work_lock);
	_leave(" = %d", ret);
	return ret;
}
/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
	struct cachefiles_one_read *monitor;
	struct cachefiles_object *object;
	struct fscache_retrieval *op;
	struct pagevec pagevec;
	int error, max;

	op = container_of(_op, struct fscache_retrieval, op);
	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("{ino=%lu}", object->backer->d_inode->i_ino);

	pagevec_init(&pagevec, 0);

	max = 8;
	spin_lock_irq(&object->work_lock);

	while (!list_empty(&op->to_do)) {
		monitor = list_entry(op->to_do.next,
				     struct cachefiles_one_read, op_link);
		list_del(&monitor->op_link);

		spin_unlock_irq(&object->work_lock);

		_debug("- copy {%lu}", monitor->back_page->index);

	recheck:
		if (PageUptodate(monitor->back_page)) {
			copy_highpage(monitor->netfs_page, monitor->back_page);

			pagevec_add(&pagevec, monitor->netfs_page);
			fscache_mark_pages_cached(monitor->op, &pagevec);
			error = 0;
		} else if (!PageError(monitor->back_page)) {
			/* the page has probably been truncated */
			error = cachefiles_read_reissue(object, monitor);
			if (error == -EINPROGRESS)
				goto next;
			goto recheck;
		} else {
			cachefiles_io_error_obj(
				object,
				"Readpage failed on backing file %lx",
				(unsigned long) monitor->back_page->flags);
			error = -EIO;
		}

		page_cache_release(monitor->back_page);

		fscache_end_io(op, monitor->netfs_page, error);
		page_cache_release(monitor->netfs_page);
		fscache_put_retrieval(op);
		kfree(monitor);

	next:
		/* let the thread pool have some air occasionally */
		max--;
		if (max < 0 || need_resched()) {
			if (!list_empty(&op->to_do))
				fscache_enqueue_retrieval(op);
			_leave(" [maxed out]");
			return;
		}

		spin_lock_irq(&object->work_lock);
	}

	spin_unlock_irq(&object->work_lock);
	_leave("");
}
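/*
 * Note: each cachefiles_one_read monitor is allocated by the
 * cachefiles_read_backing_file*() functions below, queued on the operation's
 * to_do list by cachefiles_read_waiter() and consumed and freed by the copier
 * above.
 */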
/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
					    struct fscache_retrieval *op,
					    struct page *netpage,
					    struct pagevec *pagevec)
{
	struct cachefiles_one_read *monitor;
	struct address_space *bmapping;
	struct page *newpage, *backpage;
	int ret;

	_enter("");

	pagevec_reinit(pagevec);

	_debug("read back %p{%lu,%d}",
	       netpage, netpage->index, page_count(netpage));

	monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
	if (!monitor)
		goto nomem;

	monitor->netfs_page = netpage;
	monitor->op = fscache_get_retrieval(op);

	init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);
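	/* the wait entry is given a callback rather than a task, so unlocking
	 * the backing page invokes cachefiles_read_waiter() instead of waking
	 * a sleeper */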
	/* attempt to get hold of the backing page */
	bmapping = object->backer->d_inode->i_mapping;
	newpage = NULL;

	for (;;) {
		backpage = find_get_page(bmapping, netpage->index);
		if (backpage)
			goto backing_page_already_present;

		if (!newpage) {
			newpage = page_cache_alloc_cold(bmapping);
			if (!newpage)
				goto nomem_monitor;
		}

		ret = add_to_page_cache(newpage, bmapping,
					netpage->index, GFP_KERNEL);
		if (ret == 0)
			goto installed_new_backing_page;
		if (ret != -EEXIST)
			goto nomem_page;
	}

	/* we've installed a new backing page, so now we need to add it
	 * to the LRU list and start it reading */
installed_new_backing_page:
	_debug("- new %p", newpage);

	backpage = newpage;
	newpage = NULL;

	page_cache_get(backpage);
	pagevec_add(pagevec, backpage);
	__pagevec_lru_add_file(pagevec);

read_backing_page:
	ret = bmapping->a_ops->readpage(NULL, backpage);
	if (ret < 0)
		goto read_error;

	/* set the monitor to transfer the data across */
monitor_backing_page:
	_debug("- monitor add");

	/* install the monitor */
	page_cache_get(monitor->netfs_page);
	page_cache_get(backpage);
	monitor->back_page = backpage;
	monitor->monitor.private = backpage;
	add_page_wait_queue(backpage, &monitor->monitor);
	monitor = NULL;

	/* but the page may have been read before the monitor was installed, so
	 * the monitor may miss the event - so we have to ensure that we do get
	 * one in such a case */
	if (trylock_page(backpage)) {
		_debug("jumpstart %p {%lx}", backpage, backpage->flags);
		unlock_page(backpage);
	}
	goto success;

	/* if the backing page is already present, it can be in one of
	 * three states: read in progress, read failed or read okay */
backing_page_already_present:
	_debug("- present");

	if (newpage) {
		page_cache_release(newpage);
		newpage = NULL;
	}

	if (PageError(backpage))
		goto io_error;

	if (PageUptodate(backpage))
		goto backing_page_already_uptodate;

	if (!trylock_page(backpage))
		goto monitor_backing_page;
	_debug("read %p {%lx}", backpage, backpage->flags);
	goto read_backing_page;

	/* the backing page is already up to date, attach the netfs
	 * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
	_debug("- uptodate");

	pagevec_add(pagevec, netpage);
	fscache_mark_pages_cached(op, pagevec);

	copy_highpage(netpage, backpage);
	fscache_end_io(op, netpage, 0);

success:
	_debug("success");
	ret = 0;

out:
	if (backpage)
		page_cache_release(backpage);
	if (monitor) {
		fscache_put_retrieval(monitor->op);
		kfree(monitor);
	}
	_leave(" = %d", ret);
	return ret;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto out;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
	goto out;

nomem_page:
	page_cache_release(newpage);
nomem_monitor:
	fscache_put_retrieval(monitor->op);
	kfree(monitor);
nomem:
	_leave(" = -ENOMEM");
	return -ENOMEM;
}
/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if the page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
				  struct page *page,
				  gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct inode *inode;
	sector_t block0, block;
	unsigned shift;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{%p},{%lx},,,", object, page->index);

	if (!object->backer)
		return -ENOBUFS;

	inode = object->backer->d_inode;
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	if (inode->i_sb->s_blocksize > PAGE_SIZE)
		return -ENOBUFS;

	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_FAST;
	op->op.processor = cachefiles_read_copier;
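	/* the op is now set up to be run by FS-Cache's thread pool, which
	 * calls cachefiles_read_copier() to shovel the data from backing
	 * pages to netfs pages as reads complete */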
	pagevec_init(&pagevec, 0);

	/* we assume the absence or presence of the first block is a good
	 * enough indication for the page as a whole
	 * - TODO: don't use bmap() for this as it is _not_ actually good
	 *   enough: it doesn't indicate read errors, but it's all we've got
	 *   for the moment
	 */
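	/* for example (illustrative numbers only): with 4KiB pages
	 * (PAGE_SHIFT of 12) on a backing fs using 1KiB blocks
	 * (s_blocksize_bits of 10), shift is 2, so page index 3 maps to
	 * block0 = 3 << 2 = 12, the first fs block covered by that page */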
	block0 = page->index;
	block0 <<= shift;

	block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
	_debug("%llx -> %llx",
	       (unsigned long long) block0,
	       (unsigned long long) block);

	if (block) {
		/* submit the apparently valid page to the backing fs to be
		 * read from disk */
		ret = cachefiles_read_backing_file_one(object, op, page,
						       &pagevec);
	} else if (cachefiles_has_space(cache, 0, 1) == 0) {
		/* there's space in the cache we can use */
		pagevec_add(&pagevec, page);
		fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	_leave(" = %d", ret);
	return ret;
}
/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
					struct fscache_retrieval *op,
					struct list_head *list,
					struct pagevec *mark_pvec)
{
	struct cachefiles_one_read *monitor = NULL;
	struct address_space *bmapping = object->backer->d_inode->i_mapping;
	struct pagevec lru_pvec;
	struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
	int ret = 0;

	_enter("");

	pagevec_init(&lru_pvec, 0);

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);

		_debug("read back %p{%lu,%d}",
		       netpage, netpage->index, page_count(netpage));

		if (!monitor) {
			monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
			if (!monitor)
				goto nomem;

			monitor->op = fscache_get_retrieval(op);
			init_waitqueue_func_entry(&monitor->monitor,
						  cachefiles_read_waiter);
		}

		for (;;) {
			backpage = find_get_page(bmapping, netpage->index);
			if (backpage)
				goto backing_page_already_present;

			if (!newpage) {
				newpage = page_cache_alloc_cold(bmapping);
				if (!newpage)
					goto nomem;
			}

			ret = add_to_page_cache(newpage, bmapping,
						netpage->index, GFP_KERNEL);
			if (ret == 0)
				goto installed_new_backing_page;
			if (ret != -EEXIST)
				goto nomem;
		}

		/* we've installed a new backing page, so now we need to add it
		 * to the LRU list and start it reading */
	installed_new_backing_page:
		_debug("- new %p", newpage);

		backpage = newpage;
		newpage = NULL;

		page_cache_get(backpage);
		if (!pagevec_add(&lru_pvec, backpage))
			__pagevec_lru_add_file(&lru_pvec);

	reread_backing_page:
		ret = bmapping->a_ops->readpage(NULL, backpage);
		if (ret < 0)
			goto read_error;

		/* add the netfs page to the pagecache and LRU, and set the
		 * monitor to transfer the data across */
	monitor_backing_page:
		_debug("- monitor add");

		ret = add_to_page_cache(netpage, op->mapping, netpage->index,
					GFP_KERNEL);
		if (ret < 0) {
			if (ret == -EEXIST) {
				page_cache_release(netpage);
				continue;
			}
			goto nomem;
		}

		page_cache_get(netpage);
		if (!pagevec_add(&lru_pvec, netpage))
			__pagevec_lru_add_file(&lru_pvec);

		/* install a monitor */
		page_cache_get(netpage);
		monitor->netfs_page = netpage;

		page_cache_get(backpage);
		monitor->back_page = backpage;
		monitor->monitor.private = backpage;
		add_page_wait_queue(backpage, &monitor->monitor);
		monitor = NULL;
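		/* ownership of this monitor has passed to the waiter/copier
		 * path; a fresh one will be allocated for the next netfs page
		 * at the top of the loop */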
		/* but the page may have been read before the monitor was
		 * installed, so the monitor may miss the event - so we have to
		 * ensure that we do get one in such a case */
		if (trylock_page(backpage)) {
			_debug("2unlock %p {%lx}", backpage, backpage->flags);
			unlock_page(backpage);
		}

		page_cache_release(backpage);
		backpage = NULL;

		page_cache_release(netpage);
		netpage = NULL;
		continue;

		/* if the backing page is already present, it can be in one of
		 * three states: read in progress, read failed or read okay */
	backing_page_already_present:
		_debug("- present %p", backpage);

		if (PageError(backpage))
			goto io_error;

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate;

		_debug("- not ready %p{%lx}", backpage, backpage->flags);

		if (!trylock_page(backpage))
			goto monitor_backing_page;

		if (PageError(backpage)) {
			_debug("error %lx", backpage->flags);
			unlock_page(backpage);
			goto io_error;
		}

		if (PageUptodate(backpage))
			goto backing_page_already_uptodate_unlock;

		/* we've locked a page that's neither up to date nor erroneous,
		 * so we need to attempt to read it again */
		goto reread_backing_page;

		/* the backing page is already up to date, attach the netfs
		 * page to the pagecache and LRU and copy the data across */
	backing_page_already_uptodate_unlock:
		_debug("uptodate %lx", backpage->flags);
		unlock_page(backpage);

	backing_page_already_uptodate:
		_debug("- uptodate");

		ret = add_to_page_cache(netpage, op->mapping, netpage->index,
					GFP_KERNEL);
		if (ret < 0) {
			if (ret == -EEXIST) {
				page_cache_release(netpage);
				continue;
			}
			goto nomem;
		}

		copy_highpage(netpage, backpage);

		page_cache_release(backpage);
		backpage = NULL;

		if (!pagevec_add(mark_pvec, netpage))
			fscache_mark_pages_cached(op, mark_pvec);

		page_cache_get(netpage);
		if (!pagevec_add(&lru_pvec, netpage))
			__pagevec_lru_add_file(&lru_pvec);

		fscache_end_io(op, netpage, 0);
		page_cache_release(netpage);
		netpage = NULL;
		continue;
	}

	netpage = NULL;

	_debug("out");

out:
	/* tidy up */
	pagevec_lru_add_file(&lru_pvec);

	if (newpage)
		page_cache_release(newpage);
	if (netpage)
		page_cache_release(netpage);
	if (backpage)
		page_cache_release(backpage);
	if (monitor) {
		fscache_put_retrieval(op);
		kfree(monitor);
	}

	list_for_each_entry_safe(netpage, _n, list, lru) {
		list_del(&netpage->lru);
		page_cache_release(netpage);
	}

	_leave(" = %d", ret);
	return ret;

nomem:
	_debug("nomem");
	ret = -ENOMEM;
	goto out;

read_error:
	_debug("read error %d", ret);
	if (ret == -ENOMEM)
		goto out;
io_error:
	cachefiles_io_error_obj(object, "Page read error on backing file");
	ret = -ENOBUFS;
	goto out;
}
/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
				   struct list_head *pages,
				   unsigned *nr_pages,
				   gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct list_head backpages;
	struct pagevec pagevec;
	struct inode *inode;
	struct page *page, *_n;
	unsigned shift, nrbackpages;
	int ret, ret2, space;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("{OBJ%x,%d},,%d,,",
	       object->fscache.debug_id, atomic_read(&op->op.usage),
	       *nr_pages);

	if (!object->backer)
		return -ENOBUFS;

	space = 1;
	if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
		space = 0;

	inode = object->backer->d_inode;
	ASSERT(S_ISREG(inode->i_mode));
	ASSERT(inode->i_mapping->a_ops->bmap);
	ASSERT(inode->i_mapping->a_ops->readpages);

	/* calculate the shift required to use bmap */
	if (inode->i_sb->s_blocksize > PAGE_SIZE)
		return -ENOBUFS;

	shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

	pagevec_init(&pagevec, 0);

	op->op.flags &= FSCACHE_OP_KEEP_FLAGS;
	op->op.flags |= FSCACHE_OP_FAST;
	op->op.processor = cachefiles_read_copier;

	INIT_LIST_HEAD(&backpages);
	nrbackpages = 0;

	ret = space ? -ENODATA : -ENOBUFS;
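	/* default outcome: -ENODATA tells the netfs to fetch the data itself
	 * (space has been reserved and the pages will be marked as cached),
	 * whereas -ENOBUFS means the cache can't help with these pages at
	 * all */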
	list_for_each_entry_safe(page, _n, pages, lru) {
		sector_t block0, block;

		/* we assume the absence or presence of the first block is a
		 * good enough indication for the page as a whole
		 * - TODO: don't use bmap() for this as it is _not_ actually
		 *   good enough: it doesn't indicate read errors, but it's
		 *   all we've got for the moment
		 */
		block0 = page->index;
		block0 <<= shift;

		block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
						      block0);
		_debug("%llx -> %llx",
		       (unsigned long long) block0,
		       (unsigned long long) block);

		if (block) {
			/* we have data - add it to the list to give to the
			 * backing fs */
			list_move(&page->lru, &backpages);
			(*nr_pages)--;
			nrbackpages++;
		} else if (space && pagevec_add(&pagevec, page) == 0) {
			fscache_mark_pages_cached(op, &pagevec);
			ret = -ENODATA;
		}
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	if (list_empty(pages))
		ret = 0;

	/* submit the apparently valid pages to the backing fs to be read from
	 * disk */
	if (nrbackpages > 0) {
		ret2 = cachefiles_read_backing_file(object, op, &backpages,
						    &pagevec);
		if (ret2 == -ENOMEM || ret2 == -EINTR)
			ret = ret2;
	}

	if (pagevec_count(&pagevec) > 0)
		fscache_mark_pages_cached(op, &pagevec);

	_leave(" = %d [nr=%u%s]",
	       ret, *nr_pages, list_empty(pages) ? " empty" : "");
	return ret;
}
/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if the page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
			     struct page *page,
			     gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lx},", object, page->index);
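	/* "allocating" here just means checking that the cache has space and
	 * marking the netfs page as being cached; no backing-fs block is
	 * actually allocated until the page is written out */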
	ret = cachefiles_has_space(cache, 0, 1);
	if (ret == 0) {
		pagevec_init(&pagevec, 0);
		pagevec_add(&pagevec, page);
		fscache_mark_pages_cached(op, &pagevec);
	} else {
		ret = -ENOBUFS;
	}

	_leave(" = %d", ret);
	return ret;
}
/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if we ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 *   - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
			      struct list_head *pages,
			      unsigned *nr_pages,
			      gfp_t gfp)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	struct pagevec pagevec;
	struct page *page;
	int ret;

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,,,%d,", object, *nr_pages);

	ret = cachefiles_has_space(cache, 0, *nr_pages);
	if (ret == 0) {
		pagevec_init(&pagevec, 0);

		list_for_each_entry(page, pages, lru) {
			if (pagevec_add(&pagevec, page) == 0)
				fscache_mark_pages_cached(op, &pagevec);
		}

		if (pagevec_count(&pagevec) > 0)
			fscache_mark_pages_cached(op, &pagevec);
		ret = -ENODATA;
	} else {
		ret = -ENOBUFS;
	}

	_leave(" = %d", ret);
	return ret;
}
/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;
	mm_segment_t old_fs;
	struct file *file;
	loff_t pos, eof;
	size_t len;
	void *data;
	int ret;

	ASSERT(op != NULL);
	ASSERT(page != NULL);

	object = container_of(op->op.object,
			      struct cachefiles_object, fscache);

	_enter("%p,%p{%lx},,,", object, page, page->index);

	if (!object->backer) {
		_leave(" = -ENOBUFS");
		return -ENOBUFS;
	}

	ASSERT(S_ISREG(object->backer->d_inode->i_mode));

	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	/* write the page to the backing filesystem and let it store it in its
	 * own time */
	dget(object->backer);
	mntget(cache->mnt);
	file = dentry_open(object->backer, cache->mnt, O_RDWR,
			   cache->cache_cred);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
	} else {
		ret = -EIO;
		if (file->f_op->write) {
			pos = (loff_t) page->index << PAGE_SHIFT;

			/* we mustn't write more data than we have, so we have
			 * to beware of a partial page at EOF */
			eof = object->fscache.store_limit_l;
			len = PAGE_SIZE;
			if (eof & ~PAGE_MASK) {
				ASSERTCMP(pos, <, eof);
				if (eof - pos < PAGE_SIZE) {
					_debug("cut short %llx to %llx",
					       pos, eof);
					len = eof - pos;
					ASSERTCMP(pos + len, ==, eof);
				}
			}
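			/* the data comes from a kmap'd kernel page rather
			 * than a userspace buffer, so the usermem checks in
			 * ->write() have to be suppressed by temporarily
			 * widening the address limit to KERNEL_DS */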
			data = kmap(page);
			old_fs = get_fs();
			set_fs(KERNEL_DS);
			ret = file->f_op->write(
				file, (const void __user *) data, len, &pos);
			set_fs(old_fs);
			kunmap(page);
			if (ret != len)
				ret = -EIO;
		}
		fput(file);
	}

	if (ret < 0) {
		if (ret == -EIO)
			cachefiles_io_error_obj(
				object, "Write page to backing file failed");
		ret = -ENOBUFS;
	}

	_leave(" = %d", ret);
	return ret;
}
/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
{
	struct cachefiles_object *object;
	struct cachefiles_cache *cache;

	object = container_of(_object, struct cachefiles_object, fscache);
	cache = container_of(object->fscache.cache,
			     struct cachefiles_cache, cache);

	_enter("%p,{%lu}", object, page->index);
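	/* the caller (__fscache_uncache_page) holds the cookie lock across
	 * this call and relies on the backend to release it */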
	spin_unlock(&object->fscache.cookie->lock);
}