rdwr.c
/* Storage object read/write
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public Licence
 * as published by the Free Software Foundation; either version
 * 2 of the Licence, or (at your option) any later version.
 */

#include <linux/mount.h>
#include <linux/file.h>
#include "internal.h"

/*
 * detect wake up events generated by the unlocking of pages in which we're
 * interested
 * - we use this to detect read completion of backing pages
 * - the caller holds the waitqueue lock
 */
static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
                                  int sync, void *_key)
{
        struct cachefiles_one_read *monitor =
                container_of(wait, struct cachefiles_one_read, monitor);
        struct cachefiles_object *object;
        struct wait_bit_key *key = _key;
        struct page *page = wait->private;

        ASSERT(key);

        _enter("{%lu},%u,%d,{%p,%u}",
               monitor->netfs_page->index, mode, sync,
               key->flags, key->bit_nr);
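
        /* only wakeups for the PG_locked bit of the page being monitored are
         * of interest; any other event is ignored so the waiter stays queued */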
        if (key->flags != &page->flags ||
            key->bit_nr != PG_locked)
                return 0;

        _debug("--- monitor %p %lx ---", page, page->flags);
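
        /* by this point the backing page should be either up to date or in
         * error; if it is neither, something unexpected unlocked it, so dump
         * a trace for diagnosis */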
        if (!PageUptodate(page) && !PageError(page))
                dump_stack();

        /* remove from the waitqueue */
        list_del(&wait->task_list);

        /* move onto the action list and queue for FS-Cache thread pool */
        ASSERT(monitor->op);

        object = container_of(monitor->op->op.object,
                              struct cachefiles_object, fscache);

        spin_lock(&object->work_lock);
        list_add_tail(&monitor->op_link, &monitor->op->to_do);
        spin_unlock(&object->work_lock);

        fscache_enqueue_retrieval(monitor->op);
        return 0;
}

/*
 * copy data from backing pages to netfs pages to complete a read operation
 * - driven by FS-Cache's thread pool
 */
static void cachefiles_read_copier(struct fscache_operation *_op)
{
        struct cachefiles_one_read *monitor;
        struct cachefiles_object *object;
        struct fscache_retrieval *op;
        struct pagevec pagevec;
        int error, max;

        op = container_of(_op, struct fscache_retrieval, op);
        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);

        _enter("{ino=%lu}", object->backer->d_inode->i_ino);

        pagevec_init(&pagevec, 0);
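
        /* only handle a small batch of completed reads per pass so that a
         * single operation can't monopolise the FS-Cache thread pool */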
        max = 8;
        spin_lock_irq(&object->work_lock);

        while (!list_empty(&op->to_do)) {
                monitor = list_entry(op->to_do.next,
                                     struct cachefiles_one_read, op_link);
                list_del(&monitor->op_link);

                spin_unlock_irq(&object->work_lock);

                _debug("- copy {%lu}", monitor->back_page->index);

                error = -EIO;
                if (PageUptodate(monitor->back_page)) {
                        copy_highpage(monitor->netfs_page, monitor->back_page);

                        pagevec_add(&pagevec, monitor->netfs_page);
                        fscache_mark_pages_cached(monitor->op, &pagevec);
                        error = 0;
                }

                if (error)
                        cachefiles_io_error_obj(
                                object,
                                "Readpage failed on backing file %lx",
                                (unsigned long) monitor->back_page->flags);

                page_cache_release(monitor->back_page);

                fscache_end_io(op, monitor->netfs_page, error);
                page_cache_release(monitor->netfs_page);
                fscache_put_retrieval(op);
                kfree(monitor);

                /* let the thread pool have some air occasionally */
                max--;
                if (max < 0 || need_resched()) {
                        if (!list_empty(&op->to_do))
                                fscache_enqueue_retrieval(op);
                        _leave(" [maxed out]");
                        return;
                }

                spin_lock_irq(&object->work_lock);
        }

        spin_unlock_irq(&object->work_lock);
        _leave("");
}

/*
 * read the corresponding page to the given set from the backing file
 * - an uncertain page is simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
                                            struct fscache_retrieval *op,
                                            struct page *netpage,
                                            struct pagevec *pagevec)
{
        struct cachefiles_one_read *monitor;
        struct address_space *bmapping;
        struct page *newpage, *backpage;
        int ret;

        _enter("");

        pagevec_reinit(pagevec);

        _debug("read back %p{%lu,%d}",
               netpage, netpage->index, page_count(netpage));

        monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
        if (!monitor)
                goto nomem;

        monitor->netfs_page = netpage;
        monitor->op = fscache_get_retrieval(op);

        init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);

        /* attempt to get hold of the backing page */
        bmapping = object->backer->d_inode->i_mapping;
        newpage = NULL;

        for (;;) {
                backpage = find_get_page(bmapping, netpage->index);
                if (backpage)
                        goto backing_page_already_present;

                if (!newpage) {
                        newpage = page_cache_alloc_cold(bmapping);
                        if (!newpage)
                                goto nomem_monitor;
                }

                ret = add_to_page_cache(newpage, bmapping,
                                        netpage->index, GFP_KERNEL);
                if (ret == 0)
                        goto installed_new_backing_page;
                if (ret != -EEXIST)
                        goto nomem_page;
        }

        /* we've installed a new backing page, so now we need to add it
         * to the LRU list and start it reading */
installed_new_backing_page:
        _debug("- new %p", newpage);

        backpage = newpage;
        newpage = NULL;

        page_cache_get(backpage);
        pagevec_add(pagevec, backpage);
        __pagevec_lru_add_file(pagevec);

read_backing_page:
        ret = bmapping->a_ops->readpage(NULL, backpage);
        if (ret < 0)
                goto read_error;

        /* set the monitor to transfer the data across */
monitor_backing_page:
        _debug("- monitor add");

        /* install the monitor */
        page_cache_get(monitor->netfs_page);
        page_cache_get(backpage);
        monitor->back_page = backpage;
        monitor->monitor.private = backpage;
        add_page_wait_queue(backpage, &monitor->monitor);
        monitor = NULL;

        /* but the page may have been read before the monitor was installed, so
         * the monitor may miss the event - so we have to ensure that we do get
         * one in such a case */
        if (trylock_page(backpage)) {
                _debug("jumpstart %p {%lx}", backpage, backpage->flags);
                unlock_page(backpage);
        }
        goto success;

        /* if the backing page is already present, it can be in one of
         * three states: read in progress, read failed or read okay */
backing_page_already_present:
        _debug("- present");

        if (newpage) {
                page_cache_release(newpage);
                newpage = NULL;
        }

        if (PageError(backpage))
                goto io_error;

        if (PageUptodate(backpage))
                goto backing_page_already_uptodate;

        if (!trylock_page(backpage))
                goto monitor_backing_page;
        _debug("read %p {%lx}", backpage, backpage->flags);
        goto read_backing_page;

        /* the backing page is already up to date, attach the netfs
         * page to the pagecache and LRU and copy the data across */
backing_page_already_uptodate:
        _debug("- uptodate");

        pagevec_add(pagevec, netpage);
        fscache_mark_pages_cached(op, pagevec);

        copy_highpage(netpage, backpage);
        fscache_end_io(op, netpage, 0);

success:
        _debug("success");
        ret = 0;

out:
        if (backpage)
                page_cache_release(backpage);
        if (monitor) {
                fscache_put_retrieval(monitor->op);
                kfree(monitor);
        }
        _leave(" = %d", ret);
        return ret;

read_error:
        _debug("read error %d", ret);
        if (ret == -ENOMEM)
                goto out;
io_error:
        cachefiles_io_error_obj(object, "Page read error on backing file");
        ret = -ENOBUFS;
        goto out;

nomem_page:
        page_cache_release(newpage);
nomem_monitor:
        fscache_put_retrieval(monitor->op);
        kfree(monitor);
nomem:
        _leave(" = -ENOMEM");
        return -ENOMEM;
}

/*
 * read a page from the cache or allocate a block in which to store it
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - if the page is backed by a block in the cache:
 *   - a read will be started which will call the callback on completion
 *   - 0 will be returned
 * - else if the page is unbacked:
 *   - the metadata will be retained
 *   - -ENODATA will be returned
 */
int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
                                  struct page *page,
                                  gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct pagevec pagevec;
        struct inode *inode;
        sector_t block0, block;
        unsigned shift;
        int ret;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("{%p},{%lx},,,", object, page->index);

        if (!object->backer)
                return -ENOBUFS;

        inode = object->backer->d_inode;
        ASSERT(S_ISREG(inode->i_mode));
        ASSERT(inode->i_mapping->a_ops->bmap);
        ASSERT(inode->i_mapping->a_ops->readpages);

        /* calculate the shift required to use bmap */
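        /* (a backing block size larger than a page can't be probed one block
         * per page this way, so such a cache file is declined) */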
        if (inode->i_sb->s_blocksize > PAGE_SIZE)
                return -ENOBUFS;

        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

        op->op.flags = FSCACHE_OP_FAST;
        op->op.processor = cachefiles_read_copier;

        pagevec_init(&pagevec, 0);

        /* we assume the absence or presence of the first block is a good
         * enough indication for the page as a whole
         * - TODO: don't use bmap() for this as it is _not_ actually good
         *   enough for this as it doesn't indicate errors, but it's all we've
         *   got for the moment
         */
        block0 = page->index;
        block0 <<= shift;

        block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
        _debug("%llx -> %llx",
               (unsigned long long) block0,
               (unsigned long long) block);

        if (block) {
                /* submit the apparently valid page to the backing fs to be
                 * read from disk */
                ret = cachefiles_read_backing_file_one(object, op, page,
                                                       &pagevec);
        } else if (cachefiles_has_space(cache, 0, 1) == 0) {
                /* there's space in the cache we can use */
                pagevec_add(&pagevec, page);
                fscache_mark_pages_cached(op, &pagevec);
                ret = -ENODATA;
        } else {
                ret = -ENOBUFS;
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * read the corresponding pages to the given set from the backing file
 * - any uncertain pages are simply discarded, to be tried again another time
 */
static int cachefiles_read_backing_file(struct cachefiles_object *object,
                                        struct fscache_retrieval *op,
                                        struct list_head *list,
                                        struct pagevec *mark_pvec)
{
        struct cachefiles_one_read *monitor = NULL;
        struct address_space *bmapping = object->backer->d_inode->i_mapping;
        struct pagevec lru_pvec;
        struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
        int ret = 0;

        _enter("");

        pagevec_init(&lru_pvec, 0);

        list_for_each_entry_safe(netpage, _n, list, lru) {
                list_del(&netpage->lru);

                _debug("read back %p{%lu,%d}",
                       netpage, netpage->index, page_count(netpage));

                if (!monitor) {
                        monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
                        if (!monitor)
                                goto nomem;

                        monitor->op = fscache_get_retrieval(op);
                        init_waitqueue_func_entry(&monitor->monitor,
                                                  cachefiles_read_waiter);
                }

                for (;;) {
                        backpage = find_get_page(bmapping, netpage->index);
                        if (backpage)
                                goto backing_page_already_present;

                        if (!newpage) {
                                newpage = page_cache_alloc_cold(bmapping);
                                if (!newpage)
                                        goto nomem;
                        }

                        ret = add_to_page_cache(newpage, bmapping,
                                                netpage->index, GFP_KERNEL);
                        if (ret == 0)
                                goto installed_new_backing_page;
                        if (ret != -EEXIST)
                                goto nomem;
                }

                /* we've installed a new backing page, so now we need to add it
                 * to the LRU list and start it reading */
        installed_new_backing_page:
                _debug("- new %p", newpage);

                backpage = newpage;
                newpage = NULL;

                page_cache_get(backpage);
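                /* pagevec_add() returns the number of slots left, so a return
                 * of zero means the vector is full and the batch should be
                 * flushed onto the file LRU */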
                if (!pagevec_add(&lru_pvec, backpage))
                        __pagevec_lru_add_file(&lru_pvec);

        reread_backing_page:
                ret = bmapping->a_ops->readpage(NULL, backpage);
                if (ret < 0)
                        goto read_error;

                /* add the netfs page to the pagecache and LRU, and set the
                 * monitor to transfer the data across */
        monitor_backing_page:
                _debug("- monitor add");

                ret = add_to_page_cache(netpage, op->mapping, netpage->index,
                                        GFP_KERNEL);
                if (ret < 0) {
                        if (ret == -EEXIST) {
                                page_cache_release(netpage);
                                continue;
                        }
                        goto nomem;
                }

                page_cache_get(netpage);
                if (!pagevec_add(&lru_pvec, netpage))
                        __pagevec_lru_add_file(&lru_pvec);

                /* install a monitor */
                page_cache_get(netpage);
                monitor->netfs_page = netpage;

                page_cache_get(backpage);
                monitor->back_page = backpage;
                monitor->monitor.private = backpage;
                add_page_wait_queue(backpage, &monitor->monitor);
                monitor = NULL;

                /* but the page may have been read before the monitor was
                 * installed, so the monitor may miss the event - so we have to
                 * ensure that we do get one in such a case */
                if (trylock_page(backpage)) {
                        _debug("2unlock %p {%lx}", backpage, backpage->flags);
                        unlock_page(backpage);
                }

                page_cache_release(backpage);
                backpage = NULL;

                page_cache_release(netpage);
                netpage = NULL;
                continue;

                /* if the backing page is already present, it can be in one of
                 * three states: read in progress, read failed or read okay */
        backing_page_already_present:
                _debug("- present %p", backpage);

                if (PageError(backpage))
                        goto io_error;

                if (PageUptodate(backpage))
                        goto backing_page_already_uptodate;

                _debug("- not ready %p{%lx}", backpage, backpage->flags);

                if (!trylock_page(backpage))
                        goto monitor_backing_page;

                if (PageError(backpage)) {
                        _debug("error %lx", backpage->flags);
                        unlock_page(backpage);
                        goto io_error;
                }

                if (PageUptodate(backpage))
                        goto backing_page_already_uptodate_unlock;

                /* we've locked a page that's neither up to date nor erroneous,
                 * so we need to attempt to read it again */
                goto reread_backing_page;

                /* the backing page is already up to date, attach the netfs
                 * page to the pagecache and LRU and copy the data across */
        backing_page_already_uptodate_unlock:
                _debug("uptodate %lx", backpage->flags);
                unlock_page(backpage);

        backing_page_already_uptodate:
                _debug("- uptodate");

                ret = add_to_page_cache(netpage, op->mapping, netpage->index,
                                        GFP_KERNEL);
                if (ret < 0) {
                        if (ret == -EEXIST) {
                                page_cache_release(netpage);
                                continue;
                        }
                        goto nomem;
                }

                copy_highpage(netpage, backpage);

                page_cache_release(backpage);
                backpage = NULL;

                if (!pagevec_add(mark_pvec, netpage))
                        fscache_mark_pages_cached(op, mark_pvec);

                page_cache_get(netpage);
                if (!pagevec_add(&lru_pvec, netpage))
                        __pagevec_lru_add_file(&lru_pvec);

                fscache_end_io(op, netpage, 0);
                page_cache_release(netpage);
                netpage = NULL;
                continue;
        }

        netpage = NULL;

        _debug("out");

out:
        /* tidy up */
        pagevec_lru_add_file(&lru_pvec);

        if (newpage)
                page_cache_release(newpage);
        if (netpage)
                page_cache_release(netpage);
        if (backpage)
                page_cache_release(backpage);
        if (monitor) {
                fscache_put_retrieval(op);
                kfree(monitor);
        }

        list_for_each_entry_safe(netpage, _n, list, lru) {
                list_del(&netpage->lru);
                page_cache_release(netpage);
        }

        _leave(" = %d", ret);
        return ret;

nomem:
        _debug("nomem");
        ret = -ENOMEM;
        goto out;

read_error:
        _debug("read error %d", ret);
        if (ret == -ENOMEM)
                goto out;
io_error:
        cachefiles_io_error_obj(object, "Page read error on backing file");
        ret = -ENOBUFS;
        goto out;
}

/*
 * read a list of pages from the cache or allocate blocks in which to store
 * them
 */
int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
                                   struct list_head *pages,
                                   unsigned *nr_pages,
                                   gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct list_head backpages;
        struct pagevec pagevec;
        struct inode *inode;
        struct page *page, *_n;
        unsigned shift, nrbackpages;
        int ret, ret2, space;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("{OBJ%x,%d},,%d,,",
               object->fscache.debug_id, atomic_read(&op->op.usage),
               *nr_pages);

        if (!object->backer)
                return -ENOBUFS;

        space = 1;
        if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
                space = 0;

        inode = object->backer->d_inode;
        ASSERT(S_ISREG(inode->i_mode));
        ASSERT(inode->i_mapping->a_ops->bmap);
        ASSERT(inode->i_mapping->a_ops->readpages);

        /* calculate the shift required to use bmap */
        if (inode->i_sb->s_blocksize > PAGE_SIZE)
                return -ENOBUFS;

        shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;

        pagevec_init(&pagevec, 0);

        op->op.flags = FSCACHE_OP_FAST;
        op->op.processor = cachefiles_read_copier;

        INIT_LIST_HEAD(&backpages);
        nrbackpages = 0;
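
        /* pages with no backing block are reported as -ENODATA if there's
         * space to cache them later, or -ENOBUFS if there isn't */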
        ret = space ? -ENODATA : -ENOBUFS;
        list_for_each_entry_safe(page, _n, pages, lru) {
                sector_t block0, block;

                /* we assume the absence or presence of the first block is a
                 * good enough indication for the page as a whole
                 * - TODO: don't use bmap() for this as it is _not_ actually
                 *   good enough for this as it doesn't indicate errors, but
                 *   it's all we've got for the moment
                 */
                block0 = page->index;
                block0 <<= shift;

                block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
                                                      block0);
                _debug("%llx -> %llx",
                       (unsigned long long) block0,
                       (unsigned long long) block);

                if (block) {
                        /* we have data - add it to the list to give to the
                         * backing fs */
                        list_move(&page->lru, &backpages);
                        (*nr_pages)--;
                        nrbackpages++;
                } else if (space && pagevec_add(&pagevec, page) == 0) {
                        fscache_mark_pages_cached(op, &pagevec);
                        ret = -ENODATA;
                }
        }

        if (pagevec_count(&pagevec) > 0)
                fscache_mark_pages_cached(op, &pagevec);

        if (list_empty(pages))
                ret = 0;

        /* submit the apparently valid pages to the backing fs to be read from
         * disk */
        if (nrbackpages > 0) {
                ret2 = cachefiles_read_backing_file(object, op, &backpages,
                                                    &pagevec);
                if (ret2 == -ENOMEM || ret2 == -EINTR)
                        ret = ret2;
        }

        if (pagevec_count(&pagevec) > 0)
                fscache_mark_pages_cached(op, &pagevec);

        _leave(" = %d [nr=%u%s]",
               ret, *nr_pages, list_empty(pages) ? " empty" : "");
        return ret;
}

/*
 * allocate a block in the cache in which to store a page
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if no buffers can be made available
 * - returns -ENOBUFS if page is beyond EOF
 * - otherwise:
 *   - the metadata will be retained
 *   - 0 will be returned
 */
int cachefiles_allocate_page(struct fscache_retrieval *op,
                             struct page *page,
                             gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct pagevec pagevec;
        int ret;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("%p,{%lx},", object, page->index);

        ret = cachefiles_has_space(cache, 0, 1);
        if (ret == 0) {
                pagevec_init(&pagevec, 0);
                pagevec_add(&pagevec, page);
                fscache_mark_pages_cached(op, &pagevec);
        } else {
                ret = -ENOBUFS;
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * allocate blocks in the cache in which to store a set of pages
 * - cache withdrawal is prevented by the caller
 * - returns -EINTR if interrupted
 * - returns -ENOMEM if ran out of memory
 * - returns -ENOBUFS if some buffers couldn't be made available
 * - returns -ENOBUFS if some pages are beyond EOF
 * - otherwise:
 *   - -ENODATA will be returned
 *   - metadata will be retained for any page marked
 */
int cachefiles_allocate_pages(struct fscache_retrieval *op,
                              struct list_head *pages,
                              unsigned *nr_pages,
                              gfp_t gfp)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        struct pagevec pagevec;
        struct page *page;
        int ret;

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("%p,,,%d,", object, *nr_pages);

        ret = cachefiles_has_space(cache, 0, *nr_pages);
        if (ret == 0) {
                pagevec_init(&pagevec, 0);

                list_for_each_entry(page, pages, lru) {
                        if (pagevec_add(&pagevec, page) == 0)
                                fscache_mark_pages_cached(op, &pagevec);
                }

                if (pagevec_count(&pagevec) > 0)
                        fscache_mark_pages_cached(op, &pagevec);
                ret = -ENODATA;
        } else {
                ret = -ENOBUFS;
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * request a page be stored in the cache
 * - cache withdrawal is prevented by the caller
 * - this request may be ignored if there's no cache block available, in which
 *   case -ENOBUFS will be returned
 * - if the op is in progress, 0 will be returned
 */
int cachefiles_write_page(struct fscache_storage *op, struct page *page)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;
        mm_segment_t old_fs;
        struct file *file;
        loff_t pos;
        void *data;
        int ret;

        ASSERT(op != NULL);
        ASSERT(page != NULL);

        object = container_of(op->op.object,
                              struct cachefiles_object, fscache);

        _enter("%p,%p{%lx},,,", object, page, page->index);

        if (!object->backer) {
                _leave(" = -ENOBUFS");
                return -ENOBUFS;
        }

        ASSERT(S_ISREG(object->backer->d_inode->i_mode));

        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        /* write the page to the backing filesystem and let it store it in its
         * own time */
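        /* dentry_open() takes over the dentry and vfsmount references
         * obtained here and releases them itself on failure */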
        dget(object->backer);
        mntget(cache->mnt);
        file = dentry_open(object->backer, cache->mnt, O_RDWR,
                           cache->cache_cred);
        if (IS_ERR(file)) {
                ret = PTR_ERR(file);
        } else {
                ret = -EIO;
                if (file->f_op->write) {
                        pos = (loff_t) page->index << PAGE_SHIFT;
                        data = kmap(page);
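                        /* widen the address limit so that ->write(), which
                         * expects a __user pointer, will accept the
                         * kernel-mapped buffer */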
                        old_fs = get_fs();
                        set_fs(KERNEL_DS);
                        ret = file->f_op->write(
                                file, (const void __user *) data, PAGE_SIZE,
                                &pos);
                        set_fs(old_fs);
                        kunmap(page);
                        if (ret != PAGE_SIZE)
                                ret = -EIO;
                }
                fput(file);
        }

        if (ret < 0) {
                if (ret == -EIO)
                        cachefiles_io_error_obj(
                                object, "Write page to backing file failed");
                ret = -ENOBUFS;
        }

        _leave(" = %d", ret);
        return ret;
}

/*
 * detach a backing block from a page
 * - cache withdrawal is prevented by the caller
 */
void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
{
        struct cachefiles_object *object;
        struct cachefiles_cache *cache;

        object = container_of(_object, struct cachefiles_object, fscache);
        cache = container_of(object->fscache.cache,
                             struct cachefiles_cache, cache);

        _enter("%p,{%lu}", object, page->index);
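
        /* the cookie lock appears to be taken by the caller before this is
         * invoked; it is simply released here */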
        spin_unlock(&object->fscache.cookie->lock);
}