/*
 * mm/readahead.c - address_space-level file readahead.
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 09Apr2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/backing-dev.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/pagevec.h>
#include <linux/pagemap.h>

void default_unplug_io_fn(struct backing_dev_info *bdi, struct page *page)
{
}
EXPORT_SYMBOL(default_unplug_io_fn);

struct backing_dev_info default_backing_dev_info = {
	.ra_pages	= VM_MAX_READAHEAD * 1024 / PAGE_CACHE_SIZE,
	.state		= 0,
	.capabilities	= BDI_CAP_MAP_COPY,
	.unplug_io_fn	= default_unplug_io_fn,
};
EXPORT_SYMBOL_GPL(default_backing_dev_info);

/*
 * Initialise a struct file's readahead state.  Assumes that the caller has
 * memset *ra to zero.
 */
void
file_ra_state_init(struct file_ra_state *ra, struct address_space *mapping)
{
	ra->ra_pages = mapping->backing_dev_info->ra_pages;
	ra->prev_pos = -1;
}
EXPORT_SYMBOL_GPL(file_ra_state_init);
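
/*
 * Illustrative sketch (not part of this file): a caller that sets up its own
 * struct file would zero the readahead state and then initialise it against
 * the inode's mapping roughly like this.  The name example_open() is
 * hypothetical; f_ra and i_mapping are the real struct fields.
 *
 *	static int example_open(struct inode *inode, struct file *filp)
 *	{
 *		memset(&filp->f_ra, 0, sizeof(filp->f_ra));
 *		file_ra_state_init(&filp->f_ra, inode->i_mapping);
 *		return 0;
 *	}
 */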

#define list_to_page(head) (list_entry((head)->prev, struct page, lru))

/**
 * read_cache_pages - populate an address space with some pages & start reads against them
 * @mapping: the address_space
 * @pages: The address of a list_head which contains the target pages.  These
 *   pages have their ->index populated and are otherwise uninitialised.
 * @filler: callback routine for filling a single page.
 * @data: private data for the callback routine.
 *
 * Hides the details of the LRU cache etc from the filesystems.
 */
int read_cache_pages(struct address_space *mapping, struct list_head *pages,
			int (*filler)(void *, struct page *), void *data)
{
	struct page *page;
	struct pagevec lru_pvec;
	int ret = 0;

	pagevec_init(&lru_pvec, 0);

	while (!list_empty(pages)) {
		page = list_to_page(pages);
		list_del(&page->lru);
		if (add_to_page_cache(page, mapping, page->index, GFP_KERNEL)) {
			page_cache_release(page);
			continue;
		}
		ret = filler(data, page);
		if (!pagevec_add(&lru_pvec, page))
			__pagevec_lru_add(&lru_pvec);
		if (ret) {
			put_pages_list(pages);
			break;
		}
		task_io_account_read(PAGE_CACHE_SIZE);
	}
	pagevec_lru_add(&lru_pvec);
	return ret;
}

EXPORT_SYMBOL(read_cache_pages);
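
/*
 * Illustrative sketch: a filesystem whose ->readpages() wants the page-cache
 * insertion and LRU bookkeeping handled for it can route each page through a
 * filler callback of the signature expected above.  The examplefs_* names
 * below are hypothetical:
 *
 *	static int examplefs_filler(void *data, struct page *page)
 *	{
 *		struct file *filp = data;
 *
 *		return examplefs_readpage(filp, page);
 *	}
 *
 *	static int examplefs_readpages(struct file *filp,
 *			struct address_space *mapping,
 *			struct list_head *pages, unsigned nr_pages)
 *	{
 *		return read_cache_pages(mapping, pages,
 *					examplefs_filler, filp);
 *	}
 */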

static int read_pages(struct address_space *mapping, struct file *filp,
		struct list_head *pages, unsigned nr_pages)
{
	unsigned page_idx;
	struct pagevec lru_pvec;
	int ret;

	if (mapping->a_ops->readpages) {
		ret = mapping->a_ops->readpages(filp, mapping, pages, nr_pages);
		/* Clean up the remaining pages */
		put_pages_list(pages);
		goto out;
	}

	pagevec_init(&lru_pvec, 0);
	for (page_idx = 0; page_idx < nr_pages; page_idx++) {
		struct page *page = list_to_page(pages);
		list_del(&page->lru);
		if (!add_to_page_cache(page, mapping,
					page->index, GFP_KERNEL)) {
			mapping->a_ops->readpage(filp, page);
			if (!pagevec_add(&lru_pvec, page))
				__pagevec_lru_add(&lru_pvec);
		} else
			page_cache_release(page);
	}
	pagevec_lru_add(&lru_pvec);
	ret = 0;
out:
	return ret;
}

/*
 * __do_page_cache_readahead() actually reads a chunk of disk.  It allocates
 * all the pages first, then submits them all for I/O.  This avoids the very
 * bad behaviour which would occur if page allocations are causing VM
 * writeback.  We really don't want to intermingle reads and writes like that.
 *
 * Returns the number of pages requested, or the maximum amount of I/O allowed.
 *
 * do_page_cache_readahead() returns -1 if it encountered request queue
 * congestion.
 */
static int
__do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read,
			unsigned long lookahead_size)
{
	struct inode *inode = mapping->host;
	struct page *page;
	unsigned long end_index;	/* The last page we want to read */
	LIST_HEAD(page_pool);
	int page_idx;
	int ret = 0;
	loff_t isize = i_size_read(inode);

	if (isize == 0)
		goto out;

	end_index = ((isize - 1) >> PAGE_CACHE_SHIFT);

	/*
	 * Preallocate as many pages as we will need.
	 */
	for (page_idx = 0; page_idx < nr_to_read; page_idx++) {
		pgoff_t page_offset = offset + page_idx;

		if (page_offset > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, page_offset);
		rcu_read_unlock();
		if (page)
			continue;

		page = page_cache_alloc_cold(mapping);
		if (!page)
			break;
		page->index = page_offset;
		list_add(&page->lru, &page_pool);
		if (page_idx == nr_to_read - lookahead_size)
			SetPageReadahead(page);
		ret++;
	}

	/*
	 * Now start the IO.  We ignore I/O errors - if the page is not
	 * uptodate then the caller will launch readpage again, and
	 * will then handle the error.
	 */
	if (ret)
		read_pages(mapping, filp, &page_pool, ret);
	BUG_ON(!list_empty(&page_pool));
out:
	return ret;
}
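
/*
 * Illustrative example of the lookahead marker above: with nr_to_read = 16
 * and lookahead_size = 16, PG_readahead is set on the first page of this
 * chunk (page_idx == 0); with lookahead_size = 8 it is set at page_idx == 8,
 * so the next asynchronous readahead is kicked off once the reader is within
 * 8 pages of the end of this window.
 */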

/*
 * Chunk the readahead into 2 megabyte units, so that we don't pin too much
 * memory at once.
 */
int force_page_cache_readahead(struct address_space *mapping, struct file *filp,
		pgoff_t offset, unsigned long nr_to_read)
{
	int ret = 0;

	if (unlikely(!mapping->a_ops->readpage && !mapping->a_ops->readpages))
		return -EINVAL;

	while (nr_to_read) {
		int err;

		unsigned long this_chunk = (2 * 1024 * 1024) / PAGE_CACHE_SIZE;

		if (this_chunk > nr_to_read)
			this_chunk = nr_to_read;
		err = __do_page_cache_readahead(mapping, filp,
						offset, this_chunk, 0);
		if (err < 0) {
			ret = err;
			break;
		}
		ret += err;
		offset += this_chunk;
		nr_to_read -= this_chunk;
	}
	return ret;
}
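
/*
 * Illustrative sketch: an explicit-readahead caller (roughly what the
 * sys_readahead() and fadvise(POSIX_FADV_WILLNEED) paths do) converts a byte
 * range into page units, bounds it, and lets the loop above split it into
 * 2 MB chunks - e.g. with 4k pages a 10 MB request becomes five calls of
 * 512 pages each:
 *
 *	pgoff_t start = offset >> PAGE_CACHE_SHIFT;
 *	pgoff_t end = (offset + count - 1) >> PAGE_CACHE_SHIFT;
 *	unsigned long nr = max_sane_readahead(end - start + 1);
 *
 *	force_page_cache_readahead(mapping, filp, start, nr);
 */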

/*
 * This version skips the IO if the queue is read-congested, and will tell the
 * block layer to abandon the readahead if request allocation would block.
 *
 * force_page_cache_readahead() will ignore queue congestion and will block on
 * request queues.
 */
int do_page_cache_readahead(struct address_space *mapping, struct file *filp,
			pgoff_t offset, unsigned long nr_to_read)
{
	if (bdi_read_congested(mapping->backing_dev_info))
		return -1;

	return __do_page_cache_readahead(mapping, filp, offset, nr_to_read, 0);
}

/*
 * Given a desired number of PAGE_CACHE_SIZE readahead pages, return a
 * sensible upper limit.
 */
unsigned long max_sane_readahead(unsigned long nr)
{
	return min(nr, (node_page_state(numa_node_id(), NR_INACTIVE)
		+ node_page_state(numa_node_id(), NR_FREE_PAGES)) / 2);
}

/*
 * Submit IO for the read-ahead request in file_ra_state.
 */
static unsigned long ra_submit(struct file_ra_state *ra,
		       struct address_space *mapping, struct file *filp)
{
	int actual;

	actual = __do_page_cache_readahead(mapping, filp,
					ra->start, ra->size, ra->async_size);

	return actual;
}

/*
 * Set the initial window size: round the request up to the next power of 2,
 * then scale it up (x4 for small requests, x2 for medium ones) and clamp it
 * to max.
 */
static unsigned long get_init_ra_size(unsigned long size, unsigned long max)
{
	unsigned long newsize = roundup_pow_of_two(size);

	if (newsize <= max / 32)
		newsize = newsize * 4;
	else if (newsize <= max / 4)
		newsize = newsize * 2;
	else
		newsize = max;
	return newsize;
}

/*
 * Get the previous window size, ramp it up, and
 * return it as the new window size.
 */
static unsigned long get_next_ra_size(struct file_ra_state *ra,
						unsigned long max)
{
	unsigned long cur = ra->size;
	unsigned long newsize;

	if (cur < max / 16)
		newsize = 4 * cur;
	else
		newsize = 2 * cur;

	return min(newsize, max);
}
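
/*
 * Worked example (illustrative, assuming 4k pages and a 32 page / 128k max
 * readahead): a first sequential read of 4 pages gets an initial window of
 * get_init_ra_size(4, 32) = 8 pages; as the reader keeps going, the window
 * grows via get_next_ra_size() to 16 and then 32 pages (8 and 16 are both
 * >= max/16, so each step doubles), after which it stays clamped at max.
 */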

/*
 * On-demand readahead design.
 *
 * The fields in struct file_ra_state represent the most-recently-executed
 * readahead attempt:
 *
 *                        |<----- async_size ---------|
 *     |------------------- size -------------------->|
 *     |==================#===========================|
 *     ^start             ^page marked with PG_readahead
 *
 * To overlap application thinking time and disk I/O time, we do
 * `readahead pipelining': Do not wait until the application has consumed all
 * readahead pages and stalled on the missing page at readahead_index;
 * instead, submit an asynchronous readahead I/O as soon as there are
 * only async_size pages left in the readahead window.  Normally async_size
 * will be equal to size, for maximum pipelining.
 *
 * In interleaved sequential reads, concurrent streams on the same fd can
 * invalidate each other's readahead state.  So we flag the new readahead
 * page at (start+size-async_size) with PG_readahead, and use it as a
 * readahead indicator.  The flag won't be set on already cached pages, to
 * avoid the readahead-for-nothing fuss, saving pointless page cache lookups.
 *
 * prev_pos tracks the last visited byte in the _previous_ read request.
 * It should be maintained by the caller, and will be used for detecting
 * small random reads.  Note that the readahead algorithm checks loosely
 * for sequential patterns.  Hence interleaved reads might be served as
 * sequential ones.
 *
 * There is a special case: if the first page which the application tries to
 * read happens to be the first page of the file, it is assumed that a linear
 * read is about to happen and the window is immediately set to the initial
 * size based on the I/O request size and max_readahead.
 *
 * The code ramps up the readahead size aggressively at first, but slows down
 * as it approaches max_readahead.
 */

/*
 * A minimal readahead algorithm for trivial sequential/random reads.
 */
static unsigned long
ondemand_readahead(struct address_space *mapping,
		   struct file_ra_state *ra, struct file *filp,
		   bool hit_readahead_marker, pgoff_t offset,
		   unsigned long req_size)
{
	int	max = ra->ra_pages;	/* max readahead pages */
	pgoff_t prev_offset;
	int	sequential;

	/*
	 * It's the expected callback offset, assume sequential access.
	 * Ramp up sizes, and push forward the readahead window.
	 */
	if (offset && (offset == (ra->start + ra->size - ra->async_size) ||
			offset == (ra->start + ra->size))) {
		ra->start += ra->size;
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	prev_offset = ra->prev_pos >> PAGE_CACHE_SHIFT;
	sequential = offset - prev_offset <= 1UL || req_size > max;

	/*
	 * Standalone, small read.
	 * Read as-is, and do not pollute the readahead state.
	 */
	if (!hit_readahead_marker && !sequential) {
		return __do_page_cache_readahead(mapping, filp,
						offset, req_size, 0);
	}

	/*
	 * Hit a marked page without valid readahead state.
	 * E.g. interleaved reads.
	 * Query the pagecache for async_size, which normally equals the
	 * readahead size.  Ramp it up and use it as the new readahead size.
	 */
	if (hit_readahead_marker) {
		pgoff_t start;

		read_lock_irq(&mapping->tree_lock);
		start = radix_tree_next_hole(&mapping->page_tree, offset, max+1);
		read_unlock_irq(&mapping->tree_lock);

		if (!start || start - offset > max)
			return 0;

		ra->start = start;
		ra->size = start - offset;	/* old async_size */
		ra->size = get_next_ra_size(ra, max);
		ra->async_size = ra->size;
		goto readit;
	}

	/*
	 * It may be one of
	 *	- first read on start of file
	 *	- sequential cache miss
	 *	- oversize random read
	 * Start readahead for it.
	 */
	ra->start = offset;
	ra->size = get_init_ra_size(req_size, max);
	ra->async_size = ra->size > req_size ? ra->size - req_size : ra->size;

readit:
	return ra_submit(ra, mapping, filp);
}

/**
 * page_cache_sync_readahead - generic file readahead
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_sync_readahead() should be called when a cache miss happened:
 * it will submit the read.  The readahead logic may decide to piggyback more
 * pages onto the read request if access patterns suggest it will improve
 * performance.
 */
void page_cache_sync_readahead(struct address_space *mapping,
			       struct file_ra_state *ra, struct file *filp,
			       pgoff_t offset, unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, false, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_sync_readahead);

/**
 * page_cache_async_readahead - file readahead for marked pages
 * @mapping: address_space which holds the pagecache and I/O vectors
 * @ra: file_ra_state which holds the readahead state
 * @filp: passed on to ->readpage() and ->readpages()
 * @page: the page at @offset which has the PG_readahead flag set
 * @offset: start offset into @mapping, in pagecache page-sized units
 * @req_size: hint: total size of the read which the caller is performing in
 *            pagecache pages
 *
 * page_cache_async_readahead() should be called when a page is used which
 * has the PG_readahead flag: this is a marker to suggest that the application
 * has used up enough of the readahead window that we should start pulling in
 * more pages.
 */
void
page_cache_async_readahead(struct address_space *mapping,
			   struct file_ra_state *ra, struct file *filp,
			   struct page *page, pgoff_t offset,
			   unsigned long req_size)
{
	/* no read-ahead */
	if (!ra->ra_pages)
		return;

	/*
	 * Same bit is used for PG_readahead and PG_reclaim.
	 */
	if (PageWriteback(page))
		return;

	ClearPageReadahead(page);

	/*
	 * Defer asynchronous read-ahead on IO congestion.
	 */
	if (bdi_read_congested(mapping->backing_dev_info))
		return;

	/* do read-ahead */
	ondemand_readahead(mapping, ra, filp, true, offset, req_size);
}
EXPORT_SYMBOL_GPL(page_cache_async_readahead);
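
/*
 * Illustrative sketch of how a generic read path drives the two entry points
 * above (this is roughly the structure of do_generic_mapping_read() in
 * mm/filemap.c; the snippet is a simplified sketch, not a verbatim copy):
 *
 *	page = find_get_page(mapping, index);
 *	if (!page) {
 *		page_cache_sync_readahead(mapping, ra, filp,
 *					  index, last_index - index);
 *		page = find_get_page(mapping, index);
 *	} else if (PageReadahead(page)) {
 *		page_cache_async_readahead(mapping, ra, filp, page,
 *					   index, last_index - index);
 *	}
 */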