/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}

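/*
 * Illustrative sketch (not part of this file): how a filesystem of this
 * era hooks the ->invalidatepage path, assuming the hypothetical names
 * "example_invalidatepage" and "example_aops".  A buffer_head-based
 * filesystem may also leave the hook NULL and rely on the
 * block_invalidatepage() fallback above.
 */
#if 0
static void example_invalidatepage(struct page *page, unsigned long offset)
{
	/* Drop fs-private state for the part of the page past @offset;
	 * for buffer_head users, block_invalidatepage() does the work. */
	block_invalidatepage(page, offset);
}

static const struct address_space_operations example_aops = {
	.invalidatepage	= example_invalidatepage,
	/* ... ->readpage, ->writepage etc. would go here ... */
};
#endif
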
static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}

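/*
 * Worked example (illustrative): with PAGE_CACHE_SIZE == 4096, truncating
 * a file to lstart == 3000 gives partial == 3000 & 4095 == 3000, so the
 * bytes [3000, 4096) of the final page are zeroed above, and any
 * fs-private buffers wholly beyond offset 3000 may then be dropped by
 * do_invalidatepage().
 */
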
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	if (test_clear_page_dirty(page))
		task_io_account_cancelled_write(PAGE_CACHE_SIZE);
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	remove_from_page_cache(page);
	page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

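/*
 * Editorial note: remove_mapping() lives in mm/vmscan.c; it detaches the
 * page from the radix tree only when the refcount shows no other users,
 * and returns non-zero on success, which is what propagates up as the
 * "successfully invalidated" result here.
 */
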
/**
 * truncate_inode_pages_range - truncate range of pages specified by start
 * and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

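/*
 * Illustrative usage sketch (hypothetical caller, not from this file):
 * drop the pagecache covering bytes [start, start + len) of an inode, as
 * a hole-punching path of this era might.  Note the BUG_ON() above: @lend
 * must address the last byte of a page, i.e. the range must end one byte
 * short of a page boundary.
 */
#if 0
static void example_punch_pagecache(struct inode *inode, loff_t start,
				    loff_t len)
{
	/* Caller guarantees start and start + len are page aligned. */
	truncate_inode_pages_range(inode->i_mapping, start, start + len - 1);
}
#endif
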
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);

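/*
 * Illustrative sketch of the classic caller pattern (cf. a simplified
 * vmtruncate() of this era; "example_shrink_file" is a hypothetical
 * name): shrink i_size under i_mutex, unmap the tail from user
 * pagetables, then throw away the now-stale pagecache.
 */
#if 0
static void example_shrink_file(struct inode *inode, loff_t new_size)
{
	struct address_space *mapping = inode->i_mapping;

	i_size_write(inode, new_size);
	/* Unmap everything from new_size onwards (holelen 0 == to EOF). */
	unmap_mapping_range(mapping, new_size + PAGE_SIZE - 1, 0, 1);
	truncate_inode_pages(mapping, new_size);
}
#endif
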
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = TestSetPageLocked(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
	}
	return ret;
}

unsigned long invalidate_inode_pages(struct address_space *mapping)
{
	return invalidate_mapping_pages(mapping, 0, ~0UL);
}
EXPORT_SYMBOL(invalidate_inode_pages);

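/*
 * Illustrative usage sketch, modelled on the POSIX_FADV_DONTNEED handling
 * of this era ("example_dontneed" is a hypothetical name): best-effort
 * eviction of the whole pages inside a byte range.  Dirty, locked, mapped
 * and writeback pages simply survive.
 */
#if 0
static void example_dontneed(struct address_space *mapping,
			     loff_t offset, loff_t endbyte)
{
	/* Only pages lying entirely inside [offset, endbyte] qualify. */
	pgoff_t start_index = (offset + PAGE_CACHE_SIZE - 1)
						>> PAGE_CACHE_SHIFT;
	pgoff_t end_index = endbyte >> PAGE_CACHE_SHIFT;

	if (end_index >= start_index)
		invalidate_mapping_pages(mapping, start_index, end_index);
}
#endif
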
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_list() has a temp ref on them, or because they're transiently sitting
 * in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	write_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(PagePrivate(page));
	__remove_from_page_cache(page);
	write_unlock_irq(&mapping->tree_lock);
	ClearPageUptodate(page);
	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	write_unlock_irq(&mapping->tree_lock);
	return 0;
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !ret && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;
			int was_dirty;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			while (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)page_index<<PAGE_CACHE_SHIFT,
					   (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			was_dirty = test_clear_page_dirty(page);
			if (!invalidate_complete_page2(mapping, page)) {
				if (was_dirty)
					set_page_dirty(page);
				ret = -EIO;
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	WARN_ON_ONCE(ret);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

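/*
 * Illustrative sketch of the classic caller pattern (cf. the direct-IO
 * write path of this era; "example_dio_shootdown" is a hypothetical
 * name): after bytes [pos, pos + count) have been written to disk
 * directly, shoot down any cached pages over that range so subsequent
 * buffered reads see the new data.  A page that cannot be invalidated is
 * why this path returns -EIO rather than BUG()ing.
 */
#if 0
static int example_dio_shootdown(struct address_space *mapping,
				 loff_t pos, size_t count)
{
	pgoff_t first = pos >> PAGE_CACHE_SHIFT;
	pgoff_t last = (pos + count - 1) >> PAGE_CACHE_SHIFT;

	return invalidate_inode_pages2_range(mapping, first, last);
}
#endif
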
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);

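/*
 * Illustrative sketch (hypothetical name "example_revalidate_cache"): a
 * network filesystem that detects a server-side change might drop its
 * entire cached copy this way, unmapping any mapped pages in the process,
 * much as NFS-style cache revalidation of this era does.
 */
#if 0
static int example_revalidate_cache(struct inode *inode)
{
	return invalidate_inode_pages2(inode->i_mapping);
}
#endif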