/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
        void (*invalidatepage)(struct page *, unsigned long);

        invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
        if (!invalidatepage)
                invalidatepage = block_invalidatepage;
#endif
        if (invalidatepage)
                (*invalidatepage)(page, offset);
}
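
/*
 * Illustrative sketch, not part of the original file: a block-backed
 * filesystem can leave ->invalidatepage NULL and get the
 * block_invalidatepage() fallback above, or install its own handler when
 * it keeps extra per-page state.  The "myfs" names are hypothetical.
 */
static void myfs_invalidatepage(struct page *page, unsigned long offset)
{
        /* drop any myfs-private bookkeeping for the truncated tail,
           then let the generic buffer-head code do the real work */
        block_invalidatepage(page, offset);
}

static const struct address_space_operations myfs_aops = {
        /* ... readpage, writepage, etc. ... */
        .invalidatepage = myfs_invalidatepage,
};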

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
        memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
        if (PagePrivate(page))
                do_invalidatepage(page, partial);
}

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
        if (page->mapping != mapping)
                return;

        if (PagePrivate(page))
                do_invalidatepage(page, 0);

        clear_page_dirty(page);
        ClearPageUptodate(page);
        ClearPageMappedToDisk(page);
        remove_from_page_cache(page);
        page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
        int ret;

        if (page->mapping != mapping)
                return 0;

        if (PagePrivate(page) && !try_to_release_page(page, 0))
                return 0;

        ret = remove_mapping(mapping, page);

        return ret;
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that lie between the
 * specified offsets (and zeroing out the partial page if lstart is not
 * page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to avoid as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
                                loff_t lstart, loff_t lend)
{
        const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
        pgoff_t end;
        const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
        struct pagevec pvec;
        pgoff_t next;
        int i;

        if (mapping->nrpages == 0)
                return;

        BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
        end = (lend >> PAGE_CACHE_SHIFT);

        pagevec_init(&pvec, 0);
        next = start;
        while (next <= end &&
               pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index = page->index;

                        if (page_index > end) {
                                next = page_index;
                                break;
                        }

                        if (page_index > next)
                                next = page_index;
                        next++;
                        if (TestSetPageLocked(page))
                                continue;
                        if (PageWriteback(page)) {
                                unlock_page(page);
                                continue;
                        }
                        truncate_complete_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }

        if (partial) {
                struct page *page = find_lock_page(mapping, start - 1);
                if (page) {
                        wait_on_page_writeback(page);
                        truncate_partial_page(page, partial);
                        unlock_page(page);
                        page_cache_release(page);
                }
        }

        next = start;
        for ( ; ; ) {
                cond_resched();
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                        if (next == start)
                                break;
                        next = start;
                        continue;
                }
                if (pvec.pages[0]->index > end) {
                        pagevec_release(&pvec);
                        break;
                }
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        if (page->index > end)
                                break;
                        lock_page(page);
                        wait_on_page_writeback(page);
                        if (page->index > next)
                                next = page->index;
                        next++;
                        truncate_complete_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
        }
}
EXPORT_SYMBOL(truncate_inode_pages_range);
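
/*
 * Illustrative sketch, not part of the original file: how a caller might
 * use truncate_inode_pages_range() to punch the page cache out of a byte
 * range.  Note the BUG_ON above: @lend must address the *last byte* of a
 * page, so an exclusive, page-aligned end offset "holeend" is passed as
 * holeend - 1.  The function and variable names here are hypothetical.
 */
static void example_zap_pagecache_range(struct inode *inode,
                                        loff_t holebegin, loff_t holeend)
{
        struct address_space *mapping = inode->i_mapping;

        /* unmap any ptes first so userspace cannot touch the dying pages */
        unmap_mapping_range(mapping, holebegin, holeend - holebegin, 1);
        truncate_inode_pages_range(mapping, holebegin, holeend - 1);
}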

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
        truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
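
/*
 * Illustrative sketch, not part of the original file: the classic caller
 * is a filesystem's ->delete_inode() method, which must empty the page
 * cache before releasing the on-disk inode.  "myfs" is hypothetical.
 */
static void myfs_delete_inode(struct inode *inode)
{
        /* throw away every cached page for this inode */
        truncate_inode_pages(&inode->i_data, 0);
        /* ... free the inode's on-disk blocks here ... */
        clear_inode(inode);
}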

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        pgoff_t next = start;
        unsigned long ret = 0;
        int i;

        pagevec_init(&pvec, 0);
        while (next <= end &&
               pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t index;
                        int lock_failed;

                        lock_failed = TestSetPageLocked(page);

                        /*
                         * We really shouldn't be looking at the ->index of an
                         * unlocked page.  But we're not allowed to lock these
                         * pages.  So we rely upon nobody altering the ->index
                         * of this (pinned-by-us) page.
                         */
                        index = page->index;
                        if (index > next)
                                next = index;
                        next++;
                        if (lock_failed)
                                continue;

                        if (PageDirty(page) || PageWriteback(page))
                                goto unlock;
                        if (page_mapped(page))
                                goto unlock;
                        ret += invalidate_complete_page(mapping, page);
unlock:
                        unlock_page(page);
                        if (next > end)
                                break;
                }
                pagevec_release(&pvec);
        }
        return ret;
}

unsigned long invalidate_inode_pages(struct address_space *mapping)
{
        return invalidate_mapping_pages(mapping, 0, ~0UL);
}
EXPORT_SYMBOL(invalidate_inode_pages);
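
/*
 * Illustrative sketch, not part of the original file: a best-effort
 * cache shrinker can drop whatever clean, unlocked, unmapped pages an
 * inode holds, in the spirit of the "drop_caches" path.  The function
 * name is hypothetical; the return value is the number of pages freed.
 */
static unsigned long example_drop_clean_pages(struct inode *inode)
{
        /* dirty, locked, mapped and writeback pages are left alone */
        return invalidate_inode_pages(inode->i_mapping);
}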

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_list() has a temp ref on them, or because they're transiently sitting
 * in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
        if (page->mapping != mapping)
                return 0;

        if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
                return 0;

        write_lock_irq(&mapping->tree_lock);
        if (PageDirty(page))
                goto failed;

        BUG_ON(PagePrivate(page));
        __remove_from_page_cache(page);
        write_unlock_irq(&mapping->tree_lock);
        ClearPageUptodate(page);
        page_cache_release(page);	/* pagecache ref */
        return 1;
failed:
        write_unlock_irq(&mapping->tree_lock);
        return 0;
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        pgoff_t next;
        int i;
        int ret = 0;
        int did_range_unmap = 0;
        int wrapped = 0;

        pagevec_init(&pvec, 0);
        next = start;
        while (next <= end && !ret && !wrapped &&
               pagevec_lookup(&pvec, mapping, next,
                        min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
                for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index;
                        int was_dirty;

                        lock_page(page);
                        if (page->mapping != mapping) {
                                unlock_page(page);
                                continue;
                        }
                        page_index = page->index;
                        next = page_index + 1;
                        if (next == 0)
                                wrapped = 1;
                        if (page_index > end) {
                                unlock_page(page);
                                break;
                        }
                        wait_on_page_writeback(page);
                        while (page_mapped(page)) {
                                if (!did_range_unmap) {
                                        /*
                                         * Zap the rest of the file in one hit.
                                         */
                                        unmap_mapping_range(mapping,
                                                (loff_t)page_index << PAGE_CACHE_SHIFT,
                                                (loff_t)(end - page_index + 1)
                                                        << PAGE_CACHE_SHIFT,
                                                0);
                                        did_range_unmap = 1;
                                } else {
                                        /*
                                         * Just zap this page
                                         */
                                        unmap_mapping_range(mapping,
                                                (loff_t)page_index << PAGE_CACHE_SHIFT,
                                                PAGE_CACHE_SIZE, 0);
                                }
                        }
                        was_dirty = test_clear_page_dirty(page);
                        if (!invalidate_complete_page2(mapping, page)) {
                                if (was_dirty)
                                        set_page_dirty(page);
                                ret = -EIO;
                        }
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
        WARN_ON_ONCE(ret);
        return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
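
/*
 * Illustrative sketch, not part of the original file: after a direct-I/O
 * write bypasses the page cache, the generic code invalidates the cached
 * pages covering the written bytes so later buffered reads do not see
 * stale data.  The helper below is a hypothetical paraphrase of that
 * pattern, not the actual mm/filemap.c code.
 */
static int example_dio_invalidate(struct address_space *mapping,
                                  loff_t pos, size_t count)
{
        pgoff_t first = pos >> PAGE_CACHE_SHIFT;
        pgoff_t last = (pos + count - 1) >> PAGE_CACHE_SHIFT;

        return invalidate_inode_pages2_range(mapping, first, last);
}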

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
        return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
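
/*
 * Illustrative sketch, not part of the original file: a network
 * filesystem that decides its cached data is stale can throw the whole
 * mapping away, propagating -EIO if any page resists invalidation.
 * "myfs" is hypothetical.
 */
static int myfs_purge_stale_cache(struct inode *inode)
{
        struct address_space *mapping = inode->i_mapping;

        if (mapping->nrpages == 0)
                return 0;	/* nothing cached, nothing to do */
        return invalidate_inode_pages2(mapping);
}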