/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002    akpm@zip.com.au
 *              Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>	/* memclear_highpage_flush */
#include <linux/pagevec.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
        void (*invalidatepage)(struct page *, unsigned long);

        invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
        if (!invalidatepage)
                invalidatepage = block_invalidatepage;
#endif
        if (invalidatepage)
                (*invalidatepage)(page, offset);
}
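
/*
 * Example (illustrative): a filesystem opts in to the callback above by
 * setting ->invalidatepage in its address_space_operations, or leaves it
 * NULL to fall back to block_invalidatepage().  "foo_aops" and
 * "foo_invalidatepage" below are made-up names:
 *
 *        static const struct address_space_operations foo_aops = {
 *                .invalidatepage = foo_invalidatepage,
 *        };
 */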

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
        memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE - partial);
        if (PagePrivate(page))
                do_invalidatepage(page, partial);
}

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bale out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
        if (page->mapping != mapping)
                return;

        if (PagePrivate(page))
                do_invalidatepage(page, 0);

        clear_page_dirty(page);
        ClearPageUptodate(page);
        ClearPageMappedToDisk(page);
        remove_from_page_cache(page);
        page_cache_release(page);       /* pagecache ref */
}

/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
        int ret;

        if (page->mapping != mapping)
                return 0;

        if (PagePrivate(page) && !try_to_release_page(page, 0))
                return 0;

        ret = remove_mapping(mapping, page);
        ClearPageUptodate(page);

        return ret;
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between the
 * specified offsets (and zeroing out the partial page if lstart is not
 * page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
                                loff_t lstart, loff_t lend)
{
        const pgoff_t start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
        pgoff_t end;
        const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
        struct pagevec pvec;
        pgoff_t next;
        int i;

        if (mapping->nrpages == 0)
                return;

        BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
        end = (lend >> PAGE_CACHE_SHIFT);

        pagevec_init(&pvec, 0);
        next = start;
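
        /*
         * Pass 1 (non-blocking): skip any page whose lock cannot be taken
         * immediately and any page under writeback; the blocking second
         * pass below picks those up.
         */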
        while (next <= end &&
               pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index = page->index;

                        if (page_index > end) {
                                next = page_index;
                                break;
                        }

                        if (page_index > next)
                                next = page_index;
                        next++;
                        if (TestSetPageLocked(page))
                                continue;
                        if (PageWriteback(page)) {
                                unlock_page(page);
                                continue;
                        }
                        truncate_complete_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }

        if (partial) {
                struct page *page = find_lock_page(mapping, start - 1);
                if (page) {
                        wait_on_page_writeback(page);
                        truncate_partial_page(page, partial);
                        unlock_page(page);
                        page_cache_release(page);
                }
        }
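
        /*
         * Pass 2 (blocking): wait on page locks and writeback, and keep
         * rescanning from 'start' until no pages remain in the range.
         */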
        next = start;
        for ( ; ; ) {
                cond_resched();
                if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                        if (next == start)
                                break;
                        next = start;
                        continue;
                }
                if (pvec.pages[0]->index > end) {
                        pagevec_release(&pvec);
                        break;
                }
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];

                        if (page->index > end)
                                break;
                        lock_page(page);
                        wait_on_page_writeback(page);
                        if (page->index > next)
                                next = page->index;
                        next++;
                        truncate_complete_page(mapping, page);
                        unlock_page(page);
                }
                pagevec_release(&pvec);
        }
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
        truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
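
/*
 * Example (illustrative sketch): a filesystem's truncate path typically
 * shrinks the pagecache before freeing the on-disk blocks, roughly:
 *
 *        truncate_inode_pages(inode->i_mapping, new_size);
 *        <free the blocks beyond new_size on disk>
 *
 * "new_size" stands in for whatever new i_size the caller has already set;
 * the exact sequence varies between filesystems.
 */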

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
                                pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        pgoff_t next = start;
        unsigned long ret = 0;
        int i;

        pagevec_init(&pvec, 0);
        while (next <= end &&
               pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
                for (i = 0; i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t index;
                        int lock_failed;

                        lock_failed = TestSetPageLocked(page);

                        /*
                         * We really shouldn't be looking at the ->index of an
                         * unlocked page.  But we're not allowed to lock these
                         * pages.  So we rely upon nobody altering the ->index
                         * of this (pinned-by-us) page.
                         */
                        index = page->index;
                        if (index > next)
                                next = index;
                        next++;
                        if (lock_failed)
                                continue;

                        if (PageDirty(page) || PageWriteback(page))
                                goto unlock;
                        if (page_mapped(page))
                                goto unlock;
                        ret += invalidate_complete_page(mapping, page);
unlock:
                        unlock_page(page);
                        if (next > end)
                                break;
                }
                pagevec_release(&pvec);
        }
        return ret;
}
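
/*
 * Example (illustrative): callers that just want to drop clean, unused
 * pagecache over a page-index range, such as the POSIX_FADV_DONTNEED
 * fadvise path, use this non-blocking form:
 *
 *        invalidate_mapping_pages(mapping, start_index, end_index);
 *
 * Dirty, locked, mapped and under-writeback pages are simply skipped.
 */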

unsigned long invalidate_inode_pages(struct address_space *mapping)
{
        return invalidate_mapping_pages(mapping, 0, ~0UL);
}
EXPORT_SYMBOL(invalidate_inode_pages);

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
                                  pgoff_t start, pgoff_t end)
{
        struct pagevec pvec;
        pgoff_t next;
        int i;
        int ret = 0;
        int did_range_unmap = 0;
        int wrapped = 0;

        pagevec_init(&pvec, 0);
        next = start;
        while (next <= end && !ret && !wrapped &&
                pagevec_lookup(&pvec, mapping, next,
                        min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
                for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
                        struct page *page = pvec.pages[i];
                        pgoff_t page_index;
                        int was_dirty;

                        lock_page(page);
                        if (page->mapping != mapping) {
                                unlock_page(page);
                                continue;
                        }
                        page_index = page->index;
                        next = page_index + 1;
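                        /*
                         * 'next' wrapped past the end of the index space;
                         * remember that so the outer loop stops after this
                         * pagevec is finished.
                         */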
                        if (next == 0)
                                wrapped = 1;
                        if (page_index > end) {
                                unlock_page(page);
                                break;
                        }
                        wait_on_page_writeback(page);
                        while (page_mapped(page)) {
                                if (!did_range_unmap) {
                                        /*
                                         * Zap the rest of the file in one hit.
                                         */
                                        unmap_mapping_range(mapping,
                                            (loff_t)page_index << PAGE_CACHE_SHIFT,
                                            (loff_t)(end - page_index + 1)
                                                        << PAGE_CACHE_SHIFT,
                                            0);
                                        did_range_unmap = 1;
                                } else {
                                        /*
                                         * Just zap this page
                                         */
                                        unmap_mapping_range(mapping,
                                            (loff_t)page_index << PAGE_CACHE_SHIFT,
                                            PAGE_CACHE_SIZE, 0);
                                }
                        }
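
                        /*
                         * Clear the dirty bit so that remove_mapping() (via
                         * invalidate_complete_page()) will accept the page,
                         * and restore it if the invalidation fails.
                         */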
                        was_dirty = test_clear_page_dirty(page);
                        if (!invalidate_complete_page(mapping, page)) {
                                if (was_dirty)
                                        set_page_dirty(page);
                                ret = -EIO;
                        }
                        unlock_page(page);
                }
                pagevec_release(&pvec);
                cond_resched();
        }
        return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
        return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
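
/*
 * Example (illustrative): callers that must not see stale pagecache, such
 * as direct-I/O paths or a network filesystem revalidating an inode, force
 * the cache out and treat -EIO as "some pages could not be dropped":
 *
 *        if (invalidate_inode_pages2(inode->i_mapping))
 *                <fall back or report the error>
 */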