/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	akpm@zip.com.au
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);

	invalidatepage = page->mapping->a_ops->invalidatepage;
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
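
/*
 * Example (a sketch, not part of this file): a block-based filesystem
 * typically either leaves ->invalidatepage NULL, so the fallback to
 * block_invalidatepage above kicks in, or wires it up explicitly in its
 * address_space_operations.  "foo_aops" below is a hypothetical name used
 * purely for illustration:
 *
 *	static struct address_space_operations foo_aops = {
 *		...
 *		.invalidatepage	= block_invalidatepage,
 *	};
 */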

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	memclear_highpage_flush(page, partial, PAGE_CACHE_SIZE-partial);
	if (PagePrivate(page))
		do_invalidatepage(page, partial);
}

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes anonymous.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_nopage().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_inode_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static void
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return;

	if (PagePrivate(page))
		do_invalidatepage(page, 0);

	clear_page_dirty(page);
	ClearPageUptodate(page);
	ClearPageMappedToDisk(page);
	remove_from_page_cache(page);
	page_cache_release(page);	/* pagecache ref */
}

/*
 * This is for invalidate_inode_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (PagePrivate(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);
	ClearPageUptodate(page);

	return ret;
}

/**
 * truncate_inode_pages_range - truncate range of pages specified by start
 * and end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected
 * region.  The first pass will remove most pages, so the search cost of the
 * second pass is low.
 *
 * When looking at page->index outside the page lock we need to be careful to
 * copy it into a local to avoid races (it could change at any time).
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
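/*
 * Worked example (an illustration added here, assuming PAGE_CACHE_SIZE ==
 * 4096 and hence PAGE_CACHE_SHIFT == 12): for lstart == 10000,
 *
 *	start   = (10000 + 4095) >> 12 = 3
 *	partial = 10000 & 4095         = 1808
 *
 * so pages with index >= 3 are removed outright, while page 2 (the page
 * containing byte 10000) is kept and has bytes 1808..4095 zeroed by
 * truncate_partial_page().
 */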
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	pgoff_t end;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t next;
	int i;

	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	/* Pass 1: nonblocking - skip locked and under-writeback pages */
	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end &&
	       pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index = page->index;

			if (page_index > end) {
				next = page_index;
				break;
			}

			if (page_index > next)
				next = page_index;
			next++;
			if (TestSetPageLocked(page))
				continue;
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}

	/* Zero out the part of the partial page beyond the truncation point */
	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	/* Pass 2: blocking - wait for locks and writeback, catch stragglers */
	next = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
			if (next == start)
				break;
			next = start;
			continue;
		}
		if (pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			if (page->index > end)
				break;
			lock_page(page);
			wait_on_page_writeback(page);
			if (page->index > next)
				next = page->index;
			next++;
			truncate_complete_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
	}
}
EXPORT_SYMBOL(truncate_inode_pages_range);

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
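
/*
 * Example (a sketch): in this kernel era the usual caller is the truncate
 * path itself.  vmtruncate() in mm/memory.c does, roughly:
 *
 *	i_size_write(inode, offset);
 *	unmap_mapping_range(mapping, offset + PAGE_SIZE - 1, 0, 1);
 *	truncate_inode_pages(mapping, offset);
 *
 * i.e. i_size is updated and user mappings are zapped before the pagecache
 * beyond the new EOF is thrown away.
 */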

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function removes only the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
				pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	unsigned long ret = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (next <= end &&
			pagevec_lookup(&pvec, mapping, next, PAGEVEC_SIZE)) {
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t index;
			int lock_failed;

			lock_failed = TestSetPageLocked(page);

			/*
			 * We really shouldn't be looking at the ->index of an
			 * unlocked page.  But we're not allowed to lock these
			 * pages.  So we rely upon nobody altering the ->index
			 * of this (pinned-by-us) page.
			 */
			index = page->index;
			if (index > next)
				next = index;
			next++;
			if (lock_failed)
				continue;

			if (PageDirty(page) || PageWriteback(page))
				goto unlock;
			if (page_mapped(page))
				goto unlock;
			ret += invalidate_complete_page(mapping, page);
unlock:
			unlock_page(page);
			if (next > end)
				break;
		}
		pagevec_release(&pvec);
	}
	return ret;
}

unsigned long invalidate_inode_pages(struct address_space *mapping)
{
	return invalidate_mapping_pages(mapping, 0, ~0UL);
}
EXPORT_SYMBOL(invalidate_inode_pages);
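
/*
 * Example (a sketch): a typical caller is block-device cache invalidation.
 * invalidate_bdev() in fs/buffer.c ends up doing, roughly:
 *
 *	invalidate_inode_pages(bdev->bd_inode->i_mapping);
 *
 * which drops whatever clean, unmapped pagecache it can and silently
 * leaves the rest alone.
 */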

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next;
	int i;
	int ret = 0;
	int did_range_unmap = 0;
	int wrapped = 0;

	pagevec_init(&pvec, 0);
	next = start;
	while (next <= end && !ret && !wrapped &&
		pagevec_lookup(&pvec, mapping, next,
			min(end - next, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		for (i = 0; !ret && i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];
			pgoff_t page_index;
			int was_dirty;

			lock_page(page);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			page_index = page->index;
			next = page_index + 1;
			if (next == 0)
				wrapped = 1;
			if (page_index > end) {
				unlock_page(page);
				break;
			}
			wait_on_page_writeback(page);
			while (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  (loff_t)(end - page_index + 1)
							<< PAGE_CACHE_SHIFT,
					  0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					  (loff_t)page_index<<PAGE_CACHE_SHIFT,
					  PAGE_CACHE_SIZE, 0);
				}
			}
			was_dirty = test_clear_page_dirty(page);
			if (!invalidate_complete_page(mapping, page)) {
				if (was_dirty)
					set_page_dirty(page);
				ret = -EIO;
			}
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EIO if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
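
/*
 * Example (a sketch): the classic user of the pages2 variants is direct I/O.
 * generic_file_direct_IO() in mm/filemap.c invalidates the written range
 * roughly like this after a direct write, so stale pagecache cannot shadow
 * the data that went straight to disk:
 *
 *	invalidate_inode_pages2_range(mapping,
 *			offset >> PAGE_CACHE_SHIFT, end);
 *
 * where "end" is the index of the last page covered by the write.
 */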