/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/cleancache.h>
#include "internal.h"

/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: the index of the truncation point
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned long offset)
{
	void (*invalidatepage)(struct page *, unsigned long);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset);
}
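
/*
 * Example (illustrative sketch): a plain block-backed filesystem can
 * leave ->invalidatepage NULL and rely on the block_invalidatepage
 * fallback above; one with private metadata supplies its own handler
 * (names here are hypothetical):
 *
 *	static const struct address_space_operations ex_aops = {
 *		.invalidatepage	= ex_invalidatepage,
 *	};
 */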

static inline void truncate_partial_page(struct page *page, unsigned partial)
{
	zero_user_segment(page, partial, PAGE_CACHE_SIZE);
	cleancache_flush_page(page->mapping, page);
	if (page_has_private(page))
		do_invalidatepage(page, partial);
}

/*
 * This cancels just the dirty bit on the kernel page itself, it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around. It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all. However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM. Can you say "ext3 is horribly ugly"? Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;
		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);
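
/*
 * Example (sketch, per the callers in this file): truncate_complete_page()
 * below invokes this as
 *
 *	cancel_dirty_page(page, PAGE_CACHE_SIZE);
 *
 * so a cancelled full-page write is also subtracted from the task's IO
 * accounting; passing 0 skips the accounting adjustment.
 */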

/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned. It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping. This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	clear_page_mlock(page);
	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}

/*
 * This is for invalidate_mapping_pages(). That function can be called at
 * any time, and is not supposed to throw away dirty pages. But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	clear_page_mlock(page);
	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}

/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
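
/*
 * Example (illustrative sketch): filesystems opt in to hwpoison recovery
 * by pointing ->error_remove_page at this helper from their
 * address_space_operations (other fields elided):
 *
 *	static const struct address_space_operations ex_aops = {
 *		.error_remove_page	= generic_error_remove_page,
 *	};
 */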

/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages. The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}
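
/*
 * Example (sketch): the page lock must be held, as in the pattern
 * invalidate_mapping_pages() uses below:
 *
 *	if (trylock_page(page)) {
 *		invalidate_inode_page(page);
 *		unlock_page(page);
 *	}
 */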

/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate
 *
 * Truncate the page cache, removing the pages that are between
 * the specified offsets (and zeroing out the partial page
 * if lstart is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking. It will not
 * block on page locks and it will not block on writeback. The second pass
 * will wait. This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code. Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	const pgoff_t start = (lstart + PAGE_CACHE_SIZE-1) >> PAGE_CACHE_SHIFT;
	const unsigned partial = lstart & (PAGE_CACHE_SIZE - 1);
	struct pagevec pvec;
	pgoff_t index;
	pgoff_t end;
	int i;

	cleancache_flush_inode(mapping);
	if (mapping->nrpages == 0)
		return;

	BUG_ON((lend & (PAGE_CACHE_SIZE - 1)) != (PAGE_CACHE_SIZE - 1));
	end = (lend >> PAGE_CACHE_SHIFT);

	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			wait_on_page_writeback(page);
			truncate_partial_page(page, partial);
			unlock_page(page);
			page_cache_release(page);
		}
	}

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && pvec.pages[0]->index > end) {
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			lock_page(page);
			WARN_ON(page->index != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}
	cleancache_flush_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
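
/*
 * Example (sketch): the BUG_ON above requires @lend to be the last byte
 * of a page. To drop exactly the first two pages of a mapping:
 *
 *	truncate_inode_pages_range(mapping, 0, 2 * PAGE_CACHE_SIZE - 1);
 */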

/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range. Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
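
/*
 * Example (sketch): the classic caller is inode eviction, which throws
 * away the whole mapping before the inode is freed:
 *
 *	truncate_inode_pages(&inode->i_data, 0);
 */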

/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity. It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
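
/*
 * Example (sketch): an "invalidate whatever is cheap to drop" pass over
 * an entire inode, as the drop_caches machinery does:
 *
 *	invalidate_mapping_pages(inode->i_mapping, 0, -1);
 */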

/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount. We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	clear_page_mlock(page);
	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}

/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_flush_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			lock_page(page);
			WARN_ON(page->index != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   (loff_t)(1 + end - index)
							 << PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	cleancache_flush_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);

/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
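
/*
 * Example (hedged sketch): direct-IO paths use the _range variant to keep
 * the page cache coherent with data written around it, roughly:
 *
 *	ret = invalidate_inode_pages2_range(mapping,
 *			pos >> PAGE_CACHE_SHIFT,
 *			(pos + len - 1) >> PAGE_CACHE_SHIFT);
 */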

/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @oldsize: old file size
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks). This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t oldsize, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);
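
/*
 * Example (illustrative, ex_free_blocks is hypothetical): a filesystem's
 * truncate path updates i_size, trims the page cache, then frees blocks,
 * in that order:
 *
 *	i_size_write(inode, newsize);
 *	truncate_pagecache(inode, oldsize, newsize);
 *	ex_free_blocks(inode, newsize);
 */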

/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize. It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with inode->i_mutex held and before all filesystem specific
 * block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize;

	oldsize = inode->i_size;
	i_size_write(inode, newsize);

	truncate_pagecache(inode, oldsize, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
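
/*
 * Example (sketch): a minimal ->setattr handling ATTR_SIZE, in the style
 * of simple_setattr():
 *
 *	if (attr->ia_valid & ATTR_SIZE)
 *		truncate_setsize(inode, attr->ia_size);
 *	setattr_copy(inode, attr);
 *	mark_inode_dirty(inode);
 */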

/**
 * vmtruncate - unmap mappings "freed" by truncate() syscall
 * @inode: inode of the file used
 * @newsize: file offset to start truncating
 *
 * This function is deprecated and truncate_setsize or truncate_pagecache
 * should be used instead, together with filesystem specific block truncation.
 */
int vmtruncate(struct inode *inode, loff_t newsize)
{
	int error;

	error = inode_newsize_ok(inode, newsize);
	if (error)
		return error;

	truncate_setsize(inode, newsize);
	if (inode->i_op->truncate)
		inode->i_op->truncate(inode);
	return 0;
}
EXPORT_SYMBOL(vmtruncate);

int vmtruncate_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(lstart, PAGE_SIZE);
	loff_t holelen = 1 + lend - holebegin;

	/*
	 * If the underlying filesystem is not going to provide
	 * a way to truncate a range of blocks (punch a hole) -
	 * we should return failure right now.
	 */
	if (!inode->i_op->truncate_range)
		return -ENOSYS;

	mutex_lock(&inode->i_mutex);
	inode_dio_wait(inode);
	unmap_mapping_range(mapping, holebegin, holelen, 1);
	inode->i_op->truncate_range(inode, lstart, lend);
	/* unmap again to remove racily COWed private pages */
	unmap_mapping_range(mapping, holebegin, holelen, 1);
	mutex_unlock(&inode->i_mutex);

	return 0;
}