/*
 * mm/truncate.c - code for taking down pages from address_spaces
 *
 * Copyright (C) 2002, Linus Torvalds
 *
 * 10Sep2002	Andrew Morton
 *		Initial version.
 */

#include <linux/kernel.h>
#include <linux/backing-dev.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/export.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/pagevec.h>
#include <linux/task_io_accounting_ops.h>
#include <linux/buffer_head.h>	/* grr. try_to_release_page,
				   do_invalidatepage */
#include <linux/cleancache.h>
#include "internal.h"
/**
 * do_invalidatepage - invalidate part or all of a page
 * @page: the page which is affected
 * @offset: start of the range to invalidate
 * @length: length of the range to invalidate
 *
 * do_invalidatepage() is called when all or part of the page has become
 * invalidated by a truncate operation.
 *
 * do_invalidatepage() does not have to release all buffers, but it must
 * ensure that no dirty buffer is left outside @offset and that no I/O
 * is underway against any of the blocks which are outside the truncation
 * point, because the caller is about to free (and possibly reuse) those
 * blocks on-disk.
 */
void do_invalidatepage(struct page *page, unsigned int offset,
		       unsigned int length)
{
	void (*invalidatepage)(struct page *, unsigned int, unsigned int);

	invalidatepage = page->mapping->a_ops->invalidatepage;
#ifdef CONFIG_BLOCK
	if (!invalidatepage)
		invalidatepage = block_invalidatepage;
#endif
	if (invalidatepage)
		(*invalidatepage)(page, offset, length);
}
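
/*
 * Illustrative sketch (not part of this file): how a filesystem might
 * wire up ->invalidatepage so that do_invalidatepage() finds it.  The
 * "myfs" names are hypothetical.  A buffer_head-based filesystem can
 * simply delegate to block_invalidatepage() as shown, after tearing down
 * any private per-page state for the invalidated byte range.
 */
#if 0
static void myfs_invalidatepage(struct page *page, unsigned int offset,
				unsigned int length)
{
	/* drop fs-private state attached to bytes [offset, offset+length) */
	block_invalidatepage(page, offset, length);
}

static const struct address_space_operations myfs_aops = {
	.invalidatepage	= myfs_invalidatepage,
	/* ... other methods ... */
};
#endif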
/*
 * This cancels just the dirty bit on the kernel page itself; it
 * does NOT actually remove dirty bits on any mmap's that may be
 * around.  It also leaves the page tagged dirty, so any sync
 * activity will still find it on the dirty lists, and in particular,
 * clear_page_dirty_for_io() will still look at the dirty bits in
 * the VM.
 *
 * Doing this should *normally* only ever be done when a page
 * is truncated, and is not actually mapped anywhere at all.  However,
 * fs/buffer.c does this when it notices that somebody has cleaned
 * out all the buffers on a page without actually doing it through
 * the VM.  Can you say "ext3 is horribly ugly"?  Thought you could.
 */
void cancel_dirty_page(struct page *page, unsigned int account_size)
{
	if (TestClearPageDirty(page)) {
		struct address_space *mapping = page->mapping;

		if (mapping && mapping_cap_account_dirty(mapping)) {
			dec_zone_page_state(page, NR_FILE_DIRTY);
			dec_bdi_stat(mapping->backing_dev_info,
					BDI_RECLAIMABLE);
			if (account_size)
				task_io_account_cancelled_write(account_size);
		}
	}
}
EXPORT_SYMBOL(cancel_dirty_page);
/*
 * If truncate cannot remove the fs-private metadata from the page, the page
 * becomes orphaned.  It will be left on the LRU and may even be mapped into
 * user pagetables if we're racing with filemap_fault().
 *
 * We need to bail out if page->mapping is no longer equal to the original
 * mapping.  This happens a) when the VM reclaimed the page while we waited on
 * its lock, b) when a concurrent invalidate_mapping_pages got there first and
 * c) when tmpfs swizzles a page between a tmpfs inode and swapper_space.
 */
static int
truncate_complete_page(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return -EIO;

	if (page_has_private(page))
		do_invalidatepage(page, 0, PAGE_CACHE_SIZE);

	cancel_dirty_page(page, PAGE_CACHE_SIZE);

	ClearPageMappedToDisk(page);
	delete_from_page_cache(page);
	return 0;
}
/*
 * This is for invalidate_mapping_pages().  That function can be called at
 * any time, and is not supposed to throw away dirty pages.  But pages can
 * be marked dirty at any time too, so use remove_mapping which safely
 * discards clean, unused pages.
 *
 * Returns non-zero if the page was successfully invalidated.
 */
static int
invalidate_complete_page(struct address_space *mapping, struct page *page)
{
	int ret;

	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, 0))
		return 0;

	ret = remove_mapping(mapping, page);

	return ret;
}

int truncate_inode_page(struct address_space *mapping, struct page *page)
{
	if (page_mapped(page)) {
		unmap_mapping_range(mapping,
				   (loff_t)page->index << PAGE_CACHE_SHIFT,
				   PAGE_CACHE_SIZE, 0);
	}
	return truncate_complete_page(mapping, page);
}
/*
 * Used to get rid of pages on hardware memory corruption.
 */
int generic_error_remove_page(struct address_space *mapping, struct page *page)
{
	if (!mapping)
		return -EINVAL;
	/*
	 * Only punch for normal data pages for now.
	 * Handling other types like directories would need more auditing.
	 */
	if (!S_ISREG(mapping->host->i_mode))
		return -EIO;
	return truncate_inode_page(mapping, page);
}
EXPORT_SYMBOL(generic_error_remove_page);
/*
 * Safely invalidate one page from its pagecache mapping.
 * It only drops clean, unused pages.  The page must be locked.
 *
 * Returns 1 if the page is successfully invalidated, otherwise 0.
 */
int invalidate_inode_page(struct page *page)
{
	struct address_space *mapping = page_mapping(page);
	if (!mapping)
		return 0;
	if (PageDirty(page) || PageWriteback(page))
		return 0;
	if (page_mapped(page))
		return 0;
	return invalidate_complete_page(mapping, page);
}
/**
 * truncate_inode_pages_range - truncate range of pages specified by start & end byte offsets
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 * @lend: offset to which to truncate (inclusive)
 *
 * Truncate the page cache, removing the pages that are between
 * specified offsets (and zeroing out partial pages
 * if lstart or lend + 1 is not page aligned).
 *
 * Truncate takes two passes - the first pass is nonblocking.  It will not
 * block on page locks and it will not block on writeback.  The second pass
 * will wait.  This is to prevent as much IO as possible in the affected region.
 * The first pass will remove most pages, so the search cost of the second pass
 * is low.
 *
 * We pass down the cache-hot hint to the page freeing code.  Even if the
 * mapping is large, it is probably the case that the final pages are the most
 * recently touched, and freeing happens in ascending file offset order.
 *
 * Note that since ->invalidatepage() accepts a range to invalidate,
 * truncate_inode_pages_range is able to handle cases where lend + 1 is not
 * properly page aligned.
 */
void truncate_inode_pages_range(struct address_space *mapping,
				loff_t lstart, loff_t lend)
{
	pgoff_t		start;		/* inclusive */
	pgoff_t		end;		/* exclusive */
	unsigned int	partial_start;	/* inclusive */
	unsigned int	partial_end;	/* exclusive */
	struct pagevec	pvec;
	pgoff_t		index;
	int		i;

	cleancache_invalidate_inode(mapping);
	if (mapping->nrpages == 0)
		return;

	/* Offsets within partial pages */
	partial_start = lstart & (PAGE_CACHE_SIZE - 1);
	partial_end = (lend + 1) & (PAGE_CACHE_SIZE - 1);

	/*
	 * 'start' and 'end' always covers the range of pages to be fully
	 * truncated. Partial pages are covered with 'partial_start' at the
	 * start of the range and 'partial_end' at the end of the range.
	 * Note that 'end' is exclusive while 'lend' is inclusive.
	 */
	start = (lstart + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
	if (lend == -1)
		/*
		 * lend == -1 indicates end-of-file so we have to set 'end'
		 * to the highest possible pgoff_t and since the type is
		 * unsigned we're using -1.
		 */
		end = -1;
	else
		end = (lend + 1) >> PAGE_CACHE_SHIFT;

	pagevec_init(&pvec, 0);
	index = start;
	while (index < end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index >= end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			if (PageWriteback(page)) {
				unlock_page(page);
				continue;
			}
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}

	if (partial_start) {
		struct page *page = find_lock_page(mapping, start - 1);
		if (page) {
			unsigned int top = PAGE_CACHE_SIZE;
			if (start > end) {
				/* Truncation within a single page */
				top = partial_end;
				partial_end = 0;
			}
			wait_on_page_writeback(page);
			zero_user_segment(page, partial_start, top);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, partial_start,
						  top - partial_start);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	if (partial_end) {
		struct page *page = find_lock_page(mapping, end);
		if (page) {
			wait_on_page_writeback(page);
			zero_user_segment(page, 0, partial_end);
			cleancache_invalidate_page(mapping, page);
			if (page_has_private(page))
				do_invalidatepage(page, 0,
						  partial_end);
			unlock_page(page);
			page_cache_release(page);
		}
	}
	/*
	 * If the truncation happened within a single page no pages
	 * will be released, just zeroed, so we can bail out now.
	 */
	if (start >= end)
		return;

	index = start;
	for ( ; ; ) {
		cond_resched();
		if (!pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE))) {
			if (index == start)
				break;
			index = start;
			continue;
		}
		if (index == start && pvec.pages[0]->index >= end) {
			pagevec_release(&pvec);
			break;
		}
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index >= end)
				break;

			lock_page(page);
			WARN_ON(page->index != index);
			wait_on_page_writeback(page);
			truncate_inode_page(mapping, page);
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		index++;
	}
	cleancache_invalidate_inode(mapping);
}
EXPORT_SYMBOL(truncate_inode_pages_range);
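
/*
 * Worked example (illustrative, assuming PAGE_CACHE_SIZE == 4096):
 * truncate_inode_pages_range(mapping, 1000, 8191) yields
 *	partial_start = 1000 & 4095         = 1000
 *	partial_end   = 8192 & 4095         = 0
 *	start         = (1000 + 4095) >> 12 = 1
 *	end           = 8192 >> 12          = 2
 * so page 1 is removed outright, page 0 is zeroed from byte 1000 to the
 * end of the page, and there is no partial page at the tail.  If the
 * whole range falls inside one page (e.g. lstart 1000, lend 2999), start
 * ends up greater than end and only the single-page zeroing path runs.
 */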
/**
 * truncate_inode_pages - truncate *all* the pages from an offset
 * @mapping: mapping to truncate
 * @lstart: offset from which to truncate
 *
 * Called under (and serialised by) inode->i_mutex.
 *
 * Note: When this function returns, there can be a page in the process of
 * deletion (inside __delete_from_page_cache()) in the specified range.  Thus
 * mapping->nrpages can be non-zero when this function returns even after
 * truncation of the whole mapping.
 */
void truncate_inode_pages(struct address_space *mapping, loff_t lstart)
{
	truncate_inode_pages_range(mapping, lstart, (loff_t)-1);
}
EXPORT_SYMBOL(truncate_inode_pages);
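
/*
 * Illustrative sketch (not part of this file): the common caller pattern.
 * Many simple filesystems drop the whole pagecache from ->evict_inode
 * like this; "myfs" is a hypothetical name.
 */
#if 0
static void myfs_evict_inode(struct inode *inode)
{
	truncate_inode_pages(&inode->i_data, 0);	/* drop every cached page */
	clear_inode(inode);
}
#endif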
/**
 * invalidate_mapping_pages - Invalidate all the unlocked pages of one inode
 * @mapping: the address_space which holds the pages to invalidate
 * @start: the offset 'from' which to invalidate
 * @end: the offset 'to' which to invalidate (inclusive)
 *
 * This function only removes the unlocked pages; if you want to
 * remove all the pages of one inode, you must call truncate_inode_pages.
 *
 * invalidate_mapping_pages() will not block on IO activity.  It will not
 * invalidate pages which are dirty, locked, under writeback or mapped into
 * pagetables.
 */
unsigned long invalidate_mapping_pages(struct address_space *mapping,
		pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index = start;
	unsigned long ret;
	unsigned long count = 0;
	int i;

	/*
	 * Note: this function may get called on a shmem/tmpfs mapping:
	 * pagevec_lookup() might then return 0 prematurely (because it
	 * got a gangful of swap entries); but it's hardly worth worrying
	 * about - it can rarely have anything to free from such a mapping
	 * (most pages are dirty), and already skips over any difficulties.
	 */

	pagevec_init(&pvec, 0);
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			if (!trylock_page(page))
				continue;
			WARN_ON(page->index != index);
			ret = invalidate_inode_page(page);
			unlock_page(page);
			/*
			 * Invalidation is a hint that the page is no longer
			 * of interest, so try to speed up its reclaim.
			 */
			if (!ret)
				deactivate_page(page);
			count += ret;
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	return count;
}
EXPORT_SYMBOL(invalidate_mapping_pages);
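
/*
 * Illustrative sketch (not part of this file): the kind of caller this is
 * written for.  After writing a range back, drop whatever clean, unmapped
 * pages remain, much as POSIX_FADV_DONTNEED does.  "myfs_drop_clean_cache"
 * is a hypothetical helper.
 */
#if 0
static void myfs_drop_clean_cache(struct address_space *mapping,
				  loff_t offset, loff_t len)
{
	pgoff_t start = offset >> PAGE_CACHE_SHIFT;
	pgoff_t end = (offset + len - 1) >> PAGE_CACHE_SHIFT;

	if (mapping->nrpages)
		invalidate_mapping_pages(mapping, start, end);
}
#endif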
/*
 * This is like invalidate_complete_page(), except it ignores the page's
 * refcount.  We do this because invalidate_inode_pages2() needs stronger
 * invalidation guarantees, and cannot afford to leave pages behind because
 * shrink_page_list() has a temp ref on them, or because they're transiently
 * sitting in the lru_cache_add() pagevecs.
 */
static int
invalidate_complete_page2(struct address_space *mapping, struct page *page)
{
	if (page->mapping != mapping)
		return 0;

	if (page_has_private(page) && !try_to_release_page(page, GFP_KERNEL))
		return 0;

	spin_lock_irq(&mapping->tree_lock);
	if (PageDirty(page))
		goto failed;

	BUG_ON(page_has_private(page));
	__delete_from_page_cache(page);
	spin_unlock_irq(&mapping->tree_lock);
	mem_cgroup_uncharge_cache_page(page);

	if (mapping->a_ops->freepage)
		mapping->a_ops->freepage(page);

	page_cache_release(page);	/* pagecache ref */
	return 1;
failed:
	spin_unlock_irq(&mapping->tree_lock);
	return 0;
}

static int do_launder_page(struct address_space *mapping, struct page *page)
{
	if (!PageDirty(page))
		return 0;
	if (page->mapping != mapping || mapping->a_ops->launder_page == NULL)
		return 0;
	return mapping->a_ops->launder_page(page);
}
/**
 * invalidate_inode_pages2_range - remove range of pages from an address_space
 * @mapping: the address_space
 * @start: the page offset 'from' which to invalidate
 * @end: the page offset 'to' which to invalidate (inclusive)
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2_range(struct address_space *mapping,
				  pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t index;
	int i;
	int ret = 0;
	int ret2 = 0;
	int did_range_unmap = 0;

	cleancache_invalidate_inode(mapping);
	pagevec_init(&pvec, 0);
	index = start;
	while (index <= end && pagevec_lookup(&pvec, mapping, index,
			min(end - index, (pgoff_t)PAGEVEC_SIZE - 1) + 1)) {
		mem_cgroup_uncharge_start();
		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			/* We rely upon deletion not changing page->index */
			index = page->index;
			if (index > end)
				break;

			lock_page(page);
			WARN_ON(page->index != index);
			if (page->mapping != mapping) {
				unlock_page(page);
				continue;
			}
			wait_on_page_writeback(page);
			if (page_mapped(page)) {
				if (!did_range_unmap) {
					/*
					 * Zap the rest of the file in one hit.
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   (loff_t)(1 + end - index)
							 << PAGE_CACHE_SHIFT,
					    0);
					did_range_unmap = 1;
				} else {
					/*
					 * Just zap this page
					 */
					unmap_mapping_range(mapping,
					   (loff_t)index << PAGE_CACHE_SHIFT,
					   PAGE_CACHE_SIZE, 0);
				}
			}
			BUG_ON(page_mapped(page));
			ret2 = do_launder_page(mapping, page);
			if (ret2 == 0) {
				if (!invalidate_complete_page2(mapping, page))
					ret2 = -EBUSY;
			}
			if (ret2 < 0)
				ret = ret2;
			unlock_page(page);
		}
		pagevec_release(&pvec);
		mem_cgroup_uncharge_end();
		cond_resched();
		index++;
	}
	cleancache_invalidate_inode(mapping);
	return ret;
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2_range);
/**
 * invalidate_inode_pages2 - remove all pages from an address_space
 * @mapping: the address_space
 *
 * Any pages which are found to be mapped into pagetables are unmapped prior to
 * invalidation.
 *
 * Returns -EBUSY if any pages could not be invalidated.
 */
int invalidate_inode_pages2(struct address_space *mapping)
{
	return invalidate_inode_pages2_range(mapping, 0, -1);
}
EXPORT_SYMBOL_GPL(invalidate_inode_pages2);
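
/*
 * Illustrative sketch (not part of this file): the usual pattern around
 * direct I/O.  Dirty pagecache for the range is flushed first, then the
 * range is invalidated so the pagecache cannot serve stale data after
 * the direct write.  "myfs_flush_and_invalidate" is a hypothetical helper.
 */
#if 0
static int myfs_flush_and_invalidate(struct address_space *mapping,
				     loff_t pos, size_t count)
{
	int err;

	err = filemap_write_and_wait_range(mapping, pos, pos + count - 1);
	if (err)
		return err;
	return invalidate_inode_pages2_range(mapping,
			pos >> PAGE_CACHE_SHIFT,
			(pos + count - 1) >> PAGE_CACHE_SHIFT);
}
#endif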
/**
 * truncate_pagecache - unmap and remove pagecache that has been truncated
 * @inode: inode
 * @oldsize: old file size
 * @newsize: new file size
 *
 * inode's new i_size must already be written before truncate_pagecache
 * is called.
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache(struct inode *inode, loff_t oldsize, loff_t newsize)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t holebegin = round_up(newsize, PAGE_SIZE);

	/*
	 * unmap_mapping_range is called twice, first simply for
	 * efficiency so that truncate_inode_pages does fewer
	 * single-page unmaps.  However after this first call, and
	 * before truncate_inode_pages finishes, it is possible for
	 * private pages to be COWed, which remain after
	 * truncate_inode_pages finishes, hence the second
	 * unmap_mapping_range call must be made for correctness.
	 */
	unmap_mapping_range(mapping, holebegin, 0, 1);
	truncate_inode_pages(mapping, newsize);
	unmap_mapping_range(mapping, holebegin, 0, 1);
}
EXPORT_SYMBOL(truncate_pagecache);
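
/*
 * Illustrative sketch (not part of this file): the ordering the comment
 * above asks for.  The new i_size is published first, the pagecache is
 * torn down, and only then are on-disk blocks freed.  "myfs_truncate" and
 * myfs_free_blocks_beyond() are hypothetical.
 */
#if 0
static void myfs_truncate(struct inode *inode, loff_t newsize)
{
	loff_t oldsize = i_size_read(inode);

	i_size_write(inode, newsize);
	truncate_pagecache(inode, oldsize, newsize);
	myfs_free_blocks_beyond(inode, newsize);	/* free blocks past EOF */
}
#endif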
/**
 * truncate_setsize - update inode and pagecache for a new file size
 * @inode: inode
 * @newsize: new file size
 *
 * truncate_setsize updates i_size and performs pagecache truncation (if
 * necessary) to @newsize.  It will typically be called from the filesystem's
 * setattr function when ATTR_SIZE is passed in.
 *
 * Must be called with inode_mutex held and before all filesystem specific
 * block truncation has been performed.
 */
void truncate_setsize(struct inode *inode, loff_t newsize)
{
	loff_t oldsize;

	oldsize = inode->i_size;
	i_size_write(inode, newsize);

	truncate_pagecache(inode, oldsize, newsize);
}
EXPORT_SYMBOL(truncate_setsize);
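
/*
 * Illustrative sketch (not part of this file): a minimal ->setattr built
 * around truncate_setsize(), roughly what the comment above describes.
 * "myfs" and myfs_free_blocks_beyond() are hypothetical.
 */
#if 0
static int myfs_setattr(struct dentry *dentry, struct iattr *attr)
{
	struct inode *inode = dentry->d_inode;
	int error;

	error = inode_change_ok(inode, attr);
	if (error)
		return error;

	if ((attr->ia_valid & ATTR_SIZE) && attr->ia_size != inode->i_size) {
		truncate_setsize(inode, attr->ia_size);
		myfs_free_blocks_beyond(inode, attr->ia_size);
	}

	setattr_copy(inode, attr);
	mark_inode_dirty(inode);
	return 0;
}
#endif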
/**
 * truncate_pagecache_range - unmap and remove pagecache that is hole-punched
 * @inode: inode
 * @lstart: offset of beginning of hole
 * @lend: offset of last byte of hole
 *
 * This function should typically be called before the filesystem
 * releases resources associated with the freed range (eg. deallocates
 * blocks).  This way, pagecache will always stay logically coherent
 * with on-disk format, and the filesystem would not have to deal with
 * situations such as writepage being called for a page that has already
 * had its underlying blocks deallocated.
 */
void truncate_pagecache_range(struct inode *inode, loff_t lstart, loff_t lend)
{
	struct address_space *mapping = inode->i_mapping;
	loff_t unmap_start = round_up(lstart, PAGE_SIZE);
	loff_t unmap_end = round_down(1 + lend, PAGE_SIZE) - 1;
	/*
	 * This rounding is currently just for example: unmap_mapping_range
	 * expands its hole outwards, whereas we want it to contract the hole
	 * inwards.  However, existing callers of truncate_pagecache_range are
	 * doing their own page rounding first.  Note that unmap_mapping_range
	 * allows holelen 0 for all, and we allow lend -1 for end of file.
	 */

	/*
	 * Unlike in truncate_pagecache, unmap_mapping_range is called only
	 * once (before truncating pagecache), and without "even_cows" flag:
	 * hole-punching should not remove private COWed pages from the hole.
	 */
	if ((u64)unmap_end > (u64)unmap_start)
		unmap_mapping_range(mapping, unmap_start,
				    1 + unmap_end - unmap_start, 0);
	truncate_inode_pages_range(mapping, lstart, lend);
}
EXPORT_SYMBOL(truncate_pagecache_range);
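
/*
 * Illustrative sketch (not part of this file): the pagecache side of a
 * FALLOC_FL_PUNCH_HOLE implementation.  The hole's pagecache is dropped
 * (with partial edge pages zeroed) before the underlying blocks are freed.
 * "myfs_punch_hole" and myfs_free_block_range() are hypothetical.
 */
#if 0
static int myfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	mutex_lock(&inode->i_mutex);
	truncate_pagecache_range(inode, offset, offset + len - 1);
	myfs_free_block_range(inode, offset, len);
	mutex_unlock(&inode->i_mutex);
	return 0;
}
#endif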