/*
 *	linux/mm/madvise.c
 *
 * Copyright (C) 1999  Linus Torvalds
 * Copyright (C) 2002  Christoph Hellwig
 */

#include <linux/mman.h>
#include <linux/pagemap.h>
#include <linux/syscalls.h>
#include <linux/mempolicy.h>
#include <linux/page-isolation.h>
#include <linux/hugetlb.h>
#include <linux/falloc.h>
#include <linux/sched.h>
#include <linux/ksm.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/blkdev.h>
#include <linux/swap.h>
#include <linux/swapops.h>

/*
 * Any behaviour which results in changes to the vma->vm_flags needs to
 * take mmap_sem for writing. Others, which simply traverse vmas, need
 * to only take it for reading.
 */
static int madvise_need_mmap_write(int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
		return 0;
	default:
		/* be safe, default to 1. list exceptions explicitly */
		return 1;
	}
}

/*
 * We can potentially split a vm area into separate
 * areas, each area with its own behavior.
 */
static long madvise_behavior(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end, int behavior)
{
	struct mm_struct *mm = vma->vm_mm;
	int error = 0;
	pgoff_t pgoff;
	unsigned long new_flags = vma->vm_flags;

	switch (behavior) {
	case MADV_NORMAL:
		new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
		break;
	case MADV_SEQUENTIAL:
		new_flags = (new_flags & ~VM_RAND_READ) | VM_SEQ_READ;
		break;
	case MADV_RANDOM:
		new_flags = (new_flags & ~VM_SEQ_READ) | VM_RAND_READ;
		break;
	case MADV_DONTFORK:
		new_flags |= VM_DONTCOPY;
		break;
	case MADV_DOFORK:
		if (vma->vm_flags & VM_IO) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTCOPY;
		break;
	case MADV_DONTDUMP:
		new_flags |= VM_DONTDUMP;
		break;
	case MADV_DODUMP:
		if (new_flags & VM_SPECIAL) {
			error = -EINVAL;
			goto out;
		}
		new_flags &= ~VM_DONTDUMP;
		break;
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
		error = ksm_madvise(vma, start, end, behavior, &new_flags);
		if (error)
			goto out;
		break;
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
		error = hugepage_madvise(vma, &new_flags, behavior);
		if (error)
			goto out;
		break;
	}

	if (new_flags == vma->vm_flags) {
		*prev = vma;
		goto out;
	}

	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, new_flags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	*prev = vma;

	if (start != vma->vm_start) {
		error = split_vma(mm, vma, start, 1);
		if (error)
			goto out;
	}

	if (end != vma->vm_end) {
		error = split_vma(mm, vma, end, 0);
		if (error)
			goto out;
	}

success:
	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 */
	vma->vm_flags = new_flags;

out:
	if (error == -ENOMEM)
		error = -EAGAIN;
	return error;
}
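/*
 * Illustration only (not part of the kernel source): a minimal userspace
 * sketch of the split/merge behavior above. Advising a sub-range splits
 * the VMA, which is visible in /proc/self/maps; the sizes here are
 * hypothetical and error handling is elided.
 *
 *	size_t page = sysconf(_SC_PAGESIZE);
 *	char *p = mmap(NULL, 16 * page, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	// Only the middle four pages get VM_SEQ_READ, so split_vma()
 *	// runs twice and one VMA becomes three.
 *	madvise(p + 4 * page, 4 * page, MADV_SEQUENTIAL);
 *	// Clearing the flag makes vm_flags match the neighbors again
 *	// and lets vma_merge() coalesce the three VMAs back into one.
 *	madvise(p + 4 * page, 4 * page, MADV_NORMAL);
 */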
#ifdef CONFIG_SWAP
static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
	unsigned long end, struct mm_walk *walk)
{
	pte_t *orig_pte;
	struct vm_area_struct *vma = walk->private;
	unsigned long index;

	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
		return 0;

	for (index = start; index != end; index += PAGE_SIZE) {
		pte_t pte;
		swp_entry_t entry;
		struct page *page;
		spinlock_t *ptl;

		orig_pte = pte_offset_map_lock(vma->vm_mm, pmd, start, &ptl);
		pte = *(orig_pte + ((index - start) / PAGE_SIZE));
		pte_unmap_unlock(orig_pte, ptl);

		if (pte_present(pte) || pte_none(pte) || pte_file(pte))
			continue;
		entry = pte_to_swp_entry(pte);
		if (unlikely(non_swap_entry(entry)))
			continue;

		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
					     vma, index);
		if (page)
			page_cache_release(page);
	}

	return 0;
}

static void force_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end)
{
	struct mm_walk walk = {
		.mm = vma->vm_mm,
		.pmd_entry = swapin_walk_pmd_entry,
		.private = vma,
	};

	walk_page_range(start, end, &walk);

	lru_add_drain();	/* Push any new pages onto the LRU now */
}

static void force_shm_swapin_readahead(struct vm_area_struct *vma,
		unsigned long start, unsigned long end,
		struct address_space *mapping)
{
	pgoff_t index;
	struct page *page;
	swp_entry_t swap;

	for (; start < end; start += PAGE_SIZE) {
		index = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

		page = find_get_page(mapping, index);
		if (!radix_tree_exceptional_entry(page)) {
			if (page)
				page_cache_release(page);
			continue;
		}
		swap = radix_to_swp_entry(page);
		page = read_swap_cache_async(swap, GFP_HIGHUSER_MOVABLE,
					     NULL, 0);
		if (page)
			page_cache_release(page);
	}

	lru_add_drain();	/* Push any new pages onto the LRU now */
}
#endif		/* CONFIG_SWAP */

/*
 * Schedule all required I/O operations.  Do not wait for completion.
 */
static long madvise_willneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	struct file *file = vma->vm_file;

#ifdef CONFIG_SWAP
	if (!file || mapping_cap_swap_backed(file->f_mapping)) {
		*prev = vma;
		if (!file)
			force_swapin_readahead(vma, start, end);
		else
			force_shm_swapin_readahead(vma, start, end,
						   file->f_mapping);
		return 0;
	}
#endif

	if (!file)
		return -EBADF;

	if (file->f_mapping->a_ops->get_xip_mem) {
		/* no bad return value, but ignore advice */
		return 0;
	}

	*prev = vma;
	start = ((start - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
	if (end > vma->vm_end)
		end = vma->vm_end;
	end = ((end - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;

	force_page_cache_readahead(file->f_mapping, file, start, end - start);
	return 0;
}
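/*
 * Illustration only (not part of the kernel source): a userspace sketch
 * of MADV_WILLNEED, prefetching a mapped file before a latency-sensitive
 * pass over it. "fd" is assumed already open; error handling is elided.
 *
 *	struct stat st;
 *	fstat(fd, &st);
 *	char *buf = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
 *	// Schedules readahead via force_page_cache_readahead() above and
 *	// returns without waiting for the I/O to complete.
 *	madvise(buf, st.st_size, MADV_WILLNEED);
 */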
/*
 * Application no longer needs these pages.  If the pages are dirty,
 * it's OK to just throw them away.  The app will be more careful about
 * data it wants to keep.  Be sure to free swap resources too.  The
 * zap_page_range call sets things up for shrink_active_list to actually free
 * these pages later if no one else has touched them in the meantime,
 * although we could add these pages to a global reuse list for
 * shrink_active_list to pick up before reclaiming other pages.
 *
 * NB: This interface discards data rather than pushes it out to swap,
 * as some implementations do.  This has performance implications for
 * applications like large transactional databases which want to discard
 * pages in anonymous maps after committing to backing store the data
 * that was kept in them.  There is no reason to write this data out to
 * the swap area if the application is discarding it.
 *
 * An interface that causes the system to free clean pages and flush
 * dirty pages is already available as msync(MS_INVALIDATE).
 */
static long madvise_dontneed(struct vm_area_struct *vma,
			     struct vm_area_struct **prev,
			     unsigned long start, unsigned long end)
{
	*prev = vma;
	if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
		return -EINVAL;

	if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
		struct zap_details details = {
			.nonlinear_vma = vma,
			.last_index = ULONG_MAX,
		};
		zap_page_range(vma, start, end - start, &details);
	} else
		zap_page_range(vma, start, end - start, NULL);
	return 0;
}
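/*
 * Illustration only (not part of the kernel source): a sketch of
 * MADV_DONTNEED discarding a scratch buffer between work items. For a
 * private anonymous mapping the pages are zapped immediately and the
 * next touch faults in fresh zero-filled pages, not the old contents.
 *
 *	char *scratch = mmap(NULL, len, PROT_READ | PROT_WRITE,
 *			     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	memset(scratch, 0xff, len);
 *	madvise(scratch, len, MADV_DONTNEED);
 *	assert(scratch[0] == 0);	// refaulted as a zero page
 */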
/*
 * Application wants to free up the pages and associated backing store.
 * This is effectively punching a hole into the middle of a file.
 *
 * NOTE: Currently, only shmfs/tmpfs is supported for this operation.
 * Other filesystems return -ENOSYS.
 */
static long madvise_remove(struct vm_area_struct *vma,
			   struct vm_area_struct **prev,
			   unsigned long start, unsigned long end)
{
	loff_t offset;
	int error;
	struct file *f;

	*prev = NULL;	/* tell sys_madvise we drop mmap_sem */

	if (vma->vm_flags & (VM_LOCKED|VM_NONLINEAR|VM_HUGETLB))
		return -EINVAL;

	f = vma->vm_file;

	if (!f || !f->f_mapping || !f->f_mapping->host)
		return -EINVAL;

	if ((vma->vm_flags & (VM_SHARED|VM_WRITE)) != (VM_SHARED|VM_WRITE))
		return -EACCES;

	offset = (loff_t)(start - vma->vm_start)
			+ ((loff_t)vma->vm_pgoff << PAGE_SHIFT);

	/*
	 * Filesystem's fallocate may need to take i_mutex.  We need to
	 * explicitly grab a reference because the vma (and hence the
	 * vma's reference to the file) can go away as soon as we drop
	 * mmap_sem.
	 */
	get_file(f);
	up_read(&current->mm->mmap_sem);
	error = do_fallocate(f,
			     FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
			     offset, end - start);
	fput(f);
	down_read(&current->mm->mmap_sem);
	return error;
}
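/*
 * Illustration only (not part of the kernel source): MADV_REMOVE on a
 * shared, writable tmpfs mapping frees pages and backing store alike,
 * much like fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
 * off, len) on the file itself. The "/scratch" name is hypothetical.
 *
 *	int fd = shm_open("/scratch", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, len);
 *	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
 *	if (madvise(p, len, MADV_REMOVE))
 *		perror("madvise");	// e.g. EACCES without VM_SHARED|VM_WRITE
 */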
#ifdef CONFIG_MEMORY_FAILURE
/*
 * Error injection support for memory error handling.
 */
static int madvise_hwpoison(int bhv, unsigned long start, unsigned long end)
{
	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;
	for (; start < end; start += PAGE_SIZE) {
		struct page *p;
		int ret;

		ret = get_user_pages_fast(start, 1, 0, &p);
		if (ret != 1)
			return ret;
		if (bhv == MADV_SOFT_OFFLINE) {
			printk(KERN_INFO "Soft offlining page %lx at %lx\n",
			       page_to_pfn(p), start);
			ret = soft_offline_page(p, MF_COUNT_INCREASED);
			if (ret)
				return ret;
			continue;
		}
		printk(KERN_INFO "Injecting memory failure for page %lx at %lx\n",
		       page_to_pfn(p), start);
		/* Ignore return value for now */
		memory_failure(page_to_pfn(p), 0, MF_COUNT_INCREASED);
	}
	return 0;
}
#endif
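/*
 * Illustration only (not part of the kernel source): a sketch of the
 * error-injection path above. Requires CAP_SYS_ADMIN and a kernel built
 * with CONFIG_MEMORY_FAILURE; page-size handling is simplified.
 *
 *	char *p = mmap(NULL, page, PROT_READ | PROT_WRITE,
 *		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
 *	p[0] = 1;				// fault the page in first
 *	madvise(p, page, MADV_SOFT_OFFLINE);	// migrate, then offline it
 */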
static long
madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
		unsigned long start, unsigned long end, int behavior)
{
	switch (behavior) {
	case MADV_REMOVE:
		return madvise_remove(vma, prev, start, end);
	case MADV_WILLNEED:
		return madvise_willneed(vma, prev, start, end);
	case MADV_DONTNEED:
		return madvise_dontneed(vma, prev, start, end);
	default:
		return madvise_behavior(vma, prev, start, end, behavior);
	}
}

static int
madvise_behavior_valid(int behavior)
{
	switch (behavior) {
	case MADV_DOFORK:
	case MADV_DONTFORK:
	case MADV_NORMAL:
	case MADV_SEQUENTIAL:
	case MADV_RANDOM:
	case MADV_REMOVE:
	case MADV_WILLNEED:
	case MADV_DONTNEED:
#ifdef CONFIG_KSM
	case MADV_MERGEABLE:
	case MADV_UNMERGEABLE:
#endif
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
	case MADV_HUGEPAGE:
	case MADV_NOHUGEPAGE:
#endif
	case MADV_DONTDUMP:
	case MADV_DODUMP:
		return 1;

	default:
		return 0;
	}
}
/*
 * The madvise(2) system call.
 *
 * Applications can use madvise() to advise the kernel how it should
 * handle paging I/O in this VM area.  The idea is to help the kernel
 * use appropriate read-ahead and caching techniques.  The information
 * provided is advisory only, and can be safely disregarded by the
 * kernel without affecting the correct operation of the application.
 *
 * behavior values:
 *  MADV_NORMAL - the default behavior is to read clusters.  This
 *		results in some read-ahead and read-behind.
 *  MADV_RANDOM - the system should read the minimum amount of data
 *		on any access, since it is unlikely that the appli-
 *		cation will need more than what it asks for.
 *  MADV_SEQUENTIAL - pages in the given range will probably be accessed
 *		once, so they can be aggressively read ahead, and
 *		can be freed soon after they are accessed.
 *  MADV_WILLNEED - the application is notifying the system to read
 *		some pages ahead.
 *  MADV_DONTNEED - the application is finished with the given range,
 *		so the kernel can free resources associated with it.
 *  MADV_REMOVE - the application wants to free up the given range of
 *		pages and associated backing store.
 *  MADV_DONTFORK - omit this area from child's address space when forking:
 *		typically, to avoid COWing pages pinned by get_user_pages().
 *  MADV_DOFORK - cancel MADV_DONTFORK: no longer omit this area when forking.
 *  MADV_MERGEABLE - the application recommends that KSM try to merge pages in
 *		this area with pages of identical content from other such areas.
 *  MADV_UNMERGEABLE- cancel MADV_MERGEABLE: no longer merge pages with others.
 *  MADV_HUGEPAGE - the application wants to back the given range by transparent
 *		huge pages in the future. Existing pages might be coalesced and
 *		new pages might be allocated as THP.
 *  MADV_NOHUGEPAGE - mark the given range as not worth being backed by
 *		transparent huge pages so the existing pages will not be
 *		coalesced into THP and new pages will not be allocated as THP.
 *  MADV_DONTDUMP - the application wants to prevent pages in the given range
 *		from being included in its core dump.
 *  MADV_DODUMP - cancel MADV_DONTDUMP: no longer exclude from core dump.
 *
 * return values:
 *  zero    - success
 *  -EINVAL - start + len < 0, start is not page-aligned,
 *		"behavior" is not a valid value, or application
 *		is attempting to release locked or shared pages.
 *  -ENOMEM - addresses in the specified range are not currently
 *		mapped, or are outside the AS of the process.
 *  -EIO    - an I/O error occurred while paging in data.
 *  -EBADF  - map exists, but area maps something that isn't a file.
 *  -EAGAIN - a kernel resource was temporarily unavailable.
 */
SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
	unsigned long end, tmp;
	struct vm_area_struct *vma, *prev;
	int unmapped_error = 0;
	int error = -EINVAL;
	int write;
	size_t len;
	struct blk_plug plug;

#ifdef CONFIG_MEMORY_FAILURE
	if (behavior == MADV_HWPOISON || behavior == MADV_SOFT_OFFLINE)
		return madvise_hwpoison(behavior, start, start + len_in);
#endif
	if (!madvise_behavior_valid(behavior))
		return error;

	write = madvise_need_mmap_write(behavior);
	if (write)
		down_write(&current->mm->mmap_sem);
	else
		down_read(&current->mm->mmap_sem);

	if (start & ~PAGE_MASK)
		goto out;
	len = (len_in + ~PAGE_MASK) & PAGE_MASK;

	/* Check to see whether len was rounded up from small -ve to zero */
	if (len_in && !len)
		goto out;

	end = start + len;
	if (end < start)
		goto out;

	error = 0;
	if (end == start)
		goto out;

	/*
	 * If the interval [start,end) covers some unmapped address
	 * ranges, just ignore them, but return -ENOMEM at the end.
	 * - different from the way of handling in mlock etc.
	 */
	vma = find_vma_prev(current->mm, start, &prev);
	if (vma && start > vma->vm_start)
		prev = vma;

	blk_start_plug(&plug);
	for (;;) {
		/* Still start < end. */
		error = -ENOMEM;
		if (!vma)
			goto out_plug;

		/* Here start < (end|vma->vm_end). */
		if (start < vma->vm_start) {
			unmapped_error = -ENOMEM;
			start = vma->vm_start;
			if (start >= end)
				goto out_plug;
		}

		/* Here vma->vm_start <= start < (end|vma->vm_end) */
		tmp = vma->vm_end;
		if (end < tmp)
			tmp = end;

		/* Here vma->vm_start <= start < tmp <= (end|vma->vm_end). */
		error = madvise_vma(vma, &prev, start, tmp, behavior);
		if (error)
			goto out_plug;
		start = tmp;
		if (prev && start < prev->vm_end)
			start = prev->vm_end;
		error = unmapped_error;
		if (start >= end)
			goto out_plug;
		if (prev)
			vma = prev->vm_next;
		else	/* madvise_remove dropped mmap_sem */
			vma = find_vma(current->mm, start);
	}
out_plug:
	blk_finish_plug(&plug);
out:
	if (write)
		up_write(&current->mm->mmap_sem);
	else
		up_read(&current->mm->mmap_sem);

	return error;
}
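/*
 * Illustration only (not part of the kernel source): an end-to-end
 * userspace sketch exercising the syscall documented above. "path" is a
 * placeholder; the advice is best-effort, so failure is non-fatal.
 *
 *	#include <sys/mman.h>
 *	#include <sys/stat.h>
 *	#include <fcntl.h>
 *	#include <stdio.h>
 *	#include <unistd.h>
 *
 *	int stream_file(const char *path)
 *	{
 *		struct stat st;
 *		int fd = open(path, O_RDONLY);
 *		if (fd < 0 || fstat(fd, &st) < 0)
 *			return -1;
 *		char *p = mmap(NULL, st.st_size, PROT_READ,
 *			       MAP_PRIVATE, fd, 0);
 *		if (p == MAP_FAILED)
 *			return -1;
 *		// One sequential pass: enable aggressive readahead.
 *		if (madvise(p, st.st_size, MADV_SEQUENTIAL))
 *			perror("madvise");
 *		// ... read through p ...
 *		munmap(p, st.st_size);
 *		close(fd);
 *		return 0;
 *	}
 */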