/*
 *	mm/mremap.c
 *
 *	(C) Copyright 1996 Linus Torvalds
 *
 *	Address space accounting code	<alan@lxorguk.ukuu.org.uk>
 *	(C) Copyright 2002 Red Hat Inc, All Rights Reserved
 */

#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/slab.h>
#include <linux/shm.h>
#include <linux/ksm.h>
#include <linux/mman.h>
#include <linux/swap.h>
#include <linux/capability.h>
#include <linux/fs.h>
#include <linux/highmem.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/mmu_notifier.h>

#include <asm/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "internal.h"

#ifndef arch_mmap_check
#define arch_mmap_check(addr, len, flags)	(0)
#endif
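
/*
 * Walk the existing page tables down to the pmd covering @addr.
 * Returns NULL if any level of the walk is missing (or bad); the
 * caller then has no ptes to move in that range.
 */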
static pmd_t *get_old_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        if (pgd_none_or_clear_bad(pgd))
                return NULL;

        pud = pud_offset(pgd, addr);
        if (pud_none_or_clear_bad(pud))
                return NULL;

        pmd = pmd_offset(pud, addr);
        if (pmd_none_or_clear_bad(pmd))
                return NULL;

        return pmd;
}
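
/*
 * Make sure pud/pmd/pte pages exist for @addr in the destination
 * range.  Returns the pmd, or NULL if an allocation failed.
 */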
static pmd_t *alloc_new_pmd(struct mm_struct *mm, unsigned long addr)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        pgd = pgd_offset(mm, addr);
        pud = pud_alloc(mm, pgd, addr);
        if (!pud)
                return NULL;

        pmd = pmd_alloc(mm, pud, addr);
        if (!pmd)
                return NULL;

        if (!pmd_present(*pmd) && __pte_alloc(mm, pmd, addr))
                return NULL;

        return pmd;
}
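
/*
 * Shift the ptes for [old_addr, old_end) from old_pmd over to new_pmd,
 * under the pte lock(s) and, for file mappings, under i_mmap_lock so
 * that truncation cannot slip in between clearing an old pte and
 * setting the new one.
 */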
static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
                unsigned long old_addr, unsigned long old_end,
                struct vm_area_struct *new_vma, pmd_t *new_pmd,
                unsigned long new_addr)
{
        struct address_space *mapping = NULL;
        struct mm_struct *mm = vma->vm_mm;
        pte_t *old_pte, *new_pte, pte;
        spinlock_t *old_ptl, *new_ptl;
        unsigned long old_start;

        old_start = old_addr;
        mmu_notifier_invalidate_range_start(vma->vm_mm,
                                            old_start, old_end);
        if (vma->vm_file) {
                /*
                 * Subtle point from Rajesh Venkatasubramanian: before
                 * moving file-based ptes, we must lock truncate_pagecache
                 * out, since it might clean the dst vma before the src vma,
                 * and we propagate stale pages into the dst afterward.
                 */
                mapping = vma->vm_file->f_mapping;
                spin_lock(&mapping->i_mmap_lock);
                if (new_vma->vm_truncate_count &&
                    new_vma->vm_truncate_count != vma->vm_truncate_count)
                        new_vma->vm_truncate_count = 0;
        }

        /*
         * We don't have to worry about the ordering of src and dst
         * pte locks because exclusive mmap_sem prevents deadlock.
         */
        old_pte = pte_offset_map_lock(mm, old_pmd, old_addr, &old_ptl);
        new_pte = pte_offset_map_nested(new_pmd, new_addr);
        new_ptl = pte_lockptr(mm, new_pmd);
        if (new_ptl != old_ptl)
                spin_lock_nested(new_ptl, SINGLE_DEPTH_NESTING);
        arch_enter_lazy_mmu_mode();

        for (; old_addr < old_end; old_pte++, old_addr += PAGE_SIZE,
                                   new_pte++, new_addr += PAGE_SIZE) {
                if (pte_none(*old_pte))
                        continue;
                pte = ptep_clear_flush(vma, old_addr, old_pte);
                pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
                set_pte_at(mm, new_addr, new_pte, pte);
        }

        arch_leave_lazy_mmu_mode();
        if (new_ptl != old_ptl)
                spin_unlock(new_ptl);
        pte_unmap_nested(new_pte - 1);
        pte_unmap_unlock(old_pte - 1, old_ptl);
        if (mapping)
                spin_unlock(&mapping->i_mmap_lock);
        mmu_notifier_invalidate_range_end(vma->vm_mm, old_start, old_end);
}
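
/*
 * Cap how much one move_ptes() call does under the locks, so the
 * cond_resched() in move_page_tables() gets a chance to run.
 */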
#define LATENCY_LIMIT	(64 * PAGE_SIZE)
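
/*
 * Move the page-table entries for @len bytes at @old_addr in @vma to
 * @new_addr in @new_vma, at most one pmd's worth at a time.  Returns
 * the number of bytes moved, which is less than @len if a destination
 * pmd could not be allocated.
 */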
unsigned long move_page_tables(struct vm_area_struct *vma,
                unsigned long old_addr, struct vm_area_struct *new_vma,
                unsigned long new_addr, unsigned long len)
{
        unsigned long extent, next, old_end;
        pmd_t *old_pmd, *new_pmd;

        old_end = old_addr + len;
        flush_cache_range(vma, old_addr, old_end);

        for (; old_addr < old_end; old_addr += extent, new_addr += extent) {
                cond_resched();
                next = (old_addr + PMD_SIZE) & PMD_MASK;
                if (next - 1 > old_end)
                        next = old_end;
                extent = next - old_addr;
                old_pmd = get_old_pmd(vma->vm_mm, old_addr);
                if (!old_pmd)
                        continue;
                new_pmd = alloc_new_pmd(vma->vm_mm, new_addr);
                if (!new_pmd)
                        break;
                next = (new_addr + PMD_SIZE) & PMD_MASK;
                if (extent > next - new_addr)
                        extent = next - new_addr;
                if (extent > LATENCY_LIMIT)
                        extent = LATENCY_LIMIT;
                move_ptes(vma, old_pmd, old_addr, old_addr + extent,
                                new_vma, new_pmd, new_addr);
        }

        return len + old_addr - old_end;	/* how much done */
}
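
/*
 * Move the vma itself: set up new_vma at new_addr, move the page
 * tables across, then unmap the old range, taking care to keep the
 * VM_ACCOUNT reservation and VM_LOCKED accounting consistent.
 * Returns the new address, or -ENOMEM.
 */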
static unsigned long move_vma(struct vm_area_struct *vma,
                unsigned long old_addr, unsigned long old_len,
                unsigned long new_len, unsigned long new_addr)
{
        struct mm_struct *mm = vma->vm_mm;
        struct vm_area_struct *new_vma;
        unsigned long vm_flags = vma->vm_flags;
        unsigned long new_pgoff;
        unsigned long moved_len;
        unsigned long excess = 0;
        unsigned long hiwater_vm;
        int split = 0;
        int err;

        /*
         * We'd prefer to avoid failure later on in do_munmap:
         * which may split one vma into three before unmapping.
         */
        if (mm->map_count >= sysctl_max_map_count - 3)
                return -ENOMEM;

        /*
         * Advise KSM to break any KSM pages in the area to be moved:
         * it would be confusing if they were to turn up at the new
         * location, where they happen to coincide with different KSM
         * pages recently unmapped.  But leave vma->vm_flags as it was,
         * so KSM can come around to merge on vma and new_vma afterwards.
         */
        err = ksm_madvise(vma, old_addr, old_addr + old_len,
                          MADV_UNMERGEABLE, &vm_flags);
        if (err)
                return err;

        new_pgoff = vma->vm_pgoff + ((old_addr - vma->vm_start) >> PAGE_SHIFT);
        new_vma = copy_vma(&vma, new_addr, new_len, new_pgoff);
        if (!new_vma)
                return -ENOMEM;

        moved_len = move_page_tables(vma, old_addr, new_vma, new_addr, old_len);
        if (moved_len < old_len) {
                /*
                 * On error, move entries back from new area to old,
                 * which will succeed since page tables still there,
                 * and then proceed to unmap new area instead of old.
                 */
                move_page_tables(new_vma, new_addr, vma, old_addr, moved_len);
                vma = new_vma;
                old_len = new_len;
                old_addr = new_addr;
                new_addr = -ENOMEM;
        }

        /* Conceal VM_ACCOUNT so old reservation is not undone */
        if (vm_flags & VM_ACCOUNT) {
                vma->vm_flags &= ~VM_ACCOUNT;
                excess = vma->vm_end - vma->vm_start - old_len;
                if (old_addr > vma->vm_start &&
                    old_addr + old_len < vma->vm_end)
                        split = 1;
        }

        /*
         * If we failed to move page tables we still do total_vm increment
         * since do_munmap() will decrement it by old_len == new_len.
         *
         * Since total_vm is about to be raised artificially high for a
         * moment, we need to restore high watermark afterwards: if stats
         * are taken meanwhile, total_vm and hiwater_vm appear too high.
         * If this were a serious issue, we'd add a flag to do_munmap().
         */
        hiwater_vm = mm->hiwater_vm;
        mm->total_vm += new_len >> PAGE_SHIFT;
        vm_stat_account(mm, vma->vm_flags, vma->vm_file, new_len >> PAGE_SHIFT);

        if (do_munmap(mm, old_addr, old_len) < 0) {
                /* OOM: unable to split vma, just get accounts right */
                vm_unacct_memory(excess >> PAGE_SHIFT);
                excess = 0;
        }
        mm->hiwater_vm = hiwater_vm;

        /* Restore VM_ACCOUNT if one or two pieces of vma left */
        if (excess) {
                vma->vm_flags |= VM_ACCOUNT;
                if (split)
                        vma->vm_next->vm_flags |= VM_ACCOUNT;
        }

        if (vm_flags & VM_LOCKED) {
                mm->locked_vm += new_len >> PAGE_SHIFT;
                if (new_len > old_len)
                        mlock_vma_pages_range(new_vma, new_addr + old_len,
                                              new_addr + new_len);
        }

        return new_addr;
}
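
/*
 * Check that the vma at @addr can be resized from @old_len to
 * @new_len: the old range must lie within a single, non-hugetlb vma,
 * and the mlock, total_vm and commit limits must allow the growth.
 * On success, *p holds the pages charged under VM_ACCOUNT, if any.
 */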
static struct vm_area_struct *vma_to_resize(unsigned long addr,
        unsigned long old_len, unsigned long new_len, unsigned long *p)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma = find_vma(mm, addr);

        if (!vma || vma->vm_start > addr)
                goto Efault;

        if (is_vm_hugetlb_page(vma))
                goto Einval;

        /* We can't remap across vm area boundaries */
        if (old_len > vma->vm_end - addr)
                goto Efault;

        if (vma->vm_flags & (VM_DONTEXPAND | VM_PFNMAP)) {
                if (new_len > old_len)
                        goto Efault;
        }

        if (vma->vm_flags & VM_LOCKED) {
                unsigned long locked, lock_limit;
                locked = mm->locked_vm << PAGE_SHIFT;
                lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
                locked += new_len - old_len;
                if (locked > lock_limit && !capable(CAP_IPC_LOCK))
                        goto Eagain;
        }

        if (!may_expand_vm(mm, (new_len - old_len) >> PAGE_SHIFT))
                goto Enomem;

        if (vma->vm_flags & VM_ACCOUNT) {
                unsigned long charged = (new_len - old_len) >> PAGE_SHIFT;
                if (security_vm_enough_memory(charged))
                        goto Efault;
                *p = charged;
        }

        return vma;

Efault:	/* very odd choice for most of the cases, but... */
        return ERR_PTR(-EFAULT);
Einval:
        return ERR_PTR(-EINVAL);
Enomem:
        return ERR_PTR(-ENOMEM);
Eagain:
        return ERR_PTR(-EAGAIN);
}
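
/*
 * MREMAP_FIXED case: validate the destination, unmap whatever is
 * currently mapped there, then move the (possibly shrunk) mapping
 * into place with move_vma().
 */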
static unsigned long mremap_to(unsigned long addr,
        unsigned long old_len, unsigned long new_addr,
        unsigned long new_len)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;
        unsigned long map_flags;

        if (new_addr & ~PAGE_MASK)
                goto out;

        if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
                goto out;

        /* Check if the location we're moving into overlaps the
         * old location at all, and fail if it does.
         */
        if ((new_addr <= addr) && (new_addr+new_len) > addr)
                goto out;

        if ((addr <= new_addr) && (addr+old_len) > new_addr)
                goto out;

        ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
        if (ret)
                goto out;

        ret = do_munmap(mm, new_addr, new_len);
        if (ret)
                goto out;

        if (old_len >= new_len) {
                ret = do_munmap(mm, addr+new_len, old_len - new_len);
                if (ret && old_len != new_len)
                        goto out;
                old_len = new_len;
        }

        vma = vma_to_resize(addr, old_len, new_len, &charged);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out;
        }

        map_flags = MAP_FIXED;
        if (vma->vm_flags & VM_MAYSHARE)
                map_flags |= MAP_SHARED;

        ret = arch_mmap_check(new_addr, new_len, map_flags);
        if (ret)
                goto out1;

        ret = get_unmapped_area(vma->vm_file, new_addr, new_len, vma->vm_pgoff +
                                ((addr - vma->vm_start) >> PAGE_SHIFT),
                                map_flags);
        if (ret & ~PAGE_MASK)
                goto out1;

        ret = move_vma(vma, addr, old_len, new_len, new_addr);
        if (!(ret & ~PAGE_MASK))
                goto out;
out1:
        vm_unacct_memory(charged);
out:
        return ret;
}
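
/*
 * Can @vma grow in place by @delta bytes?  It must not run into the
 * next vma or wrap past TASK_SIZE, and the architecture must accept
 * a MAP_FIXED mapping of the enlarged size at the same address.
 */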
static int vma_expandable(struct vm_area_struct *vma, unsigned long delta)
{
        unsigned long end = vma->vm_end + delta;
        unsigned long max_addr = TASK_SIZE;

        if (vma->vm_next)
                max_addr = vma->vm_next->vm_start;
        if (max_addr < end || end < vma->vm_end)
                return 0;
        if (arch_mmap_check(vma->vm_start, end - vma->vm_start, MAP_FIXED))
                return 0;
        if (get_unmapped_area(NULL, vma->vm_start, end - vma->vm_start,
                              0, MAP_FIXED) & ~PAGE_MASK)
                return 0;
        return 1;
}

/*
 * Expand (or shrink) an existing mapping, potentially moving it at the
 * same time (controlled by the MREMAP_MAYMOVE flag and available VM space)
 *
 * MREMAP_FIXED option added 5-Dec-1999 by Benjamin LaHaise
 * This option implies MREMAP_MAYMOVE.
 */
unsigned long do_mremap(unsigned long addr,
        unsigned long old_len, unsigned long new_len,
        unsigned long flags, unsigned long new_addr)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        unsigned long ret = -EINVAL;
        unsigned long charged = 0;

        if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE))
                goto out;

        if (addr & ~PAGE_MASK)
                goto out;

        old_len = PAGE_ALIGN(old_len);
        new_len = PAGE_ALIGN(new_len);

        /*
         * We allow a zero old-len as a special case
         * for DOS-emu "duplicate shm area" thing. But
         * a zero new-len is nonsensical.
         */
        if (!new_len)
                goto out;

        if (flags & MREMAP_FIXED) {
                if (flags & MREMAP_MAYMOVE)
                        ret = mremap_to(addr, old_len, new_addr, new_len);
                goto out;
        }

        /*
         * Always allow a shrinking remap: that just unmaps
         * the unnecessary pages..
         * do_munmap does all the needed commit accounting
         */
        if (old_len >= new_len) {
                ret = do_munmap(mm, addr+new_len, old_len - new_len);
                if (ret && old_len != new_len)
                        goto out;
                ret = addr;
                goto out;
        }

        /*
         * Ok, we need to grow..
         */
        vma = vma_to_resize(addr, old_len, new_len, &charged);
        if (IS_ERR(vma)) {
                ret = PTR_ERR(vma);
                goto out;
        }

        /* old_len exactly to the end of the area.. */
        if (old_len == vma->vm_end - addr) {
                /* can we just expand the current mapping? */
                if (vma_expandable(vma, new_len - old_len)) {
                        int pages = (new_len - old_len) >> PAGE_SHIFT;

                        vma_adjust(vma, vma->vm_start,
                                   addr + new_len, vma->vm_pgoff, NULL);

                        mm->total_vm += pages;
                        vm_stat_account(mm, vma->vm_flags, vma->vm_file, pages);
                        if (vma->vm_flags & VM_LOCKED) {
                                mm->locked_vm += pages;
                                mlock_vma_pages_range(vma, addr + old_len,
                                                      addr + new_len);
                        }
                        ret = addr;
                        goto out;
                }
        }

        /*
         * We weren't able to just expand or shrink the area,
         * we need to create a new one and move it..
         */
        ret = -ENOMEM;
        if (flags & MREMAP_MAYMOVE) {
                unsigned long map_flags = 0;
                if (vma->vm_flags & VM_MAYSHARE)
                        map_flags |= MAP_SHARED;

                new_addr = get_unmapped_area(vma->vm_file, 0, new_len,
                                             vma->vm_pgoff +
                                             ((addr - vma->vm_start) >> PAGE_SHIFT),
                                             map_flags);
                if (new_addr & ~PAGE_MASK) {
                        ret = new_addr;
                        goto out;
                }

                ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1);
                if (ret)
                        goto out;
                ret = move_vma(vma, addr, old_len, new_len, new_addr);
        }
out:
        if (ret & ~PAGE_MASK)
                vm_unacct_memory(charged);
        return ret;
}
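
/*
 * mremap(2) entry point: just do_mremap() under the write side of
 * mmap_sem.  An illustrative userspace call (not from this file) that
 * grows a mapping, letting the kernel move it if it cannot grow in
 * place:
 *
 *	void *new = mremap(old, old_size, new_size, MREMAP_MAYMOVE);
 *	if (new == MAP_FAILED)
 *		perror("mremap");
 */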
SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
                unsigned long, new_len, unsigned long, flags,
                unsigned long, new_addr)
{
        unsigned long ret;

        down_write(&current->mm->mmap_sem);
        ret = do_mremap(addr, old_len, new_len, flags, new_addr);
        up_write(&current->mm->mmap_sem);
        return ret;
}