/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995 Linus Torvalds
 *  (C) Copyright 2002 Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"
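
/*
 * mlock() is permitted either with the CAP_IPC_LOCK capability or,
 * for unprivileged tasks, when RLIMIT_MEMLOCK allows at least one
 * byte to be locked.
 */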
int can_do_mlock(void)
{
	if (capable(CAP_IPC_LOCK))
		return 1;
	if (rlimit(RLIMIT_MEMLOCK) != 0)
		return 1;
	return 0;
}
EXPORT_SYMBOL(can_do_mlock);

/*
 * Mlocked pages are marked with PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When lazy mlocking via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified, otherwise we
 * may have mlocked a page that is being munlocked.  So lazy mlock must take
 * the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */

/*
 * LRU accounting for clear_page_mlock()
 */
void __clear_page_mlock(struct page *page)
{
	VM_BUG_ON(!PageLocked(page));

	if (!page->mapping) {	/* truncated ? */
		return;
	}

	dec_zone_page_state(page, NR_MLOCK);
	count_vm_event(UNEVICTABLE_PGCLEARED);
	if (!isolate_lru_page(page)) {
		putback_lru_page(page);
	} else {
		/*
		 * We lost the race.  The page already moved to the evictable list.
		 */
		if (PageUnevictable(page))
			count_vm_event(UNEVICTABLE_PGSTRANDED);
	}
}

/*
 * Mark page as mlocked if not already.
 * If page on LRU, isolate and putback to move to unevictable list.
 */
void mlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (!TestSetPageMlocked(page)) {
		inc_zone_page_state(page, NR_MLOCK);
		count_vm_event(UNEVICTABLE_PGMLOCKED);
		if (!isolate_lru_page(page))
			putback_lru_page(page);
	}
}

/**
 * munlock_vma_page - munlock a vma page
 * @page: page to be unlocked
 *
 * called from munlock()/munmap() path with page supposedly on the LRU.
 * When we munlock a page, because the vma where we found the page is being
 * munlock()ed or munmap()ed, we want to check whether other vmas hold the
 * page locked so that we can leave it on the unevictable lru list and not
 * bother vmscan with it.  However, to walk the page's rmap list in
 * try_to_munlock() we must isolate the page from the LRU.  If some other
 * task has removed the page from the LRU, we won't be able to do that.
 * So we clear the PageMlocked as we might not get another chance.  If we
 * can't isolate the page, we leave it for putback_lru_page() and vmscan
 * [page_referenced()/try_to_unmap()] to deal with.
 */
void munlock_vma_page(struct page *page)
{
	BUG_ON(!PageLocked(page));

	if (TestClearPageMlocked(page)) {
		dec_zone_page_state(page, NR_MLOCK);
		if (!isolate_lru_page(page)) {
			int ret = try_to_munlock(page);
			/*
			 * did try_to_munlock() succeed or punt?
			 */
			if (ret != SWAP_MLOCK)
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);

			putback_lru_page(page);
		} else {
			/*
			 * Some other task has removed the page from the LRU.
			 * putback_lru_page() will take care of removing the
			 * page from the unevictable list, if necessary.
			 * vmscan [page_referenced()] will move the page back
			 * to the unevictable list if some other vma has it
			 * mlocked.
			 */
			if (PageUnevictable(page))
				count_vm_event(UNEVICTABLE_PGSTRANDED);
			else
				count_vm_event(UNEVICTABLE_PGMUNLOCKED);
		}
	}
}

/**
 * __mlock_vma_pages_range() - mlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 *
 * This takes care of making the pages present too.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr = start;
	struct page *pages[16];		/* 16 gives a reasonable batch */
	int nr_pages = (end - start) / PAGE_SIZE;
	int ret = 0;
	int gup_flags;

	VM_BUG_ON(start & ~PAGE_MASK);
	VM_BUG_ON(end   & ~PAGE_MASK);
	VM_BUG_ON(start < vma->vm_start);
	VM_BUG_ON(end   > vma->vm_end);
	VM_BUG_ON(!rwsem_is_locked(&mm->mmap_sem));
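
	/*
	 * FOLL_TOUCH marks the pages accessed and FOLL_GET takes a
	 * reference on each page returned; FOLL_WRITE asks for a writable
	 * mapping (breaking COW where needed), so it is set only when the
	 * vma itself is writable.
	 */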
	gup_flags = FOLL_TOUCH | FOLL_GET;
	if (vma->vm_flags & VM_WRITE)
		gup_flags |= FOLL_WRITE;

	while (nr_pages > 0) {
		int i;

		cond_resched();

		/*
		 * get_user_pages makes pages present if we are
		 * setting mlock. and this extra reference count will
		 * disable migration of this page.  However, page may
		 * still be truncated out from under us.
		 */
		ret = __get_user_pages(current, mm, addr,
				min_t(int, nr_pages, ARRAY_SIZE(pages)),
				gup_flags, pages, NULL);
		/*
		 * This can happen for, e.g., VM_NONLINEAR regions before
		 * a page has been allocated and mapped at a given offset,
		 * or for addresses that map beyond end of a file.
		 * We'll mlock the pages if/when they get faulted in.
		 */
		if (ret < 0)
			break;

		lru_add_drain();	/* push cached pages to LRU */

		for (i = 0; i < ret; i++) {
			struct page *page = pages[i];

			if (page->mapping) {
				/*
				 * That preliminary check is mainly to avoid
				 * the pointless overhead of lock_page on the
				 * ZERO_PAGE: which might bounce very badly if
				 * there is contention.  However, we're still
				 * dirtying its cacheline with get/put_page:
				 * we'll add another __get_user_pages flag to
				 * avoid it if that case turns out to matter.
				 */
				lock_page(page);
				/*
				 * Because we lock page here and migration is
				 * blocked by the elevated reference, we need
				 * only check for file-cache page truncation.
				 */
				if (page->mapping)
					mlock_vma_page(page);
				unlock_page(page);
			}
			put_page(page);	/* ref from get_user_pages() */
		}

		addr += ret * PAGE_SIZE;
		nr_pages -= ret;
		ret = 0;
	}

	return ret;	/* 0 or negative error code */
}

/*
 * convert get_user_pages() return value to posix mlock() error
 */
static int __mlock_posix_error_return(long retval)
{
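	/*
	 * get_user_pages() reports an unmapped hole in the range as
	 * -EFAULT, which POSIX says mlock() must report as ENOMEM; a
	 * genuine allocation failure (-ENOMEM) becomes EAGAIN instead.
	 */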
	if (retval == -EFAULT)
		retval = -ENOMEM;
	else if (retval == -ENOMEM)
		retval = -EAGAIN;
	return retval;
}

/**
 * mlock_vma_pages_range() - mlock pages in specified vma range.
 * @vma:   the vma containing the specified address range
 * @start: starting address in @vma to mlock
 * @end:   end address [+1] in @vma to mlock
 *
 * For mmap()/mremap()/expansion of mlocked vma.
 *
 * return 0 on success for "normal" vmas.
 *
 * return number of pages [> 0] to be removed from locked_vm on success
 * of "special" vmas.
 */
long mlock_vma_pages_range(struct vm_area_struct *vma,
			unsigned long start, unsigned long end)
{
	int nr_pages = (end - start) / PAGE_SIZE;
	BUG_ON(!(vma->vm_flags & VM_LOCKED));

	/*
	 * filter unlockable vmas
	 */
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		goto no_mlock;

	if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current))) {

		__mlock_vma_pages_range(vma, start, end);

		/* Hide errors from mmap() and other callers */
		return 0;
	}

	/*
	 * User mapped kernel pages or huge pages:
	 * make these pages present to populate the ptes, but
	 * fall thru' to reset VM_LOCKED--no need to unlock, and
	 * return nr_pages so these don't get counted against task's
	 * locked limit.  huge pages are already counted against
	 * locked vm limit.
	 */
	make_pages_present(start, end);

no_mlock:
	vma->vm_flags &= ~VM_LOCKED;	/* and don't come back! */
	return nr_pages;		/* error or pages NOT mlocked */
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma:   vma containing range to be munlock()ed.
 * @start: start address in @vma of the range
 * @end:   end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on lru.  In unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This will result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
			     unsigned long start, unsigned long end)
{
	unsigned long addr;

	lru_add_drain();
	vma->vm_flags &= ~VM_LOCKED;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		struct page *page;
		/*
		 * Although FOLL_DUMP is intended for get_dump_page(),
		 * it just so happens that its special treatment of the
		 * ZERO_PAGE (returning an error instead of doing get_page)
		 * suits munlock very well (and if somehow an abnormal page
		 * has sneaked into the range, we won't oops here: great).
		 */
		page = follow_page(vma, addr, FOLL_GET | FOLL_DUMP);
		if (page && !IS_ERR(page)) {
			lock_page(page);
			/*
			 * Like in __mlock_vma_pages_range(),
			 * because we lock page here and migration is
			 * blocked by the elevated reference, we need
			 * only check for file-cache page truncation.
			 */
			if (page->mapping)
				munlock_vma_page(page);
			unlock_page(page);
			put_page(page);
		}
		cond_resched();
	}
}

/*
 * mlock_fixup - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes via make_pages_present().
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
	unsigned long start, unsigned long end, unsigned int newflags)
{
	struct mm_struct *mm = vma->vm_mm;
	pgoff_t pgoff;
	int nr_pages;
	int ret = 0;
	int lock = newflags & VM_LOCKED;

	if (newflags == vma->vm_flags ||
			(vma->vm_flags & (VM_IO | VM_PFNMAP)))
		goto out;	/* don't set VM_LOCKED, don't count */

	if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
			is_vm_hugetlb_page(vma) ||
			vma == get_gate_vma(current)) {
		if (lock)
			make_pages_present(start, end);
		goto out;	/* don't set VM_LOCKED, don't count */
	}
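
	/*
	 * Try to merge the adjusted range with its neighbours; failing
	 * that, split the vma so that only [start, end) changes flags
	 * and gets accounted below.
	 */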
	pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
	*prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
			  vma->vm_file, pgoff, vma_policy(vma));
	if (*prev) {
		vma = *prev;
		goto success;
	}

	if (start != vma->vm_start) {
		ret = split_vma(mm, vma, start, 1);
		if (ret)
			goto out;
	}

	if (end != vma->vm_end) {
		ret = split_vma(mm, vma, end, 0);
		if (ret)
			goto out;
	}

success:
	/*
	 * Keep track of amount of locked VM.
	 */
	nr_pages = (end - start) >> PAGE_SHIFT;
	if (!lock)
		nr_pages = -nr_pages;
	mm->locked_vm += nr_pages;

	/*
	 * vm_flags is protected by the mmap_sem held in write mode.
	 * It's okay if try_to_unmap_one unmaps a page just after we
	 * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
	 */

	if (lock) {
		vma->vm_flags = newflags;
		ret = __mlock_vma_pages_range(vma, start, end);
		if (ret < 0)
			ret = __mlock_posix_error_return(ret);
	} else {
		munlock_vma_pages_range(vma, start, end);
	}

out:
	*prev = vma;
	return ret;
}
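
/*
 * Walk every vma overlapping [start, start + len) and apply (on != 0)
 * or clear (on == 0) VM_LOCKED via mlock_fixup().  The range must be
 * covered by vmas without holes, otherwise -ENOMEM is returned.
 * Called with mmap_sem held for write.
 */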
static int do_mlock(unsigned long start, size_t len, int on)
{
	unsigned long nstart, end, tmp;
	struct vm_area_struct * vma, * prev;
	int error;

	len = PAGE_ALIGN(len);
	end = start + len;
	if (end < start)
		return -EINVAL;
	if (end == start)
		return 0;
	vma = find_vma_prev(current->mm, start, &prev);
	if (!vma || vma->vm_start > start)
		return -ENOMEM;

	if (start > vma->vm_start)
		prev = vma;

	for (nstart = start ; ; ) {
		unsigned int newflags;

		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

		newflags = vma->vm_flags | VM_LOCKED;
		if (!on)
			newflags &= ~VM_LOCKED;

		tmp = vma->vm_end;
		if (tmp > end)
			tmp = end;
		error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
		if (error)
			break;
		nstart = tmp;
		if (nstart < prev->vm_end)
			nstart = prev->vm_end;
		if (nstart >= end)
			break;

		vma = prev->vm_next;
		if (!vma || vma->vm_start != nstart) {
			error = -ENOMEM;
			break;
		}
	}
	return error;
}
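
/*
 * mlock(2): round the range out to page boundaries and charge it
 * against RLIMIT_MEMLOCK; holders of CAP_IPC_LOCK bypass the limit.
 */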
SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
	unsigned long locked;
	unsigned long lock_limit;
	int error = -ENOMEM;

	if (!can_do_mlock())
		return -EPERM;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;

	locked = len >> PAGE_SHIFT;
	locked += current->mm->locked_vm;

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	/* check against resource limits */
	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
		error = do_mlock(start, len, 1);
	up_write(&current->mm->mmap_sem);
	return error;
}

SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
	start &= PAGE_MASK;
	ret = do_mlock(start, len, 0);
	up_write(&current->mm->mmap_sem);
	return ret;
}
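
/*
 * MCL_FUTURE is implemented by setting VM_LOCKED in mm->def_flags so
 * that future mappings inherit it; MCL_CURRENT (and munlockall via
 * flags == 0) walks the existing vmas and fixes each one up in turn,
 * ignoring per-vma errors.
 */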
static int do_mlockall(int flags)
{
	struct vm_area_struct * vma, * prev = NULL;
	unsigned int def_flags = 0;

	if (flags & MCL_FUTURE)
		def_flags = VM_LOCKED;
	current->mm->def_flags = def_flags;
	if (flags == MCL_FUTURE)
		goto out;

	for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
		unsigned int newflags;

		newflags = vma->vm_flags | VM_LOCKED;
		if (!(flags & MCL_CURRENT))
			newflags &= ~VM_LOCKED;

		/* Ignore errors */
		mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
	}
out:
	return 0;
}

SYSCALL_DEFINE1(mlockall, int, flags)
{
	unsigned long lock_limit;
	int ret = -EINVAL;

	if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
		goto out;

	ret = -EPERM;
	if (!can_do_mlock())
		goto out;

	lru_add_drain_all();	/* flush pagevec */

	down_write(&current->mm->mmap_sem);

	lock_limit = rlimit(RLIMIT_MEMLOCK);
	lock_limit >>= PAGE_SHIFT;

	ret = -ENOMEM;
	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
	    capable(CAP_IPC_LOCK))
		ret = do_mlockall(flags);
	up_write(&current->mm->mmap_sem);
out:
	return ret;
}

SYSCALL_DEFINE0(munlockall)
{
	int ret;

	down_write(&current->mm->mmap_sem);
	ret = do_mlockall(0);
	up_write(&current->mm->mmap_sem);
	return ret;
}

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);
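
/*
 * Charge @size bytes of SHM_LOCKed memory to @user.  Returns 1 and
 * takes a reference on @user when the request fits within
 * RLIMIT_MEMLOCK (or the caller has CAP_IPC_LOCK), 0 otherwise.
 */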
int user_shm_lock(size_t size, struct user_struct *user)
{
	unsigned long lock_limit, locked;
	int allowed = 0;

	locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	lock_limit = rlimit(RLIMIT_MEMLOCK);
	if (lock_limit == RLIM_INFINITY)
		allowed = 1;
	lock_limit >>= PAGE_SHIFT;
	spin_lock(&shmlock_user_lock);
	if (!allowed &&
	    locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
		goto out;
	get_uid(user);
	user->locked_shm += locked;
	allowed = 1;
out:
	spin_unlock(&shmlock_user_lock);
	return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
	spin_lock(&shmlock_user_lock);
	user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	spin_unlock(&shmlock_user_lock);
	free_uid(user);
}
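
/*
 * Charge @size bytes against both the RLIMIT_AS and RLIMIT_MEMLOCK
 * limits in @rlim, updating mm->total_vm and mm->locked_vm on success.
 * Returns 0 on success, -ENOMEM if either limit would be exceeded.
 */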
int account_locked_memory(struct mm_struct *mm, struct rlimit *rlim,
			  size_t size)
{
	unsigned long lim, vm, pgsz;
	int error = -ENOMEM;

	pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	down_write(&mm->mmap_sem);

	lim = ACCESS_ONCE(rlim[RLIMIT_AS].rlim_cur) >> PAGE_SHIFT;
	vm = mm->total_vm + pgsz;
	if (lim < vm)
		goto out;

	lim = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur) >> PAGE_SHIFT;
	vm = mm->locked_vm + pgsz;
	if (lim < vm)
		goto out;

	mm->total_vm += pgsz;
	mm->locked_vm += pgsz;

	error = 0;
out:
	up_write(&mm->mmap_sem);
	return error;
}

void refund_locked_memory(struct mm_struct *mm, size_t size)
{
	unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

	down_write(&mm->mmap_sem);

	mm->total_vm -= pgsz;
	mm->locked_vm -= pgsz;

	up_write(&mm->mmap_sem);
}