/*
 *	linux/mm/mlock.c
 *
 *  (C) Copyright 1995  Linus Torvalds
 *  (C) Copyright 2002  Christoph Hellwig
 */

#include <linux/capability.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/pagemap.h>
#include <linux/mempolicy.h>
#include <linux/syscalls.h>
#include <linux/sched.h>
#include <linux/module.h>
#include <linux/rmap.h>
#include <linux/mmzone.h>
#include <linux/hugetlb.h>

#include "internal.h"

int can_do_mlock(void)
{
        if (capable(CAP_IPC_LOCK))
                return 1;
        if (current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur != 0)
                return 1;
        return 0;
}
EXPORT_SYMBOL(can_do_mlock);
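
/*
 * Illustrative userspace sketch (an assumption, not part of this file):
 * mlock() is permitted when the caller either has CAP_IPC_LOCK or a
 * non-zero RLIMIT_MEMLOCK soft limit.  A hypothetical probe of the
 * rlimit half of that test:
 *
 *	#include <sys/resource.h>
 *
 *	static int mlock_permitted(void)
 *	{
 *		struct rlimit rl;
 *
 *		if (getrlimit(RLIMIT_MEMLOCK, &rl))
 *			return 0;
 *		// CAP_IPC_LOCK would also suffice, but is not probed here.
 *		return rl.rlim_cur != 0;
 *	}
 */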

#ifdef CONFIG_UNEVICTABLE_LRU
/*
 * Mlocked pages are marked with the PageMlocked() flag for efficient testing
 * in vmscan and, possibly, the fault path; and to support semi-accurate
 * statistics.
 *
 * An mlocked page [PageMlocked(page)] is unevictable.  As such, it will
 * be placed on the LRU "unevictable" list, rather than the [in]active lists.
 * The unevictable list is an LRU sibling list to the [in]active lists.
 * PageUnevictable is set to indicate the unevictable state.
 *
 * When mlocking pages lazily via vmscan, it is important to ensure that the
 * vma's VM_LOCKED status is not concurrently being modified; otherwise we
 * may mlock a page that is in the middle of being munlocked.  So lazy mlock
 * must take the mmap_sem for read, and verify that the vma really is locked
 * (see mm/rmap.c).
 */
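/*
 * Sketch of the lazy-mlock pattern described above (an illustrative
 * assumption, modeled on the reclaim path in mm/rmap.c of this era;
 * this is not code called from this file):
 *
 *	if (down_read_trylock(&vma->vm_mm->mmap_sem)) {
 *		if (vma->vm_flags & VM_LOCKED) {
 *			// VM_LOCKED is stable while mmap_sem is held,
 *			// so the page can safely be lazily mlocked.
 *			mlock_vma_page(page);
 *		}
 *		up_read(&vma->vm_mm->mmap_sem);
 *	}
 */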
/*
 * LRU accounting for clear_page_mlock()
 */
void __clear_page_mlock(struct page *page)
{
        VM_BUG_ON(!PageLocked(page));

        if (!page->mapping) {   /* truncated ? */
                return;
        }

        dec_zone_page_state(page, NR_MLOCK);
        count_vm_event(UNEVICTABLE_PGCLEARED);
        if (!isolate_lru_page(page)) {
                putback_lru_page(page);
        } else {
                /*
                 * We lost the race; the page has already moved to the
                 * evictable list.
                 */
                if (PageUnevictable(page))
                        count_vm_event(UNEVICTABLE_PGSTRANDED);
        }
}

/*
 * Mark page as mlocked if not already.
 * If the page is on the LRU, isolate it and put it back so that it moves
 * to the unevictable list.
 */
void mlock_vma_page(struct page *page)
{
        BUG_ON(!PageLocked(page));

        if (!TestSetPageMlocked(page)) {
                inc_zone_page_state(page, NR_MLOCK);
                count_vm_event(UNEVICTABLE_PGMLOCKED);
                if (!isolate_lru_page(page))
                        putback_lru_page(page);
        }
}

/*
 * Called from the munlock()/munmap() path with the page supposedly on
 * the LRU.
 *
 * Note: unlike mlock_vma_page(), we can't just clear the PageMlocked
 * flag [in try_to_munlock()] and then attempt to isolate the page.  We
 * must isolate the page to keep others from messing with its unevictable
 * and mlocked state while we try to munlock it.  However, we pre-clear
 * the mlocked state anyway, as we might lose the isolation race and not
 * get another chance to clear PageMlocked.  If we successfully isolate
 * the page and try_to_munlock() detects other VM_LOCKED vmas mapping the
 * page, it will restore the PageMlocked state, unless the page is mapped
 * in a non-linear vma.  So, we go ahead and SetPageMlocked(), perhaps
 * redundantly.
 *
 * If we lose the isolation race and the page is mapped by other
 * VM_LOCKED vmas, we'll detect this in vmscan--via try_to_munlock() or
 * try_to_unmap()--either of which will restore the PageMlocked state by
 * calling mlock_vma_page() above, if it can grab the vma's mmap sem.
 */
static void munlock_vma_page(struct page *page)
{
        BUG_ON(!PageLocked(page));

        if (TestClearPageMlocked(page)) {
                dec_zone_page_state(page, NR_MLOCK);
                if (!isolate_lru_page(page)) {
                        int ret = try_to_munlock(page);
                        /*
                         * Did try_to_munlock() succeed or punt?
                         */
                        if (ret == SWAP_SUCCESS || ret == SWAP_AGAIN)
                                count_vm_event(UNEVICTABLE_PGMUNLOCKED);

                        putback_lru_page(page);
                } else {
                        /*
                         * We lost the race.  Let try_to_unmap() deal
                         * with it.  At least we get the page state and
                         * mlock stats right.  However, the page is still
                         * on the unevictable list.  We'll fix that up
                         * when the page is eventually freed or when we
                         * scan the unevictable list.
                         */
                        if (PageUnevictable(page))
                                count_vm_event(UNEVICTABLE_PGSTRANDED);
                        else
                                count_vm_event(UNEVICTABLE_PGMUNLOCKED);
                }
        }
}

/**
 * __mlock_vma_pages_range() -  mlock/munlock a range of pages in the vma.
 * @vma:   target vma
 * @start: start address
 * @end:   end address
 * @mlock: 0 indicates munlock, otherwise mlock.
 *
 * If @mlock == 0, unlock an mlocked range;
 * else mlock the range of pages.  This takes care of making the pages
 * present, too.
 *
 * return 0 on success, negative error code on error.
 *
 * vma->vm_mm->mmap_sem must be held for at least read.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                                    unsigned long start, unsigned long end,
                                    int mlock)
{
        struct mm_struct *mm = vma->vm_mm;
        unsigned long addr = start;
        struct page *pages[16];         /* 16 gives a reasonable batch */
        int nr_pages = (end - start) / PAGE_SIZE;
        int ret = 0;
        int gup_flags = 0;

        VM_BUG_ON(start & ~PAGE_MASK);
        VM_BUG_ON(end   & ~PAGE_MASK);
        VM_BUG_ON(start < vma->vm_start);
        VM_BUG_ON(end   > vma->vm_end);
        VM_BUG_ON((!rwsem_is_locked(&mm->mmap_sem)) &&
                  (atomic_read(&mm->mm_users) != 0));

        /*
         * mlock:   don't populate pages if the vma has PROT_NONE permission.
         * munlock: always munlock, even if the vma has PROT_NONE
         *          permission or SIGKILL is pending.
         */
        if (!mlock)
                gup_flags |= GUP_FLAGS_IGNORE_VMA_PERMISSIONS |
                             GUP_FLAGS_IGNORE_SIGKILL;

        if (vma->vm_flags & VM_WRITE)
                gup_flags |= GUP_FLAGS_WRITE;

        while (nr_pages > 0) {
                int i;

                cond_resched();

                /*
                 * get_user_pages makes pages present if we are
                 * setting mlock, and this extra reference count will
                 * disable migration of this page.  However, the page may
                 * still be truncated out from under us.
                 */
                ret = __get_user_pages(current, mm, addr,
                                min_t(int, nr_pages, ARRAY_SIZE(pages)),
                                gup_flags, pages, NULL);
                /*
                 * This can happen for, e.g., VM_NONLINEAR regions before
                 * a page has been allocated and mapped at a given offset,
                 * or for addresses that map beyond the end of a file.
                 * We'll mlock the pages if/when they get faulted in.
                 */
                if (ret < 0)
                        break;
                if (ret == 0) {
                        /*
                         * We know the vma is there, so the only time
                         * we cannot get a single page should be an
                         * error (ret < 0) case.
                         */
                        WARN_ON(1);
                        break;
                }

                lru_add_drain();        /* push cached pages to LRU */

                for (i = 0; i < ret; i++) {
                        struct page *page = pages[i];

                        lock_page(page);
                        /*
                         * Because we lock the page here and migration is
                         * blocked by the elevated reference, we need only
                         * check for page truncation (file-cache only).
                         */
                        if (page->mapping) {
                                if (mlock)
                                        mlock_vma_page(page);
                                else
                                        munlock_vma_page(page);
                        }
                        unlock_page(page);
                        put_page(page);         /* ref from get_user_pages() */

                        /*
                         * Here we assume that get_user_pages() has given us
                         * a list of virtually contiguous pages.
                         */
                        addr += PAGE_SIZE;      /* for next get_user_pages() */
                        nr_pages--;
                }
                ret = 0;
        }

        return ret;     /* count entire vma as locked_vm */
}

/*
 * Convert get_user_pages() return value to posix mlock() error.
 * POSIX specifies ENOMEM for ranges that are not fully mapped and
 * EAGAIN when some or all of the memory could not be locked, so
 * -EFAULT and -ENOMEM from get_user_pages() are remapped accordingly.
 */
static int __mlock_posix_error_return(long retval)
{
        if (retval == -EFAULT)
                retval = -ENOMEM;
        else if (retval == -ENOMEM)
                retval = -EAGAIN;
        return retval;
}

#else /* CONFIG_UNEVICTABLE_LRU */

/*
 * Just make pages present if VM_LOCKED.  No-op if unlocking.
 */
static long __mlock_vma_pages_range(struct vm_area_struct *vma,
                                    unsigned long start, unsigned long end,
                                    int mlock)
{
        if (mlock && (vma->vm_flags & VM_LOCKED))
                return make_pages_present(start, end);
        return 0;
}

static inline int __mlock_posix_error_return(long retval)
{
        return 0;
}

#endif /* CONFIG_UNEVICTABLE_LRU */

/**
 * mlock_vma_pages_range() - mlock pages in specified vma range.
 * @vma - the vma containing the specified address range
 * @start - starting address in @vma to mlock
 * @end   - end address [+1] in @vma to mlock
 *
 * For mmap()/mremap()/expansion of mlocked vma.
 *
 * return 0 on success for "normal" vmas.
 *
 * return number of pages [> 0] to be removed from locked_vm on success
 * of "special" vmas.
 *
 * return negative error if the vma spanning @start-@end disappears while
 * the mmap semaphore is dropped.  Unlikely?
 */
long mlock_vma_pages_range(struct vm_area_struct *vma,
                        unsigned long start, unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int nr_pages = (end - start) / PAGE_SIZE;
        BUG_ON(!(vma->vm_flags & VM_LOCKED));

        /*
         * filter unlockable vmas
         */
        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                goto no_mlock;

        if (!((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
                        is_vm_hugetlb_page(vma) ||
                        vma == get_gate_vma(current))) {
                long error;
                downgrade_write(&mm->mmap_sem);

                error = __mlock_vma_pages_range(vma, start, end, 1);

                up_read(&mm->mmap_sem);
                /* vma can change or disappear */
                down_write(&mm->mmap_sem);
                vma = find_vma(mm, start);
                /* non-NULL vma must contain @start, but need to check @end */
                if (!vma || end > vma->vm_end)
                        return -ENOMEM;

                return 0;       /* hide other errors from mmap(), et al */
        }

        /*
         * User mapped kernel pages or huge pages:
         * make these pages present to populate the ptes, but
         * fall thru' to reset VM_LOCKED--no need to unlock, and
         * return nr_pages so these don't get counted against task's
         * locked limit.  huge pages are already counted against
         * locked vm limit.
         */
        make_pages_present(start, end);

no_mlock:
        vma->vm_flags &= ~VM_LOCKED;    /* and don't come back! */
        return nr_pages;                /* error or pages NOT mlocked */
}

/*
 * munlock_vma_pages_range() - munlock all pages in the vma range.
 * @vma - vma containing range to be munlock()ed.
 * @start - start address in @vma of the range
 * @end - end of range in @vma.
 *
 * For mremap(), munmap() and exit().
 *
 * Called with @vma VM_LOCKED.
 *
 * Returns with VM_LOCKED cleared.  Callers must be prepared to
 * deal with this.
 *
 * We don't save and restore VM_LOCKED here because pages are
 * still on the lru.  In the unmap path, pages might be scanned by reclaim
 * and re-mlocked by try_to_{munlock|unmap} before we unmap and
 * free them.  This would result in freeing mlocked pages.
 */
void munlock_vma_pages_range(struct vm_area_struct *vma,
                           unsigned long start, unsigned long end)
{
        vma->vm_flags &= ~VM_LOCKED;
        __mlock_vma_pages_range(vma, start, end, 0);
}

/*
 * mlock_fixup  - handle mlock[all]/munlock[all] requests.
 *
 * Filters out "special" vmas -- VM_LOCKED never gets set for these, and
 * munlock is a no-op.  However, for some special vmas, we go ahead and
 * populate the ptes via make_pages_present().
 *
 * For vmas that pass the filters, merge/split as appropriate.
 */
static int mlock_fixup(struct vm_area_struct *vma, struct vm_area_struct **prev,
        unsigned long start, unsigned long end, unsigned int newflags)
{
        struct mm_struct *mm = vma->vm_mm;
        pgoff_t pgoff;
        int nr_pages;
        int ret = 0;
        int lock = newflags & VM_LOCKED;

        if (newflags == vma->vm_flags ||
                        (vma->vm_flags & (VM_IO | VM_PFNMAP)))
                goto out;       /* don't set VM_LOCKED, don't count */

        if ((vma->vm_flags & (VM_DONTEXPAND | VM_RESERVED)) ||
                        is_vm_hugetlb_page(vma) ||
                        vma == get_gate_vma(current)) {
                if (lock)
                        make_pages_present(start, end);
                goto out;       /* don't set VM_LOCKED, don't count */
        }

        pgoff = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
        *prev = vma_merge(mm, *prev, start, end, newflags, vma->anon_vma,
                          vma->vm_file, pgoff, vma_policy(vma));
        if (*prev) {
                vma = *prev;
                goto success;
        }

        if (start != vma->vm_start) {
                ret = split_vma(mm, vma, start, 1);
                if (ret)
                        goto out;
        }

        if (end != vma->vm_end) {
                ret = split_vma(mm, vma, end, 0);
                if (ret)
                        goto out;
        }

success:
        /*
         * Keep track of amount of locked VM.
         */
        nr_pages = (end - start) >> PAGE_SHIFT;
        if (!lock)
                nr_pages = -nr_pages;
        mm->locked_vm += nr_pages;

        /*
         * vm_flags is protected by the mmap_sem held in write mode.
         * It's okay if try_to_unmap_one unmaps a page just after we
         * set VM_LOCKED, __mlock_vma_pages_range will bring it back.
         */
        vma->vm_flags = newflags;

        if (lock) {
                /*
                 * mmap_sem is currently held for write.  Downgrade the write
                 * lock to a read lock so that other faults, mmap scans, ...
                 * can proceed while we fault in all the pages.
                 */
                downgrade_write(&mm->mmap_sem);

                ret = __mlock_vma_pages_range(vma, start, end, 1);

                /*
                 * Need to reacquire mmap sem in write mode, as our callers
                 * expect this.  We have no support for atomically upgrading
                 * a sem to write, so we need to check for ranges while sem
                 * is unlocked.
                 */
                up_read(&mm->mmap_sem);
                /* vma can change or disappear */
                down_write(&mm->mmap_sem);
                *prev = find_vma(mm, start);
                /* non-NULL *prev must contain @start, but need to check @end */
                if (!(*prev) || end > (*prev)->vm_end)
                        ret = -ENOMEM;
                else if (ret > 0) {
                        mm->locked_vm -= ret;
                        ret = 0;
                } else
                        ret = __mlock_posix_error_return(ret); /* translate if needed */
        } else {
                /*
                 * TODO:  for unlocking, pages will already be resident, so
                 * we don't need to wait for allocations/reclaim/pagein, ...
                 * However, unlocking a very large region can still take a
                 * while.  Should we downgrade the semaphore for both lock
                 * AND unlock ?
                 */
                __mlock_vma_pages_range(vma, start, end, 0);
        }

out:
        *prev = vma;
        return ret;
}

static int do_mlock(unsigned long start, size_t len, int on)
{
        unsigned long nstart, end, tmp;
        struct vm_area_struct *vma, *prev;
        int error;

        len = PAGE_ALIGN(len);
        end = start + len;
        if (end < start)
                return -EINVAL;
        if (end == start)
                return 0;
        vma = find_vma_prev(current->mm, start, &prev);
        if (!vma || vma->vm_start > start)
                return -ENOMEM;

        if (start > vma->vm_start)
                prev = vma;

        for (nstart = start ; ; ) {
                unsigned int newflags;

                /* Here we know that  vma->vm_start <= nstart < vma->vm_end. */

                newflags = vma->vm_flags | VM_LOCKED;
                if (!on)
                        newflags &= ~VM_LOCKED;

                /* fix up the whole vma, or just the part inside [start, end) */
                tmp = vma->vm_end;
                if (tmp > end)
                        tmp = end;
                error = mlock_fixup(vma, &prev, nstart, tmp, newflags);
                if (error)
                        break;
                nstart = tmp;
                if (nstart < prev->vm_end)
                        nstart = prev->vm_end;
                if (nstart >= end)
                        break;

                vma = prev->vm_next;
                if (!vma || vma->vm_start != nstart) {
                        error = -ENOMEM;
                        break;
                }
        }
        return error;
}

asmlinkage long sys_mlock(unsigned long start, size_t len)
{
        unsigned long locked;
        unsigned long lock_limit;
        int error = -ENOMEM;

        if (!can_do_mlock())
                return -EPERM;

        lru_add_drain_all();    /* flush pagevec */

        down_write(&current->mm->mmap_sem);
        len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
        start &= PAGE_MASK;

        locked = len >> PAGE_SHIFT;
        locked += current->mm->locked_vm;

        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        /* check against resource limits */
        if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
                error = do_mlock(start, len, 1);
        up_write(&current->mm->mmap_sem);
        return error;
}
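
/*
 * Illustrative userspace usage (an assumption, not part of this file):
 * lock a buffer so it cannot be paged out, distinguishing the error
 * codes produced by the paths above.
 *
 *	#include <sys/mman.h>
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *
 *	int main(void)
 *	{
 *		size_t len = 4096;
 *		void *buf = malloc(len);
 *
 *		if (!buf)
 *			return 1;
 *		if (mlock(buf, len)) {
 *			// EPERM: no CAP_IPC_LOCK and RLIMIT_MEMLOCK is 0
 *			// ENOMEM: over the limit, or range not fully mapped
 *			perror("mlock");
 *			return 1;
 *		}
 *		// ... use buf; its pages stay resident ...
 *		munlock(buf, len);
 *		free(buf);
 *		return 0;
 *	}
 */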

asmlinkage long sys_munlock(unsigned long start, size_t len)
{
        int ret;

        down_write(&current->mm->mmap_sem);
        len = PAGE_ALIGN(len + (start & ~PAGE_MASK));
        start &= PAGE_MASK;
        ret = do_mlock(start, len, 0);
        up_write(&current->mm->mmap_sem);
        return ret;
}

static int do_mlockall(int flags)
{
        struct vm_area_struct *vma, *prev = NULL;
        unsigned int def_flags = 0;

        if (flags & MCL_FUTURE)
                def_flags = VM_LOCKED;
        current->mm->def_flags = def_flags;
        if (flags == MCL_FUTURE)
                goto out;

        for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
                unsigned int newflags;

                newflags = vma->vm_flags | VM_LOCKED;
                if (!(flags & MCL_CURRENT))
                        newflags &= ~VM_LOCKED;

                /* Ignore errors */
                mlock_fixup(vma, &prev, vma->vm_start, vma->vm_end, newflags);
        }
out:
        return 0;
}

asmlinkage long sys_mlockall(int flags)
{
        unsigned long lock_limit;
        int ret = -EINVAL;

        if (!flags || (flags & ~(MCL_CURRENT | MCL_FUTURE)))
                goto out;

        ret = -EPERM;
        if (!can_do_mlock())
                goto out;

        lru_add_drain_all();    /* flush pagevec */

        down_write(&current->mm->mmap_sem);

        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
        lock_limit >>= PAGE_SHIFT;

        ret = -ENOMEM;
        if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
            capable(CAP_IPC_LOCK))
                ret = do_mlockall(flags);
        up_write(&current->mm->mmap_sem);
out:
        return ret;
}

asmlinkage long sys_munlockall(void)
{
        int ret;

        down_write(&current->mm->mmap_sem);
        ret = do_mlockall(0);
        up_write(&current->mm->mmap_sem);
        return ret;
}
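
/*
 * Illustrative userspace usage (an assumption, not part of this file):
 * a real-time style process locks everything it has mapped now and
 * everything it will map later, then undoes it on shutdown.
 *
 *	#include <sys/mman.h>
 *
 *	if (mlockall(MCL_CURRENT | MCL_FUTURE))
 *		perror("mlockall");	// EPERM, ENOMEM or EINVAL
 *	// ... latency-sensitive work, no major page faults ...
 *	munlockall();
 */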

/*
 * Objects with different lifetime than processes (SHM_LOCK and SHM_HUGETLB
 * shm segments) get accounted against the user_struct instead.
 */
static DEFINE_SPINLOCK(shmlock_user_lock);

int user_shm_lock(size_t size, struct user_struct *user)
{
        unsigned long lock_limit, locked;
        int allowed = 0;

        locked = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur;
        if (lock_limit == RLIM_INFINITY)
                allowed = 1;
        lock_limit >>= PAGE_SHIFT;
        spin_lock(&shmlock_user_lock);
        if (!allowed &&
            locked + user->locked_shm > lock_limit && !capable(CAP_IPC_LOCK))
                goto out;
        get_uid(user);
        user->locked_shm += locked;
        allowed = 1;
out:
        spin_unlock(&shmlock_user_lock);
        return allowed;
}

void user_shm_unlock(size_t size, struct user_struct *user)
{
        spin_lock(&shmlock_user_lock);
        user->locked_shm -= (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        spin_unlock(&shmlock_user_lock);
        free_uid(user);
}
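
/*
 * Illustrative userspace counterpart (an assumption, not part of this
 * file): SysV shm segments are locked via shmctl(), and the pages get
 * charged to the owning user as described above, since the segment can
 * outlive the locking process.
 *
 *	#include <sys/shm.h>
 *
 *	int id = shmget(IPC_PRIVATE, 1 << 20, IPC_CREAT | 0600);
 *
 *	if (id >= 0 && shmctl(id, SHM_LOCK, NULL))
 *		perror("shmctl(SHM_LOCK)");	// reaches user_shm_lock()
 */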

void *alloc_locked_buffer(size_t size)
{
        unsigned long rlim, vm, pgsz;
        void *buffer = NULL;

        pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

        down_write(&current->mm->mmap_sem);

        rlim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT;
        vm   = current->mm->total_vm + pgsz;
        if (rlim < vm)
                goto out;

        rlim = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT;
        vm   = current->mm->locked_vm + pgsz;
        if (rlim < vm)
                goto out;

        buffer = kzalloc(size, GFP_KERNEL);
        if (!buffer)
                goto out;

        current->mm->total_vm  += pgsz;
        current->mm->locked_vm += pgsz;

out:
        up_write(&current->mm->mmap_sem);
        return buffer;
}

void free_locked_buffer(void *buffer, size_t size)
{
        unsigned long pgsz = PAGE_ALIGN(size) >> PAGE_SHIFT;

        down_write(&current->mm->mmap_sem);

        current->mm->total_vm  -= pgsz;
        current->mm->locked_vm -= pgsz;

        up_write(&current->mm->mmap_sem);

        kfree(buffer);
}
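
/*
 * Illustrative in-kernel usage sketch (an assumption; the buffer name
 * and length are hypothetical): a caller that needs a kernel buffer
 * charged against the current task's RLIMIT_MEMLOCK would pair the two
 * helpers above.
 *
 *	void *buf = alloc_locked_buffer(len);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	// ... fill buf on behalf of current ...
 *	free_locked_buffer(buf, len);
 */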