/* include/linux/ksm.h */
  1. #ifndef __LINUX_KSM_H
  2. #define __LINUX_KSM_H
  3. /*
  4. * Memory merging support.
  5. *
  6. * This code enables dynamic sharing of identical pages found in different
  7. * memory areas, even if they are not shared by fork().
  8. */
  9. #include <linux/bitops.h>
  10. #include <linux/mm.h>
  11. #include <linux/sched.h>
  12. #include <linux/vmstat.h>
  13. struct mmu_gather;
  14. #ifdef CONFIG_KSM
  15. int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
  16. unsigned long end, int advice, unsigned long *vm_flags);
  17. int __ksm_enter(struct mm_struct *mm);
  18. void __ksm_exit(struct mm_struct *mm,
  19. struct mmu_gather **tlbp, unsigned long end);
  20. static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
  21. {
  22. if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
  23. return __ksm_enter(mm);
  24. return 0;
  25. }
  26. /*
  27. * For KSM to handle OOM without deadlock when it's breaking COW in a
  28. * likely victim of the OOM killer, exit_mmap() has to serialize with
  29. * ksm_exit() after freeing mm's pages but before freeing its page tables.
  30. * That leaves a window in which KSM might refault pages which have just
  31. * been finally unmapped: guard against that with ksm_test_exit(), and
  32. * use it after getting mmap_sem in ksm.c, to check if mm is exiting.
  33. */
  34. static inline bool ksm_test_exit(struct mm_struct *mm)
  35. {
  36. return atomic_read(&mm->mm_users) == 0;
  37. }
  38. static inline void ksm_exit(struct mm_struct *mm,
  39. struct mmu_gather **tlbp, unsigned long end)
  40. {
  41. if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
  42. __ksm_exit(mm, tlbp, end);
  43. }
  44. /*
  45. * A KSM page is one of those write-protected "shared pages" or "merged pages"
  46. * which KSM maps into multiple mms, wherever identical anonymous page content
  47. * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma.
  48. */
  49. static inline int PageKsm(struct page *page)
  50. {
  51. return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
  52. }
  53. /*
  54. * But we have to avoid the checking which page_add_anon_rmap() performs.
  55. */
/*
 * Raise the map count of a KSM page, setting up ->mapping and the anon
 * page statistics on its first mapping.  Open-coded to skip the sanity
 * checking that page_add_anon_rmap() performs, which a KSM page (with
 * its NULL anon_vma) would not satisfy.
 */
static inline void page_add_ksm_rmap(struct page *page)
{
	/*
	 * _mapcount starts at -1, so atomic_inc_and_test() is true only
	 * when this is the very first mapping of the page.
	 */
	if (atomic_inc_and_test(&page->_mapcount)) {
		page->mapping = (void *) PAGE_MAPPING_ANON;
		__inc_zone_page_state(page, NR_ANON_PAGES);
	}
}
  63. #else /* !CONFIG_KSM */
  64. static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
  65. unsigned long end, int advice, unsigned long *vm_flags)
  66. {
  67. return 0;
  68. }
  69. static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
  70. {
  71. return 0;
  72. }
  73. static inline bool ksm_test_exit(struct mm_struct *mm)
  74. {
  75. return 0;
  76. }
  77. static inline void ksm_exit(struct mm_struct *mm,
  78. struct mmu_gather **tlbp, unsigned long end)
  79. {
  80. }
  81. static inline int PageKsm(struct page *page)
  82. {
  83. return 0;
  84. }
  85. /* No stub required for page_add_ksm_rmap(page) */
  86. #endif /* !CONFIG_KSM */
  87. #endif