/* ksm.h — Kernel Samepage Merging (KSM) interface */
  1. #ifndef __LINUX_KSM_H
  2. #define __LINUX_KSM_H
/*
 * Memory merging support.
 *
 * This code enables dynamic sharing of identical pages found in different
 * memory areas, even if they are not shared by fork().
 */
  9. #include <linux/bitops.h>
  10. #include <linux/mm.h>
  11. #include <linux/sched.h>
  12. #include <linux/vmstat.h>
  13. #ifdef CONFIG_KSM
  14. int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
  15. unsigned long end, int advice, unsigned long *vm_flags);
  16. int __ksm_enter(struct mm_struct *mm);
  17. void __ksm_exit(struct mm_struct *mm);
  18. static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
  19. {
  20. if (test_bit(MMF_VM_MERGEABLE, &oldmm->flags))
  21. return __ksm_enter(mm);
  22. return 0;
  23. }
  24. static inline void ksm_exit(struct mm_struct *mm)
  25. {
  26. if (test_bit(MMF_VM_MERGEABLE, &mm->flags))
  27. __ksm_exit(mm);
  28. }
  29. /*
  30. * A KSM page is one of those write-protected "shared pages" or "merged pages"
  31. * which KSM maps into multiple mms, wherever identical anonymous page content
  32. * is found in VM_MERGEABLE vmas. It's a PageAnon page, with NULL anon_vma.
  33. */
  34. static inline int PageKsm(struct page *page)
  35. {
  36. return ((unsigned long)page->mapping == PAGE_MAPPING_ANON);
  37. }
  38. /*
  39. * But we have to avoid the checking which page_add_anon_rmap() performs.
  40. */
  41. static inline void page_add_ksm_rmap(struct page *page)
  42. {
  43. if (atomic_inc_and_test(&page->_mapcount)) {
  44. page->mapping = (void *) PAGE_MAPPING_ANON;
  45. __inc_zone_page_state(page, NR_ANON_PAGES);
  46. }
  47. }
  48. #else /* !CONFIG_KSM */
  49. static inline int ksm_madvise(struct vm_area_struct *vma, unsigned long start,
  50. unsigned long end, int advice, unsigned long *vm_flags)
  51. {
  52. return 0;
  53. }
  54. static inline int ksm_fork(struct mm_struct *mm, struct mm_struct *oldmm)
  55. {
  56. return 0;
  57. }
  58. static inline void ksm_exit(struct mm_struct *mm)
  59. {
  60. }
  61. static inline int PageKsm(struct page *page)
  62. {
  63. return 0;
  64. }
  65. /* No stub required for page_add_ksm_rmap(page) */
  66. #endif /* !CONFIG_KSM */
  67. #endif