/*
 * mm/thrash.c
 *
 * Copyright (C) 2004, Red Hat, Inc.
 * Copyright (C) 2004, Rik van Riel <riel@redhat.com>
 * Released under the GPL, see the file COPYING for details.
 *
 * Simple token based thrashing protection, using the algorithm
 * described in: http://www.cse.ohio-state.edu/hpcs/WWW/HTML/publications/abs05-1.html
 *
 * Sep 2006, Ashwin Chaugule <ashwin.chaugule@celunite.com>
 * Improved algorithm to pass token:
 * Each task has a priority which is incremented if it contended
 * for the token in an interval less than its previous attempt.
 * If the token is acquired, that task's priority is boosted to prevent
 * the token from bouncing around too often and to let the task make
 * some progress in its execution.
 */

#include <linux/jiffies.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/swap.h>
#include <linux/memcontrol.h>

#include <trace/events/vmscan.h>

#define TOKEN_AGING_INTERVAL	(0xFF)

static DEFINE_SPINLOCK(swap_token_lock);
struct mm_struct *swap_token_mm;
struct mem_cgroup *swap_token_memcg;
  29. #ifdef CONFIG_CGROUP_MEM_RES_CTLR
  30. static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
  31. {
  32. struct mem_cgroup *memcg;
  33. memcg = try_get_mem_cgroup_from_mm(mm);
  34. if (memcg)
  35. css_put(mem_cgroup_css(memcg));
  36. return memcg;
  37. }
  38. #else
  39. static struct mem_cgroup *swap_token_memcg_from_mm(struct mm_struct *mm)
  40. {
  41. return NULL;
  42. }
  43. #endif
/*
 * grab_swap_token - contend for the swap token on a page fault.
 * @mm: the faulting task's mm.
 *
 * Adjusts @mm's token priority based on how quickly it refaulted
 * relative to its previous fault (a shorter interval means heavier
 * thrashing pressure, so the priority rises) and hands @mm the token
 * when it out-prioritizes the current holder.  Uses spin_trylock()
 * so the fault path never blocks on this bookkeeping.
 */
void grab_swap_token(struct mm_struct *mm)
{
	int current_interval;
	unsigned int old_prio = mm->token_priority;
	/* Shared fault counter across all mms; also drives priority aging. */
	static unsigned int global_faults;
	static unsigned int last_aging;

	/*
	 * NOTE(review): incremented before the trylock, i.e. outside
	 * swap_token_lock — concurrent faults can lose counts.
	 * Presumably tolerated because this is only a heuristic; confirm.
	 */
	global_faults++;

	current_interval = global_faults - mm->faultstamp;

	/* Never spin in the fault path; just skip this round. */
	if (!spin_trylock(&swap_token_lock))
		return;

	/* First come first served */
	if (!swap_token_mm)
		goto replace_token;

	/*
	 * Usually, we don't need priority aging because long interval faults
	 * makes priority decrease quickly. But there is one exception. If the
	 * token owner task is sleeping, it never make long interval faults.
	 * Thus, we need a priority aging mechanism instead. The requirements
	 * of priority aging are
	 *  1) An aging interval is reasonable enough long. Too short aging
	 *     interval makes quick swap token lost and decrease performance.
	 *  2) The swap token owner task have to get priority aging even if
	 *     it's under sleep.
	 */
	if ((global_faults - last_aging) > TOKEN_AGING_INTERVAL) {
		swap_token_mm->token_priority /= 2;
		last_aging = global_faults;
	}

	if (mm == swap_token_mm) {
		/* The holder keeps faulting: boost it so the token sticks. */
		mm->token_priority += 2;
		goto update_priority;
	}

	/* Refaulting sooner than last time means increasing pressure. */
	if (current_interval < mm->last_interval)
		mm->token_priority++;
	else {
		if (likely(mm->token_priority > 0))
			mm->token_priority--;
	}

	/* Check if we deserve the token */
	if (mm->token_priority > swap_token_mm->token_priority)
		goto replace_token;

update_priority:
	trace_update_swap_token_priority(mm, old_prio, swap_token_mm);

out:
	/* Stamp this fault on all exits that took the lock. */
	mm->faultstamp = global_faults;
	mm->last_interval = current_interval;
	spin_unlock(&swap_token_lock);
	return;

replace_token:
	mm->token_priority += 2;
	trace_replace_swap_token(swap_token_mm, mm);
	swap_token_mm = mm;
	swap_token_memcg = swap_token_memcg_from_mm(mm);
	/* Restart the aging clock for the new holder. */
	last_aging = global_faults;
	goto out;
}
  100. /* Called on process exit. */
  101. void __put_swap_token(struct mm_struct *mm)
  102. {
  103. spin_lock(&swap_token_lock);
  104. if (likely(mm == swap_token_mm)) {
  105. trace_put_swap_token(swap_token_mm);
  106. swap_token_mm = NULL;
  107. swap_token_memcg = NULL;
  108. }
  109. spin_unlock(&swap_token_lock);
  110. }
  111. static bool match_memcg(struct mem_cgroup *a, struct mem_cgroup *b)
  112. {
  113. if (!a)
  114. return true;
  115. if (!b)
  116. return true;
  117. if (a == b)
  118. return true;
  119. return false;
  120. }
  121. void disable_swap_token(struct mem_cgroup *memcg)
  122. {
  123. /* memcg reclaim don't disable unrelated mm token. */
  124. if (match_memcg(memcg, swap_token_memcg)) {
  125. spin_lock(&swap_token_lock);
  126. if (match_memcg(memcg, swap_token_memcg)) {
  127. trace_disable_swap_token(swap_token_mm);
  128. swap_token_mm = NULL;
  129. swap_token_memcg = NULL;
  130. }
  131. spin_unlock(&swap_token_lock);
  132. }
  133. }