/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
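
/*
 * Add @item to the LRU list of the NUMA node that backs its memory.
 * Returns true if the item was added, false if it was already on a
 * list. The first item added to a node marks that node active.
 */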
bool list_lru_add(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	WARN_ON_ONCE(nlru->nr_items < 0);
	if (list_empty(item)) {
		list_add_tail(item, &nlru->list);
		if (nlru->nr_items++ == 0)
			node_set(nid, lru->active_nodes);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_add);
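
/*
 * Remove @item from its node's LRU list. Returns true if the item was
 * present and has been removed, false if it was not on a list. The
 * last item removed from a node marks that node inactive.
 */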
bool list_lru_del(struct list_lru *lru, struct list_head *item)
{
	int nid = page_to_nid(virt_to_page(item));
	struct list_lru_node *nlru = &lru->node[nid];

	spin_lock(&nlru->lock);
	if (!list_empty(item)) {
		list_del_init(item);
		if (--nlru->nr_items == 0)
			node_clear(nid, lru->active_nodes);
		WARN_ON_ONCE(nlru->nr_items < 0);
		spin_unlock(&nlru->lock);
		return true;
	}
	spin_unlock(&nlru->lock);
	return false;
}
EXPORT_SYMBOL_GPL(list_lru_del);
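
/*
 * Return the number of items on the LRU summed over all active nodes.
 * Nodes are locked one at a time, so the total is only a snapshot if
 * items are being added or removed concurrently.
 */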
unsigned long list_lru_count(struct list_lru *lru)
{
	unsigned long count = 0;
	int nid;

	for_each_node_mask(nid, lru->active_nodes) {
		struct list_lru_node *nlru = &lru->node[nid];

		spin_lock(&nlru->lock);
		WARN_ON_ONCE(nlru->nr_items < 0);
		count += nlru->nr_items;
		spin_unlock(&nlru->lock);
	}
	return count;
}
EXPORT_SYMBOL_GPL(list_lru_count);
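
/*
 * Walk one node's list, invoking @isolate on each item under the node
 * lock. The callback reports what it did with the item: removed it,
 * wants it rotated to the tail, skipped it, or dropped the lock and
 * needs the walk restarted (LRU_RETRY). Returns the number of items
 * removed; *nr_to_walk is decremented for every item visited.
 */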
unsigned long
list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
		   void *cb_arg, unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_head *item, *n;
	unsigned long isolated = 0;
	/*
	 * If we don't keep track of which pass we are on, we can loop
	 * forever at LRU_RETRY, since we have no guarantee that the caller
	 * will be able to do something other than retry on the next pass.
	 * We handle this by allowing at most one retry per object. This
	 * should not be altered by any condition other than LRU_RETRY.
	 */
	bool first_pass = true;

	spin_lock(&nlru->lock);
restart:
	list_for_each_safe(item, n, &nlru->list) {
		enum lru_status ret;

		/*
		 * Consume the walk budget before calling the callback so
		 * that the unsigned counter cannot wrap below zero once it
		 * is exhausted.
		 */
		if (*nr_to_walk == 0)
			break;
		(*nr_to_walk)--;

		ret = isolate(item, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED:
			/* The callback has taken the item off the list. */
			if (--nlru->nr_items == 0)
				node_clear(nid, lru->active_nodes);
			WARN_ON_ONCE(nlru->nr_items < 0);
			isolated++;
			break;
		case LRU_ROTATE:
			list_move_tail(item, &nlru->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			if (!first_pass) {
				first_pass = true;
				break;
			}
			first_pass = false;
			goto restart;
		default:
			BUG();
		}
	}
	spin_unlock(&nlru->lock);
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
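
/*
 * Walk each active node in turn until @nr_to_walk items have been
 * visited. Returns the total number of items isolated by @isolate.
 */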
unsigned long list_lru_walk(struct list_lru *lru, list_lru_walk_cb isolate,
			    void *cb_arg, unsigned long nr_to_walk)
{
	unsigned long isolated = 0;
	int nid;

	for_each_node_mask(nid, lru->active_nodes) {
		isolated += list_lru_walk_node(lru, nid, isolate,
					       cb_arg, &nr_to_walk);
		if (nr_to_walk == 0)
			break;
	}
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk);
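
/*
 * Splice the whole node list onto a private list and hand it to
 * @dispose with the node lock dropped, so the callback may block.
 * Items can be re-added while the lock is released, hence the loop.
 */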
static unsigned long list_lru_dispose_all_node(struct list_lru *lru, int nid,
					       list_lru_dispose_cb dispose)
{
	struct list_lru_node *nlru = &lru->node[nid];
	LIST_HEAD(dispose_list);
	unsigned long disposed = 0;

	spin_lock(&nlru->lock);
	while (!list_empty(&nlru->list)) {
		list_splice_init(&nlru->list, &dispose_list);
		disposed += nlru->nr_items;
		nlru->nr_items = 0;
		node_clear(nid, lru->active_nodes);
		spin_unlock(&nlru->lock);
		dispose(&dispose_list);
		spin_lock(&nlru->lock);
	}
	spin_unlock(&nlru->lock);
	return disposed;
}
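
/*
 * Repeat whole passes over all active nodes until a pass disposes of
 * nothing, so that items added concurrently during a pass are still
 * caught. Returns the total number of items disposed.
 */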
unsigned long list_lru_dispose_all(struct list_lru *lru,
				   list_lru_dispose_cb dispose)
{
	unsigned long disposed;
	unsigned long total = 0;
	int nid;

	do {
		disposed = 0;
		for_each_node_mask(nid, lru->active_nodes) {
			disposed += list_lru_dispose_all_node(lru, nid,
							      dispose);
		}
		total += disposed;
	} while (disposed != 0);
	return total;
}
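
/*
 * Initialise the per-node locks and list heads and clear the active
 * node mask. Must be called before the lru is used.
 */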
int list_lru_init(struct list_lru *lru)
{
	int i;

	nodes_clear(lru->active_nodes);
	for (i = 0; i < MAX_NUMNODES; i++) {
		spin_lock_init(&lru->node[i].lock);
		INIT_LIST_HEAD(&lru->node[i].list);
		lru->node[i].nr_items = 0;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(list_lru_init);
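
/*
 * Illustrative usage sketch (not part of this file). A cache tracks
 * its objects on a list_lru and reclaims them via list_lru_walk(). The
 * names my_obj, lru_entry, my_obj_busy() and my_obj_free() below are
 * hypothetical. Note that the walker above does not delete the item on
 * LRU_REMOVED, so the isolate callback must take it off the list
 * itself:
 *
 *	static enum lru_status my_isolate(struct list_head *item,
 *					  spinlock_t *lock, void *cb_arg)
 *	{
 *		struct my_obj *obj = container_of(item, struct my_obj,
 *						  lru_entry);
 *
 *		if (my_obj_busy(obj))
 *			return LRU_SKIP;
 *		list_del_init(item);
 *		my_obj_free(obj);	(must not sleep: the lock is held)
 *		return LRU_REMOVED;
 *	}
 *
 *	list_lru_init(&my_lru);
 *	list_lru_add(&my_lru, &obj->lru_entry);
 *	freed = list_lru_walk(&my_lru, my_isolate, NULL, 128);
 */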