/*
 * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
 * Authors: David Chinner and Glauber Costa
 *
 * Generic LRU infrastructure
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/list_lru.h>
  11. bool list_lru_add(struct list_lru *lru, struct list_head *item)
  12. {
  13. int nid = page_to_nid(virt_to_page(item));
  14. struct list_lru_node *nlru = &lru->node[nid];
  15. spin_lock(&nlru->lock);
  16. WARN_ON_ONCE(nlru->nr_items < 0);
  17. if (list_empty(item)) {
  18. list_add_tail(item, &nlru->list);
  19. if (nlru->nr_items++ == 0)
  20. node_set(nid, lru->active_nodes);
  21. spin_unlock(&nlru->lock);
  22. return true;
  23. }
  24. spin_unlock(&nlru->lock);
  25. return false;
  26. }
  27. EXPORT_SYMBOL_GPL(list_lru_add);
  28. bool list_lru_del(struct list_lru *lru, struct list_head *item)
  29. {
  30. int nid = page_to_nid(virt_to_page(item));
  31. struct list_lru_node *nlru = &lru->node[nid];
  32. spin_lock(&nlru->lock);
  33. if (!list_empty(item)) {
  34. list_del_init(item);
  35. if (--nlru->nr_items == 0)
  36. node_clear(nid, lru->active_nodes);
  37. WARN_ON_ONCE(nlru->nr_items < 0);
  38. spin_unlock(&nlru->lock);
  39. return true;
  40. }
  41. spin_unlock(&nlru->lock);
  42. return false;
  43. }
  44. EXPORT_SYMBOL_GPL(list_lru_del);
  45. unsigned long
  46. list_lru_count_node(struct list_lru *lru, int nid)
  47. {
  48. unsigned long count = 0;
  49. struct list_lru_node *nlru = &lru->node[nid];
  50. spin_lock(&nlru->lock);
  51. WARN_ON_ONCE(nlru->nr_items < 0);
  52. count += nlru->nr_items;
  53. spin_unlock(&nlru->lock);
  54. return count;
  55. }
  56. EXPORT_SYMBOL_GPL(list_lru_count_node);
  57. unsigned long
  58. list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
  59. void *cb_arg, unsigned long *nr_to_walk)
  60. {
  61. struct list_lru_node *nlru = &lru->node[nid];
  62. struct list_head *item, *n;
  63. unsigned long isolated = 0;
  64. spin_lock(&nlru->lock);
  65. restart:
  66. list_for_each_safe(item, n, &nlru->list) {
  67. enum lru_status ret;
  68. /*
  69. * decrement nr_to_walk first so that we don't livelock if we
  70. * get stuck on large numbesr of LRU_RETRY items
  71. */
  72. if (--(*nr_to_walk) == 0)
  73. break;
  74. ret = isolate(item, &nlru->lock, cb_arg);
  75. switch (ret) {
  76. case LRU_REMOVED:
  77. if (--nlru->nr_items == 0)
  78. node_clear(nid, lru->active_nodes);
  79. WARN_ON_ONCE(nlru->nr_items < 0);
  80. isolated++;
  81. break;
  82. case LRU_ROTATE:
  83. list_move_tail(item, &nlru->list);
  84. break;
  85. case LRU_SKIP:
  86. break;
  87. case LRU_RETRY:
  88. /*
  89. * The lru lock has been dropped, our list traversal is
  90. * now invalid and so we have to restart from scratch.
  91. */
  92. goto restart;
  93. default:
  94. BUG();
  95. }
  96. }
  97. spin_unlock(&nlru->lock);
  98. return isolated;
  99. }
  100. EXPORT_SYMBOL_GPL(list_lru_walk_node);
  101. int list_lru_init(struct list_lru *lru)
  102. {
  103. int i;
  104. nodes_clear(lru->active_nodes);
  105. for (i = 0; i < MAX_NUMNODES; i++) {
  106. spin_lock_init(&lru->node[i].lock);
  107. INIT_LIST_HEAD(&lru->node[i].list);
  108. lru->node[i].nr_items = 0;
  109. }
  110. return 0;
  111. }
  112. EXPORT_SYMBOL_GPL(list_lru_init);