list_lru.c 3.1 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140
  1. /*
  2. * Copyright (c) 2013 Red Hat, Inc. and Parallels Inc. All rights reserved.
  3. * Authors: David Chinner and Glauber Costa
  4. *
  5. * Generic LRU infrastructure
  6. */
  7. #include <linux/kernel.h>
  8. #include <linux/module.h>
  9. #include <linux/mm.h>
  10. #include <linux/list_lru.h>
  11. #include <linux/slab.h>
  12. bool list_lru_add(struct list_lru *lru, struct list_head *item)
  13. {
  14. int nid = page_to_nid(virt_to_page(item));
  15. struct list_lru_node *nlru = &lru->node[nid];
  16. spin_lock(&nlru->lock);
  17. WARN_ON_ONCE(nlru->nr_items < 0);
  18. if (list_empty(item)) {
  19. list_add_tail(item, &nlru->list);
  20. if (nlru->nr_items++ == 0)
  21. node_set(nid, lru->active_nodes);
  22. spin_unlock(&nlru->lock);
  23. return true;
  24. }
  25. spin_unlock(&nlru->lock);
  26. return false;
  27. }
  28. EXPORT_SYMBOL_GPL(list_lru_add);
  29. bool list_lru_del(struct list_lru *lru, struct list_head *item)
  30. {
  31. int nid = page_to_nid(virt_to_page(item));
  32. struct list_lru_node *nlru = &lru->node[nid];
  33. spin_lock(&nlru->lock);
  34. if (!list_empty(item)) {
  35. list_del_init(item);
  36. if (--nlru->nr_items == 0)
  37. node_clear(nid, lru->active_nodes);
  38. WARN_ON_ONCE(nlru->nr_items < 0);
  39. spin_unlock(&nlru->lock);
  40. return true;
  41. }
  42. spin_unlock(&nlru->lock);
  43. return false;
  44. }
  45. EXPORT_SYMBOL_GPL(list_lru_del);
  46. unsigned long
  47. list_lru_count_node(struct list_lru *lru, int nid)
  48. {
  49. unsigned long count = 0;
  50. struct list_lru_node *nlru = &lru->node[nid];
  51. spin_lock(&nlru->lock);
  52. WARN_ON_ONCE(nlru->nr_items < 0);
  53. count += nlru->nr_items;
  54. spin_unlock(&nlru->lock);
  55. return count;
  56. }
  57. EXPORT_SYMBOL_GPL(list_lru_count_node);
/*
 * list_lru_walk_node - walk @nid's LRU list, handing each item to @isolate.
 *
 * @isolate is called with the per-node lock held and may drop it (it is
 * passed &nlru->lock for exactly that purpose); when it does, it must
 * return LRU_RETRY so the traversal is restarted, since the list may have
 * changed while the lock was dropped.
 *
 * At most *@nr_to_walk items are visited across the whole walk;
 * *@nr_to_walk is decremented in place so a caller can spread one budget
 * over several nodes.
 *
 * Returns the number of items the callback reported as removed
 * (LRU_REMOVED).
 */
unsigned long
list_lru_walk_node(struct list_lru *lru, int nid, list_lru_walk_cb isolate,
		   void *cb_arg, unsigned long *nr_to_walk)
{
	struct list_lru_node *nlru = &lru->node[nid];
	struct list_head *item, *n;
	unsigned long isolated = 0;

	spin_lock(&nlru->lock);
restart:
	list_for_each_safe(item, n, &nlru->list) {
		enum lru_status ret;

		/*
		 * decrement nr_to_walk first so that we don't livelock if we
		 * get stuck on large numbers of LRU_RETRY items
		 */
		if (!*nr_to_walk)
			break;
		--*nr_to_walk;

		ret = isolate(item, &nlru->lock, cb_arg);
		switch (ret) {
		case LRU_REMOVED:
			/* callback already unlinked the item; fix accounting */
			if (--nlru->nr_items == 0)
				node_clear(nid, lru->active_nodes);
			WARN_ON_ONCE(nlru->nr_items < 0);
			isolated++;
			break;
		case LRU_ROTATE:
			/* item stays resident: give it another full trip */
			list_move_tail(item, &nlru->list);
			break;
		case LRU_SKIP:
			break;
		case LRU_RETRY:
			/*
			 * The lru lock has been dropped, our list traversal is
			 * now invalid and so we have to restart from scratch.
			 */
			goto restart;
		default:
			BUG();
		}
	}

	spin_unlock(&nlru->lock);
	return isolated;
}
EXPORT_SYMBOL_GPL(list_lru_walk_node);
  103. int list_lru_init(struct list_lru *lru)
  104. {
  105. int i;
  106. size_t size = sizeof(*lru->node) * nr_node_ids;
  107. lru->node = kzalloc(size, GFP_KERNEL);
  108. if (!lru->node)
  109. return -ENOMEM;
  110. nodes_clear(lru->active_nodes);
  111. for (i = 0; i < nr_node_ids; i++) {
  112. spin_lock_init(&lru->node[i].lock);
  113. INIT_LIST_HEAD(&lru->node[i].list);
  114. lru->node[i].nr_items = 0;
  115. }
  116. return 0;
  117. }
  118. EXPORT_SYMBOL_GPL(list_lru_init);
  119. void list_lru_destroy(struct list_lru *lru)
  120. {
  121. kfree(lru->node);
  122. }
  123. EXPORT_SYMBOL_GPL(list_lru_destroy);