ulist.c 5.9 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221
  1. /*
  2. * Copyright (C) 2011 STRATO AG
  3. * written by Arne Jansen <sensille@gmx.net>
  4. * Distributed under the GNU GPL license version 2.
  5. */
  6. #include <linux/slab.h>
  7. #include <linux/module.h>
  8. #include "ulist.h"
  9. /*
  10. * ulist is a generic data structure to hold a collection of unique u64
  11. * values. The only operations it supports are adding to the list and
  12. * enumerating it.
  13. * It is possible to store an auxiliary value along with the key.
  14. *
  15. * The implementation is preliminary and can probably be sped up
  16. * significantly. A first step would be to store the values in an rbtree
  17. * as soon as ULIST_SIZE is exceeded.
  18. *
  19. * A sample usage for ulists is the enumeration of directed graphs without
  20. * visiting a node twice. The pseudo-code could look like this:
  21. *
  22. * ulist = ulist_alloc();
  23. * ulist_add(ulist, root);
  24. * ULIST_ITER_INIT(&uiter);
  25. *
  26. * while ((elem = ulist_next(ulist, &uiter))) {
  27. * for (all child nodes n in elem)
  28. * ulist_add(ulist, n);
  29. * do something useful with the node;
  30. * }
  31. * ulist_free(ulist);
  32. *
  33. * This assumes the graph nodes are addressable by u64. This stems from the
  34. * usage for tree enumeration in btrfs, where the logical addresses are
  35. * 64 bit.
  36. *
  37. * It is also useful for tree enumeration which could be done elegantly
  38. * recursively, but is not possible due to kernel stack limitations. The
  39. * loop would be similar to the above.
  40. */
  41. /**
  42. * ulist_init - freshly initialize a ulist
  43. * @ulist: the ulist to initialize
  44. *
  45. * Note: don't use this function to init an already used ulist, use
  46. * ulist_reinit instead.
  47. */
  48. void ulist_init(struct ulist *ulist)
  49. {
  50. ulist->nnodes = 0;
  51. ulist->nodes = ulist->int_nodes;
  52. ulist->nodes_alloced = ULIST_SIZE;
  53. }
  54. EXPORT_SYMBOL(ulist_init);
  55. /**
  56. * ulist_fini - free up additionally allocated memory for the ulist
  57. * @ulist: the ulist from which to free the additional memory
  58. *
  59. * This is useful in cases where the base 'struct ulist' has been statically
  60. * allocated.
  61. */
  62. void ulist_fini(struct ulist *ulist)
  63. {
  64. /*
  65. * The first ULIST_SIZE elements are stored inline in struct ulist.
  66. * Only if more elements are alocated they need to be freed.
  67. */
  68. if (ulist->nodes_alloced > ULIST_SIZE)
  69. kfree(ulist->nodes);
  70. ulist->nodes_alloced = 0; /* in case ulist_fini is called twice */
  71. }
  72. EXPORT_SYMBOL(ulist_fini);
  73. /**
  74. * ulist_reinit - prepare a ulist for reuse
  75. * @ulist: ulist to be reused
  76. *
  77. * Free up all additional memory allocated for the list elements and reinit
  78. * the ulist.
  79. */
  80. void ulist_reinit(struct ulist *ulist)
  81. {
  82. ulist_fini(ulist);
  83. ulist_init(ulist);
  84. }
  85. EXPORT_SYMBOL(ulist_reinit);
  86. /**
  87. * ulist_alloc - dynamically allocate a ulist
  88. * @gfp_mask: allocation flags to for base allocation
  89. *
  90. * The allocated ulist will be returned in an initialized state.
  91. */
  92. struct ulist *ulist_alloc(gfp_t gfp_mask)
  93. {
  94. struct ulist *ulist = kmalloc(sizeof(*ulist), gfp_mask);
  95. if (!ulist)
  96. return NULL;
  97. ulist_init(ulist);
  98. return ulist;
  99. }
  100. EXPORT_SYMBOL(ulist_alloc);
  101. /**
  102. * ulist_free - free dynamically allocated ulist
  103. * @ulist: ulist to free
  104. *
  105. * It is not necessary to call ulist_fini before.
  106. */
  107. void ulist_free(struct ulist *ulist)
  108. {
  109. if (!ulist)
  110. return;
  111. ulist_fini(ulist);
  112. kfree(ulist);
  113. }
  114. EXPORT_SYMBOL(ulist_free);
  115. /**
  116. * ulist_add - add an element to the ulist
  117. * @ulist: ulist to add the element to
  118. * @val: value to add to ulist
  119. * @aux: auxiliary value to store along with val
  120. * @gfp_mask: flags to use for allocation
  121. *
  122. * Note: locking must be provided by the caller. In case of rwlocks write
  123. * locking is needed
  124. *
  125. * Add an element to a ulist. The @val will only be added if it doesn't
  126. * already exist. If it is added, the auxiliary value @aux is stored along with
  127. * it. In case @val already exists in the ulist, @aux is ignored, even if
  128. * it differs from the already stored value.
  129. *
  130. * ulist_add returns 0 if @val already exists in ulist and 1 if @val has been
  131. * inserted.
  132. * In case of allocation failure -ENOMEM is returned and the ulist stays
  133. * unaltered.
  134. */
  135. int ulist_add(struct ulist *ulist, u64 val, u64 aux, gfp_t gfp_mask)
  136. {
  137. return ulist_add_merge(ulist, val, aux, NULL, gfp_mask);
  138. }
  139. int ulist_add_merge(struct ulist *ulist, u64 val, u64 aux,
  140. u64 *old_aux, gfp_t gfp_mask)
  141. {
  142. int i;
  143. for (i = 0; i < ulist->nnodes; ++i) {
  144. if (ulist->nodes[i].val == val) {
  145. if (old_aux)
  146. *old_aux = ulist->nodes[i].aux;
  147. return 0;
  148. }
  149. }
  150. if (ulist->nnodes >= ulist->nodes_alloced) {
  151. u64 new_alloced = ulist->nodes_alloced + 128;
  152. struct ulist_node *new_nodes;
  153. void *old = NULL;
  154. /*
  155. * if nodes_alloced == ULIST_SIZE no memory has been allocated
  156. * yet, so pass NULL to krealloc
  157. */
  158. if (ulist->nodes_alloced > ULIST_SIZE)
  159. old = ulist->nodes;
  160. new_nodes = krealloc(old, sizeof(*new_nodes) * new_alloced,
  161. gfp_mask);
  162. if (!new_nodes)
  163. return -ENOMEM;
  164. if (!old)
  165. memcpy(new_nodes, ulist->int_nodes,
  166. sizeof(ulist->int_nodes));
  167. ulist->nodes = new_nodes;
  168. ulist->nodes_alloced = new_alloced;
  169. }
  170. ulist->nodes[ulist->nnodes].val = val;
  171. ulist->nodes[ulist->nnodes].aux = aux;
  172. ++ulist->nnodes;
  173. return 1;
  174. }
  175. EXPORT_SYMBOL(ulist_add);
  176. /**
  177. * ulist_next - iterate ulist
  178. * @ulist: ulist to iterate
  179. * @uiter: iterator variable, initialized with ULIST_ITER_INIT(&iterator)
  180. *
  181. * Note: locking must be provided by the caller. In case of rwlocks only read
  182. * locking is needed
  183. *
  184. * This function is used to iterate an ulist.
  185. * It returns the next element from the ulist or %NULL when the
  186. * end is reached. No guarantee is made with respect to the order in which
  187. * the elements are returned. They might neither be returned in order of
  188. * addition nor in ascending order.
  189. * It is allowed to call ulist_add during an enumeration. Newly added items
  190. * are guaranteed to show up in the running enumeration.
  191. */
  192. struct ulist_node *ulist_next(struct ulist *ulist, struct ulist_iterator *uiter)
  193. {
  194. if (ulist->nnodes == 0)
  195. return NULL;
  196. if (uiter->i < 0 || uiter->i >= ulist->nnodes)
  197. return NULL;
  198. return &ulist->nodes[uiter->i++];
  199. }
  200. EXPORT_SYMBOL(ulist_next);