/*
 * include/linux/idr.h
 *
 * 2002-10-18	written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Small id to pointer translation service avoiding fixed-size
 * tables.
 */

#ifndef __IDR_H__
#define __IDR_H__

#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/init.h>
#include <linux/rcupdate.h>

/*
 * We want shallower trees and thus more bits covered at each layer.  8
 * bits gives us a large enough first layer for most use cases and a
 * maximum tree depth of 4.  Each idr_layer is slightly larger than 2k
 * on 64bit and 1k on 32bit.
 */
#define IDR_BITS 8
#define IDR_SIZE (1 << IDR_BITS)
#define IDR_MASK ((1 << IDR_BITS)-1)

struct idr_layer {
        int                     prefix; /* the ID prefix of this idr_layer */
        DECLARE_BITMAP(bitmap, IDR_SIZE); /* A zero bit means "space here" */
        struct idr_layer __rcu  *ary[1<<IDR_BITS];
        int                     count;  /* When zero, we can release it */
        int                     layer;  /* distance from leaf */
        struct rcu_head         rcu_head;
};

struct idr {
        struct idr_layer __rcu  *hint;  /* the last layer allocated from */
        struct idr_layer __rcu  *top;
        struct idr_layer        *id_free;
        int                     layers; /* only valid w/o concurrent changes */
        int                     id_free_cnt;
        spinlock_t              lock;
};

#define IDR_INIT(name)                                          \
{                                                               \
        .lock = __SPIN_LOCK_UNLOCKED(name.lock),                \
}
#define DEFINE_IDR(name)        struct idr name = IDR_INIT(name)
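
/*
 * Example (illustrative sketch, not part of the original header): defining
 * an idr either statically with DEFINE_IDR() or at runtime with idr_init(),
 * which is declared below.  The names "my_static_idr", "struct my_dev" and
 * "my_dev_setup" are assumptions made up for this example.
 *
 *      static DEFINE_IDR(my_static_idr);
 *
 *      struct my_dev {
 *              struct idr      clients;
 *      };
 *
 *      static void my_dev_setup(struct my_dev *dev)
 *      {
 *              idr_init(&dev->clients);
 *      }
 */
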
/**
 * DOC: idr sync
 * idr synchronization (stolen from radix-tree.h)
 *
 * idr_find() can be called locklessly, using RCU. The caller must
 * ensure calls to this function are made within rcu_read_lock() regions.
 * Other readers (lock-free or otherwise) and modifications may be running
 * concurrently.
 *
 * It is still required that the caller manage the synchronization and
 * lifetimes of the items. So if RCU lock-free lookups are used, typically
 * this would mean that the items have their own locks, or are amenable to
 * lock-free access; and that the items are freed by RCU (or only freed after
 * having been deleted from the idr tree *and* a synchronize_rcu() grace
 * period).
 */

/*
 * This is what we export.
 */

void *idr_find_slowpath(struct idr *idp, int id);
int idr_pre_get(struct idr *idp, gfp_t gfp_mask);
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id);
void idr_preload(gfp_t gfp_mask);
int idr_alloc(struct idr *idp, void *ptr, int start, int end, gfp_t gfp_mask);
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data);
void *idr_get_next(struct idr *idp, int *nextid);
void *idr_replace(struct idr *idp, void *ptr, int id);
void idr_remove(struct idr *idp, int id);
void idr_free(struct idr *idp, int id);
void idr_destroy(struct idr *idp);
void idr_init(struct idr *idp);
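
/*
 * Example (illustrative sketch, not part of the original header): using
 * idr_for_each() with a callback.  The names "my_idr" and "my_count_one"
 * are assumptions made up for this example.  Returning non-zero from the
 * callback stops the iteration.
 *
 *      static int my_count_one(int id, void *p, void *data)
 *      {
 *              int *count = data;
 *
 *              (*count)++;
 *              return 0;
 *      }
 *
 *      int count = 0;
 *
 *      idr_for_each(&my_idr, my_count_one, &count);
 */
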
/**
 * idr_preload_end - end preload section started with idr_preload()
 *
 * Each idr_preload() should be matched with an invocation of this
 * function. See idr_preload() for details.
 */
static inline void idr_preload_end(void)
{
        preempt_enable();
}
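
/*
 * Example (illustrative sketch, not part of the original header): the
 * preload + idr_alloc() pattern that idr_preload()/idr_preload_end() are
 * meant for.  "my_idr", "my_lock" and "obj" are assumptions made up for
 * this example; the spinlock stands in for whatever lock serializes
 * modifications of the idr.  GFP_NOWAIT is used inside the preloaded,
 * preemption-disabled section; end == 0 asks for no upper limit on the id.
 *
 *      int id;
 *
 *      idr_preload(GFP_KERNEL);
 *      spin_lock(&my_lock);
 *
 *      id = idr_alloc(&my_idr, obj, 0, 0, GFP_NOWAIT);
 *
 *      spin_unlock(&my_lock);
 *      idr_preload_end();
 *      if (id < 0)
 *              return id;
 */
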
/**
 * idr_find - return pointer for given id
 * @idr: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with. A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
static inline void *idr_find(struct idr *idr, int id)
{
        struct idr_layer *hint = rcu_dereference_raw(idr->hint);

        if (hint && (id & ~IDR_MASK) == hint->prefix)
                return rcu_dereference_raw(hint->ary[id & IDR_MASK]);
        return idr_find_slowpath(idr, id);
}
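
/*
 * Example (illustrative sketch, not part of the original header): a
 * lockless lookup following the "idr sync" rules above.  "my_idr" and
 * "struct my_item" with its "ref" kref member are assumptions made up for
 * this example; the item must be kept alive (here by taking a reference
 * inside the RCU read-side section) for the pointer to remain valid after
 * rcu_read_unlock().
 *
 *      struct my_item *item;
 *
 *      rcu_read_lock();
 *      item = idr_find(&my_idr, id);
 *      if (item)
 *              kref_get(&item->ref);
 *      rcu_read_unlock();
 */
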
/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * Simple wrapper around idr_get_new_above() w/ @starting_id of zero.
 */
static inline int idr_get_new(struct idr *idp, void *ptr, int *id)
{
        return idr_get_new_above(idp, ptr, 0, id);
}
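
/*
 * Example (illustrative sketch, not part of the original header): the
 * legacy idr_pre_get()/idr_get_new() retry loop; new code should prefer
 * the idr_preload()/idr_alloc() pattern shown earlier.  "my_idr",
 * "my_lock" and "obj" are assumptions made up for this example.
 * idr_pre_get() returns 0 when it could not preallocate, and
 * idr_get_new() returns -EAGAIN when another idr_pre_get() is needed.
 *
 *      int id, ret;
 *
 *      do {
 *              if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *                      return -ENOMEM;
 *              spin_lock(&my_lock);
 *              ret = idr_get_new(&my_idr, obj, &id);
 *              spin_unlock(&my_lock);
 *      } while (ret == -EAGAIN);
 *      if (ret)
 *              return ret;
 */
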
/**
 * idr_for_each_entry - iterate over an idr's elements of a given type
 * @idp: idr handle
 * @entry: the type * to use as cursor
 * @id: the current entry's key
 */
#define idr_for_each_entry(idp, entry, id)                              \
        for (id = 0, entry = (typeof(entry))idr_get_next((idp), &(id)); \
             entry != NULL;                                             \
             ++id, entry = (typeof(entry))idr_get_next((idp), &(id)))
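
/*
 * Example (illustrative sketch, not part of the original header): typed
 * iteration with idr_for_each_entry().  "my_idr" and "struct my_item" are
 * assumptions made up for this example; @entry must be a pointer of the
 * stored type and @id a plain int.
 *
 *      struct my_item *item;
 *      int id;
 *
 *      idr_for_each_entry(&my_idr, item, id)
 *              pr_info("id %d -> %p\n", id, item);
 */
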
void __idr_remove_all(struct idr *idp); /* don't use */

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * If you're trying to destroy @idp, calling idr_destroy() is enough.
 * This is going away. Don't use.
 */
static inline void __deprecated idr_remove_all(struct idr *idp)
{
        __idr_remove_all(idp);
}

/*
 * IDA - IDR-based id allocator, use when translation from id to
 * pointer isn't necessary.
 *
 * IDA_BITMAP_LONGS is calculated to be one less to accommodate
 * ida_bitmap->nr_busy so that the whole struct fits in 128 bytes.
 */
#define IDA_CHUNK_SIZE          128     /* 128 bytes per chunk */
#define IDA_BITMAP_LONGS        (IDA_CHUNK_SIZE / sizeof(long) - 1)
#define IDA_BITMAP_BITS         (IDA_BITMAP_LONGS * sizeof(long) * 8)

struct ida_bitmap {
        long                    nr_busy;
        unsigned long           bitmap[IDA_BITMAP_LONGS];
};

struct ida {
        struct idr              idr;
        struct ida_bitmap       *free_bitmap;
};

#define IDA_INIT(name)          { .idr = IDR_INIT((name).idr), .free_bitmap = NULL, }
#define DEFINE_IDA(name)        struct ida name = IDA_INIT(name)

int ida_pre_get(struct ida *ida, gfp_t gfp_mask);
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id);
void ida_remove(struct ida *ida, int id);
void ida_destroy(struct ida *ida);
void ida_init(struct ida *ida);

int ida_simple_get(struct ida *ida, unsigned int start, unsigned int end,
                   gfp_t gfp_mask);
void ida_simple_remove(struct ida *ida, unsigned int id);

/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Simple wrapper around ida_get_new_above() w/ @starting_id of zero.
 */
static inline int ida_get_new(struct ida *ida, int *p_id)
{
        return ida_get_new_above(ida, 0, p_id);
}
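
/*
 * Example (illustrative sketch, not part of the original header): the
 * ida_simple_*() convenience interface, which handles locking and
 * preallocation internally.  "my_ida" is an assumption made up for this
 * example; passing 0 as @end asks for no upper limit on the id.
 *
 *      static DEFINE_IDA(my_ida);
 *
 *      int id = ida_simple_get(&my_ida, 0, 0, GFP_KERNEL);
 *      if (id < 0)
 *              return id;
 *      ...
 *      ida_simple_remove(&my_ida, id);
 */
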
void __init idr_init_cache(void);

#endif /* __IDR_H__ */