idr.c

/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix-tree-like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX in a local pool) so we
 * don't need to go to the memory "store" during an id allocate, and
 * you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
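
/*
 * A minimal lifecycle sketch, for orientation only; my_idr, my_obj and
 * id are hypothetical caller-side names, not part of this file.
 * Initialize the handle, allocate an id for an object (see
 * idr_pre_get()/idr_get_new() below), look the object up by that id,
 * then release it:
 *
 *	static struct idr my_idr;
 *	int id;
 *
 *	idr_init(&my_idr);
 *	... allocate id for my_obj with idr_pre_get() + idr_get_new() ...
 *	my_obj = idr_find(&my_idr, id);
 *	idr_remove(&my_idr, id);
 */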
#ifndef TEST			/* to test in user space... */
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/string.h>
#include <linux/idr.h>

static kmem_cache_t *idr_layer_cache;

/* Pop one layer off the idr's private free list, if available. */
static struct idr_layer *alloc_layer(struct idr *idp)
{
	struct idr_layer *p;

	spin_lock(&idp->lock);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock(&idp->lock);
	return(p);
}

/* Push a layer back onto the idr's private free list. */
static void free_layer(struct idr *idp, struct idr_layer *p)
{
	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock(&idp->lock);
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
	spin_unlock(&idp->lock);
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return (0);
		free_layer(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
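
/*
 * Sketch of the intended calling pattern (hypothetical caller code;
 * my_idr, my_obj and my_lock are the caller's, not part of this file):
 *
 * retry:
 *	if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *		return -ENOMEM;
 *	spin_lock(&my_lock);
 *	error = idr_get_new(&my_idr, my_obj, &id);
 *	spin_unlock(&my_lock);
 *	if (error == -EAGAIN)
 *		goto retry;
 */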
static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	struct idr_layer *pa[MAX_LEVEL];
	int l, id;
	long bm;

	id = *starting_id;
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
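		/*
		 * Worked example (illustrative; assumes IDR_BITS == 5 as
		 * on a 32-bit build): for id == 1000 at level l == 1 this
		 * gives n == (1000 >> 5) & 31 == 31, and at l == 0 it
		 * gives n == 1000 & 31 == 8.
		 */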
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* No space available, go back to previous layer. */
			l++;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
			if (!(p = pa[l])) {
				*starting_id = id;
				return -2;
			}
			continue;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return -3;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			if (!(new = alloc_layer(idp)))
				return -1;
			p->ary[m] = new;
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}
	/*
	 * We have reached the leaf node, plant the
	 * user's pointer and return the raw id.
	 */
	p->ary[m] = (struct idr_layer *)ptr;
	__set_bit(m, &p->bitmap);
	p->count++;
	/*
	 * If this layer is full, mark the bit in the layer above
	 * to show that this part of the radix tree is full.
	 * This may complete the layer above and require walking
	 * up the radix tree.
	 */
	n = id;
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		n = n >> IDR_BITS;
		__set_bit((n & IDR_MASK), &p->bitmap);
	}
	return(id);
}
static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *p, *new;
	int layers, v, id;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = alloc_layer(idp)))
			return -1;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count)
			continue;
		if (!(new = alloc_layer(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure, tear it down.
			 */
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				free_layer(idp, new);
			}
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	idp->top = p;
	idp->layers = layers;
	v = sub_alloc(idp, ptr, &id);
	if (v == -2)
		goto build_up;
	return(v);
}
/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
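
/*
 * Illustrative use of idr_get_new_above() (hypothetical caller code):
 * a caller that reserves ids below 16 for fixed purposes allocates its
 * dynamic ids starting at 16:
 *
 *	if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *		return -ENOMEM;
 *	error = idr_get_new_above(&my_idr, my_obj, 16, &id);
 *	if (error == -EAGAIN)
 *		goto retry;
 */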
/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);
static void idr_remove_warning(int id)
{
	printk(KERN_WARNING
		"idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	/* Walk down to the leaf, clearing the "full" bits on the way. */
	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		p->ary[n] = NULL;
		/* Free any layers that became empty, bottom up. */
		while (*paa && !--((**paa)->count)) {
			free_layer(idp, **paa);
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
	} else
		idr_remove_warning(id);
}
/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/* Single child at leftmost slot: we can drop a layer. */
		p = idp->top->ary[0];
		idp->top->bitmap = idp->top->count = 0;
		free_layer(idp, idp->top);
		idp->top = p;
		--idp->layers;
	}
	/* Trim the private free list back, one layer per call. */
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
		return;
	}
}
EXPORT_SYMBOL(idr_remove);
/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);
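
/*
 * Teardown sketch (hypothetical caller code).  idr_destroy() only
 * drops the cached free layers, so the caller first removes every id
 * it still holds:
 *
 *	idr_remove(&my_idr, id);	... once per live id ...
 *	idr_destroy(&my_idr);
 */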
/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;

	while (n > 0 && p) {
		n -= IDR_BITS;
		p = p->ary[(id >> n) & IDR_MASK];
	}
	return((void *)p);
}
EXPORT_SYMBOL(idr_find);
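
/*
 * Lookup sketch (hypothetical caller code; my_lock provides the
 * serialization against idr_get_new()/idr_remove() required above):
 *
 *	spin_lock(&my_lock);
 *	my_obj = idr_find(&my_idr, id);
 *	spin_unlock(&my_lock);
 *	if (!my_obj)
 *		return -ENOENT;
 */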
static void idr_cache_ctor(void *idr_layer, kmem_cache_t *cachep,
			   unsigned long flags)
{
	memset(idr_layer, 0, sizeof(struct idr_layer));
}

static int init_id_cache(void)
{
	if (!idr_layer_cache)
		idr_layer_cache = kmem_cache_create("idr_layer_cache",
			sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
	return 0;
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	init_id_cache();
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
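
/*
 * Initialization sketch (hypothetical caller code): a handle is often
 * embedded in a longer-lived object and set up once before any other
 * idr call on it.
 *
 *	struct my_dev {
 *		struct idr	ids;
 *		spinlock_t	lock;
 *	};
 *
 *	static void my_dev_setup(struct my_dev *dev)
 *	{
 *		idr_init(&dev->ids);
 *		spin_lock_init(&dev->lock);
 *	}
 */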