idr.c

/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix-tree-like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer (or
 * whatever; we treat it as a (void *)) with that id.  You can pass this
 * id to a user to pass back at a later time.  You then pass that id to
 * this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX layers in a local pool),
 * so we don't need to go to the memory "store" during an id allocate
 * and you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */
#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/string.h>
#include <linux/idr.h>

static kmem_cache_t *idr_layer_cache;

static struct idr_layer *alloc_layer(struct idr *idp)
{
	struct idr_layer *p;

	spin_lock(&idp->lock);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock(&idp->lock);
	return p;
}

/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void free_layer(struct idr *idp, struct idr_layer *p)
{
	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock(&idp->lock);
	__free_layer(idp, p);
	spin_unlock(&idp->lock);
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following allocation functions.  It preallocates enough memory to
 * satisfy the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		free_layer(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);

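/*
 * Example (illustrative sketch, not part of the original file): the
 * expected calling pattern.  idr_pre_get() runs without the caller's
 * lock held; if the allocation under the lock reports -EAGAIN, the
 * caller loops back and refills the pool.  "my_idr" and "my_lock" are
 * hypothetical caller-side objects.
 */
#if 0
static int example_attach(struct idr *my_idr, spinlock_t *my_lock,
			  void *ptr, int *id)
{
	int err;

	do {
		if (!idr_pre_get(my_idr, GFP_KERNEL))
			return -ENOMEM;		/* really out of memory */
		spin_lock(my_lock);
		err = idr_get_new(my_idr, ptr, id);
		spin_unlock(my_lock);
	} while (err == -EAGAIN);

	return err;				/* 0 or -ENOSPC */
}
#endif
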
static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	struct idr_layer *pa[MAX_LEVEL];
	int l, id;
	long bm;

	id = *starting_id;
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * Descend the tree one layer per iteration until we
		 * reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available, go back to the previous layer */
			l++;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
			if (!(p = pa[l])) {
				*starting_id = id;
				return -2;
			}
			continue;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return -3;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			if (!(new = alloc_layer(idp)))
				return -1;
			p->ary[m] = new;
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}
	/*
	 * We have reached the leaf node, plant the
	 * user's pointer and return the raw id.
	 */
	p->ary[m] = (struct idr_layer *)ptr;
	__set_bit(m, &p->bitmap);
	p->count++;
	/*
	 * If this layer is full, mark the bit in the layer above
	 * to show that this part of the radix tree is full.
	 * This may complete the layer above and require walking
	 * up the radix tree.
	 */
	n = id;
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		n = n >> IDR_BITS;
		__set_bit((n & IDR_MASK), &p->bitmap);
	}
	return id;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *p, *new;
	int layers, v, id;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = alloc_layer(idp)))
			return -1;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count)
			continue;
		if (!(new = alloc_layer(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure, tear it down.
			 */
			spin_lock(&idp->lock);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__free_layer(idp, new);
			}
			spin_unlock(&idp->lock);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	idp->top = p;
	idp->layers = layers;
	v = sub_alloc(idp, ptr, &id);
	if (v == -2)
		goto build_up;
	return v;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

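/*
 * Example (illustrative sketch, not part of the original file): using
 * idr_get_new_above() to hand out ids at or above a floor, e.g. when
 * ids 0..99 are reserved for fixed purposes.  "my_idr" and "my_lock"
 * are hypothetical caller-side objects, as in the earlier example.
 */
#if 0
static int example_attach_above(struct idr *my_idr, spinlock_t *my_lock,
				void *ptr, int *id)
{
	int err;

	do {
		if (!idr_pre_get(my_idr, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(my_lock);
		err = idr_get_new_above(my_idr, ptr, 100, id);
		spin_unlock(my_lock);
	} while (err == -EAGAIN);

	return err;			/* on success, *id >= 100 */
}
#endif
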
/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk("idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		p->ary[n] = NULL;
		while (*paa && !--((**paa)->count)) {
			free_layer(idp, **paa);
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {
		/*
		 * We can drop a layer: the single remaining child
		 * becomes the new top.
		 */
		p = idp->top->ary[0];
		idp->top->bitmap = idp->top->count = 0;
		free_layer(idp, idp->top);
		idp->top = p;
		--idp->layers;
	}
	if (idp->id_free_cnt >= IDR_FREE_MAX) {
		/* Trim the local free-layer pool back down. */
		p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_remove);

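/*
 * Example (illustrative sketch, not part of the original file): removal
 * must be serialized by the caller against idr_get_new() and idr_find();
 * "my_lock" is the same hypothetical caller-side lock used at
 * allocation time.
 */
#if 0
static void example_detach(struct idr *my_idr, spinlock_t *my_lock, int id)
{
	spin_lock(my_lock);
	idr_remove(my_idr, id);	/* id may be reused by a later idr_get_new() */
	spin_unlock(my_lock);
}
#endif
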
/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;

	while (n > 0 && p) {
		n -= IDR_BITS;
		p = p->ary[(id >> n) & IDR_MASK];
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find);

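/*
 * Example (illustrative sketch, not part of the original file):
 * translating an id back into the stored pointer.  Per the comment
 * above, the caller serializes against idr_get_new() and idr_remove();
 * "my_idr" and "my_lock" are hypothetical caller-side objects.
 */
#if 0
static void *example_lookup(struct idr *my_idr, spinlock_t *my_lock, int id)
{
	void *ptr;

	spin_lock(my_lock);
	ptr = idr_find(my_idr, id);	/* NULL if id is not allocated */
	spin_unlock(my_lock);
	return ptr;
}
#endif
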
static void idr_cache_ctor(void *idr_layer, kmem_cache_t *idr_layer_cache,
			   unsigned long flags)
{
	memset(idr_layer, 0, sizeof(struct idr_layer));
}

static int init_id_cache(void)
{
	if (!idr_layer_cache)
		idr_layer_cache = kmem_cache_create("idr_layer_cache",
			sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
	return 0;
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	init_id_cache();
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);
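
/*
 * Example (illustrative sketch, not part of the original file): the
 * full lifecycle of an idr, tying the calls above together.  All names
 * are hypothetical caller-side code.
 */
#if 0
static DEFINE_SPINLOCK(my_lock);
static struct idr my_idr;

static int example_lifecycle(void *ptr)
{
	int id, err;

	idr_init(&my_idr);

	/* allocate an id for ptr, retrying if the layer pool runs dry */
	do {
		if (!idr_pre_get(&my_idr, GFP_KERNEL))
			return -ENOMEM;
		spin_lock(&my_lock);
		err = idr_get_new(&my_idr, ptr, &id);
		spin_unlock(&my_lock);
	} while (err == -EAGAIN);
	if (err)
		return err;			/* -ENOSPC: idr is full */

	/* the id now maps back to the stored pointer */
	WARN_ON(idr_find(&my_idr, id) != ptr);

	spin_lock(&my_lock);
	idr_remove(&my_idr, id);
	spin_unlock(&my_lock);

	idr_destroy(&my_idr);			/* drop the cached free layers */
	return 0;
}
#endif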