idr.c 11 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471
  1. /*
  2. * 2002-10-18 written by Jim Houston jim.houston@ccur.com
  3. * Copyright (C) 2002 by Concurrent Computer Corporation
  4. * Distributed under the GNU GPL license version 2.
  5. *
  6. * Modified by George Anzinger to reuse immediately and to use
  7. * find bit instructions. Also removed _irq on spinlocks.
  8. *
  9. * Small id to pointer translation service.
  10. *
  11. * It uses a radix tree like structure as a sparse array indexed
  12. * by the id to obtain the pointer. The bitmap makes allocating
  13. * a new id quick.
  14. *
  15. * You call it to allocate an id (an int) an associate with that id a
  16. * pointer or what ever, we treat it as a (void *). You can pass this
  17. * id to a user for him to pass back at a later time. You then pass
  18. * that id to this code and it returns your pointer.
  19. * You can release ids at any time. When all ids are released, most of
  20. * the memory is returned (we keep IDR_FREE_MAX) in a local pool so we
  21. * don't need to go to the memory "store" during an id allocate, just
  22. * so you don't need to be too concerned about locking and conflicts
  23. * with the slab allocator.
  24. */
  25. #ifndef TEST // to test in user space...
  26. #include <linux/slab.h>
  27. #include <linux/init.h>
  28. #include <linux/module.h>
  29. #endif
  30. #include <linux/err.h>
  31. #include <linux/string.h>
  32. #include <linux/idr.h>
  33. static kmem_cache_t *idr_layer_cache;
  34. static struct idr_layer *alloc_layer(struct idr *idp)
  35. {
  36. struct idr_layer *p;
  37. spin_lock(&idp->lock);
  38. if ((p = idp->id_free)) {
  39. idp->id_free = p->ary[0];
  40. idp->id_free_cnt--;
  41. p->ary[0] = NULL;
  42. }
  43. spin_unlock(&idp->lock);
  44. return(p);
  45. }
/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
	/* Push p onto the head of the free list, chaining through ary[0]. */
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}
  53. static void free_layer(struct idr *idp, struct idr_layer *p)
  54. {
  55. /*
  56. * Depends on the return element being zeroed.
  57. */
  58. spin_lock(&idp->lock);
  59. __free_layer(idp, p);
  60. spin_unlock(&idp->lock);
  61. }
/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	/* Top the private free list up to IDR_FREE_MAX cached layers. */
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return (0);
		/* free_layer() pushes the fresh layer onto idp's free list. */
		free_layer(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
/*
 * Find the lowest free id >= *starting_id in the existing tree, store
 * ptr in its leaf slot and return the id.  Returns -1 when a layer
 * allocation fails (caller should idr_pre_get()), -2 when the search
 * ran off the top of the current tree (*starting_id is updated and the
 * caller must grow the tree and retry), -3 when the id space is
 * exhausted.
 */
static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	struct idr_layer *pa[MAX_LEVEL];	/* parent layer at each level walked */
	int l, id;
	long bm;

	id = *starting_id;
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;		/* sentinel: NULL above the top layer */
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		/* slot index for id within the current layer */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		/* invert the bitmap: set bits of bm mark FREE slots */
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available go back to previous layer. */
			l++;
			/* round id up past this entire subtree */
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;
			if (!(p = pa[l])) {
				/* ran off the top: tree must be grown */
				*starting_id = id;
				return -2;
			}
			continue;
		}
		if (m != n) {
			/* skipped forward to slot m: clear low-order id bits */
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return -3;	/* id space exhausted */
		if (l == 0)
			break;		/* reached the leaf layer */
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			if (!(new = alloc_layer(idp)))
				return -1;	/* free list empty: need idr_pre_get() */
			p->ary[m] = new;
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}
	/*
	 * We have reached the leaf node, plant the
	 * users pointer and return the raw id.
	 */
	p->ary[m] = (struct idr_layer *)ptr;
	__set_bit(m, &p->bitmap);
	p->count++;
	/*
	 * If this layer is full mark the bit in the layer above
	 * to show that this part of the radix tree is full.
	 * This may complete the layer above and require walking
	 * up the radix tree.
	 */
	n = id;
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;		/* hit the sentinel above the top */
		n = n >> IDR_BITS;
		__set_bit((n & IDR_MASK), &p->bitmap);
	}
	return(id);
}
/*
 * Core of id allocation: grow the tree as needed so it can hold
 * starting_id, then delegate to sub_alloc().  Returns the new id, or
 * -1 on allocation failure, or -3 when the id space is exhausted
 * (sub_alloc()'s -2 "grow and retry" result is handled here via the
 * build_up loop and never escapes).
 */
static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *p, *new;
	int layers, v, id;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		/* empty tree: start with a single layer */
		if (!(p = alloc_layer(idp)))
			return -1;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count)
			continue;	/* empty layer: reuse it to cover the larger space */
		if (!(new = alloc_layer(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock(&idp->lock);
			/* walk back down the partially-built chain via ary[0] */
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__free_layer(idp, new);
			}
			spin_unlock(&idp->lock);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		/* propagate "full" state of the old top into the new top */
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	idp->top = p;
	idp->layers = layers;
	v = sub_alloc(idp, ptr, &id);
	if (v == -2)
		goto build_up;	/* sub_alloc bumped id past current capacity */
	return(v);
}
  205. /**
  206. * idr_get_new_above - allocate new idr entry above or equal to a start id
  207. * @idp: idr handle
  208. * @ptr: pointer you want associated with the ide
  209. * @start_id: id to start search at
  210. * @id: pointer to the allocated handle
  211. *
  212. * This is the allocate id function. It should be called with any
  213. * required locks.
  214. *
  215. * If memory is required, it will return -EAGAIN, you should unlock
  216. * and go back to the idr_pre_get() call. If the idr is full, it will
  217. * return -ENOSPC.
  218. *
  219. * @id returns a value in the range 0 ... 0x7fffffff
  220. */
  221. int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
  222. {
  223. int rv;
  224. rv = idr_get_new_above_int(idp, ptr, starting_id);
  225. /*
  226. * This is a cheap hack until the IDR code can be fixed to
  227. * return proper error values.
  228. */
  229. if (rv < 0) {
  230. if (rv == -1)
  231. return -EAGAIN;
  232. else /* Will be -3 */
  233. return -ENOSPC;
  234. }
  235. *id = rv;
  236. return 0;
  237. }
  238. EXPORT_SYMBOL(idr_get_new_above);
  239. /**
  240. * idr_get_new - allocate new idr entry
  241. * @idp: idr handle
  242. * @ptr: pointer you want associated with the ide
  243. * @id: pointer to the allocated handle
  244. *
  245. * This is the allocate id function. It should be called with any
  246. * required locks.
  247. *
  248. * If memory is required, it will return -EAGAIN, you should unlock
  249. * and go back to the idr_pre_get() call. If the idr is full, it will
  250. * return -ENOSPC.
  251. *
  252. * @id returns a value in the range 0 ... 0x7fffffff
  253. */
  254. int idr_get_new(struct idr *idp, void *ptr, int *id)
  255. {
  256. int rv;
  257. rv = idr_get_new_above_int(idp, ptr, 0);
  258. /*
  259. * This is a cheap hack until the IDR code can be fixed to
  260. * return proper error values.
  261. */
  262. if (rv < 0) {
  263. if (rv == -1)
  264. return -EAGAIN;
  265. else /* Will be -3 */
  266. return -ENOSPC;
  267. }
  268. *id = rv;
  269. return 0;
  270. }
  271. EXPORT_SYMBOL(idr_get_new);
  272. static void idr_remove_warning(int id)
  273. {
  274. printk("idr_remove called for id=%d which is not allocated.\n", id);
  275. dump_stack();
  276. }
/*
 * Remove id from the tree, clearing "full" bits on the way down and
 * freeing any layers whose count drops to zero on the way back up.
 * shift is the bit offset of the top layer's index within id.
 */
static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];	/* addresses of the slots walked through */
	struct idr_layer ***paa = &pa[0];
	int n;

	*paa = NULL;		/* sentinel: terminates the upward free-walk */
	*++paa = &idp->top;

	/* Walk down to the leaf, clearing the full bit at each level. */
	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))){
		__clear_bit(n, &p->bitmap);
		p->ary[n] = NULL;
		/* Free now-empty layers bottom-up, detaching each from its parent. */
		while(*paa && ! --((**paa)->count)){
			free_layer(idp, **paa);
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;	/* the whole tree was freed */
	} else
		idr_remove_warning(id);
}
  305. /**
  306. * idr_remove - remove the given id and free it's slot
  307. * idp: idr handle
  308. * id: uniqueue key
  309. */
  310. void idr_remove(struct idr *idp, int id)
  311. {
  312. struct idr_layer *p;
  313. /* Mask off upper bits we don't use for the search. */
  314. id &= MAX_ID_MASK;
  315. sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
  316. if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
  317. idp->top->ary[0]) { // We can drop a layer
  318. p = idp->top->ary[0];
  319. idp->top->bitmap = idp->top->count = 0;
  320. free_layer(idp, idp->top);
  321. idp->top = p;
  322. --idp->layers;
  323. }
  324. while (idp->id_free_cnt >= IDR_FREE_MAX) {
  325. p = alloc_layer(idp);
  326. kmem_cache_free(idr_layer_cache, p);
  327. return;
  328. }
  329. }
  330. EXPORT_SYMBOL(idr_remove);
  331. /**
  332. * idr_destroy - release all cached layers within an idr tree
  333. * idp: idr handle
  334. */
  335. void idr_destroy(struct idr *idp)
  336. {
  337. while (idp->id_free_cnt) {
  338. struct idr_layer *p = alloc_layer(idp);
  339. kmem_cache_free(idr_layer_cache, p);
  340. }
  341. }
  342. EXPORT_SYMBOL(idr_destroy);
  343. /**
  344. * idr_find - return pointer for given id
  345. * @idp: idr handle
  346. * @id: lookup key
  347. *
  348. * Return the pointer given the id it has been registered with. A %NULL
  349. * return indicates that @id is not valid or you passed %NULL in
  350. * idr_get_new().
  351. *
  352. * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
  353. */
  354. void *idr_find(struct idr *idp, int id)
  355. {
  356. int n;
  357. struct idr_layer *p;
  358. n = idp->layers * IDR_BITS;
  359. p = idp->top;
  360. /* Mask off upper bits we don't use for the search. */
  361. id &= MAX_ID_MASK;
  362. if (id >= (1 << n))
  363. return NULL;
  364. while (n > 0 && p) {
  365. n -= IDR_BITS;
  366. p = p->ary[(id >> n) & IDR_MASK];
  367. }
  368. return((void *)p);
  369. }
  370. EXPORT_SYMBOL(idr_find);
/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	n = idp->layers * IDR_BITS;	/* total id bits the tree covers */
	p = idp->top;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);	/* beyond the allocated space */

	/* Descend to the leaf layer (note: first decrement happens before the loop). */
	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);	/* slot was never allocated */

	old_p = p->ary[n];
	p->ary[n] = ptr;

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
  405. static void idr_cache_ctor(void * idr_layer, kmem_cache_t *idr_layer_cache,
  406. unsigned long flags)
  407. {
  408. memset(idr_layer, 0, sizeof(struct idr_layer));
  409. }
  410. static int init_id_cache(void)
  411. {
  412. if (!idr_layer_cache)
  413. idr_layer_cache = kmem_cache_create("idr_layer_cache",
  414. sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
  415. return 0;
  416. }
  417. /**
  418. * idr_init - initialize idr handle
  419. * @idp: idr handle
  420. *
  421. * This function is use to set up the handle (@idp) that you will pass
  422. * to the rest of the functions.
  423. */
  424. void idr_init(struct idr *idp)
  425. {
  426. init_id_cache();
  427. memset(idp, 0, sizeof(struct idr));
  428. spin_lock_init(&idp->lock);
  429. }
  430. EXPORT_SYMBOL(idr_init);