/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix-tree-like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer (or
 * whatever; we treat it as a (void *)) with that id.  You can pass the
 * id to a user for them to pass back at a later time; you then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX layers in a local pool),
 * so an id allocation rarely has to go to the memory "store", and you
 * don't need to be too concerned about locking or conflicts with the
 * slab allocator.
 */
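
/*
 * A minimal lifecycle sketch (my_ptr is a hypothetical pointer; error
 * handling is elided here and shown with idr_pre_get() below):
 *
 *	struct idr my_idr;
 *	int id;
 *
 *	idr_init(&my_idr);
 *	idr_pre_get(&my_idr, GFP_KERNEL);
 *	idr_get_new(&my_idr, my_ptr, &id);	// id now maps to my_ptr
 *	BUG_ON(idr_find(&my_idr, id) != my_ptr);
 *	idr_remove(&my_idr, id);		// id is free for reuse
 *	idr_destroy(&my_idr);			// drop the cached layers
 */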

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *alloc_layer(struct idr *idp)
{
	struct idr_layer *p;
	unsigned long flags;

	spin_lock_irqsave(&idp->lock, flags);
	if ((p = idp->id_free)) {
		idp->id_free = p->ary[0];
		idp->id_free_cnt--;
		p->ary[0] = NULL;
	}
	spin_unlock_irqrestore(&idp->lock, flags);
	return p;
}

/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
	p->ary[0] = idp->id_free;
	idp->id_free = p;
	idp->id_free_cnt++;
}

static void free_layer(struct idr *idp, struct idr_layer *p)
{
	unsigned long flags;

	/*
	 * Depends on the return element being zeroed.
	 */
	spin_lock_irqsave(&idp->lock, flags);
	__free_layer(idp, p);
	spin_unlock_irqrestore(&idp->lock, flags);
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
	while (idp->id_free_cnt < IDR_FREE_MAX) {
		struct idr_layer *new;
		new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
		if (new == NULL)
			return 0;
		free_layer(idp, new);
	}
	return 1;
}
EXPORT_SYMBOL(idr_pre_get);
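
/*
 * The canonical calling pattern, sketched with a hypothetical my_lock:
 * preallocate layers outside the lock, allocate the id inside it, and
 * retry if a racing allocator consumed the preallocation:
 *
 *	int id, err;
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = idr_get_new(&my_idr, ptr, &id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 */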

static int sub_alloc(struct idr *idp, void *ptr, int *starting_id)
{
	int n, m, sh;
	struct idr_layer *p, *new;
	struct idr_layer *pa[MAX_LEVEL];
	int l, id, oid;
	long bm;

	id = *starting_id;
restart:
	p = idp->top;
	l = idp->layers;
	pa[l--] = NULL;
	while (1) {
		/*
		 * We run around this while until we reach the leaf node...
		 */
		n = (id >> (IDR_BITS*l)) & IDR_MASK;
		bm = ~p->bitmap;
		m = find_next_bit(&bm, IDR_SIZE, n);
		if (m == IDR_SIZE) {
			/* no space available; go back to the previous layer. */
			l++;
			oid = id;
			id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

			/* if already at the top layer, we need to grow */
			if (!(p = pa[l])) {
				*starting_id = id;
				return -2;
			}

			/* If we need to go up one layer, continue the
			 * loop; otherwise, restart from the top.
			 */
			sh = IDR_BITS * (l + 1);
			if (oid >> sh == id >> sh)
				continue;
			else
				goto restart;
		}
		if (m != n) {
			sh = IDR_BITS*l;
			id = ((id >> sh) ^ n ^ m) << sh;
		}
		if ((id >= MAX_ID_BIT) || (id < 0))
			return -3;
		if (l == 0)
			break;
		/*
		 * Create the layer below if it is missing.
		 */
		if (!p->ary[m]) {
			if (!(new = alloc_layer(idp)))
				return -1;
			p->ary[m] = new;
			p->count++;
		}
		pa[l--] = p;
		p = p->ary[m];
	}
	/*
	 * We have reached the leaf node, plant the
	 * user's pointer and return the raw id.
	 */
	p->ary[m] = (struct idr_layer *)ptr;
	__set_bit(m, &p->bitmap);
	p->count++;
	/*
	 * If this layer is full mark the bit in the layer above
	 * to show that this part of the radix tree is full.
	 * This may complete the layer above and require walking
	 * up the radix tree.
	 */
	n = id;
	while (p->bitmap == IDR_FULL) {
		if (!(p = pa[++l]))
			break;
		n = n >> IDR_BITS;
		__set_bit((n & IDR_MASK), &p->bitmap);
	}
	return id;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
	struct idr_layer *p, *new;
	int layers, v, id;
	unsigned long flags;

	id = starting_id;
build_up:
	p = idp->top;
	layers = idp->layers;
	if (unlikely(!p)) {
		if (!(p = alloc_layer(idp)))
			return -1;
		layers = 1;
	}
	/*
	 * Add a new layer to the top of the tree if the requested
	 * id is larger than the currently allocated space.
	 */
	while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
		layers++;
		if (!p->count)
			continue;
		if (!(new = alloc_layer(idp))) {
			/*
			 * The allocation failed.  If we built part of
			 * the structure tear it down.
			 */
			spin_lock_irqsave(&idp->lock, flags);
			for (new = p; p && p != idp->top; new = p) {
				p = p->ary[0];
				new->ary[0] = NULL;
				new->bitmap = new->count = 0;
				__free_layer(idp, new);
			}
			spin_unlock_irqrestore(&idp->lock, flags);
			return -1;
		}
		new->ary[0] = p;
		new->count = 1;
		if (p->bitmap == IDR_FULL)
			__set_bit(0, &new->bitmap);
		p = new;
	}
	idp->top = p;
	idp->layers = layers;
	v = sub_alloc(idp, ptr, &id);
	if (v == -2)
		goto build_up;
	return v;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, starting_id);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
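
/*
 * Sketch: reserve ids at or above a floor, e.g. to keep ids 0..31 for
 * static assignment (DYNAMIC_BASE is a hypothetical name):
 *
 *	#define DYNAMIC_BASE 32
 *
 *	err = idr_get_new_above(&my_idr, ptr, DYNAMIC_BASE, &id);
 *	// on success, id >= DYNAMIC_BASE
 */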

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
	int rv;

	rv = idr_get_new_above_int(idp, ptr, 0);
	/*
	 * This is a cheap hack until the IDR code can be fixed to
	 * return proper error values.
	 */
	if (rv < 0) {
		if (rv == -1)
			return -EAGAIN;
		else /* Will be -3 */
			return -ENOSPC;
	}
	*id = rv;
	return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
	printk("idr_remove called for id=%d which is not allocated.\n", id);
	dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
	struct idr_layer *p = idp->top;
	struct idr_layer **pa[MAX_LEVEL];
	struct idr_layer ***paa = &pa[0];
	int n;

	*paa = NULL;
	*++paa = &idp->top;

	while ((shift > 0) && p) {
		n = (id >> shift) & IDR_MASK;
		__clear_bit(n, &p->bitmap);
		*++paa = &p->ary[n];
		p = p->ary[n];
		shift -= IDR_BITS;
	}
	n = id & IDR_MASK;
	if (likely(p != NULL && test_bit(n, &p->bitmap))) {
		__clear_bit(n, &p->bitmap);
		p->ary[n] = NULL;
		while (*paa && !--((**paa)->count)) {
			free_layer(idp, **paa);
			**paa-- = NULL;
		}
		if (!*paa)
			idp->layers = 0;
	} else
		idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
	struct idr_layer *p;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
	if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
	    idp->top->ary[0]) {	// We can drop a layer
		p = idp->top->ary[0];
		idp->top->bitmap = idp->top->count = 0;
		free_layer(idp, idp->top);
		idp->top = p;
		--idp->layers;
	}
	/* Trim the free list back down to IDR_FREE_MAX spare layers. */
	while (idp->id_free_cnt >= IDR_FREE_MAX) {
		p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
	while (idp->id_free_cnt) {
		struct idr_layer *p = alloc_layer(idp);
		kmem_cache_free(idr_layer_cache, p);
	}
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
	int n;
	struct idr_layer *p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	/* Mask off upper bits we don't use for the search. */
	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return NULL;

	while (n > 0 && p) {
		n -= IDR_BITS;
		p = p->ary[(id >> n) & IDR_MASK];
	}
	return (void *)p;
}
EXPORT_SYMBOL(idr_find);
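
/*
 * Lookup sketch: idr_find() does no locking of its own, so take the
 * same lock that covers idr_get_new()/idr_remove() (my_lock is
 * hypothetical):
 *
 *	struct my_obj *obj;
 *
 *	spin_lock(&my_lock);
 *	obj = idr_find(&my_idr, id);	// NULL if id is unallocated
 *	spin_unlock(&my_lock);
 */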

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
	int n;
	struct idr_layer *p, *old_p;

	n = idp->layers * IDR_BITS;
	p = idp->top;

	id &= MAX_ID_MASK;

	if (id >= (1 << n))
		return ERR_PTR(-EINVAL);

	n -= IDR_BITS;
	while ((n > 0) && p) {
		p = p->ary[(id >> n) & IDR_MASK];
		n -= IDR_BITS;
	}

	n = id & IDR_MASK;
	if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
		return ERR_PTR(-ENOENT);

	old_p = p->ary[n];
	p->ary[n] = ptr;

	return old_p;
}
EXPORT_SYMBOL(idr_replace);
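
/*
 * Replace sketch: the return value is the old pointer on success or an
 * ERR_PTR() value on failure, so test it with IS_ERR(), never against
 * NULL:
 *
 *	old = idr_replace(&my_idr, new_ptr, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);	// -ENOENT or -EINVAL
 */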

static void idr_cache_ctor(void *idr_layer, struct kmem_cache *idr_layer_cache,
			   unsigned long flags)
{
	memset(idr_layer, 0, sizeof(struct idr_layer));
}

static int init_id_cache(void)
{
	if (!idr_layer_cache)
		idr_layer_cache = kmem_cache_create("idr_layer_cache",
			sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
	return 0;
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
	init_id_cache();
	memset(idp, 0, sizeof(struct idr));
	spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);