idr.c

/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *      Copyright (C) 2002 by Concurrent Computer Corporation
 *      Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer, or
 * whatever you like, with that id; we treat it as a (void *).  You can
 * pass this id to a user for him to pass back at a later time.  You
 * then pass that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep up to IDR_FREE_MAX layers) in a local
 * pool so we don't need to go to the memory "store" during an id
 * allocation, and so you don't need to be too concerned about locking
 * and conflicts with the slab allocator.
 */
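
/*
 * A minimal lifecycle sketch (illustrative only; my_idr, my_ptr and the
 * id bookkeeping are made-up names, not part of this file):
 *
 *      static DEFINE_IDR(my_idr);
 *
 *      ... allocate an id for my_ptr with idr_pre_get()/idr_get_new(),
 *          as shown after idr_pre_get() below ...
 *
 *      void *p = idr_find(&my_idr, id);        // returns my_ptr, or NULL
 *      idr_remove(&my_idr, id);                // the id may now be reused
 */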

#ifndef TEST                    // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *alloc_layer(struct idr *idp)
{
        struct idr_layer *p;
        unsigned long flags;

        spin_lock_irqsave(&idp->lock, flags);
        if ((p = idp->id_free)) {
                idp->id_free = p->ary[0];
                idp->id_free_cnt--;
                p->ary[0] = NULL;
        }
        spin_unlock_irqrestore(&idp->lock, flags);
        return(p);
}

/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
        p->ary[0] = idp->id_free;
        idp->id_free = p;
        idp->id_free_cnt++;
}

static void free_layer(struct idr *idp, struct idr_layer *p)
{
        unsigned long flags;

        /*
         * Depends on the return element being zeroed.
         */
        spin_lock_irqsave(&idp->lock, flags);
        __free_layer(idp, p);
        spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
        struct idr_layer *p = pa[0];
        int l = 0;

        __set_bit(id & IDR_MASK, &p->bitmap);
        /*
         * If this layer is full mark the bit in the layer above to
         * show that this part of the radix tree is full.  This may
         * complete the layer above and require walking up the radix
         * tree.
         */
        while (p->bitmap == IDR_FULL) {
                if (!(p = pa[++l]))
                        break;
                id = id >> IDR_BITS;
                __set_bit((id & IDR_MASK), &p->bitmap);
        }
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling
 * idr_get_new() or idr_get_new_above().  It preallocates enough memory
 * to satisfy the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
        while (idp->id_free_cnt < IDR_FREE_MAX) {
                struct idr_layer *new;
                new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
                if (new == NULL)
                        return (0);
                free_layer(idp, new);
        }
        return 1;
}
EXPORT_SYMBOL(idr_pre_get);
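
/*
 * Typical calling pattern (an illustrative sketch only; my_idr, my_lock
 * and my_ptr are made-up names, and the caller's own locking scheme may
 * differ):
 *
 *      int id, err;
 *
 *   again:
 *      if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *              return -ENOMEM;
 *      spin_lock(&my_lock);
 *      err = idr_get_new(&my_idr, my_ptr, &id);
 *      spin_unlock(&my_lock);
 *      if (err == -EAGAIN)
 *              goto again;     // someone else consumed our preallocation
 *      else if (err)
 *              return err;     // -ENOSPC: the id space is exhausted
 */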

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
        int n, m, sh;
        struct idr_layer *p, *new;
        int l, id, oid;
        long bm;

        id = *starting_id;
 restart:
        p = idp->top;
        l = idp->layers;
        pa[l--] = NULL;
        while (1) {
                /*
                 * We run around this while until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS*l)) & IDR_MASK;
                bm = ~p->bitmap;
                m = find_next_bit(&bm, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available go back to previous layer. */
                        l++;
                        oid = id;
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                        /* if already at the top layer, we need to grow */
                        if (!(p = pa[l])) {
                                *starting_id = id;
                                return -2;
                        }

                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
                         */
                        sh = IDR_BITS * (l + 1);
                        if (oid >> sh == id >> sh)
                                continue;
                        else
                                goto restart;
                }
                if (m != n) {
                        sh = IDR_BITS*l;
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_ID_BIT) || (id < 0))
                        return -3;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
                        if (!(new = alloc_layer(idp)))
                                return -1;
                        p->ary[m] = new;
                        p->count++;
                }
                pa[l--] = p;
                p = p->ary[m];
        }

        pa[l] = p;
        return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
                              struct idr_layer **pa)
{
        struct idr_layer *p, *new;
        int layers, v, id;
        unsigned long flags;

        id = starting_id;
build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
                if (!(p = alloc_layer(idp)))
                        return -1;
                layers = 1;
        }
        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
                layers++;
                if (!p->count)
                        continue;
                if (!(new = alloc_layer(idp))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure tear it down.
                         */
                        spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
                                new->bitmap = new->count = 0;
                                __free_layer(idp, new);
                        }
                        spin_unlock_irqrestore(&idp->lock, flags);
                        return -1;
                }
                new->ary[0] = p;
                new->count = 1;
                if (p->bitmap == IDR_FULL)
                        __set_bit(0, &new->bitmap);
                p = new;
        }
        idp->top = p;
        idp->layers = layers;
        v = sub_alloc(idp, &id, pa);
        if (v == -2)
                goto build_up;
        return(v);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        int id;

        id = idr_get_empty_slot(idp, starting_id, pa);
        if (id >= 0) {
                /*
                 * Successfully found an empty slot.  Install the user
                 * pointer and mark the slot full.
                 */
                pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
                pa[0]->count++;
                idr_mark_full(pa, id);
        }

        return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, starting_id);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0) {
                if (rv == -1)
                        return -EAGAIN;
                else /* Will be -3 */
                        return -ENOSPC;
        }
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
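
/*
 * Illustrative sketch of reserving the low ids (my_idr, my_ptr and the
 * bound of 100 are made-up for this example); the same idr_pre_get()
 * and -EAGAIN retry rules as for idr_get_new() apply:
 *
 *      int id, err;
 *
 *      err = idr_get_new_above(&my_idr, my_ptr, 100, &id);
 *      if (!err)
 *              ... id is now >= 100 and idr_find(&my_idr, id) == my_ptr ...
 */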

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, 0);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0) {
                if (rv == -1)
                        return -EAGAIN;
                else /* Will be -3 */
                        return -ENOSPC;
        }
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
        printk("idr_remove called for id=%d which is not allocated.\n", id);
        dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
        struct idr_layer *p = idp->top;
        struct idr_layer **pa[MAX_LEVEL];
        struct idr_layer ***paa = &pa[0];
        int n;

        *paa = NULL;
        *++paa = &idp->top;

        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                *++paa = &p->ary[n];
                p = p->ary[n];
                shift -= IDR_BITS;
        }
        n = id & IDR_MASK;
        if (likely(p != NULL && test_bit(n, &p->bitmap))) {
                __clear_bit(n, &p->bitmap);
                p->ary[n] = NULL;
                while (*paa && !--((**paa)->count)) {
                        free_layer(idp, **paa);
                        **paa-- = NULL;
                }
                if (!*paa)
                        idp->layers = 0;
        } else
                idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
        struct idr_layer *p;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
            idp->top->ary[0]) {
                /* We can drop a layer */
                p = idp->top->ary[0];
                idp->top->bitmap = idp->top->count = 0;
                free_layer(idp, idp->top);
                idp->top = p;
                --idp->layers;
        }
        while (idp->id_free_cnt >= IDR_FREE_MAX) {
                p = alloc_layer(idp);
                kmem_cache_free(idr_layer_cache, p);
                return;
        }
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
        while (idp->id_free_cnt) {
                struct idr_layer *p = alloc_layer(idp);
                kmem_cache_free(idr_layer_cache, p);
        }
}
EXPORT_SYMBOL(idr_destroy);
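
/*
 * Teardown sketch (illustrative; my_idr, my_ids[] and nr_ids are made-up
 * names): idr_destroy() only drops the cached free layers, so remove
 * every id you still hold before calling it.
 *
 *      int i;
 *
 *      for (i = 0; i < nr_ids; i++)
 *              idr_remove(&my_idr, my_ids[i]);
 *      idr_destroy(&my_idr);
 */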

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
        int n;
        struct idr_layer *p;

        n = idp->layers * IDR_BITS;
        p = idp->top;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return NULL;

        while (n > 0 && p) {
                n -= IDR_BITS;
                p = p->ary[(id >> n) & IDR_MASK];
        }
        return((void *)p);
}
EXPORT_SYMBOL(idr_find);
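
/*
 * Lookup sketch (illustrative; my_idr, my_lock and struct my_obj are
 * made-up names).  The lock is whatever the caller already uses to
 * serialize against idr_get_new() and idr_remove():
 *
 *      struct my_obj *obj;
 *
 *      spin_lock(&my_lock);
 *      obj = idr_find(&my_idr, id);
 *      spin_unlock(&my_lock);
 *      if (!obj)
 *              return -ENOENT; // never allocated, or already removed
 */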

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
        int n;
        struct idr_layer *p, *old_p;

        n = idp->layers * IDR_BITS;
        p = idp->top;

        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);

        n -= IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
        }

        n = id & IDR_MASK;
        if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
                return ERR_PTR(-ENOENT);

        old_p = p->ary[n];
        p->ary[n] = ptr;

        return old_p;
}
EXPORT_SYMBOL(idr_replace);
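
/*
 * Because failures are reported as ERR_PTR() values, callers should test
 * the result with IS_ERR().  An illustrative sketch (my_idr and new_obj
 * are made-up names):
 *
 *      void *old;
 *
 *      old = idr_replace(&my_idr, new_obj, id);
 *      if (IS_ERR(old))
 *              return PTR_ERR(old);    // -EINVAL or -ENOENT
 *      // old is whatever pointer was previously registered under id
 */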

static void idr_cache_ctor(void *idr_layer, struct kmem_cache *idr_layer_cache,
                unsigned long flags)
{
        memset(idr_layer, 0, sizeof(struct idr_layer));
}

static int init_id_cache(void)
{
        if (!idr_layer_cache)
                idr_layer_cache = kmem_cache_create("idr_layer_cache",
                        sizeof(struct idr_layer), 0, 0, idr_cache_ctor, NULL);
        return 0;
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
        init_id_cache();
        memset(idp, 0, sizeof(struct idr));
        spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than a full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
        unsigned long flags;

        if (!ida->free_bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                if (!ida->free_bitmap) {
                        ida->free_bitmap = bitmap;
                        bitmap = NULL;
                }
                spin_unlock_irqrestore(&ida->idr.lock, flags);
        }

        kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling
 * ida_get_new() or ida_get_new_above().  It preallocates enough memory
 * to satisfy the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
        /* allocate idr_layers */
        if (!idr_pre_get(&ida->idr, gfp_mask))
                return 0;

        /* allocate free_bitmap */
        if (!ida->free_bitmap) {
                struct ida_bitmap *bitmap;

                bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
                if (!bitmap)
                        return 0;

                free_bitmap(ida, bitmap);
        }

        return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        struct ida_bitmap *bitmap;
        unsigned long flags;
        int idr_id = starting_id / IDA_BITMAP_BITS;
        int offset = starting_id % IDA_BITMAP_BITS;
        int t, id;

 restart:
        /* get vacant slot */
        t = idr_get_empty_slot(&ida->idr, idr_id, pa);
        if (t < 0) {
                if (t == -1)
                        return -EAGAIN;
                else /* will be -3 */
                        return -ENOSPC;
        }

        if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
                return -ENOSPC;

        if (t != idr_id)
                offset = 0;
        idr_id = t;

        /* if bitmap isn't there, create a new one */
        bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
        if (!bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                bitmap = ida->free_bitmap;
                ida->free_bitmap = NULL;
                spin_unlock_irqrestore(&ida->idr.lock, flags);

                if (!bitmap)
                        return -EAGAIN;

                memset(bitmap, 0, sizeof(struct ida_bitmap));
                pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
                pa[0]->count++;
        }

        /* lookup for empty slot */
        t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
        if (t == IDA_BITMAP_BITS) {
                /* no empty slot after offset, continue to the next chunk */
                idr_id++;
                offset = 0;
                goto restart;
        }

        id = idr_id * IDA_BITMAP_BITS + t;
        if (id >= MAX_ID_BIT)
                return -ENOSPC;

        __set_bit(t, bitmap->bitmap);
        if (++bitmap->nr_busy == IDA_BITMAP_BITS)
                idr_mark_full(pa, idr_id);

        *p_id = id;

        /* Each leaf node can handle nearly a thousand slots and the
         * whole idea of ida is to have small memory foot print.
         * Throw away extra resources one by one after each successful
         * allocation.
         */
        if (ida->idr.id_free_cnt || ida->free_bitmap) {
                struct idr_layer *p = alloc_layer(&ida->idr);
                if (p)
                        kmem_cache_free(idr_layer_cache, p);
        }

        return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
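
/*
 * Typical ida calling pattern (illustrative only; my_ida and my_lock are
 * made-up names, and the caller's locking may differ).  Release the id
 * with ida_remove() when done:
 *
 *      int id, err;
 *
 *   again:
 *      if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *              return -ENOMEM;
 *      spin_lock(&my_lock);
 *      err = ida_get_new(&my_ida, &id);
 *      spin_unlock(&my_lock);
 *      if (err == -EAGAIN)
 *              goto again;
 *      else if (err)
 *              return err;     // -ENOSPC
 *      ...
 *      ida_remove(&my_ida, id);
 */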

/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
        return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
        struct idr_layer *p = ida->idr.top;
        int shift = (ida->idr.layers - 1) * IDR_BITS;
        int idr_id = id / IDA_BITMAP_BITS;
        int offset = id % IDA_BITMAP_BITS;
        int n;
        struct ida_bitmap *bitmap;

        /* clear full bits while looking up the leaf idr_layer */
        while ((shift > 0) && p) {
                n = (idr_id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                p = p->ary[n];
                shift -= IDR_BITS;
        }

        if (p == NULL)
                goto err;

        n = idr_id & IDR_MASK;
        __clear_bit(n, &p->bitmap);

        bitmap = (void *)p->ary[n];
        if (!test_bit(offset, bitmap->bitmap))
                goto err;

        /* update bitmap and remove it if empty */
        __clear_bit(offset, bitmap->bitmap);
        if (--bitmap->nr_busy == 0) {
                __set_bit(n, &p->bitmap);       /* to please idr_remove() */
                idr_remove(&ida->idr, idr_id);
                free_bitmap(ida, bitmap);
        }

        return;

 err:
        printk(KERN_WARNING
               "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
        idr_destroy(&ida->idr);
        kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
        memset(ida, 0, sizeof(struct ida));
        idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);