idr.c

/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *	Copyright (C) 2002 by Concurrent Computer Corporation
 *	Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate with that id a
 * pointer or whatever; we treat it as a (void *).  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep IDR_FREE_MAX layers in a local pool)
 * so we don't need to go to the memory "store" during an id allocate,
 * and so you don't need to be too concerned about locking and conflicts
 * with the slab allocator.
 */

#ifndef TEST			// to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *alloc_layer(struct idr *idp)
{
        struct idr_layer *p;
        unsigned long flags;

        spin_lock_irqsave(&idp->lock, flags);
        if ((p = idp->id_free)) {
                idp->id_free = p->ary[0];
                idp->id_free_cnt--;
                p->ary[0] = NULL;
        }
        spin_unlock_irqrestore(&idp->lock, flags);
        return(p);
}

/* only called when idp->lock is held */
static void __free_layer(struct idr *idp, struct idr_layer *p)
{
        p->ary[0] = idp->id_free;
        idp->id_free = p;
        idp->id_free_cnt++;
}

static void free_layer(struct idr *idp, struct idr_layer *p)
{
        unsigned long flags;

        /*
         * Depends on the return element being zeroed.
         */
        spin_lock_irqsave(&idp->lock, flags);
        __free_layer(idp, p);
        spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
        struct idr_layer *p = pa[0];
        int l = 0;

        __set_bit(id & IDR_MASK, &p->bitmap);
        /*
         * If this layer is full mark the bit in the layer above to
         * show that this part of the radix tree is full.  This may
         * complete the layer above and require walking up the radix
         * tree.
         */
        while (p->bitmap == IDR_FULL) {
                if (!(p = pa[++l]))
                        break;
                id = id >> IDR_BITS;
                __set_bit((id & IDR_MASK), &p->bitmap);
        }
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
        while (idp->id_free_cnt < IDR_FREE_MAX) {
                struct idr_layer *new;
                new = kmem_cache_alloc(idr_layer_cache, gfp_mask);
                if (new == NULL)
                        return (0);
                free_layer(idp, new);
        }
        return 1;
}
EXPORT_SYMBOL(idr_pre_get);

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
        int n, m, sh;
        struct idr_layer *p, *new;
        int l, id, oid;
        unsigned long bm;

        id = *starting_id;
restart:
        p = idp->top;
        l = idp->layers;
        pa[l--] = NULL;
        while (1) {
                /*
                 * We run around this while until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS*l)) & IDR_MASK;
                bm = ~p->bitmap;
                m = find_next_bit(&bm, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available go back to previous layer. */
                        l++;
                        oid = id;
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                        /* if already at the top layer, we need to grow */
                        if (!(p = pa[l])) {
                                *starting_id = id;
                                return -2;
                        }

                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
                         */
                        sh = IDR_BITS * (l + 1);
                        if (oid >> sh == id >> sh)
                                continue;
                        else
                                goto restart;
                }
                if (m != n) {
                        sh = IDR_BITS*l;
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_ID_BIT) || (id < 0))
                        return -3;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
                        if (!(new = alloc_layer(idp)))
                                return -1;
                        p->ary[m] = new;
                        p->count++;
                }
                pa[l--] = p;
                p = p->ary[m];
        }

        pa[l] = p;
        return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
                              struct idr_layer **pa)
{
        struct idr_layer *p, *new;
        int layers, v, id;
        unsigned long flags;

        id = starting_id;
build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
                if (!(p = alloc_layer(idp)))
                        return -1;
                layers = 1;
        }
        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
                layers++;
                if (!p->count)
                        continue;
                if (!(new = alloc_layer(idp))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure tear it down.
                         */
                        spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
                                new->bitmap = new->count = 0;
                                __free_layer(idp, new);
                        }
                        spin_unlock_irqrestore(&idp->lock, flags);
                        return -1;
                }
                new->ary[0] = p;
                new->count = 1;
                if (p->bitmap == IDR_FULL)
                        __set_bit(0, &new->bitmap);
                p = new;
        }
        idp->top = p;
        idp->layers = layers;
        v = sub_alloc(idp, &id, pa);
        if (v == -2)
                goto build_up;
        return(v);
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        int id;

        id = idr_get_empty_slot(idp, starting_id, pa);
        if (id >= 0) {
                /*
                 * Successfully found an empty slot.  Install the user
                 * pointer and mark the slot full.
                 */
                pa[0]->ary[id & IDR_MASK] = (struct idr_layer *)ptr;
                pa[0]->count++;
                idr_mark_full(pa, id);
        }

        return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, starting_id);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0) {
                if (rv == -1)
                        return -EAGAIN;
                else /* Will be -3 */
                        return -ENOSPC;
        }
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new_above);

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return -ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, 0);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0) {
                if (rv == -1)
                        return -EAGAIN;
                else /* Will be -3 */
                        return -ENOSPC;
        }
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new);

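/*
 * Usage sketch (illustrative, with hypothetical caller-side names
 * "my_idr", "my_lock", "my_ptr" and "my_id"): pre-allocate memory
 * outside the lock and retry on -EAGAIN; any other non-zero return
 * is -ENOSPC.
 *
 *	int my_id, err;
 *
 *	do {
 *		if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = idr_get_new(&my_idr, my_ptr, &my_id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 *	if (err)
 *		return err;
 */
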
static void idr_remove_warning(int id)
{
        printk("idr_remove called for id=%d which is not allocated.\n", id);
        dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
        struct idr_layer *p = idp->top;
        struct idr_layer **pa[MAX_LEVEL];
        struct idr_layer ***paa = &pa[0];
        int n;

        *paa = NULL;
        *++paa = &idp->top;

        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                *++paa = &p->ary[n];
                p = p->ary[n];
                shift -= IDR_BITS;
        }
        n = id & IDR_MASK;
        if (likely(p != NULL && test_bit(n, &p->bitmap))) {
                __clear_bit(n, &p->bitmap);
                p->ary[n] = NULL;
                while (*paa && !--((**paa)->count)) {
                        free_layer(idp, **paa);
                        **paa-- = NULL;
                }
                if (!*paa)
                        idp->layers = 0;
        } else
                idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
        struct idr_layer *p;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
            idp->top->ary[0]) {	// We can drop a layer
                p = idp->top->ary[0];
                idp->top->bitmap = idp->top->count = 0;
                free_layer(idp, idp->top);
                idp->top = p;
                --idp->layers;
        }
        while (idp->id_free_cnt >= IDR_FREE_MAX) {
                p = alloc_layer(idp);
                kmem_cache_free(idr_layer_cache, p);
                return;
        }
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idr_layers, but this
 * function will remove all id mappings and leave all idr_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
        int n, id, max;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > IDR_BITS && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                id += 1 << n;
                while (n < fls(id)) {
                        if (p) {
                                memset(p, 0, sizeof *p);
                                free_layer(idp, p);
                        }
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        idp->top = NULL;
        idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);

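/*
 * Clean-up sketch following the sequence described above (hypothetical
 * caller-side names "my_idr" and "free_one"):
 *
 *	static int free_one(int id, void *p, void *data)
 *	{
 *		kfree(p);
 *		return 0;
 *	}
 *
 *	idr_for_each(&my_idr, free_one, NULL);
 *	idr_remove_all(&my_idr);
 *	idr_destroy(&my_idr);
 */
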
/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
        while (idp->id_free_cnt) {
                struct idr_layer *p = alloc_layer(idp);
                kmem_cache_free(idr_layer_cache, p);
        }
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * The caller must serialize idr_find() vs idr_get_new() and idr_remove().
 */
void *idr_find(struct idr *idp, int id)
{
        int n;
        struct idr_layer *p;

        n = idp->layers * IDR_BITS;
        p = idp->top;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return NULL;

        while (n > 0 && p) {
                n -= IDR_BITS;
                p = p->ary[(id >> n) & IDR_MASK];
        }
        return((void *)p);
}
EXPORT_SYMBOL(idr_find);

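/*
 * Lookup sketch: callers provide their own serialization against
 * idr_get_new() and idr_remove(), e.g. the same lock used for allocation
 * (hypothetical caller-side names "my_idr", "my_lock" and "struct my_obj"):
 *
 *	struct my_obj *obj;
 *
 *	spin_lock(&my_lock);
 *	obj = idr_find(&my_idr, id);
 *	spin_unlock(&my_lock);
 *	if (!obj)
 *		return -ENOENT;
 */
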
/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than 0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data)
{
        int n, id, max, error = 0;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                if (p) {
                        error = fn(id, (void *)p, data);
                        if (error)
                                break;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }

        return error;
}
EXPORT_SYMBOL(idr_for_each);

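/*
 * Callback sketch: a non-zero return from the callback stops the walk
 * and becomes idr_for_each()'s return value; returning 0 keeps iterating
 * (hypothetical caller-side names "find_first_dirty" and "struct my_obj"):
 *
 *	static int find_first_dirty(int id, void *p, void *data)
 *	{
 *		struct my_obj *obj = p;
 *
 *		if (!obj->dirty)
 *			return 0;
 *		*(int *)data = id;
 *		return 1;
 *	}
 */
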
/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A -ENOENT return indicates that @id was not found.
 * A -EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize vs idr_find(), idr_get_new(), and idr_remove().
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
        int n;
        struct idr_layer *p, *old_p;

        n = idp->layers * IDR_BITS;
        p = idp->top;

        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);

        n -= IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
        }

        n = id & IDR_MASK;
        if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
                return ERR_PTR(-ENOENT);

        old_p = p->ary[n];
        p->ary[n] = ptr;

        return old_p;
}
EXPORT_SYMBOL(idr_replace);

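/*
 * Usage sketch: the previous pointer is returned on success and errors
 * come back ERR_PTR()-encoded, so check with IS_ERR() (hypothetical
 * caller-side names "my_idr" and "new_obj"):
 *
 *	void *old;
 *
 *	old = idr_replace(&my_idr, new_obj, id);
 *	if (IS_ERR(old))
 *		return PTR_ERR(old);
 */
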
static void idr_cache_ctor(void *idr_layer, struct kmem_cache *idr_layer_cache,
                           unsigned long flags)
{
        memset(idr_layer, 0, sizeof(struct idr_layer));
}

static int init_id_cache(void)
{
        if (!idr_layer_cache)
                idr_layer_cache = kmem_cache_create("idr_layer_cache",
                        sizeof(struct idr_layer), 0, 0, idr_cache_ctor);
        return 0;
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
        init_id_cache();
        memset(idp, 0, sizeof(struct idr));
        spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than full blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
        unsigned long flags;

        if (!ida->free_bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                if (!ida->free_bitmap) {
                        ida->free_bitmap = bitmap;
                        bitmap = NULL;
                }
                spin_unlock_irqrestore(&ida->idr.lock, flags);
        }

        kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flag
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
        /* allocate idr_layers */
        if (!idr_pre_get(&ida->idr, gfp_mask))
                return 0;

        /* allocate free_bitmap */
        if (!ida->free_bitmap) {
                struct ida_bitmap *bitmap;

                bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
                if (!bitmap)
                        return 0;

                free_bitmap(ida, bitmap);
        }

        return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be called
 * with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        struct ida_bitmap *bitmap;
        unsigned long flags;
        int idr_id = starting_id / IDA_BITMAP_BITS;
        int offset = starting_id % IDA_BITMAP_BITS;
        int t, id;

restart:
        /* get vacant slot */
        t = idr_get_empty_slot(&ida->idr, idr_id, pa);
        if (t < 0) {
                if (t == -1)
                        return -EAGAIN;
                else /* will be -3 */
                        return -ENOSPC;
        }

        if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
                return -ENOSPC;

        if (t != idr_id)
                offset = 0;
        idr_id = t;

        /* if bitmap isn't there, create a new one */
        bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
        if (!bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                bitmap = ida->free_bitmap;
                ida->free_bitmap = NULL;
                spin_unlock_irqrestore(&ida->idr.lock, flags);

                if (!bitmap)
                        return -EAGAIN;

                memset(bitmap, 0, sizeof(struct ida_bitmap));
                pa[0]->ary[idr_id & IDR_MASK] = (void *)bitmap;
                pa[0]->count++;
        }

        /* lookup for empty slot */
        t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
        if (t == IDA_BITMAP_BITS) {
                /* no empty slot after offset, continue to the next chunk */
                idr_id++;
                offset = 0;
                goto restart;
        }

        id = idr_id * IDA_BITMAP_BITS + t;
        if (id >= MAX_ID_BIT)
                return -ENOSPC;

        __set_bit(t, bitmap->bitmap);
        if (++bitmap->nr_busy == IDA_BITMAP_BITS)
                idr_mark_full(pa, idr_id);

        *p_id = id;

        /* Each leaf node can handle nearly a thousand slots and the
         * whole idea of ida is to have small memory foot print.
         * Throw away extra resources one by one after each successful
         * allocation.
         */
        if (ida->idr.id_free_cnt || ida->free_bitmap) {
                struct idr_layer *p = alloc_layer(&ida->idr);
                if (p)
                        kmem_cache_free(idr_layer_cache, p);
        }

        return 0;
}
EXPORT_SYMBOL(ida_get_new_above);

/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return -EAGAIN, you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return -ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
        return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

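/*
 * Usage sketch mirroring the idr pattern above (hypothetical caller-side
 * names "my_ida", "my_lock" and "my_id"): pre-allocate outside the lock
 * and retry on -EAGAIN.
 *
 *	int my_id, err;
 *
 *	do {
 *		if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *			return -ENOMEM;
 *		spin_lock(&my_lock);
 *		err = ida_get_new(&my_ida, &my_id);
 *		spin_unlock(&my_lock);
 *	} while (err == -EAGAIN);
 *	if (err)
 *		return err;
 */
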
/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
        struct idr_layer *p = ida->idr.top;
        int shift = (ida->idr.layers - 1) * IDR_BITS;
        int idr_id = id / IDA_BITMAP_BITS;
        int offset = id % IDA_BITMAP_BITS;
        int n;
        struct ida_bitmap *bitmap;

        /* clear full bits while looking up the leaf idr_layer */
        while ((shift > 0) && p) {
                n = (idr_id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                p = p->ary[n];
                shift -= IDR_BITS;
        }

        if (p == NULL)
                goto err;

        n = idr_id & IDR_MASK;
        __clear_bit(n, &p->bitmap);

        bitmap = (void *)p->ary[n];
        if (!test_bit(offset, bitmap->bitmap))
                goto err;

        /* update bitmap and remove it if empty */
        __clear_bit(offset, bitmap->bitmap);
        if (--bitmap->nr_busy == 0) {
                __set_bit(n, &p->bitmap);	/* to please idr_remove() */
                idr_remove(&ida->idr, idr_id);
                free_bitmap(ida, bitmap);
        }

        return;

err:
        printk(KERN_WARNING
               "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
        idr_destroy(&ida->idr);
        kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
        memset(ida, 0, sizeof(struct ida));
        idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);