idr.c

/*
 * 2002-10-18  written by Jim Houston jim.houston@ccur.com
 *      Copyright (C) 2002 by Concurrent Computer Corporation
 *      Distributed under the GNU GPL license version 2.
 *
 * Modified by George Anzinger to reuse immediately and to use
 * find bit instructions.  Also removed _irq on spinlocks.
 *
 * Modified by Nadia Derbey to make it RCU safe.
 *
 * Small id to pointer translation service.
 *
 * It uses a radix tree like structure as a sparse array indexed
 * by the id to obtain the pointer.  The bitmap makes allocating
 * a new id quick.
 *
 * You call it to allocate an id (an int) and associate a pointer (or
 * whatever, we treat it as a (void *)) with that id.  You can pass this
 * id to a user for him to pass back at a later time.  You then pass
 * that id to this code and it returns your pointer.
 *
 * You can release ids at any time.  When all ids are released, most of
 * the memory is returned (we keep up to IDR_FREE_MAX layers in a local
 * pool), so we don't need to go to the memory "store" during an id
 * allocate and you don't need to be too concerned about locking and
 * conflicts with the slab allocator.
 */
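
/*
 * Example (illustrative sketch, not part of the original source): the
 * basic allocate / look up / release cycle.  `my_idr` and `obj` are
 * hypothetical caller-side names; error handling is elided here and
 * shown in full next to idr_get_new_above() below.
 *
 *      static DEFINE_IDR(my_idr);
 *      int id;
 *
 *      idr_pre_get(&my_idr, GFP_KERNEL);       // preload free layers
 *      idr_get_new(&my_idr, obj, &id);         // obj is now reachable by id
 *      BUG_ON(idr_find(&my_idr, id) != obj);   // id -> pointer lookup
 *      idr_remove(&my_idr, id);                // release the id
 */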

#ifndef TEST                    // to test in user space...
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/module.h>
#endif
#include <linux/err.h>
#include <linux/string.h>
#include <linux/idr.h>

static struct kmem_cache *idr_layer_cache;

static struct idr_layer *get_from_free_list(struct idr *idp)
{
        struct idr_layer *p;
        unsigned long flags;

        spin_lock_irqsave(&idp->lock, flags);
        if ((p = idp->id_free)) {
                idp->id_free = p->ary[0];
                idp->id_free_cnt--;
                p->ary[0] = NULL;
        }
        spin_unlock_irqrestore(&idp->lock, flags);
        return p;
}

static void idr_layer_rcu_free(struct rcu_head *head)
{
        struct idr_layer *layer;

        layer = container_of(head, struct idr_layer, rcu_head);
        kmem_cache_free(idr_layer_cache, layer);
}

static inline void free_layer(struct idr_layer *p)
{
        call_rcu(&p->rcu_head, idr_layer_rcu_free);
}

/* only called when idp->lock is held */
static void __move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        p->ary[0] = idp->id_free;
        idp->id_free = p;
        idp->id_free_cnt++;
}

static void move_to_free_list(struct idr *idp, struct idr_layer *p)
{
        unsigned long flags;

        /*
         * Depends on the return element being zeroed.
         */
        spin_lock_irqsave(&idp->lock, flags);
        __move_to_free_list(idp, p);
        spin_unlock_irqrestore(&idp->lock, flags);
}

static void idr_mark_full(struct idr_layer **pa, int id)
{
        struct idr_layer *p = pa[0];
        int l = 0;

        __set_bit(id & IDR_MASK, &p->bitmap);
        /*
         * If this layer is full mark the bit in the layer above to
         * show that this part of the radix tree is full.  This may
         * complete the layer above and require walking up the radix
         * tree.
         */
        while (p->bitmap == IDR_FULL) {
                if (!(p = pa[++l]))
                        break;
                id = id >> IDR_BITS;
                __set_bit((id & IDR_MASK), &p->bitmap);
        }
}

/**
 * idr_pre_get - reserve resources for idr allocation
 * @idp: idr handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * idr_get_new* functions.  It preallocates enough memory to satisfy
 * the worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int idr_pre_get(struct idr *idp, gfp_t gfp_mask)
{
        while (idp->id_free_cnt < IDR_FREE_MAX) {
                struct idr_layer *new;
                new = kmem_cache_zalloc(idr_layer_cache, gfp_mask);
                if (new == NULL)
                        return 0;
                move_to_free_list(idp, new);
        }
        return 1;
}
EXPORT_SYMBOL(idr_pre_get);
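
/*
 * Example (illustrative sketch, not part of the original source): the
 * preload happens outside the caller's lock, since it may sleep with
 * GFP_KERNEL.  `my_idr` and `my_lock` are hypothetical caller-side names.
 *
 *      if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *              return -ENOMEM;         // really out of memory
 *      spin_lock(&my_lock);
 *      ... call idr_get_new() / idr_get_new_above() under the lock ...
 *      spin_unlock(&my_lock);
 */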

static int sub_alloc(struct idr *idp, int *starting_id, struct idr_layer **pa)
{
        int n, m, sh;
        struct idr_layer *p, *new;
        int l, id, oid;
        unsigned long bm;

        id = *starting_id;
restart:
        p = idp->top;
        l = idp->layers;
        pa[l--] = NULL;
        while (1) {
                /*
                 * We run around this while until we reach the leaf node...
                 */
                n = (id >> (IDR_BITS*l)) & IDR_MASK;
                bm = ~p->bitmap;
                m = find_next_bit(&bm, IDR_SIZE, n);
                if (m == IDR_SIZE) {
                        /* no space available go back to previous layer. */
                        l++;
                        oid = id;
                        id = (id | ((1 << (IDR_BITS * l)) - 1)) + 1;

                        /* if already at the top layer, we need to grow */
                        if (!(p = pa[l])) {
                                *starting_id = id;
                                return IDR_NEED_TO_GROW;
                        }

                        /* If we need to go up one layer, continue the
                         * loop; otherwise, restart from the top.
                         */
                        sh = IDR_BITS * (l + 1);
                        if (oid >> sh == id >> sh)
                                continue;
                        else
                                goto restart;
                }
                if (m != n) {
                        sh = IDR_BITS*l;
                        id = ((id >> sh) ^ n ^ m) << sh;
                }
                if ((id >= MAX_ID_BIT) || (id < 0))
                        return IDR_NOMORE_SPACE;
                if (l == 0)
                        break;
                /*
                 * Create the layer below if it is missing.
                 */
                if (!p->ary[m]) {
                        new = get_from_free_list(idp);
                        if (!new)
                                return -1;
                        new->layer = l-1;
                        rcu_assign_pointer(p->ary[m], new);
                        p->count++;
                }
                pa[l--] = p;
                p = p->ary[m];
        }

        pa[l] = p;
        return id;
}

static int idr_get_empty_slot(struct idr *idp, int starting_id,
                              struct idr_layer **pa)
{
        struct idr_layer *p, *new;
        int layers, v, id;
        unsigned long flags;

        id = starting_id;
build_up:
        p = idp->top;
        layers = idp->layers;
        if (unlikely(!p)) {
                if (!(p = get_from_free_list(idp)))
                        return -1;
                p->layer = 0;
                layers = 1;
        }
        /*
         * Add a new layer to the top of the tree if the requested
         * id is larger than the currently allocated space.
         */
        while ((layers < (MAX_LEVEL - 1)) && (id >= (1 << (layers*IDR_BITS)))) {
                layers++;
                if (!p->count) {
                        /* special case: if the tree is currently empty,
                         * then we grow the tree by moving the top node
                         * upwards.
                         */
                        p->layer++;
                        continue;
                }
                if (!(new = get_from_free_list(idp))) {
                        /*
                         * The allocation failed.  If we built part of
                         * the structure tear it down.
                         */
                        spin_lock_irqsave(&idp->lock, flags);
                        for (new = p; p && p != idp->top; new = p) {
                                p = p->ary[0];
                                new->ary[0] = NULL;
                                new->bitmap = new->count = 0;
                                __move_to_free_list(idp, new);
                        }
                        spin_unlock_irqrestore(&idp->lock, flags);
                        return -1;
                }
                new->ary[0] = p;
                new->count = 1;
                new->layer = layers-1;
                if (p->bitmap == IDR_FULL)
                        __set_bit(0, &new->bitmap);
                p = new;
        }
        rcu_assign_pointer(idp->top, p);
        idp->layers = layers;
        v = sub_alloc(idp, &id, pa);
        if (v == IDR_NEED_TO_GROW)
                goto build_up;
        return v;
}

static int idr_get_new_above_int(struct idr *idp, void *ptr, int starting_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        int id;

        id = idr_get_empty_slot(idp, starting_id, pa);
        if (id >= 0) {
                /*
                 * Successfully found an empty slot.  Install the user
                 * pointer and mark the slot full.
                 */
                rcu_assign_pointer(pa[0]->ary[id & IDR_MASK],
                                   (struct idr_layer *)ptr);
                pa[0]->count++;
                idr_mark_full(pa, id);
        }

        return id;
}

/**
 * idr_get_new_above - allocate new idr entry above or equal to a start id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @starting_id: id to start search at
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return %-ENOSPC.
 *
 * @id returns a value in the range @starting_id ... 0x7fffffff
 */
int idr_get_new_above(struct idr *idp, void *ptr, int starting_id, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, starting_id);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0)
                return _idr_rc_to_errno(rv);
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new_above);
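
/*
 * Example (illustrative sketch, not part of the original source): the
 * -EAGAIN contract above leads to the classic retry loop.  `my_idr`,
 * `my_lock` and `obj` are hypothetical caller-side names.
 *
 *      int id, err;
 *
 *   again:
 *      if (!idr_pre_get(&my_idr, GFP_KERNEL))
 *              return -ENOMEM;
 *      spin_lock(&my_lock);
 *      err = idr_get_new(&my_idr, obj, &id);
 *      spin_unlock(&my_lock);
 *      if (err == -EAGAIN)
 *              goto again;             // preloaded layer was consumed
 *      else if (err)
 *              return err;             // -ENOSPC: idr is full
 *
 * After a successful return, `id` maps back to `obj` via idr_find().
 */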

/**
 * idr_get_new - allocate new idr entry
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: pointer to the allocated handle
 *
 * This is the allocate id function.  It should be called with any
 * required locks.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the idr_pre_get() call.  If the idr is full, it will
 * return %-ENOSPC.
 *
 * @id returns a value in the range 0 ... 0x7fffffff
 */
int idr_get_new(struct idr *idp, void *ptr, int *id)
{
        int rv;

        rv = idr_get_new_above_int(idp, ptr, 0);
        /*
         * This is a cheap hack until the IDR code can be fixed to
         * return proper error values.
         */
        if (rv < 0)
                return _idr_rc_to_errno(rv);
        *id = rv;
        return 0;
}
EXPORT_SYMBOL(idr_get_new);

static void idr_remove_warning(int id)
{
        printk(KERN_WARNING
               "idr_remove called for id=%d which is not allocated.\n", id);
        dump_stack();
}

static void sub_remove(struct idr *idp, int shift, int id)
{
        struct idr_layer *p = idp->top;
        struct idr_layer **pa[MAX_LEVEL];
        struct idr_layer ***paa = &pa[0];
        struct idr_layer *to_free;
        int n;

        *paa = NULL;
        *++paa = &idp->top;

        while ((shift > 0) && p) {
                n = (id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                *++paa = &p->ary[n];
                p = p->ary[n];
                shift -= IDR_BITS;
        }
        n = id & IDR_MASK;
        if (likely(p != NULL && test_bit(n, &p->bitmap))) {
                __clear_bit(n, &p->bitmap);
                rcu_assign_pointer(p->ary[n], NULL);
                to_free = NULL;
                while (*paa && !--((**paa)->count)) {
                        if (to_free)
                                free_layer(to_free);
                        to_free = **paa;
                        **paa-- = NULL;
                }
                if (!*paa)
                        idp->layers = 0;
                if (to_free)
                        free_layer(to_free);
        } else
                idr_remove_warning(id);
}

/**
 * idr_remove - remove the given id and free its slot
 * @idp: idr handle
 * @id: unique key
 */
void idr_remove(struct idr *idp, int id)
{
        struct idr_layer *p;
        struct idr_layer *to_free;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        sub_remove(idp, (idp->layers - 1) * IDR_BITS, id);
        if (idp->top && idp->top->count == 1 && (idp->layers > 1) &&
            idp->top->ary[0]) {
                /*
                 * Single child at leftmost slot: we can shrink the tree.
                 * This level is not needed anymore since when layers are
                 * inserted, they are inserted at the top of the existing
                 * tree.
                 */
                to_free = idp->top;
                p = idp->top->ary[0];
                rcu_assign_pointer(idp->top, p);
                --idp->layers;
                to_free->bitmap = to_free->count = 0;
                free_layer(to_free);
        }
        while (idp->id_free_cnt >= IDR_FREE_MAX) {
                p = get_from_free_list(idp);
                /*
                 * Note: we don't call the rcu callback here, since the only
                 * layers that fall into the freelist are those that have been
                 * preallocated.
                 */
                kmem_cache_free(idr_layer_cache, p);
        }
        return;
}
EXPORT_SYMBOL(idr_remove);

/**
 * idr_remove_all - remove all ids from the given idr tree
 * @idp: idr handle
 *
 * idr_destroy() only frees up unused, cached idp_layers, but this
 * function will remove all id mappings and leave all idp_layers
 * unused.
 *
 * A typical clean-up sequence for objects stored in an idr tree will
 * use idr_for_each() to free all objects, if necessary, then
 * idr_remove_all() to remove all ids, and idr_destroy() to free
 * up the cached idr_layers.
 */
void idr_remove_all(struct idr *idp)
{
        int n, id, max;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = idp->top;
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > IDR_BITS && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = p->ary[(id >> n) & IDR_MASK];
                }

                id += 1 << n;
                while (n < fls(id)) {
                        if (p)
                                free_layer(p);
                        n += IDR_BITS;
                        p = *--paa;
                }
        }
        rcu_assign_pointer(idp->top, NULL);
        idp->layers = 0;
}
EXPORT_SYMBOL(idr_remove_all);

/**
 * idr_destroy - release all cached layers within an idr tree
 * @idp: idr handle
 */
void idr_destroy(struct idr *idp)
{
        while (idp->id_free_cnt) {
                struct idr_layer *p = get_from_free_list(idp);
                kmem_cache_free(idr_layer_cache, p);
        }
}
EXPORT_SYMBOL(idr_destroy);

/**
 * idr_find - return pointer for given id
 * @idp: idr handle
 * @id: lookup key
 *
 * Return the pointer given the id it has been registered with.  A %NULL
 * return indicates that @id is not valid or you passed %NULL in
 * idr_get_new().
 *
 * This function can be called under rcu_read_lock(), given that the leaf
 * pointers' lifetimes are correctly managed.
 */
void *idr_find(struct idr *idp, int id)
{
        int n;
        struct idr_layer *p;

        p = rcu_dereference(idp->top);
        if (!p)
                return NULL;
        n = (p->layer+1) * IDR_BITS;

        /* Mask off upper bits we don't use for the search. */
        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return NULL;
        BUG_ON(n == 0);

        while (n > 0 && p) {
                n -= IDR_BITS;
                BUG_ON(n != p->layer*IDR_BITS);
                p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
        }
        return (void *)p;
}
EXPORT_SYMBOL(idr_find);
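
/*
 * Example (illustrative sketch, not part of the original source): a
 * lock-free lookup under RCU.  `my_idr` and `struct my_obj` are
 * hypothetical caller-side names; the stored object must stay alive
 * for an RCU grace period after its id is removed.
 *
 *      struct my_obj *obj;
 *
 *      rcu_read_lock();
 *      obj = idr_find(&my_idr, id);
 *      if (obj)
 *              ... use obj, or take a reference before unlock ...
 *      rcu_read_unlock();
 */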

/**
 * idr_for_each - iterate through all stored pointers
 * @idp: idr handle
 * @fn: function to be called for each pointer
 * @data: data passed back to callback function
 *
 * Iterate over the pointers registered with the given idr.  The
 * callback function will be called for each pointer currently
 * registered, passing the id, the pointer and the data pointer passed
 * to this function.  It is not safe to modify the idr tree while in
 * the callback, so functions such as idr_get_new and idr_remove are
 * not allowed.
 *
 * We check the return of @fn each time.  If it returns anything other
 * than %0, we break out and return that value.
 *
 * The caller must serialize idr_for_each() vs idr_get_new() and idr_remove().
 */
int idr_for_each(struct idr *idp,
                 int (*fn)(int id, void *p, void *data), void *data)
{
        int n, id, max, error = 0;
        struct idr_layer *p;
        struct idr_layer *pa[MAX_LEVEL];
        struct idr_layer **paa = &pa[0];

        n = idp->layers * IDR_BITS;
        p = rcu_dereference(idp->top);
        max = 1 << n;

        id = 0;
        while (id < max) {
                while (n > 0 && p) {
                        n -= IDR_BITS;
                        *paa++ = p;
                        p = rcu_dereference(p->ary[(id >> n) & IDR_MASK]);
                }

                if (p) {
                        error = fn(id, (void *)p, data);
                        if (error)
                                break;
                }

                id += 1 << n;
                while (n < fls(id)) {
                        n += IDR_BITS;
                        p = *--paa;
                }
        }

        return error;
}
EXPORT_SYMBOL(idr_for_each);
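
/*
 * Example (illustrative sketch, not part of the original source): a
 * callback that frees every stored object during teardown, following
 * the clean-up sequence described at idr_remove_all().  `free_one` and
 * `my_idr` are hypothetical caller-side names.
 *
 *      static int free_one(int id, void *p, void *data)
 *      {
 *              kfree(p);       // p is the pointer stored under this id
 *              return 0;       // a non-zero return would stop the walk
 *      }
 *
 *      idr_for_each(&my_idr, free_one, NULL);
 *      idr_remove_all(&my_idr);
 *      idr_destroy(&my_idr);
 */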

/**
 * idr_replace - replace pointer for given id
 * @idp: idr handle
 * @ptr: pointer you want associated with the id
 * @id: lookup key
 *
 * Replace the pointer registered with an id and return the old value.
 * A %-ENOENT return indicates that @id was not found.
 * A %-EINVAL return indicates that @id was not within valid constraints.
 *
 * The caller must serialize with writers.
 */
void *idr_replace(struct idr *idp, void *ptr, int id)
{
        int n;
        struct idr_layer *p, *old_p;

        p = idp->top;
        if (!p)
                return ERR_PTR(-EINVAL);

        n = (p->layer+1) * IDR_BITS;

        id &= MAX_ID_MASK;

        if (id >= (1 << n))
                return ERR_PTR(-EINVAL);

        n -= IDR_BITS;
        while ((n > 0) && p) {
                p = p->ary[(id >> n) & IDR_MASK];
                n -= IDR_BITS;
        }

        n = id & IDR_MASK;
        if (unlikely(p == NULL || !test_bit(n, &p->bitmap)))
                return ERR_PTR(-ENOENT);

        old_p = p->ary[n];
        rcu_assign_pointer(p->ary[n], ptr);

        return old_p;
}
EXPORT_SYMBOL(idr_replace);
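
/*
 * Example (illustrative sketch, not part of the original source): since
 * idr_replace() returns either the old pointer or an ERR_PTR() value,
 * callers should test with IS_ERR().  `my_idr` and `new_obj` are
 * hypothetical caller-side names.
 *
 *      void *old = idr_replace(&my_idr, new_obj, id);
 *
 *      if (IS_ERR(old))
 *              return PTR_ERR(old);    // -EINVAL or -ENOENT
 *      ... `old` is the previously stored pointer; free or reuse it ...
 */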

void __init idr_init_cache(void)
{
        idr_layer_cache = kmem_cache_create("idr_layer_cache",
                                            sizeof(struct idr_layer),
                                            0, SLAB_PANIC, NULL);
}

/**
 * idr_init - initialize idr handle
 * @idp: idr handle
 *
 * This function is used to set up the handle (@idp) that you will pass
 * to the rest of the functions.
 */
void idr_init(struct idr *idp)
{
        memset(idp, 0, sizeof(struct idr));
        spin_lock_init(&idp->lock);
}
EXPORT_SYMBOL(idr_init);

/*
 * IDA - IDR based ID allocator
 *
 * This is an id allocator without id -> pointer translation.  Memory
 * usage is much lower than with a full-blown idr because each id only
 * occupies a bit.  ida uses a custom leaf node which contains
 * IDA_BITMAP_BITS slots.
 *
 * 2007-04-25  written by Tejun Heo <htejun@gmail.com>
 */

static void free_bitmap(struct ida *ida, struct ida_bitmap *bitmap)
{
        unsigned long flags;

        if (!ida->free_bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                if (!ida->free_bitmap) {
                        ida->free_bitmap = bitmap;
                        bitmap = NULL;
                }
                spin_unlock_irqrestore(&ida->idr.lock, flags);
        }

        kfree(bitmap);
}

/**
 * ida_pre_get - reserve resources for ida allocation
 * @ida: ida handle
 * @gfp_mask: memory allocation flags
 *
 * This function should be called prior to locking and calling the
 * following function.  It preallocates enough memory to satisfy the
 * worst possible allocation.
 *
 * If the system is REALLY out of memory this function returns 0,
 * otherwise 1.
 */
int ida_pre_get(struct ida *ida, gfp_t gfp_mask)
{
        /* allocate idr_layers */
        if (!idr_pre_get(&ida->idr, gfp_mask))
                return 0;

        /* allocate free_bitmap */
        if (!ida->free_bitmap) {
                struct ida_bitmap *bitmap;

                bitmap = kmalloc(sizeof(struct ida_bitmap), gfp_mask);
                if (!bitmap)
                        return 0;

                free_bitmap(ida, bitmap);
        }

        return 1;
}
EXPORT_SYMBOL(ida_pre_get);

/**
 * ida_get_new_above - allocate new ID above or equal to a start id
 * @ida: ida handle
 * @starting_id: id to start search at
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID above or equal to @starting_id.  It should be
 * called with any required locks.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range @starting_id ... 0x7fffffff.
 */
int ida_get_new_above(struct ida *ida, int starting_id, int *p_id)
{
        struct idr_layer *pa[MAX_LEVEL];
        struct ida_bitmap *bitmap;
        unsigned long flags;
        int idr_id = starting_id / IDA_BITMAP_BITS;
        int offset = starting_id % IDA_BITMAP_BITS;
        int t, id;

restart:
        /* get vacant slot */
        t = idr_get_empty_slot(&ida->idr, idr_id, pa);
        if (t < 0)
                return _idr_rc_to_errno(t);

        if (t * IDA_BITMAP_BITS >= MAX_ID_BIT)
                return -ENOSPC;

        if (t != idr_id)
                offset = 0;
        idr_id = t;

        /* if bitmap isn't there, create a new one */
        bitmap = (void *)pa[0]->ary[idr_id & IDR_MASK];
        if (!bitmap) {
                spin_lock_irqsave(&ida->idr.lock, flags);
                bitmap = ida->free_bitmap;
                ida->free_bitmap = NULL;
                spin_unlock_irqrestore(&ida->idr.lock, flags);

                if (!bitmap)
                        return -EAGAIN;

                memset(bitmap, 0, sizeof(struct ida_bitmap));
                rcu_assign_pointer(pa[0]->ary[idr_id & IDR_MASK],
                                   (void *)bitmap);
                pa[0]->count++;
        }

        /* look for an empty slot */
        t = find_next_zero_bit(bitmap->bitmap, IDA_BITMAP_BITS, offset);
        if (t == IDA_BITMAP_BITS) {
                /* no empty slot after offset, continue to the next chunk */
                idr_id++;
                offset = 0;
                goto restart;
        }

        id = idr_id * IDA_BITMAP_BITS + t;
        if (id >= MAX_ID_BIT)
                return -ENOSPC;

        __set_bit(t, bitmap->bitmap);
        if (++bitmap->nr_busy == IDA_BITMAP_BITS)
                idr_mark_full(pa, idr_id);

        *p_id = id;

        /* Each leaf node can handle nearly a thousand slots and the
         * whole idea of ida is to have a small memory footprint.
         * Throw away extra resources one by one after each successful
         * allocation.
         */
        if (ida->idr.id_free_cnt || ida->free_bitmap) {
                struct idr_layer *p = get_from_free_list(&ida->idr);
                if (p)
                        kmem_cache_free(idr_layer_cache, p);
        }

        return 0;
}
EXPORT_SYMBOL(ida_get_new_above);
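
/*
 * Example (illustrative sketch, not part of the original source): the
 * ida variant of the retry loop, plus release.  `my_ida` and `my_lock`
 * are hypothetical caller-side names.
 *
 *      int id, err;
 *
 *   again:
 *      if (!ida_pre_get(&my_ida, GFP_KERNEL))
 *              return -ENOMEM;
 *      spin_lock(&my_lock);
 *      err = ida_get_new(&my_ida, &id);
 *      spin_unlock(&my_lock);
 *      if (err == -EAGAIN)
 *              goto again;
 *      else if (err)
 *              return err;
 *      ...
 *      ida_remove(&my_ida, id);        // give the id back when done
 */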

/**
 * ida_get_new - allocate new ID
 * @ida: ida handle
 * @p_id: pointer to the allocated handle
 *
 * Allocate new ID.  It should be called with any required locks.
 *
 * If memory is required, it will return %-EAGAIN; you should unlock
 * and go back to the ida_pre_get() call.  If the ida is full, it will
 * return %-ENOSPC.
 *
 * @p_id returns a value in the range 0 ... 0x7fffffff.
 */
int ida_get_new(struct ida *ida, int *p_id)
{
        return ida_get_new_above(ida, 0, p_id);
}
EXPORT_SYMBOL(ida_get_new);

/**
 * ida_remove - remove the given ID
 * @ida: ida handle
 * @id: ID to free
 */
void ida_remove(struct ida *ida, int id)
{
        struct idr_layer *p = ida->idr.top;
        int shift = (ida->idr.layers - 1) * IDR_BITS;
        int idr_id = id / IDA_BITMAP_BITS;
        int offset = id % IDA_BITMAP_BITS;
        int n;
        struct ida_bitmap *bitmap;

        /* clear full bits while looking up the leaf idr_layer */
        while ((shift > 0) && p) {
                n = (idr_id >> shift) & IDR_MASK;
                __clear_bit(n, &p->bitmap);
                p = p->ary[n];
                shift -= IDR_BITS;
        }

        if (p == NULL)
                goto err;

        n = idr_id & IDR_MASK;
        __clear_bit(n, &p->bitmap);

        bitmap = (void *)p->ary[n];
        if (!test_bit(offset, bitmap->bitmap))
                goto err;

        /* update bitmap and remove it if empty */
        __clear_bit(offset, bitmap->bitmap);
        if (--bitmap->nr_busy == 0) {
                __set_bit(n, &p->bitmap);       /* to please idr_remove() */
                idr_remove(&ida->idr, idr_id);
                free_bitmap(ida, bitmap);
        }

        return;

err:
        printk(KERN_WARNING
               "ida_remove called for id=%d which is not allocated.\n", id);
}
EXPORT_SYMBOL(ida_remove);

/**
 * ida_destroy - release all cached layers within an ida tree
 * @ida: ida handle
 */
void ida_destroy(struct ida *ida)
{
        idr_destroy(&ida->idr);
        kfree(ida->free_bitmap);
}
EXPORT_SYMBOL(ida_destroy);

/**
 * ida_init - initialize ida handle
 * @ida: ida handle
 *
 * This function is used to set up the handle (@ida) that you will pass
 * to the rest of the functions.
 */
void ida_init(struct ida *ida)
{
        memset(ida, 0, sizeof(struct ida));
        idr_init(&ida->idr);
}
EXPORT_SYMBOL(ida_init);