/*********************************************************************
 *
 * Filename:      irqueue.c
 * Version:       0.3
 * Description:   General queue implementation
 * Status:        Experimental.
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Tue Jun 9 13:29:31 1998
 * Modified at:   Sun Dec 12 13:48:22 1999
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 * Modified at:   Thu Jan 4 14:29:10 CET 2001
 * Modified by:   Marc Zyngier <mzyngier@freesurf.fr>
 *
 * Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no>
 * Copyright (C) 1998, Dag Brattli,
 * All Rights Reserved.
 *
 * This code is taken from the Vortex Operating System written by Aage
 * Kvalnes. Aage has agreed that this code can use the GPL licence,
 * although he does not use that licence in his own code.
 *
 * This copyright does however _not_ include the ELF hash() function,
 * for which I currently don't know the licence or copyright. Please
 * inform me if you know.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 * Neither Dag Brattli nor University of Tromsø admit liability nor
 * provide warranty for any of this software. This material is
 * provided "AS-IS" and at no charge.
 *
 ********************************************************************/
/*
 * NOTE :
 * There are various problems with this package :
 *    o the hash function for ints is pathetic (but could be changed)
 *    o locking is sometimes suspicious (especially during enumeration)
 *    o most users have only a few elements (== overhead)
 *    o most users never use search, so they don't benefit from hashing
 * Problems already fixed :
 *    o not 64 bit compliant (most users do hashv = (int) self)
 *    o hashbin_remove() is broken => use hashbin_remove_this()
 * I think most users would be better served by a simple linked list
 * (like include/linux/list.h) with a global spinlock per list.
 * Jean II
 */
/*
 * Notes on concurrent access to hashbins and other SMP issues
 * ------------------------------------------------------------
 * In the IrDA stack, hashbins are very often a global repository of
 * information, and are therefore used in a very asynchronous manner
 * following various events (driver calls, timers, user calls...).
 * It is therefore very often highly important to consider the
 * management of concurrent access to the hashbin and how to guarantee
 * the consistency of the operations on it.
 *
 * First, we need to define the objectives of locking :
 *    1) Protect user data (content pointed to by the hashbin)
 *    2) Protect the hashbin structure itself (linked list in each bin)
 *
 * OLD LOCKING
 * -----------
 *
 * The previous locking strategies, HB_LOCAL and HB_GLOBAL, were both
 * inadequate in *both* aspects.
 *    o HB_GLOBAL was using a spinlock for each bin (local locking).
 *    o HB_LOCAL was disabling irqs on *all* CPUs, so used a single
 *      global semaphore.
 * The problems were :
 *    A) Global irq disabling is no longer supported by the kernel
 *    B) No protection for the hashbin struct global data
 *        o hashbin_delete()
 *        o hb_current
 *    C) No protection for user data in some cases
 *
 * A) HB_LOCAL uses global irq disabling, so it doesn't work on kernel
 *    2.5.X. Even when it is supported (kernel 2.4.X and earlier), its
 *    performance is not satisfactory on SMP setups. Most hashbins were
 *    HB_LOCAL, so (A) definitely needs fixing.
 * B) HB_LOCAL could be modified to fix (B). However, because HB_GLOBAL
 *    locks only the individual bins, it will never be able to lock the
 *    global data, so it can't do (B).
 * C) Some functions return a pointer to data that is still in the
 *    hashbin :
 *        o hashbin_find()
 *        o hashbin_get_first()
 *        o hashbin_get_next()
 *    As the data is still in the hashbin, it may be changed or freed
 *    while the caller is examining the data. In those cases, locking
 *    can't be done within the hashbin, but must include the use of the
 *    data within the caller.
 *    The caller can easily do this with HB_LOCAL (just disable irqs).
 *    However, this is impossible with HB_GLOBAL because the caller has
 *    no way to know the proper bin, so it doesn't know which spinlock
 *    to use.
 *
 * Quick summary : we can no longer use HB_LOCAL, and HB_GLOBAL is
 * fundamentally broken and will never work.
 *
 * NEW LOCKING
 * -----------
 *
 * To fix those problems, I've introduced a few changes in the
 * hashbin locking :
 *    1) New HB_LOCK scheme
 *    2) hashbin->hb_spinlock
 *    3) New hashbin usage policy
 *
 * HB_LOCK :
 * -------
 * HB_LOCK is a locking scheme intermediate between the old HB_LOCAL
 * and HB_GLOBAL. It uses a single spinlock to protect the whole content
 * of the hashbin. As it is a single spinlock, it can protect the global
 * data of the hashbin and not only the bins themselves.
 * HB_LOCK can only protect some of the hashbin calls, so it only locks
 * calls that can be made 100% safe and leaves other calls unprotected.
 * HB_LOCK is in theory slower than HB_GLOBAL, but as the hashbin
 * content is always small, contention is not high, so it doesn't matter
 * much. HB_LOCK is probably faster than HB_LOCAL.
 *
 * hashbin->hb_spinlock :
 * --------------------
 * The spinlock that HB_LOCK uses is available to the caller, so that
 * the caller can protect unprotected calls (see below).
 * If the caller wants to do its own locking entirely (HB_NOLOCK), it
 * can do so and may safely use this spinlock.
 * Locking is done like this :
 *    spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 * Releasing the lock :
 *    spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
 *
 * Safe & Protected calls :
 * ----------------------
 * The following calls are safe or protected via HB_LOCK :
 *    o hashbin_new()          -> safe
 *    o hashbin_delete()
 *    o hashbin_insert()
 *    o hashbin_remove_first()
 *    o hashbin_remove()
 *    o hashbin_remove_this()
 *    o HASHBIN_GET_SIZE()     -> atomic
 *
 * The following calls only protect the hashbin itself :
 *    o hashbin_lock_find()
 *    o hashbin_find_next()
 *
 * Unprotected calls :
 * -----------------
 * The following calls need to be protected by the caller :
 *    o hashbin_find()
 *    o hashbin_get_first()
 *    o hashbin_get_next()
 *
 * Locking Policy :
 * --------------
 * If the hashbin is used only in a single thread of execution
 * (explicitly or implicitly), you can use HB_NOLOCK.
 * If the calling module already provides concurrent access protection,
 * you may use HB_NOLOCK.
 *
 * In all other cases, you need to use HB_LOCK and lock the hashbin
 * every time before calling one of the unprotected calls. You must also
 * use the pointer returned by the unprotected call within the locked
 * region.
 *
 * Extra care for enumeration :
 * --------------------------
 * hashbin_get_first() and hashbin_get_next() use the hashbin to
 * store the current position, in hb_current.
 * As long as the hashbin remains locked, this is safe. If you unlock
 * the hashbin, the current position may change if anybody else modifies
 * or enumerates the hashbin.
 * Summary : do the full enumeration while locked.
 *
 * Alternatively, you may use hashbin_find_next(). But, this will
 * be slower, is more complex to use and doesn't protect the hashbin
 * content. So, care is needed here as well.
 *
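 * As an illustration only (a minimal sketch, not part of the original
 * notes), a locked enumeration could look like this, assuming a hashbin
 * created with hashbin_new(HB_LOCK) and a hypothetical caller type
 * "struct foo" that embeds an irda_queue_t as its first member. The
 * pointers returned by the enumeration are only used inside the locked
 * region, as required above :
 *
 *        unsigned long flags;
 *        struct foo *entry;
 *
 *        spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 *        for (entry = (struct foo *) hashbin_get_first(hashbin);
 *             entry != NULL;
 *             entry = (struct foo *) hashbin_get_next(hashbin))
 *                do_something(entry);
 *        spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
 *
 * (do_something() stands for whatever per-entry work the caller needs;
 * it is hypothetical and must not sleep while the spinlock is held.)
 *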
 * Other issues :
 * ------------
 * I believe that we are overdoing it by using spin_lock_irqsave()
 * and we should use only spin_lock_bh() or similar. But, I don't have
 * the balls to try it out.
 * Don't believe that because hashbins are now (somewhat) SMP safe
 * that the rest of the code is. Higher layers tend to be safest,
 * but LAP and LMP would need some serious dedicated love.
 *
 * Jean II
 */
#include <linux/module.h>

#include <net/irda/irda.h>
#include <net/irda/irqueue.h>

/************************ QUEUE SUBROUTINES ************************/

/*
 * Hashbin
 */
#define GET_HASHBIN(x) ( x & HASHBIN_MASK )

/*
 * Function hash (name)
 *
 *    This function hashes the input string 'name' using the ELF hash
 *    function for strings.
 */
static __u32 hash( const char* name)
{
        __u32 h = 0;
        __u32 g;

        while(*name) {
                h = (h<<4) + *name++;
                if ((g = (h & 0xf0000000)))
                        h ^= g >> 24;
                h &= ~g;
        }
        return h;
}
/*
 * Function enqueue_first (queue, proc)
 *
 *    Insert item first in queue.
 *
 */
static void enqueue_first(irda_queue_t **queue, irda_queue_t* element)
{
        IRDA_DEBUG( 4, "%s()\n", __func__);

        /*
         * Check if queue is empty.
         */
        if ( *queue == NULL ) {
                /*
                 * Queue is empty. Insert one element into the queue.
                 */
                element->q_next = element->q_prev = *queue = element;

        } else {
                /*
                 * Queue is not empty. Insert element into front of queue.
                 */
                element->q_next = (*queue);
                (*queue)->q_prev->q_next = element;
                element->q_prev = (*queue)->q_prev;
                (*queue)->q_prev = element;
                (*queue) = element;
        }
}
/*
 * Function dequeue_first (queue)
 *
 *    Remove first entry in queue
 *
 */
static irda_queue_t *dequeue_first(irda_queue_t **queue)
{
        irda_queue_t *ret;

        IRDA_DEBUG( 4, "dequeue_first()\n");

        /*
         * Set return value
         */
        ret = *queue;

        if ( *queue == NULL ) {
                /*
                 * Queue was empty.
                 */
        } else if ( (*queue)->q_next == *queue ) {
                /*
                 * Queue only contained a single element. It will now be
                 * empty.
                 */
                *queue = NULL;
        } else {
                /*
                 * Queue contained several elements. Remove the first one.
                 */
                (*queue)->q_prev->q_next = (*queue)->q_next;
                (*queue)->q_next->q_prev = (*queue)->q_prev;
                *queue = (*queue)->q_next;
        }

        /*
         * Return the removed entry (or NULL if the queue was empty).
         */
        return ret;
}
/*
 * Function dequeue_general (queue, element)
 *
 *    Remove the given element from the queue
 *
 */
static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element)
{
        irda_queue_t *ret;

        IRDA_DEBUG( 4, "dequeue_general()\n");

        /*
         * Set return value
         */
        ret = *queue;

        if ( *queue == NULL ) {
                /*
                 * Queue was empty.
                 */
        } else if ( (*queue)->q_next == *queue ) {
                /*
                 * Queue only contained a single element. It will now be
                 * empty.
                 */
                *queue = NULL;
        } else {
                /*
                 * Remove specific element.
                 */
                element->q_prev->q_next = element->q_next;
                element->q_next->q_prev = element->q_prev;
                if ( (*queue) == element)
                        (*queue) = element->q_next;
        }

        /*
         * Return the removed entry (or NULL if the queue was empty).
         */
        return ret;
}
/************************ HASHBIN MANAGEMENT ************************/

/*
 * Function hashbin_new (type)
 *
 *    Create hashbin!
 *
 */
hashbin_t *hashbin_new(int type)
{
        hashbin_t* hashbin;

        /*
         * Allocate new hashbin
         */
        hashbin = kzalloc(sizeof(*hashbin), GFP_ATOMIC);
        if (!hashbin)
                return NULL;

        /*
         * Initialize structure
         */
        hashbin->hb_type = type;
        hashbin->magic = HB_MAGIC;
        //hashbin->hb_current = NULL;

        /* Make sure all spinlocks are unlocked */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_init(&hashbin->hb_spinlock);
        }

        return hashbin;
}
EXPORT_SYMBOL(hashbin_new);
/*
 * Function hashbin_delete (hashbin, free_func)
 *
 *    Destroy hashbin. The free_func can be a user-supplied special routine
 *    for deallocating this structure if it's complex. If not, the user can
 *    just supply kfree, which should take care of the job.
 */
#ifdef CONFIG_LOCKDEP
static int hashbin_lock_depth = 0;
#endif
int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
{
        irda_queue_t* queue;
        unsigned long flags = 0;
        int i;

        IRDA_ASSERT(hashbin != NULL, return -1;);
        IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave_nested(&hashbin->hb_spinlock, flags,
                                         hashbin_lock_depth++);
        }

        /*
         * Free the entries in the hashbin, TODO: use hashbin_clear when
         * it has been shown to work
         */
        for (i = 0; i < HASHBIN_SIZE; i ++ ) {
                queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
                while (queue ) {
                        if (free_func)
                                (*free_func)(queue);
                        queue = dequeue_first(
                                (irda_queue_t**) &hashbin->hb_queue[i]);
                }
        }

        /* Cleanup local data */
        hashbin->hb_current = NULL;
        hashbin->magic = ~HB_MAGIC;

        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
#ifdef CONFIG_LOCKDEP
                hashbin_lock_depth--;
#endif
        }

        /*
         * Free the hashbin structure
         */
        kfree(hashbin);

        return 0;
}
EXPORT_SYMBOL(hashbin_delete);
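
/*
 * A minimal lifecycle sketch (hypothetical caller, not an existing user):
 * create a hashbin protected by HB_LOCK, then destroy it, letting kfree()
 * serve as the free_func since the comment above says that is enough for
 * simple kmalloc'ed entries :
 *
 *        hashbin_t *hb;
 *
 *        hb = hashbin_new(HB_LOCK);
 *        if (hb == NULL)
 *                return -ENOMEM;
 *        ...
 *        hashbin_delete(hb, (FREE_FUNC) kfree);
 */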
/********************* HASHBIN LIST OPERATIONS *********************/

/*
 * Function hashbin_insert (hashbin, entry, hashv, name)
 *
 *    Insert an entry into the hashbin
 *
 */
void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
                    const char* name)
{
        unsigned long flags = 0;
        int bin;

        IRDA_DEBUG( 4, "%s()\n", __func__);

        IRDA_ASSERT( hashbin != NULL, return;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;);

        /*
         * Locate hashbin
         */
        if ( name )
                hashv = hash( name );
        bin = GET_HASHBIN( hashv );

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock */

        /*
         * Store name and key
         */
        entry->q_hash = hashv;
        if ( name )
                strlcpy( entry->q_name, name, sizeof(entry->q_name));

        /*
         * Insert new entry first
         */
        enqueue_first( (irda_queue_t**) &hashbin->hb_queue[ bin ],
                       entry);
        hashbin->hb_size++;

        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock */
}
EXPORT_SYMBOL(hashbin_insert);
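
/*
 * A minimal insertion sketch (hypothetical caller; assumes a type
 * "struct foo" embedding an irda_queue_t as its first member and a
 * hashbin "hb" created with HB_LOCK). The object pointer itself is used
 * as the key, which matches the most common usage in the stack, and the
 * entry is later removed with hashbin_remove_this() rather than
 * hashbin_remove(), as recommended in the notes at the top of this file :
 *
 *        struct foo *self = kzalloc(sizeof(*self), GFP_ATOMIC);
 *
 *        if (self == NULL)
 *                return -ENOMEM;
 *        hashbin_insert(hb, (irda_queue_t *) self, (long) self, NULL);
 *        ...
 *        hashbin_remove_this(hb, (irda_queue_t *) self);
 *        kfree(self);
 */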
/*
 * Function hashbin_remove_first (hashbin)
 *
 *    Remove first entry of the hashbin
 *
 * Note : this function no longer uses hashbin_remove(), but does things
 * similar to hashbin_remove_this(), so it can be considered safe.
 * Jean II
 */
void *hashbin_remove_first( hashbin_t *hashbin)
{
        unsigned long flags = 0;
        irda_queue_t *entry = NULL;

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock */

        entry = hashbin_get_first( hashbin);
        if ( entry != NULL) {
                int bin;
                long hashv;
                /*
                 * Locate hashbin
                 */
                hashv = entry->q_hash;
                bin = GET_HASHBIN( hashv );

                /*
                 * Dequeue the entry...
                 */
                dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
                                 (irda_queue_t*) entry );
                hashbin->hb_size--;
                entry->q_next = NULL;
                entry->q_prev = NULL;

                /*
                 * Check if this item is the currently selected item, and in
                 * that case we must reset hb_current
                 */
                if ( entry == hashbin->hb_current)
                        hashbin->hb_current = NULL;
        }

        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock */

        return entry;
}
/*
 * Function hashbin_remove (hashbin, hashv, name)
 *
 *    Remove entry with the given hashv or name
 *
 * The use of this function is highly discouraged, because the whole
 * concept behind hashbin_remove() is broken. In many cases, it's not
 * possible to guarantee the uniqueness of the index (either hashv or name),
 * leading to removing the WRONG entry.
 * The only simple safe use is :
 *    hashbin_remove(hashbin, (int) self, NULL);
 * In other cases, you must think hard to guarantee uniqueness of the index.
 * Jean II
 */
void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
{
        int bin, found = FALSE;
        unsigned long flags = 0;
        irda_queue_t* entry;

        IRDA_DEBUG( 4, "%s()\n", __func__);

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

        /*
         * Locate hashbin
         */
        if ( name )
                hashv = hash( name );
        bin = GET_HASHBIN( hashv );

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock */

        /*
         * Search for entry
         */
        entry = hashbin->hb_queue[ bin ];
        if ( entry ) {
                do {
                        /*
                         * Check for key
                         */
                        if ( entry->q_hash == hashv ) {
                                /*
                                 * Name compare too?
                                 */
                                if ( name ) {
                                        if ( strcmp( entry->q_name, name) == 0)
                                        {
                                                found = TRUE;
                                                break;
                                        }
                                } else {
                                        found = TRUE;
                                        break;
                                }
                        }
                        entry = entry->q_next;
                } while ( entry != hashbin->hb_queue[ bin ] );
        }

        /*
         * If entry was found, dequeue it
         */
        if ( found ) {
                dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
                                 (irda_queue_t*) entry );
                hashbin->hb_size--;

                /*
                 * Check if this item is the currently selected item, and in
                 * that case we must reset hb_current
                 */
                if ( entry == hashbin->hb_current)
                        hashbin->hb_current = NULL;
        }

        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock */

        /* Return */
        if ( found )
                return entry;
        else
                return NULL;
}
EXPORT_SYMBOL(hashbin_remove);
/*
 * Function hashbin_remove_this (hashbin, entry)
 *
 *    Remove the given entry from the hashbin
 *
 * In some cases, the user of hashbin can't guarantee the uniqueness
 * of either the hashv or name.
 * In those cases, using the above function is guaranteed to cause trouble,
 * so we use this one instead...
 * And by the way, it's also faster, because we skip the search phase ;-)
 */
void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
{
        unsigned long flags = 0;
        int bin;
        long hashv;

        IRDA_DEBUG( 4, "%s()\n", __func__);

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
        IRDA_ASSERT( entry != NULL, return NULL;);

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock */

        /* Check if valid and not already removed... */
        if((entry->q_next == NULL) || (entry->q_prev == NULL)) {
                entry = NULL;
                goto out;
        }

        /*
         * Locate hashbin
         */
        hashv = entry->q_hash;
        bin = GET_HASHBIN( hashv );

        /*
         * Dequeue the entry...
         */
        dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
                         (irda_queue_t*) entry );
        hashbin->hb_size--;
        entry->q_next = NULL;
        entry->q_prev = NULL;

        /*
         * Check if this item is the currently selected item, and in
         * that case we must reset hb_current
         */
        if ( entry == hashbin->hb_current)
                hashbin->hb_current = NULL;
out:
        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock */

        return entry;
}
EXPORT_SYMBOL(hashbin_remove_this);
/*********************** HASHBIN ENUMERATION ***********************/

/*
 * Function hashbin_find (hashbin, hashv, name)
 *
 *    Find item with the given hashv or name
 *
 */
void* hashbin_find( hashbin_t* hashbin, long hashv, const char* name )
{
        int bin;
        irda_queue_t* entry;

        IRDA_DEBUG( 4, "hashbin_find()\n");

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

        /*
         * Locate hashbin
         */
        if ( name )
                hashv = hash( name );
        bin = GET_HASHBIN( hashv );

        /*
         * Search for entry
         */
        entry = hashbin->hb_queue[ bin];
        if ( entry ) {
                do {
                        /*
                         * Check for key
                         */
                        if ( entry->q_hash == hashv ) {
                                /*
                                 * Name compare too?
                                 */
                                if ( name ) {
                                        if ( strcmp( entry->q_name, name ) == 0 ) {
                                                return entry;
                                        }
                                } else {
                                        return entry;
                                }
                        }
                        entry = entry->q_next;
                } while ( entry != hashbin->hb_queue[ bin ] );
        }

        return NULL;
}
EXPORT_SYMBOL(hashbin_find);
/*
 * Function hashbin_lock_find (hashbin, hashv, name)
 *
 *    Find item with the given hashv or name
 *
 * Same, but with spinlock protection...
 * I call it safe, but it's only safe with respect to the hashbin, not its
 * content. - Jean II
 */
void* hashbin_lock_find( hashbin_t* hashbin, long hashv, const char* name )
{
        unsigned long flags = 0;
        irda_queue_t* entry;

        /* Synchronize */
        spin_lock_irqsave(&hashbin->hb_spinlock, flags);

        /*
         * Search for entry
         */
        entry = (irda_queue_t* ) hashbin_find( hashbin, hashv, name );

        /* Release lock */
        spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

        return entry;
}
EXPORT_SYMBOL(hashbin_lock_find);
/*
 * Function hashbin_find_next (hashbin, hashv, name, pnext)
 *
 *    Find an item with the given hashv or name, and its successor
 *
 * This function allows doing concurrent enumerations without the
 * need to lock over the whole session, because the caller keeps the
 * context of the search. On the other hand, it might fail and return
 * NULL if the entry is removed. - Jean II
 */
void* hashbin_find_next( hashbin_t* hashbin, long hashv, const char* name,
                         void ** pnext)
{
        unsigned long flags = 0;
        irda_queue_t* entry;

        /* Synchronize */
        spin_lock_irqsave(&hashbin->hb_spinlock, flags);

        /*
         * Search for current entry
         * This allows checking whether the current item is still in the
         * hashbin or has been removed.
         */
        entry = (irda_queue_t* ) hashbin_find( hashbin, hashv, name );

        /*
         * Trick hashbin_get_next() to return what we want
         */
        if(entry) {
                hashbin->hb_current = entry;
                *pnext = hashbin_get_next( hashbin );
        } else
                *pnext = NULL;

        /* Release lock */
        spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

        return entry;
}
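
/*
 * A resumable enumeration sketch (hypothetical caller; assumes entries
 * keyed by their own pointer, i.e. inserted with hashv == (long) self and
 * name == NULL, a hashbin "hb" created with HB_LOCK, and a "struct foo"
 * embedding an irda_queue_t as its first member). "cur" is an entry the
 * caller obtained earlier; only its key is carried between steps, so the
 * hashbin is never locked across the caller's own work. As noted above,
 * this does not protect the entries themselves :
 *
 *        void *next;
 *
 *        while (cur != NULL) {
 *                if (hashbin_find_next(hb, (long) cur, NULL, &next) == NULL)
 *                        break;
 *                do_something(cur);
 *                cur = next;
 *        }
 *
 * (do_something() is a hypothetical per-entry helper; the break covers
 * the case where cur has been removed since the previous step.)
 */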
/*
 * Function hashbin_get_first (hashbin)
 *
 *    Get a pointer to the first element in the hashbin. This function
 *    must be called before any calls to hashbin_get_next()!
 *
 */
irda_queue_t *hashbin_get_first( hashbin_t* hashbin)
{
        irda_queue_t *entry;
        int i;

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

        if ( hashbin == NULL)
                return NULL;

        for ( i = 0; i < HASHBIN_SIZE; i ++ ) {
                entry = hashbin->hb_queue[ i];
                if ( entry) {
                        hashbin->hb_current = entry;
                        return entry;
                }
        }

        /*
         * Did not find any item in hashbin
         */
        return NULL;
}
EXPORT_SYMBOL(hashbin_get_first);
/*
 * Function hashbin_get_next (hashbin)
 *
 *    Get next item in hashbin. A series of hashbin_get_next() calls must
 *    be started by a call to hashbin_get_first(). The function returns
 *    NULL when all items have been traversed.
 *
 * The context of the search is stored within the hashbin, so you must
 * protect yourself from concurrent enumerations. - Jean II
 */
irda_queue_t *hashbin_get_next( hashbin_t *hashbin)
{
        irda_queue_t* entry;
        int bin;
        int i;

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

        if ( hashbin->hb_current == NULL) {
                IRDA_ASSERT( hashbin->hb_current != NULL, return NULL;);
                return NULL;
        }
        entry = hashbin->hb_current->q_next;
        bin = GET_HASHBIN( entry->q_hash);

        /*
         * Make sure that we are not back at the beginning of the queue
         * again
         */
        if ( entry != hashbin->hb_queue[ bin ]) {
                hashbin->hb_current = entry;

                return entry;
        }

        /*
         * Check that this is not the last queue in hashbin
         */
        if ( bin >= HASHBIN_SIZE)
                return NULL;

        /*
         * Move to next queue in hashbin
         */
        bin++;
        for ( i = bin; i < HASHBIN_SIZE; i++ ) {
                entry = hashbin->hb_queue[ i];
                if ( entry) {
                        hashbin->hb_current = entry;

                        return entry;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(hashbin_get_next);