/*********************************************************************
 *
 * Filename:      irqueue.c
 * Version:       0.3
 * Description:   General queue implementation
 * Status:        Experimental.
 * Author:        Dag Brattli <dagb@cs.uit.no>
 * Created at:    Tue Jun  9 13:29:31 1998
 * Modified at:   Sun Dec 12 13:48:22 1999
 * Modified by:   Dag Brattli <dagb@cs.uit.no>
 * Modified at:   Thu Jan  4 14:29:10 CET 2001
 * Modified by:   Marc Zyngier <mzyngier@freesurf.fr>
 *
 *     Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no>
 *     Copyright (C) 1998, Dag Brattli,
 *     All Rights Reserved.
 *
 *     This code is taken from the Vortex Operating System written by Aage
 *     Kvalnes. Aage has agreed that this code can use the GPL licence,
 *     although he does not use that licence in his own code.
 *
 *     This copyright does however _not_ include the ELF hash() function,
 *     for which I currently don't know the licence or copyright.
 *     Please inform me if you know.
 *
 *     This program is free software; you can redistribute it and/or
 *     modify it under the terms of the GNU General Public License as
 *     published by the Free Software Foundation; either version 2 of
 *     the License, or (at your option) any later version.
 *
 *     Neither Dag Brattli nor University of Tromsø admit liability nor
 *     provide warranty for any of this software. This material is
 *     provided "AS-IS" and at no charge.
 *
 ********************************************************************/
/*
 * NOTE :
 * There are various problems with this package :
 *  o the hash function for ints is pathetic (but could be changed)
 *  o locking is sometimes suspicious (especially during enumeration)
 *  o most users have only a few elements (== overhead)
 *  o most users never use search, so don't benefit from hashing
 * Problems already fixed :
 *  o not 64 bit compliant (most users do hashv = (int) self)
 *  o hashbin_remove() is broken => use hashbin_remove_this()
 * I think most users would be better served by a simple linked list
 * (like include/linux/list.h) with a global spinlock per list; a minimal
 * sketch of that alternative follows this comment.
 * Jean II
 */
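/*
 * For illustration only : a minimal sketch of the simpler alternative
 * suggested above (a plain include/linux/list.h list with one spinlock
 * per list). The structure and names below are hypothetical and are not
 * part of this file.
 *
 *      #include <linux/list.h>
 *      #include <linux/spinlock.h>
 *
 *      struct demo_entry {
 *              struct list_head node;
 *              int value;
 *      };
 *
 *      static LIST_HEAD(demo_list);
 *      static DEFINE_SPINLOCK(demo_lock);
 *
 *      static void demo_add(struct demo_entry *entry)
 *      {
 *              unsigned long flags;
 *
 *              spin_lock_irqsave(&demo_lock, flags);
 *              list_add(&entry->node, &demo_list);
 *              spin_unlock_irqrestore(&demo_lock, flags);
 *      }
 */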
/*
 * Notes on concurrent access to hashbins and other SMP issues
 * ------------------------------------------------------------
 * In the IrDA stack, hashbins are very often a global repository of
 * information, and are therefore used in a very asynchronous manner
 * following various events (driver calls, timers, user calls...).
 * Therefore, it is very often highly important to consider the
 * management of concurrent access to the hashbin and how to guarantee
 * the consistency of the operations on it.
 *
 * First, we need to define the objectives of locking :
 *  1) Protect user data (content pointed to by the hashbin)
 *  2) Protect the hashbin structure itself (linked list in each bin)
 *
 * OLD LOCKING
 * -----------
 *
 * The previous locking strategies, HB_LOCAL and HB_GLOBAL, were
 * both inadequate in *both* respects.
 *  o HB_GLOBAL was using a spinlock for each bin (local locking).
 *  o HB_LOCAL was disabling irqs on *all* CPUs, so it effectively acted
 *    as a single global semaphore.
 * The problems were :
 *  A) Global irq disabling is no longer supported by the kernel
 *  B) No protection for the hashbin struct global data
 *      o hashbin_delete()
 *      o hb_current
 *  C) No protection for user data in some cases
 *
 * A) HB_LOCAL uses global irq disabling, so it doesn't work on kernel
 *    2.5.X. Even where it is supported (kernel 2.4.X and earlier), its
 *    performance is not satisfactory on SMP setups. Most hashbins were
 *    HB_LOCAL, so (A) definitely needs fixing.
 * B) HB_LOCAL could be modified to fix (B). However, because HB_GLOBAL
 *    locks only the individual bins, it will never be able to lock the
 *    global data, so it can't do (B).
 * C) Some functions return a pointer to data that is still in the
 *    hashbin :
 *      o hashbin_find()
 *      o hashbin_get_first()
 *      o hashbin_get_next()
 *    As the data is still in the hashbin, it may be changed or freed
 *    while the caller is examining the data. In those cases, locking
 *    can't be done within the hashbin, but must include use of the data
 *    within the caller.
 *    The caller can easily do this with HB_LOCAL (just disable irqs).
 *    However, this is impossible with HB_GLOBAL because the caller has
 *    no way to know the proper bin, so it doesn't know which spinlock
 *    to use.
 *
 * Quick summary : we can no longer use HB_LOCAL, and HB_GLOBAL is
 * fundamentally broken and will never work.
 *
 * NEW LOCKING
 * -----------
 *
 * To fix those problems, I've introduced a few changes in the
 * hashbin locking :
 *  1) New HB_LOCK scheme
 *  2) hashbin->hb_spinlock
 *  3) New hashbin usage policy
 *
 * HB_LOCK :
 * -------
 * HB_LOCK is a locking scheme intermediate between the old HB_LOCAL
 * and HB_GLOBAL. It uses a single spinlock to protect the whole content
 * of the hashbin. As it is a single spinlock, it can protect the global
 * data of the hashbin and not only the bins themselves.
 * HB_LOCK can only protect some of the hashbin calls, so it only locks
 * calls that can be made 100% safe and leaves other calls unprotected.
 * HB_LOCK is in theory slower than HB_GLOBAL, but as the hashbin
 * content is always small, contention is not high, so it doesn't matter
 * much. HB_LOCK is probably faster than HB_LOCAL.
 *
 * hashbin->hb_spinlock :
 * --------------------
 * The spinlock that HB_LOCK uses is available to the caller, so that
 * the caller can protect unprotected calls (see below).
 * If the caller wants to do its own locking entirely (HB_NOLOCK), it
 * can do so and may safely use this spinlock.
 * Locking is done like this :
 *      spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 * Releasing the lock :
 *      spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
 *
 * Safe & Protected calls :
 * ----------------------
 * The following calls are safe or protected via HB_LOCK :
 *  o hashbin_new()             -> safe
 *  o hashbin_delete()
 *  o hashbin_insert()
 *  o hashbin_remove_first()
 *  o hashbin_remove()
 *  o hashbin_remove_this()
 *  o HASHBIN_GET_SIZE()        -> atomic
 *
 * The following calls only protect the hashbin itself :
 *  o hashbin_lock_find()
 *  o hashbin_find_next()
 *
 * Unprotected calls :
 * -----------------
 * The following calls need to be protected by the caller :
 *  o hashbin_find()
 *  o hashbin_get_first()
 *  o hashbin_get_next()
 *
 * Locking Policy :
 * --------------
 * If the hashbin is used only in a single thread of execution
 * (explicitly or implicitly), you can use HB_NOLOCK.
 * If the calling module already provides concurrent access protection,
 * you may use HB_NOLOCK.
 *
 * In all other cases, you need to use HB_LOCK and lock the hashbin
 * every time before calling one of the unprotected calls. You also must
 * use the pointer returned by the unprotected call within the locked
 * region.
 *
 * Extra care for enumeration :
 * --------------------------
 * hashbin_get_first() and hashbin_get_next() use the hashbin to
 * store the current position, in hb_current.
 * As long as the hashbin remains locked, this is safe. If you unlock
 * the hashbin, the current position may change if anybody else modifies
 * or enumerates the hashbin.
 * Summary : do the full enumeration while locked (see the usage sketch
 * after this comment).
 *
 * Alternatively, you may use hashbin_find_next(). But this will
 * be slower, is more complex to use and doesn't protect the hashbin
 * content. So, care is needed here as well.
 *
 * Other issues :
 * ------------
 * I believe that we are overdoing it by using spin_lock_irqsave()
 * and we should use only spin_lock_bh() or similar. But, I don't have
 * the balls to try it out.
 * Don't believe that because hashbins are now (somewhat) SMP safe
 * the rest of the code is. Higher layers tend to be safest,
 * but LAP and LMP would need some serious dedicated love.
 *
 * Jean II
 */
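/*
 * For illustration only : the locked-enumeration pattern described
 * above, assuming a hashbin_t *hashbin that was created with HB_LOCK.
 * The entry pointer is only used while the lock is held.
 *
 *      irda_queue_t *entry;
 *      unsigned long flags;
 *
 *      spin_lock_irqsave(&hashbin->hb_spinlock, flags);
 *      for (entry = hashbin_get_first(hashbin); entry != NULL;
 *           entry = hashbin_get_next(hashbin)) {
 *              ... use entry, but do not keep it past the unlock ...
 *      }
 *      spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
 */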
#include <linux/module.h>

#include <net/irda/irda.h>
#include <net/irda/irqueue.h>

/************************ QUEUE SUBROUTINES ************************/

/*
 * Hashbin
 */
#define GET_HASHBIN(x) ( x & HASHBIN_MASK )
/*
 * Function hash (name)
 *
 *    This function hashes the input string 'name' using the ELF hash
 *    function for strings.
 */
static __u32 hash( const char* name)
{
        __u32 h = 0;
        __u32 g;

        while(*name) {
                h = (h<<4) + *name++;
                if ((g = (h & 0xf0000000)))
                        h ^= g >> 24;
                h &= ~g;
        }
        return h;
}
/*
 * Function enqueue_first (queue, element)
 *
 *    Insert item first in queue.
 *
 */
static void enqueue_first(irda_queue_t **queue, irda_queue_t* element)
{
        IRDA_DEBUG( 4, "%s()\n", __FUNCTION__);

        /*
         * Check if queue is empty.
         */
        if ( *queue == NULL ) {
                /*
                 * Queue is empty.  Insert one element into the queue.
                 */
                element->q_next = element->q_prev = *queue = element;
        } else {
                /*
                 * Queue is not empty.  Insert element into front of queue.
                 */
                element->q_next          = (*queue);
                (*queue)->q_prev->q_next = element;
                element->q_prev          = (*queue)->q_prev;
                (*queue)->q_prev         = element;
                (*queue)                 = element;
        }
}
/*
 * Function dequeue_first (queue)
 *
 *    Remove first entry in queue
 *
 */
static irda_queue_t *dequeue_first(irda_queue_t **queue)
{
        irda_queue_t *ret;

        IRDA_DEBUG( 4, "dequeue_first()\n");

        /*
         * Set return value
         */
        ret = *queue;

        if ( *queue == NULL ) {
                /*
                 * Queue was empty.
                 */
        } else if ( (*queue)->q_next == *queue ) {
                /*
                 * Queue only contained a single element. It will now be
                 * empty.
                 */
                *queue = NULL;
        } else {
                /*
                 * Queue contained several elements.  Remove the first one.
                 */
                (*queue)->q_prev->q_next = (*queue)->q_next;
                (*queue)->q_next->q_prev = (*queue)->q_prev;
                *queue = (*queue)->q_next;
        }

        /*
         * Return the removed entry (or NULL if the queue was empty).
         */
        return ret;
}
/*
 * Function dequeue_general (queue, element)
 *
 *    Remove a specific element from the queue
 *
 */
static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element)
{
        irda_queue_t *ret;

        IRDA_DEBUG( 4, "dequeue_general()\n");

        /*
         * Set return value
         */
        ret = *queue;

        if ( *queue == NULL ) {
                /*
                 * Queue was empty.
                 */
        } else if ( (*queue)->q_next == *queue ) {
                /*
                 * Queue only contained a single element. It will now be
                 * empty.
                 */
                *queue = NULL;
        } else {
                /*
                 * Remove specific element.
                 */
                element->q_prev->q_next = element->q_next;
                element->q_next->q_prev = element->q_prev;
                if ( (*queue) == element)
                        (*queue) = element->q_next;
        }

        /*
         * Return the removed entry (or NULL if the queue was empty).
         */
        return ret;
}
/************************ HASHBIN MANAGEMENT ************************/

/*
 * Function hashbin_new (type)
 *
 *    Create hashbin!
 *
 */
hashbin_t *hashbin_new(int type)
{
        hashbin_t* hashbin;

        /*
         * Allocate new hashbin
         */
        hashbin = kmalloc( sizeof(hashbin_t), GFP_ATOMIC);
        if (!hashbin)
                return NULL;

        /*
         * Initialize structure
         */
        memset(hashbin, 0, sizeof(hashbin_t));
        hashbin->hb_type = type;
        hashbin->magic = HB_MAGIC;
        //hashbin->hb_current = NULL;

        /* Make sure all spinlock's are unlocked */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_init(&hashbin->hb_spinlock);
        }

        return hashbin;
}
EXPORT_SYMBOL(hashbin_new);
/*
 * Function hashbin_delete (hashbin, free_func)
 *
 *    Destroy hashbin. The free_func can be a user-supplied routine for
 *    deallocating the queued entries if they are complex; if not, the
 *    user can just supply kfree, which should take care of the job.
 */
int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
{
        irda_queue_t* queue;
        unsigned long flags = 0;
        int i;

        IRDA_ASSERT(hashbin != NULL, return -1;);
        IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        }

        /*
         * Free the entries in the hashbin, TODO: use hashbin_clear when
         * it has been shown to work
         */
        for (i = 0; i < HASHBIN_SIZE; i ++ ) {
                queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);
                while (queue ) {
                        if (free_func)
                                (*free_func)(queue);
                        queue = dequeue_first(
                                (irda_queue_t**) &hashbin->hb_queue[i]);
                }
        }

        /* Cleanup local data */
        hashbin->hb_current = NULL;
        hashbin->magic = ~HB_MAGIC;

        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        }

        /*
         * Free the hashbin structure
         */
        kfree(hashbin);

        return 0;
}
EXPORT_SYMBOL(hashbin_delete);
/********************* HASHBIN LIST OPERATIONS *********************/

/*
 * Function hashbin_insert (hashbin, entry, hashv, name)
 *
 *    Insert an entry into the hashbin
 *
 */
void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
                    const char* name)
{
        unsigned long flags = 0;
        int bin;

        IRDA_DEBUG( 4, "%s()\n", __FUNCTION__);

        IRDA_ASSERT( hashbin != NULL, return;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;);

        /*
         * Locate hashbin
         */
        if ( name )
                hashv = hash( name );
        bin = GET_HASHBIN( hashv );

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        /*
         * Store name and key
         */
        entry->q_hash = hashv;
        if ( name )
                strlcpy( entry->q_name, name, sizeof(entry->q_name));

        /*
         * Insert new entry first
         */
        enqueue_first( (irda_queue_t**) &hashbin->hb_queue[ bin ],
                       entry);
        hashbin->hb_size++;

        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */
}
EXPORT_SYMBOL(hashbin_insert);
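/*
 * For illustration only : a minimal lifecycle sketch for the calls
 * above, assuming the caller embeds irda_queue_t as the first member of
 * its own structure (which is what the pointer casts in this file rely
 * on). The structure and names below are hypothetical.
 *
 *      struct demo_item {
 *              irda_queue_t q;         (must be the first member)
 *              int payload;
 *      };
 *
 *      hashbin_t *hb = hashbin_new(HB_LOCK);
 *      struct demo_item *item = kmalloc(sizeof(*item), GFP_ATOMIC);
 *
 *      if (hb && item) {
 *              item->payload = 42;
 *              hashbin_insert(hb, (irda_queue_t *) item, (long) item, NULL);
 *      }
 *      ...
 *      Deleting the hashbin frees every remaining entry via the
 *      supplied free_func :
 *      hashbin_delete(hb, (FREE_FUNC) kfree);
 */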
/*
 * Function hashbin_remove_first (hashbin)
 *
 *    Remove first entry of the hashbin
 *
 * Note : this function no longer uses hashbin_remove(), but does things
 * similar to hashbin_remove_this(), so it can be considered safe.
 * Jean II
 */
void *hashbin_remove_first( hashbin_t *hashbin)
{
        unsigned long flags = 0;
        irda_queue_t *entry = NULL;

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        entry = hashbin_get_first( hashbin);
        if ( entry != NULL) {
                int     bin;
                long    hashv;
                /*
                 * Locate hashbin
                 */
                hashv = entry->q_hash;
                bin = GET_HASHBIN( hashv );

                /*
                 * Dequeue the entry...
                 */
                dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
                                 (irda_queue_t*) entry );
                hashbin->hb_size--;
                entry->q_next = NULL;
                entry->q_prev = NULL;

                /*
                 * Check if this item is the currently selected item, and in
                 * that case we must reset hb_current
                 */
                if ( entry == hashbin->hb_current)
                        hashbin->hb_current = NULL;
        }

        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        return entry;
}
/*
 * Function hashbin_remove (hashbin, hashv, name)
 *
 *    Remove entry with the given name
 *
 * The use of this function is highly discouraged, because the whole
 * concept behind hashbin_remove() is broken. In many cases, it's not
 * possible to guarantee the uniqueness of the index (either hashv or
 * name), which can lead to removing the WRONG entry.
 * The only simple safe use is :
 *              hashbin_remove(hashbin, (int) self, NULL);
 * In any other case, you must think hard to guarantee uniqueness of
 * the index.
 * Jean II
 */
void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
{
        int bin, found = FALSE;
        unsigned long flags = 0;
        irda_queue_t* entry;

        IRDA_DEBUG( 4, "%s()\n", __FUNCTION__);

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

        /*
         * Locate hashbin
         */
        if ( name )
                hashv = hash( name );
        bin = GET_HASHBIN( hashv );

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        /*
         * Search for entry
         */
        entry = hashbin->hb_queue[ bin ];
        if ( entry ) {
                do {
                        /*
                         * Check for key
                         */
                        if ( entry->q_hash == hashv ) {
                                /*
                                 * Name compare too?
                                 */
                                if ( name ) {
                                        if ( strcmp( entry->q_name, name) == 0)
                                        {
                                                found = TRUE;
                                                break;
                                        }
                                } else {
                                        found = TRUE;
                                        break;
                                }
                        }
                        entry = entry->q_next;
                } while ( entry != hashbin->hb_queue[ bin ] );
        }

        /*
         * If entry was found, dequeue it
         */
        if ( found ) {
                dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
                                 (irda_queue_t*) entry );
                hashbin->hb_size--;

                /*
                 * Check if this item is the currently selected item, and in
                 * that case we must reset hb_current
                 */
                if ( entry == hashbin->hb_current)
                        hashbin->hb_current = NULL;
        }

        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        /* Return */
        if ( found )
                return entry;
        else
                return NULL;
}
EXPORT_SYMBOL(hashbin_remove);
/*
 * Function hashbin_remove_this (hashbin, entry)
 *
 *    Remove the given entry from the hashbin
 *
 * In some cases, the user of the hashbin can't guarantee the uniqueness
 * of either the hashv or the name.
 * In those cases, using the above function is guaranteed to cause trouble,
 * so we use this one instead...
 * And by the way, it's also faster, because we skip the search phase ;-)
 */
void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
{
        unsigned long flags = 0;
        int     bin;
        long    hashv;

        IRDA_DEBUG( 4, "%s()\n", __FUNCTION__);

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
        IRDA_ASSERT( entry != NULL, return NULL;);

        /* Synchronize */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_lock_irqsave(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        /* Check if valid and not already removed... */
        if((entry->q_next == NULL) || (entry->q_prev == NULL)) {
                entry = NULL;
                goto out;
        }

        /*
         * Locate hashbin
         */
        hashv = entry->q_hash;
        bin = GET_HASHBIN( hashv );

        /*
         * Dequeue the entry...
         */
        dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
                         (irda_queue_t*) entry );
        hashbin->hb_size--;
        entry->q_next = NULL;
        entry->q_prev = NULL;

        /*
         * Check if this item is the currently selected item, and in
         * that case we must reset hb_current
         */
        if ( entry == hashbin->hb_current)
                hashbin->hb_current = NULL;
out:
        /* Release lock */
        if ( hashbin->hb_type & HB_LOCK ) {
                spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
        } /* Default is no-lock  */

        return entry;
}
EXPORT_SYMBOL(hashbin_remove_this);
/*********************** HASHBIN ENUMERATION ***********************/

/*
 * Function hashbin_find (hashbin, hashv, name)
 *
 *    Find item with the given hashv or name
 *
 */
void* hashbin_find( hashbin_t* hashbin, long hashv, const char* name )
{
        int bin;
        irda_queue_t* entry;

        IRDA_DEBUG( 4, "hashbin_find()\n");

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

        /*
         * Locate hashbin
         */
        if ( name )
                hashv = hash( name );
        bin = GET_HASHBIN( hashv );

        /*
         * Search for entry
         */
        entry = hashbin->hb_queue[ bin];
        if ( entry ) {
                do {
                        /*
                         * Check for key
                         */
                        if ( entry->q_hash == hashv ) {
                                /*
                                 * Name compare too?
                                 */
                                if ( name ) {
                                        if ( strcmp( entry->q_name, name ) == 0 ) {
                                                return entry;
                                        }
                                } else {
                                        return entry;
                                }
                        }
                        entry = entry->q_next;
                } while ( entry != hashbin->hb_queue[ bin ] );
        }

        return NULL;
}
EXPORT_SYMBOL(hashbin_find);
/*
 * Function hashbin_lock_find (hashbin, hashv, name)
 *
 *    Find item with the given hashv or name
 *
 * Same, but with spinlock protection...
 * I call it safe, but it's only safe with respect to the hashbin, not its
 * content. - Jean II
 */
void* hashbin_lock_find( hashbin_t* hashbin, long hashv, const char* name )
{
        unsigned long flags = 0;
        irda_queue_t* entry;

        /* Synchronize */
        spin_lock_irqsave(&hashbin->hb_spinlock, flags);

        /*
         * Search for entry
         */
        entry = (irda_queue_t* ) hashbin_find( hashbin, hashv, name );

        /* Release lock */
        spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

        return entry;
}
EXPORT_SYMBOL(hashbin_lock_find);
/*
 * Function hashbin_find_next (hashbin, hashv, name, pnext)
 *
 *    Find an item with the given hashv or name, and its successor
 *
 * This function allows concurrent enumerations without the need to
 * lock over the whole session, because the caller keeps the context of
 * the search. On the other hand, it might fail and return NULL if the
 * entry has been removed. - Jean II
 */
void* hashbin_find_next( hashbin_t* hashbin, long hashv, const char* name,
                         void ** pnext)
{
        unsigned long flags = 0;
        irda_queue_t* entry;

        /* Synchronize */
        spin_lock_irqsave(&hashbin->hb_spinlock, flags);

        /*
         * Search for the current entry.
         * This allows us to check if the current item is still in the
         * hashbin or has been removed.
         */
        entry = (irda_queue_t* ) hashbin_find( hashbin, hashv, name );

        /*
         * Trick hashbin_get_next() to return what we want
         */
        if(entry) {
                hashbin->hb_current = entry;
                *pnext = hashbin_get_next( hashbin );
        } else
                *pnext = NULL;

        /* Release lock */
        spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

        return entry;
}
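/*
 * For illustration only : one step of a resumable enumeration using
 * hashbin_find_next(), as described above. "my_hashv" stands for
 * whatever key the caller remembered for its current item; the names
 * are hypothetical.
 *
 *      void *next;
 *
 *      if (hashbin_find_next(hashbin, my_hashv, NULL, &next) == NULL) {
 *              ... the current item is gone, restart the enumeration
 *                  from hashbin_get_first() ...
 *      } else {
 *              ... next now points to the successor, or is NULL at the
 *                  end of the enumeration ...
 *      }
 */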
/*
 * Function hashbin_get_first (hashbin)
 *
 *    Get a pointer to first element in hashbin, this function must be
 *    called before any calls to hashbin_get_next()!
 *
 */
irda_queue_t *hashbin_get_first( hashbin_t* hashbin)
{
        irda_queue_t *entry;
        int i;

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

        if ( hashbin == NULL)
                return NULL;

        for ( i = 0; i < HASHBIN_SIZE; i ++ ) {
                entry = hashbin->hb_queue[ i];
                if ( entry) {
                        hashbin->hb_current = entry;
                        return entry;
                }
        }
        /*
         * Did not find any item in hashbin
         */
        return NULL;
}
EXPORT_SYMBOL(hashbin_get_first);
/*
 * Function hashbin_get_next (hashbin)
 *
 *    Get next item in hashbin. A series of hashbin_get_next() calls must
 *    be started by a call to hashbin_get_first(). The function returns
 *    NULL when all items have been traversed.
 *
 * The context of the search is stored within the hashbin, so you must
 * protect yourself from concurrent enumerations. - Jean II
 */
irda_queue_t *hashbin_get_next( hashbin_t *hashbin)
{
        irda_queue_t* entry;
        int bin;
        int i;

        IRDA_ASSERT( hashbin != NULL, return NULL;);
        IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

        if ( hashbin->hb_current == NULL) {
                IRDA_ASSERT( hashbin->hb_current != NULL, return NULL;);
                return NULL;
        }
        entry = hashbin->hb_current->q_next;
        bin = GET_HASHBIN( entry->q_hash);

        /*
         * Make sure that we are not back at the beginning of the queue
         * again
         */
        if ( entry != hashbin->hb_queue[ bin ]) {
                hashbin->hb_current = entry;

                return entry;
        }

        /*
         * Check that this is not the last queue in hashbin
         */
        if ( bin >= HASHBIN_SIZE)
                return NULL;

        /*
         * Move to next queue in hashbin
         */
        bin++;
        for ( i = bin; i < HASHBIN_SIZE; i++ ) {
                entry = hashbin->hb_queue[ i];
                if ( entry) {
                        hashbin->hb_current = entry;

                        return entry;
                }
        }
        return NULL;
}
EXPORT_SYMBOL(hashbin_get_next);