  1. /*********************************************************************
  2. *
  3. * Filename: irqueue.c
  4. * Version: 0.3
  5. * Description: General queue implementation
  6. * Status: Experimental.
  7. * Author: Dag Brattli <dagb@cs.uit.no>
  8. * Created at: Tue Jun 9 13:29:31 1998
  9. * Modified at: Sun Dec 12 13:48:22 1999
  10. * Modified by: Dag Brattli <dagb@cs.uit.no>
  11. * Modified at: Thu Jan 4 14:29:10 CET 2001
  12. * Modified by: Marc Zyngier <mzyngier@freesurf.fr>
  13. *
  14. * Copyright (C) 1998-1999, Aage Kvalnes <aage@cs.uit.no>
  15. * Copyright (C) 1998, Dag Brattli,
  16. * All Rights Reserved.
  17. *
  18. * This code is taken from the Vortex Operating System written by Aage
  19. * Kvalnes. Aage has agreed that this code can use the GPL licence,
  20. * although he does not use that licence in his own code.
  21. *
  22. * This copyright does however _not_ include the ELF hash() function
  23. * which I currently don't know which licence or copyright it
  24. * has. Please inform me if you know.
  25. *
  26. * This program is free software; you can redistribute it and/or
  27. * modify it under the terms of the GNU General Public License as
  28. * published by the Free Software Foundation; either version 2 of
  29. * the License, or (at your option) any later version.
  30. *
  31. * Neither Dag Brattli nor University of Tromsø admit liability nor
  32. * provide warranty for any of this software. This material is
  33. * provided "AS-IS" and at no charge.
  34. *
  35. ********************************************************************/
  36. /*
  37. * NOTE :
  38. * There are various problems with this package :
  39. * o the hash function for ints is pathetic (but could be changed)
  40. * o locking is sometime suspicious (especially during enumeration)
  41. * o most users have only a few elements (== overhead)
  42. * o most users never use search, so don't benefit from hashing
  43. * Problem already fixed :
  44. * o not 64 bit compliant (most users do hashv = (int) self)
  45. * o hashbin_remove() is broken => use hashbin_remove_this()
  46. * I think most users would be better served by a simple linked list
  47. * (like include/linux/list.h) with a global spinlock per list.
  48. * Jean II
  49. */
  50. /*
  51. * Notes on the concurrent access to hashbin and other SMP issues
  52. * -------------------------------------------------------------
  53. * Hashbins are very often in the IrDA stack a global repository of
  54. * information, and therefore used in a very asynchronous manner following
  55. * various events (driver calls, timers, user calls...).
  56. * Therefore, very often it is highly important to consider the
  57. * management of concurrent access to the hashbin and how to guarantee the
  58. * consistency of the operations on it.
  59. *
  60. * First, we need to define the objective of locking :
  61. * 1) Protect user data (content pointed by the hashbin)
  62. * 2) Protect hashbin structure itself (linked list in each bin)
  63. *
  64. * OLD LOCKING
  65. * -----------
  66. *
  67. * The previous locking strategy, either HB_LOCAL or HB_GLOBAL were
  68. * both inadequate in *both* aspect.
  69. * o HB_GLOBAL was using a spinlock for each bin (local locking).
  70. * o HB_LOCAL was disabling irq on *all* CPUs, so use a single
  71. * global semaphore.
  72. * The problems were :
  73. * A) Global irq disabling is no longer supported by the kernel
  74. * B) No protection for the hashbin struct global data
  75. * o hashbin_delete()
  76. * o hb_current
  77. * C) No protection for user data in some cases
  78. *
  79. * A) HB_LOCAL use global irq disabling, so doesn't work on kernel
  80. * 2.5.X. Even when it is supported (kernel 2.4.X and earlier), its
  81. * performance is not satisfactory on SMP setups. Most hashbins were
  82. * HB_LOCAL, so (A) definitely need fixing.
  83. * B) HB_LOCAL could be modified to fix (B). However, because HB_GLOBAL
  84. * lock only the individual bins, it will never be able to lock the
  85. * global data, so can't do (B).
  86. * C) Some functions return pointer to data that is still in the
  87. * hashbin :
  88. * o hashbin_find()
  89. * o hashbin_get_first()
  90. * o hashbin_get_next()
  91. * As the data is still in the hashbin, it may be changed or free'd
  92. * while the caller is examining the data. In those cases, locking can't
  93. * be done within the hashbin, but must include use of the data within
  94. * the caller.
  95. * The caller can easily do this with HB_LOCAL (just disable irqs).
  96. * However, this is impossible with HB_GLOBAL because the caller has no
  97. * way to know the proper bin, so don't know which spinlock to use.
  98. *
  99. * Quick summary : can no longer use HB_LOCAL, and HB_GLOBAL is
  100. * fundamentally broken and will never work.
  101. *
  102. * NEW LOCKING
  103. * -----------
  104. *
  105. * To fix those problems, I've introduce a few changes in the
  106. * hashbin locking :
  107. * 1) New HB_LOCK scheme
  108. * 2) hashbin->hb_spinlock
  109. * 3) New hashbin usage policy
  110. *
  111. * HB_LOCK :
  112. * -------
  113. * HB_LOCK is a locking scheme intermediate between the old HB_LOCAL
  114. * and HB_GLOBAL. It uses a single spinlock to protect the whole content
  115. * of the hashbin. As it is a single spinlock, it can protect the global
  116. * data of the hashbin and not only the bins themselves.
  117. * HB_LOCK can only protect some of the hashbin calls, so it only lock
  118. * call that can be made 100% safe and leave other call unprotected.
  119. * HB_LOCK in theory is slower than HB_GLOBAL, but as the hashbin
  120. * content is always small contention is not high, so it doesn't matter
  121. * much. HB_LOCK is probably faster than HB_LOCAL.
  122. *
  123. * hashbin->hb_spinlock :
  124. * --------------------
  125. * The spinlock that HB_LOCK uses is available for caller, so that
  126. * the caller can protect unprotected calls (see below).
  127. * If the caller want to do entirely its own locking (HB_NOLOCK), he
  128. * can do so and may use safely this spinlock.
  129. * Locking is done like this :
  130. * spin_lock_irqsave(&hashbin->hb_spinlock, flags);
  131. * Releasing the lock :
  132. * spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
  133. *
  134. * Safe & Protected calls :
  135. * ----------------------
  136. * The following calls are safe or protected via HB_LOCK :
  137. * o hashbin_new() -> safe
  138. * o hashbin_delete()
  139. * o hashbin_insert()
  140. * o hashbin_remove_first()
  141. * o hashbin_remove()
  142. * o hashbin_remove_this()
  143. * o HASHBIN_GET_SIZE() -> atomic
  144. *
  145. * The following calls only protect the hashbin itself :
  146. * o hashbin_lock_find()
  147. * o hashbin_find_next()
  148. *
  149. * Unprotected calls :
  150. * -----------------
  151. * The following calls need to be protected by the caller :
  152. * o hashbin_find()
  153. * o hashbin_get_first()
  154. * o hashbin_get_next()
  155. *
  156. * Locking Policy :
  157. * --------------
  158. * If the hashbin is used only in a single thread of execution
  159. * (explicitly or implicitly), you can use HB_NOLOCK
  160. * If the calling module already provide concurrent access protection,
  161. * you may use HB_NOLOCK.
  162. *
  163. * In all other cases, you need to use HB_LOCK and lock the hashbin
  164. * every time before calling one of the unprotected calls. You also must
  165. * use the pointer returned by the unprotected call within the locked
  166. * region.
  167. *
  168. * Extra care for enumeration :
  169. * --------------------------
  170. * hashbin_get_first() and hashbin_get_next() use the hashbin to
  171. * store the current position, in hb_current.
  172. * As long as the hashbin remains locked, this is safe. If you unlock
  173. * the hashbin, the current position may change if anybody else modify
  174. * or enumerate the hashbin.
  175. * Summary : do the full enumeration while locked.
  176. *
  177. * Alternatively, you may use hashbin_find_next(). But, this will
  178. * be slower, is more complex to use and doesn't protect the hashbin
  179. * content. So, care is needed here as well.
  180. *
  181. * Other issues :
  182. * ------------
  183. * I believe that we are overdoing it by using spin_lock_irqsave()
  184. * and we should use only spin_lock_bh() or similar. But, I don't have
  185. * the balls to try it out.
  186. * Don't believe that because hashbin are now (somewhat) SMP safe
  187. * that the rest of the code is. Higher layers tend to be safest,
  188. * but LAP and LMP would need some serious dedicated love.
  189. *
  190. * Jean II
  191. */
  192. #include <linux/module.h>
  193. #include <linux/slab.h>
  194. #include <net/irda/irda.h>
  195. #include <net/irda/irqueue.h>
  196. /************************ QUEUE SUBROUTINES ************************/
  197. /*
  198. * Hashbin
  199. */
  200. #define GET_HASHBIN(x) ( x & HASHBIN_MASK )
  201. /*
  202. * Function hash (name)
  203. *
  204. * This function hash the input string 'name' using the ELF hash
  205. * function for strings.
  206. */
  207. static __u32 hash( const char* name)
  208. {
  209. __u32 h = 0;
  210. __u32 g;
  211. while(*name) {
  212. h = (h<<4) + *name++;
  213. if ((g = (h & 0xf0000000)))
  214. h ^=g>>24;
  215. h &=~g;
  216. }
  217. return h;
  218. }
  219. /*
  220. * Function enqueue_first (queue, proc)
  221. *
  222. * Insert item first in queue.
  223. *
  224. */
  225. static void enqueue_first(irda_queue_t **queue, irda_queue_t* element)
  226. {
  227. IRDA_DEBUG( 4, "%s()\n", __func__);
  228. /*
  229. * Check if queue is empty.
  230. */
  231. if ( *queue == NULL ) {
  232. /*
  233. * Queue is empty. Insert one element into the queue.
  234. */
  235. element->q_next = element->q_prev = *queue = element;
  236. } else {
  237. /*
  238. * Queue is not empty. Insert element into front of queue.
  239. */
  240. element->q_next = (*queue);
  241. (*queue)->q_prev->q_next = element;
  242. element->q_prev = (*queue)->q_prev;
  243. (*queue)->q_prev = element;
  244. (*queue) = element;
  245. }
  246. }
  247. /*
  248. * Function dequeue (queue)
  249. *
  250. * Remove first entry in queue
  251. *
  252. */
  253. static irda_queue_t *dequeue_first(irda_queue_t **queue)
  254. {
  255. irda_queue_t *ret;
  256. IRDA_DEBUG( 4, "dequeue_first()\n");
  257. /*
  258. * Set return value
  259. */
  260. ret = *queue;
  261. if ( *queue == NULL ) {
  262. /*
  263. * Queue was empty.
  264. */
  265. } else if ( (*queue)->q_next == *queue ) {
  266. /*
  267. * Queue only contained a single element. It will now be
  268. * empty.
  269. */
  270. *queue = NULL;
  271. } else {
  272. /*
  273. * Queue contained several element. Remove the first one.
  274. */
  275. (*queue)->q_prev->q_next = (*queue)->q_next;
  276. (*queue)->q_next->q_prev = (*queue)->q_prev;
  277. *queue = (*queue)->q_next;
  278. }
  279. /*
  280. * Return the removed entry (or NULL of queue was empty).
  281. */
  282. return ret;
  283. }
  284. /*
  285. * Function dequeue_general (queue, element)
  286. *
  287. *
  288. */
  289. static irda_queue_t *dequeue_general(irda_queue_t **queue, irda_queue_t* element)
  290. {
  291. irda_queue_t *ret;
  292. IRDA_DEBUG( 4, "dequeue_general()\n");
  293. /*
  294. * Set return value
  295. */
  296. ret = *queue;
  297. if ( *queue == NULL ) {
  298. /*
  299. * Queue was empty.
  300. */
  301. } else if ( (*queue)->q_next == *queue ) {
  302. /*
  303. * Queue only contained a single element. It will now be
  304. * empty.
  305. */
  306. *queue = NULL;
  307. } else {
  308. /*
  309. * Remove specific element.
  310. */
  311. element->q_prev->q_next = element->q_next;
  312. element->q_next->q_prev = element->q_prev;
  313. if ( (*queue) == element)
  314. (*queue) = element->q_next;
  315. }
  316. /*
  317. * Return the removed entry (or NULL of queue was empty).
  318. */
  319. return ret;
  320. }
  321. /************************ HASHBIN MANAGEMENT ************************/
  322. /*
  323. * Function hashbin_create ( type, name )
  324. *
  325. * Create hashbin!
  326. *
  327. */
  328. hashbin_t *hashbin_new(int type)
  329. {
  330. hashbin_t* hashbin;
  331. /*
  332. * Allocate new hashbin
  333. */
  334. hashbin = kzalloc(sizeof(*hashbin), GFP_ATOMIC);
  335. if (!hashbin)
  336. return NULL;
  337. /*
  338. * Initialize structure
  339. */
  340. hashbin->hb_type = type;
  341. hashbin->magic = HB_MAGIC;
  342. //hashbin->hb_current = NULL;
  343. /* Make sure all spinlock's are unlocked */
  344. if ( hashbin->hb_type & HB_LOCK ) {
  345. spin_lock_init(&hashbin->hb_spinlock);
  346. }
  347. return hashbin;
  348. }
  349. EXPORT_SYMBOL(hashbin_new);
  350. /*
  351. * Function hashbin_delete (hashbin, free_func)
  352. *
  353. * Destroy hashbin, the free_func can be a user supplied special routine
  354. * for deallocating this structure if it's complex. If not the user can
  355. * just supply kfree, which should take care of the job.
  356. */
/*
 * Drain every bin and free the hashbin itself. 'free_func' (may be NULL)
 * is invoked on each entry with the hashbin lock *dropped*, presumably
 * because the callback may block or re-enter hashbin code — the bin head
 * is re-read by dequeue_first() after relocking, so this is safe against
 * concurrent removals.
 * Returns 0 on success, -1 if the hashbin pointer or magic is invalid.
 */
int hashbin_delete( hashbin_t* hashbin, FREE_FUNC free_func)
{
	irda_queue_t* queue;
	unsigned long flags = 0;
	int i;

	IRDA_ASSERT(hashbin != NULL, return -1;);
	IRDA_ASSERT(hashbin->magic == HB_MAGIC, return -1;);

	/* Synchronize */
	if (hashbin->hb_type & HB_LOCK)
		spin_lock_irqsave(&hashbin->hb_spinlock, flags);

	/*
	 *  Free the entries in the hashbin, TODO: use hashbin_clear when
	 *  it has been shown to work
	 */
	for (i = 0; i < HASHBIN_SIZE; i ++ ) {
		while (1) {
			queue = dequeue_first((irda_queue_t**) &hashbin->hb_queue[i]);

			if (!queue)
				break;

			if (free_func) {
				/* Drop the lock around the callback, then
				 * reacquire it before the next dequeue. */
				if (hashbin->hb_type & HB_LOCK)
					spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
				free_func(queue);
				if (hashbin->hb_type & HB_LOCK)
					spin_lock_irqsave(&hashbin->hb_spinlock, flags);
			}
		}
	}

	/* Cleanup local data */
	hashbin->hb_current = NULL;
	/* Poison the magic so stale users trip the asserts above. */
	hashbin->magic = ~HB_MAGIC;

	/* Release lock */
	if (hashbin->hb_type & HB_LOCK)
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);

	/*
	 *  Free the hashbin structure
	 */
	kfree(hashbin);

	return 0;
}
EXPORT_SYMBOL(hashbin_delete);
  398. /********************* HASHBIN LIST OPERATIONS *********************/
  399. /*
  400. * Function hashbin_insert (hashbin, entry, name)
  401. *
  402. * Insert an entry into the hashbin
  403. *
  404. */
  405. void hashbin_insert(hashbin_t* hashbin, irda_queue_t* entry, long hashv,
  406. const char* name)
  407. {
  408. unsigned long flags = 0;
  409. int bin;
  410. IRDA_DEBUG( 4, "%s()\n", __func__);
  411. IRDA_ASSERT( hashbin != NULL, return;);
  412. IRDA_ASSERT( hashbin->magic == HB_MAGIC, return;);
  413. /*
  414. * Locate hashbin
  415. */
  416. if ( name )
  417. hashv = hash( name );
  418. bin = GET_HASHBIN( hashv );
  419. /* Synchronize */
  420. if ( hashbin->hb_type & HB_LOCK ) {
  421. spin_lock_irqsave(&hashbin->hb_spinlock, flags);
  422. } /* Default is no-lock */
  423. /*
  424. * Store name and key
  425. */
  426. entry->q_hash = hashv;
  427. if ( name )
  428. strlcpy( entry->q_name, name, sizeof(entry->q_name));
  429. /*
  430. * Insert new entry first
  431. */
  432. enqueue_first( (irda_queue_t**) &hashbin->hb_queue[ bin ],
  433. entry);
  434. hashbin->hb_size++;
  435. /* Release lock */
  436. if ( hashbin->hb_type & HB_LOCK ) {
  437. spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
  438. } /* Default is no-lock */
  439. }
  440. EXPORT_SYMBOL(hashbin_insert);
  441. /*
  442. * Function hashbin_remove_first (hashbin)
  443. *
  444. * Remove first entry of the hashbin
  445. *
  446. * Note : this function no longer use hashbin_remove(), but does things
  447. * similar to hashbin_remove_this(), so can be considered safe.
  448. * Jean II
  449. */
  450. void *hashbin_remove_first( hashbin_t *hashbin)
  451. {
  452. unsigned long flags = 0;
  453. irda_queue_t *entry = NULL;
  454. /* Synchronize */
  455. if ( hashbin->hb_type & HB_LOCK ) {
  456. spin_lock_irqsave(&hashbin->hb_spinlock, flags);
  457. } /* Default is no-lock */
  458. entry = hashbin_get_first( hashbin);
  459. if ( entry != NULL) {
  460. int bin;
  461. long hashv;
  462. /*
  463. * Locate hashbin
  464. */
  465. hashv = entry->q_hash;
  466. bin = GET_HASHBIN( hashv );
  467. /*
  468. * Dequeue the entry...
  469. */
  470. dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
  471. entry);
  472. hashbin->hb_size--;
  473. entry->q_next = NULL;
  474. entry->q_prev = NULL;
  475. /*
  476. * Check if this item is the currently selected item, and in
  477. * that case we must reset hb_current
  478. */
  479. if ( entry == hashbin->hb_current)
  480. hashbin->hb_current = NULL;
  481. }
  482. /* Release lock */
  483. if ( hashbin->hb_type & HB_LOCK ) {
  484. spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
  485. } /* Default is no-lock */
  486. return entry;
  487. }
  488. /*
  489. * Function hashbin_remove (hashbin, hashv, name)
  490. *
  491. * Remove entry with the given name
  492. *
  493. * The use of this function is highly discouraged, because the whole
  494. * concept behind hashbin_remove() is broken. In many cases, it's not
  495. * possible to guarantee the unicity of the index (either hashv or name),
  496. * leading to removing the WRONG entry.
  497. * The only simple safe use is :
  498. * hashbin_remove(hasbin, (int) self, NULL);
  499. * In other case, you must think hard to guarantee unicity of the index.
  500. * Jean II
  501. */
  502. void* hashbin_remove( hashbin_t* hashbin, long hashv, const char* name)
  503. {
  504. int bin, found = FALSE;
  505. unsigned long flags = 0;
  506. irda_queue_t* entry;
  507. IRDA_DEBUG( 4, "%s()\n", __func__);
  508. IRDA_ASSERT( hashbin != NULL, return NULL;);
  509. IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
  510. /*
  511. * Locate hashbin
  512. */
  513. if ( name )
  514. hashv = hash( name );
  515. bin = GET_HASHBIN( hashv );
  516. /* Synchronize */
  517. if ( hashbin->hb_type & HB_LOCK ) {
  518. spin_lock_irqsave(&hashbin->hb_spinlock, flags);
  519. } /* Default is no-lock */
  520. /*
  521. * Search for entry
  522. */
  523. entry = hashbin->hb_queue[ bin ];
  524. if ( entry ) {
  525. do {
  526. /*
  527. * Check for key
  528. */
  529. if ( entry->q_hash == hashv ) {
  530. /*
  531. * Name compare too?
  532. */
  533. if ( name ) {
  534. if ( strcmp( entry->q_name, name) == 0)
  535. {
  536. found = TRUE;
  537. break;
  538. }
  539. } else {
  540. found = TRUE;
  541. break;
  542. }
  543. }
  544. entry = entry->q_next;
  545. } while ( entry != hashbin->hb_queue[ bin ] );
  546. }
  547. /*
  548. * If entry was found, dequeue it
  549. */
  550. if ( found ) {
  551. dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
  552. entry);
  553. hashbin->hb_size--;
  554. /*
  555. * Check if this item is the currently selected item, and in
  556. * that case we must reset hb_current
  557. */
  558. if ( entry == hashbin->hb_current)
  559. hashbin->hb_current = NULL;
  560. }
  561. /* Release lock */
  562. if ( hashbin->hb_type & HB_LOCK ) {
  563. spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
  564. } /* Default is no-lock */
  565. /* Return */
  566. if ( found )
  567. return entry;
  568. else
  569. return NULL;
  570. }
  571. EXPORT_SYMBOL(hashbin_remove);
  572. /*
  573. * Function hashbin_remove_this (hashbin, entry)
  574. *
  575. * Remove entry with the given name
  576. *
  577. * In some cases, the user of hashbin can't guarantee the unicity
  578. * of either the hashv or name.
  579. * In those cases, using the above function is guaranteed to cause troubles,
  580. * so we use this one instead...
  581. * And by the way, it's also faster, because we skip the search phase ;-)
  582. */
/*
 * Remove exactly the given 'entry' from the hashbin, skipping any search.
 * Returns 'entry' on success, or NULL if the entry had already been
 * removed (its links are NULL) or the arguments are invalid.
 */
void* hashbin_remove_this( hashbin_t* hashbin, irda_queue_t* entry)
{
	unsigned long flags = 0;
	int bin;
	long hashv;

	IRDA_DEBUG( 4, "%s()\n", __func__);

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
	IRDA_ASSERT( entry != NULL, return NULL;);

	/* Synchronize */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_lock_irqsave(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock  */

	/* Check if valid and not already removed... (removal NULLs both
	 * links below, so a second call on the same entry is a no-op) */
	if((entry->q_next == NULL) || (entry->q_prev == NULL)) {
		entry = NULL;
		goto out;
	}

	/*
	 *  Locate hashbin
	 */
	hashv = entry->q_hash;
	bin = GET_HASHBIN( hashv );

	/*
	 *  Dequeue the entry...
	 */
	dequeue_general( (irda_queue_t**) &hashbin->hb_queue[ bin ],
			 entry);
	hashbin->hb_size--;
	/* Mark as removed so a repeat call bails out above. */
	entry->q_next = NULL;
	entry->q_prev = NULL;

	/*
	 *  Check if this item is the currently selected item, and in
	 *  that case we must reset hb_current
	 */
	if ( entry == hashbin->hb_current)
		hashbin->hb_current = NULL;
out:
	/* Release lock */
	if ( hashbin->hb_type & HB_LOCK ) {
		spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
	} /* Default is no-lock */

	return entry;
}
EXPORT_SYMBOL(hashbin_remove_this);
  628. /*********************** HASHBIN ENUMERATION ***********************/
  629. /*
  630. * Function hashbin_common_find (hashbin, hashv, name)
  631. *
  632. * Find item with the given hashv or name
  633. *
  634. */
  635. void* hashbin_find( hashbin_t* hashbin, long hashv, const char* name )
  636. {
  637. int bin;
  638. irda_queue_t* entry;
  639. IRDA_DEBUG( 4, "hashbin_find()\n");
  640. IRDA_ASSERT( hashbin != NULL, return NULL;);
  641. IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
  642. /*
  643. * Locate hashbin
  644. */
  645. if ( name )
  646. hashv = hash( name );
  647. bin = GET_HASHBIN( hashv );
  648. /*
  649. * Search for entry
  650. */
  651. entry = hashbin->hb_queue[ bin];
  652. if ( entry ) {
  653. do {
  654. /*
  655. * Check for key
  656. */
  657. if ( entry->q_hash == hashv ) {
  658. /*
  659. * Name compare too?
  660. */
  661. if ( name ) {
  662. if ( strcmp( entry->q_name, name ) == 0 ) {
  663. return entry;
  664. }
  665. } else {
  666. return entry;
  667. }
  668. }
  669. entry = entry->q_next;
  670. } while ( entry != hashbin->hb_queue[ bin ] );
  671. }
  672. return NULL;
  673. }
  674. EXPORT_SYMBOL(hashbin_find);
  675. /*
  676. * Function hashbin_lock_find (hashbin, hashv, name)
  677. *
  678. * Find item with the given hashv or name
  679. *
  680. * Same, but with spinlock protection...
  681. * I call it safe, but it's only safe with respect to the hashbin, not its
  682. * content. - Jean II
  683. */
  684. void* hashbin_lock_find( hashbin_t* hashbin, long hashv, const char* name )
  685. {
  686. unsigned long flags = 0;
  687. irda_queue_t* entry;
  688. /* Synchronize */
  689. spin_lock_irqsave(&hashbin->hb_spinlock, flags);
  690. /*
  691. * Search for entry
  692. */
  693. entry = hashbin_find(hashbin, hashv, name);
  694. /* Release lock */
  695. spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
  696. return entry;
  697. }
  698. EXPORT_SYMBOL(hashbin_lock_find);
  699. /*
  700. * Function hashbin_find (hashbin, hashv, name, pnext)
  701. *
  702. * Find an item with the given hashv or name, and its successor
  703. *
  704. * This function allow to do concurrent enumerations without the
  705. * need to lock over the whole session, because the caller keep the
  706. * context of the search. On the other hand, it might fail and return
  707. * NULL if the entry is removed. - Jean II
  708. */
  709. void* hashbin_find_next( hashbin_t* hashbin, long hashv, const char* name,
  710. void ** pnext)
  711. {
  712. unsigned long flags = 0;
  713. irda_queue_t* entry;
  714. /* Synchronize */
  715. spin_lock_irqsave(&hashbin->hb_spinlock, flags);
  716. /*
  717. * Search for current entry
  718. * This allow to check if the current item is still in the
  719. * hashbin or has been removed.
  720. */
  721. entry = hashbin_find(hashbin, hashv, name);
  722. /*
  723. * Trick hashbin_get_next() to return what we want
  724. */
  725. if(entry) {
  726. hashbin->hb_current = entry;
  727. *pnext = hashbin_get_next( hashbin );
  728. } else
  729. *pnext = NULL;
  730. /* Release lock */
  731. spin_unlock_irqrestore(&hashbin->hb_spinlock, flags);
  732. return entry;
  733. }
  734. /*
  735. * Function hashbin_get_first (hashbin)
  736. *
  737. * Get a pointer to first element in hashbin, this function must be
  738. * called before any calls to hashbin_get_next()!
  739. *
  740. */
  741. irda_queue_t *hashbin_get_first( hashbin_t* hashbin)
  742. {
  743. irda_queue_t *entry;
  744. int i;
  745. IRDA_ASSERT( hashbin != NULL, return NULL;);
  746. IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);
  747. if ( hashbin == NULL)
  748. return NULL;
  749. for ( i = 0; i < HASHBIN_SIZE; i ++ ) {
  750. entry = hashbin->hb_queue[ i];
  751. if ( entry) {
  752. hashbin->hb_current = entry;
  753. return entry;
  754. }
  755. }
  756. /*
  757. * Did not find any item in hashbin
  758. */
  759. return NULL;
  760. }
  761. EXPORT_SYMBOL(hashbin_get_first);
  762. /*
  763. * Function hashbin_get_next (hashbin)
  764. *
  765. * Get next item in hashbin. A series of hashbin_get_next() calls must
  766. * be started by a call to hashbin_get_first(). The function returns
  767. * NULL when all items have been traversed
  768. *
  769. * The context of the search is stored within the hashbin, so you must
  770. * protect yourself from concurrent enumerations. - Jean II
  771. */
/*
 * Advance the enumeration cursor (hb_current) and return the next entry,
 * or NULL when the traversal is complete. Must follow a successful
 * hashbin_get_first(). Unprotected call: the caller must keep the
 * hashbin locked for the whole enumeration (see header comment).
 */
irda_queue_t *hashbin_get_next( hashbin_t *hashbin)
{
	irda_queue_t* entry;
	int bin;
	int i;

	IRDA_ASSERT( hashbin != NULL, return NULL;);
	IRDA_ASSERT( hashbin->magic == HB_MAGIC, return NULL;);

	/* No cursor: enumeration was never started (or was invalidated
	 * by a removal). */
	if ( hashbin->hb_current == NULL) {
		IRDA_ASSERT( hashbin->hb_current != NULL, return NULL;);
		return NULL;
	}
	/* Successor of the cursor within its circular bin.
	 * NOTE(review): unsafe if hb_current was freed behind our back -
	 * hence the locking requirement above. */
	entry = hashbin->hb_current->q_next;
	bin = GET_HASHBIN( entry->q_hash);

	/*
	 *  Make sure that we are not back at the beginning of the queue
	 *  again
	 */
	if ( entry != hashbin->hb_queue[ bin ]) {
		hashbin->hb_current = entry;

		return entry;
	}

	/*
	 *  Check that this is not the last queue in hashbin
	 *  (NOTE(review): bin is at most HASHBIN_SIZE-1 here, so this
	 *  check can never trigger; kept as defensive code.)
	 */
	if ( bin >= HASHBIN_SIZE)
		return NULL;

	/*
	 *  Move to next queue in hashbin
	 */
	bin++;
	for ( i = bin; i < HASHBIN_SIZE; i++ ) {
		entry = hashbin->hb_queue[ i];
		if ( entry) {
			hashbin->hb_current = entry;

			return entry;
		}
	}
	return NULL;
}
EXPORT_SYMBOL(hashbin_get_next);