/* krxiod.c: Rx I/O daemon
 *
 * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
  11. #include <linux/sched.h>
  12. #include <linux/completion.h>
  13. #include <linux/spinlock.h>
  14. #include <linux/init.h>
  15. #include <rxrpc/krxiod.h>
  16. #include <rxrpc/transport.h>
  17. #include <rxrpc/peer.h>
  18. #include <rxrpc/call.h>
  19. #include "internal.h"
/* wait queue the daemon sleeps on until given work or told to die */
static DECLARE_WAIT_QUEUE_HEAD(rxrpc_krxiod_sleepq);

/* completed by the daemon as it exits, so rxrpc_krxiod_kill() can wait */
static DECLARE_COMPLETION(rxrpc_krxiod_dead);

/* total number of items (transports + calls) queued for attention */
static atomic_t rxrpc_krxiod_qcount = ATOMIC_INIT(0);

/* transports awaiting attention, guarded by the lock that follows */
static LIST_HEAD(rxrpc_krxiod_transportq);
static DEFINE_SPINLOCK(rxrpc_krxiod_transportq_lock);

/* calls awaiting attention, guarded by the lock that follows */
static LIST_HEAD(rxrpc_krxiod_callq);
static DEFINE_SPINLOCK(rxrpc_krxiod_callq_lock);

/* exit request flag: written by rxrpc_krxiod_kill(), polled by the daemon */
static volatile int rxrpc_krxiod_die;
/*****************************************************************************/
/*
 * Rx I/O daemon
 * - sleeps until rxrpc_krxiod_qcount indicates queued work, the die flag is
 *   set, or a signal is pending, then services at most one transport and one
 *   call per pass around the loop
 * - @arg is unused
 */
static int rxrpc_krxiod(void *arg)
{
	DECLARE_WAITQUEUE(krxiod, current);

	printk("Started krxiod %d\n", current->pid);

	daemonize("krxiod");

	/* loop around waiting for work to do */
	do {
		/* wait for work or to be told to exit */
		_debug("### Begin Wait");
		if (!atomic_read(&rxrpc_krxiod_qcount)) {
			set_current_state(TASK_INTERRUPTIBLE);

			add_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);

			for (;;) {
				/* state must be reset before each re-test to
				 * avoid a lost wakeup between the test and
				 * schedule() */
				set_current_state(TASK_INTERRUPTIBLE);
				if (atomic_read(&rxrpc_krxiod_qcount) ||
				    rxrpc_krxiod_die ||
				    signal_pending(current))
					break;

				schedule();
			}

			remove_wait_queue(&rxrpc_krxiod_sleepq, &krxiod);
			set_current_state(TASK_RUNNING);
		}
		_debug("### End Wait");

		/* do work if been given some to do */
		_debug("### Begin Work");

		/* see if there's a transport in need of attention */
		if (!list_empty(&rxrpc_krxiod_transportq)) {
			struct rxrpc_transport *trans = NULL;

			spin_lock_irq(&rxrpc_krxiod_transportq_lock);

			/* the unlocked test above is only a hint; re-test
			 * under the lock before dequeuing */
			if (!list_empty(&rxrpc_krxiod_transportq)) {
				trans = list_entry(
					rxrpc_krxiod_transportq.next,
					struct rxrpc_transport,
					krxiodq_link);

				list_del_init(&trans->krxiodq_link);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&trans->usage) > 0)
					rxrpc_get_transport(trans);
				else
					trans = NULL;
			}

			spin_unlock_irq(&rxrpc_krxiod_transportq_lock);

			/* service it outside the lock, holding our own ref */
			if (trans) {
				rxrpc_trans_receive_packet(trans);
				rxrpc_put_transport(trans);
			}
		}

		/* see if there's a call in need of attention */
		if (!list_empty(&rxrpc_krxiod_callq)) {
			struct rxrpc_call *call = NULL;

			spin_lock_irq(&rxrpc_krxiod_callq_lock);

			/* the unlocked test above is only a hint; re-test
			 * under the lock before dequeuing */
			if (!list_empty(&rxrpc_krxiod_callq)) {
				call = list_entry(rxrpc_krxiod_callq.next,
						  struct rxrpc_call,
						  rcv_krxiodq_lk);
				list_del_init(&call->rcv_krxiodq_lk);
				atomic_dec(&rxrpc_krxiod_qcount);

				/* make sure it hasn't gone away and doesn't go
				 * away */
				if (atomic_read(&call->usage) > 0) {
					_debug("@@@ KRXIOD"
					       " Begin Attend Call %p", call);
					rxrpc_get_call(call);
				}
				else {
					call = NULL;
				}
			}

			spin_unlock_irq(&rxrpc_krxiod_callq_lock);

			/* service it outside the lock, holding our own ref */
			if (call) {
				rxrpc_call_do_stuff(call);
				rxrpc_put_call(call);
				_debug("@@@ KRXIOD End Attend Call %p", call);
			}
		}

		_debug("### End Work");

		try_to_freeze();

		/* discard pending signals */
		rxrpc_discard_my_signals();

	} while (!rxrpc_krxiod_die);

	/* and that's all */
	complete_and_exit(&rxrpc_krxiod_dead, 0);

} /* end rxrpc_krxiod() */
  118. /*****************************************************************************/
  119. /*
  120. * start up a krxiod daemon
  121. */
  122. int __init rxrpc_krxiod_init(void)
  123. {
  124. return kernel_thread(rxrpc_krxiod, NULL, 0);
  125. } /* end rxrpc_krxiod_init() */
/*****************************************************************************/
/*
 * kill the krxiod daemon and wait for it to complete
 */
void rxrpc_krxiod_kill(void)
{
	/* the flag must be set before the wakeup so the daemon sees it when
	 * it re-tests its exit conditions after being woken */
	rxrpc_krxiod_die = 1;
	wake_up_all(&rxrpc_krxiod_sleepq);
	/* block until the daemon signals completion on its way out */
	wait_for_completion(&rxrpc_krxiod_dead);

} /* end rxrpc_krxiod_kill() */
  136. /*****************************************************************************/
  137. /*
  138. * queue a transport for attention by krxiod
  139. */
  140. void rxrpc_krxiod_queue_transport(struct rxrpc_transport *trans)
  141. {
  142. unsigned long flags;
  143. _enter("");
  144. if (list_empty(&trans->krxiodq_link)) {
  145. spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
  146. if (list_empty(&trans->krxiodq_link)) {
  147. if (atomic_read(&trans->usage) > 0) {
  148. list_add_tail(&trans->krxiodq_link,
  149. &rxrpc_krxiod_transportq);
  150. atomic_inc(&rxrpc_krxiod_qcount);
  151. }
  152. }
  153. spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
  154. wake_up_all(&rxrpc_krxiod_sleepq);
  155. }
  156. _leave("");
  157. } /* end rxrpc_krxiod_queue_transport() */
  158. /*****************************************************************************/
  159. /*
  160. * dequeue a transport from krxiod's attention queue
  161. */
  162. void rxrpc_krxiod_dequeue_transport(struct rxrpc_transport *trans)
  163. {
  164. unsigned long flags;
  165. _enter("");
  166. spin_lock_irqsave(&rxrpc_krxiod_transportq_lock, flags);
  167. if (!list_empty(&trans->krxiodq_link)) {
  168. list_del_init(&trans->krxiodq_link);
  169. atomic_dec(&rxrpc_krxiod_qcount);
  170. }
  171. spin_unlock_irqrestore(&rxrpc_krxiod_transportq_lock, flags);
  172. _leave("");
  173. } /* end rxrpc_krxiod_dequeue_transport() */
  174. /*****************************************************************************/
  175. /*
  176. * queue a call for attention by krxiod
  177. */
  178. void rxrpc_krxiod_queue_call(struct rxrpc_call *call)
  179. {
  180. unsigned long flags;
  181. if (list_empty(&call->rcv_krxiodq_lk)) {
  182. spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
  183. if (atomic_read(&call->usage) > 0) {
  184. list_add_tail(&call->rcv_krxiodq_lk,
  185. &rxrpc_krxiod_callq);
  186. atomic_inc(&rxrpc_krxiod_qcount);
  187. }
  188. spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
  189. }
  190. wake_up_all(&rxrpc_krxiod_sleepq);
  191. } /* end rxrpc_krxiod_queue_call() */
  192. /*****************************************************************************/
  193. /*
  194. * dequeue a call from krxiod's attention queue
  195. */
  196. void rxrpc_krxiod_dequeue_call(struct rxrpc_call *call)
  197. {
  198. unsigned long flags;
  199. spin_lock_irqsave(&rxrpc_krxiod_callq_lock, flags);
  200. if (!list_empty(&call->rcv_krxiodq_lk)) {
  201. list_del_init(&call->rcv_krxiodq_lk);
  202. atomic_dec(&rxrpc_krxiod_qcount);
  203. }
  204. spin_unlock_irqrestore(&rxrpc_krxiod_callq_lock, flags);
  205. } /* end rxrpc_krxiod_dequeue_call() */