/*********************************************************************
 *
 * sir_kthread.c:	dedicated thread to process scheduled
 *			sir device setup requests
 *
 * Copyright (c) 2002 Martin Diehl
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; either version 2 of
 * the License, or (at your option) any later version.
 *
 ********************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>
#include <linux/delay.h>

#include <net/irda/irda.h>

#include "sir-dev.h"
/**************************************************************************
 *
 * kIrDAd kernel thread and config state machine
 *
 */

struct irda_request_queue {
	struct list_head request_list;
	spinlock_t lock;
	task_t *thread;
	struct completion exit;
	wait_queue_head_t kick, done;
	atomic_t num_pending;
};

static struct irda_request_queue irda_rq_queue;
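
/* All sir_dev instances share this single request queue and the kIrDAd
 * thread which drains it. A request's 'pending' bit guarantees it sits
 * on the list (or on a pending timer) at most once at any time.
 */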

static int irda_queue_request(struct irda_request *rq)
{
	int ret = 0;
	unsigned long flags;

	if (!test_and_set_bit(0, &rq->pending)) {
		spin_lock_irqsave(&irda_rq_queue.lock, flags);
		list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
		wake_up(&irda_rq_queue.kick);
		atomic_inc(&irda_rq_queue.num_pending);
		spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
		ret = 1;
	}
	return ret;
}
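
/* Timer callback for delayed requests: by the time the timer was armed in
 * irda_queue_delayed_request() the 'pending' bit was already set and
 * num_pending already incremented, so here we only move the request onto
 * the list and kick the thread.
 */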

static void irda_request_timer(unsigned long data)
{
	struct irda_request *rq = (struct irda_request *)data;
	unsigned long flags;

	spin_lock_irqsave(&irda_rq_queue.lock, flags);
	list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
	wake_up(&irda_rq_queue.kick);
	spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
}

static int irda_queue_delayed_request(struct irda_request *rq, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &rq->timer;

	if (!test_and_set_bit(0, &rq->pending)) {
		timer->expires = jiffies + delay;
		timer->function = irda_request_timer;
		timer->data = (unsigned long)rq;
		atomic_inc(&irda_rq_queue.num_pending);
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
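
/* Drain the request list. The queue lock is dropped around each rq->func()
 * call, so a handler runs in plain kIrDAd process context: it may sleep,
 * and since its 'pending' bit is cleared first it may requeue itself.
 * num_pending is decremented only after the handler returns, hence
 * flush_irda_queue() below cannot complete while a handler is running.
 */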

static void run_irda_queue(void)
{
	unsigned long flags;
	struct list_head *entry, *tmp;
	struct irda_request *rq;

	spin_lock_irqsave(&irda_rq_queue.lock, flags);
	list_for_each_safe(entry, tmp, &irda_rq_queue.request_list) {
		rq = list_entry(entry, struct irda_request, lh_request);
		list_del_init(entry);
		spin_unlock_irqrestore(&irda_rq_queue.lock, flags);

		clear_bit(0, &rq->pending);
		rq->func(rq->data);
		if (atomic_dec_and_test(&irda_rq_queue.num_pending))
			wake_up(&irda_rq_queue.done);

		spin_lock_irqsave(&irda_rq_queue.lock, flags);
	}
	spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
}
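
/* Main loop of kIrDAd. Shutdown is requested by irda_thread_join() setting
 * irda_rq_queue.thread to NULL and waking the 'kick' waitqueue; the loop
 * then exits and signals irda_rq_queue.exit via complete_and_exit().
 */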

static int irda_thread(void *startup)
{
	DECLARE_WAITQUEUE(wait, current);

	daemonize("kIrDAd");
	irda_rq_queue.thread = current;
	complete((struct completion *)startup);

	while (irda_rq_queue.thread != NULL) {

		/* We use TASK_INTERRUPTIBLE, rather than
		 * TASK_UNINTERRUPTIBLE. Andrew Morton made this
		 * change; he told me that it is safe, because "signal
		 * blocking is now handled in daemonize()", and added
		 * that the problem is that "uninterruptible sleep
		 * contributes to load average", making users worry.
		 * Jean II */
		set_task_state(current, TASK_INTERRUPTIBLE);
		add_wait_queue(&irda_rq_queue.kick, &wait);
		if (list_empty(&irda_rq_queue.request_list))
			schedule();
		else
			__set_task_state(current, TASK_RUNNING);
		remove_wait_queue(&irda_rq_queue.kick, &wait);

		/* make swsusp happy with our thread */
		try_to_freeze();

		run_irda_queue();
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,35)
	reparent_to_init();
#endif
	complete_and_exit(&irda_rq_queue.exit, 0);
	/* never reached */
	return 0;
}
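
/* Wait until all pending requests have been executed. Called from
 * irda_thread_join() at module exit, i.e. from ordinary process context,
 * so we may run the queue ourselves and sleep on the 'done' waitqueue.
 */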

static void flush_irda_queue(void)
{
	if (atomic_read(&irda_rq_queue.num_pending)) {

		DECLARE_WAITQUEUE(wait, current);
		if (!list_empty(&irda_rq_queue.request_list))
			run_irda_queue();

		set_task_state(current, TASK_UNINTERRUPTIBLE);
		add_wait_queue(&irda_rq_queue.done, &wait);
		if (atomic_read(&irda_rq_queue.num_pending))
			schedule();
		else
			__set_task_state(current, TASK_RUNNING);
		remove_wait_queue(&irda_rq_queue.done, &wait);
	}
}

/* substate handler of the config-fsm to handle the cases where we want
 * to wait for transmit completion before changing the port configuration
 */

static int irda_tx_complete_fsm(struct sir_dev *dev)
{
	struct sir_fsm *fsm = &dev->fsm;
	unsigned next_state, delay;
	unsigned bytes_left;

	do {
		next_state = fsm->substate;	/* default: stay in current substate */
		delay = 0;

		switch(fsm->substate) {

		case SIRDEV_STATE_WAIT_XMIT:
			if (dev->drv->chars_in_buffer)
				bytes_left = dev->drv->chars_in_buffer(dev);
			else
				bytes_left = 0;
			if (!bytes_left) {
				next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
				break;
			}

			if (dev->speed > 115200)
				delay = (bytes_left*8*10000) / (dev->speed/100);
			else if (dev->speed > 0)
				delay = (bytes_left*10*10000) / (dev->speed/100);
			else
				delay = 0;
			/* expected delay (usec) until remaining bytes are sent */
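			/* Worked example (illustrative numbers): at SIR
			 * speeds each byte takes 10 bit times (start + 8
			 * data + stop), so 12 bytes left at 115200 baud
			 * give (12*10*10000)/(115200/100) = 1041 usec.
			 * That is >= 100, so below we round up and sleep
			 * 2 msec instead of busy-waiting in udelay(). The
			 * > 115200 branch assumes 8 bit times per byte,
			 * presumably because faster framings carry no
			 * start/stop bits.
			 */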
			if (delay < 100) {
				udelay(delay);
				delay = 0;
				break;
			}
			/* sleep some longer delay (msec) */
			delay = (delay+999) / 1000;
			break;

		case SIRDEV_STATE_WAIT_UNTIL_SENT:
			/* block until the underlying hardware buffers are empty */
			if (dev->drv->wait_until_sent)
				dev->drv->wait_until_sent(dev);
			next_state = SIRDEV_STATE_TX_DONE;
			break;

		case SIRDEV_STATE_TX_DONE:
			return 0;

		default:
			IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
			return -EINVAL;
		}
		fsm->substate = next_state;
	} while (delay == 0);
	return delay;
}

/*
 * Function irda_config_fsm
 *
 * State machine to handle the configuration of the device (and attached
 * dongle, if any). This handler is scheduled for execution in kIrDAd
 * context, so we can sleep. However, kIrDAd is shared by all sir_dev
 * devices, so we had better not sleep there too long. Instead, for longer
 * delays we start a timer to reschedule us later.
 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
 * Both must be unlocked/restarted on completion - but only on final exit.
 */
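
/* For orientation, the typical path of a speed-change request through the
 * states below is:
 *
 *	SET_SPEED -> DONGLE_CHECK -> [DONGLE_RESET -> DONGLE_SPEED]
 *		  -> PORT_SPEED -> DONE -> COMPLETE
 *
 * where the bracketed dongle states are entered only when a dongle driver
 * is attached; otherwise DONGLE_CHECK goes straight to PORT_SPEED.
 */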

static void irda_config_fsm(void *data)
{
	struct sir_dev *dev = data;
	struct sir_fsm *fsm = &dev->fsm;
	int next_state;
	int ret = -1;
	unsigned delay;

	IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);

	do {
		IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
			__FUNCTION__, fsm->state, fsm->substate);

		next_state = fsm->state;
		delay = 0;

		switch(fsm->state) {

		case SIRDEV_STATE_DONGLE_OPEN:
			if (dev->dongle_drv != NULL) {
				ret = sirdev_put_dongle(dev);
				if (ret) {
					fsm->result = -EINVAL;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}

			/* Initialize dongle */
			ret = sirdev_get_dongle(dev, fsm->param);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			/* Dongles are powered through the modem control lines which
			 * were just set during open. Before resetting, let's wait for
			 * the power to stabilize. This is what some dongle drivers did
			 * in open before, while others didn't - should be safe anyway.
			 */

			delay = 50;
			fsm->substate = SIRDEV_STATE_DONGLE_RESET;
			next_state = SIRDEV_STATE_DONGLE_RESET;

			fsm->param = 9600;

			break;

		case SIRDEV_STATE_DONGLE_CLOSE:
			/* shouldn't we just treat this as success? */
			if (dev->dongle_drv == NULL) {
				fsm->result = -EINVAL;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			ret = sirdev_put_dongle(dev);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_DTR_RTS:
			ret = sirdev_set_dtr_rts(dev,
				(fsm->param&0x02) ? TRUE : FALSE,
				(fsm->param&0x01) ? TRUE : FALSE);
			next_state = SIRDEV_STATE_DONE;
			break;
		case SIRDEV_STATE_SET_SPEED:
			fsm->substate = SIRDEV_STATE_WAIT_XMIT;
			next_state = SIRDEV_STATE_DONGLE_CHECK;
			break;

		case SIRDEV_STATE_DONGLE_CHECK:
			ret = irda_tx_complete_fsm(dev);
			if (ret < 0) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			if ((delay=ret) != 0)
				break;

			if (dev->dongle_drv) {
				fsm->substate = SIRDEV_STATE_DONGLE_RESET;
				next_state = SIRDEV_STATE_DONGLE_RESET;
			}
			else {
				dev->speed = fsm->param;
				next_state = SIRDEV_STATE_PORT_SPEED;
			}
			break;
		case SIRDEV_STATE_DONGLE_RESET:
			if (dev->dongle_drv->reset) {
				ret = dev->dongle_drv->reset(dev);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;

			if ((delay=ret) == 0) {
				/* set serial port according to dongle default speed */
				if (dev->drv->set_speed)
					dev->drv->set_speed(dev, dev->speed);
				fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
				next_state = SIRDEV_STATE_DONGLE_SPEED;
			}
			break;
		case SIRDEV_STATE_DONGLE_SPEED:
			if (dev->dongle_drv->set_speed) {
				ret = dev->dongle_drv->set_speed(dev, fsm->param);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;

			if ((delay=ret) == 0)
				next_state = SIRDEV_STATE_PORT_SPEED;
			break;
		case SIRDEV_STATE_PORT_SPEED:
			/* Finally we are ready to change the serial port speed */
			if (dev->drv->set_speed)
				dev->drv->set_speed(dev, dev->speed);
			dev->new_speed = 0;
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_DONE:
			/* Signal network layer so it can send more frames */
			netif_wake_queue(dev->netdev);
			next_state = SIRDEV_STATE_COMPLETE;
			break;

		default:
			IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
			fsm->result = -EINVAL;
			/* fall thru */

		case SIRDEV_STATE_ERROR:
			IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);

#if 0	/* don't enable this before we have netdev->tx_timeout to recover */
			netif_stop_queue(dev->netdev);
#else
			netif_wake_queue(dev->netdev);
#endif
			/* fall thru */

		case SIRDEV_STATE_COMPLETE:
			/* config change finished, so we are not busy any longer */
			sirdev_enable_rx(dev);
			up(&fsm->sem);
			return;
		}
		fsm->state = next_state;
	} while(!delay);

	irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay));
}

/* schedule some device configuration task for execution by kIrDAd
 * on behalf of the above state machine.
 * can be called from process or interrupt/tasklet context.
 */
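
/* Usage sketch (illustrative, not a call made in this file): a driver
 * that wants to switch the line to 9600 baud would do something like
 *
 *	err = sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, 9600);
 *
 * and retry later on -EWOULDBLOCK, which is returned when the state
 * machine is busy and we cannot sleep on fsm->sem.
 */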

int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
{
	struct sir_fsm *fsm = &dev->fsm;
	int xmit_was_down;

	IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);

	if (down_trylock(&fsm->sem)) {
		if (in_interrupt() || in_atomic() || irqs_disabled()) {
			IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
			return -EWOULDBLOCK;
		} else
			down(&fsm->sem);
	}

	if (fsm->state == SIRDEV_STATE_DEAD) {
		/* race with sirdev_close should never happen */
		IRDA_ERROR("%s(), instance is stale!\n", __FUNCTION__);
		up(&fsm->sem);
		return -ESTALE;		/* or better EPIPE? */
	}

	xmit_was_down = netif_queue_stopped(dev->netdev);
	netif_stop_queue(dev->netdev);
	atomic_set(&dev->enable_rx, 0);

	fsm->state = initial_state;
	fsm->param = param;
	fsm->result = 0;

	INIT_LIST_HEAD(&fsm->rq.lh_request);
	fsm->rq.pending = 0;
	fsm->rq.func = irda_config_fsm;
	fsm->rq.data = dev;

	if (!irda_queue_request(&fsm->rq)) {	/* returns 0 on error! */
		atomic_set(&dev->enable_rx, 1);
		if (!xmit_was_down)
			netif_wake_queue(dev->netdev);
		up(&fsm->sem);
		return -EAGAIN;
	}
	return 0;
}
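
/* Module init: set up the request queue and start kIrDAd. The 'startup'
 * completion makes sure we do not return before the thread has finished
 * daemonize() and published itself in irda_rq_queue.thread.
 */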

static int __init irda_thread_create(void)
{
	struct completion startup;
	int pid;

	spin_lock_init(&irda_rq_queue.lock);
	irda_rq_queue.thread = NULL;
	INIT_LIST_HEAD(&irda_rq_queue.request_list);
	init_waitqueue_head(&irda_rq_queue.kick);
	init_waitqueue_head(&irda_rq_queue.done);
	atomic_set(&irda_rq_queue.num_pending, 0);

	init_completion(&startup);
	pid = kernel_thread(irda_thread, &startup, CLONE_FS|CLONE_FILES);
	if (pid <= 0)
		return -EAGAIN;
	else
		wait_for_completion(&startup);

	return 0;
}

static void __exit irda_thread_join(void)
{
	if (irda_rq_queue.thread) {
		flush_irda_queue();
		init_completion(&irda_rq_queue.exit);
		irda_rq_queue.thread = NULL;
		wake_up(&irda_rq_queue.kick);
		wait_for_completion(&irda_rq_queue.exit);
	}
}

module_init(irda_thread_create);
module_exit(irda_thread_join);

MODULE_AUTHOR("Martin Diehl <info@mdiehl.de>");
MODULE_DESCRIPTION("IrDA SIR core");
MODULE_LICENSE("GPL");