/*********************************************************************
 *
 *	sir_kthread.c:		dedicated thread to process scheduled
 *				sir device setup requests
 *
 *	Copyright (c) 2002 Martin Diehl
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License as
 *	published by the Free Software Foundation; either version 2 of
 *	the License, or (at your option) any later version.
 *
 ********************************************************************/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/version.h>
#include <linux/init.h>
#include <linux/smp_lock.h>
#include <linux/completion.h>
#include <linux/delay.h>

#include <net/irda/irda.h>

#include "sir-dev.h"

/**************************************************************************
 *
 * kIrDAd kernel thread and config state machine
 *
 */
struct irda_request_queue {
	struct list_head request_list;
	spinlock_t lock;
	task_t *thread;
	struct completion exit;
	wait_queue_head_t kick, done;
	atomic_t num_pending;
};

static struct irda_request_queue irda_rq_queue;
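
/* Enqueue a request for kIrDAd and kick the thread.
 *
 * The rq->pending bit makes queueing idempotent: we return 1 if the
 * request was newly queued, 0 if it was already pending.
 */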
static int irda_queue_request(struct irda_request *rq)
{
	int ret = 0;
	unsigned long flags;

	if (!test_and_set_bit(0, &rq->pending)) {
		spin_lock_irqsave(&irda_rq_queue.lock, flags);
		list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
		wake_up(&irda_rq_queue.kick);
		atomic_inc(&irda_rq_queue.num_pending);
		spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
		ret = 1;
	}
	return ret;
}
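
/* Timer callback for delayed requests: move the request onto the run
 * queue and kick kIrDAd. num_pending was already incremented when the
 * timer was armed in irda_queue_delayed_request(), so it is not
 * touched here.
 */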
static void irda_request_timer(unsigned long data)
{
	struct irda_request *rq = (struct irda_request *)data;
	unsigned long flags;

	spin_lock_irqsave(&irda_rq_queue.lock, flags);
	list_add_tail(&rq->lh_request, &irda_rq_queue.request_list);
	wake_up(&irda_rq_queue.kick);
	spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
}
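
/* Like irda_queue_request(), but deliver the request to kIrDAd only
 * after <delay> jiffies. The typical caller is a state machine handler
 * that wants to be re-run later, e.g. the tail of irda_config_fsm():
 *
 *	irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay));
 */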
static int irda_queue_delayed_request(struct irda_request *rq, unsigned long delay)
{
	int ret = 0;
	struct timer_list *timer = &rq->timer;

	if (!test_and_set_bit(0, &rq->pending)) {
		timer->expires = jiffies + delay;
		timer->function = irda_request_timer;
		timer->data = (unsigned long)rq;
		atomic_inc(&irda_rq_queue.num_pending);
		add_timer(timer);
		ret = 1;
	}
	return ret;
}
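
/* Run all currently queued requests. The queue lock is dropped around
 * each rq->func() call so handlers may sleep or queue further requests;
 * the pending bit is cleared first so a handler can even requeue itself.
 */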
static void run_irda_queue(void)
{
	unsigned long flags;
	struct list_head *entry, *tmp;
	struct irda_request *rq;

	spin_lock_irqsave(&irda_rq_queue.lock, flags);
	list_for_each_safe(entry, tmp, &irda_rq_queue.request_list) {
		rq = list_entry(entry, struct irda_request, lh_request);
		list_del_init(entry);
		spin_unlock_irqrestore(&irda_rq_queue.lock, flags);

		clear_bit(0, &rq->pending);
		rq->func(rq->data);

		if (atomic_dec_and_test(&irda_rq_queue.num_pending))
			wake_up(&irda_rq_queue.done);

		spin_lock_irqsave(&irda_rq_queue.lock, flags);
	}
	spin_unlock_irqrestore(&irda_rq_queue.lock, flags);
}
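
/* Main loop of the kIrDAd kernel thread: sleep on the kick waitqueue
 * until requests arrive, run them, repeat. The loop terminates when
 * irda_thread_join() clears irda_rq_queue.thread and wakes us up.
 */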
static int irda_thread(void *startup)
{
	DECLARE_WAITQUEUE(wait, current);

	daemonize("kIrDAd");

	irda_rq_queue.thread = current;

	complete((struct completion *)startup);

	while (irda_rq_queue.thread != NULL) {

		/* We use TASK_INTERRUPTIBLE, rather than
		 * TASK_UNINTERRUPTIBLE. Andrew Morton made this
		 * change; he told me that it is safe, because "signal
		 * blocking is now handled in daemonize()"; he added
		 * that the problem is that "uninterruptible sleep
		 * contributes to load average", making users worry.
		 * Jean II */
		set_task_state(current, TASK_INTERRUPTIBLE);
		add_wait_queue(&irda_rq_queue.kick, &wait);
		if (list_empty(&irda_rq_queue.request_list))
			schedule();
		else
			__set_task_state(current, TASK_RUNNING);
		remove_wait_queue(&irda_rq_queue.kick, &wait);

		/* make swsusp happy with our thread */
		if (current->flags & PF_FREEZE)
			refrigerator(PF_FREEZE);

		run_irda_queue();
	}

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,5,35)
	reparent_to_init();
#endif
	complete_and_exit(&irda_rq_queue.exit, 0);
	/* never reached */
	return 0;
}
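
/* Drain the request queue before shutting down kIrDAd: run anything
 * still listed in caller context, then sleep on the done waitqueue
 * until num_pending drops to zero (delayed requests may still be
 * sitting on their timers and are counted there too).
 */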
static void flush_irda_queue(void)
{
	if (atomic_read(&irda_rq_queue.num_pending)) {

		DECLARE_WAITQUEUE(wait, current);

		if (!list_empty(&irda_rq_queue.request_list))
			run_irda_queue();

		set_task_state(current, TASK_UNINTERRUPTIBLE);
		add_wait_queue(&irda_rq_queue.done, &wait);
		if (atomic_read(&irda_rq_queue.num_pending))
			schedule();
		else
			__set_task_state(current, TASK_RUNNING);
		remove_wait_queue(&irda_rq_queue.done, &wait);
	}
}

/* substate handler of the config-fsm to handle the cases where we want
 * to wait for transmit completion before changing the port configuration
 */
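/* Return convention (as relied upon by irda_config_fsm below):
 * 0 when the tx path is fully drained, a negative errno on failure,
 * or a positive number of msecs the caller should wait before
 * re-running the state machine.
 */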
static int irda_tx_complete_fsm(struct sir_dev *dev)
{
	struct sir_fsm *fsm = &dev->fsm;
	unsigned next_state, delay;
	unsigned bytes_left;

	do {
		next_state = fsm->substate;	/* default: stay in current substate */
		delay = 0;

		switch(fsm->substate) {

		case SIRDEV_STATE_WAIT_XMIT:
			if (dev->drv->chars_in_buffer)
				bytes_left = dev->drv->chars_in_buffer(dev);
			else
				bytes_left = 0;
			if (!bytes_left) {
				next_state = SIRDEV_STATE_WAIT_UNTIL_SENT;
				break;
			}

			/* at SIR speeds each byte costs 10 bit times (8N1
			 * framing); above 115200 we assume 8 bits per byte.
			 * The *10000 / (speed/100) form computes usecs
			 * without overflowing 32 bits.
			 */
			if (dev->speed > 115200)
				delay = (bytes_left*8*10000) / (dev->speed/100);
			else if (dev->speed > 0)
				delay = (bytes_left*10*10000) / (dev->speed/100);
			else
				delay = 0;
			/* expected delay (usec) until remaining bytes are sent */
			if (delay < 100) {
				udelay(delay);
				delay = 0;
				break;
			}
			/* sleep some longer delay (msec) */
			delay = (delay+999) / 1000;
			break;

		case SIRDEV_STATE_WAIT_UNTIL_SENT:
			/* block until the underlying hardware buffers are empty */
			if (dev->drv->wait_until_sent)
				dev->drv->wait_until_sent(dev);
			next_state = SIRDEV_STATE_TX_DONE;
			break;

		case SIRDEV_STATE_TX_DONE:
			return 0;

		default:
			IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
			return -EINVAL;
		}
		fsm->substate = next_state;
	} while (delay == 0);
	return delay;
}

/*
 * Function irda_config_fsm
 *
 * State machine to handle the configuration of the device (and attached
 * dongle, if any). This handler is scheduled for execution in kIrDAd
 * context, so we can sleep. However, kIrDAd is shared by all sir_dev
 * devices, so we had better not sleep there too long. Instead, for
 * longer delays we start a timer to reschedule us later.
 * On entry, fsm->sem is always locked and the netdev xmit queue stopped.
 * Both must be unlocked/restarted on completion - but only on final exit.
 */
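/* For orientation, the state sequence driven below for a speed change
 * (SIRDEV_STATE_SET_SPEED) is:
 *
 *	SET_SPEED -> DONGLE_CHECK -> [DONGLE_RESET -> DONGLE_SPEED]
 *		  -> PORT_SPEED -> DONE -> COMPLETE
 *
 * where the bracketed dongle states are skipped when no dongle driver
 * is attached.
 */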
static void irda_config_fsm(void *data)
{
	struct sir_dev *dev = data;
	struct sir_fsm *fsm = &dev->fsm;
	int next_state;
	int ret = -1;
	unsigned delay;

	IRDA_DEBUG(2, "%s(), <%ld>\n", __FUNCTION__, jiffies);

	do {
		IRDA_DEBUG(3, "%s - state=0x%04x / substate=0x%04x\n",
			__FUNCTION__, fsm->state, fsm->substate);

		next_state = fsm->state;
		delay = 0;

		switch(fsm->state) {

		case SIRDEV_STATE_DONGLE_OPEN:
			if (dev->dongle_drv != NULL) {
				ret = sirdev_put_dongle(dev);
				if (ret) {
					fsm->result = -EINVAL;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}

			/* Initialize dongle */
			ret = sirdev_get_dongle(dev, fsm->param);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			/* Dongles are powered through the modem control lines which
			 * were just set during open. Before resetting, let's wait for
			 * the power to stabilize. This is what some dongle drivers did
			 * in open before, while others didn't - should be safe anyway.
			 */
			delay = 50;
			fsm->substate = SIRDEV_STATE_DONGLE_RESET;
			next_state = SIRDEV_STATE_DONGLE_RESET;

			fsm->param = 9600;
			break;

		case SIRDEV_STATE_DONGLE_CLOSE:
			/* shouldn't we just treat this as success? */
			if (dev->dongle_drv == NULL) {
				fsm->result = -EINVAL;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}

			ret = sirdev_put_dongle(dev);
			if (ret) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_DTR_RTS:
			ret = sirdev_set_dtr_rts(dev,
				(fsm->param&0x02) ? TRUE : FALSE,
				(fsm->param&0x01) ? TRUE : FALSE);
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_SET_SPEED:
			fsm->substate = SIRDEV_STATE_WAIT_XMIT;
			next_state = SIRDEV_STATE_DONGLE_CHECK;
			break;

		case SIRDEV_STATE_DONGLE_CHECK:
			ret = irda_tx_complete_fsm(dev);
			if (ret < 0) {
				fsm->result = ret;
				next_state = SIRDEV_STATE_ERROR;
				break;
			}
			if ((delay=ret) != 0)
				break;

			if (dev->dongle_drv) {
				fsm->substate = SIRDEV_STATE_DONGLE_RESET;
				next_state = SIRDEV_STATE_DONGLE_RESET;
			}
			else {
				dev->speed = fsm->param;
				next_state = SIRDEV_STATE_PORT_SPEED;
			}
			break;

		case SIRDEV_STATE_DONGLE_RESET:
			if (dev->dongle_drv->reset) {
				ret = dev->dongle_drv->reset(dev);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;

			if ((delay=ret) == 0) {
				/* set serial port according to dongle default speed */
				if (dev->drv->set_speed)
					dev->drv->set_speed(dev, dev->speed);
				fsm->substate = SIRDEV_STATE_DONGLE_SPEED;
				next_state = SIRDEV_STATE_DONGLE_SPEED;
			}
			break;

		case SIRDEV_STATE_DONGLE_SPEED:
			if (dev->dongle_drv->set_speed) {
				ret = dev->dongle_drv->set_speed(dev, fsm->param);
				if (ret < 0) {
					fsm->result = ret;
					next_state = SIRDEV_STATE_ERROR;
					break;
				}
			}
			else
				ret = 0;

			if ((delay=ret) == 0)
				next_state = SIRDEV_STATE_PORT_SPEED;
			break;

		case SIRDEV_STATE_PORT_SPEED:
			/* Finally we are ready to change the serial port speed */
			if (dev->drv->set_speed)
				dev->drv->set_speed(dev, dev->speed);
			dev->new_speed = 0;
			next_state = SIRDEV_STATE_DONE;
			break;

		case SIRDEV_STATE_DONE:
			/* Signal network layer so it can send more frames */
			netif_wake_queue(dev->netdev);
			next_state = SIRDEV_STATE_COMPLETE;
			break;

		default:
			IRDA_ERROR("%s - undefined state\n", __FUNCTION__);
			fsm->result = -EINVAL;
			/* fall thru */

		case SIRDEV_STATE_ERROR:
			IRDA_ERROR("%s - error: %d\n", __FUNCTION__, fsm->result);

#if 0	/* don't enable this before we have netdev->tx_timeout to recover */
			netif_stop_queue(dev->netdev);
#else
			netif_wake_queue(dev->netdev);
#endif
			/* fall thru */

		case SIRDEV_STATE_COMPLETE:
			/* config change finished, so we are not busy any longer */
			sirdev_enable_rx(dev);
			up(&fsm->sem);
			return;
		}
		fsm->state = next_state;
	} while(!delay);

	irda_queue_delayed_request(&fsm->rq, msecs_to_jiffies(delay));
}

/* Schedule some device configuration task for execution by kIrDAd
 * on behalf of the above state machine.
 * May be called from process or interrupt/tasklet context.
 */
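/* Usage sketch (illustrative only; this exact call site is hypothetical,
 * not taken from a particular driver): a sir driver that must renegotiate
 * the line speed from its xmit path would defer the work to kIrDAd
 * roughly like
 *
 *	err = sirdev_schedule_request(dev, SIRDEV_STATE_SET_SPEED, new_speed);
 *	if (err)
 *		handle busy/dead fsm (-EWOULDBLOCK, -ESTALE or -EAGAIN)
 *
 * and let irda_config_fsm() above do any sleeping on its behalf.
 */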
int sirdev_schedule_request(struct sir_dev *dev, int initial_state, unsigned param)
{
	struct sir_fsm *fsm = &dev->fsm;
	int xmit_was_down;

	IRDA_DEBUG(2, "%s - state=0x%04x / param=%u\n", __FUNCTION__, initial_state, param);

	if (down_trylock(&fsm->sem)) {
		if (in_interrupt() || in_atomic() || irqs_disabled()) {
			IRDA_DEBUG(1, "%s(), state machine busy!\n", __FUNCTION__);
			return -EWOULDBLOCK;
		} else
			down(&fsm->sem);
	}

	if (fsm->state == SIRDEV_STATE_DEAD) {
		/* race with sirdev_close should never happen */
		IRDA_ERROR("%s(), stale instance!\n", __FUNCTION__);
		up(&fsm->sem);
		return -ESTALE;		/* or better EPIPE? */
	}

	xmit_was_down = netif_queue_stopped(dev->netdev);
	netif_stop_queue(dev->netdev);
	atomic_set(&dev->enable_rx, 0);

	fsm->state = initial_state;
	fsm->param = param;
	fsm->result = 0;

	INIT_LIST_HEAD(&fsm->rq.lh_request);
	fsm->rq.pending = 0;
	fsm->rq.func = irda_config_fsm;
	fsm->rq.data = dev;

	if (!irda_queue_request(&fsm->rq)) {	/* returns 0 on error! */
		atomic_set(&dev->enable_rx, 1);
		if (!xmit_was_down)
			netif_wake_queue(dev->netdev);
		up(&fsm->sem);
		return -EAGAIN;
	}
	return 0;
}
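
/* Module init helper: set up the request queue and spawn kIrDAd,
 * waiting until the thread has announced itself via the startup
 * completion before we return.
 */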
int __init irda_thread_create(void)
{
	struct completion startup;
	int pid;

	spin_lock_init(&irda_rq_queue.lock);
	irda_rq_queue.thread = NULL;
	INIT_LIST_HEAD(&irda_rq_queue.request_list);
	init_waitqueue_head(&irda_rq_queue.kick);
	init_waitqueue_head(&irda_rq_queue.done);
	atomic_set(&irda_rq_queue.num_pending, 0);

	init_completion(&startup);
	pid = kernel_thread(irda_thread, &startup, CLONE_FS|CLONE_FILES);
	if (pid <= 0)
		return -EAGAIN;
	else
		wait_for_completion(&startup);

	return 0;
}
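
/* Module exit helper: drain any outstanding requests, then make the
 * kIrDAd loop terminate. Clearing irda_rq_queue.thread is the loop's
 * exit condition (see irda_thread() above); the wake_up() gets the
 * thread out of its sleep so it can notice and complete .exit.
 */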
void __exit irda_thread_join(void)
{
	if (irda_rq_queue.thread) {
		flush_irda_queue();
		init_completion(&irda_rq_queue.exit);
		irda_rq_queue.thread = NULL;
		wake_up(&irda_rq_queue.kick);
		wait_for_completion(&irda_rq_queue.exit);
	}
}