/*
 *
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */
#include <linux/module.h>
#include <linux/mISDNhw.h>

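/*
 * dchannel_bh()/bchannel_bh() run from the channel workqueue: they hand
 * every skb queued on rqueue up to the attached peer (freeing it when no
 * peer is set or the peer rejects it) and, for the D-channel, invoke the
 * PH state change callback when FLG_PHCHANGE is pending.
 */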
static void
dchannel_bh(struct work_struct *ws)
{
	struct dchannel *dch = container_of(ws, struct dchannel, workq);
	struct sk_buff *skb;
	int err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
		while ((skb = skb_dequeue(&dch->rqueue))) {
			if (likely(dch->dev.D.peer)) {
				err = dch->dev.D.recv(dch->dev.D.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
	if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
		if (dch->phfunc)
			dch->phfunc(dch);
	}
}

static void
bchannel_bh(struct work_struct *ws)
{
	struct bchannel *bch = container_of(ws, struct bchannel, workq);
	struct sk_buff *skb;
	int err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
		while ((skb = skb_dequeue(&bch->rqueue))) {
			bch->rcount--;
			if (likely(bch->ch.peer)) {
				err = bch->ch.recv(bch->ch.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
}

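/*
 * mISDN_initdchannel()/mISDN_initbchannel() prepare a channel for use:
 * queues and work structs are initialised, buffer pointers cleared and
 * the maximum frame length recorded.  The D-channel variant also stores
 * the optional PH state change callback and starts with FLG_HDLC set.
 */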
int
mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
{
	test_and_set_bit(FLG_HDLC, &ch->Flags);
	ch->maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	ch->phfunc = phf;
	skb_queue_head_init(&ch->squeue);
	skb_queue_head_init(&ch->rqueue);
	INIT_LIST_HEAD(&ch->dev.bchannels);
	INIT_WORK(&ch->workq, dchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initdchannel);

int
mISDN_initbchannel(struct bchannel *ch, int maxlen)
{
	ch->Flags = 0;
	ch->maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	skb_queue_head_init(&ch->rqueue);
	ch->rcount = 0;
	ch->next_skb = NULL;
	INIT_WORK(&ch->workq, bchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initbchannel);

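/*
 * mISDN_freedchannel()/mISDN_freebchannel() release all skbs still held
 * by a channel and wait for any scheduled bottom-half work to finish.
 */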
int
mISDN_freedchannel(struct dchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	skb_queue_purge(&ch->squeue);
	skb_queue_purge(&ch->rqueue);
	flush_scheduled_work();
	return 0;
}
EXPORT_SYMBOL(mISDN_freedchannel);

int
mISDN_freebchannel(struct bchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	if (ch->next_skb) {
		dev_kfree_skb(ch->next_skb);
		ch->next_skb = NULL;
	}
	skb_queue_purge(&ch->rqueue);
	ch->rcount = 0;
	flush_scheduled_work();
	return 0;
}
EXPORT_SYMBOL(mISDN_freebchannel);

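/*
 * get_sapi_tei() extracts SAPI and TEI from the LAPD address field: the
 * SAPI sits in the upper six bits of the first octet, the TEI in the
 * upper seven bits of the second.  The result is packed as
 * sapi | (tei << 8) and used as the mISDN header id.
 */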
static inline u_int
get_sapi_tei(u_char *p)
{
	u_int sapi, tei;

	sapi = *p >> 2;
	tei = p[1] >> 1;
	return sapi | (tei << 8);
}

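/*
 * recv_Dchannel()/recv_Echannel() queue a completed receive frame as
 * PH_DATA_IND (PH_DATA_E_IND for the E-channel monitor) on the D-channel
 * receive queue and schedule the bottom half; runt frames shorter than
 * the two address octets are dropped.
 */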
void
recv_Dchannel(struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(dch->rx_skb);
		dch->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(dch->rx_skb);
	hh->prim = PH_DATA_IND;
	hh->id = get_sapi_tei(dch->rx_skb->data);
	skb_queue_tail(&dch->rqueue, dch->rx_skb);
	dch->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel);

void
recv_Echannel(struct dchannel *ech, struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(ech->rx_skb);
		ech->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(ech->rx_skb);
	hh->prim = PH_DATA_E_IND;
	hh->id = get_sapi_tei(ech->rx_skb->data);
	skb_queue_tail(&dch->rqueue, ech->rx_skb);
	ech->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Echannel);

void
recv_Bchannel(struct bchannel *bch)
{
	struct mISDNhead *hh;

	hh = mISDN_HEAD_P(bch->rx_skb);
	hh->prim = PH_DATA_IND;
	hh->id = MISDN_ID_ANY;
	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
			"flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
		return;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, bch->rx_skb);
	bch->rx_skb = NULL;
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Bchannel);

void
recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
{
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel_skb);

void
recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
{
	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
			"flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Bchannel_skb);

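/*
 * confirm_Dsend()/confirm_Bsend() queue an empty PH_DATA_CNF carrying the
 * id of the frame currently held in tx_skb, so the upper layer learns
 * that the hardware accepted it for transmission.
 */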
static void
confirm_Dsend(struct dchannel *dch)
{
	struct sk_buff *skb;

	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
	    0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
		    mISDN_HEAD_ID(dch->tx_skb));
		return;
	}
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}

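/*
 * get_next_dframe() picks the next frame from the D-channel send queue.
 * Returns 1 with tx_skb set and a PH_DATA_CNF queued for its id when a
 * frame is ready; returns 0 and clears FLG_TX_BUSY when the queue is
 * empty.
 */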
int
get_next_dframe(struct dchannel *dch)
{
	dch->tx_idx = 0;
	dch->tx_skb = skb_dequeue(&dch->squeue);
	if (dch->tx_skb) {
		confirm_Dsend(dch);
		return 1;
	}
	dch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_dframe);

void
confirm_Bsend(struct bchannel *bch)
{
	struct sk_buff *skb;

	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
			"flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
	    0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
		    mISDN_HEAD_ID(bch->tx_skb));
		return;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(confirm_Bsend);

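/*
 * get_next_bframe() moves a pending next_skb into tx_skb.  Returns 1 when
 * a frame is ready (confirming it unless the channel runs in transparent
 * mode); returns 0 and clears FLG_TX_BUSY when nothing is pending.
 */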
int
get_next_bframe(struct bchannel *bch)
{
	bch->tx_idx = 0;
	if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
		bch->tx_skb = bch->next_skb;
		if (bch->tx_skb) {
			bch->next_skb = NULL;
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
				confirm_Bsend(bch); /* not for transparent */
			return 1;
		} else {
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			printk(KERN_WARNING "B TX_NEXT without skb\n");
		}
	}
	bch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_bframe);

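/*
 * queue_ch_frame() sends a primitive upstream on a mISDN channel.  With
 * no skb a new message is allocated via _queue_data(); otherwise the
 * header is filled in and the skb passed to the peer, or freed when no
 * peer is attached or the peer refuses it.
 */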
void
queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
{
	struct mISDNhead *hh;

	if (!skb) {
		_queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
	} else {
		if (ch->peer) {
			hh = mISDN_HEAD_P(skb);
			hh->prim = pr;
			hh->id = id;
			if (!ch->recv(ch->peer, skb))
				return;
		}
		dev_kfree_skb(skb);
	}
}
EXPORT_SYMBOL(queue_ch_frame);

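/*
 * dchannel_senddata()/bchannel_senddata() hand a frame to the driver's
 * transmitter.  The caller must hold the hardware lock.  Return values:
 *  1  - the frame is now tx_skb and should be written to the FIFO,
 *  0  - the transmitter is busy and the frame has been queued,
 * < 0 - the frame was rejected (bad length or a next_skb still pending).
 */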
int
dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
{
	/* check size */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
		    __func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		skb_queue_tail(&ch->squeue, skb);
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		return 1;
	}
}
EXPORT_SYMBOL(dchannel_senddata);

int
bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
{
	/* check size */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
		    __func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	/* check for pending next_skb */
	if (ch->next_skb) {
		printk(KERN_WARNING
		    "%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
		    __func__, skb->len, ch->next_skb->len);
		return -EBUSY;
	}
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
		ch->next_skb = skb;
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		return 1;
	}
}
EXPORT_SYMBOL(bchannel_senddata);