hwchannel.c

/*
 *
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/module.h>
#include <linux/mISDNhw.h>

/*
 * Bottom halves: deliver queued receive frames to the attached peer
 * channel and run the PH state change callback outside IRQ context.
 */
static void
dchannel_bh(struct work_struct *ws)
{
	struct dchannel *dch = container_of(ws, struct dchannel, workq);
	struct sk_buff *skb;
	int err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &dch->Flags)) {
		while ((skb = skb_dequeue(&dch->rqueue))) {
			if (likely(dch->dev.D.peer)) {
				err = dch->dev.D.recv(dch->dev.D.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
	if (test_and_clear_bit(FLG_PHCHANGE, &dch->Flags)) {
		if (dch->phfunc)
			dch->phfunc(dch);
	}
}

static void
bchannel_bh(struct work_struct *ws)
{
	struct bchannel *bch = container_of(ws, struct bchannel, workq);
	struct sk_buff *skb;
	int err;

	if (test_and_clear_bit(FLG_RECVQUEUE, &bch->Flags)) {
		while ((skb = skb_dequeue(&bch->rqueue))) {
			bch->rcount--;
			if (likely(bch->ch.peer)) {
				err = bch->ch.recv(bch->ch.peer, skb);
				if (err)
					dev_kfree_skb(skb);
			} else
				dev_kfree_skb(skb);
		}
	}
}

/*
 * Channel setup helpers. For the D-channel, "phf" is an optional callback
 * that dchannel_bh() runs when FLG_PHCHANGE is signalled.
 */
int
mISDN_initdchannel(struct dchannel *ch, int maxlen, void *phf)
{
	test_and_set_bit(FLG_HDLC, &ch->Flags);
	ch->maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	ch->phfunc = phf;
	skb_queue_head_init(&ch->squeue);
	skb_queue_head_init(&ch->rqueue);
	INIT_LIST_HEAD(&ch->dev.bchannels);
	INIT_WORK(&ch->workq, dchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initdchannel);

int
mISDN_initbchannel(struct bchannel *ch, int maxlen)
{
	ch->Flags = 0;
	ch->maxlen = maxlen;
	ch->hw = NULL;
	ch->rx_skb = NULL;
	ch->tx_skb = NULL;
	ch->tx_idx = 0;
	skb_queue_head_init(&ch->rqueue);
	ch->rcount = 0;
	ch->next_skb = NULL;
	INIT_WORK(&ch->workq, bchannel_bh);
	return 0;
}
EXPORT_SYMBOL(mISDN_initbchannel);
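
/*
 * Usage sketch (not part of the original file): one way a hardware driver
 * could wire its card state to the two init helpers above. "struct my_card",
 * MY_MAX_DFRAME and MY_MAX_BFRAME are hypothetical names invented for the
 * example; only mISDN_initdchannel()/mISDN_initbchannel() come from this
 * file. Wrapped in #if 0 so it never builds into the module.
 */
#if 0
#define MY_MAX_DFRAME	260	/* assumed D-channel frame limit */
#define MY_MAX_BFRAME	2048	/* assumed B-channel frame limit */

struct my_card {		/* hypothetical per-card state */
	struct dchannel	dch;
	struct bchannel	bch[2];
};

static int
my_card_setup_channels(struct my_card *card)
{
	int i;

	/* no PH state change callback in this sketch */
	mISDN_initdchannel(&card->dch, MY_MAX_DFRAME, NULL);
	for (i = 0; i < 2; i++)
		mISDN_initbchannel(&card->bch[i], MY_MAX_BFRAME);
	return 0;
}
#endif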

int
mISDN_freedchannel(struct dchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	skb_queue_purge(&ch->squeue);
	skb_queue_purge(&ch->rqueue);
	flush_scheduled_work();
	return 0;
}
EXPORT_SYMBOL(mISDN_freedchannel);

void
mISDN_clear_bchannel(struct bchannel *ch)
{
	if (ch->tx_skb) {
		dev_kfree_skb(ch->tx_skb);
		ch->tx_skb = NULL;
	}
	ch->tx_idx = 0;
	if (ch->rx_skb) {
		dev_kfree_skb(ch->rx_skb);
		ch->rx_skb = NULL;
	}
	if (ch->next_skb) {
		dev_kfree_skb(ch->next_skb);
		ch->next_skb = NULL;
	}
	test_and_clear_bit(FLG_TX_BUSY, &ch->Flags);
	test_and_clear_bit(FLG_TX_NEXT, &ch->Flags);
	test_and_clear_bit(FLG_ACTIVE, &ch->Flags);
}
EXPORT_SYMBOL(mISDN_clear_bchannel);

int
mISDN_freebchannel(struct bchannel *ch)
{
	mISDN_clear_bchannel(ch);
	skb_queue_purge(&ch->rqueue);
	ch->rcount = 0;
	flush_scheduled_work();
	return 0;
}
EXPORT_SYMBOL(mISDN_freebchannel);

/* pack the SAPI (upper 6 bits of octet 0) and TEI (upper 7 bits of
 * octet 1) of a LAPD address field into a single id word */
static inline u_int
get_sapi_tei(u_char *p)
{
	u_int sapi, tei;

	sapi = *p >> 2;
	tei = p[1] >> 1;
	return sapi | (tei << 8);
}

void
recv_Dchannel(struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (dch->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(dch->rx_skb);
		dch->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(dch->rx_skb);
	hh->prim = PH_DATA_IND;
	hh->id = get_sapi_tei(dch->rx_skb->data);
	skb_queue_tail(&dch->rqueue, dch->rx_skb);
	dch->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel);

void
recv_Echannel(struct dchannel *ech, struct dchannel *dch)
{
	struct mISDNhead *hh;

	if (ech->rx_skb->len < 2) { /* at least 2 for sapi / tei */
		dev_kfree_skb(ech->rx_skb);
		ech->rx_skb = NULL;
		return;
	}
	hh = mISDN_HEAD_P(ech->rx_skb);
	hh->prim = PH_DATA_E_IND;
	hh->id = get_sapi_tei(ech->rx_skb->data);
	skb_queue_tail(&dch->rqueue, ech->rx_skb);
	ech->rx_skb = NULL;
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Echannel);

void
recv_Bchannel(struct bchannel *bch, unsigned int id)
{
	struct mISDNhead *hh;

	hh = mISDN_HEAD_P(bch->rx_skb);
	hh->prim = PH_DATA_IND;
	hh->id = id;
	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
			"flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
		return;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, bch->rx_skb);
	bch->rx_skb = NULL;
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Bchannel);

void
recv_Dchannel_skb(struct dchannel *dch, struct sk_buff *skb)
{
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Dchannel_skb);

void
recv_Bchannel_skb(struct bchannel *bch, struct sk_buff *skb)
{
	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
			"flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(recv_Bchannel_skb);
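
/*
 * Usage sketch (not part of the original file): a hypothetical receive
 * path handing a completed frame to recv_Dchannel(). Wrapped in #if 0 so
 * it never builds; "buf"/"count" stand for data the driver read from its
 * hardware FIFO, and only the skb handling plus recv_Dchannel() reflect
 * the API defined above.
 */
#if 0
static void
my_d_recv_complete(struct dchannel *dch, const u8 *buf, int count)
{
	if (!dch->rx_skb) {
		dch->rx_skb = dev_alloc_skb(dch->maxlen);
		if (!dch->rx_skb)
			return;		/* drop the frame on OOM */
	}
	if (count > skb_tailroom(dch->rx_skb)) {
		dev_kfree_skb(dch->rx_skb);
		dch->rx_skb = NULL;
		return;			/* oversized frame, drop it */
	}
	memcpy(skb_put(dch->rx_skb, count), buf, count);
	/* queues rx_skb on dch->rqueue and schedules dchannel_bh() */
	recv_Dchannel(dch);
}
#endif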

static void
confirm_Dsend(struct dchannel *dch)
{
	struct sk_buff *skb;

	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(dch->tx_skb),
	    0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
		    mISDN_HEAD_ID(dch->tx_skb));
		return;
	}
	skb_queue_tail(&dch->rqueue, skb);
	schedule_event(dch, FLG_RECVQUEUE);
}

int
get_next_dframe(struct dchannel *dch)
{
	dch->tx_idx = 0;
	dch->tx_skb = skb_dequeue(&dch->squeue);
	if (dch->tx_skb) {
		confirm_Dsend(dch);
		return 1;
	}
	dch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &dch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_dframe);

void
confirm_Bsend(struct bchannel *bch)
{
	struct sk_buff *skb;

	if (bch->rcount >= 64) {
		printk(KERN_WARNING "B-channel %p receive queue overflow, "
			"flushing!\n", bch);
		skb_queue_purge(&bch->rqueue);
		bch->rcount = 0;
	}
	skb = _alloc_mISDN_skb(PH_DATA_CNF, mISDN_HEAD_ID(bch->tx_skb),
	    0, NULL, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_ERR "%s: no skb id %x\n", __func__,
		    mISDN_HEAD_ID(bch->tx_skb));
		return;
	}
	bch->rcount++;
	skb_queue_tail(&bch->rqueue, skb);
	schedule_event(bch, FLG_RECVQUEUE);
}
EXPORT_SYMBOL(confirm_Bsend);

int
get_next_bframe(struct bchannel *bch)
{
	bch->tx_idx = 0;
	if (test_bit(FLG_TX_NEXT, &bch->Flags)) {
		bch->tx_skb = bch->next_skb;
		if (bch->tx_skb) {
			bch->next_skb = NULL;
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			if (!test_bit(FLG_TRANSPARENT, &bch->Flags))
				confirm_Bsend(bch); /* not for transparent */
			return 1;
		} else {
			test_and_clear_bit(FLG_TX_NEXT, &bch->Flags);
			printk(KERN_WARNING "B TX_NEXT without skb\n");
		}
	}
	bch->tx_skb = NULL;
	test_and_clear_bit(FLG_TX_BUSY, &bch->Flags);
	return 0;
}
EXPORT_SYMBOL(get_next_bframe);
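
/*
 * Usage sketch (not part of the original file): the transmit-complete side
 * of a hypothetical driver using get_next_bframe(). Wrapped in #if 0 so it
 * never builds; the "restart the transmitter" step is only described in a
 * comment because it is hardware specific.
 */
#if 0
static void
my_b_xmit_complete(struct bchannel *bch)
{
	if (bch->tx_skb) {
		dev_kfree_skb(bch->tx_skb);
		bch->tx_skb = NULL;
	}
	/*
	 * get_next_bframe() promotes next_skb to tx_skb, sends a
	 * PH_DATA_CNF for HDLC channels via confirm_Bsend() and returns 1
	 * when another frame is waiting to be written to the FIFO.
	 */
	if (get_next_bframe(bch)) {
		/* driver-specific: write bch->tx_skb to the hardware here */
	}
}
#endif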

void
queue_ch_frame(struct mISDNchannel *ch, u_int pr, int id, struct sk_buff *skb)
{
	struct mISDNhead *hh;

	if (!skb) {
		_queue_data(ch, pr, id, 0, NULL, GFP_ATOMIC);
	} else {
		if (ch->peer) {
			hh = mISDN_HEAD_P(skb);
			hh->prim = pr;
			hh->id = id;
			if (!ch->recv(ch->peer, skb))
				return;
		}
		dev_kfree_skb(skb);
	}
}
EXPORT_SYMBOL(queue_ch_frame);

/*
 * Returns a negative errno on bad size, 0 when the frame was queued
 * behind a transmission already in progress, or 1 when the caller
 * should write the new tx_skb to the hardware now.
 */
int
dchannel_senddata(struct dchannel *ch, struct sk_buff *skb)
{
	/* check oversize */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
		    __func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		skb_queue_tail(&ch->squeue, skb);
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		return 1;
	}
}
EXPORT_SYMBOL(dchannel_senddata);

/*
 * Same return convention as dchannel_senddata(), except that only a
 * single frame can be pending in next_skb; a second one is rejected
 * with -EBUSY.
 */
int
bchannel_senddata(struct bchannel *ch, struct sk_buff *skb)
{
	/* check oversize */
	if (skb->len <= 0) {
		printk(KERN_WARNING "%s: skb too small\n", __func__);
		return -EINVAL;
	}
	if (skb->len > ch->maxlen) {
		printk(KERN_WARNING "%s: skb too large(%d/%d)\n",
		    __func__, skb->len, ch->maxlen);
		return -EINVAL;
	}
	/* HW lock must be obtained */
	/* check for pending next_skb */
	if (ch->next_skb) {
		printk(KERN_WARNING
		    "%s: next_skb exist ERROR (skb->len=%d next_skb->len=%d)\n",
		    __func__, skb->len, ch->next_skb->len);
		return -EBUSY;
	}
	if (test_and_set_bit(FLG_TX_BUSY, &ch->Flags)) {
		test_and_set_bit(FLG_TX_NEXT, &ch->Flags);
		ch->next_skb = skb;
		return 0;
	} else {
		/* write to fifo */
		ch->tx_skb = skb;
		ch->tx_idx = 0;
		return 1;
	}
}
EXPORT_SYMBOL(bchannel_senddata);
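
/*
 * Usage sketch (not part of the original file): a hypothetical PH_DATA_REQ
 * handler built on bchannel_senddata(). Wrapped in #if 0 so it never
 * builds; only the return-value convention (<0 error, 0 queued as
 * next_skb, 1 frame placed in tx_skb for immediate transmission) comes
 * from the function above.
 */
#if 0
static int
my_b_send(struct bchannel *bch, struct sk_buff *skb)
{
	int ret;

	/* the caller is expected to hold the driver's hardware lock */
	ret = bchannel_senddata(bch, skb);
	if (ret > 0) {
		/* channel was idle: bch->tx_skb is set, start the FIFO/DMA
		 * transmit here (hardware specific) */
		ret = 0;
	}
	return ret;
}
#endif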