stack.c

/*
 *
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008 by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/mISDNif.h>
#include <linux/kthread.h>
#include "core.h"

static u_int	*debug;
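
/*
 * Put a message on the stack's queue; unless the stack is stopped, mark
 * the stack as having work and wake its thread.  mISDN_queue_message()
 * is the variant used as a channel send/recv callback.
 */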
static inline void
_queue_message(struct mISDNstack *st, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);

	if (*debug & DEBUG_QUEUE_FUNC)
		printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
		       __func__, hh->prim, hh->id, skb);
	skb_queue_tail(&st->msgq, skb);
	if (likely(!test_bit(mISDN_STACK_STOPPED, &st->status))) {
		test_and_set_bit(mISDN_STACK_WORK, &st->status);
		wake_up_interruptible(&st->workq);
	}
}

int
mISDN_queue_message(struct mISDNchannel *ch, struct sk_buff *skb)
{
	_queue_message(ch->st, skb);
	return 0;
}
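
/* Find a layer2 channel on the stack by its channel number (ch->nr). */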
static struct mISDNchannel *
get_channel4id(struct mISDNstack *st, u_int id)
{
	struct mISDNchannel *ch;

	mutex_lock(&st->lmutex);
	list_for_each_entry(ch, &st->layer2, list) {
		if (id == ch->nr)
			goto unlock;
	}
	ch = NULL;
unlock:
	mutex_unlock(&st->lmutex);
	return ch;
}
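
/*
 * Deliver a copy of skb to every bound socket on the list.  A copy that
 * could not be queued is reused for the next socket; any leftover copy
 * is freed at the end.
 */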
static void
send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
{
	struct hlist_node *node;
	struct sock *sk;
	struct sk_buff *cskb = NULL;

	read_lock(&sl->lock);
	sk_for_each(sk, node, &sl->head) {
		if (sk->sk_state != MISDN_BOUND)
			continue;
		if (!cskb)
			/* sl->lock is held, so the copy must not sleep */
			cskb = skb_copy(skb, GFP_ATOMIC);
		if (!cskb) {
			printk(KERN_WARNING "%s no skb\n", __func__);
			break;
		}
		if (!sock_queue_rcv_skb(sk, cskb))
			cskb = NULL;
	}
	read_unlock(&sl->lock);
	if (cskb)
		dev_kfree_skb(cskb);
}
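
/*
 * Deliver a message to layer 2.  An address of MISDN_ID_ANY means "all
 * layer2 channels": the last channel gets the original skb, the others
 * get copies.  Otherwise the message goes to the channel matching the
 * address, or is handed to the TEI manager via CHECK_DATA.
 */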
static void
send_layer2(struct mISDNstack *st, struct sk_buff *skb)
{
	struct sk_buff *cskb;
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	struct mISDNchannel *ch;
	int ret;

	if (!st)
		return;
	mutex_lock(&st->lmutex);
	if ((hh->id & MISDN_ID_ADDR_MASK) == MISDN_ID_ANY) { /* L2 for all */
		list_for_each_entry(ch, &st->layer2, list) {
			if (list_is_last(&ch->list, &st->layer2)) {
				cskb = skb;
				skb = NULL;
			} else {
				cskb = skb_copy(skb, GFP_KERNEL);
			}
			if (cskb) {
				ret = ch->send(ch, cskb);
				if (ret) {
					if (*debug & DEBUG_SEND_ERR)
						printk(KERN_DEBUG
						       "%s ch%d prim(%x) addr(%x)"
						       " err %d\n",
						       __func__, ch->nr,
						       hh->prim, ch->addr, ret);
					dev_kfree_skb(cskb);
				}
			} else {
				printk(KERN_WARNING "%s ch%d addr %x no mem\n",
				       __func__, ch->nr, ch->addr);
				goto out;
			}
		}
	} else {
		list_for_each_entry(ch, &st->layer2, list) {
			if ((hh->id & MISDN_ID_ADDR_MASK) == ch->addr) {
				ret = ch->send(ch, skb);
				if (!ret)
					skb = NULL;
				goto out;
			}
		}
		ret = st->dev->teimgr->ctrl(st->dev->teimgr, CHECK_DATA, skb);
		if (!ret)
			skb = NULL;
		else if (*debug & DEBUG_SEND_ERR)
			/* ch is not a valid channel here (loop ran off the end) */
			printk(KERN_DEBUG "%s mgr prim(%x) err %d\n",
			       __func__, hh->prim, ret);
	}
out:
	mutex_unlock(&st->lmutex);
	if (skb)
		dev_kfree_skb(skb);
}
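
/*
 * Dispatch one dequeued message by the layermask of its primitive:
 * layer1 frames go to the layer1 channel (and to bound layer1 sockets),
 * layer2 frames to send_layer2(), higher layers to the channel whose
 * number matches the message id.
 */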
static inline int
send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	struct mISDNchannel *ch;
	int lm;

	lm = hh->prim & MISDN_LAYERMASK;
	if (*debug & DEBUG_QUEUE_FUNC)
		printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
		       __func__, hh->prim, hh->id, skb);
	if (lm == 0x1) {
		if (!hlist_empty(&st->l1sock.head)) {
			__net_timestamp(skb);
			send_socklist(&st->l1sock, skb);
		}
		return st->layer1->send(st->layer1, skb);
	} else if (lm == 0x2) {
		if (!hlist_empty(&st->l1sock.head))
			send_socklist(&st->l1sock, skb);
		send_layer2(st, skb);
		return 0;
	} else if (lm == 0x4) {
		ch = get_channel4id(st, hh->id);
		if (ch)
			return ch->send(ch, skb);
		else
			printk(KERN_WARNING
			       "%s: dev(%s) prim(%x) id(%x) no channel\n",
			       __func__, st->dev->name, hh->prim, hh->id);
	} else if (lm == 0x8) {
		WARN_ON(lm == 0x8);
		ch = get_channel4id(st, hh->id);
		if (ch)
			return ch->send(ch, skb);
		else
			printk(KERN_WARNING
			       "%s: dev(%s) prim(%x) id(%x) no channel\n",
			       __func__, st->dev->name, hh->prim, hh->id);
	} else {
		/* broadcast not handled yet */
		printk(KERN_WARNING "%s: dev(%s) prim %x not delivered\n",
		       __func__, st->dev->name, hh->prim);
	}
	return -ESRCH;
}
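
/* Clearing the stack is currently a no-op. */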
static void
do_clear_stack(struct mISDNstack *st)
{
}
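
/*
 * Per-stack worker thread: drains the message queue while the WORK bit
 * is set, handles the STOPPED/CLEARING/RESTART transitions, then sleeps
 * until an action bit wakes it.  It exits when ABORT is set, completing
 * st->notify whenever a waiter registered one.
 */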
static int
mISDNStackd(void *data)
{
	struct mISDNstack *st = data;
	int err = 0;

#ifdef CONFIG_SMP
	lock_kernel();
#endif
	sigfillset(&current->blocked);
#ifdef CONFIG_SMP
	unlock_kernel();
#endif
	if (*debug & DEBUG_MSG_THREAD)
		printk(KERN_DEBUG "mISDNStackd %s started\n", st->dev->name);

	if (st->notify != NULL) {
		complete(st->notify);
		st->notify = NULL;
	}

	for (;;) {
		struct sk_buff *skb;

		if (unlikely(test_bit(mISDN_STACK_STOPPED, &st->status))) {
			test_and_clear_bit(mISDN_STACK_WORK, &st->status);
			test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
		} else
			test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
		while (test_bit(mISDN_STACK_WORK, &st->status)) {
			skb = skb_dequeue(&st->msgq);
			if (!skb) {
				test_and_clear_bit(mISDN_STACK_WORK,
						   &st->status);
				/* test if a race happens */
				skb = skb_dequeue(&st->msgq);
				if (!skb)
					continue;
				test_and_set_bit(mISDN_STACK_WORK,
						 &st->status);
			}
#ifdef MISDN_MSG_STATS
			st->msg_cnt++;
#endif
			err = send_msg_to_layer(st, skb);
			if (unlikely(err)) {
				if (*debug & DEBUG_SEND_ERR)
					printk(KERN_DEBUG
					       "%s: %s prim(%x) id(%x) "
					       "send call(%d)\n",
					       __func__, st->dev->name,
					       mISDN_HEAD_PRIM(skb),
					       mISDN_HEAD_ID(skb), err);
				dev_kfree_skb(skb);
				continue;
			}
			if (unlikely(test_bit(mISDN_STACK_STOPPED,
					      &st->status))) {
				test_and_clear_bit(mISDN_STACK_WORK,
						   &st->status);
				test_and_clear_bit(mISDN_STACK_RUNNING,
						   &st->status);
				break;
			}
		}
		if (test_bit(mISDN_STACK_CLEARING, &st->status)) {
			test_and_set_bit(mISDN_STACK_STOPPED, &st->status);
			test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
			do_clear_stack(st);
			test_and_clear_bit(mISDN_STACK_CLEARING, &st->status);
			test_and_set_bit(mISDN_STACK_RESTART, &st->status);
		}
		if (test_and_clear_bit(mISDN_STACK_RESTART, &st->status)) {
			test_and_clear_bit(mISDN_STACK_STOPPED, &st->status);
			test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
			if (!skb_queue_empty(&st->msgq))
				test_and_set_bit(mISDN_STACK_WORK,
						 &st->status);
		}
		if (test_bit(mISDN_STACK_ABORT, &st->status))
			break;
		if (st->notify != NULL) {
			complete(st->notify);
			st->notify = NULL;
		}
#ifdef MISDN_MSG_STATS
		st->sleep_cnt++;
#endif
		test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
		wait_event_interruptible(st->workq, (st->status &
						     mISDN_STACK_ACTION_MASK));
		if (*debug & DEBUG_MSG_THREAD)
			printk(KERN_DEBUG "%s: %s wake status %08lx\n",
			       __func__, st->dev->name, st->status);
		test_and_set_bit(mISDN_STACK_ACTIVE, &st->status);
		test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status);
		if (test_bit(mISDN_STACK_STOPPED, &st->status)) {
			test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
#ifdef MISDN_MSG_STATS
			st->stopped_cnt++;
#endif
		}
	}
#ifdef MISDN_MSG_STATS
	printk(KERN_DEBUG "mISDNStackd daemon for %s proceed %d "
	       "msg %d sleep %d stopped\n",
	       st->dev->name, st->msg_cnt, st->sleep_cnt, st->stopped_cnt);
	printk(KERN_DEBUG
	       "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
	       st->dev->name, st->thread->utime, st->thread->stime);
	printk(KERN_DEBUG
	       "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
	       st->dev->name, st->thread->nvcsw, st->thread->nivcsw);
	printk(KERN_DEBUG "mISDNStackd daemon for %s killed now\n",
	       st->dev->name);
#endif
	test_and_set_bit(mISDN_STACK_KILLED, &st->status);
	test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
	test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
	test_and_clear_bit(mISDN_STACK_ABORT, &st->status);
	skb_queue_purge(&st->msgq);
	st->thread = NULL;
	if (st->notify != NULL) {
		complete(st->notify);
		st->notify = NULL;
	}
	return 0;
}
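
/*
 * Small helpers: l1_receive() timestamps an incoming layer1 frame and
 * queues it on the stack; set_channel_address() packs SAPI and TEI into
 * ch->addr; add_layer2() links a channel into the stack's layer2 list
 * (the __ variant expects the caller to hold lmutex).
 */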
static int
l1_receive(struct mISDNchannel *ch, struct sk_buff *skb)
{
	if (!ch->st)
		return -ENODEV;
	__net_timestamp(skb);
	_queue_message(ch->st, skb);
	return 0;
}

void
set_channel_address(struct mISDNchannel *ch, u_int sapi, u_int tei)
{
	ch->addr = sapi | (tei << 8);
}

void
__add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
{
	list_add_tail(&ch->list, &st->layer2);
}

void
add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
{
	mutex_lock(&st->lmutex);
	__add_layer2(ch, st);
	mutex_unlock(&st->lmutex);
}
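
/* Control hook of the stack's own channel: forward commands to layer1. */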
static int
st_own_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
{
	if (!ch->st || !ch->st->layer1)
		return -EINVAL;
	return ch->st->layer1->ctrl(ch->st->layer1, cmd, arg);
}
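
/*
 * Allocate and set up the stack for a device, create its TEI manager,
 * wire the D-channel to the stack, then start the mISDNStackd thread
 * and wait until it reports that it is running.
 */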
int
create_stack(struct mISDNdevice *dev)
{
	struct mISDNstack *newst;
	int err;
	DECLARE_COMPLETION_ONSTACK(done);

	newst = kzalloc(sizeof(struct mISDNstack), GFP_KERNEL);
	if (!newst) {
		printk(KERN_ERR "kmalloc mISDN_stack failed\n");
		return -ENOMEM;
	}
	newst->dev = dev;
	INIT_LIST_HEAD(&newst->layer2);
	INIT_HLIST_HEAD(&newst->l1sock.head);
	rwlock_init(&newst->l1sock.lock);
	init_waitqueue_head(&newst->workq);
	skb_queue_head_init(&newst->msgq);
	mutex_init(&newst->lmutex);
	dev->D.st = newst;
	err = create_teimanager(dev);
	if (err) {
		printk(KERN_ERR "kmalloc teimanager failed\n");
		kfree(newst);
		return err;
	}
	dev->teimgr->peer = &newst->own;
	dev->teimgr->recv = mISDN_queue_message;
	dev->teimgr->st = newst;
	newst->layer1 = &dev->D;
	dev->D.recv = l1_receive;
	dev->D.peer = &newst->own;
	newst->own.st = newst;
	newst->own.ctrl = st_own_ctrl;
	newst->own.send = mISDN_queue_message;
	newst->own.recv = mISDN_queue_message;
	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: st(%s)\n", __func__, newst->dev->name);
	newst->notify = &done;
	newst->thread = kthread_run(mISDNStackd, (void *)newst, "mISDN_%s",
				    newst->dev->name);
	if (IS_ERR(newst->thread)) {
		err = PTR_ERR(newst->thread);
		printk(KERN_ERR
		       "mISDN:cannot create kernel thread for %s (%d)\n",
		       newst->dev->name, err);
		delete_teimanager(dev->teimgr);
		kfree(newst);
	} else
		wait_for_completion(&done);
	return err;
}
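
/*
 * Bind a socket channel directly to layer1 (TE/NT S0/E1): open the
 * D-channel and add the socket to the stack's layer1 socket list.
 */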
int
connect_layer1(struct mISDNdevice *dev, struct mISDNchannel *ch,
	       u_int protocol, struct sockaddr_mISDN *adr)
{
	struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
	struct channel_req rq;
	int err;

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
		       __func__, dev->name, protocol, adr->dev, adr->channel,
		       adr->sapi, adr->tei);
	switch (protocol) {
	case ISDN_P_NT_S0:
	case ISDN_P_NT_E1:
	case ISDN_P_TE_S0:
	case ISDN_P_TE_E1:
#ifdef PROTOCOL_CHECK
		/* this should be enhanced */
		if (!list_empty(&dev->D.st->layer2)
		    && dev->D.protocol != protocol)
			return -EBUSY;
		if (!hlist_empty(&dev->D.st->l1sock.head)
		    && dev->D.protocol != protocol)
			return -EBUSY;
#endif
		ch->recv = mISDN_queue_message;
		ch->peer = &dev->D.st->own;
		ch->st = dev->D.st;
		rq.protocol = protocol;
		rq.adr.channel = 0;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err);
		if (err)
			return err;
		write_lock_bh(&dev->D.st->l1sock.lock);
		sk_add_node(&msk->sk, &dev->D.st->l1sock.head);
		write_unlock_bh(&dev->D.st->l1sock.lock);
		break;
	default:
		return -ENOPROTOOPT;
	}
	return 0;
}
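
/*
 * Connect a socket to a B-channel.  If the device implements the
 * requested B-protocol itself, the socket is wired directly to the
 * hardware channel; otherwise a Bprotocol module is created and placed
 * between the socket and the hardware channel.
 */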
int
connect_Bstack(struct mISDNdevice *dev, struct mISDNchannel *ch,
	       u_int protocol, struct sockaddr_mISDN *adr)
{
	struct channel_req rq, rq2;
	int pmask, err;
	struct Bprotocol *bp;

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
		       __func__, dev->name, protocol,
		       adr->dev, adr->channel, adr->sapi,
		       adr->tei);
	ch->st = dev->D.st;
	pmask = 1 << (protocol & ISDN_P_B_MASK);
	if (pmask & dev->Bprotocols) {
		rq.protocol = protocol;
		rq.adr = *adr;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		if (err)
			return err;
		ch->recv = rq.ch->send;
		ch->peer = rq.ch;
		rq.ch->recv = ch->send;
		rq.ch->peer = ch;
		rq.ch->st = dev->D.st;
	} else {
		bp = get_Bprotocol4mask(pmask);
		if (!bp)
			return -ENOPROTOOPT;
		rq2.protocol = protocol;
		rq2.adr = *adr;
		rq2.ch = ch;
		err = bp->create(&rq2);
		if (err)
			return err;
		ch->recv = rq2.ch->send;
		ch->peer = rq2.ch;
		rq2.ch->st = dev->D.st;
		rq.protocol = rq2.protocol;
		rq.adr = *adr;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		if (err) {
			rq2.ch->ctrl(rq2.ch, CLOSE_CHANNEL, NULL);
			return err;
		}
		rq2.ch->recv = rq.ch->send;
		rq2.ch->peer = rq.ch;
		rq.ch->recv = rq2.ch->send;
		rq.ch->peer = rq2.ch;
		rq.ch->st = dev->D.st;
	}
	ch->protocol = protocol;
	ch->nr = rq.ch->nr;
	return 0;
}
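
/*
 * Create a layer2 entity (LAPD TE or NT): open the D-channel with a
 * matching layer1 protocol, then let the TEI manager open the layer2
 * channel and link it into the stack.
 */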
int
create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
		u_int protocol, struct sockaddr_mISDN *adr)
{
	struct channel_req rq;
	int err;

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
		       __func__, dev->name, protocol,
		       adr->dev, adr->channel, adr->sapi,
		       adr->tei);
	rq.protocol = ISDN_P_TE_S0;
	if (dev->Dprotocols & (1 << ISDN_P_TE_E1))
		rq.protocol = ISDN_P_TE_E1;
	switch (protocol) {
	case ISDN_P_LAPD_NT:
		rq.protocol = ISDN_P_NT_S0;
		if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
			rq.protocol = ISDN_P_NT_E1;
		/* fall through */
	case ISDN_P_LAPD_TE:
#ifdef PROTOCOL_CHECK
		/* this should be enhanced */
		if (!list_empty(&dev->D.st->layer2)
		    && dev->D.protocol != protocol)
			return -EBUSY;
		if (!hlist_empty(&dev->D.st->l1sock.head)
		    && dev->D.protocol != protocol)
			return -EBUSY;
#endif
		ch->recv = mISDN_queue_message;
		ch->peer = &dev->D.st->own;
		ch->st = dev->D.st;
		rq.adr.channel = 0;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err);
		if (err)
			break;
		rq.protocol = protocol;
		rq.adr = *adr;
		rq.ch = ch;
		err = dev->teimgr->ctrl(dev->teimgr, OPEN_CHANNEL, &rq);
		printk(KERN_DEBUG "%s: ret 2 %d\n", __func__, err);
		if (!err) {
			if ((protocol == ISDN_P_LAPD_NT) && !rq.ch)
				break;
			add_layer2(rq.ch, dev->D.st);
			rq.ch->recv = mISDN_queue_message;
			rq.ch->peer = &dev->D.st->own;
			rq.ch->ctrl(rq.ch, OPEN_CHANNEL, NULL); /* can't fail */
		}
		break;
	default:
		err = -EPROTONOSUPPORT;
	}
	return err;
}
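
/*
 * Tear down a channel: B-channels only close their peer; layer1 sockets
 * are removed from the socket list and the D-channel is closed; LAPD TE
 * channels are unlinked from the layer2 list and closed, and the TEI
 * manager is closed in both LAPD cases.
 */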
void
delete_channel(struct mISDNchannel *ch)
{
	struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
	struct mISDNchannel *pch;

	if (!ch->st) {
		printk(KERN_WARNING "%s: no stack\n", __func__);
		return;
	}
	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: st(%s) protocol(%x)\n", __func__,
		       ch->st->dev->name, ch->protocol);
	if (ch->protocol >= ISDN_P_B_START) {
		if (ch->peer) {
			ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL);
			ch->peer = NULL;
		}
		return;
	}
	switch (ch->protocol) {
	case ISDN_P_NT_S0:
	case ISDN_P_TE_S0:
	case ISDN_P_NT_E1:
	case ISDN_P_TE_E1:
		write_lock_bh(&ch->st->l1sock.lock);
		sk_del_node_init(&msk->sk);
		write_unlock_bh(&ch->st->l1sock.lock);
		ch->st->dev->D.ctrl(&ch->st->dev->D, CLOSE_CHANNEL, NULL);
		break;
	case ISDN_P_LAPD_TE:
		pch = get_channel4id(ch->st, ch->nr);
		if (pch) {
			mutex_lock(&ch->st->lmutex);
			list_del(&pch->list);
			mutex_unlock(&ch->st->lmutex);
			pch->ctrl(pch, CLOSE_CHANNEL, NULL);
			pch = ch->st->dev->teimgr;
			pch->ctrl(pch, CLOSE_CHANNEL, NULL);
		} else
			printk(KERN_WARNING "%s: no l2 channel\n",
			       __func__);
		break;
	case ISDN_P_LAPD_NT:
		pch = ch->st->dev->teimgr;
		if (pch) {
			pch->ctrl(pch, CLOSE_CHANNEL, NULL);
		} else
			printk(KERN_WARNING "%s: no l2 channel\n",
			       __func__);
		break;
	default:
		break;
	}
	return;
}
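
/*
 * Destroy a device's stack: delete the TEI manager, tell the stack
 * thread to abort and wait for it to finish, warn about anything still
 * attached, then free the stack.
 */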
void
delete_stack(struct mISDNdevice *dev)
{
	struct mISDNstack *st = dev->D.st;
	DECLARE_COMPLETION_ONSTACK(done);

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: st(%s)\n", __func__,
		       st->dev->name);
	if (dev->teimgr)
		delete_teimanager(dev->teimgr);
	if (st->thread) {
		if (st->notify) {
			printk(KERN_WARNING "%s: notifier in use\n",
			       __func__);
			complete(st->notify);
		}
		st->notify = &done;
		test_and_set_bit(mISDN_STACK_ABORT, &st->status);
		test_and_set_bit(mISDN_STACK_WAKEUP, &st->status);
		wake_up_interruptible(&st->workq);
		wait_for_completion(&done);
	}
	if (!list_empty(&st->layer2))
		printk(KERN_WARNING "%s: layer2 list not empty\n",
		       __func__);
	if (!hlist_empty(&st->l1sock.head))
		printk(KERN_WARNING "%s: layer1 list not empty\n",
		       __func__);
	kfree(st);
}
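
/* Give this file access to the core's debug flags. */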
void
mISDN_initstack(u_int *dp)
{
	debug = dp;
}