/* stack.c - mISDN stack message handling */
/*
 * Author	Karsten Keil <kkeil@novell.com>
 *
 * Copyright 2008  by Karsten Keil <kkeil@novell.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */
  17. #include <linux/slab.h>
  18. #include <linux/mISDNif.h>
  19. #include <linux/kthread.h>
  20. #include <linux/smp_lock.h>
  21. #include "core.h"
  22. static u_int *debug;
  23. static inline void
  24. _queue_message(struct mISDNstack *st, struct sk_buff *skb)
  25. {
  26. struct mISDNhead *hh = mISDN_HEAD_P(skb);
  27. if (*debug & DEBUG_QUEUE_FUNC)
  28. printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
  29. __func__, hh->prim, hh->id, skb);
  30. skb_queue_tail(&st->msgq, skb);
  31. if (likely(!test_bit(mISDN_STACK_STOPPED, &st->status))) {
  32. test_and_set_bit(mISDN_STACK_WORK, &st->status);
  33. wake_up_interruptible(&st->workq);
  34. }
  35. }
  36. static int
  37. mISDN_queue_message(struct mISDNchannel *ch, struct sk_buff *skb)
  38. {
  39. _queue_message(ch->st, skb);
  40. return 0;
  41. }
  42. static struct mISDNchannel *
  43. get_channel4id(struct mISDNstack *st, u_int id)
  44. {
  45. struct mISDNchannel *ch;
  46. mutex_lock(&st->lmutex);
  47. list_for_each_entry(ch, &st->layer2, list) {
  48. if (id == ch->nr)
  49. goto unlock;
  50. }
  51. ch = NULL;
  52. unlock:
  53. mutex_unlock(&st->lmutex);
  54. return ch;
  55. }
  56. static void
  57. send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
  58. {
  59. struct hlist_node *node;
  60. struct sock *sk;
  61. struct sk_buff *cskb = NULL;
  62. read_lock(&sl->lock);
  63. sk_for_each(sk, node, &sl->head) {
  64. if (sk->sk_state != MISDN_BOUND)
  65. continue;
  66. if (!cskb)
  67. cskb = skb_copy(skb, GFP_KERNEL);
  68. if (!cskb) {
  69. printk(KERN_WARNING "%s no skb\n", __func__);
  70. break;
  71. }
  72. if (!sock_queue_rcv_skb(sk, cskb))
  73. cskb = NULL;
  74. }
  75. read_unlock(&sl->lock);
  76. if (cskb)
  77. dev_kfree_skb(cskb);
  78. }
  79. static void
  80. send_layer2(struct mISDNstack *st, struct sk_buff *skb)
  81. {
  82. struct sk_buff *cskb;
  83. struct mISDNhead *hh = mISDN_HEAD_P(skb);
  84. struct mISDNchannel *ch;
  85. int ret;
  86. if (!st)
  87. return;
  88. mutex_lock(&st->lmutex);
  89. if ((hh->id & MISDN_ID_ADDR_MASK) == MISDN_ID_ANY) { /* L2 for all */
  90. list_for_each_entry(ch, &st->layer2, list) {
  91. if (list_is_last(&ch->list, &st->layer2)) {
  92. cskb = skb;
  93. skb = NULL;
  94. } else {
  95. cskb = skb_copy(skb, GFP_KERNEL);
  96. }
  97. if (cskb) {
  98. ret = ch->send(ch, cskb);
  99. if (ret) {
  100. if (*debug & DEBUG_SEND_ERR)
  101. printk(KERN_DEBUG
  102. "%s ch%d prim(%x) addr(%x)"
  103. " err %d\n",
  104. __func__, ch->nr,
  105. hh->prim, ch->addr, ret);
  106. dev_kfree_skb(cskb);
  107. }
  108. } else {
  109. printk(KERN_WARNING "%s ch%d addr %x no mem\n",
  110. __func__, ch->nr, ch->addr);
  111. goto out;
  112. }
  113. }
  114. } else {
  115. list_for_each_entry(ch, &st->layer2, list) {
  116. if ((hh->id & MISDN_ID_ADDR_MASK) == ch->addr) {
  117. ret = ch->send(ch, skb);
  118. if (!ret)
  119. skb = NULL;
  120. goto out;
  121. }
  122. }
  123. ret = st->dev->teimgr->ctrl(st->dev->teimgr, CHECK_DATA, skb);
  124. if (!ret)
  125. skb = NULL;
  126. else if (*debug & DEBUG_SEND_ERR)
  127. printk(KERN_DEBUG
  128. "%s ch%d mgr prim(%x) addr(%x) err %d\n",
  129. __func__, ch->nr, hh->prim, ch->addr, ret);
  130. }
  131. out:
  132. mutex_unlock(&st->lmutex);
  133. if (skb)
  134. dev_kfree_skb(skb);
  135. }
  136. static inline int
  137. send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb)
  138. {
  139. struct mISDNhead *hh = mISDN_HEAD_P(skb);
  140. struct mISDNchannel *ch;
  141. int lm;
  142. lm = hh->prim & MISDN_LAYERMASK;
  143. if (*debug & DEBUG_QUEUE_FUNC)
  144. printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
  145. __func__, hh->prim, hh->id, skb);
  146. if (lm == 0x1) {
  147. if (!hlist_empty(&st->l1sock.head)) {
  148. __net_timestamp(skb);
  149. send_socklist(&st->l1sock, skb);
  150. }
  151. return st->layer1->send(st->layer1, skb);
  152. } else if (lm == 0x2) {
  153. if (!hlist_empty(&st->l1sock.head))
  154. send_socklist(&st->l1sock, skb);
  155. send_layer2(st, skb);
  156. return 0;
  157. } else if (lm == 0x4) {
  158. ch = get_channel4id(st, hh->id);
  159. if (ch)
  160. return ch->send(ch, skb);
  161. else
  162. printk(KERN_WARNING
  163. "%s: dev(%s) prim(%x) id(%x) no channel\n",
  164. __func__, dev_name(&st->dev->dev), hh->prim,
  165. hh->id);
  166. } else if (lm == 0x8) {
  167. WARN_ON(lm == 0x8);
  168. ch = get_channel4id(st, hh->id);
  169. if (ch)
  170. return ch->send(ch, skb);
  171. else
  172. printk(KERN_WARNING
  173. "%s: dev(%s) prim(%x) id(%x) no channel\n",
  174. __func__, dev_name(&st->dev->dev), hh->prim,
  175. hh->id);
  176. } else {
  177. /* broadcast not handled yet */
  178. printk(KERN_WARNING "%s: dev(%s) prim %x not delivered\n",
  179. __func__, dev_name(&st->dev->dev), hh->prim);
  180. }
  181. return -ESRCH;
  182. }
  183. static void
  184. do_clear_stack(struct mISDNstack *st)
  185. {
  186. }
  187. static int
  188. mISDNStackd(void *data)
  189. {
  190. struct mISDNstack *st = data;
  191. int err = 0;
  192. #ifdef CONFIG_SMP
  193. lock_kernel();
  194. #endif
  195. sigfillset(&current->blocked);
  196. #ifdef CONFIG_SMP
  197. unlock_kernel();
  198. #endif
  199. if (*debug & DEBUG_MSG_THREAD)
  200. printk(KERN_DEBUG "mISDNStackd %s started\n",
  201. dev_name(&st->dev->dev));
  202. if (st->notify != NULL) {
  203. complete(st->notify);
  204. st->notify = NULL;
  205. }
  206. for (;;) {
  207. struct sk_buff *skb;
  208. if (unlikely(test_bit(mISDN_STACK_STOPPED, &st->status))) {
  209. test_and_clear_bit(mISDN_STACK_WORK, &st->status);
  210. test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
  211. } else
  212. test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
  213. while (test_bit(mISDN_STACK_WORK, &st->status)) {
  214. skb = skb_dequeue(&st->msgq);
  215. if (!skb) {
  216. test_and_clear_bit(mISDN_STACK_WORK,
  217. &st->status);
  218. /* test if a race happens */
  219. skb = skb_dequeue(&st->msgq);
  220. if (!skb)
  221. continue;
  222. test_and_set_bit(mISDN_STACK_WORK,
  223. &st->status);
  224. }
  225. #ifdef MISDN_MSG_STATS
  226. st->msg_cnt++;
  227. #endif
  228. err = send_msg_to_layer(st, skb);
  229. if (unlikely(err)) {
  230. if (*debug & DEBUG_SEND_ERR)
  231. printk(KERN_DEBUG
  232. "%s: %s prim(%x) id(%x) "
  233. "send call(%d)\n",
  234. __func__, dev_name(&st->dev->dev),
  235. mISDN_HEAD_PRIM(skb),
  236. mISDN_HEAD_ID(skb), err);
  237. dev_kfree_skb(skb);
  238. continue;
  239. }
  240. if (unlikely(test_bit(mISDN_STACK_STOPPED,
  241. &st->status))) {
  242. test_and_clear_bit(mISDN_STACK_WORK,
  243. &st->status);
  244. test_and_clear_bit(mISDN_STACK_RUNNING,
  245. &st->status);
  246. break;
  247. }
  248. }
  249. if (test_bit(mISDN_STACK_CLEARING, &st->status)) {
  250. test_and_set_bit(mISDN_STACK_STOPPED, &st->status);
  251. test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
  252. do_clear_stack(st);
  253. test_and_clear_bit(mISDN_STACK_CLEARING, &st->status);
  254. test_and_set_bit(mISDN_STACK_RESTART, &st->status);
  255. }
  256. if (test_and_clear_bit(mISDN_STACK_RESTART, &st->status)) {
  257. test_and_clear_bit(mISDN_STACK_STOPPED, &st->status);
  258. test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
  259. if (!skb_queue_empty(&st->msgq))
  260. test_and_set_bit(mISDN_STACK_WORK,
  261. &st->status);
  262. }
  263. if (test_bit(mISDN_STACK_ABORT, &st->status))
  264. break;
  265. if (st->notify != NULL) {
  266. complete(st->notify);
  267. st->notify = NULL;
  268. }
  269. #ifdef MISDN_MSG_STATS
  270. st->sleep_cnt++;
  271. #endif
  272. test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
  273. wait_event_interruptible(st->workq, (st->status &
  274. mISDN_STACK_ACTION_MASK));
  275. if (*debug & DEBUG_MSG_THREAD)
  276. printk(KERN_DEBUG "%s: %s wake status %08lx\n",
  277. __func__, dev_name(&st->dev->dev), st->status);
  278. test_and_set_bit(mISDN_STACK_ACTIVE, &st->status);
  279. test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status);
  280. if (test_bit(mISDN_STACK_STOPPED, &st->status)) {
  281. test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
  282. #ifdef MISDN_MSG_STATS
  283. st->stopped_cnt++;
  284. #endif
  285. }
  286. }
  287. #ifdef MISDN_MSG_STATS
  288. printk(KERN_DEBUG "mISDNStackd daemon for %s proceed %d "
  289. "msg %d sleep %d stopped\n",
  290. dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt,
  291. st->stopped_cnt);
  292. printk(KERN_DEBUG
  293. "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
  294. dev_name(&st->dev->dev), st->thread->utime, st->thread->stime);
  295. printk(KERN_DEBUG
  296. "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
  297. dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw);
  298. printk(KERN_DEBUG "mISDNStackd daemon for %s killed now\n",
  299. dev_name(&st->dev->dev));
  300. #endif
  301. test_and_set_bit(mISDN_STACK_KILLED, &st->status);
  302. test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
  303. test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
  304. test_and_clear_bit(mISDN_STACK_ABORT, &st->status);
  305. skb_queue_purge(&st->msgq);
  306. st->thread = NULL;
  307. if (st->notify != NULL) {
  308. complete(st->notify);
  309. st->notify = NULL;
  310. }
  311. return 0;
  312. }
  313. static int
  314. l1_receive(struct mISDNchannel *ch, struct sk_buff *skb)
  315. {
  316. if (!ch->st)
  317. return -ENODEV;
  318. __net_timestamp(skb);
  319. _queue_message(ch->st, skb);
  320. return 0;
  321. }
  322. void
  323. set_channel_address(struct mISDNchannel *ch, u_int sapi, u_int tei)
  324. {
  325. ch->addr = sapi | (tei << 8);
  326. }
  327. void
  328. __add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
  329. {
  330. list_add_tail(&ch->list, &st->layer2);
  331. }
  332. void
  333. add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
  334. {
  335. mutex_lock(&st->lmutex);
  336. __add_layer2(ch, st);
  337. mutex_unlock(&st->lmutex);
  338. }
  339. static int
  340. st_own_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
  341. {
  342. if (!ch->st || !ch->st->layer1)
  343. return -EINVAL;
  344. return ch->st->layer1->ctrl(ch->st->layer1, cmd, arg);
  345. }
  346. int
  347. create_stack(struct mISDNdevice *dev)
  348. {
  349. struct mISDNstack *newst;
  350. int err;
  351. DECLARE_COMPLETION_ONSTACK(done);
  352. newst = kzalloc(sizeof(struct mISDNstack), GFP_KERNEL);
  353. if (!newst) {
  354. printk(KERN_ERR "kmalloc mISDN_stack failed\n");
  355. return -ENOMEM;
  356. }
  357. newst->dev = dev;
  358. INIT_LIST_HEAD(&newst->layer2);
  359. INIT_HLIST_HEAD(&newst->l1sock.head);
  360. rwlock_init(&newst->l1sock.lock);
  361. init_waitqueue_head(&newst->workq);
  362. skb_queue_head_init(&newst->msgq);
  363. mutex_init(&newst->lmutex);
  364. dev->D.st = newst;
  365. err = create_teimanager(dev);
  366. if (err) {
  367. printk(KERN_ERR "kmalloc teimanager failed\n");
  368. kfree(newst);
  369. return err;
  370. }
  371. dev->teimgr->peer = &newst->own;
  372. dev->teimgr->recv = mISDN_queue_message;
  373. dev->teimgr->st = newst;
  374. newst->layer1 = &dev->D;
  375. dev->D.recv = l1_receive;
  376. dev->D.peer = &newst->own;
  377. newst->own.st = newst;
  378. newst->own.ctrl = st_own_ctrl;
  379. newst->own.send = mISDN_queue_message;
  380. newst->own.recv = mISDN_queue_message;
  381. if (*debug & DEBUG_CORE_FUNC)
  382. printk(KERN_DEBUG "%s: st(%s)\n", __func__,
  383. dev_name(&newst->dev->dev));
  384. newst->notify = &done;
  385. newst->thread = kthread_run(mISDNStackd, (void *)newst, "mISDN_%s",
  386. dev_name(&newst->dev->dev));
  387. if (IS_ERR(newst->thread)) {
  388. err = PTR_ERR(newst->thread);
  389. printk(KERN_ERR
  390. "mISDN:cannot create kernel thread for %s (%d)\n",
  391. dev_name(&newst->dev->dev), err);
  392. delete_teimanager(dev->teimgr);
  393. kfree(newst);
  394. } else
  395. wait_for_completion(&done);
  396. return err;
  397. }
  398. int
  399. connect_layer1(struct mISDNdevice *dev, struct mISDNchannel *ch,
  400. u_int protocol, struct sockaddr_mISDN *adr)
  401. {
  402. struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
  403. struct channel_req rq;
  404. int err;
  405. if (*debug & DEBUG_CORE_FUNC)
  406. printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
  407. __func__, dev_name(&dev->dev), protocol, adr->dev,
  408. adr->channel, adr->sapi, adr->tei);
  409. switch (protocol) {
  410. case ISDN_P_NT_S0:
  411. case ISDN_P_NT_E1:
  412. case ISDN_P_TE_S0:
  413. case ISDN_P_TE_E1:
  414. ch->recv = mISDN_queue_message;
  415. ch->peer = &dev->D.st->own;
  416. ch->st = dev->D.st;
  417. rq.protocol = protocol;
  418. rq.adr.channel = adr->channel;
  419. err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
  420. printk(KERN_DEBUG "%s: ret %d (dev %d)\n", __func__, err,
  421. dev->id);
  422. if (err)
  423. return err;
  424. write_lock_bh(&dev->D.st->l1sock.lock);
  425. sk_add_node(&msk->sk, &dev->D.st->l1sock.head);
  426. write_unlock_bh(&dev->D.st->l1sock.lock);
  427. break;
  428. default:
  429. return -ENOPROTOOPT;
  430. }
  431. return 0;
  432. }
  433. int
  434. connect_Bstack(struct mISDNdevice *dev, struct mISDNchannel *ch,
  435. u_int protocol, struct sockaddr_mISDN *adr)
  436. {
  437. struct channel_req rq, rq2;
  438. int pmask, err;
  439. struct Bprotocol *bp;
  440. if (*debug & DEBUG_CORE_FUNC)
  441. printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
  442. __func__, dev_name(&dev->dev), protocol,
  443. adr->dev, adr->channel, adr->sapi,
  444. adr->tei);
  445. ch->st = dev->D.st;
  446. pmask = 1 << (protocol & ISDN_P_B_MASK);
  447. if (pmask & dev->Bprotocols) {
  448. rq.protocol = protocol;
  449. rq.adr = *adr;
  450. err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
  451. if (err)
  452. return err;
  453. ch->recv = rq.ch->send;
  454. ch->peer = rq.ch;
  455. rq.ch->recv = ch->send;
  456. rq.ch->peer = ch;
  457. rq.ch->st = dev->D.st;
  458. } else {
  459. bp = get_Bprotocol4mask(pmask);
  460. if (!bp)
  461. return -ENOPROTOOPT;
  462. rq2.protocol = protocol;
  463. rq2.adr = *adr;
  464. rq2.ch = ch;
  465. err = bp->create(&rq2);
  466. if (err)
  467. return err;
  468. ch->recv = rq2.ch->send;
  469. ch->peer = rq2.ch;
  470. rq2.ch->st = dev->D.st;
  471. rq.protocol = rq2.protocol;
  472. rq.adr = *adr;
  473. err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
  474. if (err) {
  475. rq2.ch->ctrl(rq2.ch, CLOSE_CHANNEL, NULL);
  476. return err;
  477. }
  478. rq2.ch->recv = rq.ch->send;
  479. rq2.ch->peer = rq.ch;
  480. rq.ch->recv = rq2.ch->send;
  481. rq.ch->peer = rq2.ch;
  482. rq.ch->st = dev->D.st;
  483. }
  484. ch->protocol = protocol;
  485. ch->nr = rq.ch->nr;
  486. return 0;
  487. }
  488. int
  489. create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
  490. u_int protocol, struct sockaddr_mISDN *adr)
  491. {
  492. struct channel_req rq;
  493. int err;
  494. if (*debug & DEBUG_CORE_FUNC)
  495. printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
  496. __func__, dev_name(&dev->dev), protocol,
  497. adr->dev, adr->channel, adr->sapi,
  498. adr->tei);
  499. rq.protocol = ISDN_P_TE_S0;
  500. if (dev->Dprotocols & (1 << ISDN_P_TE_E1))
  501. rq.protocol = ISDN_P_TE_E1;
  502. switch (protocol) {
  503. case ISDN_P_LAPD_NT:
  504. rq.protocol = ISDN_P_NT_S0;
  505. if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
  506. rq.protocol = ISDN_P_NT_E1;
  507. case ISDN_P_LAPD_TE:
  508. ch->recv = mISDN_queue_message;
  509. ch->peer = &dev->D.st->own;
  510. ch->st = dev->D.st;
  511. rq.adr.channel = 0;
  512. err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
  513. printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err);
  514. if (err)
  515. break;
  516. rq.protocol = protocol;
  517. rq.adr = *adr;
  518. rq.ch = ch;
  519. err = dev->teimgr->ctrl(dev->teimgr, OPEN_CHANNEL, &rq);
  520. printk(KERN_DEBUG "%s: ret 2 %d\n", __func__, err);
  521. if (!err) {
  522. if ((protocol == ISDN_P_LAPD_NT) && !rq.ch)
  523. break;
  524. add_layer2(rq.ch, dev->D.st);
  525. rq.ch->recv = mISDN_queue_message;
  526. rq.ch->peer = &dev->D.st->own;
  527. rq.ch->ctrl(rq.ch, OPEN_CHANNEL, NULL); /* can't fail */
  528. }
  529. break;
  530. default:
  531. err = -EPROTONOSUPPORT;
  532. }
  533. return err;
  534. }
  535. void
  536. delete_channel(struct mISDNchannel *ch)
  537. {
  538. struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
  539. struct mISDNchannel *pch;
  540. if (!ch->st) {
  541. printk(KERN_WARNING "%s: no stack\n", __func__);
  542. return;
  543. }
  544. if (*debug & DEBUG_CORE_FUNC)
  545. printk(KERN_DEBUG "%s: st(%s) protocol(%x)\n", __func__,
  546. dev_name(&ch->st->dev->dev), ch->protocol);
  547. if (ch->protocol >= ISDN_P_B_START) {
  548. if (ch->peer) {
  549. ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL);
  550. ch->peer = NULL;
  551. }
  552. return;
  553. }
  554. switch (ch->protocol) {
  555. case ISDN_P_NT_S0:
  556. case ISDN_P_TE_S0:
  557. case ISDN_P_NT_E1:
  558. case ISDN_P_TE_E1:
  559. write_lock_bh(&ch->st->l1sock.lock);
  560. sk_del_node_init(&msk->sk);
  561. write_unlock_bh(&ch->st->l1sock.lock);
  562. ch->st->dev->D.ctrl(&ch->st->dev->D, CLOSE_CHANNEL, NULL);
  563. break;
  564. case ISDN_P_LAPD_TE:
  565. pch = get_channel4id(ch->st, ch->nr);
  566. if (pch) {
  567. mutex_lock(&ch->st->lmutex);
  568. list_del(&pch->list);
  569. mutex_unlock(&ch->st->lmutex);
  570. pch->ctrl(pch, CLOSE_CHANNEL, NULL);
  571. pch = ch->st->dev->teimgr;
  572. pch->ctrl(pch, CLOSE_CHANNEL, NULL);
  573. } else
  574. printk(KERN_WARNING "%s: no l2 channel\n",
  575. __func__);
  576. break;
  577. case ISDN_P_LAPD_NT:
  578. pch = ch->st->dev->teimgr;
  579. if (pch) {
  580. pch->ctrl(pch, CLOSE_CHANNEL, NULL);
  581. } else
  582. printk(KERN_WARNING "%s: no l2 channel\n",
  583. __func__);
  584. break;
  585. default:
  586. break;
  587. }
  588. return;
  589. }
  590. void
  591. delete_stack(struct mISDNdevice *dev)
  592. {
  593. struct mISDNstack *st = dev->D.st;
  594. DECLARE_COMPLETION_ONSTACK(done);
  595. if (*debug & DEBUG_CORE_FUNC)
  596. printk(KERN_DEBUG "%s: st(%s)\n", __func__,
  597. dev_name(&st->dev->dev));
  598. if (dev->teimgr)
  599. delete_teimanager(dev->teimgr);
  600. if (st->thread) {
  601. if (st->notify) {
  602. printk(KERN_WARNING "%s: notifier in use\n",
  603. __func__);
  604. complete(st->notify);
  605. }
  606. st->notify = &done;
  607. test_and_set_bit(mISDN_STACK_ABORT, &st->status);
  608. test_and_set_bit(mISDN_STACK_WAKEUP, &st->status);
  609. wake_up_interruptible(&st->workq);
  610. wait_for_completion(&done);
  611. }
  612. if (!list_empty(&st->layer2))
  613. printk(KERN_WARNING "%s: layer2 list not empty\n",
  614. __func__);
  615. if (!hlist_empty(&st->l1sock.head))
  616. printk(KERN_WARNING "%s: layer1 list not empty\n",
  617. __func__);
  618. kfree(st);
  619. }
  620. void
  621. mISDN_initstack(u_int *dp)
  622. {
  623. debug = dp;
  624. }