/* stack.c */
  1. /*
  2. *
  3. * Author Karsten Keil <kkeil@novell.com>
  4. *
  5. * Copyright 2008 by Karsten Keil <kkeil@novell.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. */
  17. #include <linux/mISDNif.h>
  18. #include <linux/kthread.h>
  19. #include <linux/smp_lock.h>
  20. #include "core.h"
  21. static u_int *debug;
  22. static inline void
  23. _queue_message(struct mISDNstack *st, struct sk_buff *skb)
  24. {
  25. struct mISDNhead *hh = mISDN_HEAD_P(skb);
  26. if (*debug & DEBUG_QUEUE_FUNC)
  27. printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
  28. __func__, hh->prim, hh->id, skb);
  29. skb_queue_tail(&st->msgq, skb);
  30. if (likely(!test_bit(mISDN_STACK_STOPPED, &st->status))) {
  31. test_and_set_bit(mISDN_STACK_WORK, &st->status);
  32. wake_up_interruptible(&st->workq);
  33. }
  34. }
/*
 * Channel callback: hand the skb to the channel's stack thread queue.
 * Always reports success; the queued message is processed asynchronously.
 */
static int
mISDN_queue_message(struct mISDNchannel *ch, struct sk_buff *skb)
{
	_queue_message(ch->st, skb);
	return 0;
}
  41. static struct mISDNchannel *
  42. get_channel4id(struct mISDNstack *st, u_int id)
  43. {
  44. struct mISDNchannel *ch;
  45. mutex_lock(&st->lmutex);
  46. list_for_each_entry(ch, &st->layer2, list) {
  47. if (id == ch->nr)
  48. goto unlock;
  49. }
  50. ch = NULL;
  51. unlock:
  52. mutex_unlock(&st->lmutex);
  53. return ch;
  54. }
  55. static void
  56. send_socklist(struct mISDN_sock_list *sl, struct sk_buff *skb)
  57. {
  58. struct hlist_node *node;
  59. struct sock *sk;
  60. struct sk_buff *cskb = NULL;
  61. read_lock(&sl->lock);
  62. sk_for_each(sk, node, &sl->head) {
  63. if (sk->sk_state != MISDN_BOUND)
  64. continue;
  65. if (!cskb)
  66. cskb = skb_copy(skb, GFP_KERNEL);
  67. if (!cskb) {
  68. printk(KERN_WARNING "%s no skb\n", __func__);
  69. break;
  70. }
  71. if (!sock_queue_rcv_skb(sk, cskb))
  72. cskb = NULL;
  73. }
  74. read_unlock(&sl->lock);
  75. if (cskb)
  76. dev_kfree_skb(cskb);
  77. }
  78. static void
  79. send_layer2(struct mISDNstack *st, struct sk_buff *skb)
  80. {
  81. struct sk_buff *cskb;
  82. struct mISDNhead *hh = mISDN_HEAD_P(skb);
  83. struct mISDNchannel *ch;
  84. int ret;
  85. if (!st)
  86. return;
  87. mutex_lock(&st->lmutex);
  88. if ((hh->id & MISDN_ID_ADDR_MASK) == MISDN_ID_ANY) { /* L2 for all */
  89. list_for_each_entry(ch, &st->layer2, list) {
  90. if (list_is_last(&ch->list, &st->layer2)) {
  91. cskb = skb;
  92. skb = NULL;
  93. } else {
  94. cskb = skb_copy(skb, GFP_KERNEL);
  95. }
  96. if (cskb) {
  97. ret = ch->send(ch, cskb);
  98. if (ret) {
  99. if (*debug & DEBUG_SEND_ERR)
  100. printk(KERN_DEBUG
  101. "%s ch%d prim(%x) addr(%x)"
  102. " err %d\n",
  103. __func__, ch->nr,
  104. hh->prim, ch->addr, ret);
  105. dev_kfree_skb(cskb);
  106. }
  107. } else {
  108. printk(KERN_WARNING "%s ch%d addr %x no mem\n",
  109. __func__, ch->nr, ch->addr);
  110. goto out;
  111. }
  112. }
  113. } else {
  114. list_for_each_entry(ch, &st->layer2, list) {
  115. if ((hh->id & MISDN_ID_ADDR_MASK) == ch->addr) {
  116. ret = ch->send(ch, skb);
  117. if (!ret)
  118. skb = NULL;
  119. goto out;
  120. }
  121. }
  122. ret = st->dev->teimgr->ctrl(st->dev->teimgr, CHECK_DATA, skb);
  123. if (!ret)
  124. skb = NULL;
  125. else if (*debug & DEBUG_SEND_ERR)
  126. printk(KERN_DEBUG
  127. "%s ch%d mgr prim(%x) addr(%x) err %d\n",
  128. __func__, ch->nr, hh->prim, ch->addr, ret);
  129. }
  130. out:
  131. mutex_unlock(&st->lmutex);
  132. if (skb)
  133. dev_kfree_skb(skb);
  134. }
  135. static inline int
  136. send_msg_to_layer(struct mISDNstack *st, struct sk_buff *skb)
  137. {
  138. struct mISDNhead *hh = mISDN_HEAD_P(skb);
  139. struct mISDNchannel *ch;
  140. int lm;
  141. lm = hh->prim & MISDN_LAYERMASK;
  142. if (*debug & DEBUG_QUEUE_FUNC)
  143. printk(KERN_DEBUG "%s prim(%x) id(%x) %p\n",
  144. __func__, hh->prim, hh->id, skb);
  145. if (lm == 0x1) {
  146. if (!hlist_empty(&st->l1sock.head)) {
  147. __net_timestamp(skb);
  148. send_socklist(&st->l1sock, skb);
  149. }
  150. return st->layer1->send(st->layer1, skb);
  151. } else if (lm == 0x2) {
  152. if (!hlist_empty(&st->l1sock.head))
  153. send_socklist(&st->l1sock, skb);
  154. send_layer2(st, skb);
  155. return 0;
  156. } else if (lm == 0x4) {
  157. ch = get_channel4id(st, hh->id);
  158. if (ch)
  159. return ch->send(ch, skb);
  160. else
  161. printk(KERN_WARNING
  162. "%s: dev(%s) prim(%x) id(%x) no channel\n",
  163. __func__, dev_name(&st->dev->dev), hh->prim,
  164. hh->id);
  165. } else if (lm == 0x8) {
  166. WARN_ON(lm == 0x8);
  167. ch = get_channel4id(st, hh->id);
  168. if (ch)
  169. return ch->send(ch, skb);
  170. else
  171. printk(KERN_WARNING
  172. "%s: dev(%s) prim(%x) id(%x) no channel\n",
  173. __func__, dev_name(&st->dev->dev), hh->prim,
  174. hh->id);
  175. } else {
  176. /* broadcast not handled yet */
  177. printk(KERN_WARNING "%s: dev(%s) prim %x not delivered\n",
  178. __func__, dev_name(&st->dev->dev), hh->prim);
  179. }
  180. return -ESRCH;
  181. }
/*
 * Clear pending state when the stack enters the CLEARING phase.
 * Currently a stub — nothing is torn down here; kept as the hook called
 * from the stack thread while the stack is stopped.
 */
static void
do_clear_stack(struct mISDNstack *st)
{
}
/*
 * Stack worker thread.  Drains st->msgq and pushes each message through
 * send_msg_to_layer(), driven by the mISDN_STACK_* bits in st->status:
 * WORK means messages are pending, RUNNING/ACTIVE track the thread's own
 * state, STOPPED suspends processing, CLEARING triggers do_clear_stack(),
 * RESTART resumes after a stop, and ABORT terminates the thread.
 * The exact ordering of the test_and_set/clear calls below is part of the
 * handshake with _queue_message() and delete_stack() — do not reorder.
 */
static int
mISDNStackd(void *data)
{
	struct mISDNstack *st = data;
	int err = 0;

#ifdef CONFIG_SMP
	/* NOTE(review): big-kernel-lock bracketing around signal setup on
	 * SMP; presumably required by this kernel vintage — confirm. */
	lock_kernel();
#endif
	/* block all signals for this kernel thread */
	sigfillset(&current->blocked);
#ifdef CONFIG_SMP
	unlock_kernel();
#endif
	if (*debug & DEBUG_MSG_THREAD)
		printk(KERN_DEBUG "mISDNStackd %s started\n",
		    dev_name(&st->dev->dev));

	/* signal create_stack() that the thread is up */
	if (st->notify != NULL) {
		complete(st->notify);
		st->notify = NULL;
	}

	for (;;) {
		struct sk_buff *skb;

		if (unlikely(test_bit(mISDN_STACK_STOPPED, &st->status))) {
			test_and_clear_bit(mISDN_STACK_WORK, &st->status);
			test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
		} else
			test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
		while (test_bit(mISDN_STACK_WORK, &st->status)) {
			skb = skb_dequeue(&st->msgq);
			if (!skb) {
				test_and_clear_bit(mISDN_STACK_WORK,
					&st->status);
				/* test if a race happens */
				skb = skb_dequeue(&st->msgq);
				if (!skb)
					continue;
				test_and_set_bit(mISDN_STACK_WORK,
					&st->status);
			}
#ifdef MISDN_MSG_STATS
			st->msg_cnt++;
#endif
			err = send_msg_to_layer(st, skb);
			if (unlikely(err)) {
				/* undeliverable message: log and drop */
				if (*debug & DEBUG_SEND_ERR)
					printk(KERN_DEBUG
					    "%s: %s prim(%x) id(%x) "
					    "send call(%d)\n",
					    __func__, dev_name(&st->dev->dev),
					    mISDN_HEAD_PRIM(skb),
					    mISDN_HEAD_ID(skb), err);
				dev_kfree_skb(skb);
				continue;
			}
			if (unlikely(test_bit(mISDN_STACK_STOPPED,
			    &st->status))) {
				test_and_clear_bit(mISDN_STACK_WORK,
					&st->status);
				test_and_clear_bit(mISDN_STACK_RUNNING,
					&st->status);
				break;
			}
		}
		if (test_bit(mISDN_STACK_CLEARING, &st->status)) {
			/* stop, clear, then request a restart */
			test_and_set_bit(mISDN_STACK_STOPPED, &st->status);
			test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
			do_clear_stack(st);
			test_and_clear_bit(mISDN_STACK_CLEARING, &st->status);
			test_and_set_bit(mISDN_STACK_RESTART, &st->status);
		}
		if (test_and_clear_bit(mISDN_STACK_RESTART, &st->status)) {
			test_and_clear_bit(mISDN_STACK_STOPPED, &st->status);
			test_and_set_bit(mISDN_STACK_RUNNING, &st->status);
			/* re-arm WORK if messages arrived while stopped */
			if (!skb_queue_empty(&st->msgq))
				test_and_set_bit(mISDN_STACK_WORK,
					&st->status);
		}
		if (test_bit(mISDN_STACK_ABORT, &st->status))
			break;
		/* signal whoever is waiting for the queue to drain */
		if (st->notify != NULL) {
			complete(st->notify);
			st->notify = NULL;
		}
#ifdef MISDN_MSG_STATS
		st->sleep_cnt++;
#endif
		test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
		/* sleep until some action bit is set */
		wait_event_interruptible(st->workq, (st->status &
		    mISDN_STACK_ACTION_MASK));
		if (*debug & DEBUG_MSG_THREAD)
			printk(KERN_DEBUG "%s: %s wake status %08lx\n",
			    __func__, dev_name(&st->dev->dev), st->status);
		test_and_set_bit(mISDN_STACK_ACTIVE, &st->status);
		test_and_clear_bit(mISDN_STACK_WAKEUP, &st->status);
		if (test_bit(mISDN_STACK_STOPPED, &st->status)) {
			test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
#ifdef MISDN_MSG_STATS
			st->stopped_cnt++;
#endif
		}
	}
#ifdef MISDN_MSG_STATS
	printk(KERN_DEBUG "mISDNStackd daemon for %s proceed %d "
	    "msg %d sleep %d stopped\n",
	    dev_name(&st->dev->dev), st->msg_cnt, st->sleep_cnt,
	    st->stopped_cnt);
	printk(KERN_DEBUG
	    "mISDNStackd daemon for %s utime(%ld) stime(%ld)\n",
	    dev_name(&st->dev->dev), st->thread->utime, st->thread->stime);
	printk(KERN_DEBUG
	    "mISDNStackd daemon for %s nvcsw(%ld) nivcsw(%ld)\n",
	    dev_name(&st->dev->dev), st->thread->nvcsw, st->thread->nivcsw);
	printk(KERN_DEBUG "mISDNStackd daemon for %s killed now\n",
	    dev_name(&st->dev->dev));
#endif
	/* final teardown: mark killed, drop queued messages, wake waiter */
	test_and_set_bit(mISDN_STACK_KILLED, &st->status);
	test_and_clear_bit(mISDN_STACK_RUNNING, &st->status);
	test_and_clear_bit(mISDN_STACK_ACTIVE, &st->status);
	test_and_clear_bit(mISDN_STACK_ABORT, &st->status);
	skb_queue_purge(&st->msgq);
	st->thread = NULL;
	if (st->notify != NULL) {
		complete(st->notify);
		st->notify = NULL;
	}
	return 0;
}
  312. static int
  313. l1_receive(struct mISDNchannel *ch, struct sk_buff *skb)
  314. {
  315. if (!ch->st)
  316. return -ENODEV;
  317. __net_timestamp(skb);
  318. _queue_message(ch->st, skb);
  319. return 0;
  320. }
  321. void
  322. set_channel_address(struct mISDNchannel *ch, u_int sapi, u_int tei)
  323. {
  324. ch->addr = sapi | (tei << 8);
  325. }
/*
 * Append a channel to the stack's layer-2 list.  Lockless variant —
 * the caller must already hold st->lmutex (see add_layer2()).
 */
void
__add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
{
	list_add_tail(&ch->list, &st->layer2);
}
/*
 * Append a channel to the stack's layer-2 list, taking st->lmutex
 * around the insertion.
 */
void
add_layer2(struct mISDNchannel *ch, struct mISDNstack *st)
{
	mutex_lock(&st->lmutex);
	__add_layer2(ch, st);
	mutex_unlock(&st->lmutex);
}
  338. static int
  339. st_own_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
  340. {
  341. if (!ch->st || ch->st->layer1)
  342. return -EINVAL;
  343. return ch->st->layer1->ctrl(ch->st->layer1, cmd, arg);
  344. }
/*
 * Allocate and initialise the mISDN stack for a device: set up the
 * layer-2 list, L1 socket list, message queue and locks, create the TEI
 * manager, wire the device's D channel and the stack's own channel
 * together, and start the mISDNStackd worker thread.  Blocks until the
 * thread has signalled startup via the `done` completion.
 * Returns 0 on success or a negative errno; on failure the teimanager
 * and the stack allocation are rolled back.
 */
int
create_stack(struct mISDNdevice *dev)
{
	struct mISDNstack *newst;
	int err;
	DECLARE_COMPLETION_ONSTACK(done);

	newst = kzalloc(sizeof(struct mISDNstack), GFP_KERNEL);
	if (!newst) {
		printk(KERN_ERR "kmalloc mISDN_stack failed\n");
		return -ENOMEM;
	}
	newst->dev = dev;
	INIT_LIST_HEAD(&newst->layer2);
	INIT_HLIST_HEAD(&newst->l1sock.head);
	rwlock_init(&newst->l1sock.lock);
	init_waitqueue_head(&newst->workq);
	skb_queue_head_init(&newst->msgq);
	mutex_init(&newst->lmutex);
	dev->D.st = newst;
	err = create_teimanager(dev);
	if (err) {
		printk(KERN_ERR "kmalloc teimanager failed\n");
		kfree(newst);
		return err;
	}
	/* cross-wire TEI manager, D channel and the stack's own channel */
	dev->teimgr->peer = &newst->own;
	dev->teimgr->recv = mISDN_queue_message;
	dev->teimgr->st = newst;
	newst->layer1 = &dev->D;
	dev->D.recv = l1_receive;
	dev->D.peer = &newst->own;
	newst->own.st = newst;
	newst->own.ctrl = st_own_ctrl;
	newst->own.send = mISDN_queue_message;
	newst->own.recv = mISDN_queue_message;
	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: st(%s)\n", __func__,
		    dev_name(&newst->dev->dev));
	newst->notify = &done;
	newst->thread = kthread_run(mISDNStackd, (void *)newst, "mISDN_%s",
	    dev_name(&newst->dev->dev));
	if (IS_ERR(newst->thread)) {
		err = PTR_ERR(newst->thread);
		printk(KERN_ERR
		    "mISDN:cannot create kernel thread for %s (%d)\n",
		    dev_name(&newst->dev->dev), err);
		delete_teimanager(dev->teimgr);
		kfree(newst);
	} else
		/* wait for mISDNStackd to complete(&done) on startup */
		wait_for_completion(&done);
	return err;
}
  397. int
  398. connect_layer1(struct mISDNdevice *dev, struct mISDNchannel *ch,
  399. u_int protocol, struct sockaddr_mISDN *adr)
  400. {
  401. struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
  402. struct channel_req rq;
  403. int err;
  404. if (*debug & DEBUG_CORE_FUNC)
  405. printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
  406. __func__, dev_name(&dev->dev), protocol, adr->dev,
  407. adr->channel, adr->sapi, adr->tei);
  408. switch (protocol) {
  409. case ISDN_P_NT_S0:
  410. case ISDN_P_NT_E1:
  411. case ISDN_P_TE_S0:
  412. case ISDN_P_TE_E1:
  413. ch->recv = mISDN_queue_message;
  414. ch->peer = &dev->D.st->own;
  415. ch->st = dev->D.st;
  416. rq.protocol = protocol;
  417. rq.adr.channel = adr->channel;
  418. err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
  419. printk(KERN_DEBUG "%s: ret %d (dev %d)\n", __func__, err,
  420. dev->id);
  421. if (err)
  422. return err;
  423. write_lock_bh(&dev->D.st->l1sock.lock);
  424. sk_add_node(&msk->sk, &dev->D.st->l1sock.head);
  425. write_unlock_bh(&dev->D.st->l1sock.lock);
  426. break;
  427. default:
  428. return -ENOPROTOOPT;
  429. }
  430. return 0;
  431. }
/*
 * Connect a socket channel to a B channel.  If the device's hardware
 * supports the requested B protocol directly, the hardware channel is
 * opened and cross-wired with `ch`.  Otherwise a software B-protocol
 * module is created first (rq2) and inserted between `ch` and the
 * hardware channel (rq), so data flows ch <-> bp <-> hardware.
 * Returns 0 or a negative errno; on a late OPEN_CHANNEL failure the
 * software module is closed again.
 */
int
connect_Bstack(struct mISDNdevice *dev, struct mISDNchannel *ch,
    u_int protocol, struct sockaddr_mISDN *adr)
{
	struct channel_req rq, rq2;
	int pmask, err;
	struct Bprotocol *bp;

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
		    __func__, dev_name(&dev->dev), protocol,
		    adr->dev, adr->channel, adr->sapi,
		    adr->tei);
	ch->st = dev->D.st;
	pmask = 1 << (protocol & ISDN_P_B_MASK);
	if (pmask & dev->Bprotocols) {
		/* hardware handles this protocol: wire ch directly */
		rq.protocol = protocol;
		rq.adr = *adr;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		if (err)
			return err;
		ch->recv = rq.ch->send;
		ch->peer = rq.ch;
		rq.ch->recv = ch->send;
		rq.ch->peer = ch;
		rq.ch->st = dev->D.st;
	} else {
		/* need a software B-protocol module in between */
		bp = get_Bprotocol4mask(pmask);
		if (!bp)
			return -ENOPROTOOPT;
		rq2.protocol = protocol;
		rq2.adr = *adr;
		rq2.ch = ch;
		err = bp->create(&rq2);
		if (err)
			return err;
		/* upper side: ch <-> software module */
		ch->recv = rq2.ch->send;
		ch->peer = rq2.ch;
		rq2.ch->st = dev->D.st;
		/* NOTE(review): rq.protocol is taken from rq2.protocol —
		 * presumably bp->create() rewrote it to the protocol the
		 * hardware side should use; confirm against Bprotocol impls */
		rq.protocol = rq2.protocol;
		rq.adr = *adr;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		if (err) {
			rq2.ch->ctrl(rq2.ch, CLOSE_CHANNEL, NULL);
			return err;
		}
		/* lower side: software module <-> hardware channel */
		rq2.ch->recv = rq.ch->send;
		rq2.ch->peer = rq.ch;
		rq.ch->recv = rq2.ch->send;
		rq.ch->peer = rq2.ch;
		rq.ch->st = dev->D.st;
	}
	ch->protocol = protocol;
	ch->nr = rq.ch->nr;
	return 0;
}
/*
 * Create a layer-2 entity (LAPD TE or NT) on a device: open the matching
 * layer-1 D channel, then ask the TEI manager to open the layer-2
 * channel, register it on the stack's layer-2 list and activate it.
 * Returns 0 on success, -EPROTONOSUPPORT for other protocols, or the
 * error from either OPEN_CHANNEL call.
 */
int
create_l2entity(struct mISDNdevice *dev, struct mISDNchannel *ch,
    u_int protocol, struct sockaddr_mISDN *adr)
{
	struct channel_req rq;
	int err;

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: %s proto(%x) adr(%d %d %d %d)\n",
		    __func__, dev_name(&dev->dev), protocol,
		    adr->dev, adr->channel, adr->sapi,
		    adr->tei);
	/* default: TE mode layer 1, E1 variant when the device offers it */
	rq.protocol = ISDN_P_TE_S0;
	if (dev->Dprotocols & (1 << ISDN_P_TE_E1))
		rq.protocol = ISDN_P_TE_E1;
	switch (protocol) {
	case ISDN_P_LAPD_NT:
		/* NT mode overrides the layer-1 protocol choice ... */
		rq.protocol = ISDN_P_NT_S0;
		if (dev->Dprotocols & (1 << ISDN_P_NT_E1))
			rq.protocol = ISDN_P_NT_E1;
		/* fall through: ... and shares the rest with TE mode */
	case ISDN_P_LAPD_TE:
		ch->recv = mISDN_queue_message;
		ch->peer = &dev->D.st->own;
		ch->st = dev->D.st;
		rq.adr.channel = 0;
		err = dev->D.ctrl(&dev->D, OPEN_CHANNEL, &rq);
		printk(KERN_DEBUG "%s: ret 1 %d\n", __func__, err);
		if (err)
			break;
		/* rq is reused for the layer-2 open via the TEI manager */
		rq.protocol = protocol;
		rq.adr = *adr;
		rq.ch = ch;
		err = dev->teimgr->ctrl(dev->teimgr, OPEN_CHANNEL, &rq);
		printk(KERN_DEBUG "%s: ret 2 %d\n", __func__, err);
		if (!err) {
			/* NT mode may legitimately return no channel */
			if ((protocol == ISDN_P_LAPD_NT) && !rq.ch)
				break;
			add_layer2(rq.ch, dev->D.st);
			rq.ch->recv = mISDN_queue_message;
			rq.ch->peer = &dev->D.st->own;
			rq.ch->ctrl(rq.ch, OPEN_CHANNEL, NULL); /* can't fail */
		}
		break;
	default:
		err = -EPROTONOSUPPORT;
	}
	return err;
}
/*
 * Detach and close a channel from its stack.  B channels (protocol >=
 * ISDN_P_B_START) just close their peer.  Raw layer-1 channels are
 * removed from the L1 socket list and the device's D channel is closed.
 * LAPD TE channels are taken off the layer-2 list and both the layer-2
 * channel and the TEI manager are closed; LAPD NT closes only the TEI
 * manager.
 */
void
delete_channel(struct mISDNchannel *ch)
{
	struct mISDN_sock *msk = container_of(ch, struct mISDN_sock, ch);
	struct mISDNchannel *pch;

	if (!ch->st) {
		printk(KERN_WARNING "%s: no stack\n", __func__);
		return;
	}
	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: st(%s) protocol(%x)\n", __func__,
		    dev_name(&ch->st->dev->dev), ch->protocol);
	if (ch->protocol >= ISDN_P_B_START) {
		/* B channel: closing the peer is all that is needed */
		if (ch->peer) {
			ch->peer->ctrl(ch->peer, CLOSE_CHANNEL, NULL);
			ch->peer = NULL;
		}
		return;
	}
	switch (ch->protocol) {
	case ISDN_P_NT_S0:
	case ISDN_P_TE_S0:
	case ISDN_P_NT_E1:
	case ISDN_P_TE_E1:
		/* raw layer-1 socket channel */
		write_lock_bh(&ch->st->l1sock.lock);
		sk_del_node_init(&msk->sk);
		write_unlock_bh(&ch->st->l1sock.lock);
		ch->st->dev->D.ctrl(&ch->st->dev->D, CLOSE_CHANNEL, NULL);
		break;
	case ISDN_P_LAPD_TE:
		pch = get_channel4id(ch->st, ch->nr);
		if (pch) {
			/* unlink from layer2 list, then close l2 and teimgr */
			mutex_lock(&ch->st->lmutex);
			list_del(&pch->list);
			mutex_unlock(&ch->st->lmutex);
			pch->ctrl(pch, CLOSE_CHANNEL, NULL);
			pch = ch->st->dev->teimgr;
			pch->ctrl(pch, CLOSE_CHANNEL, NULL);
		} else
			printk(KERN_WARNING "%s: no l2 channel\n",
			    __func__);
		break;
	case ISDN_P_LAPD_NT:
		pch = ch->st->dev->teimgr;
		if (pch) {
			pch->ctrl(pch, CLOSE_CHANNEL, NULL);
		} else
			printk(KERN_WARNING "%s: no l2 channel\n",
			    __func__);
		break;
	default:
		break;
	}
	return;
}
/*
 * Tear down a device's stack: delete the TEI manager, then ask the
 * mISDNStackd thread to abort and wait (via the `done` completion,
 * handed over through st->notify) until it has exited, and finally free
 * the stack.  Leftover layer-2 or layer-1 entries are only warned about.
 */
void
delete_stack(struct mISDNdevice *dev)
{
	struct mISDNstack *st = dev->D.st;
	DECLARE_COMPLETION_ONSTACK(done);

	if (*debug & DEBUG_CORE_FUNC)
		printk(KERN_DEBUG "%s: st(%s)\n", __func__,
		    dev_name(&st->dev->dev));
	if (dev->teimgr)
		delete_teimanager(dev->teimgr);
	if (st->thread) {
		/* a pending notifier would never fire again; release it */
		if (st->notify) {
			printk(KERN_WARNING "%s: notifier in use\n",
			    __func__);
			complete(st->notify);
		}
		st->notify = &done;
		test_and_set_bit(mISDN_STACK_ABORT, &st->status);
		test_and_set_bit(mISDN_STACK_WAKEUP, &st->status);
		wake_up_interruptible(&st->workq);
		/* mISDNStackd completes `done` just before it returns */
		wait_for_completion(&done);
	}
	if (!list_empty(&st->layer2))
		printk(KERN_WARNING "%s: layer2 list not empty\n",
		    __func__);
	if (!hlist_empty(&st->l1sock.head))
		printk(KERN_WARNING "%s: layer1 list not empty\n",
		    __func__);
	kfree(st);
}
/*
 * Module init hook: store the pointer to the shared debug-flag word that
 * all functions in this file consult via `*debug`.
 */
void
mISDN_initstack(u_int *dp)
{
	debug = dp;
}