layer2.c 51 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
7177817791780178117821783178417851786178717881789179017911792179317941795179617971798179918001801180218031804180518061807180818091810181118121813181418151816181718181819182018211822182318241825182618271828182918301831183218331834183518361837183818391840184118421843184418451846184718481849185018511852185318541855185618571858185918601861186218631864186518661867186818691870187118721873187418751876187718781879188018811882188318841885188618871888188918901891189218931894189518961897189818991900190119021903190419051906190719081909191019111912191319141915191619171918191919201921192219231924192519261927192819291930193119321933193419351936193719381939194019411942194319441945194619471948194919501951195219531954195519561957195819591960196119621963196419651966196719681969197019711972197319741975197619771978197919801981198219831984198519861987198819891990199119921993199419951996199719981999200020012002200320042005200620072008200920102011201220132014201520162017201820192020202120222023202420252026202720282029203020312032203320342035203620372038203920402041204220432044204520462047204820492050205120522053205420552056205720582059206020612062206320642065206620672068206920702071207220732074207520762077207820792080208120822083208420852086208720882089209020912092209320942095209620972098209921002101210221032104210521062107210821092110211121122113211421152116211721182119212021212122212321242125212621272128212921302131213221332134213521362137213821392140214121422143214421452146214721482149215021512152215321542155215621572158215921602161216221632164216521662167216821692170217121722173217421752176217721782179218021812182218321842185218621872188218921902191219221932194219521962197219821992200220122022203220422052206220722082209221022112212221322142215221622172218221922202221222222232224222522262227222822292230223122322233223422352236223722382239224022412242224322442245224622472248224922502251225222532254225522562257225822592260226122622263226422652266226722682269
  1. /*
  2. *
  3. * Author Karsten Keil <kkeil@novell.com>
  4. *
  5. * Copyright 2008 by Karsten Keil <kkeil@novell.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. */
  17. #include <linux/mISDNif.h>
  18. #include <linux/slab.h>
  19. #include "core.h"
  20. #include "fsm.h"
  21. #include "layer2.h"
/* pointer to the module-wide debug level mask (shared with the mISDN core) */
static u_int *debug;
/* global FSM description table; fields are filled in at module init —
 * NOTE(review): initialization is outside this chunk, confirm in module init */
static
struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL};
/* printable names for the layer 2 FSM states, indexed by state number */
static char *strL2State[] =
{
	"ST_L2_1",
	"ST_L2_2",
	"ST_L2_3",
	"ST_L2_4",
	"ST_L2_5",
	"ST_L2_6",
	"ST_L2_7",
	"ST_L2_8",
};
/*
 * Events fed into the layer 2 FSM: received frame types (UI..I),
 * requests from layer 3 (DL_*), management events (MDL_*), layer 1
 * indications, timer expiries (T200/T203) and busy-state changes.
 * Must stay index-aligned with strL2Event[] below.
 */
enum {
	EV_L2_UI,
	EV_L2_SABME,
	EV_L2_DISC,
	EV_L2_DM,
	EV_L2_UA,
	EV_L2_FRMR,
	EV_L2_SUPER,
	EV_L2_I,
	EV_L2_DL_DATA,
	EV_L2_ACK_PULL,
	EV_L2_DL_UNITDATA,
	EV_L2_DL_ESTABLISH_REQ,
	EV_L2_DL_RELEASE_REQ,
	EV_L2_MDL_ASSIGN,
	EV_L2_MDL_REMOVE,
	EV_L2_MDL_ERROR,
	EV_L1_DEACTIVATE,
	EV_L2_T200,
	EV_L2_T203,
	EV_L2_T200I,
	EV_L2_T203I,
	EV_L2_SET_OWN_BUSY,
	EV_L2_CLEAR_OWN_BUSY,
	EV_L2_FRAME_ERROR,
};
/* total number of FSM events (last enumerator + 1) */
#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR + 1)
/* printable names for the FSM events, index-aligned with the enum above */
static char *strL2Event[] =
{
	"EV_L2_UI",
	"EV_L2_SABME",
	"EV_L2_DISC",
	"EV_L2_DM",
	"EV_L2_UA",
	"EV_L2_FRMR",
	"EV_L2_SUPER",
	"EV_L2_I",
	"EV_L2_DL_DATA",
	"EV_L2_ACK_PULL",
	"EV_L2_DL_UNITDATA",
	"EV_L2_DL_ESTABLISH_REQ",
	"EV_L2_DL_RELEASE_REQ",
	"EV_L2_MDL_ASSIGN",
	"EV_L2_MDL_REMOVE",
	"EV_L2_MDL_ERROR",
	"EV_L1_DEACTIVATE",
	"EV_L2_T200",
	"EV_L2_T203",
	"EV_L2_T200I",
	"EV_L2_T203I",
	"EV_L2_SET_OWN_BUSY",
	"EV_L2_CLEAR_OWN_BUSY",
	"EV_L2_FRAME_ERROR",
};
  90. static void
  91. l2m_debug(struct FsmInst *fi, char *fmt, ...)
  92. {
  93. struct layer2 *l2 = fi->userdata;
  94. struct va_format vaf;
  95. va_list va;
  96. if (!(*debug & DEBUG_L2_FSM))
  97. return;
  98. va_start(va, fmt);
  99. vaf.fmt = fmt;
  100. vaf.va = &va;
  101. printk(KERN_DEBUG "l2 (sapi %d tei %d): %pV\n",
  102. l2->sapi, l2->tei, &vaf);
  103. va_end(va);
  104. }
  105. inline u_int
  106. l2headersize(struct layer2 *l2, int ui)
  107. {
  108. return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
  109. (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
  110. }
  111. inline u_int
  112. l2addrsize(struct layer2 *l2)
  113. {
  114. return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1;
  115. }
  116. static u_int
  117. l2_newid(struct layer2 *l2)
  118. {
  119. u_int id;
  120. id = l2->next_id++;
  121. if (id == 0x7fff)
  122. l2->next_id = 1;
  123. id <<= 16;
  124. id |= l2->tei << 8;
  125. id |= l2->sapi;
  126. return id;
  127. }
  128. static void
  129. l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
  130. {
  131. int err;
  132. if (!l2->up)
  133. return;
  134. mISDN_HEAD_PRIM(skb) = prim;
  135. mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
  136. err = l2->up->send(l2->up, skb);
  137. if (err) {
  138. printk(KERN_WARNING "%s: err=%d\n", __func__, err);
  139. dev_kfree_skb(skb);
  140. }
  141. }
  142. static void
  143. l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
  144. {
  145. struct sk_buff *skb;
  146. struct mISDNhead *hh;
  147. int err;
  148. if (!l2->up)
  149. return;
  150. skb = mI_alloc_skb(len, GFP_ATOMIC);
  151. if (!skb)
  152. return;
  153. hh = mISDN_HEAD_P(skb);
  154. hh->prim = prim;
  155. hh->id = (l2->ch.nr << 16) | l2->ch.addr;
  156. if (len)
  157. memcpy(skb_put(skb, len), arg, len);
  158. err = l2->up->send(l2->up, skb);
  159. if (err) {
  160. printk(KERN_WARNING "%s: err=%d\n", __func__, err);
  161. dev_kfree_skb(skb);
  162. }
  163. }
  164. static int
  165. l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
  166. int ret;
  167. ret = l2->ch.recv(l2->ch.peer, skb);
  168. if (ret && (*debug & DEBUG_L2_RECV))
  169. printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret);
  170. return ret;
  171. }
  172. static int
  173. l2down_raw(struct layer2 *l2, struct sk_buff *skb)
  174. {
  175. struct mISDNhead *hh = mISDN_HEAD_P(skb);
  176. if (hh->prim == PH_DATA_REQ) {
  177. if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
  178. skb_queue_tail(&l2->down_queue, skb);
  179. return 0;
  180. }
  181. l2->down_id = mISDN_HEAD_ID(skb);
  182. }
  183. return l2down_skb(l2, skb);
  184. }
  185. static int
  186. l2down(struct layer2 *l2, u_int prim, u_int id, struct sk_buff *skb)
  187. {
  188. struct mISDNhead *hh = mISDN_HEAD_P(skb);
  189. hh->prim = prim;
  190. hh->id = id;
  191. return l2down_raw(l2, skb);
  192. }
  193. static int
  194. l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg)
  195. {
  196. struct sk_buff *skb;
  197. int err;
  198. struct mISDNhead *hh;
  199. skb = mI_alloc_skb(len, GFP_ATOMIC);
  200. if (!skb)
  201. return -ENOMEM;
  202. hh = mISDN_HEAD_P(skb);
  203. hh->prim = prim;
  204. hh->id = id;
  205. if (len)
  206. memcpy(skb_put(skb, len), arg, len);
  207. err = l2down_raw(l2, skb);
  208. if (err)
  209. dev_kfree_skb(skb);
  210. return err;
  211. }
/*
 * Handle PH_DATA_CNF from layer 1: the previously sent PH_DATA_REQ
 * (identified by down_id) is done, so push the next queued frame (if
 * any) down and, once the queue drains, clear FLG_L1_NOTREADY and
 * kick the FSM with EV_L2_ACK_PULL so more I frames can be sent.
 * Returns 0 when the confirm skb was consumed, -EAGAIN otherwise.
 */
static int
ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
	struct sk_buff *nskb = skb;
	int ret = -EAGAIN;

	if (test_bit(FLG_L1_NOTREADY, &l2->flag)) {
		/* only react to the confirm matching our outstanding id */
		if (hh->id == l2->down_id) {
			nskb = skb_dequeue(&l2->down_queue);
			if (nskb) {
				l2->down_id = mISDN_HEAD_ID(nskb);
				if (l2down_skb(l2, nskb)) {
					/* send failed: drop and mark idle */
					dev_kfree_skb(nskb);
					l2->down_id = MISDN_ID_NONE;
				}
			} else
				l2->down_id = MISDN_ID_NONE;
			/* consume the confirm skb (ret is still -EAGAIN here) */
			if (ret) {
				dev_kfree_skb(skb);
				ret = 0;
			}
			if (l2->down_id == MISDN_ID_NONE) {
				/* queue drained: allow new transmissions */
				test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
				mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
			}
		}
	}
	/* if layer 1 just became free, try to restart the down queue */
	if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
		nskb = skb_dequeue(&l2->down_queue);
		if (nskb) {
			l2->down_id = mISDN_HEAD_ID(nskb);
			if (l2down_skb(l2, nskb)) {
				dev_kfree_skb(nskb);
				l2->down_id = MISDN_ID_NONE;
				test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
			}
		} else
			test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
	}
	return ret;
}
  251. static void
  252. l2_timeout(struct FsmInst *fi, int event, void *arg)
  253. {
  254. struct layer2 *l2 = fi->userdata;
  255. struct sk_buff *skb;
  256. struct mISDNhead *hh;
  257. skb = mI_alloc_skb(0, GFP_ATOMIC);
  258. if (!skb) {
  259. printk(KERN_WARNING "L2(%d,%d) nr:%x timer %s lost - no skb\n",
  260. l2->sapi, l2->tei, l2->ch.nr, event == EV_L2_T200 ?
  261. "T200" : "T203");
  262. return;
  263. }
  264. hh = mISDN_HEAD_P(skb);
  265. hh->prim = event == EV_L2_T200 ? DL_TIMER200_IND : DL_TIMER203_IND;
  266. hh->id = l2->ch.nr;
  267. if (*debug & DEBUG_TIMER)
  268. printk(KERN_DEBUG "L2(%d,%d) nr:%x timer %s expired\n",
  269. l2->sapi, l2->tei, l2->ch.nr, event == EV_L2_T200 ?
  270. "T200" : "T203");
  271. if (l2->ch.st)
  272. l2->ch.st->own.recv(&l2->ch.st->own, skb);
  273. }
  274. static int
  275. l2mgr(struct layer2 *l2, u_int prim, void *arg) {
  276. long c = (long)arg;
  277. printk(KERN_WARNING
  278. "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c);
  279. if (test_bit(FLG_LAPD, &l2->flag) &&
  280. !test_bit(FLG_FIXED_TEI, &l2->flag)) {
  281. switch (c) {
  282. case 'C':
  283. case 'D':
  284. case 'G':
  285. case 'H':
  286. l2_tei(l2, prim, (u_long)arg);
  287. break;
  288. }
  289. }
  290. return 0;
  291. }
  292. static void
  293. set_peer_busy(struct layer2 *l2) {
  294. test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
  295. if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
  296. test_and_set_bit(FLG_L2BLOCK, &l2->flag);
  297. }
  298. static void
  299. clear_peer_busy(struct layer2 *l2) {
  300. if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
  301. test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
  302. }
  303. static void
  304. InitWin(struct layer2 *l2)
  305. {
  306. int i;
  307. for (i = 0; i < MAX_WINDOW; i++)
  308. l2->windowar[i] = NULL;
  309. }
  310. static int
  311. freewin(struct layer2 *l2)
  312. {
  313. int i, cnt = 0;
  314. for (i = 0; i < MAX_WINDOW; i++) {
  315. if (l2->windowar[i]) {
  316. cnt++;
  317. dev_kfree_skb(l2->windowar[i]);
  318. l2->windowar[i] = NULL;
  319. }
  320. }
  321. return cnt;
  322. }
  323. static void
  324. ReleaseWin(struct layer2 *l2)
  325. {
  326. int cnt = freewin(l2);
  327. if (cnt)
  328. printk(KERN_WARNING
  329. "isdnl2 freed %d skbuffs in release\n", cnt);
  330. }
  331. inline unsigned int
  332. cansend(struct layer2 *l2)
  333. {
  334. unsigned int p1;
  335. if (test_bit(FLG_MOD128, &l2->flag))
  336. p1 = (l2->vs - l2->va) % 128;
  337. else
  338. p1 = (l2->vs - l2->va) % 8;
  339. return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag);
  340. }
  341. inline void
  342. clear_exception(struct layer2 *l2)
  343. {
  344. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  345. test_and_clear_bit(FLG_REJEXC, &l2->flag);
  346. test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
  347. clear_peer_busy(l2);
  348. }
  349. static int
  350. sethdraddr(struct layer2 *l2, u_char *header, int rsp)
  351. {
  352. u_char *ptr = header;
  353. int crbit = rsp;
  354. if (test_bit(FLG_LAPD, &l2->flag)) {
  355. if (test_bit(FLG_LAPD_NET, &l2->flag))
  356. crbit = !crbit;
  357. *ptr++ = (l2->sapi << 2) | (crbit ? 2 : 0);
  358. *ptr++ = (l2->tei << 1) | 1;
  359. return 2;
  360. } else {
  361. if (test_bit(FLG_ORIG, &l2->flag))
  362. crbit = !crbit;
  363. if (crbit)
  364. *ptr++ = l2->addr.B;
  365. else
  366. *ptr++ = l2->addr.A;
  367. return 1;
  368. }
  369. }
  370. static inline void
  371. enqueue_super(struct layer2 *l2, struct sk_buff *skb)
  372. {
  373. if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
  374. dev_kfree_skb(skb);
  375. }
  376. static inline void
  377. enqueue_ui(struct layer2 *l2, struct sk_buff *skb)
  378. {
  379. if (l2->tm)
  380. l2_tei(l2, MDL_STATUS_UI_IND, 0);
  381. if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
  382. dev_kfree_skb(skb);
  383. }
  384. inline int
  385. IsUI(u_char *data)
  386. {
  387. return (data[0] & 0xef) == UI;
  388. }
  389. inline int
  390. IsUA(u_char *data)
  391. {
  392. return (data[0] & 0xef) == UA;
  393. }
  394. inline int
  395. IsDM(u_char *data)
  396. {
  397. return (data[0] & 0xef) == DM;
  398. }
  399. inline int
  400. IsDISC(u_char *data)
  401. {
  402. return (data[0] & 0xef) == DISC;
  403. }
  404. inline int
  405. IsRR(u_char *data, struct layer2 *l2)
  406. {
  407. if (test_bit(FLG_MOD128, &l2->flag))
  408. return data[0] == RR;
  409. else
  410. return (data[0] & 0xf) == 1;
  411. }
  412. inline int
  413. IsSFrame(u_char *data, struct layer2 *l2)
  414. {
  415. register u_char d = *data;
  416. if (!test_bit(FLG_MOD128, &l2->flag))
  417. d &= 0xf;
  418. return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c);
  419. }
  420. inline int
  421. IsSABME(u_char *data, struct layer2 *l2)
  422. {
  423. u_char d = data[0] & ~0x10;
  424. return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM;
  425. }
  426. inline int
  427. IsREJ(u_char *data, struct layer2 *l2)
  428. {
  429. return test_bit(FLG_MOD128, &l2->flag) ?
  430. data[0] == REJ : (data[0] & 0xf) == REJ;
  431. }
  432. inline int
  433. IsFRMR(u_char *data)
  434. {
  435. return (data[0] & 0xef) == FRMR;
  436. }
  437. inline int
  438. IsRNR(u_char *data, struct layer2 *l2)
  439. {
  440. return test_bit(FLG_MOD128, &l2->flag) ?
  441. data[0] == RNR : (data[0] & 0xf) == RNR;
  442. }
  443. static int
  444. iframe_error(struct layer2 *l2, struct sk_buff *skb)
  445. {
  446. u_int i;
  447. int rsp = *skb->data & 0x2;
  448. i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1);
  449. if (test_bit(FLG_ORIG, &l2->flag))
  450. rsp = !rsp;
  451. if (rsp)
  452. return 'L';
  453. if (skb->len < i)
  454. return 'N';
  455. if ((skb->len - i) > l2->maxlen)
  456. return 'O';
  457. return 0;
  458. }
  459. static int
  460. super_error(struct layer2 *l2, struct sk_buff *skb)
  461. {
  462. if (skb->len != l2addrsize(l2) +
  463. (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1))
  464. return 'N';
  465. return 0;
  466. }
  467. static int
  468. unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp)
  469. {
  470. int rsp = (*skb->data & 0x2) >> 1;
  471. if (test_bit(FLG_ORIG, &l2->flag))
  472. rsp = !rsp;
  473. if (rsp != wantrsp)
  474. return 'L';
  475. if (skb->len != l2addrsize(l2) + 1)
  476. return 'N';
  477. return 0;
  478. }
  479. static int
  480. UI_error(struct layer2 *l2, struct sk_buff *skb)
  481. {
  482. int rsp = *skb->data & 0x2;
  483. if (test_bit(FLG_ORIG, &l2->flag))
  484. rsp = !rsp;
  485. if (rsp)
  486. return 'L';
  487. if (skb->len > l2->maxlen + l2addrsize(l2) + 1)
  488. return 'O';
  489. return 0;
  490. }
  491. static int
  492. FRMR_error(struct layer2 *l2, struct sk_buff *skb)
  493. {
  494. u_int headers = l2addrsize(l2) + 1;
  495. u_char *datap = skb->data + headers;
  496. int rsp = *skb->data & 0x2;
  497. if (test_bit(FLG_ORIG, &l2->flag))
  498. rsp = !rsp;
  499. if (!rsp)
  500. return 'L';
  501. if (test_bit(FLG_MOD128, &l2->flag)) {
  502. if (skb->len < headers + 5)
  503. return 'N';
  504. else if (*debug & DEBUG_L2)
  505. l2m_debug(&l2->l2m,
  506. "FRMR information %2x %2x %2x %2x %2x",
  507. datap[0], datap[1], datap[2], datap[3], datap[4]);
  508. } else {
  509. if (skb->len < headers + 3)
  510. return 'N';
  511. else if (*debug & DEBUG_L2)
  512. l2m_debug(&l2->l2m,
  513. "FRMR information %2x %2x %2x",
  514. datap[0], datap[1], datap[2]);
  515. }
  516. return 0;
  517. }
  518. static unsigned int
  519. legalnr(struct layer2 *l2, unsigned int nr)
  520. {
  521. if (test_bit(FLG_MOD128, &l2->flag))
  522. return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
  523. else
  524. return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
  525. }
/*
 * Advance V(A) to the acknowledged sequence number `nr`, releasing
 * each acknowledged I frame from the retransmission window. Freed
 * skbs are staged on tmp_queue first and destroyed at the end.
 */
static void
setva(struct layer2 *l2, unsigned int nr)
{
	struct sk_buff *skb;

	while (l2->va != nr) {
		l2->va++;
		/* wrap V(A) at the link's modulus */
		if (test_bit(FLG_MOD128, &l2->flag))
			l2->va %= 128;
		else
			l2->va %= 8;
		/* release the window slot at the start-of-window index */
		if (l2->windowar[l2->sow]) {
			skb_trim(l2->windowar[l2->sow], 0);
			skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]);
			l2->windowar[l2->sow] = NULL;
		}
		l2->sow = (l2->sow + 1) % l2->window;
	}
	/* now actually free everything that was acknowledged */
	skb = skb_dequeue(&l2->tmp_queue);
	while (skb) {
		dev_kfree_skb(skb);
		skb = skb_dequeue(&l2->tmp_queue);
	}
}
  549. static void
  550. send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
  551. {
  552. u_char tmp[MAX_L2HEADER_LEN];
  553. int i;
  554. i = sethdraddr(l2, tmp, cr);
  555. tmp[i++] = cmd;
  556. if (skb)
  557. skb_trim(skb, 0);
  558. else {
  559. skb = mI_alloc_skb(i, GFP_ATOMIC);
  560. if (!skb) {
  561. printk(KERN_WARNING "%s: can't alloc skbuff\n",
  562. __func__);
  563. return;
  564. }
  565. }
  566. memcpy(skb_put(skb, i), tmp, i);
  567. enqueue_super(l2, skb);
  568. }
  569. inline u_char
  570. get_PollFlag(struct layer2 *l2, struct sk_buff *skb)
  571. {
  572. return skb->data[l2addrsize(l2)] & 0x10;
  573. }
  574. inline u_char
  575. get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb)
  576. {
  577. u_char PF;
  578. PF = get_PollFlag(l2, skb);
  579. dev_kfree_skb(skb);
  580. return PF;
  581. }
  582. inline void
  583. start_t200(struct layer2 *l2, int i)
  584. {
  585. mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
  586. test_and_set_bit(FLG_T200_RUN, &l2->flag);
  587. }
  588. inline void
  589. restart_t200(struct layer2 *l2, int i)
  590. {
  591. mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
  592. test_and_set_bit(FLG_T200_RUN, &l2->flag);
  593. }
  594. inline void
  595. stop_t200(struct layer2 *l2, int i)
  596. {
  597. if (test_and_clear_bit(FLG_T200_RUN, &l2->flag))
  598. mISDN_FsmDelTimer(&l2->t200, i);
  599. }
  600. inline void
  601. st5_dl_release_l2l3(struct layer2 *l2)
  602. {
  603. int pr;
  604. if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
  605. pr = DL_RELEASE_CNF;
  606. else
  607. pr = DL_RELEASE_IND;
  608. l2up_create(l2, pr, 0, NULL);
  609. }
  610. inline void
  611. lapb_dl_release_l2l3(struct layer2 *l2, int f)
  612. {
  613. if (test_bit(FLG_LAPB, &l2->flag))
  614. l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL);
  615. l2up_create(l2, f, 0, NULL);
  616. }
  617. static void
  618. establishlink(struct FsmInst *fi)
  619. {
  620. struct layer2 *l2 = fi->userdata;
  621. u_char cmd;
  622. clear_exception(l2);
  623. l2->rc = 0;
  624. cmd = (test_bit(FLG_MOD128, &l2->flag) ? SABME : SABM) | 0x10;
  625. send_uframe(l2, NULL, cmd, CMD);
  626. mISDN_FsmDelTimer(&l2->t203, 1);
  627. restart_t200(l2, 1);
  628. test_and_clear_bit(FLG_PEND_REL, &l2->flag);
  629. freewin(l2);
  630. mISDN_FsmChangeState(fi, ST_L2_5);
  631. }
  632. static void
  633. l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
  634. {
  635. struct sk_buff *skb = arg;
  636. struct layer2 *l2 = fi->userdata;
  637. if (get_PollFlagFree(l2, skb))
  638. l2mgr(l2, MDL_ERROR_IND, (void *) 'C');
  639. else
  640. l2mgr(l2, MDL_ERROR_IND, (void *) 'D');
  641. }
  642. static void
  643. l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
  644. {
  645. struct sk_buff *skb = arg;
  646. struct layer2 *l2 = fi->userdata;
  647. if (get_PollFlagFree(l2, skb))
  648. l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
  649. else {
  650. l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
  651. establishlink(fi);
  652. test_and_clear_bit(FLG_L3_INIT, &l2->flag);
  653. }
  654. }
  655. static void
  656. l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
  657. {
  658. struct sk_buff *skb = arg;
  659. struct layer2 *l2 = fi->userdata;
  660. if (get_PollFlagFree(l2, skb))
  661. l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
  662. else
  663. l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
  664. establishlink(fi);
  665. test_and_clear_bit(FLG_L3_INIT, &l2->flag);
  666. }
  667. static void
  668. l2_go_st3(struct FsmInst *fi, int event, void *arg)
  669. {
  670. dev_kfree_skb((struct sk_buff *)arg);
  671. mISDN_FsmChangeState(fi, ST_L2_3);
  672. }
  673. static void
  674. l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
  675. {
  676. struct layer2 *l2 = fi->userdata;
  677. mISDN_FsmChangeState(fi, ST_L2_3);
  678. dev_kfree_skb((struct sk_buff *)arg);
  679. l2_tei(l2, MDL_ASSIGN_IND, 0);
  680. }
  681. static void
  682. l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
  683. {
  684. struct layer2 *l2 = fi->userdata;
  685. struct sk_buff *skb = arg;
  686. skb_queue_tail(&l2->ui_queue, skb);
  687. mISDN_FsmChangeState(fi, ST_L2_2);
  688. l2_tei(l2, MDL_ASSIGN_IND, 0);
  689. }
  690. static void
  691. l2_queue_ui(struct FsmInst *fi, int event, void *arg)
  692. {
  693. struct layer2 *l2 = fi->userdata;
  694. struct sk_buff *skb = arg;
  695. skb_queue_tail(&l2->ui_queue, skb);
  696. }
  697. static void
  698. tx_ui(struct layer2 *l2)
  699. {
  700. struct sk_buff *skb;
  701. u_char header[MAX_L2HEADER_LEN];
  702. int i;
  703. i = sethdraddr(l2, header, CMD);
  704. if (test_bit(FLG_LAPD_NET, &l2->flag))
  705. header[1] = 0xff; /* tei 127 */
  706. header[i++] = UI;
  707. while ((skb = skb_dequeue(&l2->ui_queue))) {
  708. memcpy(skb_push(skb, i), header, i);
  709. enqueue_ui(l2, skb);
  710. }
  711. }
  712. static void
  713. l2_send_ui(struct FsmInst *fi, int event, void *arg)
  714. {
  715. struct layer2 *l2 = fi->userdata;
  716. struct sk_buff *skb = arg;
  717. skb_queue_tail(&l2->ui_queue, skb);
  718. tx_ui(l2);
  719. }
  720. static void
  721. l2_got_ui(struct FsmInst *fi, int event, void *arg)
  722. {
  723. struct layer2 *l2 = fi->userdata;
  724. struct sk_buff *skb = arg;
  725. skb_pull(skb, l2headersize(l2, 1));
  726. /*
  727. * in states 1-3 for broadcast
  728. */
  729. if (l2->tm)
  730. l2_tei(l2, MDL_STATUS_UI_IND, 0);
  731. l2up(l2, DL_UNITDATA_IND, skb);
  732. }
  733. static void
  734. l2_establish(struct FsmInst *fi, int event, void *arg)
  735. {
  736. struct sk_buff *skb = arg;
  737. struct layer2 *l2 = fi->userdata;
  738. establishlink(fi);
  739. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  740. dev_kfree_skb(skb);
  741. }
  742. static void
  743. l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
  744. {
  745. struct sk_buff *skb = arg;
  746. struct layer2 *l2 = fi->userdata;
  747. skb_queue_purge(&l2->i_queue);
  748. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  749. test_and_clear_bit(FLG_PEND_REL, &l2->flag);
  750. dev_kfree_skb(skb);
  751. }
  752. static void
  753. l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
  754. {
  755. struct sk_buff *skb = arg;
  756. struct layer2 *l2 = fi->userdata;
  757. skb_queue_purge(&l2->i_queue);
  758. establishlink(fi);
  759. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  760. dev_kfree_skb(skb);
  761. }
  762. static void
  763. l2_release(struct FsmInst *fi, int event, void *arg)
  764. {
  765. struct layer2 *l2 = fi->userdata;
  766. struct sk_buff *skb = arg;
  767. skb_trim(skb, 0);
  768. l2up(l2, DL_RELEASE_CNF, skb);
  769. }
  770. static void
  771. l2_pend_rel(struct FsmInst *fi, int event, void *arg)
  772. {
  773. struct sk_buff *skb = arg;
  774. struct layer2 *l2 = fi->userdata;
  775. test_and_set_bit(FLG_PEND_REL, &l2->flag);
  776. dev_kfree_skb(skb);
  777. }
/*
 * Start link teardown: drop queued I frames, send DISC with the P bit
 * set, restart T200 for the DISC retry and enter state 6.
 */
static void
l2_disconnect(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	freewin(l2);
	mISDN_FsmChangeState(fi, ST_L2_6);
	l2->rc = 0;
	send_uframe(l2, NULL, DISC | 0x10, CMD);
	mISDN_FsmDelTimer(&l2->t203, 1);
	restart_t200(l2, 2);
	/* arg may be NULL when invoked internally (see l2_connected) */
	if (skb)
		dev_kfree_skb(skb);
}
/*
 * Peer sent SABM(E) while we were down: reset all sequence state,
 * answer with UA (echoing the P bit), enter state 7 and tell layer 3
 * (and the TEI manager) that the link is established.
 */
static void
l2_start_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	l2->vs = 0;
	l2->va = 0;
	l2->vr = 0;
	l2->sow = 0;
	clear_exception(l2);
	send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP);
	mISDN_FsmChangeState(fi, ST_L2_7);
	mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
	/* reuse the SABM skb as the DL_ESTABLISH indication */
	skb_trim(skb, 0);
	l2up(l2, DL_ESTABLISH_IND, skb);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UP_IND, 0);
}
  811. static void
  812. l2_send_UA(struct FsmInst *fi, int event, void *arg)
  813. {
  814. struct layer2 *l2 = fi->userdata;
  815. struct sk_buff *skb = arg;
  816. send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
  817. }
  818. static void
  819. l2_send_DM(struct FsmInst *fi, int event, void *arg)
  820. {
  821. struct layer2 *l2 = fi->userdata;
  822. struct sk_buff *skb = arg;
  823. send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP);
  824. }
/*
 * SABM(E) received while already multiframe-established (state 7/8):
 * acknowledge with UA, report MDL error 'F', reset all sequence state
 * and timers, and indicate re-establishment to layer 3 if unacked
 * I frames had to be discarded.
 */
static void
l2_restart_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int est = 0;

	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
	l2mgr(l2, MDL_ERROR_IND, (void *) 'F');
	/* unacknowledged frames are lost by the reset: tell layer 3 */
	if (l2->vs != l2->va) {
		skb_queue_purge(&l2->i_queue);
		est = 1;
	}
	clear_exception(l2);
	l2->vs = 0;
	l2->va = 0;
	l2->vr = 0;
	l2->sow = 0;
	mISDN_FsmChangeState(fi, ST_L2_7);
	stop_t200(l2, 3);
	mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
	if (est)
		l2up_create(l2, DL_ESTABLISH_IND, 0, NULL);
/*	mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
 *	    MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED,
 *	    0, NULL, 0);
 */
	/* resume I-frame transmission if anything is waiting */
	if (skb_queue_len(&l2->i_queue) && cansend(l2))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
}
/*
 * DISC received in multi-frame operation (ST_L2_7/8): peer releases the
 * link.  Enter ST_L2_4, stop both timers, confirm with UA, discard all
 * pending data and indicate DL_RELEASE upward.
 */
static void
l2_stop_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	mISDN_FsmChangeState(fi, ST_L2_4);
	mISDN_FsmDelTimer(&l2->t203, 3);
	stop_t200(l2, 4);

	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
	skb_queue_purge(&l2->i_queue);	/* pending I-frames are lost on release */
	freewin(l2);			/* drop unacknowledged window entries */
	lapb_dl_release_l2l3(l2, DL_RELEASE_IND);

	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);	/* notify TEI manager */
}
/*
 * UA received in ST_L2_5 (awaiting establish).  A UA without the F bit is
 * a protocol error; otherwise the link is up: reset sequence state, enter
 * ST_L2_7 with T203 running, and confirm/indicate establishment to layer 3.
 * If a release was requested meanwhile (FLG_PEND_REL) the release is
 * started immediately via l2_disconnect.
 */
static void
l2_connected(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int pr = -1;	/* primitive to send up; -1 = none */

	if (!get_PollFlag(l2, skb)) {
		/* UA with F=0 is invalid here */
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
		l2_disconnect(fi, event, NULL);	/* user already asked for release */
	if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) {
		pr = DL_ESTABLISH_CNF;	/* we initiated: confirm */
	} else if (l2->vs != l2->va) {
		/* peer-initiated re-establish with data outstanding: drop it */
		skb_queue_purge(&l2->i_queue);
		pr = DL_ESTABLISH_IND;
	}
	stop_t200(l2, 5);
	l2->vr = 0;
	l2->vs = 0;
	l2->va = 0;
	l2->sow = 0;
	mISDN_FsmChangeState(fi, ST_L2_7);
	mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4);
	if (pr != -1)
		l2up_create(l2, pr, 0, NULL);

	if (skb_queue_len(&l2->i_queue) && cansend(l2))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);

	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UP_IND, 0);	/* notify TEI manager */
}
/*
 * UA received in ST_L2_6 (awaiting release).  A UA without the F bit is a
 * protocol error; otherwise the release handshake is complete: stop T200,
 * confirm DL_RELEASE to layer 3 and fall back to ST_L2_4.
 */
static void
l2_released(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (!get_PollFlag(l2, skb)) {
		/* UA with F=0 is invalid here */
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	stop_t200(l2, 6);
	lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);	/* notify TEI manager */
}
  918. static void
  919. l2_reestablish(struct FsmInst *fi, int event, void *arg)
  920. {
  921. struct layer2 *l2 = fi->userdata;
  922. struct sk_buff *skb = arg;
  923. if (!get_PollFlagFree(l2, skb)) {
  924. establishlink(fi);
  925. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  926. }
  927. }
/*
 * DM received in ST_L2_5 (awaiting establish).  With F=1 the peer refuses
 * establishment: stop T200, drop pending data (unless layer 3 restarted
 * the link), deactivate LAPB's physical layer, report the release and
 * return to ST_L2_4.  get_PollFlagFree() frees the skb in either case.
 */
static void
l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (get_PollFlagFree(l2, skb)) {
		stop_t200(l2, 7);
		if (!test_bit(FLG_L3_INIT, &l2->flag))
			skb_queue_purge(&l2->i_queue);
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
				      l2_newid(l2), 0, NULL);
		st5_dl_release_l2l3(l2);
		mISDN_FsmChangeState(fi, ST_L2_4);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	}
}
/*
 * DM received in ST_L2_6 (awaiting release).  With F=1 the peer confirms
 * it is already disconnected: stop T200, confirm DL_RELEASE and return to
 * ST_L2_4.  get_PollFlagFree() frees the skb in either case.
 */
static void
l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (get_PollFlagFree(l2, skb)) {
		stop_t200(l2, 8);
		lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
		mISDN_FsmChangeState(fi, ST_L2_4);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	}
}
/*
 * Build and queue a supervisory frame (RR/RNR/REJ).
 * @typ: supervisory type code (RR, RNR or REJ)
 * @cr:  CMD or RSP addressing
 * @pf:  poll/final bit value
 * In modulo-128 mode the control field takes two octets with N(R) in the
 * second; in modulo-8 mode N(R) and the P/F bit share one octet.
 */
static void
enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
{
	struct sk_buff *skb;
	u_char tmp[MAX_L2HEADER_LEN];
	int i;

	i = sethdraddr(l2, tmp, cr);	/* address field; returns octets used */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		tmp[i++] = typ;
		tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
	} else
		tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
	skb = mI_alloc_skb(i, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_WARNING
		       "isdnl2 can't alloc sbbuff for enquiry_cr\n");
		return;	/* frame silently dropped on allocation failure */
	}
	memcpy(skb_put(skb, i), tmp, i);
	enqueue_super(l2, skb);
}
  980. inline void
  981. enquiry_response(struct layer2 *l2)
  982. {
  983. if (test_bit(FLG_OWN_BUSY, &l2->flag))
  984. enquiry_cr(l2, RNR, RSP, 1);
  985. else
  986. enquiry_cr(l2, RR, RSP, 1);
  987. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  988. }
  989. inline void
  990. transmit_enquiry(struct layer2 *l2)
  991. {
  992. if (test_bit(FLG_OWN_BUSY, &l2->flag))
  993. enquiry_cr(l2, RNR, CMD, 1);
  994. else
  995. enquiry_cr(l2, RR, CMD, 1);
  996. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  997. start_t200(l2, 9);
  998. }
/*
 * Recover from an invalid N(R) in a received frame: report error 'J' to
 * management and re-establish the link (not layer-3 initiated, so the
 * L3_INIT flag is cleared).
 */
static void
nrerrorrecovery(struct FsmInst *fi)
{
	struct layer2 *l2 = fi->userdata;

	l2mgr(l2, MDL_ERROR_IND, (void *) 'J');
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
/*
 * Step V(S) back to the acknowledged sequence number @nr and move every
 * frame between nr and the old V(S) from the acknowledge window back to
 * the head of i_queue, then kick the transmitter.  The window slot for a
 * given V(S) is ((V(S) - V(A)) mod modulus + sow) mod window, matching
 * the slot computation in l2_pull_iqueue.
 */
static void
invoke_retransmission(struct layer2 *l2, unsigned int nr)
{
	u_int p1;

	if (l2->vs != nr) {
		while (l2->vs != nr) {
			(l2->vs)--;	/* unsigned wrap fixed up by the mod below */
			if (test_bit(FLG_MOD128, &l2->flag)) {
				l2->vs %= 128;
				p1 = (l2->vs - l2->va) % 128;
			} else {
				l2->vs %= 8;
				p1 = (l2->vs - l2->va) % 8;
			}
			p1 = (p1 + l2->sow) % l2->window;
			if (l2->windowar[p1])
				skb_queue_head(&l2->i_queue, l2->windowar[p1]);
			else
				printk(KERN_WARNING
				       "%s: windowar[%d] is NULL\n",
				       __func__, p1);
			l2->windowar[p1] = NULL;
		}
		mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
	}
}
/*
 * Supervisory frame (RR/RNR/REJ) received in ST_L2_7.
 * Decodes the C/R bit, peer-busy condition, P/F bit and N(R); answers a
 * poll, then acknowledges via setva()/timer handling.  A REJ triggers
 * retransmission from N(R); an invalid N(R) re-establishes the link.
 */
static void
l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, typ = RR;
	unsigned int nr;

	rsp = *skb->data & 0x2;	/* C/R bit of the address field */
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;	/* C/R sense depends on which side we are */

	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, l2)) {
		set_peer_busy(l2);
		typ = RNR;
	} else
		clear_peer_busy(l2);
	if (IsREJ(skb->data, l2))
		typ = REJ;

	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);

	if (PollFlag) {
		if (rsp)
			l2mgr(l2, MDL_ERROR_IND, (void *) 'A');	/* unsolicited F=1 response */
		else
			enquiry_response(l2);	/* answer the poll */
	}
	if (legalnr(l2, nr)) {
		if (typ == REJ) {
			setva(l2, nr);
			invoke_retransmission(l2, nr);
			stop_t200(l2, 10);
			if (mISDN_FsmAddTimer(&l2->t203, l2->T203,
					      EV_L2_T203, NULL, 6))
				l2m_debug(&l2->l2m, "Restart T203 ST7 REJ");
		} else if ((nr == l2->vs) && (typ == RR)) {
			/* everything acknowledged: idle with T203 */
			setva(l2, nr);
			stop_t200(l2, 11);
			mISDN_FsmRestartTimer(&l2->t203, l2->T203,
					      EV_L2_T203, NULL, 7);
		} else if ((l2->va != nr) || (typ == RNR)) {
			/* partial ack or peer busy: keep T200 running */
			setva(l2, nr);
			if (typ != RR)
				mISDN_FsmDelTimer(&l2->t203, 9);
			restart_t200(l2, 12);
		}
		if (skb_queue_len(&l2->i_queue) && (typ == RR))
			mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
	} else
		nrerrorrecovery(fi);	/* invalid N(R) */
}
  1089. static void
  1090. l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
  1091. {
  1092. struct layer2 *l2 = fi->userdata;
  1093. struct sk_buff *skb = arg;
  1094. if (!test_bit(FLG_L3_INIT, &l2->flag))
  1095. skb_queue_tail(&l2->i_queue, skb);
  1096. else
  1097. dev_kfree_skb(skb);
  1098. }
  1099. static void
  1100. l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
  1101. {
  1102. struct layer2 *l2 = fi->userdata;
  1103. struct sk_buff *skb = arg;
  1104. skb_queue_tail(&l2->i_queue, skb);
  1105. mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
  1106. }
  1107. static void
  1108. l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
  1109. {
  1110. struct layer2 *l2 = fi->userdata;
  1111. struct sk_buff *skb = arg;
  1112. skb_queue_tail(&l2->i_queue, skb);
  1113. }
/*
 * I-frame received (ST_L2_7/8).  Decodes P, N(S) and N(R); delivers
 * in-sequence data to layer 3, answers polls, sends REJ once per
 * out-of-sequence burst (FLG_REJEXC), then processes the piggybacked
 * N(R) acknowledgement like a supervisory frame would.
 */
static void
l2_got_iframe(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, i;
	u_int ns, nr;

	i = l2addrsize(l2);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
		ns = skb->data[i] >> 1;
		nr = (skb->data[i + 1] >> 1) & 0x7f;
	} else {
		PollFlag = (skb->data[i] & 0x10);
		ns = (skb->data[i] >> 1) & 0x7;
		nr = (skb->data[i] >> 5) & 0x7;
	}
	if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
		/* receiver busy: discard payload, still answer a poll */
		dev_kfree_skb(skb);
		if (PollFlag)
			enquiry_response(l2);
	} else {
		if (l2->vr == ns) {
			/* in sequence: advance V(R) and deliver upward */
			l2->vr++;
			if (test_bit(FLG_MOD128, &l2->flag))
				l2->vr %= 128;
			else
				l2->vr %= 8;
			test_and_clear_bit(FLG_REJEXC, &l2->flag);
			if (PollFlag)
				enquiry_response(l2);
			else
				test_and_set_bit(FLG_ACK_PEND, &l2->flag);
			skb_pull(skb, l2headersize(l2, 0));
			l2up(l2, DL_DATA_IND, skb);
		} else {
			/* n(s)!=v(r) */
			dev_kfree_skb(skb);
			if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
				/* REJ already sent for this gap */
				if (PollFlag)
					enquiry_response(l2);
			} else {
				enquiry_cr(l2, REJ, RSP, PollFlag);
				test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
			}
		}
	}
	if (legalnr(l2, nr)) {
		if (!test_bit(FLG_PEER_BUSY, &l2->flag) &&
		    (fi->state == ST_L2_7)) {
			if (nr == l2->vs) {
				/* all outstanding frames acknowledged */
				stop_t200(l2, 13);
				mISDN_FsmRestartTimer(&l2->t203, l2->T203,
						      EV_L2_T203, NULL, 7);
			} else if (nr != l2->va)
				restart_t200(l2, 14);	/* partial acknowledgement */
		}
		setva(l2, nr);
	} else {
		nrerrorrecovery(fi);	/* invalid N(R): re-establish */
		return;
	}
	if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
	if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag))
		enquiry_cr(l2, RR, RSP, 0);	/* explicit ack if not piggybacked */
}
/*
 * MDL_ASSIGN: the TEI manager assigned us a TEI (passed as the event
 * argument, cast from a pointer).  Record it, inform layer 3, and either
 * continue a pending establish (from ST_L2_3) or settle in ST_L2_4.
 * Queued UI frames can be transmitted now that a TEI exists.
 */
static void
l2_got_tei(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	u_int info;

	l2->tei = (signed char)(long)arg;	/* TEI value smuggled in the pointer */
	set_channel_address(&l2->ch, l2->sapi, l2->tei);
	info = DL_INFO_L2_CONNECT;
	l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info);
	if (fi->state == ST_L2_3) {
		/* an establish request was waiting for the TEI */
		establishlink(fi);
		test_and_set_bit(FLG_L3_INIT, &l2->flag);
	} else
		mISDN_FsmChangeState(fi, ST_L2_4);
	if (skb_queue_len(&l2->ui_queue))
		tx_ui(l2);	/* flush UI frames queued while unassigned */
}
/*
 * T200 expiry in ST_L2_5 (awaiting establish).  While the D-channel is
 * busy just re-arm the timer.  After N200 retries give up: report error
 * 'G', release to layer 3 and fall back to ST_L2_4; otherwise retransmit
 * SABM/SABME with P=1 and count the retry.
 */
static void
l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
	} else if (l2->rc == l2->N200) {
		/* retry limit reached: abort establishment */
		mISDN_FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &l2->flag);
		skb_queue_purge(&l2->i_queue);
		l2mgr(l2, MDL_ERROR_IND, (void *) 'G');
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
				      l2_newid(l2), 0, NULL);
		st5_dl_release_l2l3(l2);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	} else {
		l2->rc++;
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ?
				       SABME : SABM) | 0x10, CMD);
	}
}
/*
 * T200 expiry in ST_L2_6 (awaiting release).  While the D-channel is busy
 * just re-arm the timer.  After N200 retries report error 'H', confirm the
 * release anyway and fall back to ST_L2_4; otherwise retransmit DISC with
 * P=1 and count the retry.
 */
static void
l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
	} else if (l2->rc == l2->N200) {
		/* retry limit reached: force the release locally */
		mISDN_FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &l2->flag);
		l2mgr(l2, MDL_ERROR_IND, (void *) 'H');
		lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	} else {
		l2->rc++;
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200,
				  NULL, 9);
		send_uframe(l2, NULL, DISC | 0x10, CMD);
	}
}
/*
 * T200 expiry in ST_L2_7: no acknowledgement arrived in time.  While the
 * D-channel is busy just re-arm the timer; otherwise enter timer recovery
 * (ST_L2_8) and poll the peer with an enquiry, starting the retry count.
 */
static void
l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &l2->flag);
	l2->rc = 0;
	mISDN_FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(l2);	/* RR/RNR poll with P=1; restarts T200 */
	l2->rc++;
}
/*
 * T200 expiry in ST_L2_8 (timer recovery).  While the D-channel is busy
 * just re-arm the timer.  After N200 polls without an answer report error
 * 'I' and re-establish the link; otherwise send another enquiry poll and
 * count the retry.
 */
static void
l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &l2->flag);
	if (l2->rc == l2->N200) {
		l2mgr(l2, MDL_ERROR_IND, (void *) 'I');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
	} else {
		transmit_enquiry(l2);	/* RR/RNR poll with P=1; restarts T200 */
		l2->rc++;
	}
}
/*
 * T203 (idle supervision) expiry in ST_L2_7.  While the D-channel is busy
 * just re-arm the timer; otherwise probe the idle link: enter timer
 * recovery (ST_L2_8) and poll the peer with an enquiry.
 */
static void
l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9);
		return;
	}
	mISDN_FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(l2);	/* RR/RNR poll with P=1; starts T200 */
	l2->rc = 0;
}
  1291. static void
  1292. l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
  1293. {
  1294. struct layer2 *l2 = fi->userdata;
  1295. struct sk_buff *skb, *nskb, *oskb;
  1296. u_char header[MAX_L2HEADER_LEN];
  1297. u_int i, p1;
  1298. if (!cansend(l2))
  1299. return;
  1300. skb = skb_dequeue(&l2->i_queue);
  1301. if (!skb)
  1302. return;
  1303. if (test_bit(FLG_MOD128, &l2->flag))
  1304. p1 = (l2->vs - l2->va) % 128;
  1305. else
  1306. p1 = (l2->vs - l2->va) % 8;
  1307. p1 = (p1 + l2->sow) % l2->window;
  1308. if (l2->windowar[p1]) {
  1309. printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
  1310. p1);
  1311. dev_kfree_skb(l2->windowar[p1]);
  1312. }
  1313. l2->windowar[p1] = skb;
  1314. i = sethdraddr(l2, header, CMD);
  1315. if (test_bit(FLG_MOD128, &l2->flag)) {
  1316. header[i++] = l2->vs << 1;
  1317. header[i++] = l2->vr << 1;
  1318. l2->vs = (l2->vs + 1) % 128;
  1319. } else {
  1320. header[i++] = (l2->vr << 5) | (l2->vs << 1);
  1321. l2->vs = (l2->vs + 1) % 8;
  1322. }
  1323. nskb = skb_clone(skb, GFP_ATOMIC);
  1324. p1 = skb_headroom(nskb);
  1325. if (p1 >= i)
  1326. memcpy(skb_push(nskb, i), header, i);
  1327. else {
  1328. printk(KERN_WARNING
  1329. "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
  1330. oskb = nskb;
  1331. nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
  1332. if (!nskb) {
  1333. dev_kfree_skb(oskb);
  1334. printk(KERN_WARNING "%s: no skb mem\n", __func__);
  1335. return;
  1336. }
  1337. memcpy(skb_put(nskb, i), header, i);
  1338. memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
  1339. dev_kfree_skb(oskb);
  1340. }
  1341. l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
  1342. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  1343. if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
  1344. mISDN_FsmDelTimer(&l2->t203, 13);
  1345. mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11);
  1346. }
  1347. }
/*
 * Supervisory frame received in ST_L2_8 (timer recovery).  A response
 * with F=1 answers our poll: leave timer recovery, retransmit from N(R)
 * and return to ST_L2_7.  Anything else only updates V(A) (answering a
 * command poll if needed).  An invalid N(R) re-establishes the link.
 */
static void
l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, rnr = 0;
	unsigned int nr;

	rsp = *skb->data & 0x2;	/* C/R bit of the address field */
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;	/* C/R sense depends on which side we are */

	skb_pull(skb, l2addrsize(l2));

	if (IsRNR(skb->data, l2)) {
		set_peer_busy(l2);
		rnr = 1;
	} else
		clear_peer_busy(l2);

	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);
	if (rsp && PollFlag) {
		/* the answer to our enquiry poll */
		if (legalnr(l2, nr)) {
			if (rnr) {
				restart_t200(l2, 15);	/* peer busy: keep polling */
			} else {
				stop_t200(l2, 16);
				mISDN_FsmAddTimer(&l2->t203, l2->T203,
						  EV_L2_T203, NULL, 5);
				setva(l2, nr);
			}
			invoke_retransmission(l2, nr);
			mISDN_FsmChangeState(fi, ST_L2_7);
			if (skb_queue_len(&l2->i_queue) && cansend(l2))
				mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
		} else
			nrerrorrecovery(fi);
	} else {
		if (!rsp && PollFlag)
			enquiry_response(l2);	/* peer polled us */
		if (legalnr(l2, nr))
			setva(l2, nr);
		else
			nrerrorrecovery(fi);
	}
}
/*
 * FRMR received (ST_L2_7/8): the peer rejected one of our frames.  If
 * the rejected control field was an I- or S-frame (or a UA while in
 * ST_L2_7), report error 'K' and re-establish the link; other rejected
 * frame types are ignored.
 */
static void
l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_pull(skb, l2addrsize(l2) + 1);	/* skip address + FRMR control octet */

	if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) ||	/* I or S */
	    (IsUA(skb->data) && (fi->state == ST_L2_7))) {
		l2mgr(l2, MDL_ERROR_IND, (void *) 'K');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
	}
	dev_kfree_skb(skb);
}
/*
 * TEI removed in ST_L2_2/4 (or L1 deactivate in ST_L2_2): drop queued UI
 * frames, fall back to the group TEI and restart in ST_L2_1.
 */
static void
l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->ui_queue);
	l2->tei = GROUP_TEI;
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed in ST_L2_3 (establish was pending on TEI assignment): drop
 * queued UI frames, tell layer 3 the link is released, fall back to the
 * group TEI and restart in ST_L2_1.
 */
static void
l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->ui_queue);
	l2->tei = GROUP_TEI;
	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed in ST_L2_5 (awaiting establish): discard all queued data
 * and the acknowledge window, stop T200, signal the failed establish to
 * layer 3 and restart in ST_L2_1 with the group TEI.
 */
static void
l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	l2->tei = GROUP_TEI;
	stop_t200(l2, 17);
	st5_dl_release_l2l3(l2);
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed in ST_L2_6 (awaiting release): drop queued UI frames, stop
 * T200, report the release to layer 3 and restart in ST_L2_1 with the
 * group TEI.
 */
static void
l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->ui_queue);
	l2->tei = GROUP_TEI;
	stop_t200(l2, 18);
	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * TEI removed during multi-frame operation (ST_L2_7/8): discard all
 * queued data and the acknowledge window, stop both timers, report the
 * release to layer 3 and restart in ST_L2_1 with the group TEI.
 */
static void
l2_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	l2->tei = GROUP_TEI;
	stop_t200(l2, 17);
	mISDN_FsmDelTimer(&l2->t203, 19);
	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
/*	mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
 *	    MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED,
 *	    0, NULL, 0);
 */
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/*
 * Layer 1 deactivated in ST_L2_1/4: discard queued data.  If an establish
 * was pending (FLG_ESTAB_PEND) the skb is reused to signal DL_RELEASE_IND
 * to layer 3; otherwise it is simply freed.
 */
static void
l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
		l2up(l2, DL_RELEASE_IND, skb);
	else
		dev_kfree_skb(skb);
}
/*
 * Layer 1 deactivated in ST_L2_5 (awaiting establish): abort the setup.
 * Discard queued data and the acknowledge window, stop T200, signal the
 * failed establish to layer 3 and fall back to ST_L2_4.
 */
static void
l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	stop_t200(l2, 19);
	st5_dl_release_l2l3(l2);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);	/* notify TEI manager */
	dev_kfree_skb(skb);
}
/*
 * Layer 1 deactivated in ST_L2_6 (awaiting release): treat it as release
 * completion.  Drop queued UI frames, stop T200, confirm DL_RELEASE
 * (reusing the event skb) and fall back to ST_L2_4.
 */
static void
l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->ui_queue);
	stop_t200(l2, 20);
	l2up(l2, DL_RELEASE_CNF, skb);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);	/* notify TEI manager */
}
/*
 * Layer 1 deactivated during multi-frame operation (ST_L2_7/8): the link
 * is dead.  Discard all queued data and the acknowledge window, stop both
 * timers, indicate DL_RELEASE to layer 3 (reusing the event skb) and fall
 * back to ST_L2_4.
 */
static void
l2_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	stop_t200(l2, 19);
	mISDN_FsmDelTimer(&l2->t203, 19);
	l2up(l2, DL_RELEASE_IND, skb);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);	/* notify TEI manager */
}
  1521. static void
  1522. l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
  1523. {
  1524. struct layer2 *l2 = fi->userdata;
  1525. struct sk_buff *skb = arg;
  1526. if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) {
  1527. enquiry_cr(l2, RNR, RSP, 0);
  1528. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  1529. }
  1530. if (skb)
  1531. dev_kfree_skb(skb);
  1532. }
  1533. static void
  1534. l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
  1535. {
  1536. struct layer2 *l2 = fi->userdata;
  1537. struct sk_buff *skb = arg;
  1538. if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) {
  1539. enquiry_cr(l2, RR, RSP, 0);
  1540. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  1541. }
  1542. if (skb)
  1543. dev_kfree_skb(skb);
  1544. }
/*
 * EV_L2_FRAME_ERROR in a non-established state: forward the error code
 * (a character smuggled in the arg pointer) to the management entity.
 */
static void
l2_frame_error(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	l2mgr(l2, MDL_ERROR_IND, arg);
}
/*
 * EV_L2_FRAME_ERROR in multi-frame operation (ST_L2_7/8): report the
 * error code to management and re-establish the link (not layer-3
 * initiated, so the L3_INIT flag is cleared).
 */
static void
l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	l2mgr(l2, MDL_ERROR_IND, arg);
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
/*
 * State/event dispatch table for the layer-2 FSM: each entry maps a
 * (state, event) pair to its handler.  States not listed for an event
 * ignore that event.
 */
static struct FsmNode L2FnList[] =
{
	/* establish requests from layer 3 */
	{ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
	{ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
	{ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
	{ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
	{ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	/* release requests from layer 3 */
	{ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
	{ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
	{ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	/* acknowledged (I-frame) data from layer 3 */
	{ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
	{ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
	{ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
	/* unacknowledged (UI-frame) data from layer 3 */
	{ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign},
	{ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui},
	{ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui},
	{ST_L2_4, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui},
	/* TEI management */
	{ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
	{ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
	{ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
	{ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
	{ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
	{ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
	{ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
	/* unnumbered frames from the peer */
	{ST_L2_4, EV_L2_SABME, l2_start_multi},
	{ST_L2_5, EV_L2_SABME, l2_send_UA},
	{ST_L2_6, EV_L2_SABME, l2_send_DM},
	{ST_L2_7, EV_L2_SABME, l2_restart_multi},
	{ST_L2_8, EV_L2_SABME, l2_restart_multi},
	{ST_L2_4, EV_L2_DISC, l2_send_DM},
	{ST_L2_5, EV_L2_DISC, l2_send_DM},
	{ST_L2_6, EV_L2_DISC, l2_send_UA},
	{ST_L2_7, EV_L2_DISC, l2_stop_multi},
	{ST_L2_8, EV_L2_DISC, l2_stop_multi},
	{ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_5, EV_L2_UA, l2_connected},
	{ST_L2_6, EV_L2_UA, l2_released},
	{ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_4, EV_L2_DM, l2_reestablish},
	{ST_L2_5, EV_L2_DM, l2_st5_dm_release},
	{ST_L2_6, EV_L2_DM, l2_st6_dm_release},
	{ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
	{ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
	{ST_L2_1, EV_L2_UI, l2_got_ui},
	{ST_L2_2, EV_L2_UI, l2_got_ui},
	{ST_L2_3, EV_L2_UI, l2_got_ui},
	{ST_L2_4, EV_L2_UI, l2_got_ui},
	{ST_L2_5, EV_L2_UI, l2_got_ui},
	{ST_L2_6, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_UI, l2_got_ui},
	{ST_L2_8, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
	{ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
	/* numbered frames from the peer */
	{ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
	{ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
	{ST_L2_7, EV_L2_I, l2_got_iframe},
	{ST_L2_8, EV_L2_I, l2_got_iframe},
	/* timer events */
	{ST_L2_5, EV_L2_T200, l2_timeout},
	{ST_L2_6, EV_L2_T200, l2_timeout},
	{ST_L2_7, EV_L2_T200, l2_timeout},
	{ST_L2_8, EV_L2_T200, l2_timeout},
	{ST_L2_7, EV_L2_T203, l2_timeout},
	{ST_L2_5, EV_L2_T200I, l2_st5_tout_200},
	{ST_L2_6, EV_L2_T200I, l2_st6_tout_200},
	{ST_L2_7, EV_L2_T200I, l2_st7_tout_200},
	{ST_L2_8, EV_L2_T200I, l2_st8_tout_200},
	{ST_L2_7, EV_L2_T203I, l2_st7_tout_203},
	/* internal transmit kick and local flow control */
	{ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
	{ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	/* frame errors and layer-1 deactivation */
	{ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da},
	{ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
	{ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
	{ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da},
	{ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da},
	{ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da},
	{ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da},
	{ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da},
};
/*
 * PH_DATA_IND from layer 1: classify a received frame and feed the
 * matching FSM event.  For LAPD, first validate the EA bits and filter
 * on SAPI/TEI (frames for other addresses are dropped with return 0).
 * Frame-format checkers (iframe_error, super_error, ...) return a
 * non-zero error character on malformed frames, which is reported as
 * EV_L2_FRAME_ERROR.  Returns 0/positive on handled frames, -EINVAL on
 * errors (caller frees the skb on non-zero return).
 */
static int
ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
{
	u_char *datap = skb->data;
	int ret = -EINVAL;
	int psapi, ptei;
	u_int l;
	int c = 0;	/* error character; 0 = frame OK so far */

	l = l2addrsize(l2);
	if (skb->len <= l) {
		/* shorter than address + control field */
		mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N');
		return ret;
	}
	if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */
		psapi = *datap++;
		ptei = *datap++;
		if ((psapi & 1) || !(ptei & 1)) {
			/* EA0 must be 0, EA1 must be 1 */
			printk(KERN_WARNING
			       "l2 D-channel frame wrong EA0/EA1\n");
			return ret;
		}
		psapi >>= 2;
		ptei >>= 1;
		if (psapi != l2->sapi) {
			/* not our business */
			if (*debug & DEBUG_L2)
				printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n",
				       __func__, psapi, l2->sapi);
			dev_kfree_skb(skb);
			return 0;
		}
		if ((ptei != l2->tei) && (ptei != GROUP_TEI)) {
			/* not our business */
			if (*debug & DEBUG_L2)
				printk(KERN_DEBUG "%s: tei %d/%d mismatch\n",
				       __func__, ptei, l2->tei);
			dev_kfree_skb(skb);
			return 0;
		}
	} else
		datap += l;
	/* classify by the control field and dispatch to the FSM */
	if (!(*datap & 1)) {	/* I-Frame */
		c = iframe_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_I, skb);
	} else if (IsSFrame(datap, l2)) {	/* S-Frame */
		c = super_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb);
	} else if (IsUI(datap)) {
		c = UI_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb);
	} else if (IsSABME(datap, l2)) {
		c = unnum_error(l2, skb, CMD);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb);
	} else if (IsUA(datap)) {
		c = unnum_error(l2, skb, RSP);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb);
	} else if (IsDISC(datap)) {
		c = unnum_error(l2, skb, CMD);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb);
	} else if (IsDM(datap)) {
		c = unnum_error(l2, skb, RSP);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb);
	} else if (IsFRMR(datap)) {
		c = FRMR_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb);
	} else
		c = 'L';	/* unrecognized frame type */
	if (c) {
		printk(KERN_WARNING "l2 D-channel frame error %c\n", c);
		mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
	}
	return ret;
}
/*
 * mISDNchannel send hook: entry point for all primitives arriving at this
 * layer-2 instance, from layer 1 (PH_*) as well as from layer 3 (DL_*).
 * Translates each primitive into the corresponding FSM event or forwards
 * it.  If a handler reports an error (non-zero ret), the skb is freed
 * here and 0 is returned — the skb is consumed in every case.
 */
static int
l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
{
	struct layer2 *l2 = container_of(ch, struct layer2, ch);
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	int ret = -EINVAL;

	if (*debug & DEBUG_L2_RECV)
		printk(KERN_DEBUG "%s: prim(%x) id(%x) sapi(%d) tei(%d)\n",
		       __func__, hh->prim, hh->id, l2->sapi, l2->tei);
	if (hh->prim == DL_INTERN_MSG) {
		/* unwrap an internally re-queued message: the real header
		 * was saved directly behind the outer one */
		struct mISDNhead *chh = hh + 1; /* saved copy */
		*hh = *chh;
		if (*debug & DEBUG_L2_RECV)
			printk(KERN_DEBUG "%s: prim(%x) id(%x) internal msg\n",
			       __func__, hh->prim, hh->id);
	}
	switch (hh->prim) {
	case PH_DATA_IND:
		ret = ph_data_indication(l2, hh, skb);
		break;
	case PH_DATA_CNF:
		ret = ph_data_confirm(l2, hh, skb);
		break;
	case PH_ACTIVATE_IND:
		/* layer 1 is up; run a deferred establish if one is pending */
		test_and_set_bit(FLG_L1_ACTIV, &l2->flag);
		l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL);
		if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
			ret = mISDN_FsmEvent(&l2->l2m,
					     EV_L2_DL_ESTABLISH_REQ, skb);
		break;
	case PH_DEACTIVATE_IND:
		test_and_clear_bit(FLG_L1_ACTIV, &l2->flag);
		l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL);
		ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb);
		break;
	case MPH_INFORMATION_IND:
		if (!l2->up)
			break;
		ret = l2->up->send(l2->up, skb);	/* pass through to layer 3 */
		break;
	case DL_DATA_REQ:
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_DATA, skb);
		break;
	case DL_UNITDATA_REQ:
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb);
		break;
	case DL_ESTABLISH_REQ:
		if (test_bit(FLG_LAPB, &l2->flag))
			test_and_set_bit(FLG_ORIG, &l2->flag);
		if (test_bit(FLG_L1_ACTIV, &l2->flag)) {
			if (test_bit(FLG_LAPD, &l2->flag) ||
			    test_bit(FLG_ORIG, &l2->flag))
				ret = mISDN_FsmEvent(&l2->l2m,
						     EV_L2_DL_ESTABLISH_REQ, skb);
		} else {
			/* layer 1 down: activate it first, establish later */
			if (test_bit(FLG_LAPD, &l2->flag) ||
			    test_bit(FLG_ORIG, &l2->flag)) {
				test_and_set_bit(FLG_ESTAB_PEND,
						 &l2->flag);
			}
			ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2),
				     skb);
		}
		break;
	case DL_RELEASE_REQ:
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
				      l2_newid(l2), 0, NULL);
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
				     skb);
		break;
	case DL_TIMER200_IND:
		mISDN_FsmEvent(&l2->l2m, EV_L2_T200I, NULL);
		break;
	case DL_TIMER203_IND:
		mISDN_FsmEvent(&l2->l2m, EV_L2_T203I, NULL);
		break;
	default:
		if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m, "l2 unknown pr %04x",
				  hh->prim);
	}
	if (ret) {
		/* handler failed or primitive unknown: consume the skb */
		dev_kfree_skb(skb);
		ret = 0;
	}
	return ret;
}
  1824. int
  1825. tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
  1826. {
  1827. int ret = -EINVAL;
  1828. if (*debug & DEBUG_L2_TEI)
  1829. printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd);
  1830. switch (cmd) {
  1831. case (MDL_ASSIGN_REQ):
  1832. ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
  1833. break;
  1834. case (MDL_REMOVE_REQ):
  1835. ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL);
  1836. break;
  1837. case (MDL_ERROR_IND):
  1838. ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
  1839. break;
  1840. case (MDL_ERROR_RSP):
  1841. /* ETS 300-125 5.3.2.1 Test: TC13010 */
  1842. printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n");
  1843. ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
  1844. break;
  1845. }
  1846. return ret;
  1847. }
  1848. static void
  1849. release_l2(struct layer2 *l2)
  1850. {
  1851. mISDN_FsmDelTimer(&l2->t200, 21);
  1852. mISDN_FsmDelTimer(&l2->t203, 16);
  1853. skb_queue_purge(&l2->i_queue);
  1854. skb_queue_purge(&l2->ui_queue);
  1855. skb_queue_purge(&l2->down_queue);
  1856. ReleaseWin(l2);
  1857. if (test_bit(FLG_LAPD, &l2->flag)) {
  1858. TEIrelease(l2);
  1859. if (l2->ch.st)
  1860. l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D,
  1861. CLOSE_CHANNEL, NULL);
  1862. }
  1863. kfree(l2);
  1864. }
  1865. static int
  1866. l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
  1867. {
  1868. struct layer2 *l2 = container_of(ch, struct layer2, ch);
  1869. u_int info;
  1870. if (*debug & DEBUG_L2_CTRL)
  1871. printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
  1872. switch (cmd) {
  1873. case OPEN_CHANNEL:
  1874. if (test_bit(FLG_LAPD, &l2->flag)) {
  1875. set_channel_address(&l2->ch, l2->sapi, l2->tei);
  1876. info = DL_INFO_L2_CONNECT;
  1877. l2up_create(l2, DL_INFORMATION_IND,
  1878. sizeof(info), &info);
  1879. }
  1880. break;
  1881. case CLOSE_CHANNEL:
  1882. if (l2->ch.peer)
  1883. l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL);
  1884. release_l2(l2);
  1885. break;
  1886. }
  1887. return 0;
  1888. }
  1889. struct layer2 *
  1890. create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
  1891. int sapi)
  1892. {
  1893. struct layer2 *l2;
  1894. struct channel_req rq;
  1895. l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL);
  1896. if (!l2) {
  1897. printk(KERN_ERR "kzalloc layer2 failed\n");
  1898. return NULL;
  1899. }
  1900. l2->next_id = 1;
  1901. l2->down_id = MISDN_ID_NONE;
  1902. l2->up = ch;
  1903. l2->ch.st = ch->st;
  1904. l2->ch.send = l2_send;
  1905. l2->ch.ctrl = l2_ctrl;
  1906. switch (protocol) {
  1907. case ISDN_P_LAPD_NT:
  1908. test_and_set_bit(FLG_LAPD, &l2->flag);
  1909. test_and_set_bit(FLG_LAPD_NET, &l2->flag);
  1910. test_and_set_bit(FLG_MOD128, &l2->flag);
  1911. l2->sapi = sapi;
  1912. l2->maxlen = MAX_DFRAME_LEN;
  1913. if (test_bit(OPTION_L2_PMX, &options))
  1914. l2->window = 7;
  1915. else
  1916. l2->window = 1;
  1917. if (test_bit(OPTION_L2_PTP, &options))
  1918. test_and_set_bit(FLG_PTP, &l2->flag);
  1919. if (test_bit(OPTION_L2_FIXEDTEI, &options))
  1920. test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
  1921. l2->tei = tei;
  1922. l2->T200 = 1000;
  1923. l2->N200 = 3;
  1924. l2->T203 = 10000;
  1925. if (test_bit(OPTION_L2_PMX, &options))
  1926. rq.protocol = ISDN_P_NT_E1;
  1927. else
  1928. rq.protocol = ISDN_P_NT_S0;
  1929. rq.adr.channel = 0;
  1930. l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
  1931. break;
  1932. case ISDN_P_LAPD_TE:
  1933. test_and_set_bit(FLG_LAPD, &l2->flag);
  1934. test_and_set_bit(FLG_MOD128, &l2->flag);
  1935. test_and_set_bit(FLG_ORIG, &l2->flag);
  1936. l2->sapi = sapi;
  1937. l2->maxlen = MAX_DFRAME_LEN;
  1938. if (test_bit(OPTION_L2_PMX, &options))
  1939. l2->window = 7;
  1940. else
  1941. l2->window = 1;
  1942. if (test_bit(OPTION_L2_PTP, &options))
  1943. test_and_set_bit(FLG_PTP, &l2->flag);
  1944. if (test_bit(OPTION_L2_FIXEDTEI, &options))
  1945. test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
  1946. l2->tei = tei;
  1947. l2->T200 = 1000;
  1948. l2->N200 = 3;
  1949. l2->T203 = 10000;
  1950. if (test_bit(OPTION_L2_PMX, &options))
  1951. rq.protocol = ISDN_P_TE_E1;
  1952. else
  1953. rq.protocol = ISDN_P_TE_S0;
  1954. rq.adr.channel = 0;
  1955. l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
  1956. break;
  1957. case ISDN_P_B_X75SLP:
  1958. test_and_set_bit(FLG_LAPB, &l2->flag);
  1959. l2->window = 7;
  1960. l2->maxlen = MAX_DATA_SIZE;
  1961. l2->T200 = 1000;
  1962. l2->N200 = 4;
  1963. l2->T203 = 5000;
  1964. l2->addr.A = 3;
  1965. l2->addr.B = 1;
  1966. break;
  1967. default:
  1968. printk(KERN_ERR "layer2 create failed prt %x\n",
  1969. protocol);
  1970. kfree(l2);
  1971. return NULL;
  1972. }
  1973. skb_queue_head_init(&l2->i_queue);
  1974. skb_queue_head_init(&l2->ui_queue);
  1975. skb_queue_head_init(&l2->down_queue);
  1976. skb_queue_head_init(&l2->tmp_queue);
  1977. InitWin(l2);
  1978. l2->l2m.fsm = &l2fsm;
  1979. if (test_bit(FLG_LAPB, &l2->flag) ||
  1980. test_bit(FLG_PTP, &l2->flag) ||
  1981. test_bit(FLG_LAPD_NET, &l2->flag))
  1982. l2->l2m.state = ST_L2_4;
  1983. else
  1984. l2->l2m.state = ST_L2_1;
  1985. l2->l2m.debug = *debug;
  1986. l2->l2m.userdata = l2;
  1987. l2->l2m.userint = 0;
  1988. l2->l2m.printdebug = l2m_debug;
  1989. mISDN_FsmInitTimer(&l2->l2m, &l2->t200);
  1990. mISDN_FsmInitTimer(&l2->l2m, &l2->t203);
  1991. return l2;
  1992. }
  1993. static int
  1994. x75create(struct channel_req *crq)
  1995. {
  1996. struct layer2 *l2;
  1997. if (crq->protocol != ISDN_P_B_X75SLP)
  1998. return -EPROTONOSUPPORT;
  1999. l2 = create_l2(crq->ch, crq->protocol, 0, 0, 0);
  2000. if (!l2)
  2001. return -ENOMEM;
  2002. crq->ch = &l2->ch;
  2003. crq->protocol = ISDN_P_B_HDLC;
  2004. return 0;
  2005. }
/* Registration record for the X.75 SLP B-channel protocol; x75create()
 * is invoked when a channel with this protocol is opened. */
static struct Bprotocol X75SLP = {
	.Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)),
	.name = "X75SLP",
	.create = x75create
};
  2011. int
  2012. Isdnl2_Init(u_int *deb)
  2013. {
  2014. debug = deb;
  2015. mISDN_register_Bprotocol(&X75SLP);
  2016. l2fsm.state_count = L2_STATE_COUNT;
  2017. l2fsm.event_count = L2_EVENT_COUNT;
  2018. l2fsm.strEvent = strL2Event;
  2019. l2fsm.strState = strL2State;
  2020. mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
  2021. TEIInit(deb);
  2022. return 0;
  2023. }
/* Module exit for the layer2 component: reverse of Isdnl2_Init() —
 * unregister the B-protocol, shut down the TEI manager, then free
 * the state machine table. */
void
Isdnl2_cleanup(void)
{
	mISDN_unregister_Bprotocol(&X75SLP);
	TEIFree();
	mISDN_FsmFree(&l2fsm);
}