/* $Id: isdnl2.c,v 2.30.2.4 2004/02/11 13:21:34 keil Exp $
 *
 * Author	Karsten Keil
 *		based on the teles driver from Jan den Ouden
 *		Copyright by Karsten Keil <keil@isdn4linux.de>
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 * For changes and modifications please read
 * Documentation/isdn/HiSax.cert
 *
 * Thanks to	Jan den Ouden
 *		Fritz Elfert
 *
 */

#include <linux/init.h>
#include "hisax.h"
#include "isdnl2.h"

const char *l2_revision = "$Revision: 2.30.2.4 $";

static void l2m_debug(struct FsmInst *fi, char *fmt, ...);

static struct Fsm l2fsm;

enum {
	ST_L2_1,
	ST_L2_2,
	ST_L2_3,
	ST_L2_4,
	ST_L2_5,
	ST_L2_6,
	ST_L2_7,
	ST_L2_8,
};

#define L2_STATE_COUNT (ST_L2_8+1)

static char *strL2State[] =
{
	"ST_L2_1",
	"ST_L2_2",
	"ST_L2_3",
	"ST_L2_4",
	"ST_L2_5",
	"ST_L2_6",
	"ST_L2_7",
	"ST_L2_8",
};

enum {
	EV_L2_UI,
	EV_L2_SABME,
	EV_L2_DISC,
	EV_L2_DM,
	EV_L2_UA,
	EV_L2_FRMR,
	EV_L2_SUPER,
	EV_L2_I,
	EV_L2_DL_DATA,
	EV_L2_ACK_PULL,
	EV_L2_DL_UNIT_DATA,
	EV_L2_DL_ESTABLISH_REQ,
	EV_L2_DL_RELEASE_REQ,
	EV_L2_MDL_ASSIGN,
	EV_L2_MDL_REMOVE,
	EV_L2_MDL_ERROR,
	EV_L1_DEACTIVATE,
	EV_L2_T200,
	EV_L2_T203,
	EV_L2_SET_OWN_BUSY,
	EV_L2_CLEAR_OWN_BUSY,
	EV_L2_FRAME_ERROR,
};

#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1)

static char *strL2Event[] =
{
	"EV_L2_UI",
	"EV_L2_SABME",
	"EV_L2_DISC",
	"EV_L2_DM",
	"EV_L2_UA",
	"EV_L2_FRMR",
	"EV_L2_SUPER",
	"EV_L2_I",
	"EV_L2_DL_DATA",
	"EV_L2_ACK_PULL",
	"EV_L2_DL_UNIT_DATA",
	"EV_L2_DL_ESTABLISH_REQ",
	"EV_L2_DL_RELEASE_REQ",
	"EV_L2_MDL_ASSIGN",
	"EV_L2_MDL_REMOVE",
	"EV_L2_MDL_ERROR",
	"EV_L1_DEACTIVATE",
	"EV_L2_T200",
	"EV_L2_T203",
	"EV_L2_SET_OWN_BUSY",
	"EV_L2_CLEAR_OWN_BUSY",
	"EV_L2_FRAME_ERROR",
};

static int l2addrsize(struct Layer2 *l2);

static void
set_peer_busy(struct Layer2 *l2) {
	test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
	if (!skb_queue_empty(&l2->i_queue) ||
	    !skb_queue_empty(&l2->ui_queue))
		test_and_set_bit(FLG_L2BLOCK, &l2->flag);
}

static void
clear_peer_busy(struct Layer2 *l2) {
	if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
		test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
}
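
/*
 * windowar[] holds clones of I-frames that have been sent but not yet
 * acknowledged; sow is the index of the oldest outstanding entry.
 */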
static void
InitWin(struct Layer2 *l2)
{
	int i;
	for (i = 0; i < MAX_WINDOW; i++)
		l2->windowar[i] = NULL;
}

static int
freewin1(struct Layer2 *l2)
{
	int i, cnt = 0;
	for (i = 0; i < MAX_WINDOW; i++) {
		if (l2->windowar[i]) {
			cnt++;
			dev_kfree_skb(l2->windowar[i]);
			l2->windowar[i] = NULL;
		}
	}
	return cnt;
}

static inline void
freewin(struct PStack *st)
{
	freewin1(&st->l2);
}

static void
ReleaseWin(struct Layer2 *l2)
{
	int cnt;
	if((cnt = freewin1(l2)))
		printk(KERN_WARNING "isdl2 freed %d skbuffs in release\n", cnt);
}
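
/* cansend: true if the send window (V(S) - V(A)) is not yet full and the peer is not busy */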
static inline unsigned int
cansend(struct PStack *st)
{
	unsigned int p1;
	if(test_bit(FLG_MOD128, &st->l2.flag))
		p1 = (st->l2.vs - st->l2.va) % 128;
	else
		p1 = (st->l2.vs - st->l2.va) % 8;
	return ((p1 < st->l2.window) && !test_bit(FLG_PEER_BUSY, &st->l2.flag));
}

static inline void
clear_exception(struct Layer2 *l2)
{
	test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
	test_and_clear_bit(FLG_REJEXC, &l2->flag);
	test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
	clear_peer_busy(l2);
}

static inline int
l2headersize(struct Layer2 *l2, int ui)
{
	return (((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
		(test_bit(FLG_LAPD, &l2->flag) ? 2 : 1));
}

inline int
l2addrsize(struct Layer2 *l2)
{
	return (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
}
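
/*
 * sethdraddr: write the LAPD address field (SAPI/TEI, two octets) or the
 * single LAPB address octet; the C/R bit depends on command/response and FLG_ORIG.
 */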
static int
sethdraddr(struct Layer2 *l2, u_char * header, int rsp)
{
	u_char *ptr = header;
	int crbit = rsp;
	if (test_bit(FLG_LAPD, &l2->flag)) {
		*ptr++ = (l2->sap << 2) | (rsp ? 2 : 0);
		*ptr++ = (l2->tei << 1) | 1;
		return (2);
	} else {
		if (test_bit(FLG_ORIG, &l2->flag))
			crbit = !crbit;
		if (crbit)
			*ptr++ = 1;
		else
			*ptr++ = 3;
		return (1);
	}
}

static inline void
enqueue_super(struct PStack *st,
	      struct sk_buff *skb)
{
	if (test_bit(FLG_LAPB, &st->l2.flag))
		st->l1.bcs->tx_cnt += skb->len;
	st->l2.l2l1(st, PH_DATA | REQUEST, skb);
}

#define enqueue_ui(a, b) enqueue_super(a, b)

static inline int
IsUI(u_char * data)
{
	return ((data[0] & 0xef) == UI);
}

static inline int
IsUA(u_char * data)
{
	return ((data[0] & 0xef) == UA);
}

static inline int
IsDM(u_char * data)
{
	return ((data[0] & 0xef) == DM);
}

static inline int
IsDISC(u_char * data)
{
	return ((data[0] & 0xef) == DISC);
}

static inline int
IsSFrame(u_char * data, struct PStack *st)
{
	register u_char d = *data;
	if (!test_bit(FLG_MOD128, &st->l2.flag))
		d &= 0xf;
	return(((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c));
}

static inline int
IsSABME(u_char * data, struct PStack *st)
{
	u_char d = data[0] & ~0x10;
	return (test_bit(FLG_MOD128, &st->l2.flag) ? d == SABME : d == SABM);
}

static inline int
IsREJ(u_char * data, struct PStack *st)
{
	return (test_bit(FLG_MOD128, &st->l2.flag) ? data[0] == REJ : (data[0] & 0xf) == REJ);
}

static inline int
IsFRMR(u_char * data)
{
	return ((data[0] & 0xef) == FRMR);
}

static inline int
IsRNR(u_char * data, struct PStack *st)
{
	return (test_bit(FLG_MOD128, &st->l2.flag) ? data[0] == RNR : (data[0] & 0xf) == RNR);
}

static int
iframe_error(struct PStack *st, struct sk_buff *skb)
{
	int i = l2addrsize(&st->l2) + (test_bit(FLG_MOD128, &st->l2.flag) ? 2 : 1);
	int rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &st->l2.flag))
		rsp = !rsp;
	if (rsp)
		return 'L';
	if (skb->len < i)
		return 'N';
	if ((skb->len - i) > st->l2.maxlen)
		return 'O';
	return 0;
}

static int
super_error(struct PStack *st, struct sk_buff *skb)
{
	if (skb->len != l2addrsize(&st->l2) +
	    (test_bit(FLG_MOD128, &st->l2.flag) ? 2 : 1))
		return 'N';
	return 0;
}

static int
unnum_error(struct PStack *st, struct sk_buff *skb, int wantrsp)
{
	int rsp = (*skb->data & 0x2) >> 1;
	if (test_bit(FLG_ORIG, &st->l2.flag))
		rsp = !rsp;
	if (rsp != wantrsp)
		return 'L';
	if (skb->len != l2addrsize(&st->l2) + 1)
		return 'N';
	return 0;
}

static int
UI_error(struct PStack *st, struct sk_buff *skb)
{
	int rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &st->l2.flag))
		rsp = !rsp;
	if (rsp)
		return 'L';
	if (skb->len > st->l2.maxlen + l2addrsize(&st->l2) + 1)
		return 'O';
	return 0;
}

static int
FRMR_error(struct PStack *st, struct sk_buff *skb)
{
	int headers = l2addrsize(&st->l2) + 1;
	u_char *datap = skb->data + headers;
	int rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &st->l2.flag))
		rsp = !rsp;
	if (!rsp)
		return 'L';
	if (test_bit(FLG_MOD128, &st->l2.flag)) {
		if (skb->len < headers + 5)
			return 'N';
		else
			l2m_debug(&st->l2.l2m, "FRMR information %2x %2x %2x %2x %2x",
				datap[0], datap[1], datap[2],
				datap[3], datap[4]);
	} else {
		if (skb->len < headers + 3)
			return 'N';
		else
			l2m_debug(&st->l2.l2m, "FRMR information %2x %2x %2x",
				datap[0], datap[1], datap[2]);
	}
	return 0;
}
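
/* legalnr: check that V(A) <= N(R) <= V(S) in modulo-8 or modulo-128 arithmetic */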
static unsigned int
legalnr(struct PStack *st, unsigned int nr)
{
	struct Layer2 *l2 = &st->l2;
	if(test_bit(FLG_MOD128, &l2->flag))
		return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
	else
		return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
}
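
/*
 * setva: acknowledge frames up to N(R): advance V(A), free the acknowledged
 * entries from windowar[] and, if requested, deliver write-wakeups via
 * lli_writewakeup().
 */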
static void
setva(struct PStack *st, unsigned int nr)
{
	struct Layer2 *l2 = &st->l2;
	int len;
	u_long flags;
	spin_lock_irqsave(&l2->lock, flags);
	while (l2->va != nr) {
		(l2->va)++;
		if(test_bit(FLG_MOD128, &l2->flag))
			l2->va %= 128;
		else
			l2->va %= 8;
		len = l2->windowar[l2->sow]->len;
		if (PACKET_NOACK == l2->windowar[l2->sow]->pkt_type)
			len = -1;
		dev_kfree_skb(l2->windowar[l2->sow]);
		l2->windowar[l2->sow] = NULL;
		l2->sow = (l2->sow + 1) % l2->window;
		spin_unlock_irqrestore(&l2->lock, flags);
		if (test_bit(FLG_LLI_L2WAKEUP, &st->lli.flag) && (len >=0))
			lli_writewakeup(st, len);
		spin_lock_irqsave(&l2->lock, flags);
	}
	spin_unlock_irqrestore(&l2->lock, flags);
}

static void
send_uframe(struct PStack *st, u_char cmd, u_char cr)
{
	struct sk_buff *skb;
	u_char tmp[MAX_HEADER_LEN];
	int i;
	i = sethdraddr(&st->l2, tmp, cr);
	tmp[i++] = cmd;
	if (!(skb = alloc_skb(i, GFP_ATOMIC))) {
		printk(KERN_WARNING "isdl2 can't alloc sbbuff for send_uframe\n");
		return;
	}
	memcpy(skb_put(skb, i), tmp, i);
	enqueue_super(st, skb);
}

static inline u_char
get_PollFlag(struct PStack * st, struct sk_buff * skb)
{
	return (skb->data[l2addrsize(&(st->l2))] & 0x10);
}

static inline u_char
get_PollFlagFree(struct PStack *st, struct sk_buff *skb)
{
	u_char PF;
	PF = get_PollFlag(st, skb);
	dev_kfree_skb(skb);
	return (PF);
}

static inline void
start_t200(struct PStack *st, int i)
{
	FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &st->l2.flag);
}

static inline void
restart_t200(struct PStack *st, int i)
{
	FsmRestartTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &st->l2.flag);
}

static inline void
stop_t200(struct PStack *st, int i)
{
	if(test_and_clear_bit(FLG_T200_RUN, &st->l2.flag))
		FsmDelTimer(&st->l2.t200, i);
}

static inline void
st5_dl_release_l2l3(struct PStack *st)
{
	int pr;
	if(test_and_clear_bit(FLG_PEND_REL, &st->l2.flag))
		pr = DL_RELEASE | CONFIRM;
	else
		pr = DL_RELEASE | INDICATION;
	st->l2.l2l3(st, pr, NULL);
}

static inline void
lapb_dl_release_l2l3(struct PStack *st, int f)
{
	if (test_bit(FLG_LAPB, &st->l2.flag))
		st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
	st->l2.l2l3(st, DL_RELEASE | f, NULL);
}
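
/*
 * establishlink: start (re)establishment - send SABM/SABME with the P bit
 * set, reset the retry counter, (re)start T200 and enter ST_L2_5
 * (awaiting establishment).
 */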
static void
establishlink(struct FsmInst *fi)
{
	struct PStack *st = fi->userdata;
	u_char cmd;
	clear_exception(&st->l2);
	st->l2.rc = 0;
	cmd = (test_bit(FLG_MOD128, &st->l2.flag) ? SABME : SABM) | 0x10;
	send_uframe(st, cmd, CMD);
	FsmDelTimer(&st->l2.t203, 1);
	restart_t200(st, 1);
	test_and_clear_bit(FLG_PEND_REL, &st->l2.flag);
	freewin(st);
	FsmChangeState(fi, ST_L2_5);
}

static void
l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct PStack *st = fi->userdata;
	if (get_PollFlagFree(st, skb))
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'C');
	else
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'D');
}

static void
l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct PStack *st = fi->userdata;
	if (get_PollFlagFree(st, skb))
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'B');
	else {
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'E');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
	}
}

static void
l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct PStack *st = fi->userdata;
	if (get_PollFlagFree(st, skb))
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'B');
	else {
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'E');
	}
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
}

static void
l2_go_st3(struct FsmInst *fi, int event, void *arg)
{
	FsmChangeState(fi, ST_L2_3);
}

static void
l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	FsmChangeState(fi, ST_L2_3);
	st->l2.l2tei(st, MDL_ASSIGN | INDICATION, NULL);
}

static void
l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	skb_queue_tail(&st->l2.ui_queue, skb);
	FsmChangeState(fi, ST_L2_2);
	st->l2.l2tei(st, MDL_ASSIGN | INDICATION, NULL);
}

static void
l2_queue_ui(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	skb_queue_tail(&st->l2.ui_queue, skb);
}

static void
tx_ui(struct PStack *st)
{
	struct sk_buff *skb;
	u_char header[MAX_HEADER_LEN];
	int i;
	i = sethdraddr(&(st->l2), header, CMD);
	header[i++] = UI;
	while ((skb = skb_dequeue(&st->l2.ui_queue))) {
		memcpy(skb_push(skb, i), header, i);
		enqueue_ui(st, skb);
	}
}

static void
l2_send_ui(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	skb_queue_tail(&st->l2.ui_queue, skb);
	tx_ui(st);
}

static void
l2_got_ui(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	skb_pull(skb, l2headersize(&st->l2, 1));
	st->l2.l2l3(st, DL_UNIT_DATA | INDICATION, skb);
/*	^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *	in states 1-3 for broadcast
 */
}

static void
l2_establish(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	establishlink(fi);
	test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
}

static void
l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	skb_queue_purge(&st->l2.i_queue);
	test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
	test_and_clear_bit(FLG_PEND_REL, &st->l2.flag);
}

static void
l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	skb_queue_purge(&st->l2.i_queue);
	establishlink(fi);
	test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
}

static void
l2_release(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
}

static void
l2_pend_rel(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	test_and_set_bit(FLG_PEND_REL, &st->l2.flag);
}

static void
l2_disconnect(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	skb_queue_purge(&st->l2.i_queue);
	freewin(st);
	FsmChangeState(fi, ST_L2_6);
	st->l2.rc = 0;
	send_uframe(st, DISC | 0x10, CMD);
	FsmDelTimer(&st->l2.t203, 1);
	restart_t200(st, 2);
}

static void
l2_start_multi(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
	clear_exception(&st->l2);
	st->l2.vs = 0;
	st->l2.va = 0;
	st->l2.vr = 0;
	st->l2.sow = 0;
	FsmChangeState(fi, ST_L2_7);
	FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 3);
	st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL);
}

static void
l2_send_UA(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
}

static void
l2_send_DM(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	send_uframe(st, DM | get_PollFlagFree(st, skb), RSP);
}

static void
l2_restart_multi(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int est = 0, state;
	state = fi->state;
	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
	st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'F');
	if (st->l2.vs != st->l2.va) {
		skb_queue_purge(&st->l2.i_queue);
		est = 1;
	}
	clear_exception(&st->l2);
	st->l2.vs = 0;
	st->l2.va = 0;
	st->l2.vr = 0;
	st->l2.sow = 0;
	FsmChangeState(fi, ST_L2_7);
	stop_t200(st, 3);
	FsmRestartTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 3);
	if (est)
		st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL);
	if ((ST_L2_7==state) || (ST_L2_8 == state))
		if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
}

static void
l2_stop_multi(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	FsmChangeState(fi, ST_L2_4);
	FsmDelTimer(&st->l2.t203, 3);
	stop_t200(st, 4);
	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
	skb_queue_purge(&st->l2.i_queue);
	freewin(st);
	lapb_dl_release_l2l3(st, INDICATION);
}

static void
l2_connected(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int pr=-1;
	if (!get_PollFlag(st, skb)) {
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	if (test_and_clear_bit(FLG_PEND_REL, &st->l2.flag))
		l2_disconnect(fi, event, arg);
	if (test_and_clear_bit(FLG_L3_INIT, &st->l2.flag)) {
		pr = DL_ESTABLISH | CONFIRM;
	} else if (st->l2.vs != st->l2.va) {
		skb_queue_purge(&st->l2.i_queue);
		pr = DL_ESTABLISH | INDICATION;
	}
	stop_t200(st, 5);
	st->l2.vr = 0;
	st->l2.vs = 0;
	st->l2.va = 0;
	st->l2.sow = 0;
	FsmChangeState(fi, ST_L2_7);
	FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 4);
	if (pr != -1)
		st->l2.l2l3(st, pr, NULL);
	if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
}

static void
l2_released(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	if (!get_PollFlag(st, skb)) {
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	stop_t200(st, 6);
	lapb_dl_release_l2l3(st, CONFIRM);
	FsmChangeState(fi, ST_L2_4);
}

static void
l2_reestablish(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	if (!get_PollFlagFree(st, skb)) {
		establishlink(fi);
		test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
	}
}

static void
l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	if (get_PollFlagFree(st, skb)) {
		stop_t200(st, 7);
		if (!test_bit(FLG_L3_INIT, &st->l2.flag))
			skb_queue_purge(&st->l2.i_queue);
		if (test_bit(FLG_LAPB, &st->l2.flag))
			st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
		st5_dl_release_l2l3(st);
		FsmChangeState(fi, ST_L2_4);
	}
}

static void
l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	if (get_PollFlagFree(st, skb)) {
		stop_t200(st, 8);
		lapb_dl_release_l2l3(st, CONFIRM);
		FsmChangeState(fi, ST_L2_4);
	}
}

static inline void
enquiry_cr(struct PStack *st, u_char typ, u_char cr, u_char pf)
{
	struct sk_buff *skb;
	struct Layer2 *l2;
	u_char tmp[MAX_HEADER_LEN];
	int i;
	l2 = &st->l2;
	i = sethdraddr(l2, tmp, cr);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		tmp[i++] = typ;
		tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
	} else
		tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
	if (!(skb = alloc_skb(i, GFP_ATOMIC))) {
		printk(KERN_WARNING "isdl2 can't alloc sbbuff for enquiry_cr\n");
		return;
	}
	memcpy(skb_put(skb, i), tmp, i);
	enqueue_super(st, skb);
}

static inline void
enquiry_response(struct PStack *st)
{
	if (test_bit(FLG_OWN_BUSY, &st->l2.flag))
		enquiry_cr(st, RNR, RSP, 1);
	else
		enquiry_cr(st, RR, RSP, 1);
	test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
}

static inline void
transmit_enquiry(struct PStack *st)
{
	if (test_bit(FLG_OWN_BUSY, &st->l2.flag))
		enquiry_cr(st, RNR, CMD, 1);
	else
		enquiry_cr(st, RR, CMD, 1);
	test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
	start_t200(st, 9);
}

static void
nrerrorrecovery(struct FsmInst *fi)
{
	struct PStack *st = fi->userdata;
	st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'J');
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
}
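
/*
 * invoke_retransmission: wind V(S) back to N(R) and push the still
 * unacknowledged frames from windowar[] back onto the head of i_queue
 * so they are sent again.
 */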
static void
invoke_retransmission(struct PStack *st, unsigned int nr)
{
	struct Layer2 *l2 = &st->l2;
	u_int p1;
	u_long flags;
	spin_lock_irqsave(&l2->lock, flags);
	if (l2->vs != nr) {
		while (l2->vs != nr) {
			(l2->vs)--;
			if(test_bit(FLG_MOD128, &l2->flag)) {
				l2->vs %= 128;
				p1 = (l2->vs - l2->va) % 128;
			} else {
				l2->vs %= 8;
				p1 = (l2->vs - l2->va) % 8;
			}
			p1 = (p1 + l2->sow) % l2->window;
			if (test_bit(FLG_LAPB, &l2->flag))
				st->l1.bcs->tx_cnt += l2->windowar[p1]->len + l2headersize(l2, 0);
			skb_queue_head(&l2->i_queue, l2->windowar[p1]);
			l2->windowar[p1] = NULL;
		}
		spin_unlock_irqrestore(&l2->lock, flags);
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
		return;
	}
	spin_unlock_irqrestore(&l2->lock, flags);
}
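
/* supervisory frame (RR/RNR/REJ) received in ST_L2_7 (multiple frame established) */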
static void
l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, typ = RR;
	unsigned int nr;
	struct Layer2 *l2 = &st->l2;
	rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, st)) {
		set_peer_busy(l2);
		typ = RNR;
	} else
		clear_peer_busy(l2);
	if (IsREJ(skb->data, st))
		typ = REJ;
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);
	if (PollFlag) {
		if (rsp)
			st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'A');
		else
			enquiry_response(st);
	}
	if (legalnr(st, nr)) {
		if (typ == REJ) {
			setva(st, nr);
			invoke_retransmission(st, nr);
			stop_t200(st, 10);
			if (FsmAddTimer(&st->l2.t203, st->l2.T203,
					EV_L2_T203, NULL, 6))
				l2m_debug(&st->l2.l2m, "Restart T203 ST7 REJ");
		} else if ((nr == l2->vs) && (typ == RR)) {
			setva(st, nr);
			stop_t200(st, 11);
			FsmRestartTimer(&st->l2.t203, st->l2.T203,
					EV_L2_T203, NULL, 7);
		} else if ((l2->va != nr) || (typ == RNR)) {
			setva(st, nr);
			if(typ != RR) FsmDelTimer(&st->l2.t203, 9);
			restart_t200(st, 12);
		}
		if (!skb_queue_empty(&st->l2.i_queue) && (typ == RR))
			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
	} else
		nrerrorrecovery(fi);
}

static void
l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	if (test_bit(FLG_LAPB, &st->l2.flag))
		st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
	if (!test_bit(FLG_L3_INIT, &st->l2.flag))
		skb_queue_tail(&st->l2.i_queue, skb);
	else
		dev_kfree_skb(skb);
}

static void
l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	if (test_bit(FLG_LAPB, &st->l2.flag))
		st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
	skb_queue_tail(&st->l2.i_queue, skb);
	st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
}

static void
l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	if (test_bit(FLG_LAPB, &st->l2.flag))
		st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
	skb_queue_tail(&st->l2.i_queue, skb);
}
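
/*
 * l2_got_iframe: I-frame reception - deliver in-sequence data (N(S) == V(R))
 * to layer 3, send REJ on a sequence error, and process the piggybacked N(R)
 * acknowledgement.
 */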
static void
l2_got_iframe(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	struct Layer2 *l2 = &(st->l2);
	int PollFlag, ns, i;
	unsigned int nr;
	i = l2addrsize(l2);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
		ns = skb->data[i] >> 1;
		nr = (skb->data[i + 1] >> 1) & 0x7f;
	} else {
		PollFlag = (skb->data[i] & 0x10);
		ns = (skb->data[i] >> 1) & 0x7;
		nr = (skb->data[i] >> 5) & 0x7;
	}
	if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
		dev_kfree_skb(skb);
		if(PollFlag) enquiry_response(st);
	} else if (l2->vr == ns) {
		(l2->vr)++;
		if(test_bit(FLG_MOD128, &l2->flag))
			l2->vr %= 128;
		else
			l2->vr %= 8;
		test_and_clear_bit(FLG_REJEXC, &l2->flag);
		if (PollFlag)
			enquiry_response(st);
		else
			test_and_set_bit(FLG_ACK_PEND, &l2->flag);
		skb_pull(skb, l2headersize(l2, 0));
		st->l2.l2l3(st, DL_DATA | INDICATION, skb);
	} else {
		/* n(s)!=v(r) */
		dev_kfree_skb(skb);
		if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
			if (PollFlag)
				enquiry_response(st);
		} else {
			enquiry_cr(st, REJ, RSP, PollFlag);
			test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
		}
	}
	if (legalnr(st, nr)) {
		if (!test_bit(FLG_PEER_BUSY, &st->l2.flag) && (fi->state == ST_L2_7)) {
			if (nr == st->l2.vs) {
				stop_t200(st, 13);
				FsmRestartTimer(&st->l2.t203, st->l2.T203,
						EV_L2_T203, NULL, 7);
			} else if (nr != st->l2.va)
				restart_t200(st, 14);
		}
		setva(st, nr);
	} else {
		nrerrorrecovery(fi);
		return;
	}
	if (!skb_queue_empty(&st->l2.i_queue) && (fi->state == ST_L2_7))
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
	if (test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag))
		enquiry_cr(st, RR, RSP, 0);
}

static void
l2_got_tei(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	st->l2.tei = (long) arg;
	if (fi->state == ST_L2_3) {
		establishlink(fi);
		test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
	} else
		FsmChangeState(fi, ST_L2_4);
	if (!skb_queue_empty(&st->l2.ui_queue))
		tx_ui(st);
}

static void
l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
	} else if (st->l2.rc == st->l2.N200) {
		FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
		skb_queue_purge(&st->l2.i_queue);
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'G');
		if (test_bit(FLG_LAPB, &st->l2.flag))
			st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
		st5_dl_release_l2l3(st);
	} else {
		st->l2.rc++;
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
		send_uframe(st, (test_bit(FLG_MOD128, &st->l2.flag) ? SABME : SABM)
			| 0x10, CMD);
	}
}

static void
l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
	} else if (st->l2.rc == st->l2.N200) {
		FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'H');
		lapb_dl_release_l2l3(st, CONFIRM);
	} else {
		st->l2.rc++;
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200,
			NULL, 9);
		send_uframe(st, DISC | 0x10, CMD);
	}
}

static void
l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
	st->l2.rc = 0;
	FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(st);
	st->l2.rc++;
}

static void
l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
	if (st->l2.rc == st->l2.N200) {
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'I');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
	} else {
		transmit_enquiry(st);
		st->l2.rc++;
	}
}

static void
l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 9);
		return;
	}
	FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(st);
	st->l2.rc = 0;
}
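
/*
 * l2_pull_iqueue: take the next I-frame from i_queue, keep a clone in
 * windowar[] for possible retransmission, prepend the I-frame header with
 * the current V(S)/V(R) and hand the frame to layer 1; start T200 if it
 * is not already running.
 */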
static void
l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb, *oskb;
	struct Layer2 *l2 = &st->l2;
	u_char header[MAX_HEADER_LEN];
	int i;
	int unsigned p1;
	u_long flags;
	if (!cansend(st))
		return;
	skb = skb_dequeue(&l2->i_queue);
	if (!skb)
		return;
	spin_lock_irqsave(&l2->lock, flags);
	if(test_bit(FLG_MOD128, &l2->flag))
		p1 = (l2->vs - l2->va) % 128;
	else
		p1 = (l2->vs - l2->va) % 8;
	p1 = (p1 + l2->sow) % l2->window;
	if (l2->windowar[p1]) {
		printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
		       p1);
		dev_kfree_skb(l2->windowar[p1]);
	}
	l2->windowar[p1] = skb_clone(skb, GFP_ATOMIC);
	i = sethdraddr(&st->l2, header, CMD);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		header[i++] = l2->vs << 1;
		header[i++] = l2->vr << 1;
		l2->vs = (l2->vs + 1) % 128;
	} else {
		header[i++] = (l2->vr << 5) | (l2->vs << 1);
		l2->vs = (l2->vs + 1) % 8;
	}
	spin_unlock_irqrestore(&l2->lock, flags);
	p1 = skb->data - skb->head;
	if (p1 >= i)
		memcpy(skb_push(skb, i), header, i);
	else {
		printk(KERN_WARNING
		       "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
		oskb = skb;
		skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
		memcpy(skb_put(skb, i), header, i);
		skb_copy_from_linear_data(oskb,
					  skb_put(skb, oskb->len), oskb->len);
		dev_kfree_skb(oskb);
	}
	st->l2.l2l1(st, PH_PULL | INDICATION, skb);
	test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
	if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
		FsmDelTimer(&st->l2.t203, 13);
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 11);
	}
	if (!skb_queue_empty(&l2->i_queue) && cansend(st))
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
}

static void
l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, rnr = 0;
	unsigned int nr;
	struct Layer2 *l2 = &st->l2;
	rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, st)) {
		set_peer_busy(l2);
		rnr = 1;
	} else
		clear_peer_busy(l2);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);
	if (rsp && PollFlag) {
		if (legalnr(st, nr)) {
			if (rnr) {
				restart_t200(st, 15);
			} else {
				stop_t200(st, 16);
				FsmAddTimer(&l2->t203, l2->T203,
					    EV_L2_T203, NULL, 5);
				setva(st, nr);
			}
			invoke_retransmission(st, nr);
			FsmChangeState(fi, ST_L2_7);
			if (!skb_queue_empty(&l2->i_queue) && cansend(st))
				st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
		} else
			nrerrorrecovery(fi);
	} else {
		if (!rsp && PollFlag)
			enquiry_response(st);
		if (legalnr(st, nr)) {
			setva(st, nr);
		} else
			nrerrorrecovery(fi);
	}
}

static void
l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	skb_pull(skb, l2addrsize(&st->l2) + 1);
	if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) ||	/* I or S */
	    (IsUA(skb->data) && (fi->state == ST_L2_7))) {
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'K');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
	}
	dev_kfree_skb(skb);
}

static void
l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	skb_queue_purge(&st->l2.ui_queue);
	st->l2.tei = -1;
	FsmChangeState(fi, ST_L2_1);
}

static void
l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	skb_queue_purge(&st->l2.ui_queue);
	st->l2.tei = -1;
	st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
	FsmChangeState(fi, ST_L2_1);
}

static void
l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	freewin(st);
	st->l2.tei = -1;
	stop_t200(st, 17);
	st5_dl_release_l2l3(st);
	FsmChangeState(fi, ST_L2_1);
}

static void
l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	skb_queue_purge(&st->l2.ui_queue);
	st->l2.tei = -1;
	stop_t200(st, 18);
	st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
	FsmChangeState(fi, ST_L2_1);
}

static void
l2_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	freewin(st);
	st->l2.tei = -1;
	stop_t200(st, 17);
	FsmDelTimer(&st->l2.t203, 19);
	st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
	FsmChangeState(fi, ST_L2_1);
}

static void
l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	if (test_and_clear_bit(FLG_ESTAB_PEND, &st->l2.flag))
		st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
}

static void
l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	freewin(st);
	stop_t200(st, 19);
	st5_dl_release_l2l3(st);
	FsmChangeState(fi, ST_L2_4);
}

static void
l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	skb_queue_purge(&st->l2.ui_queue);
	stop_t200(st, 20);
	st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
	FsmChangeState(fi, ST_L2_4);
}

static void
l2_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	freewin(st);
	stop_t200(st, 19);
	FsmDelTimer(&st->l2.t203, 19);
	st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
	FsmChangeState(fi, ST_L2_4);
}

static void
l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	if(!test_and_set_bit(FLG_OWN_BUSY, &st->l2.flag)) {
		enquiry_cr(st, RNR, RSP, 0);
		test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
	}
}

static void
l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	if(!test_and_clear_bit(FLG_OWN_BUSY, &st->l2.flag)) {
		enquiry_cr(st, RR, RSP, 0);
		test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
	}
}

static void
l2_frame_error(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	st->ma.layer(st, MDL_ERROR | INDICATION, arg);
}

static void
l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	st->ma.layer(st, MDL_ERROR | INDICATION, arg);
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
}
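
/* state/event transition table for the layer 2 state machine */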
static struct FsmNode L2FnList[] __initdata =
{
	{ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
	{ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
	{ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
	{ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
	{ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
	{ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
	{ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
	{ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
	{ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
	{ST_L2_1, EV_L2_DL_UNIT_DATA, l2_queue_ui_assign},
	{ST_L2_2, EV_L2_DL_UNIT_DATA, l2_queue_ui},
	{ST_L2_3, EV_L2_DL_UNIT_DATA, l2_queue_ui},
	{ST_L2_4, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_5, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_6, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_7, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_8, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
	{ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
	{ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
	{ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
	{ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
	{ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
	{ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
	{ST_L2_4, EV_L2_SABME, l2_start_multi},
	{ST_L2_5, EV_L2_SABME, l2_send_UA},
	{ST_L2_6, EV_L2_SABME, l2_send_DM},
	{ST_L2_7, EV_L2_SABME, l2_restart_multi},
	{ST_L2_8, EV_L2_SABME, l2_restart_multi},
	{ST_L2_4, EV_L2_DISC, l2_send_DM},
	{ST_L2_5, EV_L2_DISC, l2_send_DM},
	{ST_L2_6, EV_L2_DISC, l2_send_UA},
	{ST_L2_7, EV_L2_DISC, l2_stop_multi},
	{ST_L2_8, EV_L2_DISC, l2_stop_multi},
	{ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_5, EV_L2_UA, l2_connected},
	{ST_L2_6, EV_L2_UA, l2_released},
	{ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_4, EV_L2_DM, l2_reestablish},
	{ST_L2_5, EV_L2_DM, l2_st5_dm_release},
	{ST_L2_6, EV_L2_DM, l2_st6_dm_release},
	{ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
	{ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
	{ST_L2_1, EV_L2_UI, l2_got_ui},
	{ST_L2_2, EV_L2_UI, l2_got_ui},
	{ST_L2_3, EV_L2_UI, l2_got_ui},
	{ST_L2_4, EV_L2_UI, l2_got_ui},
	{ST_L2_5, EV_L2_UI, l2_got_ui},
	{ST_L2_6, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_UI, l2_got_ui},
	{ST_L2_8, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
	{ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
	{ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
	{ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
	{ST_L2_7, EV_L2_I, l2_got_iframe},
	{ST_L2_8, EV_L2_I, l2_got_iframe},
	{ST_L2_5, EV_L2_T200, l2_st5_tout_200},
	{ST_L2_6, EV_L2_T200, l2_st6_tout_200},
	{ST_L2_7, EV_L2_T200, l2_st7_tout_200},
	{ST_L2_8, EV_L2_T200, l2_st8_tout_200},
	{ST_L2_7, EV_L2_T203, l2_st7_tout_203},
	{ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
	{ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da},
	{ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
	{ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
	{ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da},
	{ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da},
	{ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da},
	{ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da},
	{ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da},
};

#define L2_FN_COUNT (sizeof(L2FnList)/sizeof(struct FsmNode))
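
/*
 * isdnl2_l1l2: entry point for frames and events coming up from layer 1;
 * classifies each received frame (I/S/UI/SABME/UA/DISC/DM/FRMR), runs the
 * per-type validity check and feeds the corresponding event into the FSM.
 */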
static void
isdnl2_l1l2(struct PStack *st, int pr, void *arg)
{
	struct sk_buff *skb = arg;
	u_char *datap;
	int ret = 1, len;
	int c = 0;
	switch (pr) {
		case (PH_DATA | INDICATION):
			datap = skb->data;
			len = l2addrsize(&st->l2);
			if (skb->len > len)
				datap += len;
			else {
				FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *) 'N');
				dev_kfree_skb(skb);
				return;
			}
			if (!(*datap & 1)) {	/* I-Frame */
				if(!(c = iframe_error(st, skb)))
					ret = FsmEvent(&st->l2.l2m, EV_L2_I, skb);
			} else if (IsSFrame(datap, st)) {	/* S-Frame */
				if(!(c = super_error(st, skb)))
					ret = FsmEvent(&st->l2.l2m, EV_L2_SUPER, skb);
			} else if (IsUI(datap)) {
				if(!(c = UI_error(st, skb)))
					ret = FsmEvent(&st->l2.l2m, EV_L2_UI, skb);
			} else if (IsSABME(datap, st)) {
				if(!(c = unnum_error(st, skb, CMD)))
					ret = FsmEvent(&st->l2.l2m, EV_L2_SABME, skb);
			} else if (IsUA(datap)) {
				if(!(c = unnum_error(st, skb, RSP)))
					ret = FsmEvent(&st->l2.l2m, EV_L2_UA, skb);
			} else if (IsDISC(datap)) {
				if(!(c = unnum_error(st, skb, CMD)))
					ret = FsmEvent(&st->l2.l2m, EV_L2_DISC, skb);
			} else if (IsDM(datap)) {
				if(!(c = unnum_error(st, skb, RSP)))
					ret = FsmEvent(&st->l2.l2m, EV_L2_DM, skb);
			} else if (IsFRMR(datap)) {
				if(!(c = FRMR_error(st,skb)))
					ret = FsmEvent(&st->l2.l2m, EV_L2_FRMR, skb);
			} else {
				FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *) 'L');
				dev_kfree_skb(skb);
				ret = 0;
			}
			if(c) {
				dev_kfree_skb(skb);
				FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
				ret = 0;
			}
			if (ret)
				dev_kfree_skb(skb);
			break;
		case (PH_PULL | CONFIRM):
			FsmEvent(&st->l2.l2m, EV_L2_ACK_PULL, arg);
			break;
		case (PH_PAUSE | INDICATION):
			test_and_set_bit(FLG_DCHAN_BUSY, &st->l2.flag);
			break;
		case (PH_PAUSE | CONFIRM):
			test_and_clear_bit(FLG_DCHAN_BUSY, &st->l2.flag);
			break;
		case (PH_ACTIVATE | CONFIRM):
		case (PH_ACTIVATE | INDICATION):
			test_and_set_bit(FLG_L1_ACTIV, &st->l2.flag);
			if (test_and_clear_bit(FLG_ESTAB_PEND, &st->l2.flag))
				FsmEvent(&st->l2.l2m, EV_L2_DL_ESTABLISH_REQ, arg);
			break;
		case (PH_DEACTIVATE | INDICATION):
		case (PH_DEACTIVATE | CONFIRM):
			test_and_clear_bit(FLG_L1_ACTIV, &st->l2.flag);
			FsmEvent(&st->l2.l2m, EV_L1_DEACTIVATE, arg);
			break;
		default:
			l2m_debug(&st->l2.l2m, "l2 unknown pr %04x", pr);
			break;
	}
}

static void
isdnl2_l3l2(struct PStack *st, int pr, void *arg)
{
	switch (pr) {
		case (DL_DATA | REQUEST):
			if (FsmEvent(&st->l2.l2m, EV_L2_DL_DATA, arg)) {
				dev_kfree_skb((struct sk_buff *) arg);
			}
			break;
		case (DL_UNIT_DATA | REQUEST):
			if (FsmEvent(&st->l2.l2m, EV_L2_DL_UNIT_DATA, arg)) {
				dev_kfree_skb((struct sk_buff *) arg);
			}
			break;
		case (DL_ESTABLISH | REQUEST):
			if (test_bit(FLG_L1_ACTIV, &st->l2.flag)) {
				if (test_bit(FLG_LAPD, &st->l2.flag) ||
				    test_bit(FLG_ORIG, &st->l2.flag)) {
					FsmEvent(&st->l2.l2m, EV_L2_DL_ESTABLISH_REQ, arg);
				}
			} else {
				if (test_bit(FLG_LAPD, &st->l2.flag) ||
				    test_bit(FLG_ORIG, &st->l2.flag)) {
					test_and_set_bit(FLG_ESTAB_PEND, &st->l2.flag);
				}
				st->l2.l2l1(st, PH_ACTIVATE, NULL);
			}
			break;
		case (DL_RELEASE | REQUEST):
			if (test_bit(FLG_LAPB, &st->l2.flag)) {
				st->l2.l2l1(st, PH_DEACTIVATE, NULL);
			}
			FsmEvent(&st->l2.l2m, EV_L2_DL_RELEASE_REQ, arg);
			break;
		case (MDL_ASSIGN | REQUEST):
			FsmEvent(&st->l2.l2m, EV_L2_MDL_ASSIGN, arg);
			break;
		case (MDL_REMOVE | REQUEST):
			FsmEvent(&st->l2.l2m, EV_L2_MDL_REMOVE, arg);
			break;
		case (MDL_ERROR | RESPONSE):
			FsmEvent(&st->l2.l2m, EV_L2_MDL_ERROR, arg);
			break;
	}
}

void
releasestack_isdnl2(struct PStack *st)
{
	FsmDelTimer(&st->l2.t200, 21);
	FsmDelTimer(&st->l2.t203, 16);
	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	ReleaseWin(&st->l2);
}

static void
l2m_debug(struct FsmInst *fi, char *fmt, ...)
{
	va_list args;
	struct PStack *st = fi->userdata;
	va_start(args, fmt);
	VHiSax_putstatus(st->l1.hardware, st->l2.debug_id, fmt, args);
	va_end(args);
}
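
/*
 * setstack_isdnl2: initialize the layer 2 instance: queues, window, FSM
 * (initial state ST_L2_4 for LAPB, ST_L2_1 for LAPD) and the T200/T203 timers.
 */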
void
setstack_isdnl2(struct PStack *st, char *debug_id)
{
	spin_lock_init(&st->l2.lock);
	st->l1.l1l2 = isdnl2_l1l2;
	st->l3.l3l2 = isdnl2_l3l2;
	skb_queue_head_init(&st->l2.i_queue);
	skb_queue_head_init(&st->l2.ui_queue);
	InitWin(&st->l2);
	st->l2.debug = 0;
	st->l2.l2m.fsm = &l2fsm;
	if (test_bit(FLG_LAPB, &st->l2.flag))
		st->l2.l2m.state = ST_L2_4;
	else
		st->l2.l2m.state = ST_L2_1;
	st->l2.l2m.debug = 0;
	st->l2.l2m.userdata = st;
	st->l2.l2m.userint = 0;
	st->l2.l2m.printdebug = l2m_debug;
	strcpy(st->l2.debug_id, debug_id);
	FsmInitTimer(&st->l2.l2m, &st->l2.t200);
	FsmInitTimer(&st->l2.l2m, &st->l2.t203);
}

static void
transl2_l3l2(struct PStack *st, int pr, void *arg)
{
	switch (pr) {
		case (DL_DATA | REQUEST):
		case (DL_UNIT_DATA | REQUEST):
			st->l2.l2l1(st, PH_DATA | REQUEST, arg);
			break;
		case (DL_ESTABLISH | REQUEST):
			st->l2.l2l1(st, PH_ACTIVATE | REQUEST, NULL);
			break;
		case (DL_RELEASE | REQUEST):
			st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
			break;
	}
}

void
setstack_transl2(struct PStack *st)
{
	st->l3.l3l2 = transl2_l3l2;
}

void
releasestack_transl2(struct PStack *st)
{
}

int __init
Isdnl2New(void)
{
	l2fsm.state_count = L2_STATE_COUNT;
	l2fsm.event_count = L2_EVENT_COUNT;
	l2fsm.strEvent = strL2Event;
	l2fsm.strState = strL2State;
	return FsmNew(&l2fsm, L2FnList, L2_FN_COUNT);
}

void
Isdnl2Free(void)
{
	FsmFree(&l2fsm);
}