isdnl2.c 42 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845
  1. /* $Id: isdnl2.c,v 2.30.2.4 2004/02/11 13:21:34 keil Exp $
  2. *
  3. * Author Karsten Keil
  4. * based on the teles driver from Jan den Ouden
  5. * Copyright by Karsten Keil <keil@isdn4linux.de>
  6. *
  7. * This software may be used and distributed according to the terms
  8. * of the GNU General Public License, incorporated herein by reference.
  9. *
  10. * For changes and modifications please read
  11. * Documentation/isdn/HiSax.cert
  12. *
  13. * Thanks to Jan den Ouden
  14. * Fritz Elfert
  15. *
  16. */
  17. #include <linux/init.h>
  18. #include "hisax.h"
  19. #include "isdnl2.h"
/* Exported revision string, shown in driver debug/version output. */
const char *l2_revision = "$Revision: 2.30.2.4 $";

static void l2m_debug(struct FsmInst *fi, char *fmt, ...);

/* One FSM description shared by all layer-2 instances. */
static struct Fsm l2fsm;

/* Layer-2 FSM states.
 * NOTE(review): handler names below (l2_st5_dm_release, l2_st7_got_super,
 * ...) suggest these track the Q.921 data-link states (5 = awaiting
 * establishment, 7 = multiple-frame established, 8 = timer recovery) --
 * confirm against the FSM tables / Q.921. */
enum {
	ST_L2_1,
	ST_L2_2,
	ST_L2_3,
	ST_L2_4,
	ST_L2_5,
	ST_L2_6,
	ST_L2_7,
	ST_L2_8,
};

#define L2_STATE_COUNT (ST_L2_8+1)

/* Printable names for the states above, indexed by state number. */
static char *strL2State[] =
{
	"ST_L2_1",
	"ST_L2_2",
	"ST_L2_3",
	"ST_L2_4",
	"ST_L2_5",
	"ST_L2_6",
	"ST_L2_7",
	"ST_L2_8",
};

/* Layer-2 FSM events: received frame types, requests from layer 3,
 * TEI management, timer expiry and flow-control changes. */
enum {
	EV_L2_UI,
	EV_L2_SABME,
	EV_L2_DISC,
	EV_L2_DM,
	EV_L2_UA,
	EV_L2_FRMR,
	EV_L2_SUPER,
	EV_L2_I,
	EV_L2_DL_DATA,
	EV_L2_ACK_PULL,
	EV_L2_DL_UNIT_DATA,
	EV_L2_DL_ESTABLISH_REQ,
	EV_L2_DL_RELEASE_REQ,
	EV_L2_MDL_ASSIGN,
	EV_L2_MDL_REMOVE,
	EV_L2_MDL_ERROR,
	EV_L1_DEACTIVATE,
	EV_L2_T200,
	EV_L2_T203,
	EV_L2_SET_OWN_BUSY,
	EV_L2_CLEAR_OWN_BUSY,
	EV_L2_FRAME_ERROR,
};

#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1)

/* Printable names for the events above, indexed by event number. */
static char *strL2Event[] =
{
	"EV_L2_UI",
	"EV_L2_SABME",
	"EV_L2_DISC",
	"EV_L2_DM",
	"EV_L2_UA",
	"EV_L2_FRMR",
	"EV_L2_SUPER",
	"EV_L2_I",
	"EV_L2_DL_DATA",
	"EV_L2_ACK_PULL",
	"EV_L2_DL_UNIT_DATA",
	"EV_L2_DL_ESTABLISH_REQ",
	"EV_L2_DL_RELEASE_REQ",
	"EV_L2_MDL_ASSIGN",
	"EV_L2_MDL_REMOVE",
	"EV_L2_MDL_ERROR",
	"EV_L1_DEACTIVATE",
	"EV_L2_T200",
	"EV_L2_T203",
	"EV_L2_SET_OWN_BUSY",
	"EV_L2_CLEAR_OWN_BUSY",
	"EV_L2_FRAME_ERROR",
};

/* forward declaration -- definition below */
static int l2addrsize(struct Layer2 *l2);
  96. static void
  97. set_peer_busy(struct Layer2 *l2) {
  98. test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
  99. if (!skb_queue_empty(&l2->i_queue) ||
  100. !skb_queue_empty(&l2->ui_queue))
  101. test_and_set_bit(FLG_L2BLOCK, &l2->flag);
  102. }
/* Clear the peer-busy condition; the L2BLOCK gate is lifted only if
 * peer-busy was actually set. */
static void
clear_peer_busy(struct Layer2 *l2) {
	if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
		test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
}
  108. static void
  109. InitWin(struct Layer2 *l2)
  110. {
  111. int i;
  112. for (i = 0; i < MAX_WINDOW; i++)
  113. l2->windowar[i] = NULL;
  114. }
  115. static int
  116. freewin1(struct Layer2 *l2)
  117. {
  118. int i, cnt = 0;
  119. for (i = 0; i < MAX_WINDOW; i++) {
  120. if (l2->windowar[i]) {
  121. cnt++;
  122. dev_kfree_skb(l2->windowar[i]);
  123. l2->windowar[i] = NULL;
  124. }
  125. }
  126. return cnt;
  127. }
/* Convenience wrapper: free the tx window of a stack's L2 instance. */
static inline void
freewin(struct PStack *st)
{
	freewin1(&st->l2);
}
  133. static void
  134. ReleaseWin(struct Layer2 *l2)
  135. {
  136. int cnt;
  137. if((cnt = freewin1(l2)))
  138. printk(KERN_WARNING "isdl2 freed %d skbuffs in release\n", cnt);
  139. }
  140. static inline unsigned int
  141. cansend(struct PStack *st)
  142. {
  143. unsigned int p1;
  144. if(test_bit(FLG_MOD128, &st->l2.flag))
  145. p1 = (st->l2.vs - st->l2.va) % 128;
  146. else
  147. p1 = (st->l2.vs - st->l2.va) % 8;
  148. return ((p1 < st->l2.window) && !test_bit(FLG_PEER_BUSY, &st->l2.flag));
  149. }
/* Reset all exception conditions (ack pending, REJ exception, own
 * and peer receiver-busy) -- used when the link is (re)established. */
static inline void
clear_exception(struct Layer2 *l2)
{
	test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
	test_and_clear_bit(FLG_REJEXC, &l2->flag);
	test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
	clear_peer_busy(l2);
}
  158. static inline int
  159. l2headersize(struct Layer2 *l2, int ui)
  160. {
  161. return (((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
  162. (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1));
  163. }
  164. inline int
  165. l2addrsize(struct Layer2 *l2)
  166. {
  167. return (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
  168. }
  169. static int
  170. sethdraddr(struct Layer2 *l2, u_char * header, int rsp)
  171. {
  172. u_char *ptr = header;
  173. int crbit = rsp;
  174. if (test_bit(FLG_LAPD, &l2->flag)) {
  175. *ptr++ = (l2->sap << 2) | (rsp ? 2 : 0);
  176. *ptr++ = (l2->tei << 1) | 1;
  177. return (2);
  178. } else {
  179. if (test_bit(FLG_ORIG, &l2->flag))
  180. crbit = !crbit;
  181. if (crbit)
  182. *ptr++ = 1;
  183. else
  184. *ptr++ = 3;
  185. return (1);
  186. }
  187. }
/* Hand a supervisory/unnumbered frame down to layer 1; for LAPB the
 * frame length is accounted in the B-channel transmit counter. */
static inline void
enqueue_super(struct PStack *st,
	      struct sk_buff *skb)
{
	if (test_bit(FLG_LAPB, &st->l2.flag))
		st->l1.bcs->tx_cnt += skb->len;
	st->l2.l2l1(st, PH_DATA | REQUEST, skb);
}

/* UI frames go down the same path as supervisory frames */
#define enqueue_ui(a, b) enqueue_super(a, b)
  197. static inline int
  198. IsUI(u_char * data)
  199. {
  200. return ((data[0] & 0xef) == UI);
  201. }
  202. static inline int
  203. IsUA(u_char * data)
  204. {
  205. return ((data[0] & 0xef) == UA);
  206. }
  207. static inline int
  208. IsDM(u_char * data)
  209. {
  210. return ((data[0] & 0xef) == DM);
  211. }
  212. static inline int
  213. IsDISC(u_char * data)
  214. {
  215. return ((data[0] & 0xef) == DISC);
  216. }
  217. static inline int
  218. IsSFrame(u_char * data, struct PStack *st)
  219. {
  220. register u_char d = *data;
  221. if (!test_bit(FLG_MOD128, &st->l2.flag))
  222. d &= 0xf;
  223. return(((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c));
  224. }
  225. static inline int
  226. IsSABME(u_char * data, struct PStack *st)
  227. {
  228. u_char d = data[0] & ~0x10;
  229. return (test_bit(FLG_MOD128, &st->l2.flag) ? d == SABME : d == SABM);
  230. }
  231. static inline int
  232. IsREJ(u_char * data, struct PStack *st)
  233. {
  234. return (test_bit(FLG_MOD128, &st->l2.flag) ? data[0] == REJ : (data[0] & 0xf) == REJ);
  235. }
  236. static inline int
  237. IsFRMR(u_char * data)
  238. {
  239. return ((data[0] & 0xef) == FRMR);
  240. }
  241. static inline int
  242. IsRNR(u_char * data, struct PStack *st)
  243. {
  244. return (test_bit(FLG_MOD128, &st->l2.flag) ? data[0] == RNR : (data[0] & 0xf) == RNR);
  245. }
  246. static int
  247. iframe_error(struct PStack *st, struct sk_buff *skb)
  248. {
  249. int i = l2addrsize(&st->l2) + (test_bit(FLG_MOD128, &st->l2.flag) ? 2 : 1);
  250. int rsp = *skb->data & 0x2;
  251. if (test_bit(FLG_ORIG, &st->l2.flag))
  252. rsp = !rsp;
  253. if (rsp)
  254. return 'L';
  255. if (skb->len < i)
  256. return 'N';
  257. if ((skb->len - i) > st->l2.maxlen)
  258. return 'O';
  259. return 0;
  260. }
  261. static int
  262. super_error(struct PStack *st, struct sk_buff *skb)
  263. {
  264. if (skb->len != l2addrsize(&st->l2) +
  265. (test_bit(FLG_MOD128, &st->l2.flag) ? 2 : 1))
  266. return 'N';
  267. return 0;
  268. }
  269. static int
  270. unnum_error(struct PStack *st, struct sk_buff *skb, int wantrsp)
  271. {
  272. int rsp = (*skb->data & 0x2) >> 1;
  273. if (test_bit(FLG_ORIG, &st->l2.flag))
  274. rsp = !rsp;
  275. if (rsp != wantrsp)
  276. return 'L';
  277. if (skb->len != l2addrsize(&st->l2) + 1)
  278. return 'N';
  279. return 0;
  280. }
  281. static int
  282. UI_error(struct PStack *st, struct sk_buff *skb)
  283. {
  284. int rsp = *skb->data & 0x2;
  285. if (test_bit(FLG_ORIG, &st->l2.flag))
  286. rsp = !rsp;
  287. if (rsp)
  288. return 'L';
  289. if (skb->len > st->l2.maxlen + l2addrsize(&st->l2) + 1)
  290. return 'O';
  291. return 0;
  292. }
  293. static int
  294. FRMR_error(struct PStack *st, struct sk_buff *skb)
  295. {
  296. int headers = l2addrsize(&st->l2) + 1;
  297. u_char *datap = skb->data + headers;
  298. int rsp = *skb->data & 0x2;
  299. if (test_bit(FLG_ORIG, &st->l2.flag))
  300. rsp = !rsp;
  301. if (!rsp)
  302. return 'L';
  303. if (test_bit(FLG_MOD128, &st->l2.flag)) {
  304. if (skb->len < headers + 5)
  305. return 'N';
  306. else
  307. l2m_debug(&st->l2.l2m, "FRMR information %2x %2x %2x %2x %2x",
  308. datap[0], datap[1], datap[2],
  309. datap[3], datap[4]);
  310. } else {
  311. if (skb->len < headers + 3)
  312. return 'N';
  313. else
  314. l2m_debug(&st->l2.l2m, "FRMR information %2x %2x %2x",
  315. datap[0], datap[1], datap[2]);
  316. }
  317. return 0;
  318. }
  319. static unsigned int
  320. legalnr(struct PStack *st, unsigned int nr)
  321. {
  322. struct Layer2 *l2 = &st->l2;
  323. if(test_bit(FLG_MOD128, &l2->flag))
  324. return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
  325. else
  326. return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
  327. }
/*
 * Advance the acknowledge state variable V(A) up to nr, releasing
 * each acknowledged I-frame from the retransmission window.
 * The lock is dropped around lli_writewakeup() because that callback
 * may re-enter layer 2; the window slot (l2->sow) has already been
 * advanced and cleared before the lock is released.
 */
static void
setva(struct PStack *st, unsigned int nr)
{
	struct Layer2 *l2 = &st->l2;
	int len;
	u_long flags;

	spin_lock_irqsave(&l2->lock, flags);
	while (l2->va != nr) {
		(l2->va)++;
		if(test_bit(FLG_MOD128, &l2->flag))
			l2->va %= 128;
		else
			l2->va %= 8;
		len = l2->windowar[l2->sow]->len;
		/* PACKET_NOACK frames must not trigger a write wakeup */
		if (PACKET_NOACK == l2->windowar[l2->sow]->pkt_type)
			len = -1;
		dev_kfree_skb(l2->windowar[l2->sow]);
		l2->windowar[l2->sow] = NULL;
		l2->sow = (l2->sow + 1) % l2->window;
		spin_unlock_irqrestore(&l2->lock, flags);
		if (test_bit(FLG_LLI_L2WAKEUP, &st->lli.flag) && (len >=0))
			lli_writewakeup(st, len);
		spin_lock_irqsave(&l2->lock, flags);
	}
	spin_unlock_irqrestore(&l2->lock, flags);
}
  354. static void
  355. send_uframe(struct PStack *st, u_char cmd, u_char cr)
  356. {
  357. struct sk_buff *skb;
  358. u_char tmp[MAX_HEADER_LEN];
  359. int i;
  360. i = sethdraddr(&st->l2, tmp, cr);
  361. tmp[i++] = cmd;
  362. if (!(skb = alloc_skb(i, GFP_ATOMIC))) {
  363. printk(KERN_WARNING "isdl2 can't alloc sbbuff for send_uframe\n");
  364. return;
  365. }
  366. memcpy(skb_put(skb, i), tmp, i);
  367. enqueue_super(st, skb);
  368. }
  369. static inline u_char
  370. get_PollFlag(struct PStack * st, struct sk_buff * skb)
  371. {
  372. return (skb->data[l2addrsize(&(st->l2))] & 0x10);
  373. }
  374. static inline u_char
  375. get_PollFlagFree(struct PStack *st, struct sk_buff *skb)
  376. {
  377. u_char PF;
  378. PF = get_PollFlag(st, skb);
  379. dev_kfree_skb(skb);
  380. return (PF);
  381. }
/* Start retransmission timer T200 and mark it as running.
 * i is a caller-id passed to the timer code for debugging. */
static inline void
start_t200(struct PStack *st, int i)
{
	FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &st->l2.flag);
}
/* Restart T200 (even if already running) and mark it as running. */
static inline void
restart_t200(struct PStack *st, int i)
{
	FsmRestartTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &st->l2.flag);
}
  394. static inline void
  395. stop_t200(struct PStack *st, int i)
  396. {
  397. if(test_and_clear_bit(FLG_T200_RUN, &st->l2.flag))
  398. FsmDelTimer(&st->l2.t200, i);
  399. }
  400. static inline void
  401. st5_dl_release_l2l3(struct PStack *st)
  402. {
  403. int pr;
  404. if(test_and_clear_bit(FLG_PEND_REL, &st->l2.flag))
  405. pr = DL_RELEASE | CONFIRM;
  406. else
  407. pr = DL_RELEASE | INDICATION;
  408. st->l2.l2l3(st, pr, NULL);
  409. }
/* Signal DL_RELEASE (| CONFIRM or | INDICATION, per f) to layer 3;
 * for LAPB the physical B-channel is deactivated first. */
static inline void
lapb_dl_release_l2l3(struct PStack *st, int f)
{
	if (test_bit(FLG_LAPB, &st->l2.flag))
		st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
	st->l2.l2l3(st, DL_RELEASE | f, NULL);
}
/*
 * Start link establishment: send SABM(E) with the P bit (0x10) set,
 * reset the retry counter, stop T203, (re)start T200, clear any
 * pending release, flush the tx window and enter ST_L2_5.
 */
static void
establishlink(struct FsmInst *fi)
{
	struct PStack *st = fi->userdata;
	u_char cmd;

	clear_exception(&st->l2);
	st->l2.rc = 0;
	cmd = (test_bit(FLG_MOD128, &st->l2.flag) ? SABME : SABM) | 0x10;
	send_uframe(st, cmd, CMD);
	FsmDelTimer(&st->l2.t203, 1);
	restart_t200(st, 1);
	test_and_clear_bit(FLG_PEND_REL, &st->l2.flag);
	freewin(st);
	FsmChangeState(fi, ST_L2_5);
}
  432. static void
  433. l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
  434. {
  435. struct sk_buff *skb = arg;
  436. struct PStack *st = fi->userdata;
  437. if (get_PollFlagFree(st, skb))
  438. st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'C');
  439. else
  440. st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'D');
  441. }
/* DM received: with F=1 report MDL-ERROR 'B'; with F=0 report 'E'
 * and re-establish the link (clearing L3_INIT: not an L3 request). */
static void
l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct PStack *st = fi->userdata;

	if (get_PollFlagFree(st, skb))
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'B');
	else {
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'E');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
	}
}
/* DM in ST_L2_8: report 'B' (F=1) or 'E' (F=0) and -- unlike
 * l2_mdl_error_dm -- re-establish the link in both cases. */
static void
l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct PStack *st = fi->userdata;

	if (get_PollFlagFree(st, skb))
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'B');
	else {
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'E');
	}
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
}
/* Unconditional transition to ST_L2_3. */
static void
l2_go_st3(struct FsmInst *fi, int event, void *arg)
{
	FsmChangeState(fi, ST_L2_3);
}
/* Enter ST_L2_3 and ask the TEI manager for a TEI assignment. */
static void
l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	FsmChangeState(fi, ST_L2_3);
	st->l2.l2tei(st, MDL_ASSIGN | INDICATION, NULL);
}
/* UI frame while no TEI is assigned: queue it, move to ST_L2_2 and
 * request a TEI assignment; the frame is sent once a TEI exists. */
static void
l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_tail(&st->l2.ui_queue, skb);
	FsmChangeState(fi, ST_L2_2);
	st->l2.l2tei(st, MDL_ASSIGN | INDICATION, NULL);
}
/* Queue a UI frame for later transmission. */
static void
l2_queue_ui(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_tail(&st->l2.ui_queue, skb);
}
  496. static void
  497. tx_ui(struct PStack *st)
  498. {
  499. struct sk_buff *skb;
  500. u_char header[MAX_HEADER_LEN];
  501. int i;
  502. i = sethdraddr(&(st->l2), header, CMD);
  503. header[i++] = UI;
  504. while ((skb = skb_dequeue(&st->l2.ui_queue))) {
  505. memcpy(skb_push(skb, i), header, i);
  506. enqueue_ui(st, skb);
  507. }
  508. }
/* Queue the UI frame and immediately flush the whole UI queue. */
static void
l2_send_ui(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_tail(&st->l2.ui_queue, skb);
	tx_ui(st);
}
/* Deliver a received UI frame to layer 3 as DL_UNIT_DATA, after
 * stripping the L2 header (UI uses the 1-octet control field). */
static void
l2_got_ui(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	skb_pull(skb, l2headersize(&st->l2, 1));
	st->l2.l2l3(st, DL_UNIT_DATA | INDICATION, skb);
/*	^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
 *	in states 1-3 for broadcast
 */
}
/* Layer-3 establish request: start SABM(E) procedure and remember
 * that layer 3 initiated it (for the CONFIRM on completion). */
static void
l2_establish(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	establishlink(fi);
	test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
}
/* Establish request while establishment is already in progress:
 * discard queued I-frames, mark L3-initiated, cancel pending release. */
static void
l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
	test_and_clear_bit(FLG_PEND_REL, &st->l2.flag);
}
/* Layer-3 establish request on an active link: discard queued
 * I-frames and re-run the establishment procedure. */
static void
l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	establishlink(fi);
	test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
}
/* Release request with nothing to tear down: confirm immediately. */
static void
l2_release(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
}
/* Remember that L3 requested a release while establishment is still
 * pending; acted upon when the establishment attempt resolves. */
static void
l2_pend_rel(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	test_and_set_bit(FLG_PEND_REL, &st->l2.flag);
}
/*
 * Start link release: drop outstanding I-frames and the tx window,
 * send DISC with the P bit set, stop T203, (re)start T200 and enter
 * ST_L2_6 (awaiting release).
 */
static void
l2_disconnect(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);
	freewin(st);
	FsmChangeState(fi, ST_L2_6);
	st->l2.rc = 0;
	send_uframe(st, DISC | 0x10, CMD);
	FsmDelTimer(&st->l2.t203, 1);
	restart_t200(st, 2);
}
/*
 * SABM(E) received: answer UA (echoing the P/F bit), reset all
 * sequence state variables, enter ST_L2_7, start T203 and indicate
 * DL_ESTABLISH to layer 3.
 */
static void
l2_start_multi(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
	clear_exception(&st->l2);
	st->l2.vs = 0;
	st->l2.va = 0;
	st->l2.vr = 0;
	st->l2.sow = 0;
	FsmChangeState(fi, ST_L2_7);
	FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 3);
	st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL);
}
/* Answer UA, echoing the received P/F bit; the skb is consumed. */
static void
l2_send_UA(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
}
/* Answer DM, echoing the received P/F bit; the skb is consumed. */
static void
l2_send_DM(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	send_uframe(st, DM | get_PollFlagFree(st, skb), RSP);
}
/*
 * SABM(E) received while already established (peer re-establishes):
 * answer UA, report MDL-ERROR 'F', reset sequence state and restart
 * T203.  DL_ESTABLISH is indicated only if unacknowledged I-frames
 * were outstanding (their queue is then purged).  If we were in
 * ST_L2_7/8 and frames remain queued, kick the transmitter.
 */
static void
l2_restart_multi(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int est = 0, state;

	state = fi->state;
	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
	st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'F');
	if (st->l2.vs != st->l2.va) {
		skb_queue_purge(&st->l2.i_queue);
		est = 1;
	}
	clear_exception(&st->l2);
	st->l2.vs = 0;
	st->l2.va = 0;
	st->l2.vr = 0;
	st->l2.sow = 0;
	FsmChangeState(fi, ST_L2_7);
	stop_t200(st, 3);
	FsmRestartTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 3);
	if (est)
		st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL);
	if ((ST_L2_7==state) || (ST_L2_8 == state))
		if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
}
/*
 * DISC received while established: stop both timers, answer UA,
 * discard all queued and windowed I-frames and report DL_RELEASE
 * (INDICATION) upward; back to ST_L2_4.
 */
static void
l2_stop_multi(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	FsmChangeState(fi, ST_L2_4);
	FsmDelTimer(&st->l2.t203, 3);
	stop_t200(st, 4);
	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
	skb_queue_purge(&st->l2.i_queue);
	freewin(st);
	lapb_dl_release_l2l3(st, INDICATION);
}
/*
 * UA received while awaiting establishment.  With F=0 this is an
 * MDL-ERROR ('C'/'D') and nothing else happens.  Otherwise the link
 * comes up: DL_ESTABLISH is confirmed if L3 initiated (FLG_L3_INIT),
 * or indicated if sequence state was lost; T200 stops, T203 starts,
 * state becomes ST_L2_7 and the transmitter is kicked if needed.
 * NOTE(review): when FLG_PEND_REL is set, l2_disconnect() is called
 * but execution still falls through to the establishment code below
 * (no return) -- verify this is the intended behaviour.
 */
static void
l2_connected(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int pr=-1;

	if (!get_PollFlag(st, skb)) {
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	if (test_and_clear_bit(FLG_PEND_REL, &st->l2.flag))
		l2_disconnect(fi, event, arg);
	if (test_and_clear_bit(FLG_L3_INIT, &st->l2.flag)) {
		pr = DL_ESTABLISH | CONFIRM;
	} else if (st->l2.vs != st->l2.va) {
		skb_queue_purge(&st->l2.i_queue);
		pr = DL_ESTABLISH | INDICATION;
	}
	stop_t200(st, 5);
	st->l2.vr = 0;
	st->l2.vs = 0;
	st->l2.va = 0;
	st->l2.sow = 0;
	FsmChangeState(fi, ST_L2_7);
	FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 4);
	if (pr != -1)
		st->l2.l2l3(st, pr, NULL);
	if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
}
/* UA received while awaiting release: with F=1 complete the release
 * (stop T200, confirm to L3, back to ST_L2_4); with F=0 it is an
 * MDL-ERROR ('C'/'D'). */
static void
l2_released(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	if (!get_PollFlag(st, skb)) {
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	stop_t200(st, 6);
	lapb_dl_release_l2l3(st, CONFIRM);
	FsmChangeState(fi, ST_L2_4);
}
/* If the received frame's P/F bit is clear, re-establish the link
 * marked as L3-initiated; the skb is consumed either way. */
static void
l2_reestablish(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	if (!get_PollFlagFree(st, skb)) {
		establishlink(fi);
		test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
	}
}
/*
 * DM with F=1 while awaiting establishment: abort -- stop T200,
 * discard queued I-frames unless L3 initiated, deactivate LAPB L1,
 * signal the release upward and return to ST_L2_4.  F=0 is ignored.
 */
static void
l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	if (get_PollFlagFree(st, skb)) {
		stop_t200(st, 7);
		if (!test_bit(FLG_L3_INIT, &st->l2.flag))
			skb_queue_purge(&st->l2.i_queue);
		if (test_bit(FLG_LAPB, &st->l2.flag))
			st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
		st5_dl_release_l2l3(st);
		FsmChangeState(fi, ST_L2_4);
	}
}
/* DM with F=1 while awaiting release: treat as release completion --
 * stop T200, confirm to L3 and return to ST_L2_4.  F=0 is ignored. */
static void
l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	if (get_PollFlagFree(st, skb)) {
		stop_t200(st, 8);
		lapb_dl_release_l2l3(st, CONFIRM);
		FsmChangeState(fi, ST_L2_4);
	}
}
  725. static inline void
  726. enquiry_cr(struct PStack *st, u_char typ, u_char cr, u_char pf)
  727. {
  728. struct sk_buff *skb;
  729. struct Layer2 *l2;
  730. u_char tmp[MAX_HEADER_LEN];
  731. int i;
  732. l2 = &st->l2;
  733. i = sethdraddr(l2, tmp, cr);
  734. if (test_bit(FLG_MOD128, &l2->flag)) {
  735. tmp[i++] = typ;
  736. tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
  737. } else
  738. tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
  739. if (!(skb = alloc_skb(i, GFP_ATOMIC))) {
  740. printk(KERN_WARNING "isdl2 can't alloc sbbuff for enquiry_cr\n");
  741. return;
  742. }
  743. memcpy(skb_put(skb, i), tmp, i);
  744. enqueue_super(st, skb);
  745. }
  746. static inline void
  747. enquiry_response(struct PStack *st)
  748. {
  749. if (test_bit(FLG_OWN_BUSY, &st->l2.flag))
  750. enquiry_cr(st, RNR, RSP, 1);
  751. else
  752. enquiry_cr(st, RR, RSP, 1);
  753. test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
  754. }
  755. static inline void
  756. transmit_enquiry(struct PStack *st)
  757. {
  758. if (test_bit(FLG_OWN_BUSY, &st->l2.flag))
  759. enquiry_cr(st, RNR, CMD, 1);
  760. else
  761. enquiry_cr(st, RR, CMD, 1);
  762. test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
  763. start_t200(st, 9);
  764. }
/* N(R) sequence error: report MDL-ERROR 'J' and re-establish the
 * link (not marked as L3-initiated). */
static void
nrerrorrecovery(struct FsmInst *fi)
{
	struct PStack *st = fi->userdata;

	st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'J');
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
}
/*
 * Move all I-frames from V(S) back down to nr out of the tx window
 * onto the head of the i_queue (walking V(S) backwards preserves
 * the original order) and kick the transmitter.  Used for REJ /
 * timer-recovery retransmission.
 */
static void
invoke_retransmission(struct PStack *st, unsigned int nr)
{
	struct Layer2 *l2 = &st->l2;
	u_int p1;
	u_long flags;

	spin_lock_irqsave(&l2->lock, flags);
	if (l2->vs != nr) {
		while (l2->vs != nr) {
			(l2->vs)--;
			if(test_bit(FLG_MOD128, &l2->flag)) {
				l2->vs %= 128;
				p1 = (l2->vs - l2->va) % 128;
			} else {
				l2->vs %= 8;
				p1 = (l2->vs - l2->va) % 8;
			}
			/* map the sequence offset to a window slot */
			p1 = (p1 + l2->sow) % l2->window;
			if (test_bit(FLG_LAPB, &l2->flag))
				st->l1.bcs->tx_cnt += l2->windowar[p1]->len + l2headersize(l2, 0);
			skb_queue_head(&l2->i_queue, l2->windowar[p1]);
			l2->windowar[p1] = NULL;
		}
		spin_unlock_irqrestore(&l2->lock, flags);
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
		return;
	}
	spin_unlock_irqrestore(&l2->lock, flags);
}
/*
 * Supervisory frame (RR/RNR/REJ) received in ST_L2_7.
 * Updates the peer-busy condition, answers an enquiry (P=1 command)
 * or flags MDL-ERROR 'A' (P=1 response), then handles the N(R):
 * REJ triggers retransmission, RR acknowledging everything restarts
 * T203, any other progress restarts T200; an illegal N(R) starts
 * error recovery.
 */
static void
l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, typ = RR;
	unsigned int nr;
	struct Layer2 *l2 = &st->l2;

	rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, st)) {
		set_peer_busy(l2);
		typ = RNR;
	} else
		clear_peer_busy(l2);
	if (IsREJ(skb->data, st))
		typ = REJ;
	/* extract P/F bit and N(R) depending on the modulus */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);
	if (PollFlag) {
		if (rsp)
			st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'A');
		else
			enquiry_response(st);
	}
	if (legalnr(st, nr)) {
		if (typ == REJ) {
			setva(st, nr);
			invoke_retransmission(st, nr);
			stop_t200(st, 10);
			if (FsmAddTimer(&st->l2.t203, st->l2.T203,
					EV_L2_T203, NULL, 6))
				l2m_debug(&st->l2.l2m, "Restart T203 ST7 REJ");
		} else if ((nr == l2->vs) && (typ == RR)) {
			/* everything acknowledged: idle, supervise with T203 */
			setva(st, nr);
			stop_t200(st, 11);
			FsmRestartTimer(&st->l2.t203, st->l2.T203,
					EV_L2_T203, NULL, 7);
		} else if ((l2->va != nr) || (typ == RNR)) {
			/* partial ack or peer busy: keep T200 running */
			setva(st, nr);
			if(typ != RR) FsmDelTimer(&st->l2.t203, 9);
			restart_t200(st, 12);
		}
		if (!skb_queue_empty(&st->l2.i_queue) && (typ == RR))
			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
	} else
		nrerrorrecovery(fi);
}
  858. static void
  859. l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
  860. {
  861. struct PStack *st = fi->userdata;
  862. struct sk_buff *skb = arg;
  863. if (test_bit(FLG_LAPB, &st->l2.flag))
  864. st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
  865. if (!test_bit(FLG_L3_INIT, &st->l2.flag))
  866. skb_queue_tail(&st->l2.i_queue, skb);
  867. else
  868. dev_kfree_skb(skb);
  869. }
  870. static void
  871. l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
  872. {
  873. struct PStack *st = fi->userdata;
  874. struct sk_buff *skb = arg;
  875. if (test_bit(FLG_LAPB, &st->l2.flag))
  876. st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
  877. skb_queue_tail(&st->l2.i_queue, skb);
  878. st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
  879. }
  880. static void
  881. l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
  882. {
  883. struct PStack *st = fi->userdata;
  884. struct sk_buff *skb = arg;
  885. if (test_bit(FLG_LAPB, &st->l2.flag))
  886. st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
  887. skb_queue_tail(&st->l2.i_queue, skb);
  888. }
static void
l2_got_iframe(struct FsmInst *fi, int event, void *arg)
{
	/*
	 * I-frame received in state 7 or 8.  Checks N(S) against V(R),
	 * delivers in-sequence payload to L3, sends REJ on a sequence
	 * gap, and processes the piggy-backed N(R) acknowledgement.
	 */
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	struct Layer2 *l2 = &(st->l2);
	int PollFlag, ns, i;
	unsigned int nr;

	i = l2addrsize(l2);
	/* control field: 2 octets for mod-128, 1 octet for mod-8 */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
		ns = skb->data[i] >> 1;
		nr = (skb->data[i + 1] >> 1) & 0x7f;
	} else {
		PollFlag = (skb->data[i] & 0x10);
		ns = (skb->data[i] >> 1) & 0x7;
		nr = (skb->data[i] >> 5) & 0x7;
	}
	if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
		/* own receiver busy: drop payload, but still answer polls */
		dev_kfree_skb(skb);
		if (PollFlag)
			enquiry_response(st);
	} else if (l2->vr == ns) {
		/* in sequence: advance V(R) modulo the window size */
		(l2->vr)++;
		if (test_bit(FLG_MOD128, &l2->flag))
			l2->vr %= 128;
		else
			l2->vr %= 8;
		test_and_clear_bit(FLG_REJEXC, &l2->flag);
		if (PollFlag)
			enquiry_response(st);
		else
			test_and_set_bit(FLG_ACK_PEND, &l2->flag);
		skb_pull(skb, l2headersize(l2, 0));
		st->l2.l2l3(st, DL_DATA | INDICATION, skb);
	} else {
		/* n(s)!=v(r) */
		dev_kfree_skb(skb);
		if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
			/* REJ already outstanding: only answer polls */
			if (PollFlag)
				enquiry_response(st);
		} else {
			enquiry_cr(st, REJ, RSP, PollFlag);
			test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
		}
	}
	/* piggy-backed N(R) acknowledgement handling */
	if (legalnr(st, nr)) {
		if (!test_bit(FLG_PEER_BUSY, &st->l2.flag) && (fi->state == ST_L2_7)) {
			if (nr == st->l2.vs) {
				/* everything outstanding is acknowledged */
				stop_t200(st, 13);
				FsmRestartTimer(&st->l2.t203, st->l2.T203,
						EV_L2_T203, NULL, 7);
			} else if (nr != st->l2.va)
				restart_t200(st, 14);
		}
		setva(st, nr);
	} else {
		nrerrorrecovery(fi);
		return;
	}
	if (!skb_queue_empty(&st->l2.i_queue) && (fi->state == ST_L2_7))
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
	if (test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag))
		enquiry_cr(st, RR, RSP, 0);
}
  953. static void
  954. l2_got_tei(struct FsmInst *fi, int event, void *arg)
  955. {
  956. struct PStack *st = fi->userdata;
  957. st->l2.tei = (long) arg;
  958. if (fi->state == ST_L2_3) {
  959. establishlink(fi);
  960. test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
  961. } else
  962. FsmChangeState(fi, ST_L2_4);
  963. if (!skb_queue_empty(&st->l2.ui_queue))
  964. tx_ui(st);
  965. }
static void
l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
{
	/*
	 * T200 expiry while awaiting establishment (state 5).
	 * Retransmit SABM(E) up to N200 times, then give up.
	 */
	struct PStack *st = fi->userdata;

	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		/* D-channel busy: rearm T200 without counting a retry */
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
	} else if (st->l2.rc == st->l2.N200) {
		/* retry limit reached: abandon establishment */
		FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
		skb_queue_purge(&st->l2.i_queue);
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'G');
		if (test_bit(FLG_LAPB, &st->l2.flag))
			st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
		st5_dl_release_l2l3(st);
	} else {
		st->l2.rc++;
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
		/* 0x10 sets the P bit on the retransmitted SABM/SABME */
		send_uframe(st, (test_bit(FLG_MOD128, &st->l2.flag) ? SABME : SABM)
			    | 0x10, CMD);
	}
}
  988. static void
  989. l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
  990. {
  991. struct PStack *st = fi->userdata;
  992. if (test_bit(FLG_LAPD, &st->l2.flag) &&
  993. test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
  994. FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
  995. } else if (st->l2.rc == st->l2.N200) {
  996. FsmChangeState(fi, ST_L2_4);
  997. test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
  998. st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'H');
  999. lapb_dl_release_l2l3(st, CONFIRM);
  1000. } else {
  1001. st->l2.rc++;
  1002. FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200,
  1003. NULL, 9);
  1004. send_uframe(st, DISC | 0x10, CMD);
  1005. }
  1006. }
  1007. static void
  1008. l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
  1009. {
  1010. struct PStack *st = fi->userdata;
  1011. if (test_bit(FLG_LAPD, &st->l2.flag) &&
  1012. test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
  1013. FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
  1014. return;
  1015. }
  1016. test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
  1017. st->l2.rc = 0;
  1018. FsmChangeState(fi, ST_L2_8);
  1019. transmit_enquiry(st);
  1020. st->l2.rc++;
  1021. }
  1022. static void
  1023. l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
  1024. {
  1025. struct PStack *st = fi->userdata;
  1026. if (test_bit(FLG_LAPD, &st->l2.flag) &&
  1027. test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
  1028. FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
  1029. return;
  1030. }
  1031. test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
  1032. if (st->l2.rc == st->l2.N200) {
  1033. st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'I');
  1034. establishlink(fi);
  1035. test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
  1036. } else {
  1037. transmit_enquiry(st);
  1038. st->l2.rc++;
  1039. }
  1040. }
  1041. static void
  1042. l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
  1043. {
  1044. struct PStack *st = fi->userdata;
  1045. if (test_bit(FLG_LAPD, &st->l2.flag) &&
  1046. test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
  1047. FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 9);
  1048. return;
  1049. }
  1050. FsmChangeState(fi, ST_L2_8);
  1051. transmit_enquiry(st);
  1052. st->l2.rc = 0;
  1053. }
  1054. static void
  1055. l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
  1056. {
  1057. struct PStack *st = fi->userdata;
  1058. struct sk_buff *skb, *oskb;
  1059. struct Layer2 *l2 = &st->l2;
  1060. u_char header[MAX_HEADER_LEN];
  1061. int i;
  1062. int unsigned p1;
  1063. u_long flags;
  1064. if (!cansend(st))
  1065. return;
  1066. skb = skb_dequeue(&l2->i_queue);
  1067. if (!skb)
  1068. return;
  1069. spin_lock_irqsave(&l2->lock, flags);
  1070. if(test_bit(FLG_MOD128, &l2->flag))
  1071. p1 = (l2->vs - l2->va) % 128;
  1072. else
  1073. p1 = (l2->vs - l2->va) % 8;
  1074. p1 = (p1 + l2->sow) % l2->window;
  1075. if (l2->windowar[p1]) {
  1076. printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
  1077. p1);
  1078. dev_kfree_skb(l2->windowar[p1]);
  1079. }
  1080. l2->windowar[p1] = skb_clone(skb, GFP_ATOMIC);
  1081. i = sethdraddr(&st->l2, header, CMD);
  1082. if (test_bit(FLG_MOD128, &l2->flag)) {
  1083. header[i++] = l2->vs << 1;
  1084. header[i++] = l2->vr << 1;
  1085. l2->vs = (l2->vs + 1) % 128;
  1086. } else {
  1087. header[i++] = (l2->vr << 5) | (l2->vs << 1);
  1088. l2->vs = (l2->vs + 1) % 8;
  1089. }
  1090. spin_unlock_irqrestore(&l2->lock, flags);
  1091. p1 = skb->data - skb->head;
  1092. if (p1 >= i)
  1093. memcpy(skb_push(skb, i), header, i);
  1094. else {
  1095. printk(KERN_WARNING
  1096. "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
  1097. oskb = skb;
  1098. skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
  1099. memcpy(skb_put(skb, i), header, i);
  1100. memcpy(skb_put(skb, oskb->len), oskb->data, oskb->len);
  1101. dev_kfree_skb(oskb);
  1102. }
  1103. st->l2.l2l1(st, PH_PULL | INDICATION, skb);
  1104. test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
  1105. if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
  1106. FsmDelTimer(&st->l2.t203, 13);
  1107. FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 11);
  1108. }
  1109. if (!skb_queue_empty(&l2->i_queue) && cansend(st))
  1110. st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
  1111. }
static void
l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
{
	/*
	 * Supervisory frame received during timer recovery (state 8).
	 * A response with F=1 answers our enquiry: acknowledge, possibly
	 * retransmit, and return to state 7.  Anything else only updates
	 * V(A) / answers a poll.
	 */
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, rnr = 0;
	unsigned int nr;
	struct Layer2 *l2 = &st->l2;

	/* C/R interpretation depends on which side originated the link */
	rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;

	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, st)) {
		set_peer_busy(l2);
		rnr = 1;
	} else
		clear_peer_busy(l2);
	/* P/F bit and N(R) per modulus */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);

	if (rsp && PollFlag) {
		/* this is the answer to our enquiry */
		if (legalnr(st, nr)) {
			if (rnr) {
				restart_t200(st, 15);
			} else {
				stop_t200(st, 16);
				FsmAddTimer(&l2->t203, l2->T203,
					    EV_L2_T203, NULL, 5);
				setva(st, nr);
			}
			invoke_retransmission(st, nr);
			FsmChangeState(fi, ST_L2_7);
			if (!skb_queue_empty(&l2->i_queue) && cansend(st))
				st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
		} else
			nrerrorrecovery(fi);
	} else {
		/* a command with P=1 still has to be answered */
		if (!rsp && PollFlag)
			enquiry_response(st);
		if (legalnr(st, nr)) {
			setva(st, nr);
		} else
			nrerrorrecovery(fi);
	}
}
static void
l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
{
	/*
	 * FRMR (frame reject) received.  If the rejected frame was an
	 * I- or S-frame, or a UA while multiframe established, the
	 * condition is unrecoverable: report MDL error 'K' and
	 * re-establish the link.
	 */
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	skb_pull(skb, l2addrsize(&st->l2) + 1);
	if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */
	    (IsUA(skb->data) && (fi->state == ST_L2_7))) {
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'K');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
	}
	dev_kfree_skb(skb);
}
  1176. static void
  1177. l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
  1178. {
  1179. struct PStack *st = fi->userdata;
  1180. skb_queue_purge(&st->l2.ui_queue);
  1181. st->l2.tei = -1;
  1182. FsmChangeState(fi, ST_L2_1);
  1183. }
  1184. static void
  1185. l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
  1186. {
  1187. struct PStack *st = fi->userdata;
  1188. skb_queue_purge(&st->l2.ui_queue);
  1189. st->l2.tei = -1;
  1190. st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
  1191. FsmChangeState(fi, ST_L2_1);
  1192. }
  1193. static void
  1194. l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
  1195. {
  1196. struct PStack *st = fi->userdata;
  1197. skb_queue_purge(&st->l2.i_queue);
  1198. skb_queue_purge(&st->l2.ui_queue);
  1199. freewin(st);
  1200. st->l2.tei = -1;
  1201. stop_t200(st, 17);
  1202. st5_dl_release_l2l3(st);
  1203. FsmChangeState(fi, ST_L2_1);
  1204. }
  1205. static void
  1206. l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
  1207. {
  1208. struct PStack *st = fi->userdata;
  1209. skb_queue_purge(&st->l2.ui_queue);
  1210. st->l2.tei = -1;
  1211. stop_t200(st, 18);
  1212. st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
  1213. FsmChangeState(fi, ST_L2_1);
  1214. }
  1215. static void
  1216. l2_tei_remove(struct FsmInst *fi, int event, void *arg)
  1217. {
  1218. struct PStack *st = fi->userdata;
  1219. skb_queue_purge(&st->l2.i_queue);
  1220. skb_queue_purge(&st->l2.ui_queue);
  1221. freewin(st);
  1222. st->l2.tei = -1;
  1223. stop_t200(st, 17);
  1224. FsmDelTimer(&st->l2.t203, 19);
  1225. st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
  1226. FsmChangeState(fi, ST_L2_1);
  1227. }
  1228. static void
  1229. l2_st14_persistant_da(struct FsmInst *fi, int event, void *arg)
  1230. {
  1231. struct PStack *st = fi->userdata;
  1232. skb_queue_purge(&st->l2.i_queue);
  1233. skb_queue_purge(&st->l2.ui_queue);
  1234. if (test_and_clear_bit(FLG_ESTAB_PEND, &st->l2.flag))
  1235. st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
  1236. }
  1237. static void
  1238. l2_st5_persistant_da(struct FsmInst *fi, int event, void *arg)
  1239. {
  1240. struct PStack *st = fi->userdata;
  1241. skb_queue_purge(&st->l2.i_queue);
  1242. skb_queue_purge(&st->l2.ui_queue);
  1243. freewin(st);
  1244. stop_t200(st, 19);
  1245. st5_dl_release_l2l3(st);
  1246. FsmChangeState(fi, ST_L2_4);
  1247. }
  1248. static void
  1249. l2_st6_persistant_da(struct FsmInst *fi, int event, void *arg)
  1250. {
  1251. struct PStack *st = fi->userdata;
  1252. skb_queue_purge(&st->l2.ui_queue);
  1253. stop_t200(st, 20);
  1254. st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
  1255. FsmChangeState(fi, ST_L2_4);
  1256. }
  1257. static void
  1258. l2_persistant_da(struct FsmInst *fi, int event, void *arg)
  1259. {
  1260. struct PStack *st = fi->userdata;
  1261. skb_queue_purge(&st->l2.i_queue);
  1262. skb_queue_purge(&st->l2.ui_queue);
  1263. freewin(st);
  1264. stop_t200(st, 19);
  1265. FsmDelTimer(&st->l2.t203, 19);
  1266. st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
  1267. FsmChangeState(fi, ST_L2_4);
  1268. }
  1269. static void
  1270. l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
  1271. {
  1272. struct PStack *st = fi->userdata;
  1273. if(!test_and_set_bit(FLG_OWN_BUSY, &st->l2.flag)) {
  1274. enquiry_cr(st, RNR, RSP, 0);
  1275. test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
  1276. }
  1277. }
  1278. static void
  1279. l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
  1280. {
  1281. struct PStack *st = fi->userdata;
  1282. if(!test_and_clear_bit(FLG_OWN_BUSY, &st->l2.flag)) {
  1283. enquiry_cr(st, RR, RSP, 0);
  1284. test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
  1285. }
  1286. }
  1287. static void
  1288. l2_frame_error(struct FsmInst *fi, int event, void *arg)
  1289. {
  1290. struct PStack *st = fi->userdata;
  1291. st->ma.layer(st, MDL_ERROR | INDICATION, arg);
  1292. }
  1293. static void
  1294. l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
  1295. {
  1296. struct PStack *st = fi->userdata;
  1297. st->ma.layer(st, MDL_ERROR | INDICATION, arg);
  1298. establishlink(fi);
  1299. test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
  1300. }
/*
 * State/event transition table for the layer-2 FSM.
 * Each entry maps (state, event) to its handler; FsmNew() compiles the
 * list into a jump matrix.
 */
static struct FsmNode L2FnList[] __initdata =
{
	/* requests from layer 3 */
	{ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
	{ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
	{ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
	{ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
	{ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
	{ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
	{ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
	{ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
	{ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
	{ST_L2_1, EV_L2_DL_UNIT_DATA, l2_queue_ui_assign},
	{ST_L2_2, EV_L2_DL_UNIT_DATA, l2_queue_ui},
	{ST_L2_3, EV_L2_DL_UNIT_DATA, l2_queue_ui},
	{ST_L2_4, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_5, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_6, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_7, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_8, EV_L2_DL_UNIT_DATA, l2_send_ui},
	/* TEI management events */
	{ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
	{ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
	{ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
	{ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
	{ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
	{ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
	{ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
	/* received U-frames */
	{ST_L2_4, EV_L2_SABME, l2_start_multi},
	{ST_L2_5, EV_L2_SABME, l2_send_UA},
	{ST_L2_6, EV_L2_SABME, l2_send_DM},
	{ST_L2_7, EV_L2_SABME, l2_restart_multi},
	{ST_L2_8, EV_L2_SABME, l2_restart_multi},
	{ST_L2_4, EV_L2_DISC, l2_send_DM},
	{ST_L2_5, EV_L2_DISC, l2_send_DM},
	{ST_L2_6, EV_L2_DISC, l2_send_UA},
	{ST_L2_7, EV_L2_DISC, l2_stop_multi},
	{ST_L2_8, EV_L2_DISC, l2_stop_multi},
	{ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_5, EV_L2_UA, l2_connected},
	{ST_L2_6, EV_L2_UA, l2_released},
	{ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_4, EV_L2_DM, l2_reestablish},
	{ST_L2_5, EV_L2_DM, l2_st5_dm_release},
	{ST_L2_6, EV_L2_DM, l2_st6_dm_release},
	{ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
	{ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
	{ST_L2_1, EV_L2_UI, l2_got_ui},
	{ST_L2_2, EV_L2_UI, l2_got_ui},
	{ST_L2_3, EV_L2_UI, l2_got_ui},
	{ST_L2_4, EV_L2_UI, l2_got_ui},
	{ST_L2_5, EV_L2_UI, l2_got_ui},
	{ST_L2_6, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_UI, l2_got_ui},
	{ST_L2_8, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
	{ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
	/* received S- and I-frames */
	{ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
	{ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
	{ST_L2_7, EV_L2_I, l2_got_iframe},
	{ST_L2_8, EV_L2_I, l2_got_iframe},
	/* timers */
	{ST_L2_5, EV_L2_T200, l2_st5_tout_200},
	{ST_L2_6, EV_L2_T200, l2_st6_tout_200},
	{ST_L2_7, EV_L2_T200, l2_st7_tout_200},
	{ST_L2_8, EV_L2_T200, l2_st8_tout_200},
	{ST_L2_7, EV_L2_T203, l2_st7_tout_203},
	/* internal events */
	{ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
	{ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	/* layer-1 deactivation */
	{ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistant_da},
	{ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
	{ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
	{ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistant_da},
	{ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistant_da},
	{ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistant_da},
	{ST_L2_7, EV_L1_DEACTIVATE, l2_persistant_da},
	{ST_L2_8, EV_L1_DEACTIVATE, l2_persistant_da},
};

/* number of entries in L2FnList */
#define L2_FN_COUNT (sizeof(L2FnList)/sizeof(struct FsmNode))
static void
isdnl2_l1l2(struct PStack *st, int pr, void *arg)
{
	/*
	 * Entry point for primitives coming up from layer 1.
	 * PH_DATA frames are classified (I/S/U frame type), validated,
	 * and fed to the FSM; other primitives maintain activation and
	 * D-channel-busy state.
	 */
	struct sk_buff *skb = arg;
	u_char *datap;
	int ret = 1, len;
	int c = 0;

	switch (pr) {
	case (PH_DATA | INDICATION):
		datap = skb->data;
		len = l2addrsize(&st->l2);
		/* frame must at least carry a control field behind the
		 * address field */
		if (skb->len > len)
			datap += len;
		else {
			FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *) 'N');
			dev_kfree_skb(skb);
			return;
		}
		/* classify by control field; each *_error() helper
		 * returns a nonzero error tag for malformed frames */
		if (!(*datap & 1)) {	/* I-Frame */
			if (!(c = iframe_error(st, skb)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_I, skb);
		} else if (IsSFrame(datap, st)) {	/* S-Frame */
			if (!(c = super_error(st, skb)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_SUPER, skb);
		} else if (IsUI(datap)) {
			if (!(c = UI_error(st, skb)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_UI, skb);
		} else if (IsSABME(datap, st)) {
			if (!(c = unnum_error(st, skb, CMD)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_SABME, skb);
		} else if (IsUA(datap)) {
			if (!(c = unnum_error(st, skb, RSP)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_UA, skb);
		} else if (IsDISC(datap)) {
			if (!(c = unnum_error(st, skb, CMD)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_DISC, skb);
		} else if (IsDM(datap)) {
			if (!(c = unnum_error(st, skb, RSP)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_DM, skb);
		} else if (IsFRMR(datap)) {
			if (!(c = FRMR_error(st, skb)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_FRMR, skb);
		} else {
			/* unknown frame type */
			FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *) 'L');
			dev_kfree_skb(skb);
			ret = 0;
		}
		if (c) {
			/* malformed frame: free it, report the tag */
			dev_kfree_skb(skb);
			FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
			ret = 0;
		}
		/* ret != 0 means the FSM did not consume the skb */
		if (ret)
			dev_kfree_skb(skb);
		break;
	case (PH_PULL | CONFIRM):
		FsmEvent(&st->l2.l2m, EV_L2_ACK_PULL, arg);
		break;
	case (PH_PAUSE | INDICATION):
		test_and_set_bit(FLG_DCHAN_BUSY, &st->l2.flag);
		break;
	case (PH_PAUSE | CONFIRM):
		test_and_clear_bit(FLG_DCHAN_BUSY, &st->l2.flag);
		break;
	case (PH_ACTIVATE | CONFIRM):
	case (PH_ACTIVATE | INDICATION):
		test_and_set_bit(FLG_L1_ACTIV, &st->l2.flag);
		/* deliver an establish request that waited for L1 */
		if (test_and_clear_bit(FLG_ESTAB_PEND, &st->l2.flag))
			FsmEvent(&st->l2.l2m, EV_L2_DL_ESTABLISH_REQ, arg);
		break;
	case (PH_DEACTIVATE | INDICATION):
	case (PH_DEACTIVATE | CONFIRM):
		test_and_clear_bit(FLG_L1_ACTIV, &st->l2.flag);
		FsmEvent(&st->l2.l2m, EV_L1_DEACTIVATE, arg);
		break;
	default:
		l2m_debug(&st->l2.l2m, "l2 unknown pr %04x", pr);
		break;
	}
}
static void
isdnl2_l3l2(struct PStack *st, int pr, void *arg)
{
	/* Entry point for primitives coming down from layer 3. */
	switch (pr) {
	case (DL_DATA | REQUEST):
		/* FsmEvent() returns nonzero when the event is not
		 * handled in the current state -> free the skb */
		if (FsmEvent(&st->l2.l2m, EV_L2_DL_DATA, arg)) {
			dev_kfree_skb((struct sk_buff *) arg);
		}
		break;
	case (DL_UNIT_DATA | REQUEST):
		if (FsmEvent(&st->l2.l2m, EV_L2_DL_UNIT_DATA, arg)) {
			dev_kfree_skb((struct sk_buff *) arg);
		}
		break;
	case (DL_ESTABLISH | REQUEST):
		if (test_bit(FLG_L1_ACTIV, &st->l2.flag)) {
			if (test_bit(FLG_LAPD, &st->l2.flag) ||
			    test_bit(FLG_ORIG, &st->l2.flag)) {
				FsmEvent(&st->l2.l2m, EV_L2_DL_ESTABLISH_REQ, arg);
			}
		} else {
			/* L1 not up yet: remember the request and
			 * activate L1 first */
			if (test_bit(FLG_LAPD, &st->l2.flag) ||
			    test_bit(FLG_ORIG, &st->l2.flag)) {
				test_and_set_bit(FLG_ESTAB_PEND, &st->l2.flag);
			}
			st->l2.l2l1(st, PH_ACTIVATE, NULL);
		}
		break;
	case (DL_RELEASE | REQUEST):
		/* LAPB links take the physical channel down as well */
		if (test_bit(FLG_LAPB, &st->l2.flag)) {
			st->l2.l2l1(st, PH_DEACTIVATE, NULL);
		}
		FsmEvent(&st->l2.l2m, EV_L2_DL_RELEASE_REQ, arg);
		break;
	case (MDL_ASSIGN | REQUEST):
		FsmEvent(&st->l2.l2m, EV_L2_MDL_ASSIGN, arg);
		break;
	case (MDL_REMOVE | REQUEST):
		FsmEvent(&st->l2.l2m, EV_L2_MDL_REMOVE, arg);
		break;
	case (MDL_ERROR | RESPONSE):
		FsmEvent(&st->l2.l2m, EV_L2_MDL_ERROR, arg);
		break;
	}
}
  1518. void
  1519. releasestack_isdnl2(struct PStack *st)
  1520. {
  1521. FsmDelTimer(&st->l2.t200, 21);
  1522. FsmDelTimer(&st->l2.t203, 16);
  1523. skb_queue_purge(&st->l2.i_queue);
  1524. skb_queue_purge(&st->l2.ui_queue);
  1525. ReleaseWin(&st->l2);
  1526. }
  1527. static void
  1528. l2m_debug(struct FsmInst *fi, char *fmt, ...)
  1529. {
  1530. va_list args;
  1531. struct PStack *st = fi->userdata;
  1532. va_start(args, fmt);
  1533. VHiSax_putstatus(st->l1.hardware, st->l2.debug_id, fmt, args);
  1534. va_end(args);
  1535. }
  1536. void
  1537. setstack_isdnl2(struct PStack *st, char *debug_id)
  1538. {
  1539. spin_lock_init(&st->l2.lock);
  1540. st->l1.l1l2 = isdnl2_l1l2;
  1541. st->l3.l3l2 = isdnl2_l3l2;
  1542. skb_queue_head_init(&st->l2.i_queue);
  1543. skb_queue_head_init(&st->l2.ui_queue);
  1544. InitWin(&st->l2);
  1545. st->l2.debug = 0;
  1546. st->l2.l2m.fsm = &l2fsm;
  1547. if (test_bit(FLG_LAPB, &st->l2.flag))
  1548. st->l2.l2m.state = ST_L2_4;
  1549. else
  1550. st->l2.l2m.state = ST_L2_1;
  1551. st->l2.l2m.debug = 0;
  1552. st->l2.l2m.userdata = st;
  1553. st->l2.l2m.userint = 0;
  1554. st->l2.l2m.printdebug = l2m_debug;
  1555. strcpy(st->l2.debug_id, debug_id);
  1556. FsmInitTimer(&st->l2.l2m, &st->l2.t200);
  1557. FsmInitTimer(&st->l2.l2m, &st->l2.t203);
  1558. }
  1559. static void
  1560. transl2_l3l2(struct PStack *st, int pr, void *arg)
  1561. {
  1562. switch (pr) {
  1563. case (DL_DATA | REQUEST):
  1564. case (DL_UNIT_DATA | REQUEST):
  1565. st->l2.l2l1(st, PH_DATA | REQUEST, arg);
  1566. break;
  1567. case (DL_ESTABLISH | REQUEST):
  1568. st->l2.l2l1(st, PH_ACTIVATE | REQUEST, NULL);
  1569. break;
  1570. case (DL_RELEASE | REQUEST):
  1571. st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
  1572. break;
  1573. }
  1574. }
void
setstack_transl2(struct PStack *st)
{
	/* Install the transparent (pass-through) layer-2 handler. */
	st->l3.l3l2 = transl2_l3l2;
}
void
releasestack_transl2(struct PStack *st)
{
	/* Transparent L2 holds no resources; nothing to release. */
}
  1584. int __init
  1585. Isdnl2New(void)
  1586. {
  1587. l2fsm.state_count = L2_STATE_COUNT;
  1588. l2fsm.event_count = L2_EVENT_COUNT;
  1589. l2fsm.strEvent = strL2Event;
  1590. l2fsm.strState = strL2State;
  1591. return FsmNew(&l2fsm, L2FnList, L2_FN_COUNT);
  1592. }
void
Isdnl2Free(void)
{
	/* Release the compiled FSM transition matrix. */
	FsmFree(&l2fsm);
}