  1. /* $Id: isdnl2.c,v 2.30.2.4 2004/02/11 13:21:34 keil Exp $
  2. *
  3. * Author Karsten Keil
  4. * based on the teles driver from Jan den Ouden
  5. * Copyright by Karsten Keil <keil@isdn4linux.de>
  6. *
  7. * This software may be used and distributed according to the terms
  8. * of the GNU General Public License, incorporated herein by reference.
  9. *
  10. * For changes and modifications please read
  11. * Documentation/isdn/HiSax.cert
  12. *
  13. * Thanks to Jan den Ouden
  14. * Fritz Elfert
  15. *
  16. */
  17. #include <linux/init.h>
  18. #include "hisax.h"
  19. #include "isdnl2.h"
/* CVS revision string, exported for version reporting */
const char *l2_revision = "$Revision: 2.30.2.4 $";

/* forward declaration: printf-style debug output on the L2 state machine */
static void l2m_debug(struct FsmInst *fi, char *fmt, ...);

/* the shared layer-2 finite state machine description */
static struct Fsm l2fsm;
/*
 * Layer-2 FSM states.  From the handlers below they appear to follow the
 * Q.921 numbering (ST_L2_4 = released, ST_L2_5 = awaiting establishment,
 * ST_L2_6 = awaiting release, ST_L2_7 = multiple frame established,
 * ST_L2_8 = timer recovery) -- NOTE(review): mapping inferred from handler
 * names; confirm against ITU-T Q.921.
 */
enum {
	ST_L2_1,
	ST_L2_2,
	ST_L2_3,
	ST_L2_4,
	ST_L2_5,
	ST_L2_6,
	ST_L2_7,
	ST_L2_8,
};

/* number of states; keep in sync with the last enum entry above */
#define L2_STATE_COUNT (ST_L2_8+1)
/* printable state names, indexed by the state enum above (for debugging) */
static char *strL2State[] =
{
	"ST_L2_1",
	"ST_L2_2",
	"ST_L2_3",
	"ST_L2_4",
	"ST_L2_5",
	"ST_L2_6",
	"ST_L2_7",
	"ST_L2_8",
};
/*
 * Layer-2 FSM events: received frame types (UI..I), requests from
 * layer 3 (DL_*), management events (MDL_*), layer-1 deactivation,
 * timer expiries (T200/T203) and local busy handling.
 */
enum {
	EV_L2_UI,
	EV_L2_SABME,
	EV_L2_DISC,
	EV_L2_DM,
	EV_L2_UA,
	EV_L2_FRMR,
	EV_L2_SUPER,
	EV_L2_I,
	EV_L2_DL_DATA,
	EV_L2_ACK_PULL,
	EV_L2_DL_UNIT_DATA,
	EV_L2_DL_ESTABLISH_REQ,
	EV_L2_DL_RELEASE_REQ,
	EV_L2_MDL_ASSIGN,
	EV_L2_MDL_REMOVE,
	EV_L2_MDL_ERROR,
	EV_L1_DEACTIVATE,
	EV_L2_T200,
	EV_L2_T203,
	EV_L2_SET_OWN_BUSY,
	EV_L2_CLEAR_OWN_BUSY,
	EV_L2_FRAME_ERROR,
};

/* number of events; keep in sync with the last enum entry above */
#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1)
/* printable event names, indexed by the event enum above (for debugging) */
static char *strL2Event[] =
{
	"EV_L2_UI",
	"EV_L2_SABME",
	"EV_L2_DISC",
	"EV_L2_DM",
	"EV_L2_UA",
	"EV_L2_FRMR",
	"EV_L2_SUPER",
	"EV_L2_I",
	"EV_L2_DL_DATA",
	"EV_L2_ACK_PULL",
	"EV_L2_DL_UNIT_DATA",
	"EV_L2_DL_ESTABLISH_REQ",
	"EV_L2_DL_RELEASE_REQ",
	"EV_L2_MDL_ASSIGN",
	"EV_L2_MDL_REMOVE",
	"EV_L2_MDL_ERROR",
	"EV_L1_DEACTIVATE",
	"EV_L2_T200",
	"EV_L2_T203",
	"EV_L2_SET_OWN_BUSY",
	"EV_L2_CLEAR_OWN_BUSY",
	"EV_L2_FRAME_ERROR",
};

/* forward declaration: octets in the L2 address field (definition below) */
static int l2addrsize(struct Layer2 *l2);
  96. static void
  97. set_peer_busy(struct Layer2 *l2) {
  98. test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
  99. if (!skb_queue_empty(&l2->i_queue) ||
  100. !skb_queue_empty(&l2->ui_queue))
  101. test_and_set_bit(FLG_L2BLOCK, &l2->flag);
  102. }
  103. static void
  104. clear_peer_busy(struct Layer2 *l2) {
  105. if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
  106. test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
  107. }
  108. static void
  109. InitWin(struct Layer2 *l2)
  110. {
  111. int i;
  112. for (i = 0; i < MAX_WINDOW; i++)
  113. l2->windowar[i] = NULL;
  114. }
  115. static int
  116. freewin1(struct Layer2 *l2)
  117. {
  118. int i, cnt = 0;
  119. for (i = 0; i < MAX_WINDOW; i++) {
  120. if (l2->windowar[i]) {
  121. cnt++;
  122. dev_kfree_skb(l2->windowar[i]);
  123. l2->windowar[i] = NULL;
  124. }
  125. }
  126. return cnt;
  127. }
  128. static inline void
  129. freewin(struct PStack *st)
  130. {
  131. freewin1(&st->l2);
  132. }
  133. static void
  134. ReleaseWin(struct Layer2 *l2)
  135. {
  136. int cnt;
  137. if((cnt = freewin1(l2)))
  138. printk(KERN_WARNING "isdl2 freed %d skbuffs in release\n", cnt);
  139. }
  140. static inline unsigned int
  141. cansend(struct PStack *st)
  142. {
  143. unsigned int p1;
  144. if(test_bit(FLG_MOD128, &st->l2.flag))
  145. p1 = (st->l2.vs - st->l2.va) % 128;
  146. else
  147. p1 = (st->l2.vs - st->l2.va) % 8;
  148. return ((p1 < st->l2.window) && !test_bit(FLG_PEER_BUSY, &st->l2.flag));
  149. }
  150. static inline void
  151. clear_exception(struct Layer2 *l2)
  152. {
  153. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  154. test_and_clear_bit(FLG_REJEXC, &l2->flag);
  155. test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
  156. clear_peer_busy(l2);
  157. }
  158. static inline int
  159. l2headersize(struct Layer2 *l2, int ui)
  160. {
  161. return (((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
  162. (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1));
  163. }
  164. inline int
  165. l2addrsize(struct Layer2 *l2)
  166. {
  167. return (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
  168. }
  169. static int
  170. sethdraddr(struct Layer2 *l2, u_char * header, int rsp)
  171. {
  172. u_char *ptr = header;
  173. int crbit = rsp;
  174. if (test_bit(FLG_LAPD, &l2->flag)) {
  175. *ptr++ = (l2->sap << 2) | (rsp ? 2 : 0);
  176. *ptr++ = (l2->tei << 1) | 1;
  177. return (2);
  178. } else {
  179. if (test_bit(FLG_ORIG, &l2->flag))
  180. crbit = !crbit;
  181. if (crbit)
  182. *ptr++ = 1;
  183. else
  184. *ptr++ = 3;
  185. return (1);
  186. }
  187. }
  188. static inline void
  189. enqueue_super(struct PStack *st,
  190. struct sk_buff *skb)
  191. {
  192. if (test_bit(FLG_LAPB, &st->l2.flag))
  193. st->l1.bcs->tx_cnt += skb->len;
  194. st->l2.l2l1(st, PH_DATA | REQUEST, skb);
  195. }
  196. #define enqueue_ui(a, b) enqueue_super(a, b)
  197. static inline int
  198. IsUI(u_char * data)
  199. {
  200. return ((data[0] & 0xef) == UI);
  201. }
  202. static inline int
  203. IsUA(u_char * data)
  204. {
  205. return ((data[0] & 0xef) == UA);
  206. }
  207. static inline int
  208. IsDM(u_char * data)
  209. {
  210. return ((data[0] & 0xef) == DM);
  211. }
  212. static inline int
  213. IsDISC(u_char * data)
  214. {
  215. return ((data[0] & 0xef) == DISC);
  216. }
  217. static inline int
  218. IsSFrame(u_char * data, struct PStack *st)
  219. {
  220. register u_char d = *data;
  221. if (!test_bit(FLG_MOD128, &st->l2.flag))
  222. d &= 0xf;
  223. return(((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c));
  224. }
  225. static inline int
  226. IsSABME(u_char * data, struct PStack *st)
  227. {
  228. u_char d = data[0] & ~0x10;
  229. return (test_bit(FLG_MOD128, &st->l2.flag) ? d == SABME : d == SABM);
  230. }
  231. static inline int
  232. IsREJ(u_char * data, struct PStack *st)
  233. {
  234. return (test_bit(FLG_MOD128, &st->l2.flag) ? data[0] == REJ : (data[0] & 0xf) == REJ);
  235. }
  236. static inline int
  237. IsFRMR(u_char * data)
  238. {
  239. return ((data[0] & 0xef) == FRMR);
  240. }
  241. static inline int
  242. IsRNR(u_char * data, struct PStack *st)
  243. {
  244. return (test_bit(FLG_MOD128, &st->l2.flag) ? data[0] == RNR : (data[0] & 0xf) == RNR);
  245. }
  246. static int
  247. iframe_error(struct PStack *st, struct sk_buff *skb)
  248. {
  249. int i = l2addrsize(&st->l2) + (test_bit(FLG_MOD128, &st->l2.flag) ? 2 : 1);
  250. int rsp = *skb->data & 0x2;
  251. if (test_bit(FLG_ORIG, &st->l2.flag))
  252. rsp = !rsp;
  253. if (rsp)
  254. return 'L';
  255. if (skb->len < i)
  256. return 'N';
  257. if ((skb->len - i) > st->l2.maxlen)
  258. return 'O';
  259. return 0;
  260. }
  261. static int
  262. super_error(struct PStack *st, struct sk_buff *skb)
  263. {
  264. if (skb->len != l2addrsize(&st->l2) +
  265. (test_bit(FLG_MOD128, &st->l2.flag) ? 2 : 1))
  266. return 'N';
  267. return 0;
  268. }
  269. static int
  270. unnum_error(struct PStack *st, struct sk_buff *skb, int wantrsp)
  271. {
  272. int rsp = (*skb->data & 0x2) >> 1;
  273. if (test_bit(FLG_ORIG, &st->l2.flag))
  274. rsp = !rsp;
  275. if (rsp != wantrsp)
  276. return 'L';
  277. if (skb->len != l2addrsize(&st->l2) + 1)
  278. return 'N';
  279. return 0;
  280. }
  281. static int
  282. UI_error(struct PStack *st, struct sk_buff *skb)
  283. {
  284. int rsp = *skb->data & 0x2;
  285. if (test_bit(FLG_ORIG, &st->l2.flag))
  286. rsp = !rsp;
  287. if (rsp)
  288. return 'L';
  289. if (skb->len > st->l2.maxlen + l2addrsize(&st->l2) + 1)
  290. return 'O';
  291. return 0;
  292. }
  293. static int
  294. FRMR_error(struct PStack *st, struct sk_buff *skb)
  295. {
  296. int headers = l2addrsize(&st->l2) + 1;
  297. u_char *datap = skb->data + headers;
  298. int rsp = *skb->data & 0x2;
  299. if (test_bit(FLG_ORIG, &st->l2.flag))
  300. rsp = !rsp;
  301. if (!rsp)
  302. return 'L';
  303. if (test_bit(FLG_MOD128, &st->l2.flag)) {
  304. if (skb->len < headers + 5)
  305. return 'N';
  306. else
  307. l2m_debug(&st->l2.l2m, "FRMR information %2x %2x %2x %2x %2x",
  308. datap[0], datap[1], datap[2],
  309. datap[3], datap[4]);
  310. } else {
  311. if (skb->len < headers + 3)
  312. return 'N';
  313. else
  314. l2m_debug(&st->l2.l2m, "FRMR information %2x %2x %2x",
  315. datap[0], datap[1], datap[2]);
  316. }
  317. return 0;
  318. }
  319. static unsigned int
  320. legalnr(struct PStack *st, unsigned int nr)
  321. {
  322. struct Layer2 *l2 = &st->l2;
  323. if(test_bit(FLG_MOD128, &l2->flag))
  324. return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
  325. else
  326. return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
  327. }
/*
 * Advance V(A) up to the acknowledged sequence number 'nr', releasing
 * every acknowledged I frame from the retransmission window.
 * The lock is dropped around lli_writewakeup() because that callback
 * may go back into this stack; 'sow' (start of window) chases V(A).
 */
static void
setva(struct PStack *st, unsigned int nr)
{
	struct Layer2 *l2 = &st->l2;
	int len;
	u_long flags;

	spin_lock_irqsave(&l2->lock, flags);
	while (l2->va != nr) {
		(l2->va)++;
		if (test_bit(FLG_MOD128, &l2->flag))
			l2->va %= 128;
		else
			l2->va %= 8;
		/* frame length for the wakeup below; -1 suppresses it */
		len = l2->windowar[l2->sow]->len;
		if (PACKET_NOACK == l2->windowar[l2->sow]->pkt_type)
			len = -1;
		dev_kfree_skb(l2->windowar[l2->sow]);
		l2->windowar[l2->sow] = NULL;
		l2->sow = (l2->sow + 1) % l2->window;
		/* release the lock while notifying the line interface */
		spin_unlock_irqrestore(&l2->lock, flags);
		if (test_bit(FLG_LLI_L2WAKEUP, &st->lli.flag) && (len >= 0))
			lli_writewakeup(st, len);
		spin_lock_irqsave(&l2->lock, flags);
	}
	spin_unlock_irqrestore(&l2->lock, flags);
}
  354. static void
  355. send_uframe(struct PStack *st, u_char cmd, u_char cr)
  356. {
  357. struct sk_buff *skb;
  358. u_char tmp[MAX_HEADER_LEN];
  359. int i;
  360. i = sethdraddr(&st->l2, tmp, cr);
  361. tmp[i++] = cmd;
  362. if (!(skb = alloc_skb(i, GFP_ATOMIC))) {
  363. printk(KERN_WARNING "isdl2 can't alloc sbbuff for send_uframe\n");
  364. return;
  365. }
  366. memcpy(skb_put(skb, i), tmp, i);
  367. enqueue_super(st, skb);
  368. }
  369. static inline u_char
  370. get_PollFlag(struct PStack * st, struct sk_buff * skb)
  371. {
  372. return (skb->data[l2addrsize(&(st->l2))] & 0x10);
  373. }
  374. static inline u_char
  375. get_PollFlagFree(struct PStack *st, struct sk_buff *skb)
  376. {
  377. u_char PF;
  378. PF = get_PollFlag(st, skb);
  379. dev_kfree_skb(skb);
  380. return (PF);
  381. }
/* Start the T200 retransmission timer and mark it running.
 * 'i' tags the call site for FSM timer debugging. */
static inline void
start_t200(struct PStack *st, int i)
{
	FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &st->l2.flag);
}

/* Restart T200 (works whether or not it was running) and mark it running. */
static inline void
restart_t200(struct PStack *st, int i)
{
	FsmRestartTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &st->l2.flag);
}

/* Stop T200, but only if the running flag says it is active. */
static inline void
stop_t200(struct PStack *st, int i)
{
	if (test_and_clear_bit(FLG_T200_RUN, &st->l2.flag))
		FsmDelTimer(&st->l2.t200, i);
}
  400. static inline void
  401. st5_dl_release_l2l3(struct PStack *st)
  402. {
  403. int pr;
  404. if(test_and_clear_bit(FLG_PEND_REL, &st->l2.flag))
  405. pr = DL_RELEASE | CONFIRM;
  406. else
  407. pr = DL_RELEASE | INDICATION;
  408. st->l2.l2l3(st, pr, NULL);
  409. }
  410. static inline void
  411. lapb_dl_release_l2l3(struct PStack *st, int f)
  412. {
  413. if (test_bit(FLG_LAPB, &st->l2.flag))
  414. st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
  415. st->l2.l2l3(st, DL_RELEASE | f, NULL);
  416. }
/*
 * Start (re)establishment: send SABM(E) with the P bit set, reset the
 * retransmission counter, stop T203, (re)start T200, flush the send
 * window and enter ST_L2_5 (awaiting establishment).
 */
static void
establishlink(struct FsmInst *fi)
{
	struct PStack *st = fi->userdata;
	u_char cmd;

	clear_exception(&st->l2);
	st->l2.rc = 0;
	/* 0x10 sets the P bit on the SABM(E) */
	cmd = (test_bit(FLG_MOD128, &st->l2.flag) ? SABME : SABM) | 0x10;
	send_uframe(st, cmd, CMD);
	FsmDelTimer(&st->l2.t203, 1);
	restart_t200(st, 1);
	test_and_clear_bit(FLG_PEND_REL, &st->l2.flag);
	freewin(st);
	FsmChangeState(fi, ST_L2_5);
}
  432. static void
  433. l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
  434. {
  435. struct sk_buff *skb = arg;
  436. struct PStack *st = fi->userdata;
  437. if (get_PollFlagFree(st, skb))
  438. st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'C');
  439. else
  440. st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'D');
  441. }
  442. static void
  443. l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
  444. {
  445. struct sk_buff *skb = arg;
  446. struct PStack *st = fi->userdata;
  447. if (get_PollFlagFree(st, skb))
  448. st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'B');
  449. else {
  450. st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'E');
  451. establishlink(fi);
  452. test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
  453. }
  454. }
  455. static void
  456. l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
  457. {
  458. struct sk_buff *skb = arg;
  459. struct PStack *st = fi->userdata;
  460. if (get_PollFlagFree(st, skb))
  461. st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'B');
  462. else {
  463. st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'E');
  464. }
  465. establishlink(fi);
  466. test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
  467. }
  468. static void
  469. l2_go_st3(struct FsmInst *fi, int event, void *arg)
  470. {
  471. FsmChangeState(fi, ST_L2_3);
  472. }
  473. static void
  474. l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
  475. {
  476. struct PStack *st = fi->userdata;
  477. FsmChangeState(fi, ST_L2_3);
  478. st->l2.l2tei(st, MDL_ASSIGN | INDICATION, NULL);
  479. }
  480. static void
  481. l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
  482. {
  483. struct PStack *st = fi->userdata;
  484. struct sk_buff *skb = arg;
  485. skb_queue_tail(&st->l2.ui_queue, skb);
  486. FsmChangeState(fi, ST_L2_2);
  487. st->l2.l2tei(st, MDL_ASSIGN | INDICATION, NULL);
  488. }
  489. static void
  490. l2_queue_ui(struct FsmInst *fi, int event, void *arg)
  491. {
  492. struct PStack *st = fi->userdata;
  493. struct sk_buff *skb = arg;
  494. skb_queue_tail(&st->l2.ui_queue, skb);
  495. }
  496. static void
  497. tx_ui(struct PStack *st)
  498. {
  499. struct sk_buff *skb;
  500. u_char header[MAX_HEADER_LEN];
  501. int i;
  502. i = sethdraddr(&(st->l2), header, CMD);
  503. header[i++] = UI;
  504. while ((skb = skb_dequeue(&st->l2.ui_queue))) {
  505. memcpy(skb_push(skb, i), header, i);
  506. enqueue_ui(st, skb);
  507. }
  508. }
  509. static void
  510. l2_send_ui(struct FsmInst *fi, int event, void *arg)
  511. {
  512. struct PStack *st = fi->userdata;
  513. struct sk_buff *skb = arg;
  514. skb_queue_tail(&st->l2.ui_queue, skb);
  515. tx_ui(st);
  516. }
  517. static void
  518. l2_got_ui(struct FsmInst *fi, int event, void *arg)
  519. {
  520. struct PStack *st = fi->userdata;
  521. struct sk_buff *skb = arg;
  522. skb_pull(skb, l2headersize(&st->l2, 1));
  523. st->l2.l2l3(st, DL_UNIT_DATA | INDICATION, skb);
  524. /* ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
  525. * in states 1-3 for broadcast
  526. */
  527. }
  528. static void
  529. l2_establish(struct FsmInst *fi, int event, void *arg)
  530. {
  531. struct PStack *st = fi->userdata;
  532. establishlink(fi);
  533. test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
  534. }
  535. static void
  536. l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
  537. {
  538. struct PStack *st = fi->userdata;
  539. skb_queue_purge(&st->l2.i_queue);
  540. test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
  541. test_and_clear_bit(FLG_PEND_REL, &st->l2.flag);
  542. }
  543. static void
  544. l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
  545. {
  546. struct PStack *st = fi->userdata;
  547. skb_queue_purge(&st->l2.i_queue);
  548. establishlink(fi);
  549. test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
  550. }
  551. static void
  552. l2_release(struct FsmInst *fi, int event, void *arg)
  553. {
  554. struct PStack *st = fi->userdata;
  555. st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
  556. }
  557. static void
  558. l2_pend_rel(struct FsmInst *fi, int event, void *arg)
  559. {
  560. struct PStack *st = fi->userdata;
  561. test_and_set_bit(FLG_PEND_REL, &st->l2.flag);
  562. }
/*
 * DL-RELEASE request on an established link: flush all queued and
 * unacked I frames, enter ST_L2_6 (awaiting release) and start the
 * DISC handshake.  T203 is stopped before T200 is restarted.
 */
static void
l2_disconnect(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	skb_queue_purge(&st->l2.i_queue);	/* drop unsent I frames */
	freewin(st);				/* drop unacked I frames */
	FsmChangeState(fi, ST_L2_6);
	st->l2.rc = 0;				/* reset retry counter */
	send_uframe(st, DISC | 0x10, CMD);	/* DISC with P bit set */
	FsmDelTimer(&st->l2.t203, 1);
	restart_t200(st, 2);
}
/*
 * SABM(E) received while not established: answer UA (echoing P as F),
 * reset all sequence state, enter ST_L2_7 (multiple frame established),
 * start T203 and tell layer 3 the link is up.
 */
static void
l2_start_multi(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
	clear_exception(&st->l2);
	/* reset the sequence state variables V(S), V(A), V(R) */
	st->l2.vs = 0;
	st->l2.va = 0;
	st->l2.vr = 0;
	st->l2.sow = 0;
	FsmChangeState(fi, ST_L2_7);
	FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 3);
	st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL);
}
  590. static void
  591. l2_send_UA(struct FsmInst *fi, int event, void *arg)
  592. {
  593. struct PStack *st = fi->userdata;
  594. struct sk_buff *skb = arg;
  595. send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
  596. }
  597. static void
  598. l2_send_DM(struct FsmInst *fi, int event, void *arg)
  599. {
  600. struct PStack *st = fi->userdata;
  601. struct sk_buff *skb = arg;
  602. send_uframe(st, DM | get_PollFlagFree(st, skb), RSP);
  603. }
/*
 * SABM(E) received while already established (peer reset): answer UA,
 * report MDL error 'F', re-initialise the sequence state and restart
 * T203.  If unacked I frames existed, layer 3 gets a fresh
 * DL_ESTABLISH indication; finally kick transmission if possible.
 */
static void
l2_restart_multi(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int est = 0, state;

	state = fi->state;
	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
	st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'F');
	if (st->l2.vs != st->l2.va) {
		/* outstanding frames are lost by the reset */
		skb_queue_purge(&st->l2.i_queue);
		est = 1;
	}
	clear_exception(&st->l2);
	st->l2.vs = 0;
	st->l2.va = 0;
	st->l2.vr = 0;
	st->l2.sow = 0;
	FsmChangeState(fi, ST_L2_7);
	stop_t200(st, 3);
	FsmRestartTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 3);
	if (est)
		st->l2.l2l3(st, DL_ESTABLISH | INDICATION, NULL);
	if ((ST_L2_7 == state) || (ST_L2_8 == state))
		if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
}
/*
 * DISC received while established: enter ST_L2_4, stop both timers,
 * acknowledge with UA, drop all queued and unacked I frames, and
 * signal DL_RELEASE to layer 3.
 */
static void
l2_stop_multi(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	FsmChangeState(fi, ST_L2_4);
	FsmDelTimer(&st->l2.t203, 3);
	stop_t200(st, 4);
	send_uframe(st, UA | get_PollFlagFree(st, skb), RSP);
	skb_queue_purge(&st->l2.i_queue);
	freewin(st);
	lapb_dl_release_l2l3(st, INDICATION);
}
/*
 * UA received in ST_L2_5 (awaiting establishment).  Without the F bit
 * this is an MDL error.  Otherwise the link is up: reset the sequence
 * variables, enter ST_L2_7, start T203 and report DL_ESTABLISH
 * (CONFIRM if we initiated, INDICATION on an implicit reset).
 * NOTE(review): when FLG_PEND_REL was set, l2_disconnect() is called
 * but the function still falls through to the establish bookkeeping
 * below -- confirm this is intended.
 */
static void
l2_connected(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int pr = -1;

	if (!get_PollFlag(st, skb)) {
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	if (test_and_clear_bit(FLG_PEND_REL, &st->l2.flag))
		l2_disconnect(fi, event, arg);
	if (test_and_clear_bit(FLG_L3_INIT, &st->l2.flag)) {
		pr = DL_ESTABLISH | CONFIRM;
	} else if (st->l2.vs != st->l2.va) {
		/* unacked frames from before the reset are discarded */
		skb_queue_purge(&st->l2.i_queue);
		pr = DL_ESTABLISH | INDICATION;
	}
	stop_t200(st, 5);
	st->l2.vr = 0;
	st->l2.vs = 0;
	st->l2.va = 0;
	st->l2.sow = 0;
	FsmChangeState(fi, ST_L2_7);
	FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 4);
	if (pr != -1)
		st->l2.l2l3(st, pr, NULL);
	if (!skb_queue_empty(&st->l2.i_queue) && cansend(st))
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
}
  675. static void
  676. l2_released(struct FsmInst *fi, int event, void *arg)
  677. {
  678. struct PStack *st = fi->userdata;
  679. struct sk_buff *skb = arg;
  680. if (!get_PollFlag(st, skb)) {
  681. l2_mdl_error_ua(fi, event, arg);
  682. return;
  683. }
  684. dev_kfree_skb(skb);
  685. stop_t200(st, 6);
  686. lapb_dl_release_l2l3(st, CONFIRM);
  687. FsmChangeState(fi, ST_L2_4);
  688. }
  689. static void
  690. l2_reestablish(struct FsmInst *fi, int event, void *arg)
  691. {
  692. struct PStack *st = fi->userdata;
  693. struct sk_buff *skb = arg;
  694. if (!get_PollFlagFree(st, skb)) {
  695. establishlink(fi);
  696. test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
  697. }
  698. }
  699. static void
  700. l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
  701. {
  702. struct PStack *st = fi->userdata;
  703. struct sk_buff *skb = arg;
  704. if (get_PollFlagFree(st, skb)) {
  705. stop_t200(st, 7);
  706. if (!test_bit(FLG_L3_INIT, &st->l2.flag))
  707. skb_queue_purge(&st->l2.i_queue);
  708. if (test_bit(FLG_LAPB, &st->l2.flag))
  709. st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
  710. st5_dl_release_l2l3(st);
  711. FsmChangeState(fi, ST_L2_4);
  712. }
  713. }
  714. static void
  715. l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
  716. {
  717. struct PStack *st = fi->userdata;
  718. struct sk_buff *skb = arg;
  719. if (get_PollFlagFree(st, skb)) {
  720. stop_t200(st, 8);
  721. lapb_dl_release_l2l3(st, CONFIRM);
  722. FsmChangeState(fi, ST_L2_4);
  723. }
  724. }
  725. static inline void
  726. enquiry_cr(struct PStack *st, u_char typ, u_char cr, u_char pf)
  727. {
  728. struct sk_buff *skb;
  729. struct Layer2 *l2;
  730. u_char tmp[MAX_HEADER_LEN];
  731. int i;
  732. l2 = &st->l2;
  733. i = sethdraddr(l2, tmp, cr);
  734. if (test_bit(FLG_MOD128, &l2->flag)) {
  735. tmp[i++] = typ;
  736. tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
  737. } else
  738. tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
  739. if (!(skb = alloc_skb(i, GFP_ATOMIC))) {
  740. printk(KERN_WARNING "isdl2 can't alloc sbbuff for enquiry_cr\n");
  741. return;
  742. }
  743. memcpy(skb_put(skb, i), tmp, i);
  744. enqueue_super(st, skb);
  745. }
  746. static inline void
  747. enquiry_response(struct PStack *st)
  748. {
  749. if (test_bit(FLG_OWN_BUSY, &st->l2.flag))
  750. enquiry_cr(st, RNR, RSP, 1);
  751. else
  752. enquiry_cr(st, RR, RSP, 1);
  753. test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
  754. }
  755. static inline void
  756. transmit_enquiry(struct PStack *st)
  757. {
  758. if (test_bit(FLG_OWN_BUSY, &st->l2.flag))
  759. enquiry_cr(st, RNR, CMD, 1);
  760. else
  761. enquiry_cr(st, RR, CMD, 1);
  762. test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
  763. start_t200(st, 9);
  764. }
  765. static void
  766. nrerrorrecovery(struct FsmInst *fi)
  767. {
  768. struct PStack *st = fi->userdata;
  769. st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'J');
  770. establishlink(fi);
  771. test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
  772. }
/*
 * Requeue all unacknowledged I frames (walking V(S) back down to 'nr')
 * from the window array onto the head of i_queue and trigger a pull so
 * they are retransmitted.  The window slot for each V(S) is located
 * relative to V(A) and the start-of-window index 'sow'.
 * NOTE(review): relies on vs-- wrapping, then being folded by %= 128/8.
 */
static void
invoke_retransmission(struct PStack *st, unsigned int nr)
{
	struct Layer2 *l2 = &st->l2;
	u_int p1;
	u_long flags;

	spin_lock_irqsave(&l2->lock, flags);
	if (l2->vs != nr) {
		while (l2->vs != nr) {
			(l2->vs)--;
			if (test_bit(FLG_MOD128, &l2->flag)) {
				l2->vs %= 128;
				p1 = (l2->vs - l2->va) % 128;
			} else {
				l2->vs %= 8;
				p1 = (l2->vs - l2->va) % 8;
			}
			p1 = (p1 + l2->sow) % l2->window;
			/* LAPB: re-account the bytes that go out again */
			if (test_bit(FLG_LAPB, &l2->flag))
				st->l1.bcs->tx_cnt += l2->windowar[p1]->len + l2headersize(l2, 0);
			skb_queue_head(&l2->i_queue, l2->windowar[p1]);
			l2->windowar[p1] = NULL;
		}
		spin_unlock_irqrestore(&l2->lock, flags);
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
		return;
	}
	spin_unlock_irqrestore(&l2->lock, flags);
}
/*
 * Supervisory frame (RR/RNR/REJ) received in ST_L2_7 (established).
 * Handles peer-busy bookkeeping, P/F bit processing, N(R) validation
 * and the per-type timer/retransmission actions.
 */
static void
l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, typ = RR;
	unsigned int nr;
	struct Layer2 *l2 = &st->l2;

	rsp = *skb->data & 0x2;
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, st)) {
		set_peer_busy(l2);
		typ = RNR;
	} else
		clear_peer_busy(l2);
	if (IsREJ(skb->data, st))
		typ = REJ;
	if (test_bit(FLG_MOD128, &l2->flag)) {
		/* modulo 128: N(R) and P/F live in the second control octet */
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);
	if (PollFlag) {
		if (rsp)
			/* F bit set on a response here: MDL error 'A' */
			st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'A');
		else
			enquiry_response(st);
	}
	if (legalnr(st, nr)) {
		if (typ == REJ) {
			/* peer rejected: ack up to nr, resend the rest */
			setva(st, nr);
			invoke_retransmission(st, nr);
			stop_t200(st, 10);
			if (FsmAddTimer(&st->l2.t203, st->l2.T203,
					EV_L2_T203, NULL, 6))
				l2m_debug(&st->l2.l2m, "Restart T203 ST7 REJ");
		} else if ((nr == l2->vs) && (typ == RR)) {
			/* everything acknowledged: T200 off, T203 on */
			setva(st, nr);
			stop_t200(st, 11);
			FsmRestartTimer(&st->l2.t203, st->l2.T203,
					EV_L2_T203, NULL, 7);
		} else if ((l2->va != nr) || (typ == RNR)) {
			/* partial ack or peer busy: keep T200 running */
			setva(st, nr);
			if (typ != RR)
				FsmDelTimer(&st->l2.t203, 9);
			restart_t200(st, 12);
		}
		if (!skb_queue_empty(&st->l2.i_queue) && (typ == RR))
			st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
	} else
		nrerrorrecovery(fi);
}
  858. static void
  859. l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
  860. {
  861. struct PStack *st = fi->userdata;
  862. struct sk_buff *skb = arg;
  863. if (test_bit(FLG_LAPB, &st->l2.flag))
  864. st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
  865. if (!test_bit(FLG_L3_INIT, &st->l2.flag))
  866. skb_queue_tail(&st->l2.i_queue, skb);
  867. else
  868. dev_kfree_skb(skb);
  869. }
  870. static void
  871. l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
  872. {
  873. struct PStack *st = fi->userdata;
  874. struct sk_buff *skb = arg;
  875. if (test_bit(FLG_LAPB, &st->l2.flag))
  876. st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
  877. skb_queue_tail(&st->l2.i_queue, skb);
  878. st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
  879. }
  880. static void
  881. l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
  882. {
  883. struct PStack *st = fi->userdata;
  884. struct sk_buff *skb = arg;
  885. if (test_bit(FLG_LAPB, &st->l2.flag))
  886. st->l1.bcs->tx_cnt += skb->len + l2headersize(&st->l2, 0);
  887. skb_queue_tail(&st->l2.i_queue, skb);
  888. }
static void
l2_got_iframe(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	struct Layer2 *l2 = &(st->l2);
	int PollFlag, ns, i;
	unsigned int nr;

	/*
	 * Information frame received (ST_L2_7/ST_L2_8).  Extract N(S), N(R)
	 * and P; deliver in-sequence data to layer 3, REJ out-of-sequence
	 * frames, and process the piggybacked acknowledgement N(R).
	 */
	i = l2addrsize(l2);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		/* modulo-128: N(S) and N(R) each occupy a full octet */
		PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
		ns = skb->data[i] >> 1;
		nr = (skb->data[i + 1] >> 1) & 0x7f;
	} else {
		/* modulo-8: N(S), P and N(R) share one control octet */
		PollFlag = (skb->data[i] & 0x10);
		ns = (skb->data[i] >> 1) & 0x7;
		nr = (skb->data[i] >> 5) & 0x7;
	}
	if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
		/* we are receiver-busy: discard data, but still answer P=1 */
		dev_kfree_skb(skb);
		if (PollFlag) enquiry_response(st);
	} else if (l2->vr == ns) {
		/* in-sequence frame: advance V(R) and hand payload to L3 */
		(l2->vr)++;
		if (test_bit(FLG_MOD128, &l2->flag))
			l2->vr %= 128;
		else
			l2->vr %= 8;
		test_and_clear_bit(FLG_REJEXC, &l2->flag);
		if (PollFlag)
			enquiry_response(st);
		else
			/* defer the RR acknowledgement; may piggyback later */
			test_and_set_bit(FLG_ACK_PEND, &l2->flag);
		skb_pull(skb, l2headersize(l2, 0));
		st->l2.l2l3(st, DL_DATA | INDICATION, skb);
	} else {
		/* n(s)!=v(r) */
		dev_kfree_skb(skb);
		if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
			/* REJ already outstanding: only answer a poll */
			if (PollFlag)
				enquiry_response(st);
		} else {
			/* first out-of-sequence frame: send REJ once */
			enquiry_cr(st, REJ, RSP, PollFlag);
			test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
		}
	}
	if (legalnr(st, nr)) {
		if (!test_bit(FLG_PEER_BUSY, &st->l2.flag) && (fi->state == ST_L2_7)) {
			if (nr == st->l2.vs) {
				/* all outstanding I-frames acknowledged */
				stop_t200(st, 13);
				FsmRestartTimer(&st->l2.t203, st->l2.T203,
						EV_L2_T203, NULL, 7);
			} else if (nr != st->l2.va)
				/* partial acknowledgement: restart supervision */
				restart_t200(st, 14);
		}
		setva(st, nr);
	} else {
		nrerrorrecovery(fi);	/* invalid N(R): re-establish */
		return;
	}
	if (!skb_queue_empty(&st->l2.i_queue) && (fi->state == ST_L2_7))
		st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
	if (test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag))
		enquiry_cr(st, RR, RSP, 0);	/* flush the deferred ack */
}
  953. static void
  954. l2_got_tei(struct FsmInst *fi, int event, void *arg)
  955. {
  956. struct PStack *st = fi->userdata;
  957. st->l2.tei = (long) arg;
  958. if (fi->state == ST_L2_3) {
  959. establishlink(fi);
  960. test_and_set_bit(FLG_L3_INIT, &st->l2.flag);
  961. } else
  962. FsmChangeState(fi, ST_L2_4);
  963. if (!skb_queue_empty(&st->l2.ui_queue))
  964. tx_ui(st);
  965. }
static void
l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * T200 expiry while awaiting establishment (ST_L2_5): retransmit
	 * SABM(E) with P=1 up to N200 times, then give up and release.
	 * While the D-channel is busy (LAPD) just rearm the timer.
	 */
	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
	} else if (st->l2.rc == st->l2.N200) {
		/* retry count exhausted -> MDL-ERROR 'G', back to ST_L2_4 */
		FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
		skb_queue_purge(&st->l2.i_queue);
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'G');
		if (test_bit(FLG_LAPB, &st->l2.flag))
			st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
		st5_dl_release_l2l3(st);
	} else {
		st->l2.rc++;
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
		/* 0x10 sets the P bit on the retransmitted SABM/SABME */
		send_uframe(st, (test_bit(FLG_MOD128, &st->l2.flag) ? SABME : SABM)
			    | 0x10, CMD);
	}
}
static void
l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * T200 expiry while awaiting release (ST_L2_6): retransmit DISC
	 * with P=1 up to N200 times, then report MDL-ERROR 'H' and force
	 * the release.  Busy D-channel (LAPD) just rearms the timer.
	 */
	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
	} else if (st->l2.rc == st->l2.N200) {
		FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'H');
		lapb_dl_release_l2l3(st, CONFIRM);
	} else {
		st->l2.rc++;
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200,
			    NULL, 9);
		send_uframe(st, DISC | 0x10, CMD);	/* DISC with P=1 */
	}
}
static void
l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * T200 expiry in the established state (ST_L2_7): enter timer
	 * recovery (ST_L2_8) and poll the peer with an enquiry.
	 */
	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		/* D-channel busy: just rearm and try again later */
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
	st->l2.rc = 0;
	FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(st);
	st->l2.rc++;	/* first recovery attempt counted */
}
static void
l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * T200 expiry during timer recovery (ST_L2_8): re-poll the peer
	 * until N200 attempts are used up, then report MDL-ERROR 'I' and
	 * re-establish the link.
	 */
	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		/* D-channel busy: just rearm and try again later */
		FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &st->l2.flag);
	if (st->l2.rc == st->l2.N200) {
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'I');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
	} else {
		transmit_enquiry(st);
		st->l2.rc++;
	}
}
static void
l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * T203 (idle supervision) expiry in ST_L2_7: no traffic for a
	 * while, so enter timer recovery and poll the peer to verify the
	 * link is still alive.
	 */
	if (test_bit(FLG_LAPD, &st->l2.flag) &&
	    test_bit(FLG_DCHAN_BUSY, &st->l2.flag)) {
		/* D-channel busy: postpone the check */
		FsmAddTimer(&st->l2.t203, st->l2.T203, EV_L2_T203, NULL, 9);
		return;
	}
	FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(st);
	st->l2.rc = 0;
}
  1054. static void
  1055. l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
  1056. {
  1057. struct PStack *st = fi->userdata;
  1058. struct sk_buff *skb, *oskb;
  1059. struct Layer2 *l2 = &st->l2;
  1060. u_char header[MAX_HEADER_LEN];
  1061. int i;
  1062. int unsigned p1;
  1063. u_long flags;
  1064. if (!cansend(st))
  1065. return;
  1066. skb = skb_dequeue(&l2->i_queue);
  1067. if (!skb)
  1068. return;
  1069. spin_lock_irqsave(&l2->lock, flags);
  1070. if(test_bit(FLG_MOD128, &l2->flag))
  1071. p1 = (l2->vs - l2->va) % 128;
  1072. else
  1073. p1 = (l2->vs - l2->va) % 8;
  1074. p1 = (p1 + l2->sow) % l2->window;
  1075. if (l2->windowar[p1]) {
  1076. printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
  1077. p1);
  1078. dev_kfree_skb(l2->windowar[p1]);
  1079. }
  1080. l2->windowar[p1] = skb_clone(skb, GFP_ATOMIC);
  1081. i = sethdraddr(&st->l2, header, CMD);
  1082. if (test_bit(FLG_MOD128, &l2->flag)) {
  1083. header[i++] = l2->vs << 1;
  1084. header[i++] = l2->vr << 1;
  1085. l2->vs = (l2->vs + 1) % 128;
  1086. } else {
  1087. header[i++] = (l2->vr << 5) | (l2->vs << 1);
  1088. l2->vs = (l2->vs + 1) % 8;
  1089. }
  1090. spin_unlock_irqrestore(&l2->lock, flags);
  1091. p1 = skb->data - skb->head;
  1092. if (p1 >= i)
  1093. memcpy(skb_push(skb, i), header, i);
  1094. else {
  1095. printk(KERN_WARNING
  1096. "isdl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
  1097. oskb = skb;
  1098. skb = alloc_skb(oskb->len + i, GFP_ATOMIC);
  1099. memcpy(skb_put(skb, i), header, i);
  1100. skb_copy_from_linear_data(oskb,
  1101. skb_put(skb, oskb->len), oskb->len);
  1102. dev_kfree_skb(oskb);
  1103. }
  1104. st->l2.l2l1(st, PH_PULL | INDICATION, skb);
  1105. test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
  1106. if (!test_and_set_bit(FLG_T200_RUN, &st->l2.flag)) {
  1107. FsmDelTimer(&st->l2.t203, 13);
  1108. FsmAddTimer(&st->l2.t200, st->l2.T200, EV_L2_T200, NULL, 11);
  1109. }
  1110. if (!skb_queue_empty(&l2->i_queue) && cansend(st))
  1111. st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
  1112. }
static void
l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, rnr = 0;
	unsigned int nr;
	struct Layer2 *l2 = &st->l2;

	/*
	 * Supervisory frame received during timer recovery (ST_L2_8).
	 * A response with F=1 answers our enquiry poll and completes the
	 * recovery; anything else is processed for its N(R) only.
	 */
	rsp = *skb->data & 0x2;		/* C/R bit of the address field */
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;		/* C/R meaning flips for the originating side */
	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, st)) {
		set_peer_busy(l2);
		rnr = 1;
	} else
		clear_peer_busy(l2);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);
	if (rsp && PollFlag) {
		/* this is the answer to our enquiry: leave timer recovery */
		if (legalnr(st, nr)) {
			if (rnr) {
				restart_t200(st, 15);	/* peer still busy */
			} else {
				stop_t200(st, 16);
				FsmAddTimer(&l2->t203, l2->T203,
					    EV_L2_T203, NULL, 5);
				setva(st, nr);
			}
			invoke_retransmission(st, nr);
			FsmChangeState(fi, ST_L2_7);
			if (!skb_queue_empty(&l2->i_queue) && cansend(st))
				st->l2.l2l1(st, PH_PULL | REQUEST, NULL);
		} else
			nrerrorrecovery(fi);
	} else {
		if (!rsp && PollFlag)
			enquiry_response(st);	/* peer polled us: answer it */
		if (legalnr(st, nr)) {
			setva(st, nr);		/* take the acknowledgement */
		} else
			nrerrorrecovery(fi);
	}
}
static void
l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;
	struct sk_buff *skb = arg;

	/*
	 * Frame-reject (FRMR) received: the peer could not process one of
	 * our frames.  If the rejected frame was an I/S frame (or a UA in
	 * the established state), report MDL-ERROR 'K' and re-establish.
	 */
	skb_pull(skb, l2addrsize(&st->l2) + 1);
	if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) ||	/* I or S */
	    (IsUA(skb->data) && (fi->state == ST_L2_7))) {
		st->ma.layer(st, MDL_ERROR | INDICATION, (void *) 'K');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
	}
	dev_kfree_skb(skb);
}
static void
l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * TEI removed while no link is up (ST_L2_2/ST_L2_4): drop pending
	 * UI frames and return to the TEI-unassigned state.
	 */
	skb_queue_purge(&st->l2.ui_queue);
	st->l2.tei = -1;	/* -1 marks "no TEI assigned" */
	FsmChangeState(fi, ST_L2_1);
}
static void
l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * TEI removed while layer 3 was waiting for establishment
	 * (ST_L2_3): tell L3 the link is gone and go TEI-unassigned.
	 */
	skb_queue_purge(&st->l2.ui_queue);
	st->l2.tei = -1;	/* -1 marks "no TEI assigned" */
	st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
	FsmChangeState(fi, ST_L2_1);
}
static void
l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * TEI removed while establishment was pending (ST_L2_5): flush all
	 * queues and the ack window, stop T200, inform layer 3 and return
	 * to the TEI-unassigned state.
	 */
	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	freewin(st);		/* release clones held for retransmission */
	st->l2.tei = -1;
	stop_t200(st, 17);
	st5_dl_release_l2l3(st);
	FsmChangeState(fi, ST_L2_1);
}
static void
l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * TEI removed while release was pending (ST_L2_6): stop T200,
	 * confirm the release to layer 3 and go TEI-unassigned.
	 */
	skb_queue_purge(&st->l2.ui_queue);
	st->l2.tei = -1;
	stop_t200(st, 18);
	st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
	FsmChangeState(fi, ST_L2_1);
}
static void
l2_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * TEI removed while the link was up (ST_L2_7/ST_L2_8): full
	 * teardown - flush queues and window, stop both timers, indicate
	 * the release to layer 3 and go TEI-unassigned.
	 */
	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	freewin(st);		/* release clones held for retransmission */
	st->l2.tei = -1;
	stop_t200(st, 17);
	FsmDelTimer(&st->l2.t203, 19);
	st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
	FsmChangeState(fi, ST_L2_1);
}
static void
l2_st14_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * Layer 1 deactivated in ST_L2_1/ST_L2_4: discard queued data and,
	 * if an establish request was pending on activation, tell layer 3
	 * it will not happen.
	 */
	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	if (test_and_clear_bit(FLG_ESTAB_PEND, &st->l2.flag))
		st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
}
static void
l2_st5_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * Layer 1 deactivated during establishment (ST_L2_5): abort -
	 * flush queues/window, stop T200, release towards layer 3 and
	 * fall back to TEI-assigned (ST_L2_4).
	 */
	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	freewin(st);
	stop_t200(st, 19);
	st5_dl_release_l2l3(st);
	FsmChangeState(fi, ST_L2_4);
}
static void
l2_st6_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * Layer 1 deactivated during release (ST_L2_6): treat the release
	 * as done - stop T200, confirm to layer 3, back to ST_L2_4.
	 */
	skb_queue_purge(&st->l2.ui_queue);
	stop_t200(st, 20);
	st->l2.l2l3(st, DL_RELEASE | CONFIRM, NULL);
	FsmChangeState(fi, ST_L2_4);
}
static void
l2_persistent_da(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * Layer 1 deactivated with the link up (ST_L2_7/ST_L2_8): tear
	 * the data link down - flush everything, stop both timers,
	 * indicate the release to layer 3 and fall back to ST_L2_4.
	 */
	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	freewin(st);
	stop_t200(st, 19);
	FsmDelTimer(&st->l2.t203, 19);
	st->l2.l2l3(st, DL_RELEASE | INDICATION, NULL);
	FsmChangeState(fi, ST_L2_4);
}
static void
l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * Enter own-receiver-busy: on the 0->1 transition of FLG_OWN_BUSY
	 * announce RNR to the peer and cancel any pending acknowledgement
	 * (the RNR itself carries the current N(R)).
	 */
	if (!test_and_set_bit(FLG_OWN_BUSY, &st->l2.flag)) {
		enquiry_cr(st, RNR, RSP, 0);
		test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
	}
}
static void
l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * Leave own-receiver-busy and announce RR to the peer.
	 * NOTE(review): the condition fires when FLG_OWN_BUSY was already
	 * clear (test_and_clear returns the OLD value) - i.e. RR is sent
	 * only if we were NOT busy.  This mirrors l2_set_own_busy's shape
	 * but looks inverted; confirm against the mISDN counterpart
	 * before changing, as callers may rely on the historic behavior.
	 */
	if (!test_and_clear_bit(FLG_OWN_BUSY, &st->l2.flag)) {
		enquiry_cr(st, RR, RSP, 0);
		test_and_clear_bit(FLG_ACK_PEND, &st->l2.flag);
	}
}
static void
l2_frame_error(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/* Pass a frame error (single-char error code in arg) up to
	 * management as an MDL-ERROR indication. */
	st->ma.layer(st, MDL_ERROR | INDICATION, arg);
}
static void
l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
{
	struct PStack *st = fi->userdata;

	/*
	 * Frame error in an established state: report MDL-ERROR and
	 * re-establish the link (without the L3-initiated flag, so queued
	 * I-frames survive the re-establishment).
	 */
	st->ma.layer(st, MDL_ERROR | INDICATION, arg);
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &st->l2.flag);
}
/*
 * Layer-2 FSM transition table: {state, event, handler}.
 * Consumed once by FsmNew() at init time, hence __initdata.
 */
static struct FsmNode L2FnList[] __initdata =
{
	/* DL-ESTABLISH requests from layer 3 */
	{ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
	{ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
	{ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
	{ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
	{ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	/* DL-RELEASE requests from layer 3 */
	{ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
	{ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
	{ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	/* DL-DATA (acknowledged I-frame) requests */
	{ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
	{ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
	{ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
	/* DL-UNIT-DATA (unacknowledged UI-frame) requests */
	{ST_L2_1, EV_L2_DL_UNIT_DATA, l2_queue_ui_assign},
	{ST_L2_2, EV_L2_DL_UNIT_DATA, l2_queue_ui},
	{ST_L2_3, EV_L2_DL_UNIT_DATA, l2_queue_ui},
	{ST_L2_4, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_5, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_6, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_7, EV_L2_DL_UNIT_DATA, l2_send_ui},
	{ST_L2_8, EV_L2_DL_UNIT_DATA, l2_send_ui},
	/* TEI management (MDL primitives) */
	{ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
	{ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
	{ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
	{ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
	{ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
	{ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
	{ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
	/* Unnumbered frames from the peer */
	{ST_L2_4, EV_L2_SABME, l2_start_multi},
	{ST_L2_5, EV_L2_SABME, l2_send_UA},
	{ST_L2_6, EV_L2_SABME, l2_send_DM},
	{ST_L2_7, EV_L2_SABME, l2_restart_multi},
	{ST_L2_8, EV_L2_SABME, l2_restart_multi},
	{ST_L2_4, EV_L2_DISC, l2_send_DM},
	{ST_L2_5, EV_L2_DISC, l2_send_DM},
	{ST_L2_6, EV_L2_DISC, l2_send_UA},
	{ST_L2_7, EV_L2_DISC, l2_stop_multi},
	{ST_L2_8, EV_L2_DISC, l2_stop_multi},
	{ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_5, EV_L2_UA, l2_connected},
	{ST_L2_6, EV_L2_UA, l2_released},
	{ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_4, EV_L2_DM, l2_reestablish},
	{ST_L2_5, EV_L2_DM, l2_st5_dm_release},
	{ST_L2_6, EV_L2_DM, l2_st6_dm_release},
	{ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
	{ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
	{ST_L2_1, EV_L2_UI, l2_got_ui},
	{ST_L2_2, EV_L2_UI, l2_got_ui},
	{ST_L2_3, EV_L2_UI, l2_got_ui},
	{ST_L2_4, EV_L2_UI, l2_got_ui},
	{ST_L2_5, EV_L2_UI, l2_got_ui},
	{ST_L2_6, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_UI, l2_got_ui},
	{ST_L2_8, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
	{ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
	/* Supervisory and information frames from the peer */
	{ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
	{ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
	{ST_L2_7, EV_L2_I, l2_got_iframe},
	{ST_L2_8, EV_L2_I, l2_got_iframe},
	/* Timer expiries */
	{ST_L2_5, EV_L2_T200, l2_st5_tout_200},
	{ST_L2_6, EV_L2_T200, l2_st6_tout_200},
	{ST_L2_7, EV_L2_T200, l2_st7_tout_200},
	{ST_L2_8, EV_L2_T200, l2_st8_tout_200},
	{ST_L2_7, EV_L2_T203, l2_st7_tout_203},
	/* Transmission window / flow control */
	{ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
	{ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	/* Frame errors */
	{ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	/* Layer-1 deactivation */
	{ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistent_da},
	{ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
	{ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
	{ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistent_da},
	{ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistent_da},
	{ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistent_da},
	{ST_L2_7, EV_L1_DEACTIVATE, l2_persistent_da},
	{ST_L2_8, EV_L1_DEACTIVATE, l2_persistent_da},
};
static void
isdnl2_l1l2(struct PStack *st, int pr, void *arg)
{
	struct sk_buff *skb = arg;
	u_char *datap;
	int ret = 1, len;
	int c = 0;

	/*
	 * Entry point for primitives coming up from layer 1.  For
	 * PH_DATA|INDICATION the received frame is classified (I/S/UI/
	 * SABME/UA/DISC/DM/FRMR) and dispatched into the FSM.
	 *
	 * skb ownership: an FsmEvent returning 0 means a handler consumed
	 * (and will free) the skb; otherwise it is freed here.  `c` holds
	 * a nonzero error code from the *_error() validators, `ret` the
	 * "skb still ours" flag.
	 */
	switch (pr) {
	case (PH_DATA | INDICATION):
		datap = skb->data;
		len = l2addrsize(&st->l2);
		if (skb->len > len)
			datap += len;	/* skip address field to the control octet */
		else {
			/* frame too short to even hold the address field */
			FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *) 'N');
			dev_kfree_skb(skb);
			return;
		}
		if (!(*datap & 1)) {	/* I-Frame */
			if (!(c = iframe_error(st, skb)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_I, skb);
		} else if (IsSFrame(datap, st)) {	/* S-Frame */
			if (!(c = super_error(st, skb)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_SUPER, skb);
		} else if (IsUI(datap)) {
			if (!(c = UI_error(st, skb)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_UI, skb);
		} else if (IsSABME(datap, st)) {
			if (!(c = unnum_error(st, skb, CMD)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_SABME, skb);
		} else if (IsUA(datap)) {
			if (!(c = unnum_error(st, skb, RSP)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_UA, skb);
		} else if (IsDISC(datap)) {
			if (!(c = unnum_error(st, skb, CMD)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_DISC, skb);
		} else if (IsDM(datap)) {
			if (!(c = unnum_error(st, skb, RSP)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_DM, skb);
		} else if (IsFRMR(datap)) {
			if (!(c = FRMR_error(st, skb)))
				ret = FsmEvent(&st->l2.l2m, EV_L2_FRMR, skb);
		} else {
			/* unrecognized frame type -> MDL-ERROR 'L' */
			FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *) 'L');
			dev_kfree_skb(skb);
			ret = 0;
		}
		if (c) {
			/* validator rejected the frame: free it, report code */
			dev_kfree_skb(skb);
			FsmEvent(&st->l2.l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
			ret = 0;
		}
		if (ret)
			dev_kfree_skb(skb);	/* no handler took it */
		break;
	case (PH_PULL | CONFIRM):
		FsmEvent(&st->l2.l2m, EV_L2_ACK_PULL, arg);
		break;
	case (PH_PAUSE | INDICATION):
		test_and_set_bit(FLG_DCHAN_BUSY, &st->l2.flag);
		break;
	case (PH_PAUSE | CONFIRM):
		test_and_clear_bit(FLG_DCHAN_BUSY, &st->l2.flag);
		break;
	case (PH_ACTIVATE | CONFIRM):
	case (PH_ACTIVATE | INDICATION):
		test_and_set_bit(FLG_L1_ACTIV, &st->l2.flag);
		/* run the establishment that was deferred until L1 came up */
		if (test_and_clear_bit(FLG_ESTAB_PEND, &st->l2.flag))
			FsmEvent(&st->l2.l2m, EV_L2_DL_ESTABLISH_REQ, arg);
		break;
	case (PH_DEACTIVATE | INDICATION):
	case (PH_DEACTIVATE | CONFIRM):
		test_and_clear_bit(FLG_L1_ACTIV, &st->l2.flag);
		FsmEvent(&st->l2.l2m, EV_L1_DEACTIVATE, arg);
		break;
	default:
		l2m_debug(&st->l2.l2m, "l2 unknown pr %04x", pr);
		break;
	}
}
static void
isdnl2_l3l2(struct PStack *st, int pr, void *arg)
{
	/*
	 * Entry point for primitives coming down from layer 3.  DL-DATA
	 * and DL-UNIT-DATA carry an skb that must be freed if no FSM
	 * handler consumed it (nonzero FsmEvent return).
	 */
	switch (pr) {
	case (DL_DATA | REQUEST):
		if (FsmEvent(&st->l2.l2m, EV_L2_DL_DATA, arg)) {
			dev_kfree_skb((struct sk_buff *) arg);
		}
		break;
	case (DL_UNIT_DATA | REQUEST):
		if (FsmEvent(&st->l2.l2m, EV_L2_DL_UNIT_DATA, arg)) {
			dev_kfree_skb((struct sk_buff *) arg);
		}
		break;
	case (DL_ESTABLISH | REQUEST):
		if (test_bit(FLG_L1_ACTIV, &st->l2.flag)) {
			/* L1 is up: establishment allowed only for LAPD or
			 * the originating LAPB side */
			if (test_bit(FLG_LAPD, &st->l2.flag) ||
			    test_bit(FLG_ORIG, &st->l2.flag)) {
				FsmEvent(&st->l2.l2m, EV_L2_DL_ESTABLISH_REQ, arg);
			}
		} else {
			/* L1 down: remember the request and activate L1 first */
			if (test_bit(FLG_LAPD, &st->l2.flag) ||
			    test_bit(FLG_ORIG, &st->l2.flag)) {
				test_and_set_bit(FLG_ESTAB_PEND, &st->l2.flag);
			}
			st->l2.l2l1(st, PH_ACTIVATE, NULL);
		}
		break;
	case (DL_RELEASE | REQUEST):
		if (test_bit(FLG_LAPB, &st->l2.flag)) {
			/* LAPB also tears the physical connection down */
			st->l2.l2l1(st, PH_DEACTIVATE, NULL);
		}
		FsmEvent(&st->l2.l2m, EV_L2_DL_RELEASE_REQ, arg);
		break;
	case (MDL_ASSIGN | REQUEST):
		FsmEvent(&st->l2.l2m, EV_L2_MDL_ASSIGN, arg);
		break;
	case (MDL_REMOVE | REQUEST):
		FsmEvent(&st->l2.l2m, EV_L2_MDL_REMOVE, arg);
		break;
	case (MDL_ERROR | RESPONSE):
		FsmEvent(&st->l2.l2m, EV_L2_MDL_ERROR, arg);
		break;
	}
}
/*
 * Tear down the layer-2 part of a stack: cancel both protocol timers,
 * drop all queued frames and free the acknowledge window.
 * Counterpart of setstack_isdnl2().
 */
void
releasestack_isdnl2(struct PStack *st)
{
	FsmDelTimer(&st->l2.t200, 21);
	FsmDelTimer(&st->l2.t203, 16);
	skb_queue_purge(&st->l2.i_queue);
	skb_queue_purge(&st->l2.ui_queue);
	ReleaseWin(&st->l2);
}
/*
 * printf-style debug callback for the layer-2 FSM; routes messages to
 * the HiSax status log tagged with this stack's debug_id.
 */
static void
l2m_debug(struct FsmInst *fi, char *fmt, ...)
{
	va_list args;
	struct PStack *st = fi->userdata;

	va_start(args, fmt);
	VHiSax_putstatus(st->l1.hardware, st->l2.debug_id, fmt, args);
	va_end(args);
}
/*
 * Initialize the layer-2 part of a protocol stack: wire up the L1->L2
 * and L3->L2 entry points, set up queues, window, FSM instance and
 * timers.  LAPB links start TEI-assigned (ST_L2_4); LAPD links start
 * TEI-unassigned (ST_L2_1).  FLG_LAPB/FLG_LAPD must be set by the
 * caller before this runs.
 */
void
setstack_isdnl2(struct PStack *st, char *debug_id)
{
	spin_lock_init(&st->l2.lock);
	st->l1.l1l2 = isdnl2_l1l2;
	st->l3.l3l2 = isdnl2_l3l2;
	skb_queue_head_init(&st->l2.i_queue);
	skb_queue_head_init(&st->l2.ui_queue);
	InitWin(&st->l2);
	st->l2.debug = 0;
	st->l2.l2m.fsm = &l2fsm;
	if (test_bit(FLG_LAPB, &st->l2.flag))
		st->l2.l2m.state = ST_L2_4;
	else
		st->l2.l2m.state = ST_L2_1;
	st->l2.l2m.debug = 0;
	st->l2.l2m.userdata = st;
	st->l2.l2m.userint = 0;
	st->l2.l2m.printdebug = l2m_debug;
	strcpy(st->l2.debug_id, debug_id);
	FsmInitTimer(&st->l2.l2m, &st->l2.t200);
	FsmInitTimer(&st->l2.l2m, &st->l2.t203);
}
  1559. static void
  1560. transl2_l3l2(struct PStack *st, int pr, void *arg)
  1561. {
  1562. switch (pr) {
  1563. case (DL_DATA | REQUEST):
  1564. case (DL_UNIT_DATA | REQUEST):
  1565. st->l2.l2l1(st, PH_DATA | REQUEST, arg);
  1566. break;
  1567. case (DL_ESTABLISH | REQUEST):
  1568. st->l2.l2l1(st, PH_ACTIVATE | REQUEST, NULL);
  1569. break;
  1570. case (DL_RELEASE | REQUEST):
  1571. st->l2.l2l1(st, PH_DEACTIVATE | REQUEST, NULL);
  1572. break;
  1573. }
  1574. }
/* Install the transparent layer 2: only the L3->L2 hook is needed. */
void
setstack_transl2(struct PStack *st)
{
	st->l3.l3l2 = transl2_l3l2;
}
/*
 * Counterpart of setstack_transl2().  Intentionally empty: the
 * transparent layer 2 allocates no resources of its own.
 */
void
releasestack_transl2(struct PStack *st)
{
}
/*
 * Module init: build the shared layer-2 FSM from the transition table.
 * Returns the FsmNew() result (0 on success per its contract; confirm
 * against the FSM core).
 */
int __init
Isdnl2New(void)
{
	l2fsm.state_count = L2_STATE_COUNT;
	l2fsm.event_count = L2_EVENT_COUNT;
	l2fsm.strEvent = strL2Event;
	l2fsm.strState = strL2State;
	return FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
}
/* Module exit: release the FSM jump table built by Isdnl2New(). */
void
Isdnl2Free(void)
{
	FsmFree(&l2fsm);
}