layer2.c 50 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216
  1. /*
  2. *
  3. * Author Karsten Keil <kkeil@novell.com>
  4. *
  5. * Copyright 2008 by Karsten Keil <kkeil@novell.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. */
  17. #include "fsm.h"
  18. #include "layer2.h"
/* pointer to the module-wide debug bitmask (set by the owning module) */
static int *debug;

/* the shared layer-2 finite state machine description; filled in at
 * module init (state/event counts, jump table) — zeroed here */
static
struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL};

/* printable names for the Q.921 states ST_L2_1..ST_L2_8 (debug output) */
static char *strL2State[] =
{
	"ST_L2_1",
	"ST_L2_2",
	"ST_L2_3",
	"ST_L2_4",
	"ST_L2_5",
	"ST_L2_6",
	"ST_L2_7",
	"ST_L2_8",
};

/* events fed into the layer-2 FSM: received frame types, requests from
 * layer 3, management (MDL) primitives, layer-1 indications and timers */
enum {
	EV_L2_UI,
	EV_L2_SABME,
	EV_L2_DISC,
	EV_L2_DM,
	EV_L2_UA,
	EV_L2_FRMR,
	EV_L2_SUPER,
	EV_L2_I,
	EV_L2_DL_DATA,
	EV_L2_ACK_PULL,
	EV_L2_DL_UNITDATA,
	EV_L2_DL_ESTABLISH_REQ,
	EV_L2_DL_RELEASE_REQ,
	EV_L2_MDL_ASSIGN,
	EV_L2_MDL_REMOVE,
	EV_L2_MDL_ERROR,
	EV_L1_DEACTIVATE,
	EV_L2_T200,
	EV_L2_T203,
	EV_L2_SET_OWN_BUSY,
	EV_L2_CLEAR_OWN_BUSY,
	EV_L2_FRAME_ERROR,
};

/* total number of FSM events (last enum value + 1) */
#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1)

/* printable names matching the event enum above, same order (debug output) */
static char *strL2Event[] =
{
	"EV_L2_UI",
	"EV_L2_SABME",
	"EV_L2_DISC",
	"EV_L2_DM",
	"EV_L2_UA",
	"EV_L2_FRMR",
	"EV_L2_SUPER",
	"EV_L2_I",
	"EV_L2_DL_DATA",
	"EV_L2_ACK_PULL",
	"EV_L2_DL_UNITDATA",
	"EV_L2_DL_ESTABLISH_REQ",
	"EV_L2_DL_RELEASE_REQ",
	"EV_L2_MDL_ASSIGN",
	"EV_L2_MDL_REMOVE",
	"EV_L2_MDL_ERROR",
	"EV_L1_DEACTIVATE",
	"EV_L2_T200",
	"EV_L2_T203",
	"EV_L2_SET_OWN_BUSY",
	"EV_L2_CLEAR_OWN_BUSY",
	"EV_L2_FRAME_ERROR",
};
/*
 * FSM debug callback: prefix the message with the TEI of this layer-2
 * instance and emit it via printk.  Silent unless the DEBUG_L2_FSM bit
 * is set in the module debug mask.
 */
static void
l2m_debug(struct FsmInst *fi, char *fmt, ...)
{
	struct layer2 *l2 = fi->userdata;
	va_list va;

	if (!(*debug & DEBUG_L2_FSM))
		return;
	va_start(va, fmt);
	printk(KERN_DEBUG "l2 (tei %d): ", l2->tei);
	vprintk(fmt, va);
	printk("\n");
	va_end(va);
}
  96. inline u_int
  97. l2headersize(struct layer2 *l2, int ui)
  98. {
  99. return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
  100. (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
  101. }
  102. inline u_int
  103. l2addrsize(struct layer2 *l2)
  104. {
  105. return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1;
  106. }
  107. static u_int
  108. l2_newid(struct layer2 *l2)
  109. {
  110. u_int id;
  111. id = l2->next_id++;
  112. if (id == 0x7fff)
  113. l2->next_id = 1;
  114. id <<= 16;
  115. id |= l2->tei << 8;
  116. id |= l2->sapi;
  117. return id;
  118. }
/*
 * Deliver an skb upward with the given primitive.  The skb's mISDN
 * header is rewritten in place; on send failure the skb is freed here.
 * NOTE(review): if no upper layer is attached the skb is neither sent
 * nor freed — looks like a potential leak; confirm callers' ownership
 * expectations before changing.
 */
static void
l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
{
	int err;

	if (!l2->up)
		return;
	mISDN_HEAD_PRIM(skb) = prim;
	/* encode channel number and address into the header id */
	mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
	err = l2->up->send(l2->up, skb);
	if (err) {
		printk(KERN_WARNING "%s: err=%d\n", __func__, err);
		dev_kfree_skb(skb);
	}
}
/*
 * Allocate a fresh skb, copy in an optional payload of 'len' bytes from
 * 'arg', and send it upward with the given primitive.  Used when no
 * received skb is available for reuse.  Allocation failure is silently
 * ignored (atomic context); send failure frees the skb.
 */
static void
l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
{
	struct sk_buff *skb;
	struct mISDNhead *hh;
	int err;

	if (!l2->up)
		return;
	skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;
	hh = mISDN_HEAD_P(skb);
	hh->prim = prim;
	hh->id = (l2->ch.nr << 16) | l2->ch.addr;
	if (len)
		memcpy(skb_put(skb, len), arg, len);
	err = l2->up->send(l2->up, skb);
	if (err) {
		printk(KERN_WARNING "%s: err=%d\n", __func__, err);
		dev_kfree_skb(skb);
	}
}
  155. static int
  156. l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
  157. int ret;
  158. ret = l2->ch.recv(l2->ch.peer, skb);
  159. if (ret && (*debug & DEBUG_L2_RECV))
  160. printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret);
  161. return ret;
  162. }
/*
 * Send an skb downward, honouring layer-1 flow control for data frames.
 * If layer 1 is not ready, PH_DATA_REQ frames are parked on down_queue
 * and drained later from ph_data_confirm(); other primitives bypass the
 * queue.  down_id remembers the frame we are waiting to be confirmed.
 */
static int
l2down_raw(struct layer2 *l2, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);

	if (hh->prim == PH_DATA_REQ) {
		/* already a frame in flight -> queue this one */
		if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
			skb_queue_tail(&l2->down_queue, skb);
			return 0;
		}
		l2->down_id = mISDN_HEAD_ID(skb);
	}
	return l2down_skb(l2, skb);
}
/*
 * Stamp primitive and id into the skb's mISDN header, then send it
 * downward through the flow-controlled path.
 */
static int
l2down(struct layer2 *l2, u_int prim, u_int id, struct sk_buff *skb)
{
	struct mISDNhead *hh = mISDN_HEAD_P(skb);

	hh->prim = prim;
	hh->id = id;
	return l2down_raw(l2, skb);
}
/*
 * Allocate an skb, copy an optional 'len'-byte payload from 'arg' and
 * send it downward with the given primitive/id.  Returns 0 on success,
 * -ENOMEM on allocation failure or the error from l2down_raw(); the skb
 * is freed here on send failure.
 */
static int
l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg)
{
	struct sk_buff *skb;
	int err;
	struct mISDNhead *hh;

	skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	hh = mISDN_HEAD_P(skb);
	hh->prim = prim;
	hh->id = id;
	if (len)
		memcpy(skb_put(skb, len), arg, len);
	err = l2down_raw(l2, skb);
	if (err)
		dev_kfree_skb(skb);
	return err;
}
/*
 * Handle PH_DATA_CNF from layer 1: the frame identified by hh->id has
 * been transmitted, so the next queued frame (if any) may be pushed
 * down.  Consumes the confirm skb when it matches the outstanding
 * down_id and returns 0; otherwise returns -EAGAIN so the caller keeps
 * ownership.  When the down queue empties, FLG_L1_NOTREADY is cleared
 * and an EV_L2_ACK_PULL event lets the FSM refill the pipeline.
 */
static int
ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
	struct sk_buff *nskb = skb;
	int ret = -EAGAIN;

	if (test_bit(FLG_L1_NOTREADY, &l2->flag)) {
		if (hh->id == l2->down_id) {
			/* confirmed frame matches: launch the next queued one */
			nskb = skb_dequeue(&l2->down_queue);
			if (nskb) {
				l2->down_id = mISDN_HEAD_ID(nskb);
				if (l2down_skb(l2, nskb)) {
					dev_kfree_skb(nskb);
					l2->down_id = MISDN_ID_NONE;
				}
			} else
				l2->down_id = MISDN_ID_NONE;
			/* ret is still -EAGAIN here, so this always consumes
			 * the confirm skb and reports success */
			if (ret) {
				dev_kfree_skb(skb);
				ret = 0;
			}
			if (l2->down_id == MISDN_ID_NONE) {
				test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
				mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
			}
		}
	}
	/* if layer 1 became ready again, restart transmission from the queue */
	if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
		nskb = skb_dequeue(&l2->down_queue);
		if (nskb) {
			l2->down_id = mISDN_HEAD_ID(nskb);
			if (l2down_skb(l2, nskb)) {
				dev_kfree_skb(nskb);
				l2->down_id = MISDN_ID_NONE;
				test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
			}
		} else
			test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
	}
	return ret;
}
/*
 * Report a protocol error (single-character code in 'arg', per the
 * Q.921 MDL-ERROR convention) to the management entity.  For LAPD
 * links without a fixed TEI, error codes C/D/G/H are forwarded to the
 * TEI manager, which may trigger TEI removal/re-assignment.
 */
static int
l2mgr(struct layer2 *l2, u_int prim, void *arg) {
	long c = (long)arg;

	printk(KERN_WARNING
	       "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c);
	if (test_bit(FLG_LAPD, &l2->flag) &&
	    !test_bit(FLG_FIXED_TEI, &l2->flag)) {
		switch (c) {
		case 'C':
		case 'D':
		case 'G':
		case 'H':
			l2_tei(l2, prim, (u_long)arg);
			break;
		}
	}
	return 0;
}
  260. static void
  261. set_peer_busy(struct layer2 *l2) {
  262. test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
  263. if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
  264. test_and_set_bit(FLG_L2BLOCK, &l2->flag);
  265. }
/*
 * Clear the peer-busy condition; the transmit block is lifted only if
 * the peer had actually been marked busy.
 */
static void
clear_peer_busy(struct layer2 *l2) {
	if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
		test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
}
  271. static void
  272. InitWin(struct layer2 *l2)
  273. {
  274. int i;
  275. for (i = 0; i < MAX_WINDOW; i++)
  276. l2->windowar[i] = NULL;
  277. }
  278. static int
  279. freewin(struct layer2 *l2)
  280. {
  281. int i, cnt = 0;
  282. for (i = 0; i < MAX_WINDOW; i++) {
  283. if (l2->windowar[i]) {
  284. cnt++;
  285. dev_kfree_skb(l2->windowar[i]);
  286. l2->windowar[i] = NULL;
  287. }
  288. }
  289. return cnt;
  290. }
  291. static void
  292. ReleaseWin(struct layer2 *l2)
  293. {
  294. int cnt = freewin(l2);
  295. if (cnt)
  296. printk(KERN_WARNING
  297. "isdnl2 freed %d skbuffs in release\n", cnt);
  298. }
  299. inline unsigned int
  300. cansend(struct layer2 *l2)
  301. {
  302. unsigned int p1;
  303. if (test_bit(FLG_MOD128, &l2->flag))
  304. p1 = (l2->vs - l2->va) % 128;
  305. else
  306. p1 = (l2->vs - l2->va) % 8;
  307. return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag);
  308. }
/*
 * Leave all exception conditions: pending acknowledge, REJ exception,
 * own-receiver-busy and peer-receiver-busy.
 */
inline void
clear_exception(struct layer2 *l2)
{
	test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
	test_and_clear_bit(FLG_REJEXC, &l2->flag);
	test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
	clear_peer_busy(l2);
}
/*
 * Write the layer-2 address field into 'header'.  'rsp' selects the
 * command/response encoding; on network side (LAPD) and originator side
 * (LAPB) the C/R bit is inverted.  Returns the number of octets
 * written: 2 for LAPD (SAPI+C/R, TEI+EA), 1 for LAPB (address A or B).
 */
static int
sethdraddr(struct layer2 *l2, u_char *header, int rsp)
{
	u_char *ptr = header;
	int crbit = rsp;

	if (test_bit(FLG_LAPD, &l2->flag)) {
		if (test_bit(FLG_LAPD_NET, &l2->flag))
			crbit = !crbit;
		*ptr++ = (l2->sapi << 2) | (crbit ? 2 : 0);
		*ptr++ = (l2->tei << 1) | 1;	/* EA bit set */
		return 2;
	} else {
		if (test_bit(FLG_ORIG, &l2->flag))
			crbit = !crbit;
		if (crbit)
			*ptr++ = l2->addr.B;
		else
			*ptr++ = l2->addr.A;
		return 1;
	}
}
  338. static inline void
  339. enqueue_super(struct layer2 *l2, struct sk_buff *skb)
  340. {
  341. if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
  342. dev_kfree_skb(skb);
  343. }
  344. static inline void
  345. enqueue_ui(struct layer2 *l2, struct sk_buff *skb)
  346. {
  347. if (l2->tm)
  348. l2_tei(l2, MDL_STATUS_UI_IND, 0);
  349. if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
  350. dev_kfree_skb(skb);
  351. }
  352. inline int
  353. IsUI(u_char *data)
  354. {
  355. return (data[0] & 0xef) == UI;
  356. }
  357. inline int
  358. IsUA(u_char *data)
  359. {
  360. return (data[0] & 0xef) == UA;
  361. }
  362. inline int
  363. IsDM(u_char *data)
  364. {
  365. return (data[0] & 0xef) == DM;
  366. }
  367. inline int
  368. IsDISC(u_char *data)
  369. {
  370. return (data[0] & 0xef) == DISC;
  371. }
  372. inline int
  373. IsRR(u_char *data, struct layer2 *l2)
  374. {
  375. if (test_bit(FLG_MOD128, &l2->flag))
  376. return data[0] == RR;
  377. else
  378. return (data[0] & 0xf) == 1;
  379. }
  380. inline int
  381. IsSFrame(u_char *data, struct layer2 *l2)
  382. {
  383. register u_char d = *data;
  384. if (!test_bit(FLG_MOD128, &l2->flag))
  385. d &= 0xf;
  386. return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c);
  387. }
  388. inline int
  389. IsSABME(u_char *data, struct layer2 *l2)
  390. {
  391. u_char d = data[0] & ~0x10;
  392. return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM;
  393. }
  394. inline int
  395. IsREJ(u_char *data, struct layer2 *l2)
  396. {
  397. return test_bit(FLG_MOD128, &l2->flag) ?
  398. data[0] == REJ : (data[0] & 0xf) == REJ;
  399. }
  400. inline int
  401. IsFRMR(u_char *data)
  402. {
  403. return (data[0] & 0xef) == FRMR;
  404. }
  405. inline int
  406. IsRNR(u_char *data, struct layer2 *l2)
  407. {
  408. return test_bit(FLG_MOD128, &l2->flag) ?
  409. data[0] == RNR : (data[0] & 0xf) == RNR;
  410. }
  411. int
  412. iframe_error(struct layer2 *l2, struct sk_buff *skb)
  413. {
  414. u_int i;
  415. int rsp = *skb->data & 0x2;
  416. i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1);
  417. if (test_bit(FLG_ORIG, &l2->flag))
  418. rsp = !rsp;
  419. if (rsp)
  420. return 'L';
  421. if (skb->len < i)
  422. return 'N';
  423. if ((skb->len - i) > l2->maxlen)
  424. return 'O';
  425. return 0;
  426. }
  427. int
  428. super_error(struct layer2 *l2, struct sk_buff *skb)
  429. {
  430. if (skb->len != l2addrsize(l2) +
  431. (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1))
  432. return 'N';
  433. return 0;
  434. }
/*
 * Validate a received U frame.  'wantrsp' says whether a response
 * (rather than a command) is expected here.  Returns 'L' on a C/R
 * mismatch, 'N' on wrong length (address + 1 control octet), 0 if ok.
 */
int
unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp)
{
	int rsp = (*skb->data & 0x2) >> 1;

	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	if (rsp != wantrsp)
		return 'L';
	if (skb->len != l2addrsize(l2) + 1)
		return 'N';
	return 0;
}
/*
 * Validate a received UI frame.  Returns 'L' if it came as a response
 * (UI is a command), 'O' when the payload exceeds the maximum frame
 * size, 0 when acceptable.
 */
int
UI_error(struct layer2 *l2, struct sk_buff *skb)
{
	int rsp = *skb->data & 0x2;

	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	if (rsp)
		return 'L';
	if (skb->len > l2->maxlen + l2addrsize(l2) + 1)
		return 'O';
	return 0;
}
/*
 * Validate a received FRMR response and dump its information field
 * when L2 debugging is on.  FRMR must be a response ('L' otherwise)
 * and carries 5 info octets in modulo-128 mode, 3 in modulo-8 mode
 * ('N' when too short).  Returns 0 when acceptable.
 */
int
FRMR_error(struct layer2 *l2, struct sk_buff *skb)
{
	u_int headers = l2addrsize(l2) + 1;
	u_char *datap = skb->data + headers;
	int rsp = *skb->data & 0x2;

	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	if (!rsp)
		return 'L';
	if (test_bit(FLG_MOD128, &l2->flag)) {
		if (skb->len < headers + 5)
			return 'N';
		else if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m,
				  "FRMR information %2x %2x %2x %2x %2x",
				  datap[0], datap[1], datap[2], datap[3], datap[4]);
	} else {
		if (skb->len < headers + 3)
			return 'N';
		else if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m,
				  "FRMR information %2x %2x %2x",
				  datap[0], datap[1], datap[2]);
	}
	return 0;
}
  486. static unsigned int
  487. legalnr(struct layer2 *l2, unsigned int nr)
  488. {
  489. if (test_bit(FLG_MOD128, &l2->flag))
  490. return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
  491. else
  492. return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
  493. }
/*
 * Advance V(A) up to the acknowledged sequence number 'nr'.  Each
 * acknowledged window slot is emptied: the stored skb is trimmed and
 * parked on tmp_queue, then all parked skbs are freed after the loop.
 * 'sow' (start of window) walks forward modulo the window size.
 */
static void
setva(struct layer2 *l2, unsigned int nr)
{
	struct sk_buff *skb;

	while (l2->va != nr) {
		l2->va++;
		if (test_bit(FLG_MOD128, &l2->flag))
			l2->va %= 128;
		else
			l2->va %= 8;
		if (l2->windowar[l2->sow]) {
			skb_trim(l2->windowar[l2->sow], 0);
			skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]);
			l2->windowar[l2->sow] = NULL;
		}
		l2->sow = (l2->sow + 1) % l2->window;
	}
	/* release everything collected above */
	skb = skb_dequeue(&l2->tmp_queue);
	while (skb) {
		dev_kfree_skb(skb);
		skb = skb_dequeue(&l2->tmp_queue);
	}
}
/*
 * Build and transmit an unnumbered frame with control octet 'cmd' and
 * C/R selector 'cr'.  If the caller supplies an skb it is reused
 * (trimmed to zero); otherwise a new one is allocated.  On allocation
 * failure the frame is silently dropped (atomic context).
 */
static void
send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
{
	u_char tmp[MAX_L2HEADER_LEN];
	int i;

	i = sethdraddr(l2, tmp, cr);
	tmp[i++] = cmd;
	if (skb)
		skb_trim(skb, 0);
	else {
		skb = mI_alloc_skb(i, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_WARNING "%s: can't alloc skbuff\n",
			       __func__);
			return;
		}
	}
	memcpy(skb_put(skb, i), tmp, i);
	enqueue_super(l2, skb);
}
/* Extract the P/F bit (0x10) from the control octet after the address. */
inline u_char
get_PollFlag(struct layer2 *l2, struct sk_buff *skb)
{
	return skb->data[l2addrsize(l2)] & 0x10;
}
  542. inline u_char
  543. get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb)
  544. {
  545. u_char PF;
  546. PF = get_PollFlag(l2, skb);
  547. dev_kfree_skb(skb);
  548. return PF;
  549. }
/* Start the retransmission timer T200 and remember that it runs.
 * 'i' tags the call site for FSM timer debugging. */
inline void
start_t200(struct layer2 *l2, int i)
{
	mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &l2->flag);
}
/* Restart T200 (resetting any running instance) and mark it running. */
inline void
restart_t200(struct layer2 *l2, int i)
{
	mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &l2->flag);
}
/* Stop T200, but only if it was marked running. */
inline void
stop_t200(struct layer2 *l2, int i)
{
	if (test_and_clear_bit(FLG_T200_RUN, &l2->flag))
		mISDN_FsmDelTimer(&l2->t200, i);
}
  568. inline void
  569. st5_dl_release_l2l3(struct layer2 *l2)
  570. {
  571. int pr;
  572. if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
  573. pr = DL_RELEASE_CNF;
  574. else
  575. pr = DL_RELEASE_IND;
  576. l2up_create(l2, pr, 0, NULL);
  577. }
/*
 * Signal link release (primitive 'f') to layer 3; on LAPB links the
 * physical layer is deactivated as well.
 */
inline void
lapb_dl_release_l2l3(struct layer2 *l2, int f)
{
	if (test_bit(FLG_LAPB, &l2->flag))
		l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL);
	l2up_create(l2, f, 0, NULL);
}
/*
 * Initiate link establishment: clear all exception conditions, send
 * SABME/SABM with the P bit (0x10) set, (re)start T200, drop the
 * retransmission window and enter awaiting-establishment state 5.
 */
static void
establishlink(struct FsmInst *fi)
{
	struct layer2 *l2 = fi->userdata;
	u_char cmd;

	clear_exception(l2);
	l2->rc = 0;	/* retry counter */
	cmd = (test_bit(FLG_MOD128, &l2->flag) ? SABME : SABM) | 0x10;
	send_uframe(l2, NULL, cmd, CMD);
	mISDN_FsmDelTimer(&l2->t203, 1);
	restart_t200(l2, 1);
	test_and_clear_bit(FLG_PEND_REL, &l2->flag);
	freewin(l2);
	mISDN_FsmChangeState(fi, ST_L2_5);
}
  600. static void
  601. l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
  602. {
  603. struct sk_buff *skb = arg;
  604. struct layer2 *l2 = fi->userdata;
  605. if (get_PollFlagFree(l2, skb))
  606. l2mgr(l2, MDL_ERROR_IND, (void *) 'C');
  607. else
  608. l2mgr(l2, MDL_ERROR_IND, (void *) 'D');
  609. }
/*
 * Unexpected DM received: with F=1 report MDL-ERROR 'B'; with F=0
 * report 'E' and re-establish the link (without the L3-initiated flag,
 * so no DL_ESTABLISH confirm will be sent).  The skb is consumed.
 */
static void
l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct layer2 *l2 = fi->userdata;

	if (get_PollFlagFree(l2, skb))
		l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
	else {
		l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
	}
}
/*
 * DM received in state 8 (timer recovery): report MDL-ERROR 'B' (F=1)
 * or 'E' (F=0), then always re-establish the link.  Differs from
 * l2_mdl_error_dm(), which re-establishes only in the F=0 case.
 */
static void
l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct layer2 *l2 = fi->userdata;

	if (get_PollFlagFree(l2, skb))
		l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
	else
		l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
  635. static void
  636. l2_go_st3(struct FsmInst *fi, int event, void *arg)
  637. {
  638. dev_kfree_skb((struct sk_buff *)arg);
  639. mISDN_FsmChangeState(fi, ST_L2_3);
  640. }
  641. static void
  642. l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
  643. {
  644. struct layer2 *l2 = fi->userdata;
  645. mISDN_FsmChangeState(fi, ST_L2_3);
  646. dev_kfree_skb((struct sk_buff *)arg);
  647. l2_tei(l2, MDL_ASSIGN_IND, 0);
  648. }
  649. static void
  650. l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
  651. {
  652. struct layer2 *l2 = fi->userdata;
  653. struct sk_buff *skb = arg;
  654. skb_queue_tail(&l2->ui_queue, skb);
  655. mISDN_FsmChangeState(fi, ST_L2_2);
  656. l2_tei(l2, MDL_ASSIGN_IND, 0);
  657. }
  658. static void
  659. l2_queue_ui(struct FsmInst *fi, int event, void *arg)
  660. {
  661. struct layer2 *l2 = fi->userdata;
  662. struct sk_buff *skb = arg;
  663. skb_queue_tail(&l2->ui_queue, skb);
  664. }
/*
 * Flush the UI queue: build the UI header once, prepend it to every
 * queued frame and hand them down.  On the network side the TEI octet
 * (header[1], written by sethdraddr for the 2-byte LAPD address) is
 * overwritten with the broadcast TEI 127.
 */
static void
tx_ui(struct layer2 *l2)
{
	struct sk_buff *skb;
	u_char header[MAX_L2HEADER_LEN];
	int i;

	i = sethdraddr(l2, header, CMD);
	if (test_bit(FLG_LAPD_NET, &l2->flag))
		header[1] = 0xff; /* tei 127 */
	header[i++] = UI;
	while ((skb = skb_dequeue(&l2->ui_queue))) {
		memcpy(skb_push(skb, i), header, i);
		enqueue_ui(l2, skb);
	}
}
  680. static void
  681. l2_send_ui(struct FsmInst *fi, int event, void *arg)
  682. {
  683. struct layer2 *l2 = fi->userdata;
  684. struct sk_buff *skb = arg;
  685. skb_queue_tail(&l2->ui_queue, skb);
  686. tx_ui(l2);
  687. }
/*
 * Deliver a received UI frame upward as DL_UNITDATA_IND after stripping
 * the layer-2 header; the TEI manager is notified of UI activity.
 */
static void
l2_got_ui(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_pull(skb, l2headersize(l2, 1));
/*
 * in states 1-3 for broadcast
 */
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UI_IND, 0);
	l2up(l2, DL_UNITDATA_IND, skb);
}
  701. static void
  702. l2_establish(struct FsmInst *fi, int event, void *arg)
  703. {
  704. struct sk_buff *skb = arg;
  705. struct layer2 *l2 = fi->userdata;
  706. establishlink(fi);
  707. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  708. dev_kfree_skb(skb);
  709. }
  710. static void
  711. l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
  712. {
  713. struct sk_buff *skb = arg;
  714. struct layer2 *l2 = fi->userdata;
  715. skb_queue_purge(&l2->i_queue);
  716. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  717. test_and_clear_bit(FLG_PEND_REL, &l2->flag);
  718. dev_kfree_skb(skb);
  719. }
  720. static void
  721. l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
  722. {
  723. struct sk_buff *skb = arg;
  724. struct layer2 *l2 = fi->userdata;
  725. skb_queue_purge(&l2->i_queue);
  726. establishlink(fi);
  727. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  728. dev_kfree_skb(skb);
  729. }
/*
 * Release request while already released: confirm immediately.  The
 * request skb is reused (trimmed to zero) for the DL_RELEASE_CNF.
 */
static void
l2_release(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_trim(skb, 0);
	l2up(l2, DL_RELEASE_CNF, skb);
}
  738. static void
  739. l2_pend_rel(struct FsmInst *fi, int event, void *arg)
  740. {
  741. struct sk_buff *skb = arg;
  742. struct layer2 *l2 = fi->userdata;
  743. test_and_set_bit(FLG_PEND_REL, &l2->flag);
  744. dev_kfree_skb(skb);
  745. }
/*
 * Start link teardown: drop all pending and unacknowledged I frames,
 * enter awaiting-release state 6 and send DISC with P=1 (0x10), guarded
 * by T200.  'arg' may be NULL when invoked internally (e.g. from
 * l2_connected on a pending release).
 */
static void
l2_disconnect(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	freewin(l2);
	mISDN_FsmChangeState(fi, ST_L2_6);
	l2->rc = 0;	/* reset retry counter */
	send_uframe(l2, NULL, DISC | 0x10, CMD);
	mISDN_FsmDelTimer(&l2->t203, 1);
	restart_t200(l2, 2);
	if (skb)
		dev_kfree_skb(skb);
}
/*
 * SABME/SABM received while down: reset all sequence state, answer
 * with UA (echoing the P/F bit), enter multi-frame established state 7,
 * start the idle timer T203 and indicate DL_ESTABLISH to layer 3
 * (reusing the received skb, trimmed to zero).
 */
static void
l2_start_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	l2->vs = 0;
	l2->va = 0;
	l2->vr = 0;
	l2->sow = 0;
	clear_exception(l2);
	send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP);
	mISDN_FsmChangeState(fi, ST_L2_7);
	mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
	skb_trim(skb, 0);
	l2up(l2, DL_ESTABLISH_IND, skb);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UP_IND, 0);
}
  779. static void
  780. l2_send_UA(struct FsmInst *fi, int event, void *arg)
  781. {
  782. struct layer2 *l2 = fi->userdata;
  783. struct sk_buff *skb = arg;
  784. send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
  785. }
  786. static void
  787. l2_send_DM(struct FsmInst *fi, int event, void *arg)
  788. {
  789. struct layer2 *l2 = fi->userdata;
  790. struct sk_buff *skb = arg;
  791. send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP);
  792. }
/*
 * SABME/SABM received while already established (peer re-establish):
 * acknowledge with UA, report MDL-ERROR 'F', reset sequence state and
 * timers and stay in state 7.  If unacknowledged I frames were lost
 * (vs != va) the queue is purged and a fresh DL_ESTABLISH_IND is sent
 * so layer 3 can recover.
 */
static void
l2_restart_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int est = 0;

	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
	l2mgr(l2, MDL_ERROR_IND, (void *) 'F');
	if (l2->vs != l2->va) {
		/* outstanding data lost by the reset */
		skb_queue_purge(&l2->i_queue);
		est = 1;
	}
	clear_exception(l2);
	l2->vs = 0;
	l2->va = 0;
	l2->vr = 0;
	l2->sow = 0;
	mISDN_FsmChangeState(fi, ST_L2_7);
	stop_t200(l2, 3);
	mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
	if (est)
		l2up_create(l2, DL_ESTABLISH_IND, 0, NULL);
/*		mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
 *		MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED,
 *		0, NULL, 0);
 */
	if (skb_queue_len(&l2->i_queue) && cansend(l2))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
}
/*
 * DISC received while established: enter state 4, stop both timers,
 * acknowledge with UA (echoing P/F, reusing the skb), discard all
 * pending data and indicate DL_RELEASE to layer 3.
 */
static void
l2_stop_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	mISDN_FsmChangeState(fi, ST_L2_4);
	mISDN_FsmDelTimer(&l2->t203, 3);
	stop_t200(l2, 4);
	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
	skb_queue_purge(&l2->i_queue);
	freewin(l2);
	lapb_dl_release_l2l3(l2, DL_RELEASE_IND);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
/*
 * UA received in state 5 (awaiting establishment).  UA without F=1 is a
 * protocol error handled by l2_mdl_error_ua().  Otherwise: if a release
 * became pending meanwhile, start teardown; else reset sequence state,
 * enter state 7, start T203 and signal DL_ESTABLISH to layer 3 — a
 * confirm when we initiated (FLG_L3_INIT), an indication when data was
 * lost (vs != va, I-queue purged).
 * NOTE(review): after l2_disconnect() the function keeps running and
 * still switches to ST_L2_7 — verify this is the intended FSM behavior.
 */
static void
l2_connected(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int pr = -1;

	if (!get_PollFlag(l2, skb)) {
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
		l2_disconnect(fi, event, NULL);
	if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) {
		pr = DL_ESTABLISH_CNF;
	} else if (l2->vs != l2->va) {
		skb_queue_purge(&l2->i_queue);
		pr = DL_ESTABLISH_IND;
	}
	stop_t200(l2, 5);
	l2->vr = 0;
	l2->vs = 0;
	l2->va = 0;
	l2->sow = 0;
	mISDN_FsmChangeState(fi, ST_L2_7);
	mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4);
	if (pr != -1)
		l2up_create(l2, pr, 0, NULL);
	if (skb_queue_len(&l2->i_queue) && cansend(l2))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UP_IND, 0);
}
static void
l2_released(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	/* UA response received in ST 6 (awaiting release) */
	if (!get_PollFlag(l2, skb)) {
		/* UA without F bit: protocol error */
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);
	stop_t200(l2, 6);
	lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
  886. static void
  887. l2_reestablish(struct FsmInst *fi, int event, void *arg)
  888. {
  889. struct layer2 *l2 = fi->userdata;
  890. struct sk_buff *skb = arg;
  891. if (!get_PollFlagFree(l2, skb)) {
  892. establishlink(fi);
  893. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  894. }
  895. }
static void
l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	/* DM with F=1 while awaiting establishment: the peer refuses
	 * the link.  get_PollFlagFree() also frees skb.
	 */
	if (get_PollFlagFree(l2, skb)) {
		stop_t200(l2, 7);
		/* keep queued I-frames only if L3 initiated and may retry */
		if (!test_bit(FLG_L3_INIT, &l2->flag))
			skb_queue_purge(&l2->i_queue);
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
				      l2_newid(l2), 0, NULL);
		st5_dl_release_l2l3(l2);
		mISDN_FsmChangeState(fi, ST_L2_4);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	}
}
  914. static void
  915. l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
  916. {
  917. struct layer2 *l2 = fi->userdata;
  918. struct sk_buff *skb = arg;
  919. if (get_PollFlagFree(l2, skb)) {
  920. stop_t200(l2, 8);
  921. lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
  922. mISDN_FsmChangeState(fi, ST_L2_4);
  923. if (l2->tm)
  924. l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
  925. }
  926. }
  927. void
  928. enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
  929. {
  930. struct sk_buff *skb;
  931. u_char tmp[MAX_L2HEADER_LEN];
  932. int i;
  933. i = sethdraddr(l2, tmp, cr);
  934. if (test_bit(FLG_MOD128, &l2->flag)) {
  935. tmp[i++] = typ;
  936. tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
  937. } else
  938. tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
  939. skb = mI_alloc_skb(i, GFP_ATOMIC);
  940. if (!skb) {
  941. printk(KERN_WARNING
  942. "isdnl2 can't alloc sbbuff for enquiry_cr\n");
  943. return;
  944. }
  945. memcpy(skb_put(skb, i), tmp, i);
  946. enqueue_super(l2, skb);
  947. }
  948. inline void
  949. enquiry_response(struct layer2 *l2)
  950. {
  951. if (test_bit(FLG_OWN_BUSY, &l2->flag))
  952. enquiry_cr(l2, RNR, RSP, 1);
  953. else
  954. enquiry_cr(l2, RR, RSP, 1);
  955. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  956. }
  957. inline void
  958. transmit_enquiry(struct layer2 *l2)
  959. {
  960. if (test_bit(FLG_OWN_BUSY, &l2->flag))
  961. enquiry_cr(l2, RNR, CMD, 1);
  962. else
  963. enquiry_cr(l2, RR, CMD, 1);
  964. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  965. start_t200(l2, 9);
  966. }
static void
nrerrorrecovery(struct FsmInst *fi)
{
	struct layer2 *l2 = fi->userdata;

	/* Invalid N(R) received: report MDL error 'J' and re-establish
	 * the link.  FLG_L3_INIT is cleared so layer 3 later gets an
	 * indication rather than a confirm.
	 */
	l2mgr(l2, MDL_ERROR_IND, (void *) 'J');
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
static void
invoke_retransmission(struct layer2 *l2, unsigned int nr)
{
	u_int p1;

	/* Walk V(S) back down to nr, moving every unacknowledged I-frame
	 * from the retransmit window back to the head of i_queue, then
	 * kick the transmitter to resend them.
	 */
	if (l2->vs != nr) {
		while (l2->vs != nr) {
			(l2->vs)--;
			/* NOTE(review): the %128 / %8 below also corrects an
			 * underflow of vs past 0 — assumes vs is an unsigned
			 * type whose range is a multiple of the modulus;
			 * TODO confirm against struct layer2 */
			if (test_bit(FLG_MOD128, &l2->flag)) {
				l2->vs %= 128;
				p1 = (l2->vs - l2->va) % 128;
			} else {
				l2->vs %= 8;
				p1 = (l2->vs - l2->va) % 8;
			}
			/* map the window offset to the circular slot index */
			p1 = (p1 + l2->sow) % l2->window;
			if (l2->windowar[p1])
				skb_queue_head(&l2->i_queue, l2->windowar[p1]);
			else
				printk(KERN_WARNING
				       "%s: windowar[%d] is NULL\n",
				       __func__, p1);
			l2->windowar[p1] = NULL;
		}
		mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
	}
}
static void
l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, typ = RR;
	unsigned int nr;

	/* Supervisory frame (RR/RNR/REJ) received in established ST 7 */
	rsp = *skb->data & 0x2;		/* C/R bit of the address octet */
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;		/* C/R meaning flips for originator */

	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, l2)) {
		set_peer_busy(l2);
		typ = RNR;
	} else
		clear_peer_busy(l2);
	if (IsREJ(skb->data, l2))
		typ = REJ;

	/* extract P/F bit and N(R) for mod-128 vs. mod-8 format */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);

	if (PollFlag) {
		if (rsp)
			/* unsolicited F=1 response: MDL error 'A' */
			l2mgr(l2, MDL_ERROR_IND, (void *) 'A');
		else
			enquiry_response(l2);
	}
	if (legalnr(l2, nr)) {
		if (typ == REJ) {
			/* peer requests retransmission starting at nr */
			setva(l2, nr);
			invoke_retransmission(l2, nr);
			stop_t200(l2, 10);
			if (mISDN_FsmAddTimer(&l2->t203, l2->T203,
					      EV_L2_T203, NULL, 6))
				l2m_debug(&l2->l2m, "Restart T203 ST7 REJ");
		} else if ((nr == l2->vs) && (typ == RR)) {
			/* everything acknowledged: idle supervision only */
			setva(l2, nr);
			stop_t200(l2, 11);
			mISDN_FsmRestartTimer(&l2->t203, l2->T203,
					      EV_L2_T203, NULL, 7);
		} else if ((l2->va != nr) || (typ == RNR)) {
			/* partial ack or peer busy: keep T200 supervising */
			setva(l2, nr);
			if (typ != RR)
				mISDN_FsmDelTimer(&l2->t203, 9);
			restart_t200(l2, 12);
		}
		if (skb_queue_len(&l2->i_queue) && (typ == RR))
			mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
	} else
		nrerrorrecovery(fi);
}
  1057. static void
  1058. l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
  1059. {
  1060. struct layer2 *l2 = fi->userdata;
  1061. struct sk_buff *skb = arg;
  1062. if (!test_bit(FLG_L3_INIT, &l2->flag))
  1063. skb_queue_tail(&l2->i_queue, skb);
  1064. else
  1065. dev_kfree_skb(skb);
  1066. }
static void
l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	/* DL_DATA in ST 7: queue the I-frame and try to transmit now */
	skb_queue_tail(&l2->i_queue, skb);
	mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
}
static void
l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	/* DL_DATA during timer recovery (ST 8): queue only; frames are
	 * sent once the link returns to ST 7.
	 */
	skb_queue_tail(&l2->i_queue, skb);
}
static void
l2_got_iframe(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, i;
	u_int ns, nr;

	/* I-frame received (ST 7/8): deliver in-sequence payloads to
	 * layer 3, REJ out-of-sequence frames, and process the
	 * piggybacked acknowledgement N(R).
	 */
	i = l2addrsize(l2);
	/* decode P, N(S), N(R) for mod-128 vs. mod-8 format */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
		ns = skb->data[i] >> 1;
		nr = (skb->data[i + 1] >> 1) & 0x7f;
	} else {
		PollFlag = (skb->data[i] & 0x10);
		ns = (skb->data[i] >> 1) & 0x7;
		nr = (skb->data[i] >> 5) & 0x7;
	}
	if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
		/* our receiver is busy: drop payload, still answer polls */
		dev_kfree_skb(skb);
		if (PollFlag)
			enquiry_response(l2);
	} else {
		if (l2->vr == ns) {
			/* in sequence: advance V(R), hand data upward */
			l2->vr++;
			if (test_bit(FLG_MOD128, &l2->flag))
				l2->vr %= 128;
			else
				l2->vr %= 8;
			test_and_clear_bit(FLG_REJEXC, &l2->flag);
			if (PollFlag)
				enquiry_response(l2);
			else
				test_and_set_bit(FLG_ACK_PEND, &l2->flag);
			skb_pull(skb, l2headersize(l2, 0));
			l2up(l2, DL_DATA_IND, skb);
		} else {
			/* n(s)!=v(r) */
			dev_kfree_skb(skb);
			if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
				/* REJ already outstanding: answer polls only */
				if (PollFlag)
					enquiry_response(l2);
			} else {
				/* first out-of-sequence frame: send REJ */
				enquiry_cr(l2, REJ, RSP, PollFlag);
				test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
			}
		}
	}
	if (legalnr(l2, nr)) {
		if (!test_bit(FLG_PEER_BUSY, &l2->flag) &&
		    (fi->state == ST_L2_7)) {
			if (nr == l2->vs) {
				/* all outstanding I-frames acknowledged */
				stop_t200(l2, 13);
				mISDN_FsmRestartTimer(&l2->t203, l2->T203,
						      EV_L2_T203, NULL, 7);
			} else if (nr != l2->va)
				restart_t200(l2, 14);
		}
		setva(l2, nr);
	} else {
		nrerrorrecovery(fi);
		return;
	}
	if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
	if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag))
		enquiry_cr(l2, RR, RSP, 0);
}
static void
l2_got_tei(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	u_int info;

	/* TEI assigned by management; arg carries the TEI value */
	l2->tei = (signed char)(long)arg;
	set_channel_address(&l2->ch, l2->sapi, l2->tei);
	info = DL_INFO_L2_CONNECT;
	l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info);
	if (fi->state == ST_L2_3) {
		/* establishment was waiting on the TEI: start it now */
		establishlink(fi);
		test_and_set_bit(FLG_L3_INIT, &l2->flag);
	} else
		mISDN_FsmChangeState(fi, ST_L2_4);
	/* flush UI frames queued while we had no TEI */
	if (skb_queue_len(&l2->ui_queue))
		tx_ui(l2);
}
static void
l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	/* T200 expiry while awaiting establishment (ST 5) */
	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		/* D-channel busy: just re-arm the timer and wait */
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
	} else if (l2->rc == l2->N200) {
		/* retry count exhausted: give up, MDL error 'G' */
		mISDN_FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &l2->flag);
		skb_queue_purge(&l2->i_queue);
		l2mgr(l2, MDL_ERROR_IND, (void *) 'G');
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
				      l2_newid(l2), 0, NULL);
		st5_dl_release_l2l3(l2);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	} else {
		/* retransmit SABM(E) with P=1 and count the retry */
		l2->rc++;
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ?
				       SABME : SABM) | 0x10, CMD);
	}
}
static void
l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	/* T200 expiry while awaiting release (ST 6) */
	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		/* D-channel busy: just re-arm the timer and wait */
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
	} else if (l2->rc == l2->N200) {
		/* retries exhausted: force the release, MDL error 'H' */
		mISDN_FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &l2->flag);
		l2mgr(l2, MDL_ERROR_IND, (void *) 'H');
		lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	} else {
		/* retransmit DISC with P=1 */
		l2->rc++;
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200,
				  NULL, 9);
		send_uframe(l2, NULL, DISC | 0x10, CMD);
	}
}
static void
l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	/* T200 expiry in ST 7: enter timer recovery (ST 8) and poll */
	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		/* D-channel busy: just re-arm the timer and wait */
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &l2->flag);
	l2->rc = 0;
	mISDN_FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(l2);	/* sends RR/RNR poll and restarts T200 */
	l2->rc++;
}
static void
l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	/* T200 expiry during timer recovery (ST 8) */
	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		/* D-channel busy: just re-arm the timer and wait */
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &l2->flag);
	if (l2->rc == l2->N200) {
		/* poll retries exhausted: MDL error 'I', re-establish */
		l2mgr(l2, MDL_ERROR_IND, (void *) 'I');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
	} else {
		/* poll again and count the retry */
		transmit_enquiry(l2);
		l2->rc++;
	}
}
static void
l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	/* T203 idle-timer expiry in ST 7: verify the peer is still
	 * alive by polling it (moves into timer recovery, ST 8).
	 */
	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		/* D-channel busy: just re-arm the timer and wait */
		mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9);
		return;
	}
	mISDN_FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(l2);
	l2->rc = 0;
}
  1259. static void
  1260. l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
  1261. {
  1262. struct layer2 *l2 = fi->userdata;
  1263. struct sk_buff *skb, *nskb, *oskb;
  1264. u_char header[MAX_L2HEADER_LEN];
  1265. u_int i, p1;
  1266. if (!cansend(l2))
  1267. return;
  1268. skb = skb_dequeue(&l2->i_queue);
  1269. if (!skb)
  1270. return;
  1271. if (test_bit(FLG_MOD128, &l2->flag))
  1272. p1 = (l2->vs - l2->va) % 128;
  1273. else
  1274. p1 = (l2->vs - l2->va) % 8;
  1275. p1 = (p1 + l2->sow) % l2->window;
  1276. if (l2->windowar[p1]) {
  1277. printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
  1278. p1);
  1279. dev_kfree_skb(l2->windowar[p1]);
  1280. }
  1281. l2->windowar[p1] = skb;
  1282. i = sethdraddr(l2, header, CMD);
  1283. if (test_bit(FLG_MOD128, &l2->flag)) {
  1284. header[i++] = l2->vs << 1;
  1285. header[i++] = l2->vr << 1;
  1286. l2->vs = (l2->vs + 1) % 128;
  1287. } else {
  1288. header[i++] = (l2->vr << 5) | (l2->vs << 1);
  1289. l2->vs = (l2->vs + 1) % 8;
  1290. }
  1291. nskb = skb_clone(skb, GFP_ATOMIC);
  1292. p1 = skb_headroom(nskb);
  1293. if (p1 >= i)
  1294. memcpy(skb_push(nskb, i), header, i);
  1295. else {
  1296. printk(KERN_WARNING
  1297. "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
  1298. oskb = nskb;
  1299. nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
  1300. if (!nskb) {
  1301. dev_kfree_skb(oskb);
  1302. printk(KERN_WARNING "%s: no skb mem\n", __func__);
  1303. return;
  1304. }
  1305. memcpy(skb_put(nskb, i), header, i);
  1306. memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
  1307. dev_kfree_skb(oskb);
  1308. }
  1309. l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
  1310. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  1311. if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
  1312. mISDN_FsmDelTimer(&l2->t203, 13);
  1313. mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11);
  1314. }
  1315. }
static void
l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, rnr = 0;
	unsigned int nr;

	/* Supervisory frame received during timer recovery (ST 8) */
	rsp = *skb->data & 0x2;		/* C/R bit of the address octet */
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;

	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, l2)) {
		set_peer_busy(l2);
		rnr = 1;
	} else
		clear_peer_busy(l2);

	/* extract P/F bit and N(R) for mod-128 vs. mod-8 format */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);

	if (rsp && PollFlag) {
		/* the F=1 response our poll was waiting for: leave ST 8 */
		if (legalnr(l2, nr)) {
			if (rnr) {
				restart_t200(l2, 15);
			} else {
				stop_t200(l2, 16);
				mISDN_FsmAddTimer(&l2->t203, l2->T203,
						  EV_L2_T203, NULL, 5);
				setva(l2, nr);
			}
			invoke_retransmission(l2, nr);
			mISDN_FsmChangeState(fi, ST_L2_7);
			if (skb_queue_len(&l2->i_queue) && cansend(l2))
				mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
		} else
			nrerrorrecovery(fi);
	} else {
		/* a command with P=1 still needs an answer */
		if (!rsp && PollFlag)
			enquiry_response(l2);
		if (legalnr(l2, nr))
			setva(l2, nr);
		else
			nrerrorrecovery(fi);
	}
}
static void
l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	/* FRMR response: the peer reports an unrecoverable frame error.
	 * Skip address + FRMR control octet to reach the rejected
	 * frame's control field.
	 */
	skb_pull(skb, l2addrsize(l2) + 1);

	/* re-establish when the rejected frame was I or S, or when a UA
	 * was rejected while we are in ST 7 */
	if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */
	    (IsUA(skb->data) && (fi->state == ST_L2_7))) {
		l2mgr(l2, MDL_ERROR_IND, (void *) 'K');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
	}
	dev_kfree_skb(skb);
}
static void
l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	/* TEI removed in ST 2/4: drop queued UI frames, back to ST 1 */
	skb_queue_purge(&l2->ui_queue);
	l2->tei = GROUP_TEI;
	mISDN_FsmChangeState(fi, ST_L2_1);
}
static void
l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	/* TEI removed while establishment awaited TEI (ST 3): tell
	 * layer 3 the pending establish is off, back to ST 1.
	 */
	skb_queue_purge(&l2->ui_queue);
	l2->tei = GROUP_TEI;
	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
	mISDN_FsmChangeState(fi, ST_L2_1);
}
static void
l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	/* TEI removed while establishing (ST 5): drop all pending data
	 * and report the release to layer 3.
	 */
	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	l2->tei = GROUP_TEI;
	stop_t200(l2, 17);
	st5_dl_release_l2l3(l2);
	mISDN_FsmChangeState(fi, ST_L2_1);
}
static void
l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	/* TEI removed while awaiting release (ST 6) */
	skb_queue_purge(&l2->ui_queue);
	l2->tei = GROUP_TEI;
	stop_t200(l2, 18);
	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
	mISDN_FsmChangeState(fi, ST_L2_1);
}
static void
l2_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	/* TEI removed in established state (ST 7/8): tear everything
	 * down, stop both timers and indicate the release to layer 3.
	 */
	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	l2->tei = GROUP_TEI;
	stop_t200(l2, 17);
	mISDN_FsmDelTimer(&l2->t203, 19);
	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
	/* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
	 * MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED,
	 * 0, NULL, 0);
	 */
	mISDN_FsmChangeState(fi, ST_L2_1);
}
/* "persistant" spelling kept: the names below are wired into L2FnList */
static void
l2_st14_persistant_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	/* Persistent layer-1 deactivation in ST 1/4: drop queued data;
	 * if an establish was pending, pass the skb up as release ind.
	 */
	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
		l2up(l2, DL_RELEASE_IND, skb);
	else
		dev_kfree_skb(skb);
}
static void
l2_st5_persistant_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	/* Persistent layer-1 deactivation while establishing (ST 5) */
	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	stop_t200(l2, 19);
	st5_dl_release_l2l3(l2);	/* confirm/indicate release to L3 */
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	dev_kfree_skb(skb);
}
static void
l2_st6_persistant_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	/* Persistent layer-1 deactivation while releasing (ST 6):
	 * treat as the release confirmation (skb is consumed by l2up).
	 */
	skb_queue_purge(&l2->ui_queue);
	stop_t200(l2, 20);
	l2up(l2, DL_RELEASE_CNF, skb);
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
static void
l2_persistant_da(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	/* Persistent layer-1 deactivation in established state (ST 7/8):
	 * full teardown, both timers stopped, release indicated to L3.
	 */
	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	stop_t200(l2, 19);
	mISDN_FsmDelTimer(&l2->t203, 19);
	l2up(l2, DL_RELEASE_IND, skb);	/* consumes skb */
	mISDN_FsmChangeState(fi, ST_L2_4);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
  1489. static void
  1490. l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
  1491. {
  1492. struct layer2 *l2 = fi->userdata;
  1493. struct sk_buff *skb = arg;
  1494. if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) {
  1495. enquiry_cr(l2, RNR, RSP, 0);
  1496. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  1497. }
  1498. if (skb)
  1499. dev_kfree_skb(skb);
  1500. }
  1501. static void
  1502. l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
  1503. {
  1504. struct layer2 *l2 = fi->userdata;
  1505. struct sk_buff *skb = arg;
  1506. if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) {
  1507. enquiry_cr(l2, RR, RSP, 0);
  1508. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  1509. }
  1510. if (skb)
  1511. dev_kfree_skb(skb);
  1512. }
static void
l2_frame_error(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	/* report a received-frame error (error character in arg) */
	l2mgr(l2, MDL_ERROR_IND, arg);
}
static void
l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	/* frame error in established state: report it and re-establish */
	l2mgr(l2, MDL_ERROR_IND, arg);
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
/* State/event dispatch table for the layer-2 FSM.  Entries are
 * {state, event, handler}; states follow Q.921 (ST_L2_1 TEI
 * unassigned ... ST_L2_7 established ... ST_L2_8 timer recovery).
 */
static struct FsmNode L2FnList[] =
{
	/* layer 3 establish / release requests */
	{ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
	{ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
	{ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
	{ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
	{ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
	{ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
	{ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	/* layer 3 data requests (acknowledged and unacknowledged) */
	{ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
	{ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
	{ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
	{ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign},
	{ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui},
	{ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui},
	{ST_L2_4, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui},
	/* TEI management events */
	{ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
	{ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
	{ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
	{ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
	{ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
	{ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
	{ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
	/* received unnumbered frames */
	{ST_L2_4, EV_L2_SABME, l2_start_multi},
	{ST_L2_5, EV_L2_SABME, l2_send_UA},
	{ST_L2_6, EV_L2_SABME, l2_send_DM},
	{ST_L2_7, EV_L2_SABME, l2_restart_multi},
	{ST_L2_8, EV_L2_SABME, l2_restart_multi},
	{ST_L2_4, EV_L2_DISC, l2_send_DM},
	{ST_L2_5, EV_L2_DISC, l2_send_DM},
	{ST_L2_6, EV_L2_DISC, l2_send_UA},
	{ST_L2_7, EV_L2_DISC, l2_stop_multi},
	{ST_L2_8, EV_L2_DISC, l2_stop_multi},
	{ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_5, EV_L2_UA, l2_connected},
	{ST_L2_6, EV_L2_UA, l2_released},
	{ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_4, EV_L2_DM, l2_reestablish},
	{ST_L2_5, EV_L2_DM, l2_st5_dm_release},
	{ST_L2_6, EV_L2_DM, l2_st6_dm_release},
	{ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
	{ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
	{ST_L2_1, EV_L2_UI, l2_got_ui},
	{ST_L2_2, EV_L2_UI, l2_got_ui},
	{ST_L2_3, EV_L2_UI, l2_got_ui},
	{ST_L2_4, EV_L2_UI, l2_got_ui},
	{ST_L2_5, EV_L2_UI, l2_got_ui},
	{ST_L2_6, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_UI, l2_got_ui},
	{ST_L2_8, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
	{ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
	/* received supervisory and information frames */
	{ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
	{ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
	{ST_L2_7, EV_L2_I, l2_got_iframe},
	{ST_L2_8, EV_L2_I, l2_got_iframe},
	/* timers */
	{ST_L2_5, EV_L2_T200, l2_st5_tout_200},
	{ST_L2_6, EV_L2_T200, l2_st6_tout_200},
	{ST_L2_7, EV_L2_T200, l2_st7_tout_200},
	{ST_L2_8, EV_L2_T200, l2_st8_tout_200},
	{ST_L2_7, EV_L2_T203, l2_st7_tout_203},
	/* transmit pull and flow control */
	{ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
	{ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	/* frame errors and layer-1 deactivation */
	{ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistant_da},
	{ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
	{ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
	{ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistant_da},
	{ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistant_da},
	{ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistant_da},
	{ST_L2_7, EV_L1_DEACTIVATE, l2_persistant_da},
	{ST_L2_8, EV_L1_DEACTIVATE, l2_persistant_da},
};

/* number of entries in L2FnList */
#define L2_FN_COUNT (sizeof(L2FnList)/sizeof(struct FsmNode))
static int
ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
{
	u_char *datap = skb->data;
	int ret = -EINVAL;
	int psapi, ptei;
	u_int l;
	int c = 0;

	/* Parse a frame delivered by layer 1: validate the address
	 * field, classify the control field and feed the matching
	 * event into the FSM.  Returns the FSM result, 0 when the
	 * frame is silently dropped, or -EINVAL on malformed input.
	 */
	l = l2addrsize(l2);
	if (skb->len <= l) {
		/* too short to hold address plus control field */
		mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N');
		return ret;
	}
	if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */
		psapi = *datap++;
		ptei = *datap++;
		/* address extension: EA0 must be 0, EA1 must be 1 */
		if ((psapi & 1) || !(ptei & 1)) {
			printk(KERN_WARNING
			       "l2 D-channel frame wrong EA0/EA1\n");
			return ret;
		}
		psapi >>= 2;
		ptei >>= 1;
		if (psapi != l2->sapi) {
			/* not our business
			 * printk(KERN_DEBUG "%s: sapi %d/%d sapi mismatch\n",
			 * __func__,
			 * psapi, l2->sapi);
			 */
			dev_kfree_skb(skb);
			return 0;
		}
		if ((ptei != l2->tei) && (ptei != GROUP_TEI)) {
			/* not our business
			 * printk(KERN_DEBUG "%s: tei %d/%d sapi %d mismatch\n",
			 * __func__,
			 * ptei, l2->tei, psapi);
			 */
			dev_kfree_skb(skb);
			return 0;
		}
	} else
		datap += l;
	/* classify by control field; each *_error helper validates the
	 * frame and returns an error character, or 0 when it is OK */
	if (!(*datap & 1)) {	/* I-Frame */
		c = iframe_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_I, skb);
	} else if (IsSFrame(datap, l2)) {	/* S-Frame */
		c = super_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb);
	} else if (IsUI(datap)) {
		c = UI_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb);
	} else if (IsSABME(datap, l2)) {
		c = unnum_error(l2, skb, CMD);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb);
	} else if (IsUA(datap)) {
		c = unnum_error(l2, skb, RSP);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb);
	} else if (IsDISC(datap)) {
		c = unnum_error(l2, skb, CMD);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb);
	} else if (IsDM(datap)) {
		c = unnum_error(l2, skb, RSP);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb);
	} else if (IsFRMR(datap)) {
		c = FRMR_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb);
	} else
		c = 'L';	/* unknown frame type */
	if (c) {
		printk(KERN_WARNING "l2 D-channel frame error %c\n", c);
		mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
	}
	return ret;
}
static int
l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
{
	struct layer2 *l2 = container_of(ch, struct layer2, ch);
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	int ret = -EINVAL;

	/* Channel receive entry point: dispatch primitives from layer 1
	 * (PH_*) and layer 3 (DL_*) into the FSM.  Frees skb whenever
	 * the handler did not consume it (ret != 0).
	 */
	if (*debug & DEBUG_L2_RECV)
		printk(KERN_DEBUG "%s: prim(%x) id(%x) tei(%d)\n",
		       __func__, hh->prim, hh->id, l2->tei);
	switch (hh->prim) {
	case PH_DATA_IND:
		ret = ph_data_indication(l2, hh, skb);
		break;
	case PH_DATA_CNF:
		ret = ph_data_confirm(l2, hh, skb);
		break;
	case PH_ACTIVATE_IND:
		/* layer 1 is up; start a deferred establishment if any */
		test_and_set_bit(FLG_L1_ACTIV, &l2->flag);
		l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL);
		if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
			ret = mISDN_FsmEvent(&l2->l2m,
					     EV_L2_DL_ESTABLISH_REQ, skb);
		break;
	case PH_DEACTIVATE_IND:
		test_and_clear_bit(FLG_L1_ACTIV, &l2->flag);
		l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL);
		ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb);
		break;
	case MPH_INFORMATION_IND:
		/* transparent status info: forward straight to layer 3 */
		if (!l2->up)
			break;
		ret = l2->up->send(l2->up, skb);
		break;
	case DL_DATA_REQ:
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_DATA, skb);
		break;
	case DL_UNITDATA_REQ:
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb);
		break;
	case DL_ESTABLISH_REQ:
		if (test_bit(FLG_LAPB, &l2->flag))
			test_and_set_bit(FLG_ORIG, &l2->flag);
		if (test_bit(FLG_L1_ACTIV, &l2->flag)) {
			if (test_bit(FLG_LAPD, &l2->flag) ||
			    test_bit(FLG_ORIG, &l2->flag))
				ret = mISDN_FsmEvent(&l2->l2m,
						     EV_L2_DL_ESTABLISH_REQ, skb);
		} else {
			/* layer 1 down: activate first, establish later */
			if (test_bit(FLG_LAPD, &l2->flag) ||
			    test_bit(FLG_ORIG, &l2->flag)) {
				test_and_set_bit(FLG_ESTAB_PEND,
						 &l2->flag);
			}
			ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2),
				     skb);
		}
		break;
	case DL_RELEASE_REQ:
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
				      l2_newid(l2), 0, NULL);
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
				     skb);
		break;
	default:
		if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m, "l2 unknown pr %04x",
				  hh->prim);
	}
	if (ret) {
		/* handler did not consume the skb: free it here */
		dev_kfree_skb(skb);
		ret = 0;
	}
	return ret;
}
int
tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
{
	int ret = -EINVAL;

	/* Entry point for the TEI manager: translate MDL_* commands
	 * into FSM events.  Returns the FSM result or -EINVAL for
	 * unknown commands.
	 */
	if (*debug & DEBUG_L2_TEI)
		printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd);
	switch (cmd) {
	case (MDL_ASSIGN_REQ):
		/* arg carries the assigned TEI value */
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
		break;
	case (MDL_REMOVE_REQ):
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL);
		break;
	case (MDL_ERROR_IND):
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
		break;
	case (MDL_ERROR_RSP):
		/* ETS 300-125 5.3.2.1 Test: TC13010 */
		printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n");
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
		break;
	}
	return ret;
}
/*
 * Tear down and free a layer2 instance: stop both protocol timers,
 * drop every queued frame, release the transmit window, detach from
 * the TEI manager (LAPD only) and free the structure.  The order
 * matters: timers and queues must be dead before the memory goes away.
 */
static void
release_l2(struct layer2 *l2)
{
	mISDN_FsmDelTimer(&l2->t200, 21);
	mISDN_FsmDelTimer(&l2->t203, 16);
	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	skb_queue_purge(&l2->down_queue);
	ReleaseWin(l2);
	if (test_bit(FLG_LAPD, &l2->flag)) {
		TEIrelease(l2);
		/* close the D-channel that create_l2() opened */
		if (l2->ch.st)
			l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D,
				CLOSE_CHANNEL, NULL);
	}
	kfree(l2);
}
  1818. static int
  1819. l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
  1820. {
  1821. struct layer2 *l2 = container_of(ch, struct layer2, ch);
  1822. u_int info;
  1823. if (*debug & DEBUG_L2_CTRL)
  1824. printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
  1825. switch (cmd) {
  1826. case OPEN_CHANNEL:
  1827. if (test_bit(FLG_LAPD, &l2->flag)) {
  1828. set_channel_address(&l2->ch, l2->sapi, l2->tei);
  1829. info = DL_INFO_L2_CONNECT;
  1830. l2up_create(l2, DL_INFORMATION_IND,
  1831. sizeof(info), &info);
  1832. }
  1833. break;
  1834. case CLOSE_CHANNEL:
  1835. if (l2->ch.peer)
  1836. l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL);
  1837. release_l2(l2);
  1838. break;
  1839. }
  1840. return 0;
  1841. }
  1842. struct layer2 *
  1843. create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, u_long arg)
  1844. {
  1845. struct layer2 *l2;
  1846. struct channel_req rq;
  1847. l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL);
  1848. if (!l2) {
  1849. printk(KERN_ERR "kzalloc layer2 failed\n");
  1850. return NULL;
  1851. }
  1852. l2->next_id = 1;
  1853. l2->down_id = MISDN_ID_NONE;
  1854. l2->up = ch;
  1855. l2->ch.st = ch->st;
  1856. l2->ch.send = l2_send;
  1857. l2->ch.ctrl = l2_ctrl;
  1858. switch (protocol) {
  1859. case ISDN_P_LAPD_NT:
  1860. test_and_set_bit(FLG_LAPD, &l2->flag);
  1861. test_and_set_bit(FLG_LAPD_NET, &l2->flag);
  1862. test_and_set_bit(FLG_MOD128, &l2->flag);
  1863. l2->sapi = 0;
  1864. l2->maxlen = MAX_DFRAME_LEN;
  1865. if (test_bit(OPTION_L2_PMX, &options))
  1866. l2->window = 7;
  1867. else
  1868. l2->window = 1;
  1869. if (test_bit(OPTION_L2_PTP, &options))
  1870. test_and_set_bit(FLG_PTP, &l2->flag);
  1871. if (test_bit(OPTION_L2_FIXEDTEI, &options))
  1872. test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
  1873. l2->tei = (u_int)arg;
  1874. l2->T200 = 1000;
  1875. l2->N200 = 3;
  1876. l2->T203 = 10000;
  1877. if (test_bit(OPTION_L2_PMX, &options))
  1878. rq.protocol = ISDN_P_NT_E1;
  1879. else
  1880. rq.protocol = ISDN_P_NT_S0;
  1881. rq.adr.channel = 0;
  1882. l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
  1883. break;
  1884. case ISDN_P_LAPD_TE:
  1885. test_and_set_bit(FLG_LAPD, &l2->flag);
  1886. test_and_set_bit(FLG_MOD128, &l2->flag);
  1887. test_and_set_bit(FLG_ORIG, &l2->flag);
  1888. l2->sapi = 0;
  1889. l2->maxlen = MAX_DFRAME_LEN;
  1890. if (test_bit(OPTION_L2_PMX, &options))
  1891. l2->window = 7;
  1892. else
  1893. l2->window = 1;
  1894. if (test_bit(OPTION_L2_PTP, &options))
  1895. test_and_set_bit(FLG_PTP, &l2->flag);
  1896. if (test_bit(OPTION_L2_FIXEDTEI, &options))
  1897. test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
  1898. l2->tei = (u_int)arg;
  1899. l2->T200 = 1000;
  1900. l2->N200 = 3;
  1901. l2->T203 = 10000;
  1902. if (test_bit(OPTION_L2_PMX, &options))
  1903. rq.protocol = ISDN_P_TE_E1;
  1904. else
  1905. rq.protocol = ISDN_P_TE_S0;
  1906. rq.adr.channel = 0;
  1907. l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
  1908. break;
  1909. case ISDN_P_B_X75SLP:
  1910. test_and_set_bit(FLG_LAPB, &l2->flag);
  1911. l2->window = 7;
  1912. l2->maxlen = MAX_DATA_SIZE;
  1913. l2->T200 = 1000;
  1914. l2->N200 = 4;
  1915. l2->T203 = 5000;
  1916. l2->addr.A = 3;
  1917. l2->addr.B = 1;
  1918. break;
  1919. default:
  1920. printk(KERN_ERR "layer2 create failed prt %x\n",
  1921. protocol);
  1922. kfree(l2);
  1923. return NULL;
  1924. }
  1925. skb_queue_head_init(&l2->i_queue);
  1926. skb_queue_head_init(&l2->ui_queue);
  1927. skb_queue_head_init(&l2->down_queue);
  1928. skb_queue_head_init(&l2->tmp_queue);
  1929. InitWin(l2);
  1930. l2->l2m.fsm = &l2fsm;
  1931. if (test_bit(FLG_LAPB, &l2->flag) ||
  1932. test_bit(FLG_PTP, &l2->flag) ||
  1933. test_bit(FLG_LAPD_NET, &l2->flag))
  1934. l2->l2m.state = ST_L2_4;
  1935. else
  1936. l2->l2m.state = ST_L2_1;
  1937. l2->l2m.debug = *debug;
  1938. l2->l2m.userdata = l2;
  1939. l2->l2m.userint = 0;
  1940. l2->l2m.printdebug = l2m_debug;
  1941. mISDN_FsmInitTimer(&l2->l2m, &l2->t200);
  1942. mISDN_FsmInitTimer(&l2->l2m, &l2->t203);
  1943. return l2;
  1944. }
  1945. static int
  1946. x75create(struct channel_req *crq)
  1947. {
  1948. struct layer2 *l2;
  1949. if (crq->protocol != ISDN_P_B_X75SLP)
  1950. return -EPROTONOSUPPORT;
  1951. l2 = create_l2(crq->ch, crq->protocol, 0, 0);
  1952. if (!l2)
  1953. return -ENOMEM;
  1954. crq->ch = &l2->ch;
  1955. crq->protocol = ISDN_P_B_HDLC;
  1956. return 0;
  1957. }
/* B-protocol descriptor for X.75 SLP, registered with the mISDN core
 * in Isdnl2_Init(); x75create() is called for each OPEN on the protocol. */
static struct Bprotocol X75SLP = {
	.Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)),
	.name = "X75SLP",
	.create = x75create
};
  1963. int
  1964. Isdnl2_Init(u_int *deb)
  1965. {
  1966. debug = deb;
  1967. mISDN_register_Bprotocol(&X75SLP);
  1968. l2fsm.state_count = L2_STATE_COUNT;
  1969. l2fsm.event_count = L2_EVENT_COUNT;
  1970. l2fsm.strEvent = strL2Event;
  1971. l2fsm.strState = strL2State;
  1972. mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
  1973. TEIInit(deb);
  1974. return 0;
  1975. }
/*
 * Module exit: undo Isdnl2_Init() in reverse order — unregister the
 * X.75 B-protocol, stop the TEI manager, then free the L2 FSM tables.
 */
void
Isdnl2_cleanup(void)
{
	mISDN_unregister_Bprotocol(&X75SLP);
	TEIFree();
	mISDN_FsmFree(&l2fsm);
}