layer2.c 50 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216
  1. /*
  2. *
  3. * Author Karsten Keil <kkeil@novell.com>
  4. *
  5. * Copyright 2008 by Karsten Keil <kkeil@novell.com>
  6. *
  7. * This program is free software; you can redistribute it and/or modify
  8. * it under the terms of the GNU General Public License version 2 as
  9. * published by the Free Software Foundation.
  10. *
  11. * This program is distributed in the hope that it will be useful,
  12. * but WITHOUT ANY WARRANTY; without even the implied warranty of
  13. * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
  14. * GNU General Public License for more details.
  15. *
  16. */
  17. #include <linux/mISDNif.h>
  18. #include <linux/slab.h>
  19. #include "core.h"
  20. #include "fsm.h"
  21. #include "layer2.h"
/* Module-wide debug level mask, shared with the mISDN core
 * (checked against DEBUG_L2_* bits, e.g. in l2m_debug()). */
static u_int *debug;

/* Layer 2 state machine description; states/events are filled in at
 * module init (table not visible in this chunk). */
static
struct Fsm l2fsm = {NULL, 0, 0, NULL, NULL};
/* Printable state names for FSM debugging.
 * NOTE(review): order must match the ST_L2_* constants in layer2.h —
 * confirm when changing either list. */
static char *strL2State[] =
{
	"ST_L2_1",
	"ST_L2_2",
	"ST_L2_3",
	"ST_L2_4",
	"ST_L2_5",
	"ST_L2_6",
	"ST_L2_7",
	"ST_L2_8",
};
/*
 * Events handled by the layer 2 state machine.  The first group are
 * received frame types, the rest are requests from layer 3 / the TEI
 * manager, layer 1 notifications and timer expiries.
 */
enum {
	EV_L2_UI,
	EV_L2_SABME,
	EV_L2_DISC,
	EV_L2_DM,
	EV_L2_UA,
	EV_L2_FRMR,
	EV_L2_SUPER,
	EV_L2_I,
	EV_L2_DL_DATA,
	EV_L2_ACK_PULL,		/* try to send queued I-frames */
	EV_L2_DL_UNITDATA,
	EV_L2_DL_ESTABLISH_REQ,
	EV_L2_DL_RELEASE_REQ,
	EV_L2_MDL_ASSIGN,
	EV_L2_MDL_REMOVE,
	EV_L2_MDL_ERROR,
	EV_L1_DEACTIVATE,
	EV_L2_T200,		/* retransmission timer expiry */
	EV_L2_T203,		/* idle supervision timer expiry */
	EV_L2_SET_OWN_BUSY,
	EV_L2_CLEAR_OWN_BUSY,
	EV_L2_FRAME_ERROR,
};

/* Total number of events; relies on EV_L2_FRAME_ERROR being last. */
#define L2_EVENT_COUNT (EV_L2_FRAME_ERROR+1)
/* Printable event names for FSM debugging; order must match the
 * event enum above. */
static char *strL2Event[] =
{
	"EV_L2_UI",
	"EV_L2_SABME",
	"EV_L2_DISC",
	"EV_L2_DM",
	"EV_L2_UA",
	"EV_L2_FRMR",
	"EV_L2_SUPER",
	"EV_L2_I",
	"EV_L2_DL_DATA",
	"EV_L2_ACK_PULL",
	"EV_L2_DL_UNITDATA",
	"EV_L2_DL_ESTABLISH_REQ",
	"EV_L2_DL_RELEASE_REQ",
	"EV_L2_MDL_ASSIGN",
	"EV_L2_MDL_REMOVE",
	"EV_L2_MDL_ERROR",
	"EV_L1_DEACTIVATE",
	"EV_L2_T200",
	"EV_L2_T203",
	"EV_L2_SET_OWN_BUSY",
	"EV_L2_CLEAR_OWN_BUSY",
	"EV_L2_FRAME_ERROR",
};
/*
 * FSM debug printout, prefixed with the link's SAPI/TEI.
 * Silently returns unless the DEBUG_L2_FSM bit is set in *debug.
 */
static void
l2m_debug(struct FsmInst *fi, char *fmt, ...)
{
	struct layer2 *l2 = fi->userdata;
	va_list va;

	if (!(*debug & DEBUG_L2_FSM))
		return;
	va_start(va, fmt);
	printk(KERN_DEBUG "l2 (sapi %d tei %d): ", l2->sapi, l2->tei);
	vprintk(fmt, va);
	printk("\n");
	va_end(va);
}
  99. inline u_int
  100. l2headersize(struct layer2 *l2, int ui)
  101. {
  102. return ((test_bit(FLG_MOD128, &l2->flag) && (!ui)) ? 2 : 1) +
  103. (test_bit(FLG_LAPD, &l2->flag) ? 2 : 1);
  104. }
  105. inline u_int
  106. l2addrsize(struct layer2 *l2)
  107. {
  108. return test_bit(FLG_LAPD, &l2->flag) ? 2 : 1;
  109. }
  110. static u_int
  111. l2_newid(struct layer2 *l2)
  112. {
  113. u_int id;
  114. id = l2->next_id++;
  115. if (id == 0x7fff)
  116. l2->next_id = 1;
  117. id <<= 16;
  118. id |= l2->tei << 8;
  119. id |= l2->sapi;
  120. return id;
  121. }
/*
 * Deliver an skb to the attached upper layer with the given primitive.
 * Frees the skb if the upper layer's send fails.
 * NOTE(review): when no upper layer is attached (!l2->up) the skb is
 * NOT freed here — confirm all callers account for that.
 */
static void
l2up(struct layer2 *l2, u_int prim, struct sk_buff *skb)
{
	int err;

	if (!l2->up)
		return;
	mISDN_HEAD_PRIM(skb) = prim;
	mISDN_HEAD_ID(skb) = (l2->ch.nr << 16) | l2->ch.addr;
	err = l2->up->send(l2->up, skb);
	if (err) {
		printk(KERN_WARNING "%s: err=%d\n", __func__, err);
		dev_kfree_skb(skb);
	}
}
/*
 * Allocate a new skb, copy in 'len' bytes from 'arg' (may be 0/NULL)
 * and deliver it to the upper layer with the given primitive.
 * Allocation failure is silently ignored (best effort, atomic context).
 */
static void
l2up_create(struct layer2 *l2, u_int prim, int len, void *arg)
{
	struct sk_buff *skb;
	struct mISDNhead *hh;
	int err;

	if (!l2->up)
		return;
	skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return;
	hh = mISDN_HEAD_P(skb);
	hh->prim = prim;
	hh->id = (l2->ch.nr << 16) | l2->ch.addr;
	if (len)
		memcpy(skb_put(skb, len), arg, len);
	err = l2->up->send(l2->up, skb);
	if (err) {
		printk(KERN_WARNING "%s: err=%d\n", __func__, err);
		dev_kfree_skb(skb);
	}
}
  158. static int
  159. l2down_skb(struct layer2 *l2, struct sk_buff *skb) {
  160. int ret;
  161. ret = l2->ch.recv(l2->ch.peer, skb);
  162. if (ret && (*debug & DEBUG_L2_RECV))
  163. printk(KERN_DEBUG "l2down_skb: ret(%d)\n", ret);
  164. return ret;
  165. }
  166. static int
  167. l2down_raw(struct layer2 *l2, struct sk_buff *skb)
  168. {
  169. struct mISDNhead *hh = mISDN_HEAD_P(skb);
  170. if (hh->prim == PH_DATA_REQ) {
  171. if (test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
  172. skb_queue_tail(&l2->down_queue, skb);
  173. return 0;
  174. }
  175. l2->down_id = mISDN_HEAD_ID(skb);
  176. }
  177. return l2down_skb(l2, skb);
  178. }
  179. static int
  180. l2down(struct layer2 *l2, u_int prim, u_int id, struct sk_buff *skb)
  181. {
  182. struct mISDNhead *hh = mISDN_HEAD_P(skb);
  183. hh->prim = prim;
  184. hh->id = id;
  185. return l2down_raw(l2, skb);
  186. }
/*
 * Allocate an skb, copy in 'len' bytes from 'arg' (may be 0/NULL) and
 * send it downward with the given primitive and id.
 * Returns 0 on success or a negative errno; frees the skb on failure.
 */
static int
l2down_create(struct layer2 *l2, u_int prim, u_int id, int len, void *arg)
{
	struct sk_buff *skb;
	int err;
	struct mISDNhead *hh;

	skb = mI_alloc_skb(len, GFP_ATOMIC);
	if (!skb)
		return -ENOMEM;
	hh = mISDN_HEAD_P(skb);
	hh->prim = prim;
	hh->id = id;
	if (len)
		memcpy(skb_put(skb, len), arg, len);
	err = l2down_raw(l2, skb);
	if (err)
		dev_kfree_skb(skb);
	return err;
}
/*
 * Handle PH_DATA_CNF from layer 1: the frame identified by hh->id has
 * been transmitted.  Push the next parked frame from down_queue down,
 * or clear FLG_L1_NOTREADY and kick the FSM with EV_L2_ACK_PULL when
 * nothing is left in flight.
 * Returns 0 when the confirm skb was consumed here, -EAGAIN otherwise
 * (confirm did not match the frame we were waiting for).
 */
static int
ph_data_confirm(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb) {
	struct sk_buff *nskb = skb;
	int ret = -EAGAIN;

	if (test_bit(FLG_L1_NOTREADY, &l2->flag)) {
		if (hh->id == l2->down_id) {
			/* confirm matches the frame currently in flight;
			 * start the next queued one, if any */
			nskb = skb_dequeue(&l2->down_queue);
			if (nskb) {
				l2->down_id = mISDN_HEAD_ID(nskb);
				if (l2down_skb(l2, nskb)) {
					dev_kfree_skb(nskb);
					l2->down_id = MISDN_ID_NONE;
				}
			} else
				l2->down_id = MISDN_ID_NONE;
			/* ret is still -EAGAIN at this point, so the
			 * confirm skb is always consumed on this path */
			if (ret) {
				dev_kfree_skb(skb);
				ret = 0;
			}
			if (l2->down_id == MISDN_ID_NONE) {
				/* nothing in flight: let the FSM send more */
				test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
				mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
			}
		}
	}
	if (!test_and_set_bit(FLG_L1_NOTREADY, &l2->flag)) {
		/* flag was clear (possibly just cleared above): try to
		 * push the next queued frame immediately */
		nskb = skb_dequeue(&l2->down_queue);
		if (nskb) {
			l2->down_id = mISDN_HEAD_ID(nskb);
			if (l2down_skb(l2, nskb)) {
				dev_kfree_skb(nskb);
				l2->down_id = MISDN_ID_NONE;
				test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
			}
		} else
			test_and_clear_bit(FLG_L1_NOTREADY, &l2->flag);
	}
	return ret;
}
/*
 * Report an MDL error condition (single-letter Q.921 error code in
 * 'arg') to the TEI manager.  Only acted upon for LAPD links with a
 * dynamically assigned TEI; codes other than C/D/G/H are log-only.
 */
static int
l2mgr(struct layer2 *l2, u_int prim, void *arg) {
	long c = (long)arg;

	printk(KERN_WARNING
	       "l2mgr: addr:%x prim %x %c\n", l2->id, prim, (char)c);
	if (test_bit(FLG_LAPD, &l2->flag) &&
	    !test_bit(FLG_FIXED_TEI, &l2->flag)) {
		switch (c) {
		case 'C':
		case 'D':
		case 'G':
		case 'H':
			l2_tei(l2, prim, (u_long)arg);
			break;
		}
	}
	return 0;
}
  263. static void
  264. set_peer_busy(struct layer2 *l2) {
  265. test_and_set_bit(FLG_PEER_BUSY, &l2->flag);
  266. if (skb_queue_len(&l2->i_queue) || skb_queue_len(&l2->ui_queue))
  267. test_and_set_bit(FLG_L2BLOCK, &l2->flag);
  268. }
  269. static void
  270. clear_peer_busy(struct layer2 *l2) {
  271. if (test_and_clear_bit(FLG_PEER_BUSY, &l2->flag))
  272. test_and_clear_bit(FLG_L2BLOCK, &l2->flag);
  273. }
  274. static void
  275. InitWin(struct layer2 *l2)
  276. {
  277. int i;
  278. for (i = 0; i < MAX_WINDOW; i++)
  279. l2->windowar[i] = NULL;
  280. }
  281. static int
  282. freewin(struct layer2 *l2)
  283. {
  284. int i, cnt = 0;
  285. for (i = 0; i < MAX_WINDOW; i++) {
  286. if (l2->windowar[i]) {
  287. cnt++;
  288. dev_kfree_skb(l2->windowar[i]);
  289. l2->windowar[i] = NULL;
  290. }
  291. }
  292. return cnt;
  293. }
  294. static void
  295. ReleaseWin(struct layer2 *l2)
  296. {
  297. int cnt = freewin(l2);
  298. if (cnt)
  299. printk(KERN_WARNING
  300. "isdnl2 freed %d skbuffs in release\n", cnt);
  301. }
  302. inline unsigned int
  303. cansend(struct layer2 *l2)
  304. {
  305. unsigned int p1;
  306. if (test_bit(FLG_MOD128, &l2->flag))
  307. p1 = (l2->vs - l2->va) % 128;
  308. else
  309. p1 = (l2->vs - l2->va) % 8;
  310. return (p1 < l2->window) && !test_bit(FLG_PEER_BUSY, &l2->flag);
  311. }
  312. inline void
  313. clear_exception(struct layer2 *l2)
  314. {
  315. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  316. test_and_clear_bit(FLG_REJEXC, &l2->flag);
  317. test_and_clear_bit(FLG_OWN_BUSY, &l2->flag);
  318. clear_peer_busy(l2);
  319. }
/*
 * Write the layer 2 address field into 'header'.
 * 'rsp' selects response (vs. command) framing; the C/R bit is
 * inverted on the network side (LAPD) or originating side (LAPB).
 * Returns the number of octets written (2 for LAPD, 1 for LAPB).
 */
static int
sethdraddr(struct layer2 *l2, u_char *header, int rsp)
{
	u_char *ptr = header;
	int crbit = rsp;

	if (test_bit(FLG_LAPD, &l2->flag)) {
		if (test_bit(FLG_LAPD_NET, &l2->flag))
			crbit = !crbit;
		/* SAPI + C/R bit, then TEI with EA bit set */
		*ptr++ = (l2->sapi << 2) | (crbit ? 2 : 0);
		*ptr++ = (l2->tei << 1) | 1;
		return 2;
	} else {
		if (test_bit(FLG_ORIG, &l2->flag))
			crbit = !crbit;
		if (crbit)
			*ptr++ = l2->addr.B;
		else
			*ptr++ = l2->addr.A;
		return 1;
	}
}
/* Send a supervisory/unnumbered frame down; frees the skb on failure. */
static inline void
enqueue_super(struct layer2 *l2, struct sk_buff *skb)
{
	if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
		dev_kfree_skb(skb);
}

/* Send a UI frame down, notifying the TEI manager first if one is
 * attached; frees the skb on failure. */
static inline void
enqueue_ui(struct layer2 *l2, struct sk_buff *skb)
{
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UI_IND, 0);
	if (l2down(l2, PH_DATA_REQ, l2_newid(l2), skb))
		dev_kfree_skb(skb);
}
  355. inline int
  356. IsUI(u_char *data)
  357. {
  358. return (data[0] & 0xef) == UI;
  359. }
  360. inline int
  361. IsUA(u_char *data)
  362. {
  363. return (data[0] & 0xef) == UA;
  364. }
  365. inline int
  366. IsDM(u_char *data)
  367. {
  368. return (data[0] & 0xef) == DM;
  369. }
  370. inline int
  371. IsDISC(u_char *data)
  372. {
  373. return (data[0] & 0xef) == DISC;
  374. }
  375. inline int
  376. IsRR(u_char *data, struct layer2 *l2)
  377. {
  378. if (test_bit(FLG_MOD128, &l2->flag))
  379. return data[0] == RR;
  380. else
  381. return (data[0] & 0xf) == 1;
  382. }
  383. inline int
  384. IsSFrame(u_char *data, struct layer2 *l2)
  385. {
  386. register u_char d = *data;
  387. if (!test_bit(FLG_MOD128, &l2->flag))
  388. d &= 0xf;
  389. return ((d & 0xf3) == 1) && ((d & 0x0c) != 0x0c);
  390. }
  391. inline int
  392. IsSABME(u_char *data, struct layer2 *l2)
  393. {
  394. u_char d = data[0] & ~0x10;
  395. return test_bit(FLG_MOD128, &l2->flag) ? d == SABME : d == SABM;
  396. }
  397. inline int
  398. IsREJ(u_char *data, struct layer2 *l2)
  399. {
  400. return test_bit(FLG_MOD128, &l2->flag) ?
  401. data[0] == REJ : (data[0] & 0xf) == REJ;
  402. }
  403. inline int
  404. IsFRMR(u_char *data)
  405. {
  406. return (data[0] & 0xef) == FRMR;
  407. }
  408. inline int
  409. IsRNR(u_char *data, struct layer2 *l2)
  410. {
  411. return test_bit(FLG_MOD128, &l2->flag) ?
  412. data[0] == RNR : (data[0] & 0xf) == RNR;
  413. }
/*
 * Validate a received I frame.  Returns a Q.921 error code letter for
 * l2mgr(): 'L' wrong C/R bit (I frames must be commands), 'N' frame
 * too short, 'O' information field longer than maxlen; 0 if OK.
 */
static int
iframe_error(struct layer2 *l2, struct sk_buff *skb)
{
	u_int i;
	int rsp = *skb->data & 0x2;

	/* minimum length: address field + control field */
	i = l2addrsize(l2) + (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1);
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	if (rsp)
		return 'L';
	if (skb->len < i)
		return 'N';
	if ((skb->len - i) > l2->maxlen)
		return 'O';
	return 0;
}

/* Validate a received S frame: must be exactly address + control
 * field long.  Returns 'N' on length error, 0 if OK. */
static int
super_error(struct layer2 *l2, struct sk_buff *skb)
{
	if (skb->len != l2addrsize(l2) +
	    (test_bit(FLG_MOD128, &l2->flag) ? 2 : 1))
		return 'N';
	return 0;
}

/* Validate a received U frame against the expected C/R direction.
 * Returns 'L' on C/R mismatch, 'N' on length error, 0 if OK. */
static int
unnum_error(struct layer2 *l2, struct sk_buff *skb, int wantrsp)
{
	int rsp = (*skb->data & 0x2) >> 1;

	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	if (rsp != wantrsp)
		return 'L';
	if (skb->len != l2addrsize(l2) + 1)
		return 'N';
	return 0;
}

/* Validate a received UI frame: must be a command and not exceed
 * maxlen.  Returns 'L' or 'O' on error, 0 if OK. */
static int
UI_error(struct layer2 *l2, struct sk_buff *skb)
{
	int rsp = *skb->data & 0x2;

	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	if (rsp)
		return 'L';
	if (skb->len > l2->maxlen + l2addrsize(l2) + 1)
		return 'O';
	return 0;
}
/*
 * Validate a received FRMR frame (must be a response) and dump its
 * information field when L2 debugging is on.  The information field
 * is 5 octets for modulo-128, 3 for modulo-8.
 * Returns 'L' on C/R error, 'N' if too short, 0 if OK.
 */
static int
FRMR_error(struct layer2 *l2, struct sk_buff *skb)
{
	u_int headers = l2addrsize(l2) + 1;
	u_char *datap = skb->data + headers;
	int rsp = *skb->data & 0x2;

	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;
	if (!rsp)
		return 'L';	/* FRMR must be a response */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		if (skb->len < headers + 5)
			return 'N';
		else if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m,
				  "FRMR information %2x %2x %2x %2x %2x",
				  datap[0], datap[1], datap[2], datap[3], datap[4]);
	} else {
		if (skb->len < headers + 3)
			return 'N';
		else if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m,
				  "FRMR information %2x %2x %2x",
				  datap[0], datap[1], datap[2]);
	}
	return 0;
}
  489. static unsigned int
  490. legalnr(struct layer2 *l2, unsigned int nr)
  491. {
  492. if (test_bit(FLG_MOD128, &l2->flag))
  493. return ((nr - l2->va) % 128) <= ((l2->vs - l2->va) % 128);
  494. else
  495. return ((nr - l2->va) % 8) <= ((l2->vs - l2->va) % 8);
  496. }
/*
 * Advance V(A) (acknowledge state variable) to 'nr', releasing every
 * acknowledged I-frame from the transmit window.  Acked skbs are moved
 * to tmp_queue first and freed afterwards — presumably to keep the
 * window bookkeeping atomic before freeing; confirm against locking in
 * the callers.
 */
static void
setva(struct layer2 *l2, unsigned int nr)
{
	struct sk_buff *skb;

	while (l2->va != nr) {
		l2->va++;
		if (test_bit(FLG_MOD128, &l2->flag))
			l2->va %= 128;
		else
			l2->va %= 8;
		/* release the oldest window slot */
		if (l2->windowar[l2->sow]) {
			skb_trim(l2->windowar[l2->sow], 0);
			skb_queue_tail(&l2->tmp_queue, l2->windowar[l2->sow]);
			l2->windowar[l2->sow] = NULL;
		}
		l2->sow = (l2->sow + 1) % l2->window;
	}
	skb = skb_dequeue(&l2->tmp_queue);
	while (skb) {
		dev_kfree_skb(skb);
		skb = skb_dequeue(&l2->tmp_queue);
	}
}
/*
 * Build and transmit an unnumbered frame with control octet 'cmd'
 * (including the P/F bit) and C/R selector 'cr'.  An existing skb may
 * be passed in for reuse (it is trimmed to zero); otherwise a fresh
 * one is allocated.
 */
static void
send_uframe(struct layer2 *l2, struct sk_buff *skb, u_char cmd, u_char cr)
{
	u_char tmp[MAX_L2HEADER_LEN];
	int i;

	i = sethdraddr(l2, tmp, cr);
	tmp[i++] = cmd;
	if (skb)
		skb_trim(skb, 0);
	else {
		skb = mI_alloc_skb(i, GFP_ATOMIC);
		if (!skb) {
			printk(KERN_WARNING "%s: can't alloc skbuff\n",
			       __func__);
			return;
		}
	}
	memcpy(skb_put(skb, i), tmp, i);
	enqueue_super(l2, skb);
}
  540. inline u_char
  541. get_PollFlag(struct layer2 *l2, struct sk_buff *skb)
  542. {
  543. return skb->data[l2addrsize(l2)] & 0x10;
  544. }
  545. inline u_char
  546. get_PollFlagFree(struct layer2 *l2, struct sk_buff *skb)
  547. {
  548. u_char PF;
  549. PF = get_PollFlag(l2, skb);
  550. dev_kfree_skb(skb);
  551. return PF;
  552. }
/* Arm the T200 retransmission timer and note that it is running.
 * NOTE(review): 'i' looks like a trace/debug id passed through to the
 * FSM timer code — confirm against fsm.c. */
inline void
start_t200(struct layer2 *l2, int i)
{
	mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &l2->flag);
}

/* Re-arm T200 regardless of whether it was already running. */
inline void
restart_t200(struct layer2 *l2, int i)
{
	mISDN_FsmRestartTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, i);
	test_and_set_bit(FLG_T200_RUN, &l2->flag);
}

/* Stop T200 if (and only if) it was running. */
inline void
stop_t200(struct layer2 *l2, int i)
{
	if (test_and_clear_bit(FLG_T200_RUN, &l2->flag))
		mISDN_FsmDelTimer(&l2->t200, i);
}
/*
 * Report a release to layer 3 from state 5 (awaiting establish):
 * a CNF if the release was requested locally (FLG_PEND_REL), an IND
 * otherwise.
 */
inline void
st5_dl_release_l2l3(struct layer2 *l2)
{
	int pr;

	if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
		pr = DL_RELEASE_CNF;
	else
		pr = DL_RELEASE_IND;
	l2up_create(l2, pr, 0, NULL);
}

/*
 * Report link release ('f' is DL_RELEASE_IND or DL_RELEASE_CNF) to
 * layer 3; for LAPB links also request layer 1 deactivation.
 */
inline void
lapb_dl_release_l2l3(struct layer2 *l2, int f)
{
	if (test_bit(FLG_LAPB, &l2->flag))
		l2down_create(l2, PH_DEACTIVATE_REQ, l2_newid(l2), 0, NULL);
	l2up_create(l2, f, 0, NULL);
}
/*
 * Start (re)establishment: send SABM/SABME with P=1, reset the retry
 * counter, flush the transmit window and enter state 5 (awaiting
 * establish) with T200 running and T203 stopped.
 */
static void
establishlink(struct FsmInst *fi)
{
	struct layer2 *l2 = fi->userdata;
	u_char cmd;

	clear_exception(l2);
	l2->rc = 0;	/* retry counter */
	cmd = (test_bit(FLG_MOD128, &l2->flag) ? SABME : SABM) | 0x10;
	send_uframe(l2, NULL, cmd, CMD);
	mISDN_FsmDelTimer(&l2->t203, 1);
	restart_t200(l2, 1);
	test_and_clear_bit(FLG_PEND_REL, &l2->flag);
	freewin(l2);
	mISDN_FsmChangeState(fi, ST_L2_5);
}
/*
 * Unexpected UA received: report MDL error 'C' (UA with F=1) or
 * 'D' (UA with F=0) to the management entity.  Consumes the skb.
 */
static void
l2_mdl_error_ua(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct layer2 *l2 = fi->userdata;

	if (get_PollFlagFree(l2, skb))
		l2mgr(l2, MDL_ERROR_IND, (void *) 'C');
	else
		l2mgr(l2, MDL_ERROR_IND, (void *) 'D');
}

/*
 * Unexpected DM received: report 'B' for F=1; for F=0 report 'E' and
 * restart establishment (clearing the layer-3-initiated marker).
 */
static void
l2_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct layer2 *l2 = fi->userdata;

	if (get_PollFlagFree(l2, skb))
		l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
	else {
		l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
	}
}

/*
 * DM received in state 8 (timer recovery): report 'B' or 'E' as above,
 * then always re-establish the link.
 */
static void
l2_st8_mdl_error_dm(struct FsmInst *fi, int event, void *arg)
{
	struct sk_buff *skb = arg;
	struct layer2 *l2 = fi->userdata;

	if (get_PollFlagFree(l2, skb))
		l2mgr(l2, MDL_ERROR_IND, (void *) 'B');
	else
		l2mgr(l2, MDL_ERROR_IND, (void *) 'E');
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
  638. static void
  639. l2_go_st3(struct FsmInst *fi, int event, void *arg)
  640. {
  641. dev_kfree_skb((struct sk_buff *)arg);
  642. mISDN_FsmChangeState(fi, ST_L2_3);
  643. }
  644. static void
  645. l2_mdl_assign(struct FsmInst *fi, int event, void *arg)
  646. {
  647. struct layer2 *l2 = fi->userdata;
  648. mISDN_FsmChangeState(fi, ST_L2_3);
  649. dev_kfree_skb((struct sk_buff *)arg);
  650. l2_tei(l2, MDL_ASSIGN_IND, 0);
  651. }
  652. static void
  653. l2_queue_ui_assign(struct FsmInst *fi, int event, void *arg)
  654. {
  655. struct layer2 *l2 = fi->userdata;
  656. struct sk_buff *skb = arg;
  657. skb_queue_tail(&l2->ui_queue, skb);
  658. mISDN_FsmChangeState(fi, ST_L2_2);
  659. l2_tei(l2, MDL_ASSIGN_IND, 0);
  660. }
  661. static void
  662. l2_queue_ui(struct FsmInst *fi, int event, void *arg)
  663. {
  664. struct layer2 *l2 = fi->userdata;
  665. struct sk_buff *skb = arg;
  666. skb_queue_tail(&l2->ui_queue, skb);
  667. }
  668. static void
  669. tx_ui(struct layer2 *l2)
  670. {
  671. struct sk_buff *skb;
  672. u_char header[MAX_L2HEADER_LEN];
  673. int i;
  674. i = sethdraddr(l2, header, CMD);
  675. if (test_bit(FLG_LAPD_NET, &l2->flag))
  676. header[1] = 0xff; /* tei 127 */
  677. header[i++] = UI;
  678. while ((skb = skb_dequeue(&l2->ui_queue))) {
  679. memcpy(skb_push(skb, i), header, i);
  680. enqueue_ui(l2, skb);
  681. }
  682. }
  683. static void
  684. l2_send_ui(struct FsmInst *fi, int event, void *arg)
  685. {
  686. struct layer2 *l2 = fi->userdata;
  687. struct sk_buff *skb = arg;
  688. skb_queue_tail(&l2->ui_queue, skb);
  689. tx_ui(l2);
  690. }
/*
 * Received UI frame: strip the layer 2 header and pass the payload to
 * layer 3 as DL_UNITDATA_IND, notifying the TEI manager if attached.
 */
static void
l2_got_ui(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_pull(skb, l2headersize(l2, 1));
/*
 * in states 1-3 for broadcast
 */
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UI_IND, 0);
	l2up(l2, DL_UNITDATA_IND, skb);
}
  704. static void
  705. l2_establish(struct FsmInst *fi, int event, void *arg)
  706. {
  707. struct sk_buff *skb = arg;
  708. struct layer2 *l2 = fi->userdata;
  709. establishlink(fi);
  710. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  711. dev_kfree_skb(skb);
  712. }
  713. static void
  714. l2_discard_i_setl3(struct FsmInst *fi, int event, void *arg)
  715. {
  716. struct sk_buff *skb = arg;
  717. struct layer2 *l2 = fi->userdata;
  718. skb_queue_purge(&l2->i_queue);
  719. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  720. test_and_clear_bit(FLG_PEND_REL, &l2->flag);
  721. dev_kfree_skb(skb);
  722. }
  723. static void
  724. l2_l3_reestablish(struct FsmInst *fi, int event, void *arg)
  725. {
  726. struct sk_buff *skb = arg;
  727. struct layer2 *l2 = fi->userdata;
  728. skb_queue_purge(&l2->i_queue);
  729. establishlink(fi);
  730. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  731. dev_kfree_skb(skb);
  732. }
  733. static void
  734. l2_release(struct FsmInst *fi, int event, void *arg)
  735. {
  736. struct layer2 *l2 = fi->userdata;
  737. struct sk_buff *skb = arg;
  738. skb_trim(skb, 0);
  739. l2up(l2, DL_RELEASE_CNF, skb);
  740. }
  741. static void
  742. l2_pend_rel(struct FsmInst *fi, int event, void *arg)
  743. {
  744. struct sk_buff *skb = arg;
  745. struct layer2 *l2 = fi->userdata;
  746. test_and_set_bit(FLG_PEND_REL, &l2->flag);
  747. dev_kfree_skb(skb);
  748. }
/*
 * Start link release: flush queues and the transmit window, send DISC
 * with P=1, enter state 6 (awaiting release) with T200 running.
 * 'arg' may be NULL (internal callers).
 */
static void
l2_disconnect(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	skb_queue_purge(&l2->i_queue);
	freewin(l2);
	mISDN_FsmChangeState(fi, ST_L2_6);
	l2->rc = 0;	/* retry counter */
	send_uframe(l2, NULL, DISC | 0x10, CMD);
	mISDN_FsmDelTimer(&l2->t203, 1);
	restart_t200(l2, 2);
	if (skb)
		dev_kfree_skb(skb);
}
/*
 * SABM/SABME received while down: reset all state variables, answer
 * with UA (echoing the P/F bit), enter the multi-frame state 7 with
 * T203 running, and indicate DL_ESTABLISH to layer 3 (reusing the skb).
 */
static void
l2_start_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	l2->vs = 0;
	l2->va = 0;
	l2->vr = 0;
	l2->sow = 0;
	clear_exception(l2);
	send_uframe(l2, NULL, UA | get_PollFlag(l2, skb), RSP);
	mISDN_FsmChangeState(fi, ST_L2_7);
	mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
	skb_trim(skb, 0);
	l2up(l2, DL_ESTABLISH_IND, skb);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UP_IND, 0);
}
  782. static void
  783. l2_send_UA(struct FsmInst *fi, int event, void *arg)
  784. {
  785. struct layer2 *l2 = fi->userdata;
  786. struct sk_buff *skb = arg;
  787. send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
  788. }
  789. static void
  790. l2_send_DM(struct FsmInst *fi, int event, void *arg)
  791. {
  792. struct layer2 *l2 = fi->userdata;
  793. struct sk_buff *skb = arg;
  794. send_uframe(l2, skb, DM | get_PollFlag(l2, skb), RSP);
  795. }
/*
 * SABM/SABME received while already established (peer re-establish):
 * answer with UA, report MDL error 'F', reset the state variables and
 * restart T203.  If unacknowledged I-frames existed, the queue is
 * purged and a fresh DL_ESTABLISH_IND is sent to layer 3.
 */
static void
l2_restart_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int est = 0;

	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
	l2mgr(l2, MDL_ERROR_IND, (void *) 'F');
	if (l2->vs != l2->va) {
		/* outstanding frames are lost by the reset */
		skb_queue_purge(&l2->i_queue);
		est = 1;
	}
	clear_exception(l2);
	l2->vs = 0;
	l2->va = 0;
	l2->vr = 0;
	l2->sow = 0;
	mISDN_FsmChangeState(fi, ST_L2_7);
	stop_t200(l2, 3);
	mISDN_FsmRestartTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 3);
	if (est)
		l2up_create(l2, DL_ESTABLISH_IND, 0, NULL);
/* mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
 * MGR_SHORTSTATUS | INDICATION, SSTATUS_L2_ESTABLISHED,
 * 0, NULL, 0);
 */
	if (skb_queue_len(&l2->i_queue) && cansend(l2))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
}
/*
 * DISC received while established: stop both timers, acknowledge with
 * UA (reusing the skb), drop all pending I-frames and the window, and
 * report DL_RELEASE_IND upward (state 4).
 */
static void
l2_stop_multi(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	mISDN_FsmChangeState(fi, ST_L2_4);
	mISDN_FsmDelTimer(&l2->t203, 3);
	stop_t200(l2, 4);
	send_uframe(l2, skb, UA | get_PollFlag(l2, skb), RSP);
	skb_queue_purge(&l2->i_queue);
	freewin(l2);
	lapb_dl_release_l2l3(l2, DL_RELEASE_IND);
	if (l2->tm)
		l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
}
/*
 * UA received in ST_L2_5 (awaiting establishment): complete the
 * establish procedure.  Reports DL_ESTABLISH_CNF when we initiated
 * (FLG_L3_INIT), or DL_ESTABLISH_IND when a peer re-establish dropped
 * outstanding I-frames.
 */
static void
l2_connected(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int pr = -1;

	if (!get_PollFlag(l2, skb)) {
		/* UA without the F bit set is a protocol error */
		l2_mdl_error_ua(fi, event, arg);
		return;
	}
	dev_kfree_skb(skb);

	if (test_and_clear_bit(FLG_PEND_REL, &l2->flag))
		l2_disconnect(fi, event, NULL);
	/* NOTE(review): execution continues after l2_disconnect() above;
	 * this matches the historic i4l code but is worth confirming */
	if (test_and_clear_bit(FLG_L3_INIT, &l2->flag)) {
		pr = DL_ESTABLISH_CNF;
	} else if (l2->vs != l2->va) {
		skb_queue_purge(&l2->i_queue);
		pr = DL_ESTABLISH_IND;
	}

	stop_t200(l2, 5);

	/* fresh link: reset all sequence state */
	l2->vr = 0;
	l2->vs = 0;
	l2->va = 0;
	l2->sow = 0;
	mISDN_FsmChangeState(fi, ST_L2_7);
	mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 4);

	if (pr != -1)
		l2up_create(l2, pr, 0, NULL);

	if (skb_queue_len(&l2->i_queue) && cansend(l2))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);

	if (l2->tm)
		l2_tei(l2, MDL_STATUS_UP_IND, 0);
}
  873. static void
  874. l2_released(struct FsmInst *fi, int event, void *arg)
  875. {
  876. struct layer2 *l2 = fi->userdata;
  877. struct sk_buff *skb = arg;
  878. if (!get_PollFlag(l2, skb)) {
  879. l2_mdl_error_ua(fi, event, arg);
  880. return;
  881. }
  882. dev_kfree_skb(skb);
  883. stop_t200(l2, 6);
  884. lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
  885. mISDN_FsmChangeState(fi, ST_L2_4);
  886. if (l2->tm)
  887. l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
  888. }
  889. static void
  890. l2_reestablish(struct FsmInst *fi, int event, void *arg)
  891. {
  892. struct layer2 *l2 = fi->userdata;
  893. struct sk_buff *skb = arg;
  894. if (!get_PollFlagFree(l2, skb)) {
  895. establishlink(fi);
  896. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  897. }
  898. }
/*
 * DM with F bit received while awaiting establishment (ST 5): the peer
 * refuses the link, so abort the establish procedure and go idle.
 */
static void
l2_st5_dm_release(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	if (get_PollFlagFree(l2, skb)) {
		stop_t200(l2, 7);
		/* keep queued data only for an L3-initiated retry */
		if (!test_bit(FLG_L3_INIT, &l2->flag))
			skb_queue_purge(&l2->i_queue);
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
				      l2_newid(l2), 0, NULL);
		st5_dl_release_l2l3(l2);
		mISDN_FsmChangeState(fi, ST_L2_4);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	}
}
  917. static void
  918. l2_st6_dm_release(struct FsmInst *fi, int event, void *arg)
  919. {
  920. struct layer2 *l2 = fi->userdata;
  921. struct sk_buff *skb = arg;
  922. if (get_PollFlagFree(l2, skb)) {
  923. stop_t200(l2, 8);
  924. lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
  925. mISDN_FsmChangeState(fi, ST_L2_4);
  926. if (l2->tm)
  927. l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
  928. }
  929. }
/*
 * Build and queue a supervisory frame (RR/RNR/REJ) carrying V(R).
 * @typ: S-frame type, @cr: CMD or RSP address coding, @pf: poll/final bit.
 */
static void
enquiry_cr(struct layer2 *l2, u_char typ, u_char cr, u_char pf)
{
	struct sk_buff *skb;
	u_char tmp[MAX_L2HEADER_LEN];
	int i;

	i = sethdraddr(l2, tmp, cr);
	if (test_bit(FLG_MOD128, &l2->flag)) {
		/* extended (mod 128): two control octets, N(R) in the second */
		tmp[i++] = typ;
		tmp[i++] = (l2->vr << 1) | (pf ? 1 : 0);
	} else
		tmp[i++] = (l2->vr << 5) | typ | (pf ? 0x10 : 0);
	skb = mI_alloc_skb(i, GFP_ATOMIC);
	if (!skb) {
		printk(KERN_WARNING
		       "isdnl2 can't alloc sbbuff for enquiry_cr\n");
		return;
	}
	memcpy(skb_put(skb, i), tmp, i);
	enqueue_super(l2, skb);
}
  951. inline void
  952. enquiry_response(struct layer2 *l2)
  953. {
  954. if (test_bit(FLG_OWN_BUSY, &l2->flag))
  955. enquiry_cr(l2, RNR, RSP, 1);
  956. else
  957. enquiry_cr(l2, RR, RSP, 1);
  958. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  959. }
  960. inline void
  961. transmit_enquiry(struct layer2 *l2)
  962. {
  963. if (test_bit(FLG_OWN_BUSY, &l2->flag))
  964. enquiry_cr(l2, RNR, CMD, 1);
  965. else
  966. enquiry_cr(l2, RR, CMD, 1);
  967. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  968. start_t200(l2, 9);
  969. }
/*
 * Invalid N(R) received: report error 'J' to the management entity and
 * re-establish the link (not counted as an L3-initiated establish).
 */
static void
nrerrorrecovery(struct FsmInst *fi)
{
	struct layer2 *l2 = fi->userdata;

	l2mgr(l2, MDL_ERROR_IND, (void *) 'J');
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
  978. static void
  979. invoke_retransmission(struct layer2 *l2, unsigned int nr)
  980. {
  981. u_int p1;
  982. if (l2->vs != nr) {
  983. while (l2->vs != nr) {
  984. (l2->vs)--;
  985. if (test_bit(FLG_MOD128, &l2->flag)) {
  986. l2->vs %= 128;
  987. p1 = (l2->vs - l2->va) % 128;
  988. } else {
  989. l2->vs %= 8;
  990. p1 = (l2->vs - l2->va) % 8;
  991. }
  992. p1 = (p1 + l2->sow) % l2->window;
  993. if (l2->windowar[p1])
  994. skb_queue_head(&l2->i_queue, l2->windowar[p1]);
  995. else
  996. printk(KERN_WARNING
  997. "%s: windowar[%d] is NULL\n",
  998. __func__, p1);
  999. l2->windowar[p1] = NULL;
  1000. }
  1001. mISDN_FsmEvent(&l2->l2m, EV_L2_ACK_PULL, NULL);
  1002. }
  1003. }
/*
 * Supervisory frame (RR/RNR/REJ) received in ST_L2_7: update peer-busy
 * state, answer a poll, acknowledge via N(R) and trigger retransmission
 * on REJ.  An invalid N(R) forces link re-establishment.
 */
static void
l2_st7_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, typ = RR;
	unsigned int nr;

	rsp = *skb->data & 0x2;
	/* in originator mode the C/R sense of the address is inverted */
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;

	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, l2)) {
		set_peer_busy(l2);
		typ = RNR;
	} else
		clear_peer_busy(l2);
	if (IsREJ(skb->data, l2))
		typ = REJ;

	/* extract P/F bit and N(R) for mod-128 vs. mod-8 coding */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);

	if (PollFlag) {
		if (rsp)
			/* unsolicited response with F=1: error 'A' */
			l2mgr(l2, MDL_ERROR_IND, (void *) 'A');
		else
			enquiry_response(l2);
	}
	if (legalnr(l2, nr)) {
		if (typ == REJ) {
			setva(l2, nr);
			invoke_retransmission(l2, nr);
			stop_t200(l2, 10);
			if (mISDN_FsmAddTimer(&l2->t203, l2->T203,
					      EV_L2_T203, NULL, 6))
				l2m_debug(&l2->l2m, "Restart T203 ST7 REJ");
		} else if ((nr == l2->vs) && (typ == RR)) {
			/* everything acknowledged: idle with T203 running */
			setva(l2, nr);
			stop_t200(l2, 11);
			mISDN_FsmRestartTimer(&l2->t203, l2->T203,
					      EV_L2_T203, NULL, 7);
		} else if ((l2->va != nr) || (typ == RNR)) {
			/* partial ack or peer busy: keep T200 supervision */
			setva(l2, nr);
			if (typ != RR)
				mISDN_FsmDelTimer(&l2->t203, 9);
			restart_t200(l2, 12);
		}
		if (skb_queue_len(&l2->i_queue) && (typ == RR))
			mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
	} else
		nrerrorrecovery(fi);
}
  1060. static void
  1061. l2_feed_i_if_reest(struct FsmInst *fi, int event, void *arg)
  1062. {
  1063. struct layer2 *l2 = fi->userdata;
  1064. struct sk_buff *skb = arg;
  1065. if (!test_bit(FLG_L3_INIT, &l2->flag))
  1066. skb_queue_tail(&l2->i_queue, skb);
  1067. else
  1068. dev_kfree_skb(skb);
  1069. }
  1070. static void
  1071. l2_feed_i_pull(struct FsmInst *fi, int event, void *arg)
  1072. {
  1073. struct layer2 *l2 = fi->userdata;
  1074. struct sk_buff *skb = arg;
  1075. skb_queue_tail(&l2->i_queue, skb);
  1076. mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
  1077. }
  1078. static void
  1079. l2_feed_iqueue(struct FsmInst *fi, int event, void *arg)
  1080. {
  1081. struct layer2 *l2 = fi->userdata;
  1082. struct sk_buff *skb = arg;
  1083. skb_queue_tail(&l2->i_queue, skb);
  1084. }
/*
 * I-frame received (ST 7/8): deliver in-sequence data to layer 3,
 * reject out-of-sequence frames, and process the piggybacked N(R)
 * acknowledgement.
 */
static void
l2_got_iframe(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, i;
	u_int ns, nr;

	i = l2addrsize(l2);
	/* decode P bit, N(S) and N(R) for mod-128 vs. mod-8 coding */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = ((skb->data[i + 1] & 0x1) == 0x1);
		ns = skb->data[i] >> 1;
		nr = (skb->data[i + 1] >> 1) & 0x7f;
	} else {
		PollFlag = (skb->data[i] & 0x10);
		ns = (skb->data[i] >> 1) & 0x7;
		nr = (skb->data[i] >> 5) & 0x7;
	}
	if (test_bit(FLG_OWN_BUSY, &l2->flag)) {
		/* receiver busy: drop the data, but still answer a poll */
		dev_kfree_skb(skb);
		if (PollFlag)
			enquiry_response(l2);
	} else {
		if (l2->vr == ns) {
			/* in sequence: accept, bump V(R), pass data up */
			l2->vr++;
			if (test_bit(FLG_MOD128, &l2->flag))
				l2->vr %= 128;
			else
				l2->vr %= 8;
			test_and_clear_bit(FLG_REJEXC, &l2->flag);
			if (PollFlag)
				enquiry_response(l2);
			else
				test_and_set_bit(FLG_ACK_PEND, &l2->flag);
			skb_pull(skb, l2headersize(l2, 0));
			l2up(l2, DL_DATA_IND, skb);
		} else {
			/* n(s)!=v(r) */
			dev_kfree_skb(skb);
			if (test_and_set_bit(FLG_REJEXC, &l2->flag)) {
				/* REJ already outstanding: only answer polls */
				if (PollFlag)
					enquiry_response(l2);
			} else {
				enquiry_cr(l2, REJ, RSP, PollFlag);
				test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
			}
		}
	}
	/* process the piggybacked acknowledgement N(R) */
	if (legalnr(l2, nr)) {
		if (!test_bit(FLG_PEER_BUSY, &l2->flag) &&
		    (fi->state == ST_L2_7)) {
			if (nr == l2->vs) {
				stop_t200(l2, 13);
				mISDN_FsmRestartTimer(&l2->t203, l2->T203,
						      EV_L2_T203, NULL, 7);
			} else if (nr != l2->va)
				restart_t200(l2, 14);
		}
		setva(l2, nr);
	} else {
		nrerrorrecovery(fi);
		return;
	}
	if (skb_queue_len(&l2->i_queue) && (fi->state == ST_L2_7))
		mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
	if (test_and_clear_bit(FLG_ACK_PEND, &l2->flag))
		enquiry_cr(l2, RR, RSP, 0);
}
  1152. static void
  1153. l2_got_tei(struct FsmInst *fi, int event, void *arg)
  1154. {
  1155. struct layer2 *l2 = fi->userdata;
  1156. u_int info;
  1157. l2->tei = (signed char)(long)arg;
  1158. set_channel_address(&l2->ch, l2->sapi, l2->tei);
  1159. info = DL_INFO_L2_CONNECT;
  1160. l2up_create(l2, DL_INFORMATION_IND, sizeof(info), &info);
  1161. if (fi->state == ST_L2_3) {
  1162. establishlink(fi);
  1163. test_and_set_bit(FLG_L3_INIT, &l2->flag);
  1164. } else
  1165. mISDN_FsmChangeState(fi, ST_L2_4);
  1166. if (skb_queue_len(&l2->ui_queue))
  1167. tx_ui(l2);
  1168. }
/*
 * T200 expired while awaiting establishment (ST 5): retransmit SABM(E)
 * up to N200 times, then give up and release.  While the D-channel is
 * busy, only rearm the timer.
 */
static void
l2_st5_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
	} else if (l2->rc == l2->N200) {
		/* retry limit reached: abort establishment, error 'G' */
		mISDN_FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &l2->flag);
		skb_queue_purge(&l2->i_queue);
		l2mgr(l2, MDL_ERROR_IND, (void *) 'G');
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
				      l2_newid(l2), 0, NULL);
		st5_dl_release_l2l3(l2);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	} else {
		l2->rc++;
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		send_uframe(l2, NULL, (test_bit(FLG_MOD128, &l2->flag) ?
				       SABME : SABM) | 0x10, CMD);
	}
}
/*
 * T200 expired while awaiting release (ST 6): retransmit DISC up to
 * N200 times, then force the release (error 'H').  While the D-channel
 * is busy, only rearm the timer.
 */
static void
l2_st6_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
	} else if (l2->rc == l2->N200) {
		mISDN_FsmChangeState(fi, ST_L2_4);
		test_and_clear_bit(FLG_T200_RUN, &l2->flag);
		l2mgr(l2, MDL_ERROR_IND, (void *) 'H');
		lapb_dl_release_l2l3(l2, DL_RELEASE_CNF);
		if (l2->tm)
			l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
	} else {
		l2->rc++;
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200,
				  NULL, 9);
		send_uframe(l2, NULL, DISC | 0x10, CMD);
	}
}
/*
 * T200 expired in established state (ST 7): enter timer recovery
 * (ST 8) and poll the peer.  While the D-channel is busy, only rearm
 * the timer.
 */
static void
l2_st7_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &l2->flag);
	l2->rc = 0;
	mISDN_FsmChangeState(fi, ST_L2_8);
	/* first enquiry of the recovery procedure (rc becomes 1) */
	transmit_enquiry(l2);
	l2->rc++;
}
/*
 * T200 expired during timer recovery (ST 8): poll again up to N200
 * times, then re-establish the link (error 'I').  While the D-channel
 * is busy, only rearm the timer.
 */
static void
l2_st8_tout_200(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 9);
		return;
	}
	test_and_clear_bit(FLG_T200_RUN, &l2->flag);
	if (l2->rc == l2->N200) {
		l2mgr(l2, MDL_ERROR_IND, (void *) 'I');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
	} else {
		transmit_enquiry(l2);
		l2->rc++;
	}
}
/*
 * T203 (idle supervision) expired in ST 7: poll the peer and enter
 * timer recovery.  While the D-channel is busy, only rearm the timer.
 */
static void
l2_st7_tout_203(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	if (test_bit(FLG_LAPD, &l2->flag) &&
	    test_bit(FLG_DCHAN_BUSY, &l2->flag)) {
		mISDN_FsmAddTimer(&l2->t203, l2->T203, EV_L2_T203, NULL, 9);
		return;
	}
	mISDN_FsmChangeState(fi, ST_L2_8);
	transmit_enquiry(l2);
	l2->rc = 0;
}
  1262. static void
  1263. l2_pull_iqueue(struct FsmInst *fi, int event, void *arg)
  1264. {
  1265. struct layer2 *l2 = fi->userdata;
  1266. struct sk_buff *skb, *nskb, *oskb;
  1267. u_char header[MAX_L2HEADER_LEN];
  1268. u_int i, p1;
  1269. if (!cansend(l2))
  1270. return;
  1271. skb = skb_dequeue(&l2->i_queue);
  1272. if (!skb)
  1273. return;
  1274. if (test_bit(FLG_MOD128, &l2->flag))
  1275. p1 = (l2->vs - l2->va) % 128;
  1276. else
  1277. p1 = (l2->vs - l2->va) % 8;
  1278. p1 = (p1 + l2->sow) % l2->window;
  1279. if (l2->windowar[p1]) {
  1280. printk(KERN_WARNING "isdnl2 try overwrite ack queue entry %d\n",
  1281. p1);
  1282. dev_kfree_skb(l2->windowar[p1]);
  1283. }
  1284. l2->windowar[p1] = skb;
  1285. i = sethdraddr(l2, header, CMD);
  1286. if (test_bit(FLG_MOD128, &l2->flag)) {
  1287. header[i++] = l2->vs << 1;
  1288. header[i++] = l2->vr << 1;
  1289. l2->vs = (l2->vs + 1) % 128;
  1290. } else {
  1291. header[i++] = (l2->vr << 5) | (l2->vs << 1);
  1292. l2->vs = (l2->vs + 1) % 8;
  1293. }
  1294. nskb = skb_clone(skb, GFP_ATOMIC);
  1295. p1 = skb_headroom(nskb);
  1296. if (p1 >= i)
  1297. memcpy(skb_push(nskb, i), header, i);
  1298. else {
  1299. printk(KERN_WARNING
  1300. "isdnl2 pull_iqueue skb header(%d/%d) too short\n", i, p1);
  1301. oskb = nskb;
  1302. nskb = mI_alloc_skb(oskb->len + i, GFP_ATOMIC);
  1303. if (!nskb) {
  1304. dev_kfree_skb(oskb);
  1305. printk(KERN_WARNING "%s: no skb mem\n", __func__);
  1306. return;
  1307. }
  1308. memcpy(skb_put(nskb, i), header, i);
  1309. memcpy(skb_put(nskb, oskb->len), oskb->data, oskb->len);
  1310. dev_kfree_skb(oskb);
  1311. }
  1312. l2down(l2, PH_DATA_REQ, l2_newid(l2), nskb);
  1313. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  1314. if (!test_and_set_bit(FLG_T200_RUN, &l2->flag)) {
  1315. mISDN_FsmDelTimer(&l2->t203, 13);
  1316. mISDN_FsmAddTimer(&l2->t200, l2->T200, EV_L2_T200, NULL, 11);
  1317. }
  1318. }
/*
 * Supervisory frame received during timer recovery (ST 8): a response
 * with F=1 answers our enquiry and ends the recovery, triggering
 * retransmission; anything else only updates the acknowledge state.
 */
static void
l2_st8_got_super(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;
	int PollFlag, rsp, rnr = 0;
	unsigned int nr;

	rsp = *skb->data & 0x2;
	/* in originator mode the C/R sense of the address is inverted */
	if (test_bit(FLG_ORIG, &l2->flag))
		rsp = !rsp;

	skb_pull(skb, l2addrsize(l2));
	if (IsRNR(skb->data, l2)) {
		set_peer_busy(l2);
		rnr = 1;
	} else
		clear_peer_busy(l2);

	/* extract P/F bit and N(R) for mod-128 vs. mod-8 coding */
	if (test_bit(FLG_MOD128, &l2->flag)) {
		PollFlag = (skb->data[1] & 0x1) == 0x1;
		nr = skb->data[1] >> 1;
	} else {
		PollFlag = (skb->data[0] & 0x10);
		nr = (skb->data[0] >> 5) & 0x7;
	}
	dev_kfree_skb(skb);

	if (rsp && PollFlag) {
		/* the answer to our enquiry: leave timer recovery */
		if (legalnr(l2, nr)) {
			if (rnr) {
				restart_t200(l2, 15);
			} else {
				stop_t200(l2, 16);
				mISDN_FsmAddTimer(&l2->t203, l2->T203,
						  EV_L2_T203, NULL, 5);
				setva(l2, nr);
			}
			invoke_retransmission(l2, nr);
			mISDN_FsmChangeState(fi, ST_L2_7);
			if (skb_queue_len(&l2->i_queue) && cansend(l2))
				mISDN_FsmEvent(fi, EV_L2_ACK_PULL, NULL);
		} else
			nrerrorrecovery(fi);
	} else {
		if (!rsp && PollFlag)
			enquiry_response(l2);
		if (legalnr(l2, nr))
			setva(l2, nr);
		else
			nrerrorrecovery(fi);
	}
}
/*
 * FRMR received: if the rejected frame was an I or S frame (or a UA in
 * ST 7), the error cannot be fixed by retransmission - report 'K' and
 * re-establish the link.
 */
static void
l2_got_FRMR(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;
	struct sk_buff *skb = arg;

	/* skip address and the FRMR control octet to reach the rejected
	 * frame's control field */
	skb_pull(skb, l2addrsize(l2) + 1);

	if (!(skb->data[0] & 1) || ((skb->data[0] & 3) == 1) || /* I or S */
	    (IsUA(skb->data) && (fi->state == ST_L2_7))) {
		l2mgr(l2, MDL_ERROR_IND, (void *) 'K');
		establishlink(fi);
		test_and_clear_bit(FLG_L3_INIT, &l2->flag);
	}
	dev_kfree_skb(skb);
}
  1382. static void
  1383. l2_st24_tei_remove(struct FsmInst *fi, int event, void *arg)
  1384. {
  1385. struct layer2 *l2 = fi->userdata;
  1386. skb_queue_purge(&l2->ui_queue);
  1387. l2->tei = GROUP_TEI;
  1388. mISDN_FsmChangeState(fi, ST_L2_1);
  1389. }
  1390. static void
  1391. l2_st3_tei_remove(struct FsmInst *fi, int event, void *arg)
  1392. {
  1393. struct layer2 *l2 = fi->userdata;
  1394. skb_queue_purge(&l2->ui_queue);
  1395. l2->tei = GROUP_TEI;
  1396. l2up_create(l2, DL_RELEASE_IND, 0, NULL);
  1397. mISDN_FsmChangeState(fi, ST_L2_1);
  1398. }
  1399. static void
  1400. l2_st5_tei_remove(struct FsmInst *fi, int event, void *arg)
  1401. {
  1402. struct layer2 *l2 = fi->userdata;
  1403. skb_queue_purge(&l2->i_queue);
  1404. skb_queue_purge(&l2->ui_queue);
  1405. freewin(l2);
  1406. l2->tei = GROUP_TEI;
  1407. stop_t200(l2, 17);
  1408. st5_dl_release_l2l3(l2);
  1409. mISDN_FsmChangeState(fi, ST_L2_1);
  1410. }
  1411. static void
  1412. l2_st6_tei_remove(struct FsmInst *fi, int event, void *arg)
  1413. {
  1414. struct layer2 *l2 = fi->userdata;
  1415. skb_queue_purge(&l2->ui_queue);
  1416. l2->tei = GROUP_TEI;
  1417. stop_t200(l2, 18);
  1418. l2up_create(l2, DL_RELEASE_IND, 0, NULL);
  1419. mISDN_FsmChangeState(fi, ST_L2_1);
  1420. }
/*
 * TEI removed in established state (ST 7/8): tear everything down,
 * stop both timers, report DL_RELEASE_IND and return to the
 * unassigned state.
 */
static void
l2_tei_remove(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	skb_queue_purge(&l2->i_queue);
	skb_queue_purge(&l2->ui_queue);
	freewin(l2);
	l2->tei = GROUP_TEI;
	stop_t200(l2, 17);
	mISDN_FsmDelTimer(&l2->t203, 19);
	l2up_create(l2, DL_RELEASE_IND, 0, NULL);
/*	mISDN_queue_data(&l2->inst, l2->inst.id | MSG_BROADCAST,
 *	    MGR_SHORTSTATUS_IND, SSTATUS_L2_RELEASED,
 *	    0, NULL, 0);
 */
	mISDN_FsmChangeState(fi, ST_L2_1);
}
  1438. static void
  1439. l2_st14_persistant_da(struct FsmInst *fi, int event, void *arg)
  1440. {
  1441. struct layer2 *l2 = fi->userdata;
  1442. struct sk_buff *skb = arg;
  1443. skb_queue_purge(&l2->i_queue);
  1444. skb_queue_purge(&l2->ui_queue);
  1445. if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
  1446. l2up(l2, DL_RELEASE_IND, skb);
  1447. else
  1448. dev_kfree_skb(skb);
  1449. }
  1450. static void
  1451. l2_st5_persistant_da(struct FsmInst *fi, int event, void *arg)
  1452. {
  1453. struct layer2 *l2 = fi->userdata;
  1454. struct sk_buff *skb = arg;
  1455. skb_queue_purge(&l2->i_queue);
  1456. skb_queue_purge(&l2->ui_queue);
  1457. freewin(l2);
  1458. stop_t200(l2, 19);
  1459. st5_dl_release_l2l3(l2);
  1460. mISDN_FsmChangeState(fi, ST_L2_4);
  1461. if (l2->tm)
  1462. l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
  1463. dev_kfree_skb(skb);
  1464. }
  1465. static void
  1466. l2_st6_persistant_da(struct FsmInst *fi, int event, void *arg)
  1467. {
  1468. struct layer2 *l2 = fi->userdata;
  1469. struct sk_buff *skb = arg;
  1470. skb_queue_purge(&l2->ui_queue);
  1471. stop_t200(l2, 20);
  1472. l2up(l2, DL_RELEASE_CNF, skb);
  1473. mISDN_FsmChangeState(fi, ST_L2_4);
  1474. if (l2->tm)
  1475. l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
  1476. }
  1477. static void
  1478. l2_persistant_da(struct FsmInst *fi, int event, void *arg)
  1479. {
  1480. struct layer2 *l2 = fi->userdata;
  1481. struct sk_buff *skb = arg;
  1482. skb_queue_purge(&l2->i_queue);
  1483. skb_queue_purge(&l2->ui_queue);
  1484. freewin(l2);
  1485. stop_t200(l2, 19);
  1486. mISDN_FsmDelTimer(&l2->t203, 19);
  1487. l2up(l2, DL_RELEASE_IND, skb);
  1488. mISDN_FsmChangeState(fi, ST_L2_4);
  1489. if (l2->tm)
  1490. l2_tei(l2, MDL_STATUS_DOWN_IND, 0);
  1491. }
  1492. static void
  1493. l2_set_own_busy(struct FsmInst *fi, int event, void *arg)
  1494. {
  1495. struct layer2 *l2 = fi->userdata;
  1496. struct sk_buff *skb = arg;
  1497. if (!test_and_set_bit(FLG_OWN_BUSY, &l2->flag)) {
  1498. enquiry_cr(l2, RNR, RSP, 0);
  1499. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  1500. }
  1501. if (skb)
  1502. dev_kfree_skb(skb);
  1503. }
  1504. static void
  1505. l2_clear_own_busy(struct FsmInst *fi, int event, void *arg)
  1506. {
  1507. struct layer2 *l2 = fi->userdata;
  1508. struct sk_buff *skb = arg;
  1509. if (!test_and_clear_bit(FLG_OWN_BUSY, &l2->flag)) {
  1510. enquiry_cr(l2, RR, RSP, 0);
  1511. test_and_clear_bit(FLG_ACK_PEND, &l2->flag);
  1512. }
  1513. if (skb)
  1514. dev_kfree_skb(skb);
  1515. }
/*
 * Frame error in a non-established state: only report the error
 * character (passed in @arg) to the management entity.
 */
static void
l2_frame_error(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	l2mgr(l2, MDL_ERROR_IND, arg);
}
/*
 * Frame error in established state (ST 7/8): report the error and
 * re-establish the link.
 */
static void
l2_frame_error_reest(struct FsmInst *fi, int event, void *arg)
{
	struct layer2 *l2 = fi->userdata;

	l2mgr(l2, MDL_ERROR_IND, arg);
	establishlink(fi);
	test_and_clear_bit(FLG_L3_INIT, &l2->flag);
}
/*
 * Layer-2 state machine transition table: {state, event, handler}.
 * Events without an entry for the current state are ignored by the
 * FSM core.  States follow Q.921: ST_L2_1 TEI unassigned ... ST_L2_4
 * TEI assigned, ST_L2_5 awaiting establishment, ST_L2_6 awaiting
 * release, ST_L2_7 multiple frame established, ST_L2_8 timer recovery.
 */
static struct FsmNode L2FnList[] =
{
	{ST_L2_1, EV_L2_DL_ESTABLISH_REQ, l2_mdl_assign},
	{ST_L2_2, EV_L2_DL_ESTABLISH_REQ, l2_go_st3},
	{ST_L2_4, EV_L2_DL_ESTABLISH_REQ, l2_establish},
	{ST_L2_5, EV_L2_DL_ESTABLISH_REQ, l2_discard_i_setl3},
	{ST_L2_7, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_8, EV_L2_DL_ESTABLISH_REQ, l2_l3_reestablish},
	{ST_L2_4, EV_L2_DL_RELEASE_REQ, l2_release},
	{ST_L2_5, EV_L2_DL_RELEASE_REQ, l2_pend_rel},
	{ST_L2_7, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_8, EV_L2_DL_RELEASE_REQ, l2_disconnect},
	{ST_L2_5, EV_L2_DL_DATA, l2_feed_i_if_reest},
	{ST_L2_7, EV_L2_DL_DATA, l2_feed_i_pull},
	{ST_L2_8, EV_L2_DL_DATA, l2_feed_iqueue},
	{ST_L2_1, EV_L2_DL_UNITDATA, l2_queue_ui_assign},
	{ST_L2_2, EV_L2_DL_UNITDATA, l2_queue_ui},
	{ST_L2_3, EV_L2_DL_UNITDATA, l2_queue_ui},
	{ST_L2_4, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_5, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_6, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_7, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_8, EV_L2_DL_UNITDATA, l2_send_ui},
	{ST_L2_1, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_3, EV_L2_MDL_ASSIGN, l2_got_tei},
	{ST_L2_2, EV_L2_MDL_ERROR, l2_st24_tei_remove},
	{ST_L2_3, EV_L2_MDL_ERROR, l2_st3_tei_remove},
	{ST_L2_4, EV_L2_MDL_REMOVE, l2_st24_tei_remove},
	{ST_L2_5, EV_L2_MDL_REMOVE, l2_st5_tei_remove},
	{ST_L2_6, EV_L2_MDL_REMOVE, l2_st6_tei_remove},
	{ST_L2_7, EV_L2_MDL_REMOVE, l2_tei_remove},
	{ST_L2_8, EV_L2_MDL_REMOVE, l2_tei_remove},
	{ST_L2_4, EV_L2_SABME, l2_start_multi},
	{ST_L2_5, EV_L2_SABME, l2_send_UA},
	{ST_L2_6, EV_L2_SABME, l2_send_DM},
	{ST_L2_7, EV_L2_SABME, l2_restart_multi},
	{ST_L2_8, EV_L2_SABME, l2_restart_multi},
	{ST_L2_4, EV_L2_DISC, l2_send_DM},
	{ST_L2_5, EV_L2_DISC, l2_send_DM},
	{ST_L2_6, EV_L2_DISC, l2_send_UA},
	{ST_L2_7, EV_L2_DISC, l2_stop_multi},
	{ST_L2_8, EV_L2_DISC, l2_stop_multi},
	{ST_L2_4, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_5, EV_L2_UA, l2_connected},
	{ST_L2_6, EV_L2_UA, l2_released},
	{ST_L2_7, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_8, EV_L2_UA, l2_mdl_error_ua},
	{ST_L2_4, EV_L2_DM, l2_reestablish},
	{ST_L2_5, EV_L2_DM, l2_st5_dm_release},
	{ST_L2_6, EV_L2_DM, l2_st6_dm_release},
	{ST_L2_7, EV_L2_DM, l2_mdl_error_dm},
	{ST_L2_8, EV_L2_DM, l2_st8_mdl_error_dm},
	{ST_L2_1, EV_L2_UI, l2_got_ui},
	{ST_L2_2, EV_L2_UI, l2_got_ui},
	{ST_L2_3, EV_L2_UI, l2_got_ui},
	{ST_L2_4, EV_L2_UI, l2_got_ui},
	{ST_L2_5, EV_L2_UI, l2_got_ui},
	{ST_L2_6, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_UI, l2_got_ui},
	{ST_L2_8, EV_L2_UI, l2_got_ui},
	{ST_L2_7, EV_L2_FRMR, l2_got_FRMR},
	{ST_L2_8, EV_L2_FRMR, l2_got_FRMR},
	{ST_L2_7, EV_L2_SUPER, l2_st7_got_super},
	{ST_L2_8, EV_L2_SUPER, l2_st8_got_super},
	{ST_L2_7, EV_L2_I, l2_got_iframe},
	{ST_L2_8, EV_L2_I, l2_got_iframe},
	{ST_L2_5, EV_L2_T200, l2_st5_tout_200},
	{ST_L2_6, EV_L2_T200, l2_st6_tout_200},
	{ST_L2_7, EV_L2_T200, l2_st7_tout_200},
	{ST_L2_8, EV_L2_T200, l2_st8_tout_200},
	{ST_L2_7, EV_L2_T203, l2_st7_tout_203},
	{ST_L2_7, EV_L2_ACK_PULL, l2_pull_iqueue},
	{ST_L2_7, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_8, EV_L2_SET_OWN_BUSY, l2_set_own_busy},
	{ST_L2_7, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_8, EV_L2_CLEAR_OWN_BUSY, l2_clear_own_busy},
	{ST_L2_4, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_5, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_6, EV_L2_FRAME_ERROR, l2_frame_error},
	{ST_L2_7, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_8, EV_L2_FRAME_ERROR, l2_frame_error_reest},
	{ST_L2_1, EV_L1_DEACTIVATE, l2_st14_persistant_da},
	{ST_L2_2, EV_L1_DEACTIVATE, l2_st24_tei_remove},
	{ST_L2_3, EV_L1_DEACTIVATE, l2_st3_tei_remove},
	{ST_L2_4, EV_L1_DEACTIVATE, l2_st14_persistant_da},
	{ST_L2_5, EV_L1_DEACTIVATE, l2_st5_persistant_da},
	{ST_L2_6, EV_L1_DEACTIVATE, l2_st6_persistant_da},
	{ST_L2_7, EV_L1_DEACTIVATE, l2_persistant_da},
	{ST_L2_8, EV_L1_DEACTIVATE, l2_persistant_da},
};
/*
 * Incoming PH_DATA_IND: validate the address field (LAPD SAPI/TEI
 * filtering), classify the frame (I/S/UI/SABME/UA/DISC/DM/FRMR) and
 * feed the matching event into the state machine.  Frames that are
 * not for us are silently dropped (return 0); malformed frames raise
 * EV_L2_FRAME_ERROR with an error character.
 */
static int
ph_data_indication(struct layer2 *l2, struct mISDNhead *hh, struct sk_buff *skb)
{
	u_char *datap = skb->data;
	int ret = -EINVAL;
	int psapi, ptei;
	u_int l;
	int c = 0;

	l = l2addrsize(l2);
	if (skb->len <= l) {
		/* too short to even hold the address field: error 'N' */
		mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *) 'N');
		return ret;
	}
	if (test_bit(FLG_LAPD, &l2->flag)) { /* Maybe not needed */
		psapi = *datap++;
		ptei = *datap++;
		/* EA0 must be clear in the first, EA1 set in the second octet */
		if ((psapi & 1) || !(ptei & 1)) {
			printk(KERN_WARNING
			       "l2 D-channel frame wrong EA0/EA1\n");
			return ret;
		}
		psapi >>= 2;
		ptei >>= 1;
		if (psapi != l2->sapi) {
			/* not our bussiness */
			if (*debug & DEBUG_L2)
				printk(KERN_DEBUG "%s: sapi %d/%d mismatch\n",
				       __func__, psapi, l2->sapi);
			dev_kfree_skb(skb);
			return 0;
		}
		if ((ptei != l2->tei) && (ptei != GROUP_TEI)) {
			/* not our bussiness */
			if (*debug & DEBUG_L2)
				printk(KERN_DEBUG "%s: tei %d/%d mismatch\n",
				       __func__, ptei, l2->tei);
			dev_kfree_skb(skb);
			return 0;
		}
	} else
		datap += l;
	/* classify by control field and dispatch into the FSM */
	if (!(*datap & 1)) {	/* I-Frame */
		c = iframe_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_I, skb);
	} else if (IsSFrame(datap, l2)) {	/* S-Frame */
		c = super_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SUPER, skb);
	} else if (IsUI(datap)) {
		c = UI_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UI, skb);
	} else if (IsSABME(datap, l2)) {
		c = unnum_error(l2, skb, CMD);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_SABME, skb);
	} else if (IsUA(datap)) {
		c = unnum_error(l2, skb, RSP);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_UA, skb);
	} else if (IsDISC(datap)) {
		c = unnum_error(l2, skb, CMD);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DISC, skb);
	} else if (IsDM(datap)) {
		c = unnum_error(l2, skb, RSP);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DM, skb);
	} else if (IsFRMR(datap)) {
		c = FRMR_error(l2, skb);
		if (!c)
			ret = mISDN_FsmEvent(&l2->l2m, EV_L2_FRMR, skb);
	} else
		c = 'L';	/* unknown frame type */
	if (c) {
		printk(KERN_WARNING "l2 D-channel frame error %c\n", c);
		mISDN_FsmEvent(&l2->l2m, EV_L2_FRAME_ERROR, (void *)(long)c);
	}
	return ret;
}
/*
 * mISDNchannel send hook: dispatch primitives from layer 1 (PH_*) and
 * layer 3 (DL_*) into the layer-2 state machine.  If no handler
 * consumed the skb (ret != 0) it is freed here and 0 is returned, so
 * the caller never sees an error for an unhandled primitive.
 */
static int
l2_send(struct mISDNchannel *ch, struct sk_buff *skb)
{
	struct layer2 *l2 = container_of(ch, struct layer2, ch);
	struct mISDNhead *hh = mISDN_HEAD_P(skb);
	int ret = -EINVAL;

	if (*debug & DEBUG_L2_RECV)
		printk(KERN_DEBUG "%s: prim(%x) id(%x) sapi(%d) tei(%d)\n",
		       __func__, hh->prim, hh->id, l2->sapi, l2->tei);
	switch (hh->prim) {
	case PH_DATA_IND:
		ret = ph_data_indication(l2, hh, skb);
		break;
	case PH_DATA_CNF:
		ret = ph_data_confirm(l2, hh, skb);
		break;
	case PH_ACTIVATE_IND:
		/* layer 1 is up; run a deferred establish if one is pending */
		test_and_set_bit(FLG_L1_ACTIV, &l2->flag);
		l2up_create(l2, MPH_ACTIVATE_IND, 0, NULL);
		if (test_and_clear_bit(FLG_ESTAB_PEND, &l2->flag))
			ret = mISDN_FsmEvent(&l2->l2m,
					     EV_L2_DL_ESTABLISH_REQ, skb);
		break;
	case PH_DEACTIVATE_IND:
		test_and_clear_bit(FLG_L1_ACTIV, &l2->flag);
		l2up_create(l2, MPH_DEACTIVATE_IND, 0, NULL);
		ret = mISDN_FsmEvent(&l2->l2m, EV_L1_DEACTIVATE, skb);
		break;
	case MPH_INFORMATION_IND:
		/* pass transparently to the upper layer if one is attached */
		if (!l2->up)
			break;
		ret = l2->up->send(l2->up, skb);
		break;
	case DL_DATA_REQ:
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_DATA, skb);
		break;
	case DL_UNITDATA_REQ:
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_UNITDATA, skb);
		break;
	case DL_ESTABLISH_REQ:
		if (test_bit(FLG_LAPB, &l2->flag))
			test_and_set_bit(FLG_ORIG, &l2->flag);
		if (test_bit(FLG_L1_ACTIV, &l2->flag)) {
			if (test_bit(FLG_LAPD, &l2->flag) ||
			    test_bit(FLG_ORIG, &l2->flag))
				ret = mISDN_FsmEvent(&l2->l2m,
						     EV_L2_DL_ESTABLISH_REQ, skb);
		} else {
			/* layer 1 not up yet: remember and activate first */
			if (test_bit(FLG_LAPD, &l2->flag) ||
			    test_bit(FLG_ORIG, &l2->flag)) {
				test_and_set_bit(FLG_ESTAB_PEND,
						 &l2->flag);
			}
			ret = l2down(l2, PH_ACTIVATE_REQ, l2_newid(l2),
				     skb);
		}
		break;
	case DL_RELEASE_REQ:
		if (test_bit(FLG_LAPB, &l2->flag))
			l2down_create(l2, PH_DEACTIVATE_REQ,
				      l2_newid(l2), 0, NULL);
		ret = mISDN_FsmEvent(&l2->l2m, EV_L2_DL_RELEASE_REQ,
				     skb);
		break;
	default:
		if (*debug & DEBUG_L2)
			l2m_debug(&l2->l2m, "l2 unknown pr %04x",
				  hh->prim);
	}
	if (ret) {
		dev_kfree_skb(skb);
		ret = 0;
	}
	return ret;
}
  1777. int
  1778. tei_l2(struct layer2 *l2, u_int cmd, u_long arg)
  1779. {
  1780. int ret = -EINVAL;
  1781. if (*debug & DEBUG_L2_TEI)
  1782. printk(KERN_DEBUG "%s: cmd(%x)\n", __func__, cmd);
  1783. switch (cmd) {
  1784. case (MDL_ASSIGN_REQ):
  1785. ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ASSIGN, (void *)arg);
  1786. break;
  1787. case (MDL_REMOVE_REQ):
  1788. ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_REMOVE, NULL);
  1789. break;
  1790. case (MDL_ERROR_IND):
  1791. ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
  1792. break;
  1793. case (MDL_ERROR_RSP):
  1794. /* ETS 300-125 5.3.2.1 Test: TC13010 */
  1795. printk(KERN_NOTICE "MDL_ERROR|REQ (tei_l2)\n");
  1796. ret = mISDN_FsmEvent(&l2->l2m, EV_L2_MDL_ERROR, NULL);
  1797. break;
  1798. }
  1799. return ret;
  1800. }
/*
 * Tear down and free a layer2 instance: stop both FSM timers, drop every
 * frame still queued, free the transmit window and, for LAPD links,
 * release the TEI and close the D-channel.  The ordering matters: timers
 * and queues must be dead before the window and channel go away.
 * @l2 is freed here and must not be used afterwards.
 */
static void
release_l2(struct layer2 *l2)
{
	mISDN_FsmDelTimer(&l2->t200, 21);	/* T200 timer */
	mISDN_FsmDelTimer(&l2->t203, 16);	/* T203 timer */
	skb_queue_purge(&l2->i_queue);		/* pending I-frames */
	skb_queue_purge(&l2->ui_queue);		/* pending UI-frames */
	skb_queue_purge(&l2->down_queue);	/* frames queued towards L1 */
	ReleaseWin(l2);
	if (test_bit(FLG_LAPD, &l2->flag)) {
		TEIrelease(l2);
		/* close the D-channel we opened in create_l2() */
		if (l2->ch.st)
			l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D,
					       CLOSE_CHANNEL, NULL);
	}
	kfree(l2);
}
  1818. static int
  1819. l2_ctrl(struct mISDNchannel *ch, u_int cmd, void *arg)
  1820. {
  1821. struct layer2 *l2 = container_of(ch, struct layer2, ch);
  1822. u_int info;
  1823. if (*debug & DEBUG_L2_CTRL)
  1824. printk(KERN_DEBUG "%s:(%x)\n", __func__, cmd);
  1825. switch (cmd) {
  1826. case OPEN_CHANNEL:
  1827. if (test_bit(FLG_LAPD, &l2->flag)) {
  1828. set_channel_address(&l2->ch, l2->sapi, l2->tei);
  1829. info = DL_INFO_L2_CONNECT;
  1830. l2up_create(l2, DL_INFORMATION_IND,
  1831. sizeof(info), &info);
  1832. }
  1833. break;
  1834. case CLOSE_CHANNEL:
  1835. if (l2->ch.peer)
  1836. l2->ch.peer->ctrl(l2->ch.peer, CLOSE_CHANNEL, NULL);
  1837. release_l2(l2);
  1838. break;
  1839. }
  1840. return 0;
  1841. }
  1842. struct layer2 *
  1843. create_l2(struct mISDNchannel *ch, u_int protocol, u_long options, int tei,
  1844. int sapi)
  1845. {
  1846. struct layer2 *l2;
  1847. struct channel_req rq;
  1848. l2 = kzalloc(sizeof(struct layer2), GFP_KERNEL);
  1849. if (!l2) {
  1850. printk(KERN_ERR "kzalloc layer2 failed\n");
  1851. return NULL;
  1852. }
  1853. l2->next_id = 1;
  1854. l2->down_id = MISDN_ID_NONE;
  1855. l2->up = ch;
  1856. l2->ch.st = ch->st;
  1857. l2->ch.send = l2_send;
  1858. l2->ch.ctrl = l2_ctrl;
  1859. switch (protocol) {
  1860. case ISDN_P_LAPD_NT:
  1861. test_and_set_bit(FLG_LAPD, &l2->flag);
  1862. test_and_set_bit(FLG_LAPD_NET, &l2->flag);
  1863. test_and_set_bit(FLG_MOD128, &l2->flag);
  1864. l2->sapi = sapi;
  1865. l2->maxlen = MAX_DFRAME_LEN;
  1866. if (test_bit(OPTION_L2_PMX, &options))
  1867. l2->window = 7;
  1868. else
  1869. l2->window = 1;
  1870. if (test_bit(OPTION_L2_PTP, &options))
  1871. test_and_set_bit(FLG_PTP, &l2->flag);
  1872. if (test_bit(OPTION_L2_FIXEDTEI, &options))
  1873. test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
  1874. l2->tei = tei;
  1875. l2->T200 = 1000;
  1876. l2->N200 = 3;
  1877. l2->T203 = 10000;
  1878. if (test_bit(OPTION_L2_PMX, &options))
  1879. rq.protocol = ISDN_P_NT_E1;
  1880. else
  1881. rq.protocol = ISDN_P_NT_S0;
  1882. rq.adr.channel = 0;
  1883. l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
  1884. break;
  1885. case ISDN_P_LAPD_TE:
  1886. test_and_set_bit(FLG_LAPD, &l2->flag);
  1887. test_and_set_bit(FLG_MOD128, &l2->flag);
  1888. test_and_set_bit(FLG_ORIG, &l2->flag);
  1889. l2->sapi = sapi;
  1890. l2->maxlen = MAX_DFRAME_LEN;
  1891. if (test_bit(OPTION_L2_PMX, &options))
  1892. l2->window = 7;
  1893. else
  1894. l2->window = 1;
  1895. if (test_bit(OPTION_L2_PTP, &options))
  1896. test_and_set_bit(FLG_PTP, &l2->flag);
  1897. if (test_bit(OPTION_L2_FIXEDTEI, &options))
  1898. test_and_set_bit(FLG_FIXED_TEI, &l2->flag);
  1899. l2->tei = tei;
  1900. l2->T200 = 1000;
  1901. l2->N200 = 3;
  1902. l2->T203 = 10000;
  1903. if (test_bit(OPTION_L2_PMX, &options))
  1904. rq.protocol = ISDN_P_TE_E1;
  1905. else
  1906. rq.protocol = ISDN_P_TE_S0;
  1907. rq.adr.channel = 0;
  1908. l2->ch.st->dev->D.ctrl(&l2->ch.st->dev->D, OPEN_CHANNEL, &rq);
  1909. break;
  1910. case ISDN_P_B_X75SLP:
  1911. test_and_set_bit(FLG_LAPB, &l2->flag);
  1912. l2->window = 7;
  1913. l2->maxlen = MAX_DATA_SIZE;
  1914. l2->T200 = 1000;
  1915. l2->N200 = 4;
  1916. l2->T203 = 5000;
  1917. l2->addr.A = 3;
  1918. l2->addr.B = 1;
  1919. break;
  1920. default:
  1921. printk(KERN_ERR "layer2 create failed prt %x\n",
  1922. protocol);
  1923. kfree(l2);
  1924. return NULL;
  1925. }
  1926. skb_queue_head_init(&l2->i_queue);
  1927. skb_queue_head_init(&l2->ui_queue);
  1928. skb_queue_head_init(&l2->down_queue);
  1929. skb_queue_head_init(&l2->tmp_queue);
  1930. InitWin(l2);
  1931. l2->l2m.fsm = &l2fsm;
  1932. if (test_bit(FLG_LAPB, &l2->flag) ||
  1933. test_bit(FLG_PTP, &l2->flag) ||
  1934. test_bit(FLG_LAPD_NET, &l2->flag))
  1935. l2->l2m.state = ST_L2_4;
  1936. else
  1937. l2->l2m.state = ST_L2_1;
  1938. l2->l2m.debug = *debug;
  1939. l2->l2m.userdata = l2;
  1940. l2->l2m.userint = 0;
  1941. l2->l2m.printdebug = l2m_debug;
  1942. mISDN_FsmInitTimer(&l2->l2m, &l2->t200);
  1943. mISDN_FsmInitTimer(&l2->l2m, &l2->t203);
  1944. return l2;
  1945. }
  1946. static int
  1947. x75create(struct channel_req *crq)
  1948. {
  1949. struct layer2 *l2;
  1950. if (crq->protocol != ISDN_P_B_X75SLP)
  1951. return -EPROTONOSUPPORT;
  1952. l2 = create_l2(crq->ch, crq->protocol, 0, 0, 0);
  1953. if (!l2)
  1954. return -ENOMEM;
  1955. crq->ch = &l2->ch;
  1956. crq->protocol = ISDN_P_B_HDLC;
  1957. return 0;
  1958. }
/* Registration record for the X.75 SLP B-channel protocol,
 * registered/unregistered in Isdnl2_Init()/Isdnl2_cleanup() */
static struct Bprotocol X75SLP = {
	.Bprotocols = (1 << (ISDN_P_B_X75SLP & ISDN_P_B_MASK)),
	.name = "X75SLP",
	.create = x75create
};
  1964. int
  1965. Isdnl2_Init(u_int *deb)
  1966. {
  1967. debug = deb;
  1968. mISDN_register_Bprotocol(&X75SLP);
  1969. l2fsm.state_count = L2_STATE_COUNT;
  1970. l2fsm.event_count = L2_EVENT_COUNT;
  1971. l2fsm.strEvent = strL2Event;
  1972. l2fsm.strState = strL2State;
  1973. mISDN_FsmNew(&l2fsm, L2FnList, ARRAY_SIZE(L2FnList));
  1974. TEIInit(deb);
  1975. return 0;
  1976. }
/*
 * Module exit: undo everything Isdnl2_Init() set up — unregister the
 * X.75 B-protocol, shut down the TEI manager and free the layer2 FSM
 * jump table.
 */
void
Isdnl2_cleanup(void)
{
	mISDN_unregister_Bprotocol(&X75SLP);
	TEIFree();
	mISDN_FsmFree(&l2fsm);
}