hdlc_fr.c

/*
 * Generic HDLC support routines for Linux
 * Frame Relay support
 *
 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 *	Theory of PVC state
 *
 * DCE mode:
 *
 * (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
 *	   0,x -> 1,1 if "link reliable" when sending FULL STATUS
 *	   1,1 -> 1,0 if received FULL STATUS ACK
 *
 * (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
 *	    -> 1 when "PVC up" and (exist,new) = 1,0
 *
 * DTE mode:
 * (exist,new,active) = FULL STATUS if "link reliable"
 *		      = 0, 0, 0 if "link unreliable"
 *
 * No LMI:
 * active = open and "link reliable"
 * exist = new = not used
 *
 * CCITT LMI: ITU-T Q.933 Annex A
 * ANSI LMI: ANSI T1.617 Annex D
 * CISCO LMI: the original, aka "Gang of Four" LMI
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/pkt_sched.h>
#include <linux/random.h>
#include <linux/inetdevice.h>
#include <linux/lapb.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/hdlc.h>

#undef DEBUG_PKT
#undef DEBUG_ECN
#undef DEBUG_LINK
#undef DEBUG_PROTO
#undef DEBUG_PVC

#define FR_UI			0x03
#define FR_PAD			0x00

#define NLPID_IP		0xCC
#define NLPID_IPV6		0x8E
#define NLPID_SNAP		0x80
#define NLPID_PAD		0x00
#define NLPID_CCITT_ANSI_LMI	0x08
#define NLPID_CISCO_LMI		0x09

#define LMI_CCITT_ANSI_DLCI	   0	/* LMI DLCI */
#define LMI_CISCO_DLCI		1023

#define LMI_CALLREF		0x00	/* Call Reference */
#define LMI_ANSI_LOCKSHIFT	0x95	/* ANSI locking shift */
#define LMI_ANSI_CISCO_REPTYPE	0x01	/* report type */
#define LMI_CCITT_REPTYPE	0x51
#define LMI_ANSI_CISCO_ALIVE	0x03	/* keep alive */
#define LMI_CCITT_ALIVE		0x53
#define LMI_ANSI_CISCO_PVCSTAT	0x07	/* PVC status */
#define LMI_CCITT_PVCSTAT	0x57
#define LMI_FULLREP		0x00	/* full report */
#define LMI_INTEGRITY		0x01	/* link integrity report */
#define LMI_SINGLE		0x02	/* single PVC report */

#define LMI_STATUS_ENQUIRY	0x75
#define LMI_STATUS		0x7D	/* reply */

#define LMI_REPT_LEN		   1	/* report type element length */
#define LMI_INTEG_LEN		   2	/* link integrity element length */

#define LMI_CCITT_CISCO_LENGTH	  13	/* LMI frame lengths */
#define LMI_ANSI_LENGTH		  14

typedef struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	unsigned ea1:	1;
	unsigned cr:	1;
	unsigned dlcih:	6;

	unsigned ea2:	1;
	unsigned de:	1;
	unsigned becn:	1;
	unsigned fecn:	1;
	unsigned dlcil:	4;
#else
	unsigned dlcih:	6;
	unsigned cr:	1;
	unsigned ea1:	1;

	unsigned dlcil:	4;
	unsigned fecn:	1;
	unsigned becn:	1;
	unsigned de:	1;
	unsigned ea2:	1;
#endif
} __attribute__ ((packed)) fr_hdr;

typedef struct pvc_device_struct {
	struct net_device *frad;
	struct net_device *main;
	struct net_device *ether;	/* bridged Ethernet interface */
	struct pvc_device_struct *next;	/* Sorted in ascending DLCI order */
	int dlci;
	int open_count;

	struct {
		unsigned int new: 1;
		unsigned int active: 1;
		unsigned int exist: 1;
		unsigned int deleted: 1;
		unsigned int fecn: 1;
		unsigned int becn: 1;
		unsigned int bandwidth;	/* Cisco LMI reporting only */
	} state;
} pvc_device;

struct frad_state {
	fr_proto settings;
	pvc_device *first_pvc;
	int dce_pvc_count;

	struct timer_list timer;
	unsigned long last_poll;
	int reliable;
	int dce_changed;
	int request;
	int fullrep_sent;
	u32 last_errors;	/* last errors bit list */
	u8 n391cnt;
	u8 txseq;		/* TX sequence number */
	u8 rxseq;		/* RX sequence number */
};

static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);

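/* Q.922 address helpers: the two-byte header carries a 10-bit DLCI split
 * across both octets (6 high bits in the first, 4 low bits in the second),
 * surrounded by the C/R and EA bits.  For example, DLCI 16 encodes to the
 * bytes 0x04 0x01 and decodes back to 16.
 */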
static inline u16 q922_to_dlci(u8 *hdr)
{
	return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
}

static inline void dlci_to_q922(u8 *hdr, u16 dlci)
{
	hdr[0] = (dlci >> 2) & 0xFC;
	hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
}

static inline struct frad_state *state(hdlc_device *hdlc)
{
	return (struct frad_state *)(hdlc->state);
}

static __inline__ pvc_device *dev_to_pvc(struct net_device *dev)
{
	return dev->priv;
}

static inline pvc_device *find_pvc(hdlc_device *hdlc, u16 dlci)
{
	pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {
		if (pvc->dlci == dlci)
			return pvc;
		if (pvc->dlci > dlci)
			return NULL;	/* the list is sorted */
		pvc = pvc->next;
	}

	return NULL;
}

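/* Return the existing PVC entry for the given DLCI or allocate a new one,
 * keeping the singly linked list sorted by DLCI.  This can run from the
 * LMI receive (softirq) path, hence GFP_ATOMIC.
 */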
static pvc_device *add_pvc(struct net_device *dev, u16 dlci)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;

	while (*pvc_p) {
		if ((*pvc_p)->dlci == dlci)
			return *pvc_p;
		if ((*pvc_p)->dlci > dlci)
			break;	/* the list is sorted */
		pvc_p = &(*pvc_p)->next;
	}

	pvc = kmalloc(sizeof(pvc_device), GFP_ATOMIC);
#ifdef DEBUG_PVC
	printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
#endif
	if (!pvc)
		return NULL;

	memset(pvc, 0, sizeof(pvc_device));
	pvc->dlci = dlci;
	pvc->frad = dev;
	pvc->next = *pvc_p;	/* Put it in the chain */
	*pvc_p = pvc;
	return pvc;
}

static inline int pvc_is_used(pvc_device *pvc)
{
	return pvc->main || pvc->ether;
}

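/* Propagate carrier state to the netdevices attached to this PVC
 * (the routed "main" device and/or the bridged Ethernet device). */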
static inline void pvc_carrier(int on, pvc_device *pvc)
{
	if (on) {
		if (pvc->main)
			if (!netif_carrier_ok(pvc->main))
				netif_carrier_on(pvc->main);
		if (pvc->ether)
			if (!netif_carrier_ok(pvc->ether))
				netif_carrier_on(pvc->ether);
	} else {
		if (pvc->main)
			if (netif_carrier_ok(pvc->main))
				netif_carrier_off(pvc->main);
		if (pvc->ether)
			if (netif_carrier_ok(pvc->ether))
				netif_carrier_off(pvc->ether);
	}
}

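/* Free PVC list entries that no longer have any netdevice attached. */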
static inline void delete_unused_pvcs(hdlc_device *hdlc)
{
	pvc_device **pvc_p = &state(hdlc)->first_pvc;

	while (*pvc_p) {
		if (!pvc_is_used(*pvc_p)) {
			pvc_device *pvc = *pvc_p;
#ifdef DEBUG_PVC
			printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
#endif
			*pvc_p = pvc->next;
			kfree(pvc);
			continue;
		}
		pvc_p = &(*pvc_p)->next;
	}
}

static inline struct net_device **get_dev_p(pvc_device *pvc, int type)
{
	if (type == ARPHRD_ETHER)
		return &pvc->ether;
	else
		return &pvc->main;
}

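/* Prepend the Frame Relay encapsulation: the 2-byte Q.922 address, the UI
 * control byte, and either a single NLPID (IP, IPv6, LMI) or a 10-byte
 * SNAP header (bridged Ethernet and other protocols).  May reallocate the
 * skb when there is not enough headroom, which is why it takes **skb_p.
 */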
static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
{
	u16 head_len;
	struct sk_buff *skb = *skb_p;

	switch (skb->protocol) {
	case __constant_htons(NLPID_CCITT_ANSI_LMI):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_CCITT_ANSI_LMI;
		break;

	case __constant_htons(NLPID_CISCO_LMI):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_CISCO_LMI;
		break;

	case __constant_htons(ETH_P_IP):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_IP;
		break;

	case __constant_htons(ETH_P_IPV6):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_IPV6;
		break;

	case __constant_htons(ETH_P_802_3):
		head_len = 10;
		if (skb_headroom(skb) < head_len) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb,
								    head_len);
			if (!skb2)
				return -ENOBUFS;
			dev_kfree_skb(skb);
			skb = *skb_p = skb2;
		}
		skb_push(skb, head_len);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		skb->data[5] = FR_PAD;
		skb->data[6] = 0x80;
		skb->data[7] = 0xC2;
		skb->data[8] = 0x00;
		skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
		break;

	default:
		head_len = 10;
		skb_push(skb, head_len);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		skb->data[5] = FR_PAD;
		skb->data[6] = FR_PAD;
		skb->data[7] = FR_PAD;
		*(__be16 *)(skb->data + 8) = skb->protocol;
	}

	dlci_to_q922(skb->data, dlci);
	skb->data[2] = FR_UI;
	return 0;
}

static int pvc_open(struct net_device *dev)
{
	pvc_device *pvc = dev_to_pvc(dev);

	if ((pvc->frad->flags & IFF_UP) == 0)
		return -EIO;  /* Frad must be UP in order to activate PVC */

	if (pvc->open_count++ == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = netif_carrier_ok(pvc->frad);

		pvc_carrier(pvc->state.active, pvc);
		state(hdlc)->dce_changed = 1;
	}
	return 0;
}

static int pvc_close(struct net_device *dev)
{
	pvc_device *pvc = dev_to_pvc(dev);

	if (--pvc->open_count == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = 0;

		if (state(hdlc)->settings.dce) {
			state(hdlc)->dce_changed = 1;
			pvc->state.active = 0;
		}
	}
	return 0;
}

static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	pvc_device *pvc = dev_to_pvc(dev);
	fr_proto_pvc_info info;

	if (ifr->ifr_settings.type == IF_GET_PROTO) {
		if (dev->type == ARPHRD_ETHER)
			ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
		else
			ifr->ifr_settings.type = IF_PROTO_FR_PVC;

		if (ifr->ifr_settings.size < sizeof(info)) {
			/* data size wanted */
			ifr->ifr_settings.size = sizeof(info);
			return -ENOBUFS;
		}

		info.dlci = pvc->dlci;
		memcpy(info.master, pvc->frad->name, IFNAMSIZ);
		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
				 &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}

	return -EINVAL;
}

static inline struct net_device_stats *pvc_get_stats(struct net_device *dev)
{
	return &dev_to_desc(dev)->stats;
}

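/* Transmit on a PVC netdevice: pad bridged Ethernet frames to ETH_ZLEN,
 * add the Frame Relay header and queue the frame on the FRAD device. */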
static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	pvc_device *pvc = dev_to_pvc(dev);
	struct net_device_stats *stats = pvc_get_stats(dev);

	if (pvc->state.active) {
		if (dev->type == ARPHRD_ETHER) {
			int pad = ETH_ZLEN - skb->len;
			if (pad > 0) { /* Pad the frame with zeros */
				int len = skb->len;
				if (skb_tailroom(skb) < pad)
					if (pskb_expand_head(skb, 0, pad,
							     GFP_ATOMIC)) {
						stats->tx_dropped++;
						dev_kfree_skb(skb);
						return 0;
					}
				skb_put(skb, pad);
				memset(skb->data + len, 0, pad);
			}
			skb->protocol = __constant_htons(ETH_P_802_3);
		}
		if (!fr_hard_header(&skb, pvc->dlci)) {
			stats->tx_bytes += skb->len;
			stats->tx_packets++;
			if (pvc->state.fecn) /* TX Congestion counter */
				stats->tx_compressed++;
			skb->dev = pvc->frad;
			dev_queue_xmit(skb);
			return 0;
		}
	}

	stats->tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}

static int pvc_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static inline void fr_log_dlci_active(pvc_device *pvc)
{
	printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n",
	       pvc->frad->name,
	       pvc->dlci,
	       pvc->main ? pvc->main->name : "",
	       pvc->main && pvc->ether ? " " : "",
	       pvc->ether ? pvc->ether->name : "",
	       pvc->state.new ? " new" : "",
	       !pvc->state.exist ? "deleted" :
	       pvc->state.active ? "active" : "inactive");
}

static inline u8 fr_lmi_nextseq(u8 x)
{
	x++;
	return x ? x : 1;
}

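/* Build and send an LMI frame: a STATUS ENQUIRY as DTE or a STATUS reply
 * as DCE, carrying the link integrity (sequence number) IE and, for a
 * full report in DCE mode, one PVC status IE per known DLCI. */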
static void fr_lmi_send(struct net_device *dev, int fullrep)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct sk_buff *skb;
	pvc_device *pvc = state(hdlc)->first_pvc;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
	u8 *data;
	int i = 0;

	if (dce && fullrep) {
		len += state(hdlc)->dce_pvc_count * (2 + stat_len);
		if (len > HDLC_MAX_MRU) {
			printk(KERN_WARNING "%s: Too many PVCs while sending "
			       "LMI full report\n", dev->name);
			return;
		}
	}

	skb = dev_alloc_skb(len);
	if (!skb) {
		printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n",
		       dev->name);
		return;
	}
	memset(skb->data, 0, len);
	skb_reserve(skb, 4);
	if (lmi == LMI_CISCO) {
		skb->protocol = __constant_htons(NLPID_CISCO_LMI);
		fr_hard_header(&skb, LMI_CISCO_DLCI);
	} else {
		skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI);
		fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
	}
	data = skb_tail_pointer(skb);
	data[i++] = LMI_CALLREF;
	data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
	if (lmi == LMI_ANSI)
		data[i++] = LMI_ANSI_LOCKSHIFT;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
		LMI_ANSI_CISCO_REPTYPE;
	data[i++] = LMI_REPT_LEN;
	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
	data[i++] = LMI_INTEG_LEN;
	data[i++] = state(hdlc)->txseq =
		fr_lmi_nextseq(state(hdlc)->txseq);
	data[i++] = state(hdlc)->rxseq;

	if (dce && fullrep) {
		while (pvc) {
			data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				LMI_ANSI_CISCO_PVCSTAT;
			data[i++] = stat_len;

			/* LMI start/restart */
			if (state(hdlc)->reliable && !pvc->state.exist) {
				pvc->state.exist = pvc->state.new = 1;
				fr_log_dlci_active(pvc);
			}

			/* ifconfig PVC up */
			if (pvc->open_count && !pvc->state.active &&
			    pvc->state.exist && !pvc->state.new) {
				pvc_carrier(1, pvc);
				pvc->state.active = 1;
				fr_log_dlci_active(pvc);
			}

			if (lmi == LMI_CISCO) {
				data[i] = pvc->dlci >> 8;
				data[i + 1] = pvc->dlci & 0xFF;
			} else {
				data[i] = (pvc->dlci >> 4) & 0x3F;
				data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
				data[i + 2] = 0x80;
			}

			if (pvc->state.new)
				data[i + 2] |= 0x08;
			else if (pvc->state.active)
				data[i + 2] |= 0x02;

			i += stat_len;
			pvc = pvc->next;
		}
	}

	skb_put(skb, i);
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb_reset_network_header(skb);

	dev_queue_xmit(skb);
}

static void fr_set_link_state(int reliable, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc = state(hdlc)->first_pvc;

	state(hdlc)->reliable = reliable;
	if (reliable) {
		netif_dormant_off(dev);
		state(hdlc)->n391cnt = 0; /* Request full status */
		state(hdlc)->dce_changed = 1;

		if (state(hdlc)->settings.lmi == LMI_NONE) {
			while (pvc) {	/* Activate all PVCs */
				pvc_carrier(1, pvc);
				pvc->state.exist = pvc->state.active = 1;
				pvc->state.new = 0;
				pvc = pvc->next;
			}
		}
	} else {
		netif_dormant_on(dev);
		while (pvc) {		/* Deactivate all PVCs */
			pvc_carrier(0, pvc);
			pvc->state.exist = pvc->state.active = 0;
			pvc->state.new = 0;
			if (!state(hdlc)->settings.dce)
				pvc->state.bandwidth = 0;
			pvc = pvc->next;
		}
	}
}

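/* Periodic LMI timer.  In DCE mode, verify that a STATUS ENQUIRY arrived
 * within T392; in DTE mode, keep an error history over the last N393
 * polls, declare the link (un)reliable against the N392 threshold, and
 * send the next STATUS ENQUIRY (a full one every N391 polls). */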
static void fr_timer(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int i, cnt = 0, reliable;
	u32 list;

	if (state(hdlc)->settings.dce) {
		reliable = state(hdlc)->request &&
			time_before(jiffies, state(hdlc)->last_poll +
				    state(hdlc)->settings.t392 * HZ);
		state(hdlc)->request = 0;
	} else {
		state(hdlc)->last_errors <<= 1; /* Shift the list */
		if (state(hdlc)->request) {
			if (state(hdlc)->reliable)
				printk(KERN_INFO "%s: No LMI status reply "
				       "received\n", dev->name);
			state(hdlc)->last_errors |= 1;
		}

		list = state(hdlc)->last_errors;
		for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
			cnt += (list & 1);	/* errors count */

		reliable = (cnt < state(hdlc)->settings.n392);
	}

	if (state(hdlc)->reliable != reliable) {
		printk(KERN_INFO "%s: Link %sreliable\n", dev->name,
		       reliable ? "" : "un");
		fr_set_link_state(reliable, dev);
	}

	if (state(hdlc)->settings.dce)
		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t392 * HZ;
	else {
		if (state(hdlc)->n391cnt)
			state(hdlc)->n391cnt--;

		fr_lmi_send(dev, state(hdlc)->n391cnt == 0);

		state(hdlc)->last_poll = jiffies;
		state(hdlc)->request = 1;
		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t391 * HZ;
	}

	state(hdlc)->timer.function = fr_timer;
	state(hdlc)->timer.data = arg;
	add_timer(&state(hdlc)->timer);
}

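/* Parse an incoming LMI frame.  Returns 0 if the frame was handled and
 * non-zero on a malformed frame (the caller then counts an rx error).
 * As DCE, this answers a STATUS ENQUIRY; as DTE, it processes the STATUS
 * reply and, for a full report, updates the PVC list from the received
 * PVC status IEs. */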
static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc;
	u8 rxseq, txseq;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;

	if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
			LMI_CCITT_CISCO_LENGTH)) {
		printk(KERN_INFO "%s: Short LMI frame\n", dev->name);
		return 1;
	}

	if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
			     NLPID_CCITT_ANSI_LMI)) {
		printk(KERN_INFO "%s: Received non-LMI frame with LMI DLCI\n",
		       dev->name);
		return 1;
	}

	if (skb->data[4] != LMI_CALLREF) {
		printk(KERN_INFO "%s: Invalid LMI Call reference (0x%02X)\n",
		       dev->name, skb->data[4]);
		return 1;
	}

	if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
		printk(KERN_INFO "%s: Invalid LMI Message type (0x%02X)\n",
		       dev->name, skb->data[5]);
		return 1;
	}

	if (lmi == LMI_ANSI) {
		if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
			printk(KERN_INFO "%s: Not ANSI locking shift in LMI"
			       " message (0x%02X)\n", dev->name, skb->data[6]);
			return 1;
		}
		i = 7;
	} else
		i = 6;

	if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
			     LMI_ANSI_CISCO_REPTYPE)) {
		printk(KERN_INFO "%s: Not an LMI Report type IE (0x%02X)\n",
		       dev->name, skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_REPT_LEN) {
		printk(KERN_INFO "%s: Invalid LMI Report type IE length"
		       " (%u)\n", dev->name, skb->data[i]);
		return 1;
	}

	reptype = skb->data[++i];
	if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
		printk(KERN_INFO "%s: Unsupported LMI Report type (0x%02X)\n",
		       dev->name, reptype);
		return 1;
	}

	if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
			       LMI_ANSI_CISCO_ALIVE)) {
		printk(KERN_INFO "%s: Not an LMI Link integrity verification"
		       " IE (0x%02X)\n", dev->name, skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_INTEG_LEN) {
		printk(KERN_INFO "%s: Invalid LMI Link integrity verification"
		       " IE length (%u)\n", dev->name, skb->data[i]);
		return 1;
	}
	i++;

	state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
	rxseq = skb->data[i++];	/* Should confirm our sequence */

	txseq = state(hdlc)->txseq;

	if (dce)
		state(hdlc)->last_poll = jiffies;

	error = 0;
	if (!state(hdlc)->reliable)
		error = 1;

	if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
		state(hdlc)->n391cnt = 0;
		error = 1;
	}

	if (dce) {
		if (state(hdlc)->fullrep_sent && !error) {
			/* Stop sending full report -
			   the last one has been confirmed by DTE */
			state(hdlc)->fullrep_sent = 0;
			pvc = state(hdlc)->first_pvc;
			while (pvc) {
				if (pvc->state.new) {
					pvc->state.new = 0;

					/* Tell DTE that new PVC is now active */
					state(hdlc)->dce_changed = 1;
				}
				pvc = pvc->next;
			}
		}

		if (state(hdlc)->dce_changed) {
			reptype = LMI_FULLREP;
			state(hdlc)->fullrep_sent = 1;
			state(hdlc)->dce_changed = 0;
		}

		state(hdlc)->request = 1; /* got request */
		fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
		return 0;
	}

	/* DTE */

	state(hdlc)->request = 0; /* got response, no request pending */

	if (error)
		return 0;

	if (reptype != LMI_FULLREP)
		return 0;

	pvc = state(hdlc)->first_pvc;

	while (pvc) {
		pvc->state.deleted = 1;
		pvc = pvc->next;
	}

	no_ram = 0;
	while (skb->len >= i + 2 + stat_len) {
		u16 dlci;
		u32 bw;
		unsigned int active, new;

		if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				     LMI_ANSI_CISCO_PVCSTAT)) {
			printk(KERN_INFO "%s: Not an LMI PVC status IE"
			       " (0x%02X)\n", dev->name, skb->data[i]);
			return 1;
		}

		if (skb->data[++i] != stat_len) {
			printk(KERN_INFO "%s: Invalid LMI PVC status IE length"
			       " (%u)\n", dev->name, skb->data[i]);
			return 1;
		}
		i++;

		new = !!(skb->data[i + 2] & 0x08);
		active = !!(skb->data[i + 2] & 0x02);
		if (lmi == LMI_CISCO) {
			dlci = (skb->data[i] << 8) | skb->data[i + 1];
			bw = (skb->data[i + 3] << 16) |
				(skb->data[i + 4] << 8) |
				(skb->data[i + 5]);
		} else {
			dlci = ((skb->data[i] & 0x3F) << 4) |
				((skb->data[i + 1] & 0x78) >> 3);
			bw = 0;
		}

		pvc = add_pvc(dev, dlci);

		if (!pvc && !no_ram) {
			printk(KERN_WARNING
			       "%s: Memory squeeze on fr_lmi_recv()\n",
			       dev->name);
			no_ram = 1;
		}

		if (pvc) {
			pvc->state.exist = 1;
			pvc->state.deleted = 0;
			if (active != pvc->state.active ||
			    new != pvc->state.new ||
			    bw != pvc->state.bandwidth ||
			    !pvc->state.exist) {
				pvc->state.new = new;
				pvc->state.active = active;
				pvc->state.bandwidth = bw;
				pvc_carrier(active, pvc);
				fr_log_dlci_active(pvc);
			}
		}

		i += stat_len;
	}

	pvc = state(hdlc)->first_pvc;

	while (pvc) {
		if (pvc->state.deleted && pvc->state.exist) {
			pvc_carrier(0, pvc);
			pvc->state.active = pvc->state.new = 0;
			pvc->state.exist = 0;
			pvc->state.bandwidth = 0;
			fr_log_dlci_active(pvc);
		}
		pvc = pvc->next;
	}

	/* Next full report after N391 polls */
	state(hdlc)->n391cnt = state(hdlc)->settings.n391;

	return 0;
}

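/* Receive path for the FRAD device: hand LMI DLCI traffic to
 * fr_lmi_recv(), look up the PVC for data frames, track FECN/BECN, strip
 * the Frame Relay header and pass the packet up on the matching PVC
 * netdevice. */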
static int fr_rx(struct sk_buff *skb)
{
	struct net_device *frad = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(frad);
	fr_hdr *fh = (fr_hdr *)skb->data;
	u8 *data = skb->data;
	u16 dlci;
	pvc_device *pvc;
	struct net_device *dev = NULL;

	if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
		goto rx_error;

	dlci = q922_to_dlci(skb->data);

	if ((dlci == LMI_CCITT_ANSI_DLCI &&
	     (state(hdlc)->settings.lmi == LMI_ANSI ||
	      state(hdlc)->settings.lmi == LMI_CCITT)) ||
	    (dlci == LMI_CISCO_DLCI &&
	     state(hdlc)->settings.lmi == LMI_CISCO)) {
		if (fr_lmi_recv(frad, skb))
			goto rx_error;
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;
	}

	pvc = find_pvc(hdlc, dlci);
	if (!pvc) {
#ifdef DEBUG_PKT
		printk(KERN_INFO "%s: No PVC for received frame's DLCI %d\n",
		       frad->name, dlci);
#endif
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (pvc->state.fecn != fh->fecn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
		       dlci, fh->fecn ? "N" : "FF");
#endif
		pvc->state.fecn ^= 1;
	}

	if (pvc->state.becn != fh->becn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
		       dlci, fh->becn ? "N" : "FF");
#endif
		pvc->state.becn ^= 1;
	}

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
		dev_to_desc(frad)->stats.rx_dropped++;
		return NET_RX_DROP;
	}

	if (data[3] == NLPID_IP) {
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IP);

	} else if (data[3] == NLPID_IPV6) {
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IPV6);

	} else if (skb->len > 10 && data[3] == FR_PAD &&
		   data[4] == NLPID_SNAP && data[5] == FR_PAD) {
		u16 oui = ntohs(*(__be16 *)(data + 6));
		u16 pid = ntohs(*(__be16 *)(data + 8));
		skb_pull(skb, 10);

		switch ((((u32)oui) << 16) | pid) {
		case ETH_P_ARP: /* routed frame with SNAP */
		case ETH_P_IPX:
		case ETH_P_IP:  /* a long variant */
		case ETH_P_IPV6:
			dev = pvc->main;
			skb->protocol = htons(pid);
			break;

		case 0x80C20007: /* bridged Ethernet frame */
			if ((dev = pvc->ether) != NULL)
				skb->protocol = eth_type_trans(skb, dev);
			break;

		default:
			printk(KERN_INFO "%s: Unsupported protocol, OUI=%x "
			       "PID=%x\n", frad->name, oui, pid);
			dev_kfree_skb_any(skb);
			return NET_RX_DROP;
		}
	} else {
		printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x "
		       "length = %i\n", frad->name, data[3], skb->len);
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (dev) {
		struct net_device_stats *stats = pvc_get_stats(dev);
		stats->rx_packets++; /* PVC traffic */
		stats->rx_bytes += skb->len;
		if (pvc->state.becn)
			stats->rx_compressed++;
		netif_rx(skb);
		return NET_RX_SUCCESS;
	} else {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

 rx_error:
	dev_to_desc(frad)->stats.rx_errors++; /* Mark error */
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}

static void fr_start(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_start\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE) {
		state(hdlc)->reliable = 0;
		state(hdlc)->dce_changed = 1;
		state(hdlc)->request = 0;
		state(hdlc)->fullrep_sent = 0;
		state(hdlc)->last_errors = 0xFFFFFFFF;
		state(hdlc)->n391cnt = 0;
		state(hdlc)->txseq = state(hdlc)->rxseq = 0;

		init_timer(&state(hdlc)->timer);
		/* First poll after 1 s */
		state(hdlc)->timer.expires = jiffies + HZ;
		state(hdlc)->timer.function = fr_timer;
		state(hdlc)->timer.data = (unsigned long)dev;
		add_timer(&state(hdlc)->timer);
	} else
		fr_set_link_state(1, dev);
}

static void fr_stop(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_stop\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE)
		del_timer_sync(&state(hdlc)->timer);
	fr_set_link_state(0, dev);
}

static void fr_close(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {		/* Shutdown all PVCs for this FRAD */
		if (pvc->main)
			dev_close(pvc->main);
		if (pvc->ether)
			dev_close(pvc->ether);
		pvc = pvc->next;
	}
}

static void pvc_setup(struct net_device *dev)
{
	dev->type = ARPHRD_DLCI;
	dev->flags = IFF_POINTOPOINT;
	dev->hard_header_len = 10;
	dev->addr_len = 2;
}

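/* Create a "pvc%d" (or "pvceth%d" for a bridged Ethernet PVC) netdevice
 * and attach it to the PVC entry for the given DLCI, allocating the entry
 * if needed. */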
static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	pvc_device *pvc = NULL;
	struct net_device *dev;
	int result, used;
	char *prefix = "pvc%d";

	if (type == ARPHRD_ETHER)
		prefix = "pvceth%d";

	if ((pvc = add_pvc(frad, dlci)) == NULL) {
		printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n",
		       frad->name);
		return -ENOBUFS;
	}

	if (*get_dev_p(pvc, type))
		return -EEXIST;

	used = pvc_is_used(pvc);

	if (type == ARPHRD_ETHER)
		dev = alloc_netdev(sizeof(struct net_device_stats),
				   "pvceth%d", ether_setup);
	else
		dev = alloc_netdev(sizeof(struct net_device_stats),
				   "pvc%d", pvc_setup);

	if (!dev) {
		printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n",
		       frad->name);
		delete_unused_pvcs(hdlc);
		return -ENOBUFS;
	}

	if (type == ARPHRD_ETHER) {
		memcpy(dev->dev_addr, "\x00\x01", 2);
		get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
	} else {
		*(__be16 *)dev->dev_addr = htons(dlci);
		dlci_to_q922(dev->broadcast, dlci);
	}
	dev->hard_start_xmit = pvc_xmit;
	dev->get_stats = pvc_get_stats;
	dev->open = pvc_open;
	dev->stop = pvc_close;
	dev->do_ioctl = pvc_ioctl;
	dev->change_mtu = pvc_change_mtu;
	dev->mtu = HDLC_MAX_MTU;
	dev->tx_queue_len = 0;
	dev->priv = pvc;

	result = dev_alloc_name(dev, dev->name);
	if (result < 0) {
		free_netdev(dev);
		delete_unused_pvcs(hdlc);
		return result;
	}

	if (register_netdevice(dev) != 0) {
		free_netdev(dev);
		delete_unused_pvcs(hdlc);
		return -EIO;
	}

	dev->destructor = free_netdev;
	*get_dev_p(pvc, type) = dev;
	if (!used) {
		state(hdlc)->dce_changed = 1;
		state(hdlc)->dce_pvc_count++;
	}
	return 0;
}

static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
{
	pvc_device *pvc;
	struct net_device *dev;

	if ((pvc = find_pvc(hdlc, dlci)) == NULL)
		return -ENOENT;

	if ((dev = *get_dev_p(pvc, type)) == NULL)
		return -ENOENT;

	if (dev->flags & IFF_UP)
		return -EBUSY;		/* PVC in use */

	unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
	*get_dev_p(pvc, type) = NULL;

	if (!pvc_is_used(pvc)) {
		state(hdlc)->dce_pvc_count--;
		state(hdlc)->dce_changed = 1;
	}
	delete_unused_pvcs(hdlc);
	return 0;
}

static void fr_destroy(struct net_device *frad)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	pvc_device *pvc = state(hdlc)->first_pvc;
	state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
	state(hdlc)->dce_pvc_count = 0;
	state(hdlc)->dce_changed = 1;

	while (pvc) {
		pvc_device *next = pvc->next;
		/* destructors will free_netdev() main and ether */
		if (pvc->main)
			unregister_netdevice(pvc->main);

		if (pvc->ether)
			unregister_netdevice(pvc->ether);

		kfree(pvc);
		pvc = next;
	}
}

static struct hdlc_proto proto = {
	.close		= fr_close,
	.start		= fr_start,
	.stop		= fr_stop,
	.detach		= fr_destroy,
	.ioctl		= fr_ioctl,
	.module		= THIS_MODULE,
};

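/* SIOCWANDEV handler for the FRAD device: IF_GET_PROTO, IF_PROTO_FR
 * (attach the protocol and validate the LMI/timer/counter settings), and
 * the PVC add/delete requests.
 *
 * A minimal user-space sketch of adding DLCI 16 (illustrative only; the
 * interface name "hdlc0" and the socket descriptor "fd" are assumptions,
 * error handling omitted):
 *
 *	struct ifreq ifr;
 *	fr_proto_pvc pvc = { .dlci = 16 };
 *
 *	memset(&ifr, 0, sizeof(ifr));
 *	strcpy(ifr.ifr_name, "hdlc0");
 *	ifr.ifr_settings.type = IF_PROTO_FR_ADD_PVC;
 *	ifr.ifr_settings.ifs_ifsu.fr_pvc = &pvc;
 *	ioctl(fd, SIOCWANDEV, &ifr);
 */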
static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
	const size_t size = sizeof(fr_proto);
	fr_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	fr_proto_pvc pvc;
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;
		ifr->ifr_settings.type = IF_PROTO_FR;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(fr_s, &state(hdlc)->settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_FR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, fr_s, size))
			return -EFAULT;

		if (new_settings.lmi == LMI_DEFAULT)
			new_settings.lmi = LMI_ANSI;

		if ((new_settings.lmi != LMI_NONE &&
		     new_settings.lmi != LMI_ANSI &&
		     new_settings.lmi != LMI_CCITT &&
		     new_settings.lmi != LMI_CISCO) ||
		    new_settings.t391 < 1 ||
		    new_settings.t392 < 2 ||
		    new_settings.n391 < 1 ||
		    new_settings.n392 < 1 ||
		    new_settings.n393 < new_settings.n392 ||
		    new_settings.n393 > 32 ||
		    (new_settings.dce != 0 &&
		     new_settings.dce != 1))
			return -EINVAL;

		result = hdlc->attach(dev, ENCODING_NRZ,
				      PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
			result = attach_hdlc_protocol(dev, &proto, fr_rx,
						      sizeof(struct frad_state));
			if (result)
				return result;
			state(hdlc)->first_pvc = NULL;
			state(hdlc)->dce_pvc_count = 0;
		}
		memcpy(&state(hdlc)->settings, &new_settings, size);

		dev->hard_start_xmit = hdlc->xmit;
		dev->type = ARPHRD_FRAD;
		return 0;

	case IF_PROTO_FR_ADD_PVC:
	case IF_PROTO_FR_DEL_PVC:
	case IF_PROTO_FR_ADD_ETH_PVC:
	case IF_PROTO_FR_DEL_ETH_PVC:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
				   sizeof(fr_proto_pvc)))
			return -EFAULT;

		if (pvc.dlci <= 0 || pvc.dlci >= 1024)
			return -EINVAL; /* Only 10 bits, DLCI 0 reserved */

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
			result = ARPHRD_ETHER; /* bridged Ethernet device */
		else
			result = ARPHRD_DLCI;

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
			return fr_add_pvc(dev, pvc.dlci, result);
		else
			return fr_del_pvc(hdlc, pvc.dlci, result);
	}

	return -EINVAL;
}

static int __init mod_init(void)
{
	register_hdlc_protocol(&proto);
	return 0;
}

static void __exit mod_exit(void)
{
	unregister_hdlc_protocol(&proto);
}

module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");