hdlc_fr.c

/*
 * Generic HDLC support routines for Linux
 * Frame Relay support
 *
 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 *            Theory of PVC state
 *
 * DCE mode:
 *
 * (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
 *         0,x -> 1,1 if "link reliable" when sending FULL STATUS
 *         1,1 -> 1,0 if received FULL STATUS ACK
 *
 * (active) -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
 *          -> 1 when "PVC up" and (exist,new) = 1,0
 *
 * DTE mode:
 * (exist,new,active) = FULL STATUS if "link reliable"
 *                    = 0, 0, 0     if "link unreliable"
 *
 * No LMI:
 * active = open and "link reliable"
 * exist = new = not used
 *
 * CCITT LMI: ITU-T Q.933 Annex A
 * ANSI LMI: ANSI T1.617 Annex D
 * CISCO LMI: the original, aka "Gang of Four" LMI
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/pkt_sched.h>
#include <linux/random.h>
#include <linux/inetdevice.h>
#include <linux/lapb.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/hdlc.h>

#undef DEBUG_PKT
#undef DEBUG_ECN
#undef DEBUG_LINK
#undef DEBUG_PROTO
#undef DEBUG_PVC

#define FR_UI			0x03
#define FR_PAD			0x00

#define NLPID_IP		0xCC
#define NLPID_IPV6		0x8E
#define NLPID_SNAP		0x80
#define NLPID_PAD		0x00
#define NLPID_CCITT_ANSI_LMI	0x08
#define NLPID_CISCO_LMI		0x09

#define LMI_CCITT_ANSI_DLCI	   0	/* LMI DLCI */
#define LMI_CISCO_DLCI		1023

#define LMI_CALLREF		0x00	/* Call Reference */
#define LMI_ANSI_LOCKSHIFT	0x95	/* ANSI locking shift */
#define LMI_ANSI_CISCO_REPTYPE	0x01	/* report type */
#define LMI_CCITT_REPTYPE	0x51
#define LMI_ANSI_CISCO_ALIVE	0x03	/* keep alive */
#define LMI_CCITT_ALIVE		0x53
#define LMI_ANSI_CISCO_PVCSTAT	0x07	/* PVC status */
#define LMI_CCITT_PVCSTAT	0x57
#define LMI_FULLREP		0x00	/* full report */
#define LMI_INTEGRITY		0x01	/* link integrity report */
#define LMI_SINGLE		0x02	/* single PVC report */

#define LMI_STATUS_ENQUIRY	0x75
#define LMI_STATUS		0x7D	/* reply */

#define LMI_REPT_LEN		   1	/* report type element length */
#define LMI_INTEG_LEN		   2	/* link integrity element length */

#define LMI_CCITT_CISCO_LENGTH	  13	/* LMI frame lengths */
#define LMI_ANSI_LENGTH		  14
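
/*
 * Two-byte Q.922 address as seen on the wire: the 10-bit DLCI is split
 * into a 6-bit upper part (dlcih) and a 4-bit lower part (dlcil),
 * interleaved with the C/R, FECN, BECN, DE and address extension (EA) bits.
 */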
typedef struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	unsigned ea1:	1;
	unsigned cr:	1;
	unsigned dlcih:	6;

	unsigned ea2:	1;
	unsigned de:	1;
	unsigned becn:	1;
	unsigned fecn:	1;
	unsigned dlcil:	4;
#else
	unsigned dlcih:	6;
	unsigned cr:	1;
	unsigned ea1:	1;

	unsigned dlcil:	4;
	unsigned fecn:	1;
	unsigned becn:	1;
	unsigned de:	1;
	unsigned ea2:	1;
#endif
}__attribute__ ((packed)) fr_hdr;

typedef struct pvc_device_struct {
	struct net_device *frad;
	struct net_device *main;
	struct net_device *ether;	/* bridged Ethernet interface */
	struct pvc_device_struct *next;	/* Sorted in ascending DLCI order */
	int dlci;
	int open_count;

	struct {
		unsigned int new: 1;
		unsigned int active: 1;
		unsigned int exist: 1;
		unsigned int deleted: 1;
		unsigned int fecn: 1;
		unsigned int becn: 1;
		unsigned int bandwidth;	/* Cisco LMI reporting only */
	}state;
}pvc_device;


struct frad_state {
	fr_proto settings;
	pvc_device *first_pvc;
	int dce_pvc_count;

	struct timer_list timer;
	unsigned long last_poll;
	int reliable;
	int dce_changed;
	int request;
	int fullrep_sent;
	u32 last_errors;	/* last errors bit list */
	u8 n391cnt;
	u8 txseq;		/* TX sequence number */
	u8 rxseq;		/* RX sequence number */
};


static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);
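
/*
 * Convert between a 10-bit DLCI value and its two-byte Q.922 address
 * encoding; dlci_to_q922() sets the EA bit in the second byte and
 * leaves the C/R, FECN, BECN and DE bits clear.
 */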
static inline u16 q922_to_dlci(u8 *hdr)
{
	return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
}


static inline void dlci_to_q922(u8 *hdr, u16 dlci)
{
	hdr[0] = (dlci >> 2) & 0xFC;
	hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
}


static inline struct frad_state* state(hdlc_device *hdlc)
{
	return (struct frad_state *)(hdlc->state);
}


static __inline__ pvc_device* dev_to_pvc(struct net_device *dev)
{
	return dev->priv;
}

static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci)
{
	pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {
		if (pvc->dlci == dlci)
			return pvc;
		if (pvc->dlci > dlci)
			return NULL;	/* the list is sorted */
		pvc = pvc->next;
	}

	return NULL;
}
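
/*
 * Return the PVC for the given DLCI, allocating a new entry (kept in
 * ascending DLCI order) if none exists yet.  Allocation uses GFP_ATOMIC
 * because this can be called from the LMI receive path.
 */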
static pvc_device* add_pvc(struct net_device *dev, u16 dlci)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;

	while (*pvc_p) {
		if ((*pvc_p)->dlci == dlci)
			return *pvc_p;
		if ((*pvc_p)->dlci > dlci)
			break;	/* the list is sorted */
		pvc_p = &(*pvc_p)->next;
	}

	pvc = kzalloc(sizeof(pvc_device), GFP_ATOMIC);
#ifdef DEBUG_PVC
	printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
#endif
	if (!pvc)
		return NULL;

	pvc->dlci = dlci;
	pvc->frad = dev;
	pvc->next = *pvc_p;	/* Put it in the chain */
	*pvc_p = pvc;
	return pvc;
}


static inline int pvc_is_used(pvc_device *pvc)
{
	return pvc->main || pvc->ether;
}


static inline void pvc_carrier(int on, pvc_device *pvc)
{
	if (on) {
		if (pvc->main)
			if (!netif_carrier_ok(pvc->main))
				netif_carrier_on(pvc->main);
		if (pvc->ether)
			if (!netif_carrier_ok(pvc->ether))
				netif_carrier_on(pvc->ether);
	} else {
		if (pvc->main)
			if (netif_carrier_ok(pvc->main))
				netif_carrier_off(pvc->main);
		if (pvc->ether)
			if (netif_carrier_ok(pvc->ether))
				netif_carrier_off(pvc->ether);
	}
}


static inline void delete_unused_pvcs(hdlc_device *hdlc)
{
	pvc_device **pvc_p = &state(hdlc)->first_pvc;

	while (*pvc_p) {
		if (!pvc_is_used(*pvc_p)) {
			pvc_device *pvc = *pvc_p;
#ifdef DEBUG_PVC
			printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
#endif
			*pvc_p = pvc->next;
			kfree(pvc);
			continue;
		}
		pvc_p = &(*pvc_p)->next;
	}
}


static inline struct net_device** get_dev_p(pvc_device *pvc, int type)
{
	if (type == ARPHRD_ETHER)
		return &pvc->ether;
	else
		return &pvc->main;
}
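
/*
 * Push the Frame Relay encapsulation header onto the skb: the two-byte
 * Q.922 address and the UI control byte, followed by a one-byte NLPID
 * for IP, IPv6 and LMI frames, or a padded SNAP header (RFC 2427 style)
 * for bridged Ethernet and any other protocol.
 */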
static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
{
	u16 head_len;
	struct sk_buff *skb = *skb_p;

	switch (skb->protocol) {
	case __constant_htons(NLPID_CCITT_ANSI_LMI):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_CCITT_ANSI_LMI;
		break;

	case __constant_htons(NLPID_CISCO_LMI):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_CISCO_LMI;
		break;

	case __constant_htons(ETH_P_IP):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_IP;
		break;

	case __constant_htons(ETH_P_IPV6):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_IPV6;
		break;

	case __constant_htons(ETH_P_802_3):
		head_len = 10;
		if (skb_headroom(skb) < head_len) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb,
								    head_len);
			if (!skb2)
				return -ENOBUFS;
			dev_kfree_skb(skb);
			skb = *skb_p = skb2;
		}
		skb_push(skb, head_len);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		skb->data[5] = FR_PAD;
		skb->data[6] = 0x80;
		skb->data[7] = 0xC2;
		skb->data[8] = 0x00;
		skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
		break;

	default:
		head_len = 10;
		skb_push(skb, head_len);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		skb->data[5] = FR_PAD;
		skb->data[6] = FR_PAD;
		skb->data[7] = FR_PAD;
		*(__be16*)(skb->data + 8) = skb->protocol;
	}

	dlci_to_q922(skb->data, dlci);
	skb->data[2] = FR_UI;
	return 0;
}


static int pvc_open(struct net_device *dev)
{
	pvc_device *pvc = dev_to_pvc(dev);

	if ((pvc->frad->flags & IFF_UP) == 0)
		return -EIO;	/* Frad must be UP in order to activate PVC */

	if (pvc->open_count++ == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = netif_carrier_ok(pvc->frad);

		pvc_carrier(pvc->state.active, pvc);
		state(hdlc)->dce_changed = 1;
	}
	return 0;
}


static int pvc_close(struct net_device *dev)
{
	pvc_device *pvc = dev_to_pvc(dev);

	if (--pvc->open_count == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = 0;

		if (state(hdlc)->settings.dce) {
			state(hdlc)->dce_changed = 1;
			pvc->state.active = 0;
		}
	}
	return 0;
}


static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	pvc_device *pvc = dev_to_pvc(dev);
	fr_proto_pvc_info info;

	if (ifr->ifr_settings.type == IF_GET_PROTO) {
		if (dev->type == ARPHRD_ETHER)
			ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
		else
			ifr->ifr_settings.type = IF_PROTO_FR_PVC;

		if (ifr->ifr_settings.size < sizeof(info)) {
			/* data size wanted */
			ifr->ifr_settings.size = sizeof(info);
			return -ENOBUFS;
		}

		info.dlci = pvc->dlci;
		memcpy(info.master, pvc->frad->name, IFNAMSIZ);
		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
				 &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}

	return -EINVAL;
}


static inline struct net_device_stats *pvc_get_stats(struct net_device *dev)
{
	return &dev_to_desc(dev)->stats;
}
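
/*
 * Transmit on a PVC device: pad bridged Ethernet frames to the minimum
 * length, add the Frame Relay header and queue the frame on the FRAD.
 * tx_compressed is reused as a TX congestion (FECN seen) counter.
 */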
static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	pvc_device *pvc = dev_to_pvc(dev);
	struct net_device_stats *stats = pvc_get_stats(dev);

	if (pvc->state.active) {
		if (dev->type == ARPHRD_ETHER) {
			int pad = ETH_ZLEN - skb->len;
			if (pad > 0) { /* Pad the frame with zeros */
				int len = skb->len;
				if (skb_tailroom(skb) < pad)
					if (pskb_expand_head(skb, 0, pad,
							     GFP_ATOMIC)) {
						stats->tx_dropped++;
						dev_kfree_skb(skb);
						return 0;
					}
				skb_put(skb, pad);
				memset(skb->data + len, 0, pad);
			}
			skb->protocol = __constant_htons(ETH_P_802_3);
		}
		if (!fr_hard_header(&skb, pvc->dlci)) {
			stats->tx_bytes += skb->len;
			stats->tx_packets++;
			if (pvc->state.fecn) /* TX Congestion counter */
				stats->tx_compressed++;
			skb->dev = pvc->frad;
			dev_queue_xmit(skb);
			return 0;
		}
	}

	stats->tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}


static int pvc_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}


static inline void fr_log_dlci_active(pvc_device *pvc)
{
	printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n",
	       pvc->frad->name,
	       pvc->dlci,
	       pvc->main ? pvc->main->name : "",
	       pvc->main && pvc->ether ? " " : "",
	       pvc->ether ? pvc->ether->name : "",
	       pvc->state.new ? " new" : "",
	       !pvc->state.exist ? "deleted" :
	       pvc->state.active ? "active" : "inactive");
}


static inline u8 fr_lmi_nextseq(u8 x)
{
	x++;
	return x ? x : 1;
}
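
/*
 * Build and transmit an LMI STATUS (DCE) or STATUS ENQUIRY (DTE) frame.
 * In DCE mode a full report carries one PVC status IE per configured
 * PVC, and building it also updates each PVC's exist/new/active state
 * before the status is encoded.
 */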
static void fr_lmi_send(struct net_device *dev, int fullrep)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct sk_buff *skb;
	pvc_device *pvc = state(hdlc)->first_pvc;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
	u8 *data;
	int i = 0;

	if (dce && fullrep) {
		len += state(hdlc)->dce_pvc_count * (2 + stat_len);
		if (len > HDLC_MAX_MRU) {
			printk(KERN_WARNING "%s: Too many PVCs while sending "
			       "LMI full report\n", dev->name);
			return;
		}
	}

	skb = dev_alloc_skb(len);
	if (!skb) {
		printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n",
		       dev->name);
		return;
	}
	memset(skb->data, 0, len);
	skb_reserve(skb, 4);
	if (lmi == LMI_CISCO) {
		skb->protocol = __constant_htons(NLPID_CISCO_LMI);
		fr_hard_header(&skb, LMI_CISCO_DLCI);
	} else {
		skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI);
		fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
	}
	data = skb_tail_pointer(skb);
	data[i++] = LMI_CALLREF;
	data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
	if (lmi == LMI_ANSI)
		data[i++] = LMI_ANSI_LOCKSHIFT;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
		LMI_ANSI_CISCO_REPTYPE;
	data[i++] = LMI_REPT_LEN;
	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
	data[i++] = LMI_INTEG_LEN;
	data[i++] = state(hdlc)->txseq =
		fr_lmi_nextseq(state(hdlc)->txseq);
	data[i++] = state(hdlc)->rxseq;

	if (dce && fullrep) {
		while (pvc) {
			data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				LMI_ANSI_CISCO_PVCSTAT;
			data[i++] = stat_len;

			/* LMI start/restart */
			if (state(hdlc)->reliable && !pvc->state.exist) {
				pvc->state.exist = pvc->state.new = 1;
				fr_log_dlci_active(pvc);
			}

			/* ifconfig PVC up */
			if (pvc->open_count && !pvc->state.active &&
			    pvc->state.exist && !pvc->state.new) {
				pvc_carrier(1, pvc);
				pvc->state.active = 1;
				fr_log_dlci_active(pvc);
			}

			if (lmi == LMI_CISCO) {
				data[i] = pvc->dlci >> 8;
				data[i + 1] = pvc->dlci & 0xFF;
			} else {
				data[i] = (pvc->dlci >> 4) & 0x3F;
				data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
				data[i + 2] = 0x80;
			}

			if (pvc->state.new)
				data[i + 2] |= 0x08;
			else if (pvc->state.active)
				data[i + 2] |= 0x02;

			i += stat_len;
			pvc = pvc->next;
		}
	}

	skb_put(skb, i);
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb_reset_network_header(skb);

	dev_queue_xmit(skb);
}


static void fr_set_link_state(int reliable, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc = state(hdlc)->first_pvc;

	state(hdlc)->reliable = reliable;
	if (reliable) {
		netif_dormant_off(dev);
		state(hdlc)->n391cnt = 0; /* Request full status */
		state(hdlc)->dce_changed = 1;

		if (state(hdlc)->settings.lmi == LMI_NONE) {
			while (pvc) {	/* Activate all PVCs */
				pvc_carrier(1, pvc);
				pvc->state.exist = pvc->state.active = 1;
				pvc->state.new = 0;
				pvc = pvc->next;
			}
		}
	} else {
		netif_dormant_on(dev);
		while (pvc) {		/* Deactivate all PVCs */
			pvc_carrier(0, pvc);
			pvc->state.exist = pvc->state.active = 0;
			pvc->state.new = 0;
			if (!state(hdlc)->settings.dce)
				pvc->state.bandwidth = 0;
			pvc = pvc->next;
		}
	}
}
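
/*
 * LMI poll timer.  In DTE mode it runs every T391 seconds, sends a
 * STATUS ENQUIRY (a full-status request every N391 polls) and tracks
 * missing replies in a sliding window of the last N393 polls; the link
 * is declared unreliable once N392 or more of them failed.  In DCE mode
 * it runs every T392 seconds and only checks that an enquiry from the
 * DTE arrived in time.  The timer reschedules itself.
 */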
static void fr_timer(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int i, cnt = 0, reliable;
	u32 list;

	if (state(hdlc)->settings.dce) {
		reliable = state(hdlc)->request &&
			time_before(jiffies, state(hdlc)->last_poll +
				    state(hdlc)->settings.t392 * HZ);
		state(hdlc)->request = 0;
	} else {
		state(hdlc)->last_errors <<= 1; /* Shift the list */
		if (state(hdlc)->request) {
			if (state(hdlc)->reliable)
				printk(KERN_INFO "%s: No LMI status reply "
				       "received\n", dev->name);
			state(hdlc)->last_errors |= 1;
		}

		list = state(hdlc)->last_errors;
		for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
			cnt += (list & 1);	/* errors count */

		reliable = (cnt < state(hdlc)->settings.n392);
	}

	if (state(hdlc)->reliable != reliable) {
		printk(KERN_INFO "%s: Link %sreliable\n", dev->name,
		       reliable ? "" : "un");
		fr_set_link_state(reliable, dev);
	}

	if (state(hdlc)->settings.dce)
		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t392 * HZ;
	else {
		if (state(hdlc)->n391cnt)
			state(hdlc)->n391cnt--;

		fr_lmi_send(dev, state(hdlc)->n391cnt == 0);

		state(hdlc)->last_poll = jiffies;
		state(hdlc)->request = 1;

		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t391 * HZ;
	}

	state(hdlc)->timer.function = fr_timer;
	state(hdlc)->timer.data = arg;
	add_timer(&state(hdlc)->timer);
}
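
/*
 * Parse and validate a received LMI frame.  The fixed part (call
 * reference, message type, report type and link integrity IEs) is
 * checked first; in DTE mode a full status report then updates the
 * exist/new/active state of every PVC listed in it.  Returns non-zero
 * on a malformed frame.
 */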
static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc;
	u8 rxseq, txseq;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;

	if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
			LMI_CCITT_CISCO_LENGTH)) {
		printk(KERN_INFO "%s: Short LMI frame\n", dev->name);
		return 1;
	}

	if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
			     NLPID_CCITT_ANSI_LMI)) {
		printk(KERN_INFO "%s: Received non-LMI frame with LMI DLCI\n",
		       dev->name);
		return 1;
	}

	if (skb->data[4] != LMI_CALLREF) {
		printk(KERN_INFO "%s: Invalid LMI Call reference (0x%02X)\n",
		       dev->name, skb->data[4]);
		return 1;
	}

	if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
		printk(KERN_INFO "%s: Invalid LMI Message type (0x%02X)\n",
		       dev->name, skb->data[5]);
		return 1;
	}

	if (lmi == LMI_ANSI) {
		if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
			printk(KERN_INFO "%s: Not ANSI locking shift in LMI"
			       " message (0x%02X)\n", dev->name, skb->data[6]);
			return 1;
		}
		i = 7;
	} else
		i = 6;

	if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
			     LMI_ANSI_CISCO_REPTYPE)) {
		printk(KERN_INFO "%s: Not an LMI Report type IE (0x%02X)\n",
		       dev->name, skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_REPT_LEN) {
		printk(KERN_INFO "%s: Invalid LMI Report type IE length"
		       " (%u)\n", dev->name, skb->data[i]);
		return 1;
	}

	reptype = skb->data[++i];
	if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
		printk(KERN_INFO "%s: Unsupported LMI Report type (0x%02X)\n",
		       dev->name, reptype);
		return 1;
	}

	if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
			       LMI_ANSI_CISCO_ALIVE)) {
		printk(KERN_INFO "%s: Not an LMI Link integrity verification"
		       " IE (0x%02X)\n", dev->name, skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_INTEG_LEN) {
		printk(KERN_INFO "%s: Invalid LMI Link integrity verification"
		       " IE length (%u)\n", dev->name, skb->data[i]);
		return 1;
	}
	i++;

	state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
	rxseq = skb->data[i++];	/* Should confirm our sequence */

	txseq = state(hdlc)->txseq;

	if (dce)
		state(hdlc)->last_poll = jiffies;

	error = 0;
	if (!state(hdlc)->reliable)
		error = 1;

	if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
		state(hdlc)->n391cnt = 0;
		error = 1;
	}

	if (dce) {
		if (state(hdlc)->fullrep_sent && !error) {
			/* Stop sending full report -
			   the last one has been confirmed by DTE */
			state(hdlc)->fullrep_sent = 0;
			pvc = state(hdlc)->first_pvc;
			while (pvc) {
				if (pvc->state.new) {
					pvc->state.new = 0;

					/* Tell DTE that new PVC is now active */
					state(hdlc)->dce_changed = 1;
				}
				pvc = pvc->next;
			}
		}

		if (state(hdlc)->dce_changed) {
			reptype = LMI_FULLREP;
			state(hdlc)->fullrep_sent = 1;
			state(hdlc)->dce_changed = 0;
		}

		state(hdlc)->request = 1; /* got request */
		fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
		return 0;
	}

	/* DTE */

	state(hdlc)->request = 0; /* got response, no request pending */

	if (error)
		return 0;

	if (reptype != LMI_FULLREP)
		return 0;

	pvc = state(hdlc)->first_pvc;

	while (pvc) {
		pvc->state.deleted = 1;
		pvc = pvc->next;
	}

	no_ram = 0;
	while (skb->len >= i + 2 + stat_len) {
		u16 dlci;
		u32 bw;
		unsigned int active, new;

		if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				     LMI_ANSI_CISCO_PVCSTAT)) {
			printk(KERN_INFO "%s: Not an LMI PVC status IE"
			       " (0x%02X)\n", dev->name, skb->data[i]);
			return 1;
		}

		if (skb->data[++i] != stat_len) {
			printk(KERN_INFO "%s: Invalid LMI PVC status IE length"
			       " (%u)\n", dev->name, skb->data[i]);
			return 1;
		}
		i++;

		new = !! (skb->data[i + 2] & 0x08);
		active = !! (skb->data[i + 2] & 0x02);
		if (lmi == LMI_CISCO) {
			dlci = (skb->data[i] << 8) | skb->data[i + 1];
			bw = (skb->data[i + 3] << 16) |
				(skb->data[i + 4] << 8) |
				(skb->data[i + 5]);
		} else {
			dlci = ((skb->data[i] & 0x3F) << 4) |
				((skb->data[i + 1] & 0x78) >> 3);
			bw = 0;
		}

		pvc = add_pvc(dev, dlci);

		if (!pvc && !no_ram) {
			printk(KERN_WARNING
			       "%s: Memory squeeze on fr_lmi_recv()\n",
			       dev->name);
			no_ram = 1;
		}

		if (pvc) {
			pvc->state.exist = 1;
			pvc->state.deleted = 0;
			if (active != pvc->state.active ||
			    new != pvc->state.new ||
			    bw != pvc->state.bandwidth ||
			    !pvc->state.exist) {
				pvc->state.new = new;
				pvc->state.active = active;
				pvc->state.bandwidth = bw;
				pvc_carrier(active, pvc);
				fr_log_dlci_active(pvc);
			}
		}

		i += stat_len;
	}

	pvc = state(hdlc)->first_pvc;

	while (pvc) {
		if (pvc->state.deleted && pvc->state.exist) {
			pvc_carrier(0, pvc);
			pvc->state.active = pvc->state.new = 0;
			pvc->state.exist = 0;
			pvc->state.bandwidth = 0;
			fr_log_dlci_active(pvc);
		}
		pvc = pvc->next;
	}

	/* Next full report after N391 polls */
	state(hdlc)->n391cnt = state(hdlc)->settings.n391;

	return 0;
}
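
/*
 * Receive handler for the FRAD: frames on the LMI DLCI are passed to
 * fr_lmi_recv(), everything else is demultiplexed by DLCI to its PVC
 * device and by NLPID/SNAP header to the right protocol (IP, IPv6,
 * routed SNAP protocols or bridged Ethernet).
 */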
static int fr_rx(struct sk_buff *skb)
{
	struct net_device *frad = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(frad);
	fr_hdr *fh = (fr_hdr*)skb->data;
	u8 *data = skb->data;
	u16 dlci;
	pvc_device *pvc;
	struct net_device *dev = NULL;

	if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
		goto rx_error;

	dlci = q922_to_dlci(skb->data);

	if ((dlci == LMI_CCITT_ANSI_DLCI &&
	     (state(hdlc)->settings.lmi == LMI_ANSI ||
	      state(hdlc)->settings.lmi == LMI_CCITT)) ||
	    (dlci == LMI_CISCO_DLCI &&
	     state(hdlc)->settings.lmi == LMI_CISCO)) {
		if (fr_lmi_recv(frad, skb))
			goto rx_error;
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;
	}

	pvc = find_pvc(hdlc, dlci);
	if (!pvc) {
#ifdef DEBUG_PKT
		printk(KERN_INFO "%s: No PVC for received frame's DLCI %d\n",
		       frad->name, dlci);
#endif
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (pvc->state.fecn != fh->fecn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
		       dlci, fh->fecn ? "N" : "FF");
#endif
		pvc->state.fecn ^= 1;
	}

	if (pvc->state.becn != fh->becn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
		       dlci, fh->becn ? "N" : "FF");
#endif
		pvc->state.becn ^= 1;
	}

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
		dev_to_desc(frad)->stats.rx_dropped++;
		return NET_RX_DROP;
	}

	if (data[3] == NLPID_IP) {
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IP);

	} else if (data[3] == NLPID_IPV6) {
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IPV6);

	} else if (skb->len > 10 && data[3] == FR_PAD &&
		   data[4] == NLPID_SNAP && data[5] == FR_PAD) {
		u16 oui = ntohs(*(__be16*)(data + 6));
		u16 pid = ntohs(*(__be16*)(data + 8));
		skb_pull(skb, 10);

		switch ((((u32)oui) << 16) | pid) {
		case ETH_P_ARP: /* routed frame with SNAP */
		case ETH_P_IPX:
		case ETH_P_IP:  /* a long variant */
		case ETH_P_IPV6:
			dev = pvc->main;
			skb->protocol = htons(pid);
			break;

		case 0x80C20007: /* bridged Ethernet frame */
			if ((dev = pvc->ether) != NULL)
				skb->protocol = eth_type_trans(skb, dev);
			break;

		default:
			printk(KERN_INFO "%s: Unsupported protocol, OUI=%x "
			       "PID=%x\n", frad->name, oui, pid);
			dev_kfree_skb_any(skb);
			return NET_RX_DROP;
		}
	} else {
		printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x "
		       "length = %i\n", frad->name, data[3], skb->len);
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (dev) {
		struct net_device_stats *stats = pvc_get_stats(dev);
		stats->rx_packets++; /* PVC traffic */
		stats->rx_bytes += skb->len;
		if (pvc->state.becn)
			stats->rx_compressed++;
		netif_rx(skb);
		return NET_RX_SUCCESS;
	} else {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

 rx_error:
	dev_to_desc(frad)->stats.rx_errors++; /* Mark error */
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}


static void fr_start(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_start\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE) {
		state(hdlc)->reliable = 0;
		state(hdlc)->dce_changed = 1;
		state(hdlc)->request = 0;
		state(hdlc)->fullrep_sent = 0;
		state(hdlc)->last_errors = 0xFFFFFFFF;
		state(hdlc)->n391cnt = 0;
		state(hdlc)->txseq = state(hdlc)->rxseq = 0;

		init_timer(&state(hdlc)->timer);
		/* First poll after 1 s */
		state(hdlc)->timer.expires = jiffies + HZ;
		state(hdlc)->timer.function = fr_timer;
		state(hdlc)->timer.data = (unsigned long)dev;
		add_timer(&state(hdlc)->timer);
	} else
		fr_set_link_state(1, dev);
}


static void fr_stop(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_stop\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE)
		del_timer_sync(&state(hdlc)->timer);
	fr_set_link_state(0, dev);
}


static void fr_close(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {		/* Shutdown all PVCs for this FRAD */
		if (pvc->main)
			dev_close(pvc->main);
		if (pvc->ether)
			dev_close(pvc->ether);
		pvc = pvc->next;
	}
}


static void pvc_setup(struct net_device *dev)
{
	dev->type = ARPHRD_DLCI;
	dev->flags = IFF_POINTOPOINT;
	dev->hard_header_len = 10;
	dev->addr_len = 2;
}
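
/*
 * Create a "pvc%d" (routed) or "pvceth%d" (bridged Ethernet) net device
 * for the given DLCI and attach it to the FRAD.  A PVC that becomes
 * used for the first time bumps dce_pvc_count and marks the DCE status
 * as changed, so the next full report includes it.
 */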
static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	pvc_device *pvc = NULL;
	struct net_device *dev;
	int result, used;
	char * prefix = "pvc%d";

	if (type == ARPHRD_ETHER)
		prefix = "pvceth%d";

	if ((pvc = add_pvc(frad, dlci)) == NULL) {
		printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n",
		       frad->name);
		return -ENOBUFS;
	}

	if (*get_dev_p(pvc, type))
		return -EEXIST;

	used = pvc_is_used(pvc);

	if (type == ARPHRD_ETHER)
		dev = alloc_netdev(sizeof(struct net_device_stats),
				   "pvceth%d", ether_setup);
	else
		dev = alloc_netdev(sizeof(struct net_device_stats),
				   "pvc%d", pvc_setup);

	if (!dev) {
		printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n",
		       frad->name);
		delete_unused_pvcs(hdlc);
		return -ENOBUFS;
	}

	if (type == ARPHRD_ETHER) {
		memcpy(dev->dev_addr, "\x00\x01", 2);
		get_random_bytes(dev->dev_addr + 2, ETH_ALEN - 2);
	} else {
		*(__be16*)dev->dev_addr = htons(dlci);
		dlci_to_q922(dev->broadcast, dlci);
	}
	dev->hard_start_xmit = pvc_xmit;
	dev->get_stats = pvc_get_stats;
	dev->open = pvc_open;
	dev->stop = pvc_close;
	dev->do_ioctl = pvc_ioctl;
	dev->change_mtu = pvc_change_mtu;
	dev->mtu = HDLC_MAX_MTU;
	dev->tx_queue_len = 0;
	dev->priv = pvc;

	result = dev_alloc_name(dev, dev->name);
	if (result < 0) {
		free_netdev(dev);
		delete_unused_pvcs(hdlc);
		return result;
	}

	if (register_netdevice(dev) != 0) {
		free_netdev(dev);
		delete_unused_pvcs(hdlc);
		return -EIO;
	}

	dev->destructor = free_netdev;
	*get_dev_p(pvc, type) = dev;
	if (!used) {
		state(hdlc)->dce_changed = 1;
		state(hdlc)->dce_pvc_count++;
	}
	return 0;
}


static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
{
	pvc_device *pvc;
	struct net_device *dev;

	if ((pvc = find_pvc(hdlc, dlci)) == NULL)
		return -ENOENT;

	if ((dev = *get_dev_p(pvc, type)) == NULL)
		return -ENOENT;

	if (dev->flags & IFF_UP)
		return -EBUSY;		/* PVC in use */

	unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
	*get_dev_p(pvc, type) = NULL;

	if (!pvc_is_used(pvc)) {
		state(hdlc)->dce_pvc_count--;
		state(hdlc)->dce_changed = 1;
	}
	delete_unused_pvcs(hdlc);
	return 0;
}


static void fr_destroy(struct net_device *frad)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	pvc_device *pvc = state(hdlc)->first_pvc;
	state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
	state(hdlc)->dce_pvc_count = 0;
	state(hdlc)->dce_changed = 1;

	while (pvc) {
		pvc_device *next = pvc->next;
		/* destructors will free_netdev() main and ether */
		if (pvc->main)
			unregister_netdevice(pvc->main);

		if (pvc->ether)
			unregister_netdevice(pvc->ether);

		kfree(pvc);
		pvc = next;
	}
}


static struct hdlc_proto proto = {
	.close		= fr_close,
	.start		= fr_start,
	.stop		= fr_stop,
	.detach		= fr_destroy,
	.ioctl		= fr_ioctl,
	.module		= THIS_MODULE,
};
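
/*
 * Protocol ioctl handler: IF_GET_PROTO returns the current settings,
 * IF_PROTO_FR validates new settings (LMI type, T391/T392,
 * N391/N392/N393, DCE flag) and attaches the Frame Relay protocol to
 * the HDLC device, and IF_PROTO_FR_{ADD,DEL}[_ETH]_PVC creates or
 * removes PVC devices.
 */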
static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
	const size_t size = sizeof(fr_proto);
	fr_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	fr_proto_pvc pvc;
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;
		ifr->ifr_settings.type = IF_PROTO_FR;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(fr_s, &state(hdlc)->settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_FR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, fr_s, size))
			return -EFAULT;

		if (new_settings.lmi == LMI_DEFAULT)
			new_settings.lmi = LMI_ANSI;

		if ((new_settings.lmi != LMI_NONE &&
		     new_settings.lmi != LMI_ANSI &&
		     new_settings.lmi != LMI_CCITT &&
		     new_settings.lmi != LMI_CISCO) ||
		    new_settings.t391 < 1 ||
		    new_settings.t392 < 2 ||
		    new_settings.n391 < 1 ||
		    new_settings.n392 < 1 ||
		    new_settings.n393 < new_settings.n392 ||
		    new_settings.n393 > 32 ||
		    (new_settings.dce != 0 &&
		     new_settings.dce != 1))
			return -EINVAL;

		result = hdlc->attach(dev, ENCODING_NRZ,
				      PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
			result = attach_hdlc_protocol(dev, &proto, fr_rx,
						      sizeof(struct frad_state));
			if (result)
				return result;
			state(hdlc)->first_pvc = NULL;
			state(hdlc)->dce_pvc_count = 0;
		}
		memcpy(&state(hdlc)->settings, &new_settings, size);

		dev->hard_start_xmit = hdlc->xmit;
		dev->type = ARPHRD_FRAD;
		return 0;

	case IF_PROTO_FR_ADD_PVC:
	case IF_PROTO_FR_DEL_PVC:
	case IF_PROTO_FR_ADD_ETH_PVC:
	case IF_PROTO_FR_DEL_ETH_PVC:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
				   sizeof(fr_proto_pvc)))
			return -EFAULT;

		if (pvc.dlci <= 0 || pvc.dlci >= 1024)
			return -EINVAL;	/* Only 10 bits, DLCI 0 reserved */

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
			result = ARPHRD_ETHER; /* bridged Ethernet device */
		else
			result = ARPHRD_DLCI;

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
			return fr_add_pvc(dev, pvc.dlci, result);
		else
			return fr_del_pvc(hdlc, pvc.dlci, result);
	}

	return -EINVAL;
}


static int __init mod_init(void)
{
	register_hdlc_protocol(&proto);
	return 0;
}


static void __exit mod_exit(void)
{
	unregister_hdlc_protocol(&proto);
}


module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");