hdlc_fr.c

/*
 * Generic HDLC support routines for Linux
 * Frame Relay support
 *
 * Copyright (C) 1999 - 2006 Krzysztof Halasa <khc@pm.waw.pl>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *

	    Theory of PVC state

 DCE mode:

 (exist,new) -> 0,0 when "PVC create" or if "link unreliable"
	 0,x -> 1,1 if "link reliable" when sending FULL STATUS
	 1,1 -> 1,0 if received FULL STATUS ACK

 (active)    -> 0 when "ifconfig PVC down" or "link unreliable" or "PVC create"
	     -> 1 when "PVC up" and (exist,new) = 1,0

 DTE mode:
 (exist,new,active) = FULL STATUS if "link reliable"
		    = 0, 0, 0 if "link unreliable"

 No LMI:
 active = open and "link reliable"
 exist = new = not used

 CCITT LMI: ITU-T Q.933 Annex A
 ANSI LMI: ANSI T1.617 Annex D
 CISCO LMI: the original, aka "Gang of Four" LMI

*/

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/errno.h>
#include <linux/if_arp.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/pkt_sched.h>
#include <linux/inetdevice.h>
#include <linux/lapb.h>
#include <linux/rtnetlink.h>
#include <linux/etherdevice.h>
#include <linux/hdlc.h>

#undef DEBUG_PKT
#undef DEBUG_ECN
#undef DEBUG_LINK
#undef DEBUG_PROTO
#undef DEBUG_PVC

#define FR_UI 0x03
#define FR_PAD 0x00

#define NLPID_IP 0xCC
#define NLPID_IPV6 0x8E
#define NLPID_SNAP 0x80
#define NLPID_PAD 0x00
#define NLPID_CCITT_ANSI_LMI 0x08
#define NLPID_CISCO_LMI 0x09

#define LMI_CCITT_ANSI_DLCI 0 /* LMI DLCI */
#define LMI_CISCO_DLCI 1023

#define LMI_CALLREF 0x00 /* Call Reference */
#define LMI_ANSI_LOCKSHIFT 0x95 /* ANSI locking shift */
#define LMI_ANSI_CISCO_REPTYPE 0x01 /* report type */
#define LMI_CCITT_REPTYPE 0x51
#define LMI_ANSI_CISCO_ALIVE 0x03 /* keep alive */
#define LMI_CCITT_ALIVE 0x53
#define LMI_ANSI_CISCO_PVCSTAT 0x07 /* PVC status */
#define LMI_CCITT_PVCSTAT 0x57
#define LMI_FULLREP 0x00 /* full report */
#define LMI_INTEGRITY 0x01 /* link integrity report */
#define LMI_SINGLE 0x02 /* single PVC report */
#define LMI_STATUS_ENQUIRY 0x75
#define LMI_STATUS 0x7D /* reply */

#define LMI_REPT_LEN 1 /* report type element length */
#define LMI_INTEG_LEN 2 /* link integrity element length */

#define LMI_CCITT_CISCO_LENGTH 13 /* LMI frame lengths */
#define LMI_ANSI_LENGTH 14

typedef struct {
#if defined(__LITTLE_ENDIAN_BITFIELD)
	unsigned ea1: 1;
	unsigned cr: 1;
	unsigned dlcih: 6;

	unsigned ea2: 1;
	unsigned de: 1;
	unsigned becn: 1;
	unsigned fecn: 1;
	unsigned dlcil: 4;
#else
	unsigned dlcih: 6;
	unsigned cr: 1;
	unsigned ea1: 1;

	unsigned dlcil: 4;
	unsigned fecn: 1;
	unsigned becn: 1;
	unsigned de: 1;
	unsigned ea2: 1;
#endif
} __attribute__((packed)) fr_hdr;

typedef struct pvc_device_struct {
	struct net_device *frad;
	struct net_device *main;
	struct net_device *ether;	/* bridged Ethernet interface */
	struct pvc_device_struct *next;	/* Sorted in ascending DLCI order */
	int dlci;
	int open_count;

	struct {
		unsigned int new: 1;
		unsigned int active: 1;
		unsigned int exist: 1;
		unsigned int deleted: 1;
		unsigned int fecn: 1;
		unsigned int becn: 1;
		unsigned int bandwidth;	/* Cisco LMI reporting only */
	} state;
} pvc_device;

struct pvc_desc {
	struct net_device_stats stats;
	pvc_device *pvc;
};

struct frad_state {
	fr_proto settings;
	pvc_device *first_pvc;
	int dce_pvc_count;
	struct timer_list timer;
	unsigned long last_poll;
	int reliable;
	int dce_changed;
	int request;
	int fullrep_sent;
	u32 last_errors;	/* last errors bit list */
	u8 n391cnt;
	u8 txseq;	/* TX sequence number */
	u8 rxseq;	/* RX sequence number */
};

static int fr_ioctl(struct net_device *dev, struct ifreq *ifr);

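/* A Q.922 address is two octets: the upper six DLCI bits sit in bits 7-2 of
 * the first byte, the lower four bits in bits 7-4 of the second byte.  Bit 0
 * of each byte is the address-extension (EA) flag; dlci_to_q922() sets it in
 * the final byte. */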
static inline u16 q922_to_dlci(u8 *hdr)
{
	return ((hdr[0] & 0xFC) << 2) | ((hdr[1] & 0xF0) >> 4);
}

static inline void dlci_to_q922(u8 *hdr, u16 dlci)
{
	hdr[0] = (dlci >> 2) & 0xFC;
	hdr[1] = ((dlci << 4) & 0xF0) | 0x01;
}

static inline struct frad_state* state(hdlc_device *hdlc)
{
	return (struct frad_state *)(hdlc->state);
}

static inline struct pvc_desc* pvcdev_to_desc(struct net_device *dev)
{
	return dev->priv;
}

static inline struct net_device_stats* pvc_get_stats(struct net_device *dev)
{
	return &pvcdev_to_desc(dev)->stats;
}

static inline pvc_device* find_pvc(hdlc_device *hdlc, u16 dlci)
{
	pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {
		if (pvc->dlci == dlci)
			return pvc;
		if (pvc->dlci > dlci)
			return NULL;	/* the list is sorted */
		pvc = pvc->next;
	}

	return NULL;
}

static pvc_device* add_pvc(struct net_device *dev, u16 dlci)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc, **pvc_p = &state(hdlc)->first_pvc;

	while (*pvc_p) {
		if ((*pvc_p)->dlci == dlci)
			return *pvc_p;
		if ((*pvc_p)->dlci > dlci)
			break;	/* the list is sorted */
		pvc_p = &(*pvc_p)->next;
	}

	pvc = kzalloc(sizeof(pvc_device), GFP_ATOMIC);
#ifdef DEBUG_PVC
	printk(KERN_DEBUG "add_pvc: allocated pvc %p, frad %p\n", pvc, dev);
#endif
	if (!pvc)
		return NULL;

	pvc->dlci = dlci;
	pvc->frad = dev;
	pvc->next = *pvc_p;	/* Put it in the chain */
	*pvc_p = pvc;
	return pvc;
}

static inline int pvc_is_used(pvc_device *pvc)
{
	return pvc->main || pvc->ether;
}

static inline void pvc_carrier(int on, pvc_device *pvc)
{
	if (on) {
		if (pvc->main)
			if (!netif_carrier_ok(pvc->main))
				netif_carrier_on(pvc->main);
		if (pvc->ether)
			if (!netif_carrier_ok(pvc->ether))
				netif_carrier_on(pvc->ether);
	} else {
		if (pvc->main)
			if (netif_carrier_ok(pvc->main))
				netif_carrier_off(pvc->main);
		if (pvc->ether)
			if (netif_carrier_ok(pvc->ether))
				netif_carrier_off(pvc->ether);
	}
}

static inline void delete_unused_pvcs(hdlc_device *hdlc)
{
	pvc_device **pvc_p = &state(hdlc)->first_pvc;

	while (*pvc_p) {
		if (!pvc_is_used(*pvc_p)) {
			pvc_device *pvc = *pvc_p;
#ifdef DEBUG_PVC
			printk(KERN_DEBUG "freeing unused pvc: %p\n", pvc);
#endif
			*pvc_p = pvc->next;
			kfree(pvc);
			continue;
		}
		pvc_p = &(*pvc_p)->next;
	}
}

static inline struct net_device** get_dev_p(pvc_device *pvc, int type)
{
	if (type == ARPHRD_ETHER)
		return &pvc->ether;
	else
		return &pvc->main;
}

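/* Prepend the Frame Relay encapsulation: the two-byte Q.922 address, the UI
 * control byte and an NLPID for IP/IPv6/LMI, or a ten-byte SNAP header
 * (OUI 00-80-C2, PID 0x0007 for bridged Ethernet; OUI 00-00-00 with the
 * Ethernet protocol ID for any other protocol). */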
static int fr_hard_header(struct sk_buff **skb_p, u16 dlci)
{
	u16 head_len;
	struct sk_buff *skb = *skb_p;

	switch (skb->protocol) {
	case __constant_htons(NLPID_CCITT_ANSI_LMI):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_CCITT_ANSI_LMI;
		break;

	case __constant_htons(NLPID_CISCO_LMI):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_CISCO_LMI;
		break;

	case __constant_htons(ETH_P_IP):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_IP;
		break;

	case __constant_htons(ETH_P_IPV6):
		head_len = 4;
		skb_push(skb, head_len);
		skb->data[3] = NLPID_IPV6;
		break;

	case __constant_htons(ETH_P_802_3):
		head_len = 10;
		if (skb_headroom(skb) < head_len) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb,
								    head_len);
			if (!skb2)
				return -ENOBUFS;
			dev_kfree_skb(skb);
			skb = *skb_p = skb2;
		}
		skb_push(skb, head_len);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		skb->data[5] = FR_PAD;
		skb->data[6] = 0x80;
		skb->data[7] = 0xC2;
		skb->data[8] = 0x00;
		skb->data[9] = 0x07; /* bridged Ethernet frame w/out FCS */
		break;

	default:
		head_len = 10;
		skb_push(skb, head_len);
		skb->data[3] = FR_PAD;
		skb->data[4] = NLPID_SNAP;
		skb->data[5] = FR_PAD;
		skb->data[6] = FR_PAD;
		skb->data[7] = FR_PAD;
		*(__be16*)(skb->data + 8) = skb->protocol;
	}

	dlci_to_q922(skb->data, dlci);
	skb->data[2] = FR_UI;
	return 0;
}

static int pvc_open(struct net_device *dev)
{
	pvc_device *pvc = pvcdev_to_desc(dev)->pvc;

	if ((pvc->frad->flags & IFF_UP) == 0)
		return -EIO;	/* Frad must be UP in order to activate PVC */

	if (pvc->open_count++ == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = netif_carrier_ok(pvc->frad);

		pvc_carrier(pvc->state.active, pvc);
		state(hdlc)->dce_changed = 1;
	}
	return 0;
}

static int pvc_close(struct net_device *dev)
{
	pvc_device *pvc = pvcdev_to_desc(dev)->pvc;

	if (--pvc->open_count == 0) {
		hdlc_device *hdlc = dev_to_hdlc(pvc->frad);
		if (state(hdlc)->settings.lmi == LMI_NONE)
			pvc->state.active = 0;

		if (state(hdlc)->settings.dce) {
			state(hdlc)->dce_changed = 1;
			pvc->state.active = 0;
		}
	}
	return 0;
}

static int pvc_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
	fr_proto_pvc_info info;

	if (ifr->ifr_settings.type == IF_GET_PROTO) {
		if (dev->type == ARPHRD_ETHER)
			ifr->ifr_settings.type = IF_PROTO_FR_ETH_PVC;
		else
			ifr->ifr_settings.type = IF_PROTO_FR_PVC;

		if (ifr->ifr_settings.size < sizeof(info)) {
			/* data size wanted */
			ifr->ifr_settings.size = sizeof(info);
			return -ENOBUFS;
		}

		info.dlci = pvc->dlci;
		memcpy(info.master, pvc->frad->name, IFNAMSIZ);
		if (copy_to_user(ifr->ifr_settings.ifs_ifsu.fr_pvc_info,
				 &info, sizeof(info)))
			return -EFAULT;
		return 0;
	}

	return -EINVAL;
}

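/* Frames sent on a bridged Ethernet PVC are padded with zeros to ETH_ZLEN
 * before encapsulation; tx_compressed doubles as a TX congestion counter,
 * incremented while FECN is being received on this DLCI. */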
static int pvc_xmit(struct sk_buff *skb, struct net_device *dev)
{
	pvc_device *pvc = pvcdev_to_desc(dev)->pvc;
	struct net_device_stats *stats = pvc_get_stats(dev);

	if (pvc->state.active) {
		if (dev->type == ARPHRD_ETHER) {
			int pad = ETH_ZLEN - skb->len;
			if (pad > 0) { /* Pad the frame with zeros */
				int len = skb->len;
				if (skb_tailroom(skb) < pad)
					if (pskb_expand_head(skb, 0, pad,
							     GFP_ATOMIC)) {
						stats->tx_dropped++;
						dev_kfree_skb(skb);
						return 0;
					}
				skb_put(skb, pad);
				memset(skb->data + len, 0, pad);
			}
			skb->protocol = __constant_htons(ETH_P_802_3);
		}
		if (!fr_hard_header(&skb, pvc->dlci)) {
			stats->tx_bytes += skb->len;
			stats->tx_packets++;
			if (pvc->state.fecn) /* TX Congestion counter */
				stats->tx_compressed++;
			skb->dev = pvc->frad;
			dev_queue_xmit(skb);
			return 0;
		}
	}

	stats->tx_dropped++;
	dev_kfree_skb(skb);
	return 0;
}

static int pvc_change_mtu(struct net_device *dev, int new_mtu)
{
	if ((new_mtu < 68) || (new_mtu > HDLC_MAX_MTU))
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static inline void fr_log_dlci_active(pvc_device *pvc)
{
	printk(KERN_INFO "%s: DLCI %d [%s%s%s]%s %s\n",
	       pvc->frad->name,
	       pvc->dlci,
	       pvc->main ? pvc->main->name : "",
	       pvc->main && pvc->ether ? " " : "",
	       pvc->ether ? pvc->ether->name : "",
	       pvc->state.new ? " new" : "",
	       !pvc->state.exist ? "deleted" :
	       pvc->state.active ? "active" : "inactive");
}

static inline u8 fr_lmi_nextseq(u8 x)
{
	x++;
	return x ? x : 1;
}

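/* Build and queue one LMI frame: a STATUS reply when acting as DCE, a STATUS
 * ENQUIRY when acting as DTE.  Every frame carries the report type and link
 * integrity (TX/RX sequence) elements; a DCE full report appends one PVC
 * status element per PVC on the list. */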
static void fr_lmi_send(struct net_device *dev, int fullrep)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	struct sk_buff *skb;
	pvc_device *pvc = state(hdlc)->first_pvc;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int len = lmi == LMI_ANSI ? LMI_ANSI_LENGTH : LMI_CCITT_CISCO_LENGTH;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3;
	u8 *data;
	int i = 0;

	if (dce && fullrep) {
		len += state(hdlc)->dce_pvc_count * (2 + stat_len);
		if (len > HDLC_MAX_MRU) {
			printk(KERN_WARNING "%s: Too many PVCs while sending "
			       "LMI full report\n", dev->name);
			return;
		}
	}

	skb = dev_alloc_skb(len);
	if (!skb) {
		printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n",
		       dev->name);
		return;
	}
	memset(skb->data, 0, len);
	skb_reserve(skb, 4);
	if (lmi == LMI_CISCO) {
		skb->protocol = __constant_htons(NLPID_CISCO_LMI);
		fr_hard_header(&skb, LMI_CISCO_DLCI);
	} else {
		skb->protocol = __constant_htons(NLPID_CCITT_ANSI_LMI);
		fr_hard_header(&skb, LMI_CCITT_ANSI_DLCI);
	}
	data = skb_tail_pointer(skb);
	data[i++] = LMI_CALLREF;
	data[i++] = dce ? LMI_STATUS : LMI_STATUS_ENQUIRY;
	if (lmi == LMI_ANSI)
		data[i++] = LMI_ANSI_LOCKSHIFT;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
		LMI_ANSI_CISCO_REPTYPE;
	data[i++] = LMI_REPT_LEN;
	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;
	data[i++] = lmi == LMI_CCITT ? LMI_CCITT_ALIVE : LMI_ANSI_CISCO_ALIVE;
	data[i++] = LMI_INTEG_LEN;
	data[i++] = state(hdlc)->txseq =
		fr_lmi_nextseq(state(hdlc)->txseq);
	data[i++] = state(hdlc)->rxseq;

	if (dce && fullrep) {
		while (pvc) {
			data[i++] = lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				LMI_ANSI_CISCO_PVCSTAT;
			data[i++] = stat_len;

			/* LMI start/restart */
			if (state(hdlc)->reliable && !pvc->state.exist) {
				pvc->state.exist = pvc->state.new = 1;
				fr_log_dlci_active(pvc);
			}

			/* ifconfig PVC up */
			if (pvc->open_count && !pvc->state.active &&
			    pvc->state.exist && !pvc->state.new) {
				pvc_carrier(1, pvc);
				pvc->state.active = 1;
				fr_log_dlci_active(pvc);
			}

			if (lmi == LMI_CISCO) {
				data[i] = pvc->dlci >> 8;
				data[i + 1] = pvc->dlci & 0xFF;
			} else {
				data[i] = (pvc->dlci >> 4) & 0x3F;
				data[i + 1] = ((pvc->dlci << 3) & 0x78) | 0x80;
				data[i + 2] = 0x80;
			}

			if (pvc->state.new)
				data[i + 2] |= 0x08;
			else if (pvc->state.active)
				data[i + 2] |= 0x02;

			i += stat_len;
			pvc = pvc->next;
		}
	}

	skb_put(skb, i);
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = dev;
	skb_reset_network_header(skb);

	dev_queue_xmit(skb);
}

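/* Propagate a change in LMI link reliability: mark the device dormant or not,
 * and bring every PVC down when the link becomes unreliable (or up, when no
 * LMI is in use and the link becomes reliable). */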
static void fr_set_link_state(int reliable, struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc = state(hdlc)->first_pvc;

	state(hdlc)->reliable = reliable;
	if (reliable) {
		netif_dormant_off(dev);
		state(hdlc)->n391cnt = 0; /* Request full status */
		state(hdlc)->dce_changed = 1;

		if (state(hdlc)->settings.lmi == LMI_NONE) {
			while (pvc) {	/* Activate all PVCs */
				pvc_carrier(1, pvc);
				pvc->state.exist = pvc->state.active = 1;
				pvc->state.new = 0;
				pvc = pvc->next;
			}
		}
	} else {
		netif_dormant_on(dev);
		while (pvc) {		/* Deactivate all PVCs */
			pvc_carrier(0, pvc);
			pvc->state.exist = pvc->state.active = 0;
			pvc->state.new = 0;
			if (!state(hdlc)->settings.dce)
				pvc->state.bandwidth = 0;
			pvc = pvc->next;
		}
	}
}

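/* Periodic LMI timer.  As DCE it checks that a STATUS ENQUIRY arrived within
 * T392 seconds of the last poll; as DTE it records missed replies in a
 * sliding window of the last N393 polls, declares the link unreliable once
 * N392 of them failed, sends the next STATUS ENQUIRY and re-arms after T391
 * seconds. */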
static void fr_timer(unsigned long arg)
{
	struct net_device *dev = (struct net_device *)arg;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	int i, cnt = 0, reliable;
	u32 list;

	if (state(hdlc)->settings.dce) {
		reliable = state(hdlc)->request &&
			time_before(jiffies, state(hdlc)->last_poll +
				    state(hdlc)->settings.t392 * HZ);
		state(hdlc)->request = 0;
	} else {
		state(hdlc)->last_errors <<= 1; /* Shift the list */
		if (state(hdlc)->request) {
			if (state(hdlc)->reliable)
				printk(KERN_INFO "%s: No LMI status reply "
				       "received\n", dev->name);
			state(hdlc)->last_errors |= 1;
		}

		list = state(hdlc)->last_errors;
		for (i = 0; i < state(hdlc)->settings.n393; i++, list >>= 1)
			cnt += (list & 1);	/* errors count */

		reliable = (cnt < state(hdlc)->settings.n392);
	}

	if (state(hdlc)->reliable != reliable) {
		printk(KERN_INFO "%s: Link %sreliable\n", dev->name,
		       reliable ? "" : "un");
		fr_set_link_state(reliable, dev);
	}

	if (state(hdlc)->settings.dce)
		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t392 * HZ;
	else {
		if (state(hdlc)->n391cnt)
			state(hdlc)->n391cnt--;

		fr_lmi_send(dev, state(hdlc)->n391cnt == 0);

		state(hdlc)->last_poll = jiffies;
		state(hdlc)->request = 1;

		state(hdlc)->timer.expires = jiffies +
			state(hdlc)->settings.t391 * HZ;
	}

	state(hdlc)->timer.function = fr_timer;
	state(hdlc)->timer.data = arg;
	add_timer(&state(hdlc)->timer);
}

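/* Validate a received LMI frame and act on it.  The header, call reference,
 * message type, report type and link integrity elements are checked and
 * sequence numbers are exchanged; then a DCE answers with a STATUS (a full
 * report if anything changed), while a DTE processing a full report marks the
 * listed PVCs as existing/active and deletes those no longer reported. */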
static int fr_lmi_recv(struct net_device *dev, struct sk_buff *skb)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc;
	u8 rxseq, txseq;
	int lmi = state(hdlc)->settings.lmi;
	int dce = state(hdlc)->settings.dce;
	int stat_len = (lmi == LMI_CISCO) ? 6 : 3, reptype, error, no_ram, i;

	if (skb->len < (lmi == LMI_ANSI ? LMI_ANSI_LENGTH :
			LMI_CCITT_CISCO_LENGTH)) {
		printk(KERN_INFO "%s: Short LMI frame\n", dev->name);
		return 1;
	}

	if (skb->data[3] != (lmi == LMI_CISCO ? NLPID_CISCO_LMI :
			     NLPID_CCITT_ANSI_LMI)) {
		printk(KERN_INFO "%s: Received non-LMI frame with LMI DLCI\n",
		       dev->name);
		return 1;
	}

	if (skb->data[4] != LMI_CALLREF) {
		printk(KERN_INFO "%s: Invalid LMI Call reference (0x%02X)\n",
		       dev->name, skb->data[4]);
		return 1;
	}

	if (skb->data[5] != (dce ? LMI_STATUS_ENQUIRY : LMI_STATUS)) {
		printk(KERN_INFO "%s: Invalid LMI Message type (0x%02X)\n",
		       dev->name, skb->data[5]);
		return 1;
	}

	if (lmi == LMI_ANSI) {
		if (skb->data[6] != LMI_ANSI_LOCKSHIFT) {
			printk(KERN_INFO "%s: Not ANSI locking shift in LMI"
			       " message (0x%02X)\n", dev->name, skb->data[6]);
			return 1;
		}
		i = 7;
	} else
		i = 6;

	if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_REPTYPE :
			     LMI_ANSI_CISCO_REPTYPE)) {
		printk(KERN_INFO "%s: Not an LMI Report type IE (0x%02X)\n",
		       dev->name, skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_REPT_LEN) {
		printk(KERN_INFO "%s: Invalid LMI Report type IE length"
		       " (%u)\n", dev->name, skb->data[i]);
		return 1;
	}

	reptype = skb->data[++i];
	if (reptype != LMI_INTEGRITY && reptype != LMI_FULLREP) {
		printk(KERN_INFO "%s: Unsupported LMI Report type (0x%02X)\n",
		       dev->name, reptype);
		return 1;
	}

	if (skb->data[++i] != (lmi == LMI_CCITT ? LMI_CCITT_ALIVE :
			       LMI_ANSI_CISCO_ALIVE)) {
		printk(KERN_INFO "%s: Not an LMI Link integrity verification"
		       " IE (0x%02X)\n", dev->name, skb->data[i]);
		return 1;
	}

	if (skb->data[++i] != LMI_INTEG_LEN) {
		printk(KERN_INFO "%s: Invalid LMI Link integrity verification"
		       " IE length (%u)\n", dev->name, skb->data[i]);
		return 1;
	}
	i++;

	state(hdlc)->rxseq = skb->data[i++]; /* TX sequence from peer */
	rxseq = skb->data[i++];	/* Should confirm our sequence */

	txseq = state(hdlc)->txseq;

	if (dce)
		state(hdlc)->last_poll = jiffies;

	error = 0;
	if (!state(hdlc)->reliable)
		error = 1;

	if (rxseq == 0 || rxseq != txseq) { /* Ask for full report next time */
		state(hdlc)->n391cnt = 0;
		error = 1;
	}

	if (dce) {
		if (state(hdlc)->fullrep_sent && !error) {
			/* Stop sending full report -
			   the last one has been confirmed by DTE */
			state(hdlc)->fullrep_sent = 0;
			pvc = state(hdlc)->first_pvc;
			while (pvc) {
				if (pvc->state.new) {
					pvc->state.new = 0;

					/* Tell DTE that new PVC is now active */
					state(hdlc)->dce_changed = 1;
				}
				pvc = pvc->next;
			}
		}

		if (state(hdlc)->dce_changed) {
			reptype = LMI_FULLREP;
			state(hdlc)->fullrep_sent = 1;
			state(hdlc)->dce_changed = 0;
		}

		state(hdlc)->request = 1; /* got request */
		fr_lmi_send(dev, reptype == LMI_FULLREP ? 1 : 0);
		return 0;
	}

	/* DTE */

	state(hdlc)->request = 0; /* got response, no request pending */

	if (error)
		return 0;

	if (reptype != LMI_FULLREP)
		return 0;

	pvc = state(hdlc)->first_pvc;

	while (pvc) {
		pvc->state.deleted = 1;
		pvc = pvc->next;
	}

	no_ram = 0;
	while (skb->len >= i + 2 + stat_len) {
		u16 dlci;
		u32 bw;
		unsigned int active, new;

		if (skb->data[i] != (lmi == LMI_CCITT ? LMI_CCITT_PVCSTAT :
				     LMI_ANSI_CISCO_PVCSTAT)) {
			printk(KERN_INFO "%s: Not an LMI PVC status IE"
			       " (0x%02X)\n", dev->name, skb->data[i]);
			return 1;
		}

		if (skb->data[++i] != stat_len) {
			printk(KERN_INFO "%s: Invalid LMI PVC status IE length"
			       " (%u)\n", dev->name, skb->data[i]);
			return 1;
		}
		i++;

		new = !!(skb->data[i + 2] & 0x08);
		active = !!(skb->data[i + 2] & 0x02);
		if (lmi == LMI_CISCO) {
			dlci = (skb->data[i] << 8) | skb->data[i + 1];
			bw = (skb->data[i + 3] << 16) |
				(skb->data[i + 4] << 8) |
				(skb->data[i + 5]);
		} else {
			dlci = ((skb->data[i] & 0x3F) << 4) |
				((skb->data[i + 1] & 0x78) >> 3);
			bw = 0;
		}

		pvc = add_pvc(dev, dlci);

		if (!pvc && !no_ram) {
			printk(KERN_WARNING
			       "%s: Memory squeeze on fr_lmi_recv()\n",
			       dev->name);
			no_ram = 1;
		}

		if (pvc) {
			pvc->state.exist = 1;
			pvc->state.deleted = 0;
			if (active != pvc->state.active ||
			    new != pvc->state.new ||
			    bw != pvc->state.bandwidth ||
			    !pvc->state.exist) {
				pvc->state.new = new;
				pvc->state.active = active;
				pvc->state.bandwidth = bw;
				pvc_carrier(active, pvc);
				fr_log_dlci_active(pvc);
			}
		}

		i += stat_len;
	}

	pvc = state(hdlc)->first_pvc;

	while (pvc) {
		if (pvc->state.deleted && pvc->state.exist) {
			pvc_carrier(0, pvc);
			pvc->state.active = pvc->state.new = 0;
			pvc->state.exist = 0;
			pvc->state.bandwidth = 0;
			fr_log_dlci_active(pvc);
		}
		pvc = pvc->next;
	}

	/* Next full report after N391 polls */
	state(hdlc)->n391cnt = state(hdlc)->settings.n391;

	return 0;
}

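/* Receive path for the FRAD device: frames on the LMI DLCI go to
 * fr_lmi_recv(); everything else is matched to a PVC by DLCI, the FECN/BECN
 * bits are noted, and the payload is handed to the IP/IPv6 stack or to the
 * bridged Ethernet interface according to its NLPID or SNAP header. */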
static int fr_rx(struct sk_buff *skb)
{
	struct net_device *frad = skb->dev;
	hdlc_device *hdlc = dev_to_hdlc(frad);
	fr_hdr *fh = (fr_hdr*)skb->data;
	u8 *data = skb->data;
	u16 dlci;
	pvc_device *pvc;
	struct net_device *dev = NULL;

	if (skb->len <= 4 || fh->ea1 || data[2] != FR_UI)
		goto rx_error;

	dlci = q922_to_dlci(skb->data);

	if ((dlci == LMI_CCITT_ANSI_DLCI &&
	     (state(hdlc)->settings.lmi == LMI_ANSI ||
	      state(hdlc)->settings.lmi == LMI_CCITT)) ||
	    (dlci == LMI_CISCO_DLCI &&
	     state(hdlc)->settings.lmi == LMI_CISCO)) {
		if (fr_lmi_recv(frad, skb))
			goto rx_error;
		dev_kfree_skb_any(skb);
		return NET_RX_SUCCESS;
	}

	pvc = find_pvc(hdlc, dlci);
	if (!pvc) {
#ifdef DEBUG_PKT
		printk(KERN_INFO "%s: No PVC for received frame's DLCI %d\n",
		       frad->name, dlci);
#endif
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (pvc->state.fecn != fh->fecn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d FECN O%s\n", frad->name,
		       dlci, fh->fecn ? "N" : "FF");
#endif
		pvc->state.fecn ^= 1;
	}

	if (pvc->state.becn != fh->becn) {
#ifdef DEBUG_ECN
		printk(KERN_DEBUG "%s: DLCI %d BECN O%s\n", frad->name,
		       dlci, fh->becn ? "N" : "FF");
#endif
		pvc->state.becn ^= 1;
	}

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL) {
		dev_to_hdlc(frad)->stats.rx_dropped++;
		return NET_RX_DROP;
	}

	if (data[3] == NLPID_IP) {
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IP);

	} else if (data[3] == NLPID_IPV6) {
		skb_pull(skb, 4); /* Remove 4-byte header (hdr, UI, NLPID) */
		dev = pvc->main;
		skb->protocol = htons(ETH_P_IPV6);

	} else if (skb->len > 10 && data[3] == FR_PAD &&
		   data[4] == NLPID_SNAP && data[5] == FR_PAD) {
		u16 oui = ntohs(*(__be16*)(data + 6));
		u16 pid = ntohs(*(__be16*)(data + 8));
		skb_pull(skb, 10);

		switch ((((u32)oui) << 16) | pid) {
		case ETH_P_ARP: /* routed frame with SNAP */
		case ETH_P_IPX:
		case ETH_P_IP:	/* a long variant */
		case ETH_P_IPV6:
			dev = pvc->main;
			skb->protocol = htons(pid);
			break;

		case 0x80C20007: /* bridged Ethernet frame */
			if ((dev = pvc->ether) != NULL)
				skb->protocol = eth_type_trans(skb, dev);
			break;

		default:
			printk(KERN_INFO "%s: Unsupported protocol, OUI=%x "
			       "PID=%x\n", frad->name, oui, pid);
			dev_kfree_skb_any(skb);
			return NET_RX_DROP;
		}
	} else {
		printk(KERN_INFO "%s: Unsupported protocol, NLPID=%x "
		       "length = %i\n", frad->name, data[3], skb->len);
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

	if (dev) {
		struct net_device_stats *stats = pvc_get_stats(dev);
		stats->rx_packets++; /* PVC traffic */
		stats->rx_bytes += skb->len;
		if (pvc->state.becn)
			stats->rx_compressed++;
		netif_rx(skb);
		return NET_RX_SUCCESS;
	} else {
		dev_kfree_skb_any(skb);
		return NET_RX_DROP;
	}

 rx_error:
	dev_to_hdlc(frad)->stats.rx_errors++; /* Mark error */
	dev_kfree_skb_any(skb);
	return NET_RX_DROP;
}

static void fr_start(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_start\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE) {
		state(hdlc)->reliable = 0;
		state(hdlc)->dce_changed = 1;
		state(hdlc)->request = 0;
		state(hdlc)->fullrep_sent = 0;
		state(hdlc)->last_errors = 0xFFFFFFFF;
		state(hdlc)->n391cnt = 0;
		state(hdlc)->txseq = state(hdlc)->rxseq = 0;

		init_timer(&state(hdlc)->timer);
		/* First poll after 1 s */
		state(hdlc)->timer.expires = jiffies + HZ;
		state(hdlc)->timer.function = fr_timer;
		state(hdlc)->timer.data = (unsigned long)dev;
		add_timer(&state(hdlc)->timer);
	} else
		fr_set_link_state(1, dev);
}

static void fr_stop(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
#ifdef DEBUG_LINK
	printk(KERN_DEBUG "fr_stop\n");
#endif
	if (state(hdlc)->settings.lmi != LMI_NONE)
		del_timer_sync(&state(hdlc)->timer);
	fr_set_link_state(0, dev);
}

static void fr_close(struct net_device *dev)
{
	hdlc_device *hdlc = dev_to_hdlc(dev);
	pvc_device *pvc = state(hdlc)->first_pvc;

	while (pvc) {		/* Shutdown all PVCs for this FRAD */
		if (pvc->main)
			dev_close(pvc->main);
		if (pvc->ether)
			dev_close(pvc->ether);
		pvc = pvc->next;
	}
}

static void pvc_setup(struct net_device *dev)
{
	dev->type = ARPHRD_DLCI;
	dev->flags = IFF_POINTOPOINT;
	dev->hard_header_len = 10;
	dev->addr_len = 2;
}

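/* Create the netdevice for a PVC: "pvceth%d" (Ethernet-like, random MAC) for
 * a bridged PVC, "pvc%d" otherwise; register it and hook it to the pvc_device
 * entry for this DLCI, allocating that entry if it does not exist yet. */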
static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	pvc_device *pvc = NULL;
	struct net_device *dev;
	int result, used;

	if ((pvc = add_pvc(frad, dlci)) == NULL) {
		printk(KERN_WARNING "%s: Memory squeeze on fr_add_pvc()\n",
		       frad->name);
		return -ENOBUFS;
	}

	if (*get_dev_p(pvc, type))
		return -EEXIST;

	used = pvc_is_used(pvc);

	if (type == ARPHRD_ETHER)
		dev = alloc_netdev(sizeof(struct pvc_desc), "pvceth%d",
				   ether_setup);
	else
		dev = alloc_netdev(sizeof(struct pvc_desc), "pvc%d", pvc_setup);

	if (!dev) {
		printk(KERN_WARNING "%s: Memory squeeze on fr_pvc()\n",
		       frad->name);
		delete_unused_pvcs(hdlc);
		return -ENOBUFS;
	}

	if (type == ARPHRD_ETHER)
		random_ether_addr(dev->dev_addr);
	else {
		*(__be16*)dev->dev_addr = htons(dlci);
		dlci_to_q922(dev->broadcast, dlci);
	}
	dev->hard_start_xmit = pvc_xmit;
	dev->get_stats = pvc_get_stats;
	dev->open = pvc_open;
	dev->stop = pvc_close;
	dev->do_ioctl = pvc_ioctl;
	dev->change_mtu = pvc_change_mtu;
	dev->mtu = HDLC_MAX_MTU;
	dev->tx_queue_len = 0;
	pvcdev_to_desc(dev)->pvc = pvc;

	result = dev_alloc_name(dev, dev->name);
	if (result < 0) {
		free_netdev(dev);
		delete_unused_pvcs(hdlc);
		return result;
	}

	if (register_netdevice(dev) != 0) {
		free_netdev(dev);
		delete_unused_pvcs(hdlc);
		return -EIO;
	}

	dev->destructor = free_netdev;
	*get_dev_p(pvc, type) = dev;
	if (!used) {
		state(hdlc)->dce_changed = 1;
		state(hdlc)->dce_pvc_count++;
	}
	return 0;
}

static int fr_del_pvc(hdlc_device *hdlc, unsigned int dlci, int type)
{
	pvc_device *pvc;
	struct net_device *dev;

	if ((pvc = find_pvc(hdlc, dlci)) == NULL)
		return -ENOENT;

	if ((dev = *get_dev_p(pvc, type)) == NULL)
		return -ENOENT;

	if (dev->flags & IFF_UP)
		return -EBUSY;		/* PVC in use */

	unregister_netdevice(dev); /* the destructor will free_netdev(dev) */
	*get_dev_p(pvc, type) = NULL;

	if (!pvc_is_used(pvc)) {
		state(hdlc)->dce_pvc_count--;
		state(hdlc)->dce_changed = 1;
	}
	delete_unused_pvcs(hdlc);
	return 0;
}

static void fr_destroy(struct net_device *frad)
{
	hdlc_device *hdlc = dev_to_hdlc(frad);
	pvc_device *pvc = state(hdlc)->first_pvc;
	state(hdlc)->first_pvc = NULL; /* All PVCs destroyed */
	state(hdlc)->dce_pvc_count = 0;
	state(hdlc)->dce_changed = 1;

	while (pvc) {
		pvc_device *next = pvc->next;
		/* destructors will free_netdev() main and ether */
		if (pvc->main)
			unregister_netdevice(pvc->main);

		if (pvc->ether)
			unregister_netdevice(pvc->ether);

		kfree(pvc);
		pvc = next;
	}
}

static struct hdlc_proto proto = {
	.close		= fr_close,
	.start		= fr_start,
	.stop		= fr_stop,
	.detach		= fr_destroy,
	.ioctl		= fr_ioctl,
	.netif_rx	= fr_rx,
	.module		= THIS_MODULE,
};

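/* ioctl entry point for the FRAD device: IF_GET_PROTO returns the current
 * fr_proto settings, IF_PROTO_FR validates new LMI/timer parameters and
 * attaches this protocol to the HDLC device, and the ADD/DEL (ETH) PVC
 * commands create or remove PVC interfaces for a given DLCI (1-1023). */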
static int fr_ioctl(struct net_device *dev, struct ifreq *ifr)
{
	fr_proto __user *fr_s = ifr->ifr_settings.ifs_ifsu.fr;
	const size_t size = sizeof(fr_proto);
	fr_proto new_settings;
	hdlc_device *hdlc = dev_to_hdlc(dev);
	fr_proto_pvc pvc;
	int result;

	switch (ifr->ifr_settings.type) {
	case IF_GET_PROTO:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;
		ifr->ifr_settings.type = IF_PROTO_FR;
		if (ifr->ifr_settings.size < size) {
			ifr->ifr_settings.size = size; /* data size wanted */
			return -ENOBUFS;
		}
		if (copy_to_user(fr_s, &state(hdlc)->settings, size))
			return -EFAULT;
		return 0;

	case IF_PROTO_FR:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (dev->flags & IFF_UP)
			return -EBUSY;

		if (copy_from_user(&new_settings, fr_s, size))
			return -EFAULT;

		if (new_settings.lmi == LMI_DEFAULT)
			new_settings.lmi = LMI_ANSI;

		if ((new_settings.lmi != LMI_NONE &&
		     new_settings.lmi != LMI_ANSI &&
		     new_settings.lmi != LMI_CCITT &&
		     new_settings.lmi != LMI_CISCO) ||
		    new_settings.t391 < 1 ||
		    new_settings.t392 < 2 ||
		    new_settings.n391 < 1 ||
		    new_settings.n392 < 1 ||
		    new_settings.n393 < new_settings.n392 ||
		    new_settings.n393 > 32 ||
		    (new_settings.dce != 0 &&
		     new_settings.dce != 1))
			return -EINVAL;

		result = hdlc->attach(dev, ENCODING_NRZ, PARITY_CRC16_PR1_CCITT);
		if (result)
			return result;

		if (dev_to_hdlc(dev)->proto != &proto) { /* Different proto */
			result = attach_hdlc_protocol(dev, &proto,
						      sizeof(struct frad_state));
			if (result)
				return result;
			state(hdlc)->first_pvc = NULL;
			state(hdlc)->dce_pvc_count = 0;
		}
		memcpy(&state(hdlc)->settings, &new_settings, size);

		dev->hard_start_xmit = hdlc->xmit;
		dev->type = ARPHRD_FRAD;
		return 0;

	case IF_PROTO_FR_ADD_PVC:
	case IF_PROTO_FR_DEL_PVC:
	case IF_PROTO_FR_ADD_ETH_PVC:
	case IF_PROTO_FR_DEL_ETH_PVC:
		if (dev_to_hdlc(dev)->proto != &proto) /* Different proto */
			return -EINVAL;

		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (copy_from_user(&pvc, ifr->ifr_settings.ifs_ifsu.fr_pvc,
				   sizeof(fr_proto_pvc)))
			return -EFAULT;

		if (pvc.dlci <= 0 || pvc.dlci >= 1024)
			return -EINVAL;	/* Only 10 bits, DLCI 0 reserved */

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_DEL_ETH_PVC)
			result = ARPHRD_ETHER; /* bridged Ethernet device */
		else
			result = ARPHRD_DLCI;

		if (ifr->ifr_settings.type == IF_PROTO_FR_ADD_PVC ||
		    ifr->ifr_settings.type == IF_PROTO_FR_ADD_ETH_PVC)
			return fr_add_pvc(dev, pvc.dlci, result);
		else
			return fr_del_pvc(hdlc, pvc.dlci, result);
	}

	return -EINVAL;
}

static int __init mod_init(void)
{
	register_hdlc_protocol(&proto);
	return 0;
}

static void __exit mod_exit(void)
{
	unregister_hdlc_protocol(&proto);
}

module_init(mod_init);
module_exit(mod_exit);
MODULE_AUTHOR("Krzysztof Halasa <khc@pm.waw.pl>");
MODULE_DESCRIPTION("Frame-Relay protocol support for generic HDLC");
MODULE_LICENSE("GPL v2");