/* net/decnet/dn_dev.c (scraped copy; line-number residue removed) */
  1. /*
  2. * DECnet An implementation of the DECnet protocol suite for the LINUX
  3. * operating system. DECnet is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * DECnet Device Layer
  7. *
  8. * Authors: Steve Whitehouse <SteveW@ACM.org>
  9. * Eduardo Marcelo Serrat <emserrat@geocities.com>
  10. *
  11. * Changes:
  12. * Steve Whitehouse : Devices now see incoming frames so they
  13. * can mark on who it came from.
  14. * Steve Whitehouse : Fixed bug in creating neighbours. Each neighbour
  15. * can now have a device specific setup func.
  16. * Steve Whitehouse : Added /proc/sys/net/decnet/conf/<dev>/
  17. * Steve Whitehouse : Fixed bug which sometimes killed timer
  18. * Steve Whitehouse : Multiple ifaddr support
  19. * Steve Whitehouse : SIOCGIFCONF is now a compile time option
  20. * Steve Whitehouse : /proc/sys/net/decnet/conf/<sys>/forwarding
  21. * Steve Whitehouse : Removed timer1 - it's a user space issue now
  22. * Patrick Caulfield : Fixed router hello message format
  23. * Steve Whitehouse : Got rid of constant sizes for blksize for
  24. * devices. All mtu based now.
  25. */
  26. #include <linux/capability.h>
  27. #include <linux/module.h>
  28. #include <linux/moduleparam.h>
  29. #include <linux/init.h>
  30. #include <linux/net.h>
  31. #include <linux/netdevice.h>
  32. #include <linux/proc_fs.h>
  33. #include <linux/seq_file.h>
  34. #include <linux/timer.h>
  35. #include <linux/string.h>
  36. #include <linux/if_addr.h>
  37. #include <linux/if_arp.h>
  38. #include <linux/if_ether.h>
  39. #include <linux/skbuff.h>
  40. #include <linux/sysctl.h>
  41. #include <linux/notifier.h>
  42. #include <asm/uaccess.h>
  43. #include <asm/system.h>
  44. #include <net/neighbour.h>
  45. #include <net/dst.h>
  46. #include <net/flow.h>
  47. #include <net/fib_rules.h>
  48. #include <net/netlink.h>
  49. #include <net/dn.h>
  50. #include <net/dn_dev.h>
  51. #include <net/dn_route.h>
  52. #include <net/dn_neigh.h>
  53. #include <net/dn_fib.h>
/*
 * SIOC[GS]IFADDR pass a struct ifreq whose sockaddr slot actually holds
 * a (larger) sockaddr_dn; this is the size really copied to/from user
 * space in dn_dev_ioctl().
 */
#define DN_IFREQ_SIZE (sizeof(struct ifreq) - sizeof(struct sockaddr) + sizeof(struct sockaddr_dn))

/* Well-known DECnet multicast MACs: all-endnodes and all-routers. */
static char dn_rt_all_end_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x04,0x00,0x00};
static char dn_rt_all_rt_mcast[ETH_ALEN] = {0xAB,0x00,0x00,0x03,0x00,0x00};
/* Default neighbour id placed in endnode hellos (AA-00-04-00 prefix). */
static char dn_hiord[ETH_ALEN] = {0xAA,0x00,0x04,0x00,0x00,0x00};
/* Routing layer version advertised in the hello "tiver" field. */
static unsigned char dn_eco_version[3] = {0x02,0x00,0x00};

extern struct neigh_table dn_neigh_table;

/*
 * decnet_address is kept in network order.
 */
__le16 decnet_address = 0;

/* Guards decnet_default_device (read/write lock below). */
static DEFINE_RWLOCK(dndev_lock);
static struct net_device *decnet_default_device;
/* Notifier chain fired on DECnet address add/remove (NETDEV_UP/DOWN). */
static BLOCKING_NOTIFIER_HEAD(dnaddr_chain);

static struct dn_dev *dn_dev_create(struct net_device *dev, int *err);
static void dn_dev_delete(struct net_device *dev);
static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa);

static int dn_eth_up(struct net_device *);
static void dn_eth_down(struct net_device *);
static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa);
static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa);
/*
 * Default parameters, one entry per supported link type (presumably
 * selected by device ->type when DECnet state is created — the lookup
 * code is outside this chunk).  ->t2/->t3 are timer intervals in
 * seconds, ->timer3 is the hello transmit routine for the link type,
 * and ->up/->down are optional bring-up/teardown hooks (Ethernet only
 * here).
 */
static struct dn_dev_parms dn_dev_list[] = {
{
	.type = ARPHRD_ETHER, /* Ethernet */
	.mode = DN_DEV_BCAST,
	.state = DN_DEV_S_RU,
	.t2 = 1,
	.t3 = 10,
	.name = "ethernet",
	.ctl_name = NET_DECNET_CONF_ETHER,
	.up = dn_eth_up,
	.down = dn_eth_down,
	.timer3 = dn_send_brd_hello,
},
{
	.type = ARPHRD_IPGRE, /* DECnet tunneled over GRE in IP */
	.mode = DN_DEV_BCAST,
	.state = DN_DEV_S_RU,
	.t2 = 1,
	.t3 = 10,
	.name = "ipgre",
	.ctl_name = NET_DECNET_CONF_GRE,
	.timer3 = dn_send_brd_hello,
},
#if 0
{
	.type = ARPHRD_X25, /* Bog standard X.25 */
	.mode = DN_DEV_UCAST,
	.state = DN_DEV_S_DS,
	.t2 = 1,
	.t3 = 120,
	.name = "x25",
	.ctl_name = NET_DECNET_CONF_X25,
	.timer3 = dn_send_ptp_hello,
},
#endif
#if 0
{
	.type = ARPHRD_PPP, /* DECnet over PPP */
	.mode = DN_DEV_BCAST,
	.state = DN_DEV_S_RU,
	.t2 = 1,
	.t3 = 10,
	.name = "ppp",
	.ctl_name = NET_DECNET_CONF_PPP,
	.timer3 = dn_send_brd_hello,
},
#endif
{
	.type = ARPHRD_DDCMP, /* DECnet over DDCMP */
	.mode = DN_DEV_UCAST,
	.state = DN_DEV_S_DS,
	.t2 = 1,
	.t3 = 120,
	.name = "ddcmp",
	.ctl_name = NET_DECNET_CONF_DDCMP,
	.timer3 = dn_send_ptp_hello,
},
{
	.type = ARPHRD_LOOPBACK, /* Loopback interface - always last */
	.mode = DN_DEV_BCAST,
	.state = DN_DEV_S_RU,
	.t2 = 1,
	.t3 = 10,
	.name = "loopback",
	.ctl_name = NET_DECNET_CONF_LOOPBACK,
	.timer3 = dn_send_brd_hello,
}
};
  142. #define DN_DEV_LIST_SIZE (sizeof(dn_dev_list)/sizeof(struct dn_dev_parms))
  143. #define DN_DEV_PARMS_OFFSET(x) ((int) ((char *) &((struct dn_dev_parms *)0)->x))
  144. #ifdef CONFIG_SYSCTL
/* Clamp ranges for the integer sysctls below (used via sysctl_intvec). */
static int min_t2[] = { 1 };
static int max_t2[] = { 60 }; /* No max specified, but this seems sensible */
static int min_t3[] = { 1 };
static int max_t3[] = { 8191 }; /* Must fit in 16 bits when multiplied by BCT3MULT or T3MULT */

static int min_priority[1]; /* zero-initialised: minimum priority is 0 */
static int max_priority[] = { 127 }; /* From DECnet spec */

/* Forwarding needs custom handlers: changing it cycles the device. */
static int dn_forwarding_proc(ctl_table *, int, struct file *,
			void __user *, size_t *, loff_t *);
static int dn_forwarding_sysctl(ctl_table *table, int __user *name, int nlen,
			void __user *oldval, size_t __user *oldlenp,
			void __user *newval, size_t newlen,
			void **context);
/*
 * Template for the per-device sysctl tree
 * /proc/sys/net/decnet/conf/<dev>/{forwarding,priority,t2,t3}.
 * dn_dev_sysctl_register() kmemdup()s this whole struct per device and
 * fixes up the ->child links to point inside the copy.  The ->data
 * fields of dn_dev_vars hold struct dn_dev_parms member *offsets*
 * (DN_DEV_PARMS_OFFSET), rebased onto the real parms at registration.
 */
static struct dn_dev_sysctl_table {
	struct ctl_table_header *sysctl_header;
	ctl_table dn_dev_vars[5];
	ctl_table dn_dev_dev[2];
	ctl_table dn_dev_conf_dir[2];
	ctl_table dn_dev_proto_dir[2];
	ctl_table dn_dev_root_dir[2];
} dn_dev_sysctl = {
	NULL,
	{
	{
		.ctl_name = NET_DECNET_CONF_DEV_FORWARDING,
		.procname = "forwarding",
		.data = (void *)DN_DEV_PARMS_OFFSET(forwarding),
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = dn_forwarding_proc,
		.strategy = dn_forwarding_sysctl,
	},
	{
		.ctl_name = NET_DECNET_CONF_DEV_PRIORITY,
		.procname = "priority",
		.data = (void *)DN_DEV_PARMS_OFFSET(priority),
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.strategy = sysctl_intvec,
		.extra1 = &min_priority,
		.extra2 = &max_priority
	},
	{
		.ctl_name = NET_DECNET_CONF_DEV_T2,
		.procname = "t2",
		.data = (void *)DN_DEV_PARMS_OFFSET(t2),
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.strategy = sysctl_intvec,
		.extra1 = &min_t2,
		.extra2 = &max_t2
	},
	{
		.ctl_name = NET_DECNET_CONF_DEV_T3,
		.procname = "t3",
		.data = (void *)DN_DEV_PARMS_OFFSET(t3),
		.maxlen = sizeof(int),
		.mode = 0644,
		.proc_handler = proc_dointvec_minmax,
		.strategy = sysctl_intvec,
		.extra1 = &min_t3,
		.extra2 = &max_t3
	},
	{0}
	},
	/* <dev>/ directory: name/ctl_name filled in at registration. */
	{{
		.ctl_name = 0,
		.procname = "",
		.mode = 0555,
		.child = dn_dev_sysctl.dn_dev_vars
	}, {0}},
	{{
		.ctl_name = NET_DECNET_CONF,
		.procname = "conf",
		.mode = 0555,
		.child = dn_dev_sysctl.dn_dev_dev
	}, {0}},
	{{
		.ctl_name = NET_DECNET,
		.procname = "decnet",
		.mode = 0555,
		.child = dn_dev_sysctl.dn_dev_conf_dir
	}, {0}},
	{{
		.ctl_name = CTL_NET,
		.procname = "net",
		.mode = 0555,
		.child = dn_dev_sysctl.dn_dev_proto_dir
	}, {0}}
};
/*
 * Clone the template sysctl tree for one device (or for a link-type
 * default entry when dev == NULL) and register it.  Failure is silent
 * and non-fatal: the device simply gets no sysctl entries.
 */
static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms)
{
	struct dn_dev_sysctl_table *t;
	int i;

	t = kmemdup(&dn_dev_sysctl, sizeof(*t), GFP_KERNEL);
	if (t == NULL)
		return;

	/* Rebase the stored member offsets onto this device's parms. */
	for(i = 0; i < ARRAY_SIZE(t->dn_dev_vars) - 1; i++) {
		long offset = (long)t->dn_dev_vars[i].data;
		t->dn_dev_vars[i].data = ((char *)parms) + offset;
		t->dn_dev_vars[i].de = NULL;
	}

	if (dev) {
		t->dn_dev_dev[0].procname = dev->name;
		t->dn_dev_dev[0].ctl_name = dev->ifindex;
	} else {
		t->dn_dev_dev[0].procname = parms->name;
		t->dn_dev_dev[0].ctl_name = parms->ctl_name;
	}

	/* Re-link the copied directory chain to point inside the copy. */
	t->dn_dev_dev[0].child = t->dn_dev_vars;
	t->dn_dev_dev[0].de = NULL;
	t->dn_dev_conf_dir[0].child = t->dn_dev_dev;
	t->dn_dev_conf_dir[0].de = NULL;
	t->dn_dev_proto_dir[0].child = t->dn_dev_conf_dir;
	t->dn_dev_proto_dir[0].de = NULL;
	t->dn_dev_root_dir[0].child = t->dn_dev_proto_dir;
	t->dn_dev_root_dir[0].de = NULL;
	/* The forwarding handlers locate the device via vars[0].extra1. */
	t->dn_dev_vars[0].extra1 = (void *)dev;

	t->sysctl_header = register_sysctl_table(t->dn_dev_root_dir, 0);
	if (t->sysctl_header == NULL)
		kfree(t);
	else
		parms->sysctl = t;
}
  270. static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
  271. {
  272. if (parms->sysctl) {
  273. struct dn_dev_sysctl_table *t = parms->sysctl;
  274. parms->sysctl = NULL;
  275. unregister_sysctl_table(t->sysctl_header);
  276. kfree(t);
  277. }
  278. }
/*
 * proc handler for conf/<dev>/forwarding.  Clamps the written value to
 * the range 0..2, then cycles the device's down()/up() hooks so the
 * link switches between endnode and router operation.  table->extra1
 * carries the struct net_device (set in dn_dev_sysctl_register()).
 */
static int dn_forwarding_proc(ctl_table *table, int write,
			struct file *filep,
			void __user *buffer,
			size_t *lenp, loff_t *ppos)
{
#ifdef CONFIG_DECNET_ROUTER
	struct net_device *dev = table->extra1;
	struct dn_dev *dn_db;
	int err;
	int tmp, old;

	if (table->extra1 == NULL)
		return -EINVAL;

	dn_db = dev->dn_ptr;
	old = dn_db->parms.forwarding;

	err = proc_dointvec(table, write, filep, buffer, lenp, ppos);

	if ((err >= 0) && write) {
		if (dn_db->parms.forwarding < 0)
			dn_db->parms.forwarding = 0;
		if (dn_db->parms.forwarding > 2)
			dn_db->parms.forwarding = 2;
		/*
		 * What an ugly hack this is... its works, just. It
		 * would be nice if sysctl/proc were just that little
		 * bit more flexible so I don't have to write a special
		 * routine, or suffer hacks like this - SJW
		 */
		tmp = dn_db->parms.forwarding;
		dn_db->parms.forwarding = old; /* take the link down in the old mode */
		if (dn_db->parms.down)
			dn_db->parms.down(dev);
		dn_db->parms.forwarding = tmp; /* ...and bring it up in the new one */
		if (dn_db->parms.up)
			dn_db->parms.up(dev);
	}

	return err;
#else
	return -EINVAL;
#endif
}
/*
 * Binary-sysctl strategy routine for conf/<dev>/forwarding.  Validates
 * the new value (must be an int in 0..2) and applies it with the same
 * down()/up() cycle as the proc handler.  Note the old value is never
 * copied out (oldval is ignored).
 */
static int dn_forwarding_sysctl(ctl_table *table, int __user *name, int nlen,
			void __user *oldval, size_t __user *oldlenp,
			void __user *newval, size_t newlen,
			void **context)
{
#ifdef CONFIG_DECNET_ROUTER
	struct net_device *dev = table->extra1;
	struct dn_dev *dn_db;
	int value;

	if (table->extra1 == NULL)
		return -EINVAL;

	dn_db = dev->dn_ptr;

	if (newval && newlen) {
		if (newlen != sizeof(int))
			return -EINVAL;

		if (get_user(value, (int __user *)newval))
			return -EFAULT;
		if (value < 0)
			return -EINVAL;
		if (value > 2)
			return -EINVAL;

		if (dn_db->parms.down)
			dn_db->parms.down(dev);
		dn_db->parms.forwarding = value;
		if (dn_db->parms.up)
			dn_db->parms.up(dev);
	}

	return 0;
#else
	return -EINVAL;
#endif
}
  350. #else /* CONFIG_SYSCTL */
/* Sysctl support compiled out: the registration hooks become no-ops. */
static void dn_dev_sysctl_unregister(struct dn_dev_parms *parms)
{
}

static void dn_dev_sysctl_register(struct net_device *dev, struct dn_dev_parms *parms)
{
}
  357. #endif /* CONFIG_SYSCTL */
  358. static inline __u16 mtu2blksize(struct net_device *dev)
  359. {
  360. u32 blksize = dev->mtu;
  361. if (blksize > 0xffff)
  362. blksize = 0xffff;
  363. if (dev->type == ARPHRD_ETHER ||
  364. dev->type == ARPHRD_PPP ||
  365. dev->type == ARPHRD_IPGRE ||
  366. dev->type == ARPHRD_LOOPBACK)
  367. blksize -= 2;
  368. return (__u16)blksize;
  369. }
  370. static struct dn_ifaddr *dn_dev_alloc_ifa(void)
  371. {
  372. struct dn_ifaddr *ifa;
  373. ifa = kzalloc(sizeof(*ifa), GFP_KERNEL);
  374. return ifa;
  375. }
/* Release an address record allocated by dn_dev_alloc_ifa(). */
static __inline__ void dn_dev_free_ifa(struct dn_ifaddr *ifa)
{
	kfree(ifa);
}
/*
 * Unlink *ifap from its device's address list and fire the
 * RTM_DELADDR/NETDEV_DOWN notifications.  Caller must hold RTNL.
 * If @destroy, the ifa is freed, and when it was the last address the
 * whole dn_dev is torn down as well.
 */
static void dn_dev_del_ifa(struct dn_dev *dn_db, struct dn_ifaddr **ifap, int destroy)
{
	struct dn_ifaddr *ifa1 = *ifap;
	unsigned char mac_addr[6];
	struct net_device *dev = dn_db->dev;

	ASSERT_RTNL();

	*ifap = ifa1->ifa_next;

	if (dn_db->dev->type == ARPHRD_ETHER) {
		/* Drop the MAC derived from this DECnet address from the
		 * multicast list — unless it is the one implied by the
		 * interface's own hardware address. */
		if (ifa1->ifa_local != dn_eth2dn(dev->dev_addr)) {
			dn_dn2eth(mac_addr, ifa1->ifa_local);
			dev_mc_delete(dev, mac_addr, ETH_ALEN, 0);
		}
	}

	dn_ifaddr_notify(RTM_DELADDR, ifa1);
	blocking_notifier_call_chain(&dnaddr_chain, NETDEV_DOWN, ifa1);

	if (destroy) {
		dn_dev_free_ifa(ifa1);

		if (dn_db->ifa_list == NULL)
			dn_dev_delete(dn_db->dev);
	}
}
/*
 * Link @ifa at the head of @dn_db's address list and fire the
 * RTM_NEWADDR/NETDEV_UP notifications.  Caller must hold RTNL.
 * Returns -EEXIST if the address is already configured; 0 on success.
 */
static int dn_dev_insert_ifa(struct dn_dev *dn_db, struct dn_ifaddr *ifa)
{
	struct net_device *dev = dn_db->dev;
	struct dn_ifaddr *ifa1;
	unsigned char mac_addr[6];

	ASSERT_RTNL();

	/* Check for duplicates */
	for(ifa1 = dn_db->ifa_list; ifa1; ifa1 = ifa1->ifa_next) {
		if (ifa1->ifa_local == ifa->ifa_local)
			return -EEXIST;
	}

	if (dev->type == ARPHRD_ETHER) {
		/* Listen on the MAC derived from the DECnet address,
		 * unless it matches the interface's own address. */
		if (ifa->ifa_local != dn_eth2dn(dev->dev_addr)) {
			dn_dn2eth(mac_addr, ifa->ifa_local);
			dev_mc_add(dev, mac_addr, ETH_ALEN, 0);
			dev_mc_upload(dev);
		}
	}

	ifa->ifa_next = dn_db->ifa_list;
	dn_db->ifa_list = ifa;

	dn_ifaddr_notify(RTM_NEWADDR, ifa);
	blocking_notifier_call_chain(&dnaddr_chain, NETDEV_UP, ifa);

	return 0;
}
  425. static int dn_dev_set_ifa(struct net_device *dev, struct dn_ifaddr *ifa)
  426. {
  427. struct dn_dev *dn_db = dev->dn_ptr;
  428. int rv;
  429. if (dn_db == NULL) {
  430. int err;
  431. dn_db = dn_dev_create(dev, &err);
  432. if (dn_db == NULL)
  433. return err;
  434. }
  435. ifa->ifa_dev = dn_db;
  436. if (dev->flags & IFF_LOOPBACK)
  437. ifa->ifa_scope = RT_SCOPE_HOST;
  438. rv = dn_dev_insert_ifa(dn_db, ifa);
  439. if (rv)
  440. dn_dev_free_ifa(ifa);
  441. return rv;
  442. }
/*
 * Handle the DECnet SIOCGIFADDR/SIOCSIFADDR ioctls.  The ifreq's
 * sockaddr slot really holds a sockaddr_dn, hence DN_IFREQ_SIZE
 * copies instead of sizeof(struct ifreq).  Runs under RTNL.
 */
int dn_dev_ioctl(unsigned int cmd, void __user *arg)
{
	char buffer[DN_IFREQ_SIZE];
	struct ifreq *ifr = (struct ifreq *)buffer;
	struct sockaddr_dn *sdn = (struct sockaddr_dn *)&ifr->ifr_addr;
	struct dn_dev *dn_db;
	struct net_device *dev;
	struct dn_ifaddr *ifa = NULL, **ifap = NULL;
	int ret = 0;

	if (copy_from_user(ifr, arg, DN_IFREQ_SIZE))
		return -EFAULT;
	ifr->ifr_name[IFNAMSIZ-1] = 0;	/* force NUL-terminated name */

#ifdef CONFIG_KMOD
	dev_load(ifr->ifr_name);
#endif

	/* Permission/argument checks before taking RTNL. */
	switch(cmd) {
	case SIOCGIFADDR:
		break;
	case SIOCSIFADDR:
		if (!capable(CAP_NET_ADMIN))
			return -EACCES;
		if (sdn->sdn_family != AF_DECnet)
			return -EINVAL;
		break;
	default:
		return -EINVAL;
	}

	rtnl_lock();

	if ((dev = __dev_get_by_name(ifr->ifr_name)) == NULL) {
		ret = -ENODEV;
		goto done;
	}

	/* Locate the address whose label matches the requested name;
	 * ifap keeps the link pointer so the entry can be unlinked. */
	if ((dn_db = dev->dn_ptr) != NULL) {
		for (ifap = &dn_db->ifa_list; (ifa=*ifap) != NULL; ifap = &ifa->ifa_next)
			if (strcmp(ifr->ifr_name, ifa->ifa_label) == 0)
				break;
	}

	if (ifa == NULL && cmd != SIOCSIFADDR) {
		ret = -EADDRNOTAVAIL;
		goto done;
	}

	switch(cmd) {
	case SIOCGIFADDR:
		*((__le16 *)sdn->sdn_nodeaddr) = ifa->ifa_local;
		goto rarok;

	case SIOCSIFADDR:
		if (!ifa) {
			/* No existing address: create one labelled
			 * with the device name. */
			if ((ifa = dn_dev_alloc_ifa()) == NULL) {
				ret = -ENOBUFS;
				break;
			}
			memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
		} else {
			/* Re-setting the same address is a no-op. */
			if (ifa->ifa_local == dn_saddr2dn(sdn))
				break;
			dn_dev_del_ifa(dn_db, ifap, 0);
		}

		ifa->ifa_local = ifa->ifa_address = dn_saddr2dn(sdn);

		ret = dn_dev_set_ifa(dev, ifa);
	}
done:
	rtnl_unlock();

	return ret;
rarok:
	/* Successful read: copy the filled-in ifreq back to user space. */
	if (copy_to_user(arg, ifr, DN_IFREQ_SIZE))
		ret = -EFAULT;
	goto done;
}
  511. struct net_device *dn_dev_get_default(void)
  512. {
  513. struct net_device *dev;
  514. read_lock(&dndev_lock);
  515. dev = decnet_default_device;
  516. if (dev) {
  517. if (dev->dn_ptr)
  518. dev_hold(dev);
  519. else
  520. dev = NULL;
  521. }
  522. read_unlock(&dndev_lock);
  523. return dev;
  524. }
  525. int dn_dev_set_default(struct net_device *dev, int force)
  526. {
  527. struct net_device *old = NULL;
  528. int rv = -EBUSY;
  529. if (!dev->dn_ptr)
  530. return -ENODEV;
  531. write_lock(&dndev_lock);
  532. if (force || decnet_default_device == NULL) {
  533. old = decnet_default_device;
  534. decnet_default_device = dev;
  535. rv = 0;
  536. }
  537. write_unlock(&dndev_lock);
  538. if (old)
  539. dev_put(old);
  540. return rv;
  541. }
  542. static void dn_dev_check_default(struct net_device *dev)
  543. {
  544. write_lock(&dndev_lock);
  545. if (dev == decnet_default_device) {
  546. decnet_default_device = NULL;
  547. } else {
  548. dev = NULL;
  549. }
  550. write_unlock(&dndev_lock);
  551. if (dev)
  552. dev_put(dev);
  553. }
  554. static struct dn_dev *dn_dev_by_index(int ifindex)
  555. {
  556. struct net_device *dev;
  557. struct dn_dev *dn_dev = NULL;
  558. dev = dev_get_by_index(ifindex);
  559. if (dev) {
  560. dn_dev = dev->dn_ptr;
  561. dev_put(dev);
  562. }
  563. return dn_dev;
  564. }
/* Netlink attribute policy for RTM_{NEW,DEL}ADDR: DECnet addresses are
 * 16 bits; the label is bounded by the interface name size. */
static struct nla_policy dn_ifa_policy[IFA_MAX+1] __read_mostly = {
	[IFA_ADDRESS] = { .type = NLA_U16 },
	[IFA_LOCAL] = { .type = NLA_U16 },
	[IFA_LABEL] = { .type = NLA_STRING,
			.len = IFNAMSIZ - 1 },
};
  571. static int dn_nl_deladdr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
  572. {
  573. struct nlattr *tb[IFA_MAX+1];
  574. struct dn_dev *dn_db;
  575. struct ifaddrmsg *ifm;
  576. struct dn_ifaddr *ifa, **ifap;
  577. int err = -EADDRNOTAVAIL;
  578. err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
  579. if (err < 0)
  580. goto errout;
  581. ifm = nlmsg_data(nlh);
  582. if ((dn_db = dn_dev_by_index(ifm->ifa_index)) == NULL)
  583. goto errout;
  584. for (ifap = &dn_db->ifa_list; (ifa = *ifap); ifap = &ifa->ifa_next) {
  585. if (tb[IFA_LOCAL] &&
  586. nla_memcmp(tb[IFA_LOCAL], &ifa->ifa_local, 2))
  587. continue;
  588. if (tb[IFA_LABEL] && nla_strcmp(tb[IFA_LABEL], ifa->ifa_label))
  589. continue;
  590. dn_dev_del_ifa(dn_db, ifap, 1);
  591. return 0;
  592. }
  593. errout:
  594. return err;
  595. }
  596. static int dn_nl_newaddr(struct sk_buff *skb, struct nlmsghdr *nlh, void *arg)
  597. {
  598. struct nlattr *tb[IFA_MAX+1];
  599. struct net_device *dev;
  600. struct dn_dev *dn_db;
  601. struct ifaddrmsg *ifm;
  602. struct dn_ifaddr *ifa;
  603. int err;
  604. err = nlmsg_parse(nlh, sizeof(*ifm), tb, IFA_MAX, dn_ifa_policy);
  605. if (err < 0)
  606. return err;
  607. if (tb[IFA_LOCAL] == NULL)
  608. return -EINVAL;
  609. ifm = nlmsg_data(nlh);
  610. if ((dev = __dev_get_by_index(ifm->ifa_index)) == NULL)
  611. return -ENODEV;
  612. if ((dn_db = dev->dn_ptr) == NULL) {
  613. int err;
  614. dn_db = dn_dev_create(dev, &err);
  615. if (!dn_db)
  616. return err;
  617. }
  618. if ((ifa = dn_dev_alloc_ifa()) == NULL)
  619. return -ENOBUFS;
  620. if (tb[IFA_ADDRESS] == NULL)
  621. tb[IFA_ADDRESS] = tb[IFA_LOCAL];
  622. ifa->ifa_local = nla_get_le16(tb[IFA_LOCAL]);
  623. ifa->ifa_address = nla_get_le16(tb[IFA_ADDRESS]);
  624. ifa->ifa_flags = ifm->ifa_flags;
  625. ifa->ifa_scope = ifm->ifa_scope;
  626. ifa->ifa_dev = dn_db;
  627. if (tb[IFA_LABEL])
  628. nla_strlcpy(ifa->ifa_label, tb[IFA_LABEL], IFNAMSIZ);
  629. else
  630. memcpy(ifa->ifa_label, dev->name, IFNAMSIZ);
  631. err = dn_dev_insert_ifa(dn_db, ifa);
  632. if (err)
  633. dn_dev_free_ifa(ifa);
  634. return err;
  635. }
  636. static inline size_t dn_ifaddr_nlmsg_size(void)
  637. {
  638. return NLMSG_ALIGN(sizeof(struct ifaddrmsg))
  639. + nla_total_size(IFNAMSIZ) /* IFA_LABEL */
  640. + nla_total_size(2) /* IFA_ADDRESS */
  641. + nla_total_size(2); /* IFA_LOCAL */
  642. }
/*
 * Fill one rtnetlink address message for @ifa into @skb.  The
 * NLA_PUT_* macros jump to nla_put_failure when the skb runs out of
 * room, in which case the partial message is cancelled and a negative
 * value returned.
 */
static int dn_nl_fill_ifaddr(struct sk_buff *skb, struct dn_ifaddr *ifa,
			     u32 pid, u32 seq, int event, unsigned int flags)
{
	struct ifaddrmsg *ifm;
	struct nlmsghdr *nlh;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*ifm), flags);
	if (nlh == NULL)
		return -ENOBUFS;

	ifm = nlmsg_data(nlh);
	ifm->ifa_family = AF_DECnet;
	ifm->ifa_prefixlen = 16;	/* DECnet addresses are 16 bits */
	ifm->ifa_flags = ifa->ifa_flags | IFA_F_PERMANENT;
	ifm->ifa_scope = ifa->ifa_scope;
	ifm->ifa_index = ifa->ifa_dev->dev->ifindex;

	if (ifa->ifa_address)
		NLA_PUT_LE16(skb, IFA_ADDRESS, ifa->ifa_address);
	if (ifa->ifa_local)
		NLA_PUT_LE16(skb, IFA_LOCAL, ifa->ifa_local);
	if (ifa->ifa_label[0])
		NLA_PUT_STRING(skb, IFA_LABEL, ifa->ifa_label);

	return nlmsg_end(skb, nlh);

nla_put_failure:
	return nlmsg_cancel(skb, nlh);
}
/*
 * Broadcast an address add/delete @event to RTNLGRP_DECnet_IFADDR
 * listeners.  Allocation or notification failure is recorded via
 * rtnl_set_sk_err() so interested sockets see the error.
 */
static void dn_ifaddr_notify(int event, struct dn_ifaddr *ifa)
{
	struct sk_buff *skb;
	int err = -ENOBUFS;

	skb = alloc_skb(dn_ifaddr_nlmsg_size(), GFP_KERNEL);
	if (skb == NULL)
		goto errout;

	err = dn_nl_fill_ifaddr(skb, ifa, 0, 0, event, 0);
	/* failure implies BUG in dn_ifaddr_nlmsg_size() */
	BUG_ON(err < 0);

	err = rtnl_notify(skb, 0, RTNLGRP_DECnet_IFADDR, NULL, GFP_KERNEL);
errout:
	if (err < 0)
		rtnl_set_sk_err(RTNLGRP_DECnet_IFADDR, err);
}
/*
 * RTM_GETADDR dump callback.  cb->args[0] holds how many devices were
 * already fully dumped, cb->args[1] how many addresses were dumped on
 * the device where the previous pass stopped; both are updated on exit
 * so the next invocation resumes where this one left off.
 */
static int dn_nl_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
{
	int idx, dn_idx = 0, skip_ndevs, skip_naddr;
	struct net_device *dev;
	struct dn_dev *dn_db;
	struct dn_ifaddr *ifa;

	skip_ndevs = cb->args[0];
	skip_naddr = cb->args[1];

	read_lock(&dev_base_lock);
	for (dev = dev_base, idx = 0; dev; dev = dev->next, idx++) {
		if (idx < skip_ndevs)
			continue;
		else if (idx > skip_ndevs) {
			/* Only skip over addresses for first dev dumped
			 * in this iteration (idx == skip_ndevs) */
			skip_naddr = 0;
		}

		if ((dn_db = dev->dn_ptr) == NULL)
			continue;

		for (ifa = dn_db->ifa_list, dn_idx = 0; ifa;
		     ifa = ifa->ifa_next, dn_idx++) {
			if (dn_idx < skip_naddr)
				continue;

			/* Stop as soon as the skb is full; the cursor
			 * saved below lets the dump resume here. */
			if (dn_nl_fill_ifaddr(skb, ifa, NETLINK_CB(cb->skb).pid,
					      cb->nlh->nlmsg_seq, RTM_NEWADDR,
					      NLM_F_MULTI) < 0)
				goto done;
		}
	}
done:
	read_unlock(&dev_base_lock);
	cb->args[0] = idx;
	cb->args[1] = dn_idx;

	return skb->len;
}
  717. static int dn_dev_get_first(struct net_device *dev, __le16 *addr)
  718. {
  719. struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
  720. struct dn_ifaddr *ifa;
  721. int rv = -ENODEV;
  722. if (dn_db == NULL)
  723. goto out;
  724. ifa = dn_db->ifa_list;
  725. if (ifa != NULL) {
  726. *addr = ifa->ifa_local;
  727. rv = 0;
  728. }
  729. out:
  730. return rv;
  731. }
  732. /*
  733. * Find a default address to bind to.
  734. *
  735. * This is one of those areas where the initial VMS concepts don't really
  736. * map onto the Linux concepts, and since we introduced multiple addresses
  737. * per interface we have to cope with slightly odd ways of finding out what
  738. * "our address" really is. Mostly it's not a problem; for this we just guess
  739. * a sensible default. Eventually the routing code will take care of all the
  740. * nasties for us I hope.
  741. */
int dn_dev_bind_default(__le16 *addr)
{
	struct net_device *dev;
	int rv;

	/* Try the configured default device first... */
	dev = dn_dev_get_default();
last_chance:
	if (dev) {
		read_lock(&dev_base_lock);
		rv = dn_dev_get_first(dev, addr);
		read_unlock(&dev_base_lock);
		dev_put(dev);
		/* Loopback is the terminal fallback: return its result
		 * (possibly -ENODEV) rather than looping forever. */
		if (rv == 0 || dev == &loopback_dev)
			return rv;
	}
	/* ...then fall back to loopback, re-running the block above. */
	dev = &loopback_dev;
	dev_hold(dev);
	goto last_chance;
}
/*
 * Build and transmit an endnode hello for @ifa: a fixed-layout
 * struct endnode_hello_message preceded on the wire by a 16-bit
 * little-endian length field (pushed last), sent to the all-routers
 * multicast address.
 */
static void dn_send_endnode_hello(struct net_device *dev, struct dn_ifaddr *ifa)
{
	struct endnode_hello_message *msg;
	struct sk_buff *skb = NULL;
	__le16 *pktlen;
	struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;

	if ((skb = dn_alloc_skb(NULL, sizeof(*msg), GFP_ATOMIC)) == NULL)
		return;

	skb->dev = dev;

	msg = (struct endnode_hello_message *)skb_put(skb,sizeof(*msg));

	msg->msgflg = 0x0D;		/* endnode hello message flags */
	memcpy(msg->tiver, dn_eco_version, 3);
	dn_dn2eth(msg->id, ifa->ifa_local);
	msg->iinfo = DN_RT_INFO_ENDN;
	msg->blksize = dn_htons(mtu2blksize(dev));
	msg->area = 0x00;
	memset(msg->seed, 0, 8);
	/* Default neighbour field; overridden below if a router is known. */
	memcpy(msg->neighbor, dn_hiord, ETH_ALEN);

	if (dn_db->router) {
		struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
		dn_dn2eth(msg->neighbor, dn->addr);
	}

	msg->timer = dn_htons((unsigned short)dn_db->parms.t3);
	msg->mpd = 0x00;
	msg->datalen = 0x02;
	memset(msg->data, 0xAA, 2);	/* test pattern payload */

	pktlen = (__le16 *)skb_push(skb,2);
	*pktlen = dn_htons(skb->len - 2);

	skb->nh.raw = skb->data;

	dn_rt_finish_output(skb, dn_rt_all_rt_mcast, msg->id);
}
/* Hold-down after link-up before we will claim designated-router. */
#define DRDELAY (5 * HZ)

/*
 * Decide whether we should act as designated router on this link:
 * yes if no router has been heard, if our configured priority exceeds
 * the known router's, or on equal priority if our node address is the
 * higher one.  Never within DRDELAY of the device coming up.
 */
static int dn_am_i_a_router(struct dn_neigh *dn, struct dn_dev *dn_db, struct dn_ifaddr *ifa)
{
	/* First check time since device went up */
	if ((jiffies - dn_db->uptime) < DRDELAY)
		return 0;

	/* If there is no router, then yes... */
	if (!dn_db->router)
		return 1;

	/* otherwise only if we have a higher priority or.. */
	if (dn->priority < dn_db->parms.priority)
		return 1;

	/* if we have equal priority and a higher node number */
	if (dn->priority != dn_db->parms.priority)
		return 0;

	if (dn_ntohs(dn->addr) < dn_ntohs(ifa->ifa_local))
		return 1;

	return 0;
}
/*
 * Build and transmit a router hello (level 1 or 2, per the forwarding
 * setting): 26 bytes of fixed header plus 7 bytes per reported
 * neighbour (at most 32, bounded by block size), preceded by a 16-bit
 * length field.  Sent to the all-routers multicast; additionally to
 * the all-endnodes multicast when we are the designated router.
 */
static void dn_send_router_hello(struct net_device *dev, struct dn_ifaddr *ifa)
{
	int n;
	struct dn_dev *dn_db = dev->dn_ptr;
	struct dn_neigh *dn = (struct dn_neigh *)dn_db->router;
	struct sk_buff *skb;
	size_t size;
	unsigned char *ptr;
	unsigned char *i1, *i2;
	__le16 *pktlen;
	char *src;

	/* Need room for the header and at least one neighbour entry. */
	if (mtu2blksize(dev) < (26 + 7))
		return;

	n = mtu2blksize(dev) - 26;
	n /= 7;			/* neighbour entries that fit */

	if (n > 32)
		n = 32;

	size = 2 + 26 + 7 * n;

	if ((skb = dn_alloc_skb(NULL, size, GFP_ATOMIC)) == NULL)
		return;

	skb->dev = dev;

	ptr = skb_put(skb, size);

	*ptr++ = DN_RT_PKT_CNTL | DN_RT_PKT_ERTH;
	*ptr++ = 2; /* ECO */
	*ptr++ = 0;
	*ptr++ = 0;
	dn_dn2eth(ptr, ifa->ifa_local);
	src = ptr;	/* remember our id for dn_rt_finish_output() */
	ptr += ETH_ALEN;
	*ptr++ = dn_db->parms.forwarding == 1 ?
			DN_RT_INFO_L1RT : DN_RT_INFO_L2RT;
	*((__le16 *)ptr) = dn_htons(mtu2blksize(dev));
	ptr += 2;
	*ptr++ = dn_db->parms.priority; /* Priority */
	*ptr++ = 0; /* Area: Reserved */
	*((__le16 *)ptr) = dn_htons((unsigned short)dn_db->parms.t3);
	ptr += 2;
	*ptr++ = 0; /* MPD: Reserved */
	i1 = ptr++;	/* back-patched: neighbour block length incl. header */
	memset(ptr, 0, 7); /* Name: Reserved */
	ptr += 7;
	i2 = ptr++;	/* back-patched: neighbour entries length */

	n = dn_neigh_elist(dev, ptr, n);

	*i2 = 7 * n;
	*i1 = 8 + *i2;

	/* Trim to the neighbours actually written. */
	skb_trim(skb, (27 + *i2));

	pktlen = (__le16 *)skb_push(skb, 2);
	*pktlen = dn_htons(skb->len - 2);

	skb->nh.raw = skb->data;

	if (dn_am_i_a_router(dn, dn_db, ifa)) {
		struct sk_buff *skb2 = skb_copy(skb, GFP_ATOMIC);
		if (skb2) {
			dn_rt_finish_output(skb2, dn_rt_all_end_mcast, src);
		}
	}

	dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src);
}
  867. static void dn_send_brd_hello(struct net_device *dev, struct dn_ifaddr *ifa)
  868. {
  869. struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;
  870. if (dn_db->parms.forwarding == 0)
  871. dn_send_endnode_hello(dev, ifa);
  872. else
  873. dn_send_router_hello(dev, ifa);
  874. }
  875. static void dn_send_ptp_hello(struct net_device *dev, struct dn_ifaddr *ifa)
  876. {
  877. int tdlen = 16;
  878. int size = dev->hard_header_len + 2 + 4 + tdlen;
  879. struct sk_buff *skb = dn_alloc_skb(NULL, size, GFP_ATOMIC);
  880. int i;
  881. unsigned char *ptr;
  882. char src[ETH_ALEN];
  883. if (skb == NULL)
  884. return ;
  885. skb->dev = dev;
  886. skb_push(skb, dev->hard_header_len);
  887. ptr = skb_put(skb, 2 + 4 + tdlen);
  888. *ptr++ = DN_RT_PKT_HELO;
  889. *((__le16 *)ptr) = ifa->ifa_local;
  890. ptr += 2;
  891. *ptr++ = tdlen;
  892. for(i = 0; i < tdlen; i++)
  893. *ptr++ = 0252;
  894. dn_dn2eth(src, ifa->ifa_local);
  895. dn_rt_finish_output(skb, dn_rt_all_rt_mcast, src);
  896. }
  897. static int dn_eth_up(struct net_device *dev)
  898. {
  899. struct dn_dev *dn_db = dev->dn_ptr;
  900. if (dn_db->parms.forwarding == 0)
  901. dev_mc_add(dev, dn_rt_all_end_mcast, ETH_ALEN, 0);
  902. else
  903. dev_mc_add(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0);
  904. dev_mc_upload(dev);
  905. dn_db->use_long = 1;
  906. return 0;
  907. }
  908. static void dn_eth_down(struct net_device *dev)
  909. {
  910. struct dn_dev *dn_db = dev->dn_ptr;
  911. if (dn_db->parms.forwarding == 0)
  912. dev_mc_delete(dev, dn_rt_all_end_mcast, ETH_ALEN, 0);
  913. else
  914. dev_mc_delete(dev, dn_rt_all_rt_mcast, ETH_ALEN, 0);
  915. }
  916. static void dn_dev_set_timer(struct net_device *dev);
  917. static void dn_dev_timer_func(unsigned long arg)
  918. {
  919. struct net_device *dev = (struct net_device *)arg;
  920. struct dn_dev *dn_db = dev->dn_ptr;
  921. struct dn_ifaddr *ifa;
  922. if (dn_db->t3 <= dn_db->parms.t2) {
  923. if (dn_db->parms.timer3) {
  924. for(ifa = dn_db->ifa_list; ifa; ifa = ifa->ifa_next) {
  925. if (!(ifa->ifa_flags & IFA_F_SECONDARY))
  926. dn_db->parms.timer3(dev, ifa);
  927. }
  928. }
  929. dn_db->t3 = dn_db->parms.t3;
  930. } else {
  931. dn_db->t3 -= dn_db->parms.t2;
  932. }
  933. dn_dev_set_timer(dev);
  934. }
  935. static void dn_dev_set_timer(struct net_device *dev)
  936. {
  937. struct dn_dev *dn_db = dev->dn_ptr;
  938. if (dn_db->parms.t2 > dn_db->parms.t3)
  939. dn_db->parms.t2 = dn_db->parms.t3;
  940. dn_db->timer.data = (unsigned long)dev;
  941. dn_db->timer.function = dn_dev_timer_func;
  942. dn_db->timer.expires = jiffies + (dn_db->parms.t2 * HZ);
  943. add_timer(&dn_db->timer);
  944. }
struct dn_dev *dn_dev_create(struct net_device *dev, int *err)
{
	/*
	 * Allocate and attach DECnet state to @dev. Returns the new
	 * dn_dev, or NULL with *err set (-ENODEV if the device type is
	 * unsupported, -ENOBUFS on allocation or bring-up failure).
	 */
	int i;
	struct dn_dev_parms *p = dn_dev_list;
	struct dn_dev *dn_db;

	/* Find the parameter template matching this device type */
	for(i = 0; i < DN_DEV_LIST_SIZE; i++, p++) {
		if (p->type == dev->type)
			break;
	}

	*err = -ENODEV;
	if (i == DN_DEV_LIST_SIZE)
		return NULL;

	*err = -ENOBUFS;
	if ((dn_db = kzalloc(sizeof(struct dn_dev), GFP_ATOMIC)) == NULL)
		return NULL;

	memcpy(&dn_db->parms, p, sizeof(struct dn_dev_parms));
	/* Make sure parms are fully written before dn_ptr is published */
	smp_wmb();
	dev->dn_ptr = dn_db;
	dn_db->dev = dev;
	init_timer(&dn_db->timer);

	dn_db->uptime = jiffies;

	/* Device-type specific bring-up (e.g. ethernet multicast joins) */
	if (dn_db->parms.up) {
		if (dn_db->parms.up(dev) < 0) {
			dev->dn_ptr = NULL;
			kfree(dn_db);
			return NULL;
		}
	}

	dn_db->neigh_parms = neigh_parms_alloc(dev, &dn_neigh_table);

	dn_dev_sysctl_register(dev, &dn_db->parms);

	dn_dev_set_timer(dev);

	*err = 0;
	return dn_db;
}
  979. /*
  980. * This processes a device up event. We only start up
  981. * the loopback device & ethernet devices with correct
 * MAC addresses automatically. Others must be started
  983. * specifically.
  984. *
  985. * FIXME: How should we configure the loopback address ? If we could dispense
  986. * with using decnet_address here and for autobind, it will be one less thing
  987. * for users to worry about setting up.
  988. */
void dn_dev_up(struct net_device *dev)
{
	/*
	 * Bring DECnet up on @dev: attach a dn_db if one is missing and
	 * configure an automatically derived local address. Only ethernet
	 * and loopback devices are handled here.
	 */
	struct dn_ifaddr *ifa;
	__le16 addr = decnet_address;
	int maybe_default = 0;
	struct dn_dev *dn_db = (struct dn_dev *)dev->dn_ptr;

	if ((dev->type != ARPHRD_ETHER) && (dev->type != ARPHRD_LOOPBACK))
		return;

	/*
	 * Need to ensure that loopback device has a dn_db attached to it
	 * to allow creation of neighbours against it, even though it might
	 * not have a local address of its own. Might as well do the same for
	 * all autoconfigured interfaces.
	 */
	if (dn_db == NULL) {
		int err;
		dn_db = dn_dev_create(dev, &err);
		if (dn_db == NULL)
			return;
	}

	if (dev->type == ARPHRD_ETHER) {
		/* Only autoconfigure MACs starting with the DECnet HIORD prefix */
		if (memcmp(dev->dev_addr, dn_hiord, 4) != 0)
			return;
		addr = dn_eth2dn(dev->dev_addr);
		maybe_default = 1;
	}

	if (addr == 0)
		return;

	if ((ifa = dn_dev_alloc_ifa()) == NULL)
		return;

	ifa->ifa_local = ifa->ifa_address = addr;
	ifa->ifa_flags = 0;
	ifa->ifa_scope = RT_SCOPE_UNIVERSE;
	strcpy(ifa->ifa_label, dev->name);

	dn_dev_set_ifa(dev, ifa);

	/*
	 * Automagically set the default device to the first automatically
	 * configured ethernet card in the system.
	 */
	if (maybe_default) {
		dev_hold(dev);	/* reference handed to dn_dev_set_default() */
		if (dn_dev_set_default(dev, 0))
			dev_put(dev);	/* not taken as default: drop it again */
	}
}
static void dn_dev_delete(struct net_device *dev)
{
	/*
	 * Tear down and free the DECnet state attached to @dev,
	 * releasing neighbours, sysctls and the periodic timer.
	 */
	struct dn_dev *dn_db = dev->dn_ptr;

	if (dn_db == NULL)
		return;

	del_timer_sync(&dn_db->timer);
	dn_dev_sysctl_unregister(&dn_db->parms);
	dn_dev_check_default(dev);
	neigh_ifdown(&dn_neigh_table, dev);

	/* Device-type specific teardown (e.g. leave multicast groups) */
	if (dn_db->parms.down)
		dn_db->parms.down(dev);

	dev->dn_ptr = NULL;

	neigh_parms_release(&dn_neigh_table, dn_db->neigh_parms);
	/*
	 * NOTE(review): second neigh_ifdown() — presumably to catch
	 * neighbours created after the first call and before dn_ptr was
	 * cleared; confirm it is intentional rather than redundant.
	 */
	neigh_ifdown(&dn_neigh_table, dev);

	if (dn_db->router)
		neigh_release(dn_db->router);
	if (dn_db->peer)
		neigh_release(dn_db->peer);

	kfree(dn_db);
}
  1054. void dn_dev_down(struct net_device *dev)
  1055. {
  1056. struct dn_dev *dn_db = dev->dn_ptr;
  1057. struct dn_ifaddr *ifa;
  1058. if (dn_db == NULL)
  1059. return;
  1060. while((ifa = dn_db->ifa_list) != NULL) {
  1061. dn_dev_del_ifa(dn_db, &dn_db->ifa_list, 0);
  1062. dn_dev_free_ifa(ifa);
  1063. }
  1064. dn_dev_delete(dev);
  1065. }
  1066. void dn_dev_init_pkt(struct sk_buff *skb)
  1067. {
  1068. return;
  1069. }
  1070. void dn_dev_veri_pkt(struct sk_buff *skb)
  1071. {
  1072. return;
  1073. }
  1074. void dn_dev_hello(struct sk_buff *skb)
  1075. {
  1076. return;
  1077. }
  1078. void dn_dev_devices_off(void)
  1079. {
  1080. struct net_device *dev;
  1081. rtnl_lock();
  1082. for(dev = dev_base; dev; dev = dev->next)
  1083. dn_dev_down(dev);
  1084. rtnl_unlock();
  1085. }
  1086. void dn_dev_devices_on(void)
  1087. {
  1088. struct net_device *dev;
  1089. rtnl_lock();
  1090. for(dev = dev_base; dev; dev = dev->next) {
  1091. if (dev->flags & IFF_UP)
  1092. dn_dev_up(dev);
  1093. }
  1094. rtnl_unlock();
  1095. }
/* Add @nb to the DECnet address notifier chain. */
int register_dnaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_register(&dnaddr_chain, nb);
}
/* Remove @nb from the DECnet address notifier chain. */
int unregister_dnaddr_notifier(struct notifier_block *nb)
{
	return blocking_notifier_chain_unregister(&dnaddr_chain, nb);
}
  1104. #ifdef CONFIG_PROC_FS
  1105. static inline struct net_device *dn_dev_get_next(struct seq_file *seq, struct net_device *dev)
  1106. {
  1107. do {
  1108. dev = dev->next;
  1109. } while(dev && !dev->dn_ptr);
  1110. return dev;
  1111. }
  1112. static struct net_device *dn_dev_get_idx(struct seq_file *seq, loff_t pos)
  1113. {
  1114. struct net_device *dev;
  1115. dev = dev_base;
  1116. if (dev && !dev->dn_ptr)
  1117. dev = dn_dev_get_next(seq, dev);
  1118. if (pos) {
  1119. while(dev && (dev = dn_dev_get_next(seq, dev)))
  1120. --pos;
  1121. }
  1122. return dev;
  1123. }
static void *dn_dev_seq_start(struct seq_file *seq, loff_t *pos)
{
	/*
	 * seq_file ->start: position 0 is the header line
	 * (SEQ_START_TOKEN). For any other position, take
	 * dev_base_lock; it is dropped again here on lookup failure,
	 * otherwise in ->next (at list end) or ->stop.
	 */
	if (*pos) {
		struct net_device *dev;
		read_lock(&dev_base_lock);
		dev = dn_dev_get_idx(seq, *pos - 1);
		if (dev == NULL)
			read_unlock(&dev_base_lock);
		return dev;
	}
	return SEQ_START_TOKEN;
}
static void *dn_dev_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	/*
	 * seq_file ->next: step from the header token to the first real
	 * device (via ->start, which also takes dev_base_lock), or from
	 * one device to the next. The lock is dropped when the list ends.
	 */
	struct net_device *dev = v;
	loff_t one = 1;

	if (v == SEQ_START_TOKEN) {
		dev = dn_dev_seq_start(seq, &one);
	} else {
		dev = dn_dev_get_next(seq, dev);
		if (dev == NULL)
			read_unlock(&dev_base_lock);
	}
	++*pos;
	return dev;
}
  1150. static void dn_dev_seq_stop(struct seq_file *seq, void *v)
  1151. {
  1152. if (v && v != SEQ_START_TOKEN)
  1153. read_unlock(&dev_base_lock);
  1154. }
  1155. static char *dn_type2asc(char type)
  1156. {
  1157. switch(type) {
  1158. case DN_DEV_BCAST:
  1159. return "B";
  1160. case DN_DEV_UCAST:
  1161. return "U";
  1162. case DN_DEV_MPOINT:
  1163. return "M";
  1164. }
  1165. return "?";
  1166. }
static int dn_dev_seq_show(struct seq_file *seq, void *v)
{
	/* Emit the header line, or one status line per DECnet device. */
	if (v == SEQ_START_TOKEN)
		seq_puts(seq, "Name Flags T1 Timer1 T3 Timer3 BlkSize Pri State DevType Router Peer\n");
	else {
		struct net_device *dev = v;
		char peer_buf[DN_ASCBUF_LEN];
		char router_buf[DN_ASCBUF_LEN];
		struct dn_dev *dn_db = dev->dn_ptr;

		/* T1/Timer1 columns are unused and always printed as 0 */
		seq_printf(seq, "%-8s %1s %04u %04u %04lu %04lu"
				" %04hu %03d %02x %-10s %-7s %-7s\n",
				dev->name ? dev->name : "???",
				dn_type2asc(dn_db->parms.mode),
				0, 0,
				dn_db->t3, dn_db->parms.t3,
				mtu2blksize(dev),
				dn_db->parms.priority,
				dn_db->parms.state, dn_db->parms.name,
				dn_db->router ? dn_addr2asc(dn_ntohs(*(__le16 *)dn_db->router->primary_key), router_buf) : "",
				dn_db->peer ? dn_addr2asc(dn_ntohs(*(__le16 *)dn_db->peer->primary_key), peer_buf) : "");
	}
	return 0;
}
/* seq_file iterator for /proc/net/decnet_dev. */
static struct seq_operations dn_dev_seq_ops = {
	.start	= dn_dev_seq_start,
	.next	= dn_dev_seq_next,
	.stop	= dn_dev_seq_stop,
	.show	= dn_dev_seq_show,
};
/* open() handler for /proc/net/decnet_dev: hook up the seq iterator. */
static int dn_dev_seq_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &dn_dev_seq_ops);
}
/* File operations for /proc/net/decnet_dev (read-only seq_file). */
static struct file_operations dn_dev_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = dn_dev_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};
  1207. #endif /* CONFIG_PROC_FS */
/*
 * rtnetlink message dispatch table for PF_DECnet. Route handlers are
 * only wired up when the kernel is built as a DECnet router.
 */
static struct rtnetlink_link dnet_rtnetlink_table[RTM_NR_MSGTYPES] =
{
	[RTM_NEWADDR  - RTM_BASE] = { .doit	= dn_nl_newaddr,	},
	[RTM_DELADDR  - RTM_BASE] = { .doit	= dn_nl_deladdr,	},
	[RTM_GETADDR  - RTM_BASE] = { .dumpit	= dn_nl_dump_ifaddr,	},
#ifdef CONFIG_DECNET_ROUTER
	[RTM_NEWROUTE - RTM_BASE] = { .doit	= dn_fib_rtm_newroute,	},
	[RTM_DELROUTE - RTM_BASE] = { .doit	= dn_fib_rtm_delroute,	},
	[RTM_GETROUTE - RTM_BASE] = { .doit	= dn_cache_getroute,
				      .dumpit	= dn_fib_dump,		},
	[RTM_GETRULE  - RTM_BASE] = { .dumpit	= dn_fib_dump_rules,	},
#else
	[RTM_GETROUTE - RTM_BASE] = { .doit	= dn_cache_getroute,
				      .dumpit	= dn_cache_dump,	},
#endif
};
/* Boot/module parameter: this machine's DECnet address as "area,node". */
static int __initdata addr[2];

module_param_array(addr, int, NULL, 0444);
MODULE_PARM_DESC(addr, "The DECnet address of this machine: area,node");
  1227. void __init dn_dev_init(void)
  1228. {
  1229. if (addr[0] > 63 || addr[0] < 0) {
  1230. printk(KERN_ERR "DECnet: Area must be between 0 and 63");
  1231. return;
  1232. }
  1233. if (addr[1] > 1023 || addr[1] < 0) {
  1234. printk(KERN_ERR "DECnet: Node must be between 0 and 1023");
  1235. return;
  1236. }
  1237. decnet_address = dn_htons((addr[0] << 10) | addr[1]);
  1238. dn_dev_devices_on();
  1239. rtnetlink_links[PF_DECnet] = dnet_rtnetlink_table;
  1240. proc_net_fops_create("decnet_dev", S_IRUGO, &dn_dev_seq_fops);
  1241. #ifdef CONFIG_SYSCTL
  1242. {
  1243. int i;
  1244. for(i = 0; i < DN_DEV_LIST_SIZE; i++)
  1245. dn_dev_sysctl_register(NULL, &dn_dev_list[i]);
  1246. }
  1247. #endif /* CONFIG_SYSCTL */
  1248. }
  1249. void __exit dn_dev_cleanup(void)
  1250. {
  1251. rtnetlink_links[PF_DECnet] = NULL;
  1252. #ifdef CONFIG_SYSCTL
  1253. {
  1254. int i;
  1255. for(i = 0; i < DN_DEV_LIST_SIZE; i++)
  1256. dn_dev_sysctl_unregister(&dn_dev_list[i]);
  1257. }
  1258. #endif /* CONFIG_SYSCTL */
  1259. proc_net_remove("decnet_dev");
  1260. dn_dev_devices_off();
  1261. }