dn_route.c

/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system. DECnet is implemented using the BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Functions (Endnode and Router)
 *
 * Authors:     Steve Whitehouse <SteveW@ACM.org>
 *              Eduardo Marcelo Serrat <emserrat@geocities.com>
 *
 * Changes:
 *              Steve Whitehouse : Fixes to allow "intra-ethernet" and
 *                                 "return-to-sender" bits on outgoing
 *                                 packets.
 *              Steve Whitehouse : Timeouts for cached routes.
 *              Steve Whitehouse : Use dst cache for input routes too.
 *              Steve Whitehouse : Fixed error values in dn_send_skb.
 *              Steve Whitehouse : Rework routing functions to better fit
 *                                 DECnet routing design
 *              Alexey Kuznetsov : New SMP locking
 *              Steve Whitehouse : More SMP locking changes & dn_cache_dump()
 *              Steve Whitehouse : Prerouting NF hook, now really is prerouting.
 *                                 Fixed possible skb leak in rtnetlink funcs.
 *              Steve Whitehouse : Dave Miller's dynamic hash table sizing and
 *                                 Alexey Kuznetsov's finer grained locking
 *                                 from ipv4/route.c.
 *              Steve Whitehouse : Routing is now starting to look like a
 *                                 sensible set of code now, mainly due to
 *                                 my copying the IPv4 routing code. The
 *                                 hooks here are modified and will continue
 *                                 to evolve for a while.
 *              Steve Whitehouse : Real SMP at last :-) Also new netfilter
 *                                 stuff. Look out raw sockets your days
 *                                 are numbered!
 *              Steve Whitehouse : Added return-to-sender functions. Added
 *                                 backlog congestion level return codes.
 *              Steve Whitehouse : Fixed bug where routes were set up with
 *                                 no ref count on net devices.
 *              Steve Whitehouse : RCU for the route cache
 *              Steve Whitehouse : Preparations for the flow cache
 *              Steve Whitehouse : Prepare for nonlinear skbs
 */

/******************************************************************************
    (c) 1995-1998 E.M. Serrat           emserrat@geocities.com

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details.
*******************************************************************************/
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/in_route.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <linux/netfilter_decnet.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <asm/errno.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/fib_rules.h>
#include <net/dn.h>
#include <net/dn_dev.h>
#include <net/dn_nsp.h>
#include <net/dn_route.h>
#include <net/dn_neigh.h>
#include <net/dn_fib.h>

struct dn_rt_hash_bucket {
    struct dn_route __rcu *chain;
    spinlock_t lock;
};
extern struct neigh_table dn_neigh_table;

static unsigned char dn_hiord_addr[6] = {0xAA, 0x00, 0x04, 0x00, 0x00, 0x00};

static const int dn_rt_min_delay = 2 * HZ;
static const int dn_rt_max_delay = 10 * HZ;
static const int dn_rt_mtu_expires = 10 * 60 * HZ;

static unsigned long dn_rt_deadline;

static int dn_dst_gc(struct dst_ops *ops);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
static int dn_route_input(struct sk_buff *);
static void dn_run_flush(unsigned long dummy);

static struct dn_rt_hash_bucket *dn_rt_hash_table;
static unsigned dn_rt_hash_mask;

static struct timer_list dn_route_timer;
static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0);
int decnet_dst_gc_interval = 2;

static struct dst_ops dn_dst_ops = {
    .family          = PF_DECnet,
    .protocol        = cpu_to_be16(ETH_P_DNA_RT),
    .gc_thresh       = 128,
    .gc              = dn_dst_gc,
    .check           = dn_dst_check,
    .negative_advice = dn_dst_negative_advice,
    .link_failure    = dn_dst_link_failure,
    .update_pmtu     = dn_dst_update_pmtu,
};

static __inline__ unsigned dn_hash(__le16 src, __le16 dst)
{
    __u16 tmp = (__u16 __force)(src ^ dst);
    tmp ^= (tmp >> 3);
    tmp ^= (tmp >> 5);
    tmp ^= (tmp >> 10);
    return dn_rt_hash_mask & (unsigned)tmp;
}
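
/*
 * The hash above folds the XOR of source and destination back onto itself
 * three times so that high-order bits of the 16-bit key influence the
 * low-order bits that survive the final mask.  Worked example (derived
 * from the shifts above, not a documented property): a key of 0x8000
 * becomes 0x8000 ^ 0x1000 = 0x9000, then 0x9000 ^ 0x0480 = 0x9480, then
 * 0x9480 ^ 0x0025 = 0x94A5, so even a difference only in the top bit
 * still selects a distinct bucket after masking.
 */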
static inline void dnrt_free(struct dn_route *rt)
{
    call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline void dnrt_drop(struct dn_route *rt)
{
    dst_release(&rt->dst);
    call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}
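
/*
 * Both helpers defer the actual free until after an RCU grace period so
 * that lockless readers walking a hash chain never touch freed memory.
 * The difference is that dnrt_drop() also releases the caller's reference
 * on the dst first, while dnrt_free() is for entries whose refcount has
 * already been accounted for.
 */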
static void dn_dst_check_expire(unsigned long dummy)
{
    int i;
    struct dn_route *rt;
    struct dn_route __rcu **rtp;
    unsigned long now = jiffies;
    unsigned long expire = 120 * HZ;

    for (i = 0; i <= dn_rt_hash_mask; i++) {
        rtp = &dn_rt_hash_table[i].chain;

        spin_lock(&dn_rt_hash_table[i].lock);
        while ((rt = rcu_dereference_protected(*rtp,
                        lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
            if (atomic_read(&rt->dst.__refcnt) ||
                (now - rt->dst.lastuse) < expire) {
                rtp = &rt->dst.dn_next;
                continue;
            }
            *rtp = rt->dst.dn_next;
            rt->dst.dn_next = NULL;
            dnrt_free(rt);
        }
        spin_unlock(&dn_rt_hash_table[i].lock);

        if ((jiffies - now) > 0)
            break;
    }

    mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ);
}

static int dn_dst_gc(struct dst_ops *ops)
{
    struct dn_route *rt;
    struct dn_route __rcu **rtp;
    int i;
    unsigned long now = jiffies;
    unsigned long expire = 10 * HZ;

    for (i = 0; i <= dn_rt_hash_mask; i++) {
        spin_lock_bh(&dn_rt_hash_table[i].lock);
        rtp = &dn_rt_hash_table[i].chain;

        while ((rt = rcu_dereference_protected(*rtp,
                        lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
            if (atomic_read(&rt->dst.__refcnt) ||
                (now - rt->dst.lastuse) < expire) {
                rtp = &rt->dst.dn_next;
                continue;
            }
            *rtp = rt->dst.dn_next;
            rt->dst.dn_next = NULL;
            dnrt_drop(rt);
            break;
        }
        spin_unlock_bh(&dn_rt_hash_table[i].lock);
    }

    return 0;
}
/*
 * The decnet standards don't impose a particular minimum mtu, what they
 * do insist on is that the routing layer accepts a datagram of at least
 * 230 bytes long. Here we have to subtract the routing header length from
 * 230 to get the minimum acceptable mtu. If there is no neighbour, then we
 * assume the worst and use a long header size.
 *
 * We update both the mtu and the advertised mss (i.e. the segment size we
 * advertise to the other end).
 */
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
{
    u32 min_mtu = 230;
    struct dn_dev *dn = dst->neighbour ?
                rcu_dereference_raw(dst->neighbour->dev->dn_ptr) : NULL;

    if (dn && dn->use_long == 0)
        min_mtu -= 6;
    else
        min_mtu -= 21;

    if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
        if (!(dst_metric_locked(dst, RTAX_MTU))) {
            dst->metrics[RTAX_MTU-1] = mtu;
            dst_set_expires(dst, dn_rt_mtu_expires);
        }
        if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
            u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
            if (dst_metric(dst, RTAX_ADVMSS) > mss)
                dst->metrics[RTAX_ADVMSS-1] = mss;
        }
    }
}
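
/*
 * Worked example of the arithmetic above: a short (6 byte) routing header
 * gives a floor of 230 - 6 = 224 bytes, a long (21 byte) header gives
 * 230 - 21 = 209 bytes.  Any reported PMTU below that floor is ignored,
 * since a conforming DECnet router must accept 230-byte datagrams anyway.
 */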
/*
 * When a route has been marked obsolete. (e.g. routing cache flush)
 */
static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
{
    return NULL;
}

static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst)
{
    dst_release(dst);
    return NULL;
}

static void dn_dst_link_failure(struct sk_buff *skb)
{
}

static inline int compare_keys(struct flowi *fl1, struct flowi *fl2)
{
    return ((fl1->fld_dst ^ fl2->fld_dst) |
            (fl1->fld_src ^ fl2->fld_src) |
            (fl1->mark ^ fl2->mark) |
            (fl1->fld_scope ^ fl2->fld_scope) |
            (fl1->oif ^ fl2->oif) |
            (fl1->iif ^ fl2->iif)) == 0;
}
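
/*
 * compare_keys() ORs together the XOR of each field pair: the result is
 * zero only when every field matches, giving a branch-free equality test
 * over the whole flow key.  This is the same idiom used by ipv4/route.c,
 * from which (per the header comment) much of this file was copied.
 */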
static int dn_insert_route(struct dn_route *rt, unsigned hash, struct dn_route **rp)
{
    struct dn_route *rth;
    struct dn_route __rcu **rthp;
    unsigned long now = jiffies;

    rthp = &dn_rt_hash_table[hash].chain;

    spin_lock_bh(&dn_rt_hash_table[hash].lock);
    while ((rth = rcu_dereference_protected(*rthp,
                    lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
        if (compare_keys(&rth->fl, &rt->fl)) {
            /* Put it first */
            *rthp = rth->dst.dn_next;
            rcu_assign_pointer(rth->dst.dn_next,
                               dn_rt_hash_table[hash].chain);
            rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);

            dst_use(&rth->dst, now);
            spin_unlock_bh(&dn_rt_hash_table[hash].lock);

            dnrt_drop(rt);
            *rp = rth;
            return 0;
        }
        rthp = &rth->dst.dn_next;
    }

    rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain);
    rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);

    dst_use(&rt->dst, now);
    spin_unlock_bh(&dn_rt_hash_table[hash].lock);
    *rp = rt;
    return 0;
}
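
/*
 * If an equivalent route is already cached, dn_insert_route() moves it to
 * the front of its hash chain and throws the caller's new entry away, so
 * frequently used routes stay cheap to find and duplicates never
 * accumulate.  The rcu_assign_pointer() calls order the writes so that
 * concurrent lockless readers always see a consistent chain.
 */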
static void dn_run_flush(unsigned long dummy)
{
    int i;
    struct dn_route *rt, *next;

    /* <= here: bucket index dn_rt_hash_mask is valid and must be flushed too */
    for (i = 0; i <= dn_rt_hash_mask; i++) {
        spin_lock_bh(&dn_rt_hash_table[i].lock);

        if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
            goto nothing_to_declare;

        for (; rt; rt = next) {
            next = rcu_dereference_raw(rt->dst.dn_next);
            RCU_INIT_POINTER(rt->dst.dn_next, NULL);
            dst_free((struct dst_entry *)rt);
        }

nothing_to_declare:
        spin_unlock_bh(&dn_rt_hash_table[i].lock);
    }
}
static DEFINE_SPINLOCK(dn_rt_flush_lock);

void dn_rt_cache_flush(int delay)
{
    unsigned long now = jiffies;
    int user_mode = !in_interrupt();

    if (delay < 0)
        delay = dn_rt_min_delay;

    spin_lock_bh(&dn_rt_flush_lock);

    if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) {
        long tmo = (long)(dn_rt_deadline - now);

        if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay)
            tmo = 0;

        if (delay > tmo)
            delay = tmo;
    }

    if (delay <= 0) {
        spin_unlock_bh(&dn_rt_flush_lock);
        dn_run_flush(0);
        return;
    }

    if (dn_rt_deadline == 0)
        dn_rt_deadline = now + dn_rt_max_delay;

    dn_rt_flush_timer.expires = now + delay;
    add_timer(&dn_rt_flush_timer);
    spin_unlock_bh(&dn_rt_flush_lock);
}
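
/*
 * The deadline logic above batches flush requests: the first request sets
 * a hard deadline of dn_rt_max_delay (10s) and each later request may
 * only shorten the pending timer, never push it past that deadline.  A
 * negative delay asks for the default dn_rt_min_delay (2s); a delay of
 * zero, or one clamped to zero by an old deadline, flushes synchronously.
 */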
/**
 * dn_return_short - Return a short packet to its sender
 * @skb: The packet to return
 *
 */
static int dn_return_short(struct sk_buff *skb)
{
    struct dn_skb_cb *cb;
    unsigned char *ptr;
    __le16 *src;
    __le16 *dst;

    /* Add back headers */
    skb_push(skb, skb->data - skb_network_header(skb));

    if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
        return NET_RX_DROP;

    cb = DN_SKB_CB(skb);
    /* Skip packet length and point to flags */
    ptr = skb->data + 2;
    *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;

    dst = (__le16 *)ptr;
    ptr += 2;
    src = (__le16 *)ptr;
    ptr += 2;
    *ptr = 0; /* Zero hop count */

    swap(*src, *dst);

    skb->pkt_type = PACKET_OUTGOING;
    dn_rt_finish_output(skb, NULL, NULL);
    return NET_RX_SUCCESS;
}
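
/*
 * Short routing header layout, as implied by the pointer walk above:
 *
 *   offset 0: packet length (2 bytes, little endian)
 *   offset 2: flags            (RQR cleared, RTS set for the return trip)
 *   offset 3: dst address (2)  \ swapped so the packet heads back
 *   offset 5: src address (2)  / to where it came from
 *   offset 7: forward/hop count, reset to zero
 */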
/**
 * dn_return_long - Return a long packet to its sender
 * @skb: The long format packet to return
 *
 */
static int dn_return_long(struct sk_buff *skb)
{
    struct dn_skb_cb *cb;
    unsigned char *ptr;
    unsigned char *src_addr, *dst_addr;
    unsigned char tmp[ETH_ALEN];

    /* Add back all headers */
    skb_push(skb, skb->data - skb_network_header(skb));

    if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
        return NET_RX_DROP;

    cb = DN_SKB_CB(skb);
    /* Ignore packet length and point to flags */
    ptr = skb->data + 2;

    /* Skip padding */
    if (*ptr & DN_RT_F_PF) {
        char padlen = (*ptr & ~DN_RT_F_PF);
        ptr += padlen;
    }

    *ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
    ptr += 2;
    dst_addr = ptr;
    ptr += 8;
    src_addr = ptr;
    ptr += 6;
    *ptr = 0; /* Zero hop count */

    /* Swap source and destination */
    memcpy(tmp, src_addr, ETH_ALEN);
    memcpy(src_addr, dst_addr, ETH_ALEN);
    memcpy(dst_addr, tmp, ETH_ALEN);

    skb->pkt_type = PACKET_OUTGOING;
    dn_rt_finish_output(skb, dst_addr, src_addr);
    return NET_RX_SUCCESS;
}
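
/*
 * Long routing header layout implied by the walk above: after the flags
 * byte come the d-area/d-subarea pair (2 bytes, skipped), the 6-byte
 * destination address plus the s-area/s-subarea pair (hence ptr += 8),
 * the 6-byte source address, and the visit count, which is zeroed.  Only
 * the two 6-byte addresses are swapped for the return trip.
 */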
/**
 * dn_route_rx_packet - Try and find a route for an incoming packet
 * @skb: The packet to find a route for
 *
 * Returns: result of input function if route is found, error code otherwise
 */
static int dn_route_rx_packet(struct sk_buff *skb)
{
    struct dn_skb_cb *cb;
    int err;

    if ((err = dn_route_input(skb)) == 0)
        return dst_input(skb);

    cb = DN_SKB_CB(skb);
    if (decnet_debug_level & 4) {
        char *devname = skb->dev ? skb->dev->name : "???";

        printk(KERN_DEBUG
               "DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
               (int)cb->rt_flags, devname, skb->len,
               le16_to_cpu(cb->src), le16_to_cpu(cb->dst),
               err, skb->pkt_type);
    }

    if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
        switch (cb->rt_flags & DN_RT_PKT_MSK) {
        case DN_RT_PKT_SHORT:
            return dn_return_short(skb);
        case DN_RT_PKT_LONG:
            return dn_return_long(skb);
        }
    }

    kfree_skb(skb);
    return NET_RX_DROP;
}
static int dn_route_rx_long(struct sk_buff *skb)
{
    struct dn_skb_cb *cb = DN_SKB_CB(skb);
    unsigned char *ptr = skb->data;

    if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */
        goto drop_it;

    skb_pull(skb, 20);
    skb_reset_transport_header(skb);

    /* Destination info */
    ptr += 2;
    cb->dst = dn_eth2dn(ptr);
    if (memcmp(ptr, dn_hiord_addr, 4) != 0)
        goto drop_it;
    ptr += 6;

    /* Source info */
    ptr += 2;
    cb->src = dn_eth2dn(ptr);
    if (memcmp(ptr, dn_hiord_addr, 4) != 0)
        goto drop_it;
    ptr += 6;

    /* Other junk */
    ptr++;
    cb->hops = *ptr++; /* Visit Count */

    return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
                   dn_route_rx_packet);

drop_it:
    kfree_skb(skb);
    return NET_RX_DROP;
}

static int dn_route_rx_short(struct sk_buff *skb)
{
    struct dn_skb_cb *cb = DN_SKB_CB(skb);
    unsigned char *ptr = skb->data;

    if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */
        goto drop_it;

    skb_pull(skb, 5);
    skb_reset_transport_header(skb);

    cb->dst = *(__le16 *)ptr;
    ptr += 2;
    cb->src = *(__le16 *)ptr;
    ptr += 2;
    cb->hops = *ptr & 0x3f;

    return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
                   dn_route_rx_packet);

drop_it:
    kfree_skb(skb);
    return NET_RX_DROP;
}
static int dn_route_discard(struct sk_buff *skb)
{
    /*
     * I know we drop the packet here, but that's considered success in
     * this case
     */
    kfree_skb(skb);
    return NET_RX_SUCCESS;
}

static int dn_route_ptp_hello(struct sk_buff *skb)
{
    dn_dev_hello(skb);
    dn_neigh_pointopoint_hello(skb);
    return NET_RX_SUCCESS;
}
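
/*
 * dn_route_rcv() is the packet_type entry point for ETH_P_DNA_RT frames.
 * It trims the frame to the little-endian length word at the front,
 * strips any padding, rejects future-version packets, and then either
 * dispatches control packets (hellos, routing messages) through the
 * NF_DN_HELLO/NF_DN_ROUTE netfilter hooks or hands data packets to the
 * long/short header parsers above.
 */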
int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
    struct dn_skb_cb *cb;
    unsigned char flags = 0;
    __u16 len = le16_to_cpu(*(__le16 *)skb->data);
    struct dn_dev *dn = rcu_dereference(dev->dn_ptr);
    unsigned char padlen = 0;

    if (!net_eq(dev_net(dev), &init_net))
        goto dump_it;

    if (dn == NULL)
        goto dump_it;

    if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
        goto out;

    if (!pskb_may_pull(skb, 3))
        goto dump_it;

    skb_pull(skb, 2);

    if (len > skb->len)
        goto dump_it;

    skb_trim(skb, len);

    flags = *skb->data;

    cb = DN_SKB_CB(skb);
    cb->stamp = jiffies;
    cb->iif = dev->ifindex;

    /*
     * If we have padding, remove it.
     */
    if (flags & DN_RT_F_PF) {
        padlen = flags & ~DN_RT_F_PF;
        if (!pskb_may_pull(skb, padlen + 1))
            goto dump_it;
        skb_pull(skb, padlen);
        flags = *skb->data;
    }

    skb_reset_network_header(skb);

    /*
     * Weed out future version DECnet
     */
    if (flags & DN_RT_F_VER)
        goto dump_it;

    cb->rt_flags = flags;

    if (decnet_debug_level & 1)
        printk(KERN_DEBUG
               "dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
               (int)flags, (dev) ? dev->name : "???", len, skb->len,
               padlen);

    if (flags & DN_RT_PKT_CNTL) {
        if (unlikely(skb_linearize(skb)))
            goto dump_it;

        switch (flags & DN_RT_CNTL_MSK) {
        case DN_RT_PKT_INIT:
            dn_dev_init_pkt(skb);
            break;
        case DN_RT_PKT_VERI:
            dn_dev_veri_pkt(skb);
            break;
        }

        if (dn->parms.state != DN_DEV_S_RU)
            goto dump_it;

        switch (flags & DN_RT_CNTL_MSK) {
        case DN_RT_PKT_HELO:
            return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
                           skb, skb->dev, NULL,
                           dn_route_ptp_hello);

        case DN_RT_PKT_L1RT:
        case DN_RT_PKT_L2RT:
            return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
                           skb, skb->dev, NULL,
                           dn_route_discard);
        case DN_RT_PKT_ERTH:
            return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
                           skb, skb->dev, NULL,
                           dn_neigh_router_hello);

        case DN_RT_PKT_EEDH:
            return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
                           skb, skb->dev, NULL,
                           dn_neigh_endnode_hello);
        }
    } else {
        if (dn->parms.state != DN_DEV_S_RU)
            goto dump_it;

        skb_pull(skb, 1); /* Pull flags */

        switch (flags & DN_RT_PKT_MSK) {
        case DN_RT_PKT_LONG:
            return dn_route_rx_long(skb);
        case DN_RT_PKT_SHORT:
            return dn_route_rx_short(skb);
        }
    }

dump_it:
    kfree_skb(skb);
out:
    return NET_RX_DROP;
}
static int dn_output(struct sk_buff *skb)
{
    struct dst_entry *dst = skb_dst(skb);
    struct dn_route *rt = (struct dn_route *)dst;
    struct net_device *dev = dst->dev;
    struct dn_skb_cb *cb = DN_SKB_CB(skb);
    struct neighbour *neigh;
    int err = -EINVAL;

    if ((neigh = dst->neighbour) == NULL)
        goto error;

    skb->dev = dev;

    cb->src = rt->rt_saddr;
    cb->dst = rt->rt_daddr;

    /*
     * Always set the Intra-Ethernet bit on all outgoing packets
     * originated on this node. Only valid flag from upper layers
     * is return-to-sender-requested. Set hop count to 0 too.
     */
    cb->rt_flags &= ~DN_RT_F_RQR;
    cb->rt_flags |= DN_RT_F_IE;
    cb->hops = 0;

    return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, skb, NULL, dev,
                   neigh->output);

error:
    if (net_ratelimit())
        printk(KERN_DEBUG "dn_output: This should not happen\n");

    kfree_skb(skb);

    return err;
}
static int dn_forward(struct sk_buff *skb)
{
    struct dn_skb_cb *cb = DN_SKB_CB(skb);
    struct dst_entry *dst = skb_dst(skb);
    struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
    struct dn_route *rt;
    struct neighbour *neigh = dst->neighbour;
    int header_len;
#ifdef CONFIG_NETFILTER
    struct net_device *dev = skb->dev;
#endif

    if (skb->pkt_type != PACKET_HOST)
        goto drop;

    /* Ensure that we have enough space for headers */
    rt = (struct dn_route *)skb_dst(skb);
    header_len = dn_db->use_long ? 21 : 6;
    if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev) + header_len))
        goto drop;

    /*
     * Hop count exceeded.
     */
    if (++cb->hops > 30)
        goto drop;

    skb->dev = rt->dst.dev;

    /*
     * If packet goes out same interface it came in on, then set
     * the Intra-Ethernet bit. This has no effect for short
     * packets, so we don't need to test for them here.
     */
    cb->rt_flags &= ~DN_RT_F_IE;
    if (rt->rt_flags & RTCF_DOREDIRECT)
        cb->rt_flags |= DN_RT_F_IE;

    return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, skb, dev, skb->dev,
                   neigh->output);

drop:
    kfree_skb(skb);
    return NET_RX_DROP;
}
/*
 * Used to catch bugs. This should never normally get
 * called.
 */
static int dn_rt_bug(struct sk_buff *skb)
{
    if (net_ratelimit()) {
        struct dn_skb_cb *cb = DN_SKB_CB(skb);

        printk(KERN_DEBUG "dn_rt_bug: skb from:%04x to:%04x\n",
               le16_to_cpu(cb->src), le16_to_cpu(cb->dst));
    }

    kfree_skb(skb);

    return NET_RX_DROP;
}
static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
{
    struct dn_fib_info *fi = res->fi;
    struct net_device *dev = rt->dst.dev;
    struct neighbour *n;
    unsigned mss;

    if (fi) {
        if (DN_FIB_RES_GW(*res) &&
            DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
            rt->rt_gateway = DN_FIB_RES_GW(*res);
        memcpy(rt->dst.metrics, fi->fib_metrics,
               sizeof(rt->dst.metrics));
    }
    rt->rt_type = res->type;

    if (dev != NULL && rt->dst.neighbour == NULL) {
        n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
        if (IS_ERR(n))
            return PTR_ERR(n);
        rt->dst.neighbour = n;
    }

    if (dst_metric(&rt->dst, RTAX_MTU) == 0 ||
        dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
        rt->dst.metrics[RTAX_MTU-1] = rt->dst.dev->mtu;
    mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
    if (dst_metric(&rt->dst, RTAX_ADVMSS) == 0 ||
        dst_metric(&rt->dst, RTAX_ADVMSS) > mss)
        rt->dst.metrics[RTAX_ADVMSS-1] = mss;
    return 0;
}
static inline int dn_match_addr(__le16 addr1, __le16 addr2)
{
    __u16 tmp = le16_to_cpu(addr1) ^ le16_to_cpu(addr2);
    int match = 16;
    while (tmp) {
        tmp >>= 1;
        match--;
    }
    return match;
}
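
/*
 * dn_match_addr() returns the number of leading bits the two addresses
 * share, from 16 (identical) down to 0.  Worked example: for 0x0401 and
 * 0x0403 the XOR is 0x0002, which takes two shifts to clear, so the
 * result is 14 - the top 14 bits agree.
 */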
static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
{
    __le16 saddr = 0;
    struct dn_dev *dn_db;
    struct dn_ifaddr *ifa;
    int best_match = 0;
    int ret;

    rcu_read_lock();
    dn_db = rcu_dereference(dev->dn_ptr);
    for (ifa = rcu_dereference(dn_db->ifa_list);
         ifa != NULL;
         ifa = rcu_dereference(ifa->ifa_next)) {
        if (ifa->ifa_scope > scope)
            continue;
        if (!daddr) {
            saddr = ifa->ifa_local;
            break;
        }
        ret = dn_match_addr(daddr, ifa->ifa_local);
        if (ret > best_match) {
            /* Remember the score; without this, every later
             * address with any match at all would overwrite
             * the best one found so far. */
            best_match = ret;
            saddr = ifa->ifa_local;
        }
        if (best_match == 0)
            saddr = ifa->ifa_local;
    }
    rcu_read_unlock();

    return saddr;
}

static inline __le16 __dn_fib_res_prefsrc(struct dn_fib_res *res)
{
    return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope);
}

static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_res *res)
{
    __le16 mask = dnet_make_mask(res->prefixlen);
    return (daddr & ~mask) | res->fi->fib_nh->nh_gw;
}
static int dn_route_output_slow(struct dst_entry **pprt, const struct flowi *oldflp, int try_hard)
{
    struct flowi fl = { .fld_dst = oldflp->fld_dst,
                        .fld_src = oldflp->fld_src,
                        .fld_scope = RT_SCOPE_UNIVERSE,
                        .mark = oldflp->mark,
                        .iif = init_net.loopback_dev->ifindex,
                        .oif = oldflp->oif };
    struct dn_route *rt = NULL;
    struct net_device *dev_out = NULL, *dev;
    struct neighbour *neigh = NULL;
    unsigned hash;
    unsigned flags = 0;
    struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
    int err;
    int free_res = 0;
    __le16 gateway = 0;

    if (decnet_debug_level & 16)
        printk(KERN_DEBUG
               "dn_route_output_slow: dst=%04x src=%04x mark=%d"
               " iif=%d oif=%d\n", le16_to_cpu(oldflp->fld_dst),
               le16_to_cpu(oldflp->fld_src),
               oldflp->mark, init_net.loopback_dev->ifindex, oldflp->oif);

    /* If we have an output interface, verify it's a DECnet device */
    if (oldflp->oif) {
        dev_out = dev_get_by_index(&init_net, oldflp->oif);
        err = -ENODEV;
        if (dev_out && dev_out->dn_ptr == NULL) {
            dev_put(dev_out);
            dev_out = NULL;
        }
        if (dev_out == NULL)
            goto out;
    }

    /* If we have a source address, verify that it's a local address */
    if (oldflp->fld_src) {
        err = -EADDRNOTAVAIL;

        if (dev_out) {
            if (dn_dev_islocal(dev_out, oldflp->fld_src))
                goto source_ok;
            dev_put(dev_out);
            goto out;
        }
        rcu_read_lock();
        for_each_netdev_rcu(&init_net, dev) {
            if (!dev->dn_ptr)
                continue;
            if (!dn_dev_islocal(dev, oldflp->fld_src))
                continue;
            if ((dev->flags & IFF_LOOPBACK) &&
                oldflp->fld_dst &&
                !dn_dev_islocal(dev, oldflp->fld_dst))
                continue;

            dev_out = dev;
            break;
        }
        rcu_read_unlock();
        if (dev_out == NULL)
            goto out;
        dev_hold(dev_out);
source_ok:
        ;
    }

    /* No destination? Assume it's local */
    if (!fl.fld_dst) {
        fl.fld_dst = fl.fld_src;

        err = -EADDRNOTAVAIL;
        if (dev_out)
            dev_put(dev_out);
        dev_out = init_net.loopback_dev;
        dev_hold(dev_out);
        if (!fl.fld_dst) {
            fl.fld_dst =
            fl.fld_src = dnet_select_source(dev_out, 0,
                                            RT_SCOPE_HOST);
            if (!fl.fld_dst)
                goto out;
        }
        fl.oif = init_net.loopback_dev->ifindex;
        res.type = RTN_LOCAL;
        goto make_route;
    }

    if (decnet_debug_level & 16)
        printk(KERN_DEBUG
               "dn_route_output_slow: initial checks complete."
               " dst=%04x src=%04x oif=%d try_hard=%d\n",
               le16_to_cpu(fl.fld_dst), le16_to_cpu(fl.fld_src),
               fl.oif, try_hard);

    /*
     * N.B. If the kernel is compiled without router support then
     * dn_fib_lookup() will evaluate to non-zero so this if () block
     * will always be executed.
     */
    err = -ESRCH;
    if (try_hard || (err = dn_fib_lookup(&fl, &res)) != 0) {
        struct dn_dev *dn_db;
        if (err != -ESRCH)
            goto out;
        /*
         * Here the fallback is basically the standard algorithm for
         * routing in endnodes which is described in the DECnet routing
         * docs
         *
         * If we are not trying hard, look in neighbour cache.
         * The result is tested to ensure that if a specific output
         * device/source address was requested, then we honour that
         * here
         */
        if (!try_hard) {
            neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fl.fld_dst);
            if (neigh) {
                if ((oldflp->oif &&
                     (neigh->dev->ifindex != oldflp->oif)) ||
                    (oldflp->fld_src &&
                     (!dn_dev_islocal(neigh->dev,
                                      oldflp->fld_src)))) {
                    neigh_release(neigh);
                    neigh = NULL;
                } else {
                    if (dev_out)
                        dev_put(dev_out);
                    if (dn_dev_islocal(neigh->dev, fl.fld_dst)) {
                        dev_out = init_net.loopback_dev;
                        res.type = RTN_LOCAL;
                    } else {
                        dev_out = neigh->dev;
                    }
                    dev_hold(dev_out);
                    goto select_source;
                }
            }
        }

        /* Not there? Perhaps it's a local address */
        if (dev_out == NULL)
            dev_out = dn_dev_get_default();
        err = -ENODEV;
        if (dev_out == NULL)
            goto out;
        dn_db = rcu_dereference_raw(dev_out->dn_ptr);
        /* Possible improvement - check all devices for local addr */
        if (dn_dev_islocal(dev_out, fl.fld_dst)) {
            dev_put(dev_out);
            dev_out = init_net.loopback_dev;
            dev_hold(dev_out);
            res.type = RTN_LOCAL;
            goto select_source;
        }
        /* Not local either.... try sending it to the default router */
        neigh = neigh_clone(dn_db->router);
        BUG_ON(neigh && neigh->dev != dev_out);

        /* Ok then, we assume it's directly connected and move on */
select_source:
        if (neigh)
            gateway = ((struct dn_neigh *)neigh)->addr;
        if (gateway == 0)
            gateway = fl.fld_dst;
        if (fl.fld_src == 0) {
            fl.fld_src = dnet_select_source(dev_out, gateway,
                                            res.type == RTN_LOCAL ?
                                            RT_SCOPE_HOST :
                                            RT_SCOPE_LINK);
            if (fl.fld_src == 0 && res.type != RTN_LOCAL)
                goto e_addr;
        }
        fl.oif = dev_out->ifindex;
        goto make_route;
    }
    free_res = 1;

    if (res.type == RTN_NAT)
        goto e_inval;

    if (res.type == RTN_LOCAL) {
        if (!fl.fld_src)
            fl.fld_src = fl.fld_dst;
        if (dev_out)
            dev_put(dev_out);
        dev_out = init_net.loopback_dev;
        dev_hold(dev_out);
        fl.oif = dev_out->ifindex;
        if (res.fi)
            dn_fib_info_put(res.fi);
        res.fi = NULL;
        goto make_route;
    }

    if (res.fi->fib_nhs > 1 && fl.oif == 0)
        dn_fib_select_multipath(&fl, &res);

    /*
     * We could add some logic to deal with default routes here and
     * get rid of some of the special casing above.
     */

    if (!fl.fld_src)
        fl.fld_src = DN_FIB_RES_PREFSRC(res);

    if (dev_out)
        dev_put(dev_out);
    dev_out = DN_FIB_RES_DEV(res);
    dev_hold(dev_out);
    fl.oif = dev_out->ifindex;
    gateway = DN_FIB_RES_GW(res);

make_route:
    if (dev_out->flags & IFF_LOOPBACK)
        flags |= RTCF_LOCAL;

    rt = dst_alloc(&dn_dst_ops);
    if (rt == NULL)
        goto e_nobufs;

    atomic_set(&rt->dst.__refcnt, 1);
    rt->dst.flags = DST_HOST;

    rt->fl.fld_src = oldflp->fld_src;
    rt->fl.fld_dst = oldflp->fld_dst;
    rt->fl.oif = oldflp->oif;
    rt->fl.iif = 0;
    rt->fl.mark = oldflp->mark;

    rt->rt_saddr = fl.fld_src;
    rt->rt_daddr = fl.fld_dst;
    rt->rt_gateway = gateway ? gateway : fl.fld_dst;
    rt->rt_local_src = fl.fld_src;

    rt->rt_dst_map = fl.fld_dst;
    rt->rt_src_map = fl.fld_src;

    rt->dst.dev = dev_out;
    dev_hold(dev_out);
    rt->dst.neighbour = neigh;
    neigh = NULL;

    rt->dst.lastuse = jiffies;
    rt->dst.output = dn_output;
    rt->dst.input = dn_rt_bug;
    rt->rt_flags = flags;
    if (flags & RTCF_LOCAL)
        rt->dst.input = dn_nsp_rx;

    err = dn_rt_set_next_hop(rt, &res);
    if (err)
        goto e_neighbour;

    hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
    dn_insert_route(rt, hash, (struct dn_route **)pprt);

done:
    if (neigh)
        neigh_release(neigh);
    if (free_res)
        dn_fib_res_put(&res);
    if (dev_out)
        dev_put(dev_out);
out:
    return err;

e_addr:
    err = -EADDRNOTAVAIL;
    goto done;
e_inval:
    err = -EINVAL;
    goto done;
e_nobufs:
    err = -ENOBUFS;
    goto done;
e_neighbour:
    dst_free(&rt->dst);
    goto e_nobufs;
}
/*
 * N.B. The flags may be moved into the flowi at some future stage.
 */
static int __dn_route_output_key(struct dst_entry **pprt, const struct flowi *flp, int flags)
{
    unsigned hash = dn_hash(flp->fld_src, flp->fld_dst);
    struct dn_route *rt = NULL;

    if (!(flags & MSG_TRYHARD)) {
        rcu_read_lock_bh();
        for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
             rt = rcu_dereference_bh(rt->dst.dn_next)) {
            if ((flp->fld_dst == rt->fl.fld_dst) &&
                (flp->fld_src == rt->fl.fld_src) &&
                (flp->mark == rt->fl.mark) &&
                dn_is_output_route(rt) &&
                (rt->fl.oif == flp->oif)) {
                dst_use(&rt->dst, jiffies);
                rcu_read_unlock_bh();
                *pprt = &rt->dst;
                return 0;
            }
        }
        rcu_read_unlock_bh();
    }

    return dn_route_output_slow(pprt, flp, flags);
}
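
/*
 * Fast path: an RCU walk of a single hash chain, matching on the full
 * (dst, src, mark, oif) output key.  Only on a cache miss, or when the
 * caller passes MSG_TRYHARD, do we fall back to dn_route_output_slow()
 * and its fib lookup.
 */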
static int dn_route_output_key(struct dst_entry **pprt, struct flowi *flp, int flags)
{
    int err;

    err = __dn_route_output_key(pprt, flp, flags);
    if (err == 0 && flp->proto) {
        err = xfrm_lookup(&init_net, pprt, flp, NULL, 0);
    }
    return err;
}

int dn_route_output_sock(struct dst_entry **pprt, struct flowi *fl, struct sock *sk, int flags)
{
    int err;

    err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
    if (err == 0 && fl->proto) {
        err = xfrm_lookup(&init_net, pprt, fl, sk,
                          (flags & MSG_DONTWAIT) ? 0 : XFRM_LOOKUP_WAIT);
    }
    return err;
}
static int dn_route_input_slow(struct sk_buff *skb)
{
    struct dn_route *rt = NULL;
    struct dn_skb_cb *cb = DN_SKB_CB(skb);
    struct net_device *in_dev = skb->dev;
    struct net_device *out_dev = NULL;
    struct dn_dev *dn_db;
    struct neighbour *neigh = NULL;
    unsigned hash;
    int flags = 0;
    __le16 gateway = 0;
    __le16 local_src = 0;
    struct flowi fl = { .fld_dst = cb->dst,
                        .fld_src = cb->src,
                        .fld_scope = RT_SCOPE_UNIVERSE,
                        .mark = skb->mark,
                        .iif = skb->dev->ifindex };
    struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
    int err = -EINVAL;
    int free_res = 0;

    dev_hold(in_dev);

    if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
        goto out;

    /* Zero source addresses are not allowed */
    if (fl.fld_src == 0)
        goto out;

    /*
     * In this case we've just received a packet from a source
     * outside ourselves pretending to come from us. We don't
     * allow it any further to prevent routing loops, spoofing and
     * other nasties. Loopback packets already have the dst attached
     * so this only affects packets which have originated elsewhere.
     */
    err = -ENOTUNIQ;
    if (dn_dev_islocal(in_dev, cb->src))
        goto out;

    err = dn_fib_lookup(&fl, &res);
    if (err) {
        if (err != -ESRCH)
            goto out;
        /*
         * Is the destination us ?
         */
        if (!dn_dev_islocal(in_dev, cb->dst))
            goto e_inval;

        res.type = RTN_LOCAL;
    } else {
        __le16 src_map = fl.fld_src;
        free_res = 1;

        out_dev = DN_FIB_RES_DEV(res);
        if (out_dev == NULL) {
            if (net_ratelimit())
                printk(KERN_CRIT "Bug in dn_route_input_slow() "
                       "No output device\n");
            goto e_inval;
        }
        dev_hold(out_dev);

        if (res.r)
            src_map = fl.fld_src; /* no NAT support for now */

        gateway = DN_FIB_RES_GW(res);
        if (res.type == RTN_NAT) {
            fl.fld_dst = dn_fib_rules_map_destination(fl.fld_dst, &res);
            dn_fib_res_put(&res);
            free_res = 0;
            if (dn_fib_lookup(&fl, &res))
                goto e_inval;
            free_res = 1;
            if (res.type != RTN_UNICAST)
                goto e_inval;
            flags |= RTCF_DNAT;
            gateway = fl.fld_dst;
        }
        fl.fld_src = src_map;
    }

    switch (res.type) {
    case RTN_UNICAST:
        /*
         * Forwarding check here, we only check for forwarding
         * being turned off, if you want to only forward intra
         * area, it's up to you to set the routing tables up
         * correctly.
         */
        if (dn_db->parms.forwarding == 0)
            goto e_inval;

        if (res.fi->fib_nhs > 1 && fl.oif == 0)
            dn_fib_select_multipath(&fl, &res);

        /*
         * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
         * flag as a hint to set the intra-ethernet bit when
         * forwarding. If we've got NAT in operation, we don't do
         * this optimisation.
         */
        if (out_dev == in_dev && !(flags & RTCF_NAT))
            flags |= RTCF_DOREDIRECT;

        local_src = DN_FIB_RES_PREFSRC(res);
        /* fall through */

    case RTN_BLACKHOLE:
    case RTN_UNREACHABLE:
        break;
    case RTN_LOCAL:
        flags |= RTCF_LOCAL;
        fl.fld_src = cb->dst;
        fl.fld_dst = cb->src;

        /* Routing tables gave us a gateway */
        if (gateway)
            goto make_route;

        /* Packet was intra-ethernet, so we know it's on-link */
        if (cb->rt_flags & DN_RT_F_IE) {
            gateway = cb->src;
            flags |= RTCF_DIRECTSRC;
            goto make_route;
        }

        /* Use the default router if there is one */
        neigh = neigh_clone(dn_db->router);
        if (neigh) {
            gateway = ((struct dn_neigh *)neigh)->addr;
            goto make_route;
        }

        /* Close eyes and pray */
        gateway = cb->src;
        flags |= RTCF_DIRECTSRC;
        goto make_route;
    default:
        goto e_inval;
    }

make_route:
    rt = dst_alloc(&dn_dst_ops);
    if (rt == NULL)
        goto e_nobufs;

    rt->rt_saddr = fl.fld_src;
    rt->rt_daddr = fl.fld_dst;
    rt->rt_gateway = fl.fld_dst;
    if (gateway)
        rt->rt_gateway = gateway;
    rt->rt_local_src = local_src ? local_src : rt->rt_saddr;

    rt->rt_dst_map = fl.fld_dst;
    rt->rt_src_map = fl.fld_src;

    rt->fl.fld_src = cb->src;
    rt->fl.fld_dst = cb->dst;
    rt->fl.oif = 0;
    rt->fl.iif = in_dev->ifindex;
    rt->fl.mark = fl.mark;

    rt->dst.flags = DST_HOST;
    rt->dst.neighbour = neigh;
    rt->dst.dev = out_dev;
    rt->dst.lastuse = jiffies;
    rt->dst.output = dn_rt_bug;
    switch (res.type) {
    case RTN_UNICAST:
        rt->dst.input = dn_forward;
        break;
    case RTN_LOCAL:
        rt->dst.output = dn_output;
        rt->dst.input = dn_nsp_rx;
        rt->dst.dev = in_dev;
        flags |= RTCF_LOCAL;
        break;
    default:
    case RTN_UNREACHABLE:
    case RTN_BLACKHOLE:
        rt->dst.input = dst_discard;
    }
    rt->rt_flags = flags;
    if (rt->dst.dev)
        dev_hold(rt->dst.dev);

    err = dn_rt_set_next_hop(rt, &res);
    if (err)
        goto e_neighbour;

    hash = dn_hash(rt->fl.fld_src, rt->fl.fld_dst);
    dn_insert_route(rt, hash, &rt);
    skb_dst_set(skb, &rt->dst);

done:
    if (neigh)
        neigh_release(neigh);
    if (free_res)
        dn_fib_res_put(&res);
    dev_put(in_dev);
    if (out_dev)
        dev_put(out_dev);
out:
    return err;

e_inval:
    err = -EINVAL;
    goto done;

e_nobufs:
    err = -ENOBUFS;
    goto done;

e_neighbour:
    dst_free(&rt->dst);
    goto done;
}
static int dn_route_input(struct sk_buff *skb)
{
    struct dn_route *rt;
    struct dn_skb_cb *cb = DN_SKB_CB(skb);
    unsigned hash = dn_hash(cb->src, cb->dst);

    if (skb_dst(skb))
        return 0;

    rcu_read_lock();
    for (rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
         rt = rcu_dereference(rt->dst.dn_next)) {
        if ((rt->fl.fld_src == cb->src) &&
            (rt->fl.fld_dst == cb->dst) &&
            (rt->fl.oif == 0) &&
            (rt->fl.mark == skb->mark) &&
            (rt->fl.iif == cb->iif)) {
            dst_use(&rt->dst, jiffies);
            rcu_read_unlock();
            skb_dst_set(skb, (struct dst_entry *)rt);
            return 0;
        }
    }
    rcu_read_unlock();

    return dn_route_input_slow(skb);
}
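
/*
 * Input and output entries share the same hash table.  An input entry is
 * distinguished by oif == 0 and a nonzero iif (the output path sets
 * iif = 0 when it builds a route), which is why the lookup above insists
 * on both conditions in addition to the address/mark key.
 */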
static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
                           int event, int nowait, unsigned int flags)
{
    struct dn_route *rt = (struct dn_route *)skb_dst(skb);
    struct rtmsg *r;
    struct nlmsghdr *nlh;
    unsigned char *b = skb_tail_pointer(skb);
    long expires;

    nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*r), flags);
    r = NLMSG_DATA(nlh);
    r->rtm_family = AF_DECnet;
    r->rtm_dst_len = 16;
    r->rtm_src_len = 0;
    r->rtm_tos = 0;
    r->rtm_table = RT_TABLE_MAIN;
    RTA_PUT_U32(skb, RTA_TABLE, RT_TABLE_MAIN);
    r->rtm_type = rt->rt_type;
    r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
    r->rtm_scope = RT_SCOPE_UNIVERSE;
    r->rtm_protocol = RTPROT_UNSPEC;
    if (rt->rt_flags & RTCF_NOTIFY)
        r->rtm_flags |= RTM_F_NOTIFY;
    RTA_PUT(skb, RTA_DST, 2, &rt->rt_daddr);
    if (rt->fl.fld_src) {
        r->rtm_src_len = 16;
        RTA_PUT(skb, RTA_SRC, 2, &rt->fl.fld_src);
    }
    if (rt->dst.dev)
        RTA_PUT(skb, RTA_OIF, sizeof(int), &rt->dst.dev->ifindex);
    /*
     * Note to self - change this if input routes reverse direction when
     * they deal only with inputs and not with replies like they do
     * currently.
     */
    RTA_PUT(skb, RTA_PREFSRC, 2, &rt->rt_local_src);
    if (rt->rt_daddr != rt->rt_gateway)
        RTA_PUT(skb, RTA_GATEWAY, 2, &rt->rt_gateway);
    if (rtnetlink_put_metrics(skb, rt->dst.metrics) < 0)
        goto rtattr_failure;
    expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
    if (rtnl_put_cacheinfo(skb, &rt->dst, 0, 0, 0, expires,
                           rt->dst.error) < 0)
        goto rtattr_failure;
    if (dn_is_input_route(rt))
        RTA_PUT(skb, RTA_IIF, sizeof(int), &rt->fl.iif);

    nlh->nlmsg_len = skb_tail_pointer(skb) - b;
    return skb->len;

nlmsg_failure:
rtattr_failure:
    nlmsg_trim(skb, b);
    return -1;
}
/*
 * This is called by both endnodes and routers now.
 */
static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
    struct net *net = sock_net(in_skb->sk);
    struct rtattr **rta = arg;
    struct rtmsg *rtm = NLMSG_DATA(nlh);
    struct dn_route *rt = NULL;
    struct dn_skb_cb *cb;
    int err;
    struct sk_buff *skb;
    struct flowi fl;

    if (!net_eq(net, &init_net))
        return -EINVAL;

    memset(&fl, 0, sizeof(fl));
    fl.proto = DNPROTO_NSP;

    skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
    if (skb == NULL)
        return -ENOBUFS;
    skb_reset_mac_header(skb);
    cb = DN_SKB_CB(skb);

    if (rta[RTA_SRC-1])
        memcpy(&fl.fld_src, RTA_DATA(rta[RTA_SRC-1]), 2);
    if (rta[RTA_DST-1])
        memcpy(&fl.fld_dst, RTA_DATA(rta[RTA_DST-1]), 2);
    if (rta[RTA_IIF-1])
        memcpy(&fl.iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));

    if (fl.iif) {
        struct net_device *dev;
        if ((dev = dev_get_by_index(&init_net, fl.iif)) == NULL) {
            kfree_skb(skb);
            return -ENODEV;
        }
        if (!dev->dn_ptr) {
            dev_put(dev);
            kfree_skb(skb);
            return -ENODEV;
        }
        skb->protocol = htons(ETH_P_DNA_RT);
        skb->dev = dev;
        cb->src = fl.fld_src;
        cb->dst = fl.fld_dst;
        local_bh_disable();
        err = dn_route_input(skb);
        local_bh_enable();
        memset(cb, 0, sizeof(struct dn_skb_cb));
        rt = (struct dn_route *)skb_dst(skb);
        if (!err && rt->dst.error)
            err = rt->dst.error;
    } else {
        int oif = 0;
        if (rta[RTA_OIF - 1])
            memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
        fl.oif = oif;
        err = dn_route_output_key((struct dst_entry **)&rt, &fl, 0);
    }

    if (skb->dev)
        dev_put(skb->dev);
    skb->dev = NULL;
    if (err)
        goto out_free;
    skb_dst_set(skb, &rt->dst);
    if (rtm->rtm_flags & RTM_F_NOTIFY)
        rt->rt_flags |= RTCF_NOTIFY;

    err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);

    if (err == 0)
        goto out_free;
    if (err < 0) {
        err = -EMSGSIZE;
        goto out_free;
    }

    return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid);

out_free:
    kfree_skb(skb);
    return err;
}
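
/*
 * For an RTM_GETROUTE request with an input interface, the code above
 * fabricates a dummy skb that appears to have arrived on that interface
 * and pushes it through dn_route_input(), so the answer reflects exactly
 * the decision the real input path would make.  Requests without an iif
 * go through the ordinary output key lookup instead.
 */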
/*
 * For routers, this is called from dn_fib_dump, but for endnodes it's
 * called directly from the rtnetlink dispatch table.
 */
int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
    struct net *net = sock_net(skb->sk);
    struct dn_route *rt;
    int h, s_h;
    int idx, s_idx;

    if (!net_eq(net, &init_net))
        return 0;

    if (NLMSG_PAYLOAD(cb->nlh, 0) < sizeof(struct rtmsg))
        return -EINVAL;
    if (!(((struct rtmsg *)NLMSG_DATA(cb->nlh))->rtm_flags & RTM_F_CLONED))
        return 0;

    s_h = cb->args[0];
    s_idx = idx = cb->args[1];
    for (h = 0; h <= dn_rt_hash_mask; h++) {
        if (h < s_h)
            continue;
        if (h > s_h)
            s_idx = 0;
        rcu_read_lock_bh();
        for (rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
             rt;
             rt = rcu_dereference_bh(rt->dst.dn_next), idx++) {
            if (idx < s_idx)
                continue;
            skb_dst_set(skb, dst_clone(&rt->dst));
            if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
                                cb->nlh->nlmsg_seq, RTM_NEWROUTE,
                                1, NLM_F_MULTI) <= 0) {
                skb_dst_drop(skb);
                rcu_read_unlock_bh();
                goto done;
            }
            skb_dst_drop(skb);
        }
        rcu_read_unlock_bh();
    }

done:
    cb->args[0] = h;
    cb->args[1] = idx;
    return skb->len;
}
#ifdef CONFIG_PROC_FS
struct dn_rt_cache_iter_state {
    int bucket;
};

static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
{
    struct dn_route *rt = NULL;
    struct dn_rt_cache_iter_state *s = seq->private;

    for (s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
        rcu_read_lock_bh();
        rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
        if (rt)
            break;
        rcu_read_unlock_bh();
    }
    return rt;
}

static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
{
    struct dn_rt_cache_iter_state *s = seq->private;

    rt = rcu_dereference_bh(rt->dst.dn_next);
    while (!rt) {
        rcu_read_unlock_bh();
        if (--s->bucket < 0)
            break;
        rcu_read_lock_bh();
        rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
    }
    return rt;
}

static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
    struct dn_route *rt = dn_rt_cache_get_first(seq);

    if (rt) {
        while (*pos && (rt = dn_rt_cache_get_next(seq, rt)))
            --*pos;
    }
    return *pos ? NULL : rt;
}

static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
    struct dn_route *rt = dn_rt_cache_get_next(seq, v);
    ++*pos;
    return rt;
}

static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)
{
    if (v)
        rcu_read_unlock_bh();
}

static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
{
    struct dn_route *rt = v;
    char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];

    seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
               rt->dst.dev ? rt->dst.dev->name : "*",
               dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
               dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
               atomic_read(&rt->dst.__refcnt),
               rt->dst.__use,
               (int) dst_metric(&rt->dst, RTAX_RTT));
    return 0;
}

static const struct seq_operations dn_rt_cache_seq_ops = {
    .start = dn_rt_cache_seq_start,
    .next  = dn_rt_cache_seq_next,
    .stop  = dn_rt_cache_seq_stop,
    .show  = dn_rt_cache_seq_show,
};

static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
{
    return seq_open_private(file, &dn_rt_cache_seq_ops,
                            sizeof(struct dn_rt_cache_iter_state));
}

static const struct file_operations dn_rt_cache_seq_fops = {
    .owner   = THIS_MODULE,
    .open    = dn_rt_cache_seq_open,
    .read    = seq_read,
    .llseek  = seq_lseek,
    .release = seq_release_private,
};

#endif /* CONFIG_PROC_FS */
void __init dn_route_init(void)
{
    int i, goal, order;

    dn_dst_ops.kmem_cachep =
        kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
                          SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
    dst_entries_init(&dn_dst_ops);
    setup_timer(&dn_route_timer, dn_dst_check_expire, 0);
    dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
    add_timer(&dn_route_timer);

    goal = totalram_pages >> (26 - PAGE_SHIFT);

    for (order = 0; (1UL << order) < goal; order++)
        /* NOTHING */;

    /*
     * Only want 1024 entries max, since the table is very, very unlikely
     * to be larger than that. (The power-of-two rounding below turns
     * anything under 2048 into at most 1024 buckets.)
     */
    while (order && ((((1UL << order) * PAGE_SIZE) /
                      sizeof(struct dn_rt_hash_bucket)) >= 2048))
        order--;

    do {
        dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
                          sizeof(struct dn_rt_hash_bucket);
        while (dn_rt_hash_mask & (dn_rt_hash_mask - 1))
            dn_rt_hash_mask--;
        dn_rt_hash_table = (struct dn_rt_hash_bucket *)
            __get_free_pages(GFP_ATOMIC, order);
    } while (dn_rt_hash_table == NULL && --order > 0);

    if (!dn_rt_hash_table)
        panic("Failed to allocate DECnet route cache hash table\n");

    printk(KERN_INFO
           "DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
           dn_rt_hash_mask,
           (long)(dn_rt_hash_mask * sizeof(struct dn_rt_hash_bucket)) / 1024);

    dn_rt_hash_mask--;
    for (i = 0; i <= dn_rt_hash_mask; i++) {
        spin_lock_init(&dn_rt_hash_table[i].lock);
        dn_rt_hash_table[i].chain = NULL;
    }

    dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);

    proc_net_fops_create(&init_net, "decnet_cache", S_IRUGO, &dn_rt_cache_seq_fops);

#ifdef CONFIG_DECNET_ROUTER
    rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute, dn_fib_dump);
#else
    rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
                  dn_cache_dump);
#endif
}
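
/*
 * Sizing example for the arithmetic in dn_route_init(), assuming 4KB
 * pages (PAGE_SHIFT = 12) and a 16-byte dn_rt_hash_bucket on 64-bit:
 * goal = totalram_pages >> 14, i.e. one page of bucket space per 64MB of
 * RAM.  A 256MB machine gives goal = 4, hence order = 2, and
 * 4 pages * 4096 / 16 = 1024 buckets, which is already a power of two
 * and becomes a mask of 1023 after the final decrement.
 */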
void __exit dn_route_cleanup(void)
{
    del_timer(&dn_route_timer);
    dn_run_flush(0);

    proc_net_remove(&init_net, "decnet_cache");
    dst_entries_destroy(&dn_dst_ops);
}