dn_route.c

/*
 * DECnet       An implementation of the DECnet protocol suite for the LINUX
 *              operating system.  DECnet is implemented using the  BSD Socket
 *              interface as the means of communication with the user level.
 *
 *              DECnet Routing Functions (Endnode and Router)
 *
 * Authors:     Steve Whitehouse <SteveW@ACM.org>
 *              Eduardo Marcelo Serrat <emserrat@geocities.com>
 *
 * Changes:
 *              Steve Whitehouse : Fixes to allow "intra-ethernet" and
 *                                 "return-to-sender" bits on outgoing
 *                                 packets.
 *              Steve Whitehouse : Timeouts for cached routes.
 *              Steve Whitehouse : Use dst cache for input routes too.
 *              Steve Whitehouse : Fixed error values in dn_send_skb.
 *              Steve Whitehouse : Rework routing functions to better fit
 *                                 DECnet routing design
 *              Alexey Kuznetsov : New SMP locking
 *              Steve Whitehouse : More SMP locking changes & dn_cache_dump()
 *              Steve Whitehouse : Prerouting NF hook, now really is prerouting.
 *                                 Fixed possible skb leak in rtnetlink funcs.
 *              Steve Whitehouse : Dave Miller's dynamic hash table sizing and
 *                                 Alexey Kuznetsov's finer grained locking
 *                                 from ipv4/route.c.
 *              Steve Whitehouse : Routing is now starting to look like a
 *                                 sensible set of code now, mainly due to
 *                                 my copying the IPv4 routing code. The
 *                                 hooks here are modified and will continue
 *                                 to evolve for a while.
 *              Steve Whitehouse : Real SMP at last :-) Also new netfilter
 *                                 stuff. Look out raw sockets your days
 *                                 are numbered!
 *              Steve Whitehouse : Added return-to-sender functions. Added
 *                                 backlog congestion level return codes.
 *              Steve Whitehouse : Fixed bug where routes were set up with
 *                                 no ref count on net devices.
 *              Steve Whitehouse : RCU for the route cache
 *              Steve Whitehouse : Preparations for the flow cache
 *              Steve Whitehouse : Prepare for nonlinear skbs
 */

/******************************************************************************
    (c) 1995-1998 E.M. Serrat		emserrat@geocities.com

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
    GNU General Public License for more details.
*******************************************************************************/

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/kernel.h>
#include <linux/sockios.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/inet.h>
#include <linux/route.h>
#include <linux/in_route.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <linux/mm.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/rtnetlink.h>
#include <linux/string.h>
#include <linux/netfilter_decnet.h>
#include <linux/rcupdate.h>
#include <linux/times.h>
#include <linux/export.h>
#include <asm/errno.h>
#include <net/net_namespace.h>
#include <net/netlink.h>
#include <net/neighbour.h>
#include <net/dst.h>
#include <net/flow.h>
#include <net/fib_rules.h>
#include <net/dn.h>
#include <net/dn_dev.h>
#include <net/dn_nsp.h>
#include <net/dn_route.h>
#include <net/dn_neigh.h>
#include <net/dn_fib.h>

struct dn_rt_hash_bucket
{
	struct dn_route __rcu *chain;
	spinlock_t lock;
};

extern struct neigh_table dn_neigh_table;

static unsigned char dn_hiord_addr[6] = {0xAA,0x00,0x04,0x00,0x00,0x00};

static const int dn_rt_min_delay = 2 * HZ;
static const int dn_rt_max_delay = 10 * HZ;
static const int dn_rt_mtu_expires = 10 * 60 * HZ;

static unsigned long dn_rt_deadline;

static int dn_dst_gc(struct dst_ops *ops);
static struct dst_entry *dn_dst_check(struct dst_entry *, __u32);
static unsigned int dn_dst_default_advmss(const struct dst_entry *dst);
static unsigned int dn_dst_mtu(const struct dst_entry *dst);
static void dn_dst_destroy(struct dst_entry *);
static void dn_dst_ifdown(struct dst_entry *, struct net_device *dev, int how);
static struct dst_entry *dn_dst_negative_advice(struct dst_entry *);
static void dn_dst_link_failure(struct sk_buff *);
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu);
static void dn_dst_redirect(struct dst_entry *dst, struct sk_buff *skb);
static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
					     struct sk_buff *skb,
					     const void *daddr);
static int dn_route_input(struct sk_buff *);
static void dn_run_flush(unsigned long dummy);

static struct dn_rt_hash_bucket *dn_rt_hash_table;
static unsigned int dn_rt_hash_mask;

static struct timer_list dn_route_timer;
static DEFINE_TIMER(dn_rt_flush_timer, dn_run_flush, 0, 0);
int decnet_dst_gc_interval = 2;

static struct dst_ops dn_dst_ops = {
	.family =		PF_DECnet,
	.protocol =		cpu_to_be16(ETH_P_DNA_RT),
	.gc_thresh =		128,
	.gc =			dn_dst_gc,
	.check =		dn_dst_check,
	.default_advmss =	dn_dst_default_advmss,
	.mtu =			dn_dst_mtu,
	.cow_metrics =		dst_cow_metrics_generic,
	.destroy =		dn_dst_destroy,
	.ifdown =		dn_dst_ifdown,
	.negative_advice =	dn_dst_negative_advice,
	.link_failure =		dn_dst_link_failure,
	.update_pmtu =		dn_dst_update_pmtu,
	.redirect =		dn_dst_redirect,
	.neigh_lookup =		dn_dst_neigh_lookup,
};

static void dn_dst_destroy(struct dst_entry *dst)
{
	struct dn_route *rt = (struct dn_route *) dst;

	if (rt->n)
		neigh_release(rt->n);
	dst_destroy_metrics_generic(dst);
}

static void dn_dst_ifdown(struct dst_entry *dst, struct net_device *dev, int how)
{
	if (how) {
		struct dn_route *rt = (struct dn_route *) dst;
		struct neighbour *n = rt->n;

		if (n && n->dev == dev) {
			n->dev = dev_net(dev)->loopback_dev;
			dev_hold(n->dev);
			dev_put(dev);
		}
	}
}

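/*
 * Hash a (source, destination) address pair into the route cache table.
 * The two 16 bit addresses are XORed together and then folded so that
 * all of the input bits influence the low order bits before the table
 * mask is applied.
 */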
static __inline__ unsigned int dn_hash(__le16 src, __le16 dst)
{
	__u16 tmp = (__u16 __force)(src ^ dst);
	tmp ^= (tmp >> 3);
	tmp ^= (tmp >> 5);
	tmp ^= (tmp >> 10);
	return dn_rt_hash_mask & (unsigned int)tmp;
}

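/*
 * dnrt_free() defers freeing of a cache entry until after an RCU (bh)
 * grace period. dnrt_drop() does the same, but additionally releases
 * the reference which the cache itself held on the entry.
 */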
static inline void dnrt_free(struct dn_route *rt)
{
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static inline void dnrt_drop(struct dn_route *rt)
{
	dst_release(&rt->dst);
	call_rcu_bh(&rt->dst.rcu_head, dst_rcu_free);
}

static void dn_dst_check_expire(unsigned long dummy)
{
	int i;
	struct dn_route *rt;
	struct dn_route __rcu **rtp;
	unsigned long now = jiffies;
	unsigned long expire = 120 * HZ;

	for (i = 0; i <= dn_rt_hash_mask; i++) {
		rtp = &dn_rt_hash_table[i].chain;

		spin_lock(&dn_rt_hash_table[i].lock);
		while ((rt = rcu_dereference_protected(*rtp,
				lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
			if (atomic_read(&rt->dst.__refcnt) ||
			    (now - rt->dst.lastuse) < expire) {
				rtp = &rt->dst.dn_next;
				continue;
			}
			*rtp = rt->dst.dn_next;
			rt->dst.dn_next = NULL;
			dnrt_free(rt);
		}
		spin_unlock(&dn_rt_hash_table[i].lock);

		if ((jiffies - now) > 0)
			break;
	}

	mod_timer(&dn_route_timer, now + decnet_dst_gc_interval * HZ);
}

static int dn_dst_gc(struct dst_ops *ops)
{
	struct dn_route *rt;
	struct dn_route __rcu **rtp;
	int i;
	unsigned long now = jiffies;
	unsigned long expire = 10 * HZ;

	for (i = 0; i <= dn_rt_hash_mask; i++) {
		spin_lock_bh(&dn_rt_hash_table[i].lock);
		rtp = &dn_rt_hash_table[i].chain;

		while ((rt = rcu_dereference_protected(*rtp,
				lockdep_is_held(&dn_rt_hash_table[i].lock))) != NULL) {
			if (atomic_read(&rt->dst.__refcnt) ||
			    (now - rt->dst.lastuse) < expire) {
				rtp = &rt->dst.dn_next;
				continue;
			}
			*rtp = rt->dst.dn_next;
			rt->dst.dn_next = NULL;
			dnrt_drop(rt);
			break;
		}
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
	}

	return 0;
}

/*
 * The decnet standards don't impose a particular minimum mtu, what they
 * do insist on is that the routing layer accepts a datagram of at least
 * 230 bytes. Here we have to subtract the routing header length from
 * 230 to get the minimum acceptable mtu. If there is no neighbour, then we
 * assume the worst and use a long header size.
 *
 * We update both the mtu and the advertised mss (i.e. the segment size we
 * advertise to the other end).
 */
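/*
 * For example: with the long header format (21 bytes of routing header)
 * the minimum acceptable mtu works out as 230 - 21 = 209 bytes, while
 * with the short header (6 bytes) it is 230 - 6 = 224 bytes.
 */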
static void dn_dst_update_pmtu(struct dst_entry *dst, u32 mtu)
{
	struct dn_route *rt = (struct dn_route *) dst;
	struct neighbour *n = rt->n;
	u32 min_mtu = 230;
	struct dn_dev *dn;

	dn = n ? rcu_dereference_raw(n->dev->dn_ptr) : NULL;

	if (dn && dn->use_long == 0)
		min_mtu -= 6;
	else
		min_mtu -= 21;

	if (dst_metric(dst, RTAX_MTU) > mtu && mtu >= min_mtu) {
		if (!(dst_metric_locked(dst, RTAX_MTU))) {
			dst_metric_set(dst, RTAX_MTU, mtu);
			dst_set_expires(dst, dn_rt_mtu_expires);
		}
		if (!(dst_metric_locked(dst, RTAX_ADVMSS))) {
			u32 mss = mtu - DN_MAX_NSP_DATA_HEADER;
			u32 existing_mss = dst_metric_raw(dst, RTAX_ADVMSS);
			if (!existing_mss || existing_mss > mss)
				dst_metric_set(dst, RTAX_ADVMSS, mss);
		}
	}
}

static void dn_dst_redirect(struct dst_entry *dst, struct sk_buff *skb)
{
}

/*
 * Called when a route has been marked obsolete (e.g. by a routing cache
 * flush).
 */
static struct dst_entry *dn_dst_check(struct dst_entry *dst, __u32 cookie)
{
	return NULL;
}

static struct dst_entry *dn_dst_negative_advice(struct dst_entry *dst)
{
	dst_release(dst);
	return NULL;
}

static void dn_dst_link_failure(struct sk_buff *skb)
{
}

static inline int compare_keys(struct flowidn *fl1, struct flowidn *fl2)
{
	return ((fl1->daddr ^ fl2->daddr) |
		(fl1->saddr ^ fl2->saddr) |
		(fl1->flowidn_mark ^ fl2->flowidn_mark) |
		(fl1->flowidn_scope ^ fl2->flowidn_scope) |
		(fl1->flowidn_oif ^ fl2->flowidn_oif) |
		(fl1->flowidn_iif ^ fl2->flowidn_iif)) == 0;
}

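/*
 * Insert a route into the cache bucket given by @hash. If an entry with
 * an identical flow key is already present, that entry is promoted to
 * the front of the chain and reused, and the new route is dropped.
 */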
static int dn_insert_route(struct dn_route *rt, unsigned int hash, struct dn_route **rp)
{
	struct dn_route *rth;
	struct dn_route __rcu **rthp;
	unsigned long now = jiffies;

	rthp = &dn_rt_hash_table[hash].chain;

	spin_lock_bh(&dn_rt_hash_table[hash].lock);
	while ((rth = rcu_dereference_protected(*rthp,
			lockdep_is_held(&dn_rt_hash_table[hash].lock))) != NULL) {
		if (compare_keys(&rth->fld, &rt->fld)) {
			/* Put it first */
			*rthp = rth->dst.dn_next;
			rcu_assign_pointer(rth->dst.dn_next,
					   dn_rt_hash_table[hash].chain);
			rcu_assign_pointer(dn_rt_hash_table[hash].chain, rth);

			dst_use(&rth->dst, now);
			spin_unlock_bh(&dn_rt_hash_table[hash].lock);

			dnrt_drop(rt);
			*rp = rth;
			return 0;
		}
		rthp = &rth->dst.dn_next;
	}

	rcu_assign_pointer(rt->dst.dn_next, dn_rt_hash_table[hash].chain);
	rcu_assign_pointer(dn_rt_hash_table[hash].chain, rt);

	dst_use(&rt->dst, now);
	spin_unlock_bh(&dn_rt_hash_table[hash].lock);
	*rp = rt;
	return 0;
}

static void dn_run_flush(unsigned long dummy)
{
	int i;
	struct dn_route *rt, *next;

	/* The mask is inclusive, so every bucket (0..mask) must be flushed */
	for (i = 0; i <= dn_rt_hash_mask; i++) {
		spin_lock_bh(&dn_rt_hash_table[i].lock);

		if ((rt = xchg((struct dn_route **)&dn_rt_hash_table[i].chain, NULL)) == NULL)
			goto nothing_to_declare;

		for(; rt; rt = next) {
			next = rcu_dereference_raw(rt->dst.dn_next);
			RCU_INIT_POINTER(rt->dst.dn_next, NULL);
			dst_free((struct dst_entry *)rt);
		}

nothing_to_declare:
		spin_unlock_bh(&dn_rt_hash_table[i].lock);
	}
}

static DEFINE_SPINLOCK(dn_rt_flush_lock);

void dn_rt_cache_flush(int delay)
{
	unsigned long now = jiffies;
	int user_mode = !in_interrupt();

	if (delay < 0)
		delay = dn_rt_min_delay;

	spin_lock_bh(&dn_rt_flush_lock);

	if (del_timer(&dn_rt_flush_timer) && delay > 0 && dn_rt_deadline) {
		long tmo = (long)(dn_rt_deadline - now);

		if (user_mode && tmo < dn_rt_max_delay - dn_rt_min_delay)
			tmo = 0;

		if (delay > tmo)
			delay = tmo;
	}

	if (delay <= 0) {
		spin_unlock_bh(&dn_rt_flush_lock);
		dn_run_flush(0);
		return;
	}

	if (dn_rt_deadline == 0)
		dn_rt_deadline = now + dn_rt_max_delay;

	dn_rt_flush_timer.expires = now + delay;
	add_timer(&dn_rt_flush_timer);
	spin_unlock_bh(&dn_rt_flush_lock);
}

/**
 * dn_return_short - Return a short packet to its sender
 * @skb: The packet to return
 *
 */
static int dn_return_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	__le16 *src;
	__le16 *dst;

	/* Add back headers */
	skb_push(skb, skb->data - skb_network_header(skb));

	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Skip packet length and point to flags */
	ptr = skb->data + 2;
	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;

	dst = (__le16 *)ptr;
	ptr += 2;
	src = (__le16 *)ptr;
	ptr += 2;
	*ptr = 0; /* Zero hop count */

	swap(*src, *dst);

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, NULL, NULL);
	return NET_RX_SUCCESS;
}

/**
 * dn_return_long - Return a long packet to its sender
 * @skb: The long format packet to return
 *
 */
static int dn_return_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	unsigned char *ptr;
	unsigned char *src_addr, *dst_addr;
	unsigned char tmp[ETH_ALEN];

	/* Add back all headers */
	skb_push(skb, skb->data - skb_network_header(skb));

	if ((skb = skb_unshare(skb, GFP_ATOMIC)) == NULL)
		return NET_RX_DROP;

	cb = DN_SKB_CB(skb);
	/* Ignore packet length and point to flags */
	ptr = skb->data + 2;

	/* Skip padding */
	if (*ptr & DN_RT_F_PF) {
		char padlen = (*ptr & ~DN_RT_F_PF);
		ptr += padlen;
	}

	*ptr++ = (cb->rt_flags & ~DN_RT_F_RQR) | DN_RT_F_RTS;
	ptr += 2;
	dst_addr = ptr;
	ptr += 8;
	src_addr = ptr;
	ptr += 6;
	*ptr = 0; /* Zero hop count */

	/* Swap source and destination */
	memcpy(tmp, src_addr, ETH_ALEN);
	memcpy(src_addr, dst_addr, ETH_ALEN);
	memcpy(dst_addr, tmp, ETH_ALEN);

	skb->pkt_type = PACKET_OUTGOING;
	dn_rt_finish_output(skb, dst_addr, src_addr);
	return NET_RX_SUCCESS;
}

/**
 * dn_route_rx_packet - Try and find a route for an incoming packet
 * @skb: The packet to find a route for
 *
 * Returns: result of input function if route is found, error code otherwise
 */
static int dn_route_rx_packet(struct sk_buff *skb)
{
	struct dn_skb_cb *cb;
	int err;

	if ((err = dn_route_input(skb)) == 0)
		return dst_input(skb);

	cb = DN_SKB_CB(skb);
	if (decnet_debug_level & 4) {
		char *devname = skb->dev ? skb->dev->name : "???";

		printk(KERN_DEBUG
			"DECnet: dn_route_rx_packet: rt_flags=0x%02x dev=%s len=%d src=0x%04hx dst=0x%04hx err=%d type=%d\n",
			(int)cb->rt_flags, devname, skb->len,
			le16_to_cpu(cb->src), le16_to_cpu(cb->dst),
			err, skb->pkt_type);
	}

	if ((skb->pkt_type == PACKET_HOST) && (cb->rt_flags & DN_RT_F_RQR)) {
		switch (cb->rt_flags & DN_RT_PKT_MSK) {
		case DN_RT_PKT_SHORT:
			return dn_return_short(skb);
		case DN_RT_PKT_LONG:
			return dn_return_long(skb);
		}
	}

	kfree_skb(skb);
	return NET_RX_DROP;
}

static int dn_route_rx_long(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 21)) /* 20 for long header, 1 for shortest nsp */
		goto drop_it;

	skb_pull(skb, 20);
	skb_reset_transport_header(skb);

	/* Destination info */
	ptr += 2;
	cb->dst = dn_eth2dn(ptr);
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
		goto drop_it;
	ptr += 6;


	/* Source info */
	ptr += 2;
	cb->src = dn_eth2dn(ptr);
	if (memcmp(ptr, dn_hiord_addr, 4) != 0)
		goto drop_it;
	ptr += 6;
	/* Other junk */
	ptr++;
	cb->hops = *ptr++; /* Visit Count */

	return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
		       dn_route_rx_packet);

drop_it:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int dn_route_rx_short(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned char *ptr = skb->data;

	if (!pskb_may_pull(skb, 6)) /* 5 for short header + 1 for shortest nsp */
		goto drop_it;

	skb_pull(skb, 5);
	skb_reset_transport_header(skb);

	cb->dst = *(__le16 *)ptr;
	ptr += 2;
	cb->src = *(__le16 *)ptr;
	ptr += 2;
	cb->hops = *ptr & 0x3f;

	return NF_HOOK(NFPROTO_DECNET, NF_DN_PRE_ROUTING, skb, skb->dev, NULL,
		       dn_route_rx_packet);

drop_it:
	kfree_skb(skb);
	return NET_RX_DROP;
}

static int dn_route_discard(struct sk_buff *skb)
{
	/*
	 * I know we drop the packet here, but that's considered success in
	 * this case
	 */
	kfree_skb(skb);
	return NET_RX_SUCCESS;
}

static int dn_route_ptp_hello(struct sk_buff *skb)
{
	dn_dev_hello(skb);
	dn_neigh_pointopoint_hello(skb);
	return NET_RX_SUCCESS;
}

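/*
 * Main receive entry point for ETH_P_DNA_RT frames. Control packets
 * (hellos and routing messages) are dispatched to the device and
 * neighbour layers via the netfilter hooks; data packets are passed on
 * to dn_route_rx_long() or dn_route_rx_short() according to the header
 * format.
 */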
int dn_route_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *pt, struct net_device *orig_dev)
{
	struct dn_skb_cb *cb;
	unsigned char flags = 0;
	__u16 len = le16_to_cpu(*(__le16 *)skb->data);
	struct dn_dev *dn = rcu_dereference(dev->dn_ptr);
	unsigned char padlen = 0;

	if (!net_eq(dev_net(dev), &init_net))
		goto dump_it;

	if (dn == NULL)
		goto dump_it;

	if ((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)
		goto out;

	if (!pskb_may_pull(skb, 3))
		goto dump_it;

	skb_pull(skb, 2);

	if (len > skb->len)
		goto dump_it;

	skb_trim(skb, len);

	flags = *skb->data;

	cb = DN_SKB_CB(skb);
	cb->stamp = jiffies;
	cb->iif = dev->ifindex;

	/*
	 * If we have padding, remove it.
	 */
	if (flags & DN_RT_F_PF) {
		padlen = flags & ~DN_RT_F_PF;
		if (!pskb_may_pull(skb, padlen + 1))
			goto dump_it;
		skb_pull(skb, padlen);
		flags = *skb->data;
	}

	skb_reset_network_header(skb);

	/*
	 * Weed out future version DECnet
	 */
	if (flags & DN_RT_F_VER)
		goto dump_it;

	cb->rt_flags = flags;

	if (decnet_debug_level & 1)
		printk(KERN_DEBUG
		       "dn_route_rcv: got 0x%02x from %s [%d %d %d]\n",
		       (int)flags, (dev) ? dev->name : "???", len, skb->len,
		       padlen);

	if (flags & DN_RT_PKT_CNTL) {
		if (unlikely(skb_linearize(skb)))
			goto dump_it;

		switch (flags & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_INIT:
			dn_dev_init_pkt(skb);
			break;
		case DN_RT_PKT_VERI:
			dn_dev_veri_pkt(skb);
			break;
		}

		if (dn->parms.state != DN_DEV_S_RU)
			goto dump_it;

		switch (flags & DN_RT_CNTL_MSK) {
		case DN_RT_PKT_HELO:
			return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
				       skb, skb->dev, NULL,
				       dn_route_ptp_hello);

		case DN_RT_PKT_L1RT:
		case DN_RT_PKT_L2RT:
			return NF_HOOK(NFPROTO_DECNET, NF_DN_ROUTE,
				       skb, skb->dev, NULL,
				       dn_route_discard);
		case DN_RT_PKT_ERTH:
			return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
				       skb, skb->dev, NULL,
				       dn_neigh_router_hello);

		case DN_RT_PKT_EEDH:
			return NF_HOOK(NFPROTO_DECNET, NF_DN_HELLO,
				       skb, skb->dev, NULL,
				       dn_neigh_endnode_hello);
		}
	} else {
		if (dn->parms.state != DN_DEV_S_RU)
			goto dump_it;

		skb_pull(skb, 1); /* Pull flags */

		switch (flags & DN_RT_PKT_MSK) {
		case DN_RT_PKT_LONG:
			return dn_route_rx_long(skb);
		case DN_RT_PKT_SHORT:
			return dn_route_rx_short(skb);
		}
	}

dump_it:
	kfree_skb(skb);
out:
	return NET_RX_DROP;
}

static int dn_to_neigh_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct dn_route *rt = (struct dn_route *) dst;
	struct neighbour *n = rt->n;

	return n->output(n, skb);
}

static int dn_output(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct dn_route *rt = (struct dn_route *)dst;
	struct net_device *dev = dst->dev;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);

	int err = -EINVAL;

	if (rt->n == NULL)
		goto error;

	skb->dev = dev;

	cb->src = rt->rt_saddr;
	cb->dst = rt->rt_daddr;

	/*
	 * Always set the Intra-Ethernet bit on all outgoing packets
	 * originated on this node. Only valid flag from upper layers
	 * is return-to-sender-requested. Set hop count to 0 too.
	 */
	cb->rt_flags &= ~DN_RT_F_RQR;
	cb->rt_flags |= DN_RT_F_IE;
	cb->hops = 0;

	return NF_HOOK(NFPROTO_DECNET, NF_DN_LOCAL_OUT, skb, NULL, dev,
		       dn_to_neigh_output);

error:
	net_dbg_ratelimited("dn_output: This should not happen\n");

	kfree_skb(skb);

	return err;
}

static int dn_forward(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct dst_entry *dst = skb_dst(skb);
	struct dn_dev *dn_db = rcu_dereference(dst->dev->dn_ptr);
	struct dn_route *rt;
	int header_len;
#ifdef CONFIG_NETFILTER
	struct net_device *dev = skb->dev;
#endif
	if (skb->pkt_type != PACKET_HOST)
		goto drop;

	/* Ensure that we have enough space for headers */
	rt = (struct dn_route *)skb_dst(skb);
	header_len = dn_db->use_long ? 21 : 6;
	if (skb_cow(skb, LL_RESERVED_SPACE(rt->dst.dev)+header_len))
		goto drop;

	/*
	 * Hop count exceeded.
	 */
	if (++cb->hops > 30)
		goto drop;

	skb->dev = rt->dst.dev;

	/*
	 * If packet goes out same interface it came in on, then set
	 * the Intra-Ethernet bit. This has no effect for short
	 * packets, so we don't need to test for them here.
	 */
	cb->rt_flags &= ~DN_RT_F_IE;
	if (rt->rt_flags & RTCF_DOREDIRECT)
		cb->rt_flags |= DN_RT_F_IE;

	return NF_HOOK(NFPROTO_DECNET, NF_DN_FORWARD, skb, dev, skb->dev,
		       dn_to_neigh_output);

drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/*
 * Used to catch bugs. This should never normally get
 * called.
 */
static int dn_rt_bug(struct sk_buff *skb)
{
	struct dn_skb_cb *cb = DN_SKB_CB(skb);

	net_dbg_ratelimited("dn_rt_bug: skb from:%04x to:%04x\n",
			    le16_to_cpu(cb->src), le16_to_cpu(cb->dst));

	kfree_skb(skb);

	return NET_RX_DROP;
}

static unsigned int dn_dst_default_advmss(const struct dst_entry *dst)
{
	return dn_mss_from_pmtu(dst->dev, dst_mtu(dst));
}

static unsigned int dn_dst_mtu(const struct dst_entry *dst)
{
	unsigned int mtu = dst_metric_raw(dst, RTAX_MTU);

	return mtu ? : dst->dev->mtu;
}

static struct neighbour *dn_dst_neigh_lookup(const struct dst_entry *dst,
					     struct sk_buff *skb,
					     const void *daddr)
{
	return __neigh_lookup_errno(&dn_neigh_table, daddr, dst->dev);
}

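/*
 * Fill in the gateway, metrics and neighbour entry of a freshly created
 * route from the result of a fib lookup, clamping the mtu and the
 * advertised mss to what the output device can carry.
 */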
static int dn_rt_set_next_hop(struct dn_route *rt, struct dn_fib_res *res)
{
	struct dn_fib_info *fi = res->fi;
	struct net_device *dev = rt->dst.dev;
	unsigned int mss_metric;
	struct neighbour *n;

	if (fi) {
		if (DN_FIB_RES_GW(*res) &&
		    DN_FIB_RES_NH(*res).nh_scope == RT_SCOPE_LINK)
			rt->rt_gateway = DN_FIB_RES_GW(*res);
		dst_init_metrics(&rt->dst, fi->fib_metrics, true);
	}
	rt->rt_type = res->type;

	if (dev != NULL && rt->n == NULL) {
		n = __neigh_lookup_errno(&dn_neigh_table, &rt->rt_gateway, dev);
		if (IS_ERR(n))
			return PTR_ERR(n);
		rt->n = n;
	}

	if (dst_metric(&rt->dst, RTAX_MTU) > rt->dst.dev->mtu)
		dst_metric_set(&rt->dst, RTAX_MTU, rt->dst.dev->mtu);
	mss_metric = dst_metric_raw(&rt->dst, RTAX_ADVMSS);
	if (mss_metric) {
		unsigned int mss = dn_mss_from_pmtu(dev, dst_mtu(&rt->dst));
		if (mss_metric > mss)
			dst_metric_set(&rt->dst, RTAX_ADVMSS, mss);
	}
	return 0;
}

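/*
 * Length (in bits) of the longest common prefix of two addresses.
 * For example, dn_match_addr(0x0401, 0x0402) gives an xor value of
 * 0x0003, whose highest set bit is bit 1, so the match is 16 - 2 = 14
 * bits.
 */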
static inline int dn_match_addr(__le16 addr1, __le16 addr2)
{
	__u16 tmp = le16_to_cpu(addr1) ^ le16_to_cpu(addr2);
	int match = 16;
	while(tmp) {
		tmp >>= 1;
		match--;
	}
	return match;
}

static __le16 dnet_select_source(const struct net_device *dev, __le16 daddr, int scope)
{
	__le16 saddr = 0;
	struct dn_dev *dn_db;
	struct dn_ifaddr *ifa;
	int best_match = 0;
	int ret;

	rcu_read_lock();
	dn_db = rcu_dereference(dev->dn_ptr);
	for (ifa = rcu_dereference(dn_db->ifa_list);
	     ifa != NULL;
	     ifa = rcu_dereference(ifa->ifa_next)) {
		if (ifa->ifa_scope > scope)
			continue;
		if (!daddr) {
			saddr = ifa->ifa_local;
			break;
		}
		ret = dn_match_addr(daddr, ifa->ifa_local);
		if (ret > best_match) {
			saddr = ifa->ifa_local;
			best_match = ret;
		}
		/* Fall back to any in-scope address if nothing matches */
		if (best_match == 0 && saddr == 0)
			saddr = ifa->ifa_local;
	}
	rcu_read_unlock();

	return saddr;
}

static inline __le16 __dn_fib_res_prefsrc(struct dn_fib_res *res)
{
	return dnet_select_source(DN_FIB_RES_DEV(*res), DN_FIB_RES_GW(*res), res->scope);
}

static inline __le16 dn_fib_rules_map_destination(__le16 daddr, struct dn_fib_res *res)
{
	__le16 mask = dnet_make_mask(res->prefixlen);
	return (daddr&~mask)|res->fi->fib_nh->nh_gw;
}

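/*
 * Slow path for output routes: resolve the output device and source
 * address, consult the fib (falling back to the endnode algorithm of
 * neighbour cache plus default router where no fib entry exists), then
 * create the route and insert it into the cache.
 */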
static int dn_route_output_slow(struct dst_entry **pprt, const struct flowidn *oldflp, int try_hard)
{
	struct flowidn fld = {
		.daddr = oldflp->daddr,
		.saddr = oldflp->saddr,
		.flowidn_scope = RT_SCOPE_UNIVERSE,
		.flowidn_mark = oldflp->flowidn_mark,
		.flowidn_iif = init_net.loopback_dev->ifindex,
		.flowidn_oif = oldflp->flowidn_oif,
	};
	struct dn_route *rt = NULL;
	struct net_device *dev_out = NULL, *dev;
	struct neighbour *neigh = NULL;
	unsigned int hash;
	unsigned int flags = 0;
	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNICAST };
	int err;
	int free_res = 0;
	__le16 gateway = 0;

	if (decnet_debug_level & 16)
		printk(KERN_DEBUG
		       "dn_route_output_slow: dst=%04x src=%04x mark=%d"
		       " iif=%d oif=%d\n", le16_to_cpu(oldflp->daddr),
		       le16_to_cpu(oldflp->saddr),
		       oldflp->flowidn_mark, init_net.loopback_dev->ifindex,
		       oldflp->flowidn_oif);

	/* If we have an output interface, verify it's a DECnet device */
	if (oldflp->flowidn_oif) {
		dev_out = dev_get_by_index(&init_net, oldflp->flowidn_oif);
		err = -ENODEV;
		if (dev_out && dev_out->dn_ptr == NULL) {
			dev_put(dev_out);
			dev_out = NULL;
		}
		if (dev_out == NULL)
			goto out;
	}

	/* If we have a source address, verify that it's a local address */
	if (oldflp->saddr) {
		err = -EADDRNOTAVAIL;

		if (dev_out) {
			if (dn_dev_islocal(dev_out, oldflp->saddr))
				goto source_ok;
			dev_put(dev_out);
			goto out;
		}
		rcu_read_lock();
		for_each_netdev_rcu(&init_net, dev) {
			if (!dev->dn_ptr)
				continue;
			if (!dn_dev_islocal(dev, oldflp->saddr))
				continue;
			if ((dev->flags & IFF_LOOPBACK) &&
			    oldflp->daddr &&
			    !dn_dev_islocal(dev, oldflp->daddr))
				continue;

			dev_out = dev;
			break;
		}
		rcu_read_unlock();
		if (dev_out == NULL)
			goto out;
		dev_hold(dev_out);
source_ok:
		;
	}

	/* No destination? Assume it's local */
	if (!fld.daddr) {
		fld.daddr = fld.saddr;

		err = -EADDRNOTAVAIL;
		if (dev_out)
			dev_put(dev_out);
		dev_out = init_net.loopback_dev;
		dev_hold(dev_out);
		if (!fld.daddr) {
			fld.daddr =
			fld.saddr = dnet_select_source(dev_out, 0,
						       RT_SCOPE_HOST);
			if (!fld.daddr)
				goto out;
		}
		fld.flowidn_oif = init_net.loopback_dev->ifindex;
		res.type = RTN_LOCAL;
		goto make_route;
	}

	if (decnet_debug_level & 16)
		printk(KERN_DEBUG
		       "dn_route_output_slow: initial checks complete."
		       " dst=%04x src=%04x oif=%d try_hard=%d\n",
		       le16_to_cpu(fld.daddr), le16_to_cpu(fld.saddr),
		       fld.flowidn_oif, try_hard);

	/*
	 * N.B. If the kernel is compiled without router support then
	 * dn_fib_lookup() will evaluate to non-zero so this if () block
	 * will always be executed.
	 */
	err = -ESRCH;
	if (try_hard || (err = dn_fib_lookup(&fld, &res)) != 0) {
		struct dn_dev *dn_db;
		if (err != -ESRCH)
			goto out;
		/*
		 * Here the fallback is basically the standard algorithm for
		 * routing in endnodes which is described in the DECnet routing
		 * docs
		 *
		 * If we are not trying hard, look in neighbour cache.
		 * The result is tested to ensure that if a specific output
		 * device/source address was requested, then we honour that
		 * here
		 */
		if (!try_hard) {
			neigh = neigh_lookup_nodev(&dn_neigh_table, &init_net, &fld.daddr);
			if (neigh) {
				if ((oldflp->flowidn_oif &&
				     (neigh->dev->ifindex != oldflp->flowidn_oif)) ||
				    (oldflp->saddr &&
				     (!dn_dev_islocal(neigh->dev,
						      oldflp->saddr)))) {
					neigh_release(neigh);
					neigh = NULL;
				} else {
					if (dev_out)
						dev_put(dev_out);
					if (dn_dev_islocal(neigh->dev, fld.daddr)) {
						dev_out = init_net.loopback_dev;
						res.type = RTN_LOCAL;
					} else {
						dev_out = neigh->dev;
					}
					dev_hold(dev_out);
					goto select_source;
				}
			}
		}

		/* Not there? Perhaps it's a local address */
		if (dev_out == NULL)
			dev_out = dn_dev_get_default();
		err = -ENODEV;
		if (dev_out == NULL)
			goto out;
		dn_db = rcu_dereference_raw(dev_out->dn_ptr);
		/* Possible improvement - check all devices for local addr */
		if (dn_dev_islocal(dev_out, fld.daddr)) {
			dev_put(dev_out);
			dev_out = init_net.loopback_dev;
			dev_hold(dev_out);
			res.type = RTN_LOCAL;
			goto select_source;
		}
		/* Not local either.... try sending it to the default router */
		neigh = neigh_clone(dn_db->router);
		BUG_ON(neigh && neigh->dev != dev_out);

		/* Ok then, we assume it's directly connected and move on */
select_source:
		if (neigh)
			gateway = ((struct dn_neigh *)neigh)->addr;
		if (gateway == 0)
			gateway = fld.daddr;
		if (fld.saddr == 0) {
			fld.saddr = dnet_select_source(dev_out, gateway,
						       res.type == RTN_LOCAL ?
						       RT_SCOPE_HOST :
						       RT_SCOPE_LINK);
			if (fld.saddr == 0 && res.type != RTN_LOCAL)
				goto e_addr;
		}
		fld.flowidn_oif = dev_out->ifindex;
		goto make_route;
	}
	free_res = 1;

	if (res.type == RTN_NAT)
		goto e_inval;

	if (res.type == RTN_LOCAL) {
		if (!fld.saddr)
			fld.saddr = fld.daddr;
		if (dev_out)
			dev_put(dev_out);
		dev_out = init_net.loopback_dev;
		dev_hold(dev_out);
		fld.flowidn_oif = dev_out->ifindex;
		if (res.fi)
			dn_fib_info_put(res.fi);
		res.fi = NULL;
		goto make_route;
	}

	if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
		dn_fib_select_multipath(&fld, &res);

	/*
	 * We could add some logic to deal with default routes here and
	 * get rid of some of the special casing above.
	 */
	if (!fld.saddr)
		fld.saddr = DN_FIB_RES_PREFSRC(res);

	if (dev_out)
		dev_put(dev_out);
	dev_out = DN_FIB_RES_DEV(res);
	dev_hold(dev_out);
	fld.flowidn_oif = dev_out->ifindex;
	gateway = DN_FIB_RES_GW(res);

make_route:
	if (dev_out->flags & IFF_LOOPBACK)
		flags |= RTCF_LOCAL;

	rt = dst_alloc(&dn_dst_ops, dev_out, 1, 0, DST_HOST);
	if (rt == NULL)
		goto e_nobufs;

	memset(&rt->fld, 0, sizeof(rt->fld));
	rt->fld.saddr        = oldflp->saddr;
	rt->fld.daddr        = oldflp->daddr;
	rt->fld.flowidn_oif  = oldflp->flowidn_oif;
	rt->fld.flowidn_iif  = 0;
	rt->fld.flowidn_mark = oldflp->flowidn_mark;

	rt->rt_saddr      = fld.saddr;
	rt->rt_daddr      = fld.daddr;
	rt->rt_gateway    = gateway ? gateway : fld.daddr;
	rt->rt_local_src  = fld.saddr;

	rt->rt_dst_map    = fld.daddr;
	rt->rt_src_map    = fld.saddr;

	rt->n = neigh;
	neigh = NULL;

	rt->dst.lastuse = jiffies;
	rt->dst.output  = dn_output;
	rt->dst.input   = dn_rt_bug;
	rt->rt_flags      = flags;
	if (flags & RTCF_LOCAL)
		rt->dst.input = dn_nsp_rx;

	err = dn_rt_set_next_hop(rt, &res);
	if (err)
		goto e_neighbour;

	hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
	dn_insert_route(rt, hash, (struct dn_route **)pprt);

done:
	if (neigh)
		neigh_release(neigh);
	if (free_res)
		dn_fib_res_put(&res);
	if (dev_out)
		dev_put(dev_out);
out:
	return err;

e_addr:
	err = -EADDRNOTAVAIL;
	goto done;
e_inval:
	err = -EINVAL;
	goto done;
e_nobufs:
	err = -ENOBUFS;
	goto done;
e_neighbour:
	dst_free(&rt->dst);
	goto e_nobufs;
}

/*
 * N.B. The flags may be moved into the flowi at some future stage.
 */
static int __dn_route_output_key(struct dst_entry **pprt, const struct flowidn *flp, int flags)
{
	unsigned int hash = dn_hash(flp->saddr, flp->daddr);
	struct dn_route *rt = NULL;

	if (!(flags & MSG_TRYHARD)) {
		rcu_read_lock_bh();
		for (rt = rcu_dereference_bh(dn_rt_hash_table[hash].chain); rt;
			rt = rcu_dereference_bh(rt->dst.dn_next)) {
			if ((flp->daddr == rt->fld.daddr) &&
			    (flp->saddr == rt->fld.saddr) &&
			    (flp->flowidn_mark == rt->fld.flowidn_mark) &&
			    dn_is_output_route(rt) &&
			    (rt->fld.flowidn_oif == flp->flowidn_oif)) {
				dst_use(&rt->dst, jiffies);
				rcu_read_unlock_bh();
				*pprt = &rt->dst;
				return 0;
			}
		}
		rcu_read_unlock_bh();
	}

	return dn_route_output_slow(pprt, flp, flags);
}

static int dn_route_output_key(struct dst_entry **pprt, struct flowidn *flp, int flags)
{
	int err;

	err = __dn_route_output_key(pprt, flp, flags);
	if (err == 0 && flp->flowidn_proto) {
		*pprt = xfrm_lookup(&init_net, *pprt,
				    flowidn_to_flowi(flp), NULL, 0);
		if (IS_ERR(*pprt)) {
			err = PTR_ERR(*pprt);
			*pprt = NULL;
		}
	}
	return err;
}

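/*
 * As dn_route_output_key(), but called on behalf of a socket: MSG_TRYHARD
 * in @flags selects the try_hard lookup and, unless MSG_DONTWAIT is set,
 * the xfrm lookup is allowed to sleep.
 */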
int dn_route_output_sock(struct dst_entry **pprt, struct flowidn *fl, struct sock *sk, int flags)
{
	int err;

	err = __dn_route_output_key(pprt, fl, flags & MSG_TRYHARD);
	if (err == 0 && fl->flowidn_proto) {
		if (!(flags & MSG_DONTWAIT))
			fl->flowidn_flags |= FLOWI_FLAG_CAN_SLEEP;
		*pprt = xfrm_lookup(&init_net, *pprt,
				    flowidn_to_flowi(fl), sk, 0);
		if (IS_ERR(*pprt)) {
			err = PTR_ERR(*pprt);
			*pprt = NULL;
		}
	}
	return err;
}

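/*
 * Illustrative sketch (not part of this file) of how a caller such as
 * the NSP layer might obtain an output route for a socket; the scp
 * field names below are assumptions, only the dn_route_output_sock()
 * call itself is real:
 *
 *	struct flowidn fld = {
 *		.daddr = scp->peer.sdn_add,	// hypothetical fields
 *		.saddr = scp->addr.sdn_add,
 *		.flowidn_proto = DNPROTO_NSP,
 *	};
 *	struct dst_entry *dst = NULL;
 *
 *	if (dn_route_output_sock(&dst, &fld, sk, MSG_TRYHARD) == 0)
 *		sk_dst_set(sk, dst);	// cache the route on the socket
 */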
static int dn_route_input_slow(struct sk_buff *skb)
{
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	struct net_device *in_dev = skb->dev;
	struct net_device *out_dev = NULL;
	struct dn_dev *dn_db;
	struct neighbour *neigh = NULL;
	unsigned int hash;
	int flags = 0;
	__le16 gateway = 0;
	__le16 local_src = 0;
	struct flowidn fld = {
		.daddr = cb->dst,
		.saddr = cb->src,
		.flowidn_scope = RT_SCOPE_UNIVERSE,
		.flowidn_mark = skb->mark,
		.flowidn_iif = skb->dev->ifindex,
	};
	struct dn_fib_res res = { .fi = NULL, .type = RTN_UNREACHABLE };
	int err = -EINVAL;
	int free_res = 0;

	dev_hold(in_dev);

	if ((dn_db = rcu_dereference(in_dev->dn_ptr)) == NULL)
		goto out;

	/* Zero source addresses are not allowed */
	if (fld.saddr == 0)
		goto out;

	/*
	 * In this case we've just received a packet from a source
	 * outside ourselves pretending to come from us. We don't
	 * allow it any further to prevent routing loops, spoofing and
	 * other nasties. Loopback packets already have the dst attached
	 * so this only affects packets which have originated elsewhere.
	 */
	err  = -ENOTUNIQ;
	if (dn_dev_islocal(in_dev, cb->src))
		goto out;

	err = dn_fib_lookup(&fld, &res);
	if (err) {
		if (err != -ESRCH)
			goto out;
		/*
		 * Is the destination us?
		 */
		if (!dn_dev_islocal(in_dev, cb->dst))
			goto e_inval;

		res.type = RTN_LOCAL;
	} else {
		__le16 src_map = fld.saddr;
		free_res = 1;

		out_dev = DN_FIB_RES_DEV(res);
		if (out_dev == NULL) {
			net_crit_ratelimited("Bug in dn_route_input_slow() No output device\n");
			goto e_inval;
		}
		dev_hold(out_dev);

		if (res.r)
			src_map = fld.saddr; /* no NAT support for now */

		gateway = DN_FIB_RES_GW(res);
		if (res.type == RTN_NAT) {
			fld.daddr = dn_fib_rules_map_destination(fld.daddr, &res);
			dn_fib_res_put(&res);
			free_res = 0;
			if (dn_fib_lookup(&fld, &res))
				goto e_inval;
			free_res = 1;
			if (res.type != RTN_UNICAST)
				goto e_inval;
			flags |= RTCF_DNAT;
			gateway = fld.daddr;
		}
		fld.saddr = src_map;
	}

	switch(res.type) {
	case RTN_UNICAST:
		/*
		 * Forwarding check here, we only check for forwarding
		 * being turned off, if you want to only forward intra
		 * area, it's up to you to set the routing tables up
		 * correctly.
		 */
		if (dn_db->parms.forwarding == 0)
			goto e_inval;

		if (res.fi->fib_nhs > 1 && fld.flowidn_oif == 0)
			dn_fib_select_multipath(&fld, &res);

		/*
		 * Check for out_dev == in_dev. We use the RTCF_DOREDIRECT
		 * flag as a hint to set the intra-ethernet bit when
		 * forwarding. If we've got NAT in operation, we don't do
		 * this optimisation.
		 */
		if (out_dev == in_dev && !(flags & RTCF_NAT))
			flags |= RTCF_DOREDIRECT;

		local_src = DN_FIB_RES_PREFSRC(res);
		/* fall through */
	case RTN_BLACKHOLE:
	case RTN_UNREACHABLE:
		break;
	case RTN_LOCAL:
		flags |= RTCF_LOCAL;
		fld.saddr = cb->dst;
		fld.daddr = cb->src;

		/* Routing tables gave us a gateway */
		if (gateway)
			goto make_route;

		/* Packet was intra-ethernet, so we know it's on-link */
		if (cb->rt_flags & DN_RT_F_IE) {
			gateway = cb->src;
			flags |= RTCF_DIRECTSRC;
			goto make_route;
		}

		/* Use the default router if there is one */
		neigh = neigh_clone(dn_db->router);
		if (neigh) {
			gateway = ((struct dn_neigh *)neigh)->addr;
			goto make_route;
		}

		/* Close eyes and pray */
		gateway = cb->src;
		flags |= RTCF_DIRECTSRC;
		goto make_route;
	default:
		goto e_inval;
	}

make_route:
	rt = dst_alloc(&dn_dst_ops, out_dev, 0, 0, DST_HOST);
	if (rt == NULL)
		goto e_nobufs;

	memset(&rt->fld, 0, sizeof(rt->fld));
	rt->rt_saddr      = fld.saddr;
	rt->rt_daddr      = fld.daddr;
	rt->rt_gateway    = fld.daddr;
	if (gateway)
		rt->rt_gateway = gateway;
	rt->rt_local_src  = local_src ? local_src : rt->rt_saddr;

	rt->rt_dst_map    = fld.daddr;
	rt->rt_src_map    = fld.saddr;

	rt->fld.saddr        = cb->src;
	rt->fld.daddr        = cb->dst;
	rt->fld.flowidn_oif  = 0;
	rt->fld.flowidn_iif  = in_dev->ifindex;
	rt->fld.flowidn_mark = fld.flowidn_mark;

	rt->n = neigh;
	rt->dst.lastuse = jiffies;
	rt->dst.output = dn_rt_bug;
	switch (res.type) {
	case RTN_UNICAST:
		rt->dst.input = dn_forward;
		break;
	case RTN_LOCAL:
		rt->dst.output = dn_output;
		rt->dst.input = dn_nsp_rx;
		rt->dst.dev = in_dev;
		flags |= RTCF_LOCAL;
		break;
	default:
	case RTN_UNREACHABLE:
	case RTN_BLACKHOLE:
		rt->dst.input = dst_discard;
	}
	rt->rt_flags = flags;

	err = dn_rt_set_next_hop(rt, &res);
	if (err)
		goto e_neighbour;

	hash = dn_hash(rt->fld.saddr, rt->fld.daddr);
	dn_insert_route(rt, hash, &rt);
	skb_dst_set(skb, &rt->dst);

done:
	if (neigh)
		neigh_release(neigh);
	if (free_res)
		dn_fib_res_put(&res);
	dev_put(in_dev);
	if (out_dev)
		dev_put(out_dev);
out:
	return err;

e_inval:
	err = -EINVAL;
	goto done;

e_nobufs:
	err = -ENOBUFS;
	goto done;

e_neighbour:
	dst_free(&rt->dst);
	goto done;
}

static int dn_route_input(struct sk_buff *skb)
{
	struct dn_route *rt;
	struct dn_skb_cb *cb = DN_SKB_CB(skb);
	unsigned int hash = dn_hash(cb->src, cb->dst);

	if (skb_dst(skb))
		return 0;

	rcu_read_lock();
	for(rt = rcu_dereference(dn_rt_hash_table[hash].chain); rt != NULL;
	    rt = rcu_dereference(rt->dst.dn_next)) {
		if ((rt->fld.saddr == cb->src) &&
		    (rt->fld.daddr == cb->dst) &&
		    (rt->fld.flowidn_oif == 0) &&
		    (rt->fld.flowidn_mark == skb->mark) &&
		    (rt->fld.flowidn_iif == cb->iif)) {
			dst_use(&rt->dst, jiffies);
			rcu_read_unlock();
			skb_dst_set(skb, (struct dst_entry *)rt);
			return 0;
		}
	}
	rcu_read_unlock();

	return dn_route_input_slow(skb);
}

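/*
 * Fill a netlink RTM_NEWROUTE message from a cached route entry.
 */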
static int dn_rt_fill_info(struct sk_buff *skb, u32 pid, u32 seq,
			   int event, int nowait, unsigned int flags)
{
	struct dn_route *rt = (struct dn_route *)skb_dst(skb);
	struct rtmsg *r;
	struct nlmsghdr *nlh;
	long expires;

	nlh = nlmsg_put(skb, pid, seq, event, sizeof(*r), flags);
	if (!nlh)
		return -EMSGSIZE;

	r = nlmsg_data(nlh);
	r->rtm_family = AF_DECnet;
	r->rtm_dst_len = 16;
	r->rtm_src_len = 0;
	r->rtm_tos = 0;
	r->rtm_table = RT_TABLE_MAIN;
	r->rtm_type = rt->rt_type;
	r->rtm_flags = (rt->rt_flags & ~0xFFFF) | RTM_F_CLONED;
	r->rtm_scope = RT_SCOPE_UNIVERSE;
	r->rtm_protocol = RTPROT_UNSPEC;

	if (rt->rt_flags & RTCF_NOTIFY)
		r->rtm_flags |= RTM_F_NOTIFY;

	if (nla_put_u32(skb, RTA_TABLE, RT_TABLE_MAIN) < 0 ||
	    nla_put_le16(skb, RTA_DST, rt->rt_daddr) < 0)
		goto errout;

	if (rt->fld.saddr) {
		r->rtm_src_len = 16;
		if (nla_put_le16(skb, RTA_SRC, rt->fld.saddr) < 0)
			goto errout;
	}
	if (rt->dst.dev &&
	    nla_put_u32(skb, RTA_OIF, rt->dst.dev->ifindex) < 0)
		goto errout;

	/*
	 * Note to self - change this if input routes reverse direction when
	 * they deal only with inputs and not with replies like they do
	 * currently.
	 */
	if (nla_put_le16(skb, RTA_PREFSRC, rt->rt_local_src) < 0)
		goto errout;

	if (rt->rt_daddr != rt->rt_gateway &&
	    nla_put_le16(skb, RTA_GATEWAY, rt->rt_gateway) < 0)
		goto errout;

	if (rtnetlink_put_metrics(skb, dst_metrics_ptr(&rt->dst)) < 0)
		goto errout;

	expires = rt->dst.expires ? rt->dst.expires - jiffies : 0;
	if (rtnl_put_cacheinfo(skb, &rt->dst, 0, expires,
			       rt->dst.error) < 0)
		goto errout;

	if (dn_is_input_route(rt) &&
	    nla_put_u32(skb, RTA_IIF, rt->fld.flowidn_iif) < 0)
		goto errout;

	return nlmsg_end(skb, nlh);

errout:
	nlmsg_cancel(skb, nlh);
	return -EMSGSIZE;
}

/*
 * This is called by both endnodes and routers now.
 */
static int dn_cache_getroute(struct sk_buff *in_skb, struct nlmsghdr *nlh, void *arg)
{
	struct net *net = sock_net(in_skb->sk);
	struct rtattr **rta = arg;
	struct rtmsg *rtm = nlmsg_data(nlh);
	struct dn_route *rt = NULL;
	struct dn_skb_cb *cb;
	int err;
	struct sk_buff *skb;
	struct flowidn fld;

	if (!net_eq(net, &init_net))
		return -EINVAL;

	memset(&fld, 0, sizeof(fld));
	fld.flowidn_proto = DNPROTO_NSP;

	skb = nlmsg_new(NLMSG_DEFAULT_SIZE, GFP_KERNEL);
	if (skb == NULL)
		return -ENOBUFS;
	skb_reset_mac_header(skb);
	cb = DN_SKB_CB(skb);

	if (rta[RTA_SRC-1])
		memcpy(&fld.saddr, RTA_DATA(rta[RTA_SRC-1]), 2);
	if (rta[RTA_DST-1])
		memcpy(&fld.daddr, RTA_DATA(rta[RTA_DST-1]), 2);
	if (rta[RTA_IIF-1])
		memcpy(&fld.flowidn_iif, RTA_DATA(rta[RTA_IIF-1]), sizeof(int));

	if (fld.flowidn_iif) {
		struct net_device *dev;
		if ((dev = dev_get_by_index(&init_net, fld.flowidn_iif)) == NULL) {
			kfree_skb(skb);
			return -ENODEV;
		}
		if (!dev->dn_ptr) {
			dev_put(dev);
			kfree_skb(skb);
			return -ENODEV;
		}
		skb->protocol = htons(ETH_P_DNA_RT);
		skb->dev = dev;
		cb->src = fld.saddr;
		cb->dst = fld.daddr;
		local_bh_disable();
		err = dn_route_input(skb);
		local_bh_enable();
		memset(cb, 0, sizeof(struct dn_skb_cb));
		rt = (struct dn_route *)skb_dst(skb);
		if (!err && rt->dst.error)
			err = rt->dst.error;
	} else {
		int oif = 0;
		if (rta[RTA_OIF - 1])
			memcpy(&oif, RTA_DATA(rta[RTA_OIF - 1]), sizeof(int));
		fld.flowidn_oif = oif;
		err = dn_route_output_key((struct dst_entry **)&rt, &fld, 0);
	}

	if (skb->dev)
		dev_put(skb->dev);
	skb->dev = NULL;
	if (err)
		goto out_free;
	skb_dst_set(skb, &rt->dst);
	if (rtm->rtm_flags & RTM_F_NOTIFY)
		rt->rt_flags |= RTCF_NOTIFY;

	err = dn_rt_fill_info(skb, NETLINK_CB(in_skb).pid, nlh->nlmsg_seq, RTM_NEWROUTE, 0, 0);

	if (err == 0)
		goto out_free;
	if (err < 0) {
		err = -EMSGSIZE;
		goto out_free;
	}

	return rtnl_unicast(skb, &init_net, NETLINK_CB(in_skb).pid);

out_free:
	kfree_skb(skb);
	return err;
}

/*
 * For routers, this is called from dn_fib_dump, but for endnodes it's
 * called directly from the rtnetlink dispatch table.
 */
int dn_cache_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
	struct net *net = sock_net(skb->sk);
	struct dn_route *rt;
	int h, s_h;
	int idx, s_idx;
	struct rtmsg *rtm;

	if (!net_eq(net, &init_net))
		return 0;

	if (nlmsg_len(cb->nlh) < sizeof(struct rtmsg))
		return -EINVAL;

	rtm = nlmsg_data(cb->nlh);
	if (!(rtm->rtm_flags & RTM_F_CLONED))
		return 0;

	s_h = cb->args[0];
	s_idx = idx = cb->args[1];
	for(h = 0; h <= dn_rt_hash_mask; h++) {
		if (h < s_h)
			continue;
		if (h > s_h)
			s_idx = 0;
		rcu_read_lock_bh();
		for(rt = rcu_dereference_bh(dn_rt_hash_table[h].chain), idx = 0;
			rt;
			rt = rcu_dereference_bh(rt->dst.dn_next), idx++) {
			if (idx < s_idx)
				continue;
			skb_dst_set(skb, dst_clone(&rt->dst));
			if (dn_rt_fill_info(skb, NETLINK_CB(cb->skb).pid,
					cb->nlh->nlmsg_seq, RTM_NEWROUTE,
					1, NLM_F_MULTI) <= 0) {
				skb_dst_drop(skb);
				rcu_read_unlock_bh();
				goto done;
			}
			skb_dst_drop(skb);
		}
		rcu_read_unlock_bh();
	}

done:
	cb->args[0] = h;
	cb->args[1] = idx;
	return skb->len;
}

#ifdef CONFIG_PROC_FS
struct dn_rt_cache_iter_state {
	int bucket;
};

static struct dn_route *dn_rt_cache_get_first(struct seq_file *seq)
{
	struct dn_route *rt = NULL;
	struct dn_rt_cache_iter_state *s = seq->private;

	for(s->bucket = dn_rt_hash_mask; s->bucket >= 0; --s->bucket) {
		rcu_read_lock_bh();
		rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
		if (rt)
			break;
		rcu_read_unlock_bh();
	}
	return rt;
}

static struct dn_route *dn_rt_cache_get_next(struct seq_file *seq, struct dn_route *rt)
{
	struct dn_rt_cache_iter_state *s = seq->private;

	rt = rcu_dereference_bh(rt->dst.dn_next);
	while (!rt) {
		rcu_read_unlock_bh();
		if (--s->bucket < 0)
			break;
		rcu_read_lock_bh();
		rt = rcu_dereference_bh(dn_rt_hash_table[s->bucket].chain);
	}
	return rt;
}

static void *dn_rt_cache_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_first(seq);

	if (rt) {
		while(*pos && (rt = dn_rt_cache_get_next(seq, rt)))
			--*pos;
	}
	return *pos ? NULL : rt;
}

static void *dn_rt_cache_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct dn_route *rt = dn_rt_cache_get_next(seq, v);
	++*pos;
	return rt;
}

static void dn_rt_cache_seq_stop(struct seq_file *seq, void *v)
{
	if (v)
		rcu_read_unlock_bh();
}

static int dn_rt_cache_seq_show(struct seq_file *seq, void *v)
{
	struct dn_route *rt = v;
	char buf1[DN_ASCBUF_LEN], buf2[DN_ASCBUF_LEN];

	seq_printf(seq, "%-8s %-7s %-7s %04d %04d %04d\n",
		   rt->dst.dev ? rt->dst.dev->name : "*",
		   dn_addr2asc(le16_to_cpu(rt->rt_daddr), buf1),
		   dn_addr2asc(le16_to_cpu(rt->rt_saddr), buf2),
		   atomic_read(&rt->dst.__refcnt),
		   rt->dst.__use, 0);
	return 0;
}

static const struct seq_operations dn_rt_cache_seq_ops = {
	.start	= dn_rt_cache_seq_start,
	.next	= dn_rt_cache_seq_next,
	.stop	= dn_rt_cache_seq_stop,
	.show	= dn_rt_cache_seq_show,
};

static int dn_rt_cache_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &dn_rt_cache_seq_ops,
			sizeof(struct dn_rt_cache_iter_state));
}

static const struct file_operations dn_rt_cache_seq_fops = {
	.owner	 = THIS_MODULE,
	.open	 = dn_rt_cache_seq_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release_private,
};

#endif /* CONFIG_PROC_FS */

void __init dn_route_init(void)
{
	int i, goal, order;

	dn_dst_ops.kmem_cachep =
		kmem_cache_create("dn_dst_cache", sizeof(struct dn_route), 0,
				  SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
	dst_entries_init(&dn_dst_ops);
	setup_timer(&dn_route_timer, dn_dst_check_expire, 0);
	dn_route_timer.expires = jiffies + decnet_dst_gc_interval * HZ;
	add_timer(&dn_route_timer);

	goal = totalram_pages >> (26 - PAGE_SHIFT);

	for(order = 0; (1UL << order) < goal; order++)
		/* NOTHING */;

	/*
	 * Only want 1024 entries max, since the table is very, very unlikely
	 * to be larger than that.
	 */
	while(order && ((((1UL << order) * PAGE_SIZE) /
				sizeof(struct dn_rt_hash_bucket)) >= 2048))
		order--;

	do {
		dn_rt_hash_mask = (1UL << order) * PAGE_SIZE /
			sizeof(struct dn_rt_hash_bucket);
		while(dn_rt_hash_mask & (dn_rt_hash_mask - 1))
			dn_rt_hash_mask--;
		dn_rt_hash_table = (struct dn_rt_hash_bucket *)
			__get_free_pages(GFP_ATOMIC, order);
	} while (dn_rt_hash_table == NULL && --order > 0);

	if (!dn_rt_hash_table)
		panic("Failed to allocate DECnet route cache hash table\n");

	printk(KERN_INFO
		"DECnet: Routing cache hash table of %u buckets, %ldKbytes\n",
		dn_rt_hash_mask,
		(long)(dn_rt_hash_mask*sizeof(struct dn_rt_hash_bucket))/1024);

	dn_rt_hash_mask--;
	for(i = 0; i <= dn_rt_hash_mask; i++) {
		spin_lock_init(&dn_rt_hash_table[i].lock);
		dn_rt_hash_table[i].chain = NULL;
	}

	dn_dst_ops.gc_thresh = (dn_rt_hash_mask + 1);

	proc_net_fops_create(&init_net, "decnet_cache", S_IRUGO,
			     &dn_rt_cache_seq_fops);

#ifdef CONFIG_DECNET_ROUTER
	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
		      dn_fib_dump, NULL);
#else
	rtnl_register(PF_DECnet, RTM_GETROUTE, dn_cache_getroute,
		      dn_cache_dump, NULL);
#endif
}

void __exit dn_route_cleanup(void)
{
	del_timer(&dn_route_timer);
	dn_run_flush(0);

	proc_net_remove(&init_net, "decnet_cache");
	dst_entries_destroy(&dn_dst_ops);
}