/* fib_semantics.c */
  1. /*
  2. * INET An implementation of the TCP/IP protocol suite for the LINUX
  3. * operating system. INET is implemented using the BSD Socket
  4. * interface as the means of communication with the user level.
  5. *
  6. * IPv4 Forwarding Information Base: semantics.
  7. *
  8. * Version: $Id: fib_semantics.c,v 1.19 2002/01/12 07:54:56 davem Exp $
  9. *
  10. * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
  11. *
  12. * This program is free software; you can redistribute it and/or
  13. * modify it under the terms of the GNU General Public License
  14. * as published by the Free Software Foundation; either version
  15. * 2 of the License, or (at your option) any later version.
  16. */
  17. #include <linux/config.h>
  18. #include <asm/uaccess.h>
  19. #include <asm/system.h>
  20. #include <linux/bitops.h>
  21. #include <linux/types.h>
  22. #include <linux/kernel.h>
  23. #include <linux/jiffies.h>
  24. #include <linux/mm.h>
  25. #include <linux/string.h>
  26. #include <linux/socket.h>
  27. #include <linux/sockios.h>
  28. #include <linux/errno.h>
  29. #include <linux/in.h>
  30. #include <linux/inet.h>
  31. #include <linux/netdevice.h>
  32. #include <linux/if_arp.h>
  33. #include <linux/proc_fs.h>
  34. #include <linux/skbuff.h>
  35. #include <linux/netlink.h>
  36. #include <linux/init.h>
  37. #include <net/ip.h>
  38. #include <net/protocol.h>
  39. #include <net/route.h>
  40. #include <net/tcp.h>
  41. #include <net/sock.h>
  42. #include <net/ip_fib.h>
  43. #include <net/ip_mp_alg.h>
  44. #include "fib_lookup.h"
/* Debug printf, compiled out. */
#define FSprintk(a...)

/* Protects fib_info_hash, fib_info_laddrhash and fib_info_devhash. */
static DEFINE_RWLOCK(fib_info_lock);
static struct hlist_head *fib_info_hash;
static struct hlist_head *fib_info_laddrhash;
static unsigned int fib_hash_size;	/* buckets in the two tables above */
static unsigned int fib_info_cnt;	/* number of live fib_info records */

#define DEVINDEX_HASHBITS 8
#define DEVINDEX_HASHSIZE (1U << DEVINDEX_HASHBITS)
static struct hlist_head fib_info_devhash[DEVINDEX_HASHSIZE];

#ifdef CONFIG_IP_ROUTE_MULTIPATH

static DEFINE_SPINLOCK(fib_multipath_lock);

/* Iterate over the nexthops of (fi).  Opens a brace block exposing
 * `nhsel' (the index) and `nh' (the current nexthop) to the loop body;
 * MUST be closed with endfor_nexthops(). */
#define for_nexthops(fi) { int nhsel; const struct fib_nh * nh; \
for (nhsel=0, nh = (fi)->fib_nh; nhsel < (fi)->fib_nhs; nh++, nhsel++)

/* Same as for_nexthops() but `nh' is writable. */
#define change_nexthops(fi) { int nhsel; struct fib_nh * nh; \
for (nhsel=0, nh = (struct fib_nh*)((fi)->fib_nh); nhsel < (fi)->fib_nhs; nh++, nhsel++)

#else /* CONFIG_IP_ROUTE_MULTIPATH */

/* Hope, that gcc will optimize it to get rid of dummy loop */

#define for_nexthops(fi) { int nhsel=0; const struct fib_nh * nh = (fi)->fib_nh; \
for (nhsel=0; nhsel < 1; nhsel++)

#define change_nexthops(fi) { int nhsel=0; struct fib_nh * nh = (struct fib_nh*)((fi)->fib_nh); \
for (nhsel=0; nhsel < 1; nhsel++)

#endif /* CONFIG_IP_ROUTE_MULTIPATH */

/* Closes the block opened by for_nexthops()/change_nexthops(). */
#define endfor_nexthops(fi) }
  68. static struct
  69. {
  70. int error;
  71. u8 scope;
  72. } fib_props[RTA_MAX + 1] = {
  73. {
  74. .error = 0,
  75. .scope = RT_SCOPE_NOWHERE,
  76. }, /* RTN_UNSPEC */
  77. {
  78. .error = 0,
  79. .scope = RT_SCOPE_UNIVERSE,
  80. }, /* RTN_UNICAST */
  81. {
  82. .error = 0,
  83. .scope = RT_SCOPE_HOST,
  84. }, /* RTN_LOCAL */
  85. {
  86. .error = 0,
  87. .scope = RT_SCOPE_LINK,
  88. }, /* RTN_BROADCAST */
  89. {
  90. .error = 0,
  91. .scope = RT_SCOPE_LINK,
  92. }, /* RTN_ANYCAST */
  93. {
  94. .error = 0,
  95. .scope = RT_SCOPE_UNIVERSE,
  96. }, /* RTN_MULTICAST */
  97. {
  98. .error = -EINVAL,
  99. .scope = RT_SCOPE_UNIVERSE,
  100. }, /* RTN_BLACKHOLE */
  101. {
  102. .error = -EHOSTUNREACH,
  103. .scope = RT_SCOPE_UNIVERSE,
  104. }, /* RTN_UNREACHABLE */
  105. {
  106. .error = -EACCES,
  107. .scope = RT_SCOPE_UNIVERSE,
  108. }, /* RTN_PROHIBIT */
  109. {
  110. .error = -EAGAIN,
  111. .scope = RT_SCOPE_UNIVERSE,
  112. }, /* RTN_THROW */
  113. {
  114. .error = -EINVAL,
  115. .scope = RT_SCOPE_NOWHERE,
  116. }, /* RTN_NAT */
  117. {
  118. .error = -EINVAL,
  119. .scope = RT_SCOPE_NOWHERE,
  120. }, /* RTN_XRESOLVE */
  121. };
  122. /* Release a nexthop info record */
  123. void free_fib_info(struct fib_info *fi)
  124. {
  125. if (fi->fib_dead == 0) {
  126. printk("Freeing alive fib_info %p\n", fi);
  127. return;
  128. }
  129. change_nexthops(fi) {
  130. if (nh->nh_dev)
  131. dev_put(nh->nh_dev);
  132. nh->nh_dev = NULL;
  133. } endfor_nexthops(fi);
  134. fib_info_cnt--;
  135. kfree(fi);
  136. }
  137. void fib_release_info(struct fib_info *fi)
  138. {
  139. write_lock(&fib_info_lock);
  140. if (fi && --fi->fib_treeref == 0) {
  141. hlist_del(&fi->fib_hash);
  142. if (fi->fib_prefsrc)
  143. hlist_del(&fi->fib_lhash);
  144. change_nexthops(fi) {
  145. if (!nh->nh_dev)
  146. continue;
  147. hlist_del(&nh->nh_hash);
  148. } endfor_nexthops(fi)
  149. fi->fib_dead = 1;
  150. fib_info_put(fi);
  151. }
  152. write_unlock(&fib_info_lock);
  153. }
  154. static __inline__ int nh_comp(const struct fib_info *fi, const struct fib_info *ofi)
  155. {
  156. const struct fib_nh *onh = ofi->fib_nh;
  157. for_nexthops(fi) {
  158. if (nh->nh_oif != onh->nh_oif ||
  159. nh->nh_gw != onh->nh_gw ||
  160. nh->nh_scope != onh->nh_scope ||
  161. #ifdef CONFIG_IP_ROUTE_MULTIPATH
  162. nh->nh_weight != onh->nh_weight ||
  163. #endif
  164. #ifdef CONFIG_NET_CLS_ROUTE
  165. nh->nh_tclassid != onh->nh_tclassid ||
  166. #endif
  167. ((nh->nh_flags^onh->nh_flags)&~RTNH_F_DEAD))
  168. return -1;
  169. onh++;
  170. } endfor_nexthops(fi);
  171. return 0;
  172. }
  173. static inline unsigned int fib_info_hashfn(const struct fib_info *fi)
  174. {
  175. unsigned int mask = (fib_hash_size - 1);
  176. unsigned int val = fi->fib_nhs;
  177. val ^= fi->fib_protocol;
  178. val ^= fi->fib_prefsrc;
  179. val ^= fi->fib_priority;
  180. return (val ^ (val >> 7) ^ (val >> 12)) & mask;
  181. }
  182. static struct fib_info *fib_find_info(const struct fib_info *nfi)
  183. {
  184. struct hlist_head *head;
  185. struct hlist_node *node;
  186. struct fib_info *fi;
  187. unsigned int hash;
  188. hash = fib_info_hashfn(nfi);
  189. head = &fib_info_hash[hash];
  190. hlist_for_each_entry(fi, node, head, fib_hash) {
  191. if (fi->fib_nhs != nfi->fib_nhs)
  192. continue;
  193. if (nfi->fib_protocol == fi->fib_protocol &&
  194. nfi->fib_prefsrc == fi->fib_prefsrc &&
  195. nfi->fib_priority == fi->fib_priority &&
  196. memcmp(nfi->fib_metrics, fi->fib_metrics,
  197. sizeof(fi->fib_metrics)) == 0 &&
  198. ((nfi->fib_flags^fi->fib_flags)&~RTNH_F_DEAD) == 0 &&
  199. (nfi->fib_nhs == 0 || nh_comp(fi, nfi) == 0))
  200. return fi;
  201. }
  202. return NULL;
  203. }
  204. static inline unsigned int fib_devindex_hashfn(unsigned int val)
  205. {
  206. unsigned int mask = DEVINDEX_HASHSIZE - 1;
  207. return (val ^
  208. (val >> DEVINDEX_HASHBITS) ^
  209. (val >> (DEVINDEX_HASHBITS * 2))) & mask;
  210. }
  211. /* Check, that the gateway is already configured.
  212. Used only by redirect accept routine.
  213. */
  214. int ip_fib_check_default(u32 gw, struct net_device *dev)
  215. {
  216. struct hlist_head *head;
  217. struct hlist_node *node;
  218. struct fib_nh *nh;
  219. unsigned int hash;
  220. read_lock(&fib_info_lock);
  221. hash = fib_devindex_hashfn(dev->ifindex);
  222. head = &fib_info_devhash[hash];
  223. hlist_for_each_entry(nh, node, head, nh_hash) {
  224. if (nh->nh_dev == dev &&
  225. nh->nh_gw == gw &&
  226. !(nh->nh_flags&RTNH_F_DEAD)) {
  227. read_unlock(&fib_info_lock);
  228. return 0;
  229. }
  230. }
  231. read_unlock(&fib_info_lock);
  232. return -1;
  233. }
  234. void rtmsg_fib(int event, u32 key, struct fib_alias *fa,
  235. int z, int tb_id,
  236. struct nlmsghdr *n, struct netlink_skb_parms *req)
  237. {
  238. struct sk_buff *skb;
  239. u32 pid = req ? req->pid : n->nlmsg_pid;
  240. int size = NLMSG_SPACE(sizeof(struct rtmsg)+256);
  241. skb = alloc_skb(size, GFP_KERNEL);
  242. if (!skb)
  243. return;
  244. if (fib_dump_info(skb, pid, n->nlmsg_seq, event, tb_id,
  245. fa->fa_type, fa->fa_scope, &key, z,
  246. fa->fa_tos,
  247. fa->fa_info, 0) < 0) {
  248. kfree_skb(skb);
  249. return;
  250. }
  251. NETLINK_CB(skb).dst_groups = RTMGRP_IPV4_ROUTE;
  252. if (n->nlmsg_flags&NLM_F_ECHO)
  253. atomic_inc(&skb->users);
  254. netlink_broadcast(rtnl, skb, pid, RTMGRP_IPV4_ROUTE, GFP_KERNEL);
  255. if (n->nlmsg_flags&NLM_F_ECHO)
  256. netlink_unicast(rtnl, skb, pid, MSG_DONTWAIT);
  257. }
  258. /* Return the first fib alias matching TOS with
  259. * priority less than or equal to PRIO.
  260. */
  261. struct fib_alias *fib_find_alias(struct list_head *fah, u8 tos, u32 prio)
  262. {
  263. if (fah) {
  264. struct fib_alias *fa;
  265. list_for_each_entry(fa, fah, fa_list) {
  266. if (fa->fa_tos > tos)
  267. continue;
  268. if (fa->fa_info->fib_priority >= prio ||
  269. fa->fa_tos < tos)
  270. return fa;
  271. }
  272. }
  273. return NULL;
  274. }
  275. int fib_detect_death(struct fib_info *fi, int order,
  276. struct fib_info **last_resort, int *last_idx, int *dflt)
  277. {
  278. struct neighbour *n;
  279. int state = NUD_NONE;
  280. n = neigh_lookup(&arp_tbl, &fi->fib_nh[0].nh_gw, fi->fib_dev);
  281. if (n) {
  282. state = n->nud_state;
  283. neigh_release(n);
  284. }
  285. if (state==NUD_REACHABLE)
  286. return 0;
  287. if ((state&NUD_VALID) && order != *dflt)
  288. return 0;
  289. if ((state&NUD_VALID) ||
  290. (*last_idx<0 && order > *dflt)) {
  291. *last_resort = fi;
  292. *last_idx = order;
  293. }
  294. return 1;
  295. }
  296. #ifdef CONFIG_IP_ROUTE_MULTIPATH
  297. static u32 fib_get_attr32(struct rtattr *attr, int attrlen, int type)
  298. {
  299. while (RTA_OK(attr,attrlen)) {
  300. if (attr->rta_type == type)
  301. return *(u32*)RTA_DATA(attr);
  302. attr = RTA_NEXT(attr, attrlen);
  303. }
  304. return 0;
  305. }
  306. static int
  307. fib_count_nexthops(struct rtattr *rta)
  308. {
  309. int nhs = 0;
  310. struct rtnexthop *nhp = RTA_DATA(rta);
  311. int nhlen = RTA_PAYLOAD(rta);
  312. while (nhlen >= (int)sizeof(struct rtnexthop)) {
  313. if ((nhlen -= nhp->rtnh_len) < 0)
  314. return 0;
  315. nhs++;
  316. nhp = RTNH_NEXT(nhp);
  317. };
  318. return nhs;
  319. }
  320. static int
  321. fib_get_nhs(struct fib_info *fi, const struct rtattr *rta, const struct rtmsg *r)
  322. {
  323. struct rtnexthop *nhp = RTA_DATA(rta);
  324. int nhlen = RTA_PAYLOAD(rta);
  325. change_nexthops(fi) {
  326. int attrlen = nhlen - sizeof(struct rtnexthop);
  327. if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
  328. return -EINVAL;
  329. nh->nh_flags = (r->rtm_flags&~0xFF) | nhp->rtnh_flags;
  330. nh->nh_oif = nhp->rtnh_ifindex;
  331. nh->nh_weight = nhp->rtnh_hops + 1;
  332. if (attrlen) {
  333. nh->nh_gw = fib_get_attr32(RTNH_DATA(nhp), attrlen, RTA_GATEWAY);
  334. #ifdef CONFIG_NET_CLS_ROUTE
  335. nh->nh_tclassid = fib_get_attr32(RTNH_DATA(nhp), attrlen, RTA_FLOW);
  336. #endif
  337. }
  338. nhp = RTNH_NEXT(nhp);
  339. } endfor_nexthops(fi);
  340. return 0;
  341. }
  342. #endif
  343. int fib_nh_match(struct rtmsg *r, struct nlmsghdr *nlh, struct kern_rta *rta,
  344. struct fib_info *fi)
  345. {
  346. #ifdef CONFIG_IP_ROUTE_MULTIPATH
  347. struct rtnexthop *nhp;
  348. int nhlen;
  349. #endif
  350. if (rta->rta_priority &&
  351. *rta->rta_priority != fi->fib_priority)
  352. return 1;
  353. if (rta->rta_oif || rta->rta_gw) {
  354. if ((!rta->rta_oif || *rta->rta_oif == fi->fib_nh->nh_oif) &&
  355. (!rta->rta_gw || memcmp(rta->rta_gw, &fi->fib_nh->nh_gw, 4) == 0))
  356. return 0;
  357. return 1;
  358. }
  359. #ifdef CONFIG_IP_ROUTE_MULTIPATH
  360. if (rta->rta_mp == NULL)
  361. return 0;
  362. nhp = RTA_DATA(rta->rta_mp);
  363. nhlen = RTA_PAYLOAD(rta->rta_mp);
  364. for_nexthops(fi) {
  365. int attrlen = nhlen - sizeof(struct rtnexthop);
  366. u32 gw;
  367. if (attrlen < 0 || (nhlen -= nhp->rtnh_len) < 0)
  368. return -EINVAL;
  369. if (nhp->rtnh_ifindex && nhp->rtnh_ifindex != nh->nh_oif)
  370. return 1;
  371. if (attrlen) {
  372. gw = fib_get_attr32(RTNH_DATA(nhp), attrlen, RTA_GATEWAY);
  373. if (gw && gw != nh->nh_gw)
  374. return 1;
  375. #ifdef CONFIG_NET_CLS_ROUTE
  376. gw = fib_get_attr32(RTNH_DATA(nhp), attrlen, RTA_FLOW);
  377. if (gw && gw != nh->nh_tclassid)
  378. return 1;
  379. #endif
  380. }
  381. nhp = RTNH_NEXT(nhp);
  382. } endfor_nexthops(fi);
  383. #endif
  384. return 0;
  385. }
  386. /*
  387. Picture
  388. -------
  389. Semantics of nexthop is very messy by historical reasons.
  390. We have to take into account, that:
  391. a) gateway can be actually local interface address,
  392. so that gatewayed route is direct.
  393. b) gateway must be on-link address, possibly
  394. described not by an ifaddr, but also by a direct route.
  395. c) If both gateway and interface are specified, they should not
  396. contradict.
  397. d) If we use tunnel routes, gateway could be not on-link.
  398. Attempt to reconcile all of these (alas, self-contradictory) conditions
  399. results in pretty ugly and hairy code with obscure logic.
  400. I chose to generalized it instead, so that the size
  401. of code does not increase practically, but it becomes
  402. much more general.
  403. Every prefix is assigned a "scope" value: "host" is local address,
  404. "link" is direct route,
  405. [ ... "site" ... "interior" ... ]
  406. and "universe" is true gateway route with global meaning.
  407. Every prefix refers to a set of "nexthop"s (gw, oif),
  408. where gw must have narrower scope. This recursion stops
  409. when gw has LOCAL scope or if "nexthop" is declared ONLINK,
  410. which means that gw is forced to be on link.
  411. Code is still hairy, but now it is apparently logically
  412. consistent and very flexible. F.e. as by-product it allows
  413. to co-exists in peace independent exterior and interior
  414. routing processes.
  415. Normally it looks as following.
  416. {universe prefix} -> (gw, oif) [scope link]
  417. |
  418. |-> {link prefix} -> (gw, oif) [scope local]
  419. |
  420. |-> {local prefix} (terminal node)
  421. */
  422. static int fib_check_nh(const struct rtmsg *r, struct fib_info *fi, struct fib_nh *nh)
  423. {
  424. int err;
  425. if (nh->nh_gw) {
  426. struct fib_result res;
  427. #ifdef CONFIG_IP_ROUTE_PERVASIVE
  428. if (nh->nh_flags&RTNH_F_PERVASIVE)
  429. return 0;
  430. #endif
  431. if (nh->nh_flags&RTNH_F_ONLINK) {
  432. struct net_device *dev;
  433. if (r->rtm_scope >= RT_SCOPE_LINK)
  434. return -EINVAL;
  435. if (inet_addr_type(nh->nh_gw) != RTN_UNICAST)
  436. return -EINVAL;
  437. if ((dev = __dev_get_by_index(nh->nh_oif)) == NULL)
  438. return -ENODEV;
  439. if (!(dev->flags&IFF_UP))
  440. return -ENETDOWN;
  441. nh->nh_dev = dev;
  442. dev_hold(dev);
  443. nh->nh_scope = RT_SCOPE_LINK;
  444. return 0;
  445. }
  446. {
  447. struct flowi fl = { .nl_u = { .ip4_u =
  448. { .daddr = nh->nh_gw,
  449. .scope = r->rtm_scope + 1 } },
  450. .oif = nh->nh_oif };
  451. /* It is not necessary, but requires a bit of thinking */
  452. if (fl.fl4_scope < RT_SCOPE_LINK)
  453. fl.fl4_scope = RT_SCOPE_LINK;
  454. if ((err = fib_lookup(&fl, &res)) != 0)
  455. return err;
  456. }
  457. err = -EINVAL;
  458. if (res.type != RTN_UNICAST && res.type != RTN_LOCAL)
  459. goto out;
  460. nh->nh_scope = res.scope;
  461. nh->nh_oif = FIB_RES_OIF(res);
  462. if ((nh->nh_dev = FIB_RES_DEV(res)) == NULL)
  463. goto out;
  464. dev_hold(nh->nh_dev);
  465. err = -ENETDOWN;
  466. if (!(nh->nh_dev->flags & IFF_UP))
  467. goto out;
  468. err = 0;
  469. out:
  470. fib_res_put(&res);
  471. return err;
  472. } else {
  473. struct in_device *in_dev;
  474. if (nh->nh_flags&(RTNH_F_PERVASIVE|RTNH_F_ONLINK))
  475. return -EINVAL;
  476. in_dev = inetdev_by_index(nh->nh_oif);
  477. if (in_dev == NULL)
  478. return -ENODEV;
  479. if (!(in_dev->dev->flags&IFF_UP)) {
  480. in_dev_put(in_dev);
  481. return -ENETDOWN;
  482. }
  483. nh->nh_dev = in_dev->dev;
  484. dev_hold(nh->nh_dev);
  485. nh->nh_scope = RT_SCOPE_HOST;
  486. in_dev_put(in_dev);
  487. }
  488. return 0;
  489. }
  490. static inline unsigned int fib_laddr_hashfn(u32 val)
  491. {
  492. unsigned int mask = (fib_hash_size - 1);
  493. return (val ^ (val >> 7) ^ (val >> 14)) & mask;
  494. }
  495. static struct hlist_head *fib_hash_alloc(int bytes)
  496. {
  497. if (bytes <= PAGE_SIZE)
  498. return kmalloc(bytes, GFP_KERNEL);
  499. else
  500. return (struct hlist_head *)
  501. __get_free_pages(GFP_KERNEL, get_order(bytes));
  502. }
  503. static void fib_hash_free(struct hlist_head *hash, int bytes)
  504. {
  505. if (!hash)
  506. return;
  507. if (bytes <= PAGE_SIZE)
  508. kfree(hash);
  509. else
  510. free_pages((unsigned long) hash, get_order(bytes));
  511. }
  512. static void fib_hash_move(struct hlist_head *new_info_hash,
  513. struct hlist_head *new_laddrhash,
  514. unsigned int new_size)
  515. {
  516. unsigned int old_size = fib_hash_size;
  517. unsigned int i;
  518. write_lock(&fib_info_lock);
  519. fib_hash_size = new_size;
  520. for (i = 0; i < old_size; i++) {
  521. struct hlist_head *head = &fib_info_hash[i];
  522. struct hlist_node *node, *n;
  523. struct fib_info *fi;
  524. hlist_for_each_entry_safe(fi, node, n, head, fib_hash) {
  525. struct hlist_head *dest;
  526. unsigned int new_hash;
  527. hlist_del(&fi->fib_hash);
  528. new_hash = fib_info_hashfn(fi);
  529. dest = &new_info_hash[new_hash];
  530. hlist_add_head(&fi->fib_hash, dest);
  531. }
  532. }
  533. fib_info_hash = new_info_hash;
  534. for (i = 0; i < old_size; i++) {
  535. struct hlist_head *lhead = &fib_info_laddrhash[i];
  536. struct hlist_node *node, *n;
  537. struct fib_info *fi;
  538. hlist_for_each_entry_safe(fi, node, n, lhead, fib_lhash) {
  539. struct hlist_head *ldest;
  540. unsigned int new_hash;
  541. hlist_del(&fi->fib_lhash);
  542. new_hash = fib_laddr_hashfn(fi->fib_prefsrc);
  543. ldest = &new_laddrhash[new_hash];
  544. hlist_add_head(&fi->fib_lhash, ldest);
  545. }
  546. }
  547. fib_info_laddrhash = new_laddrhash;
  548. write_unlock(&fib_info_lock);
  549. }
/*
 * Build a fib_info (route semantics + nexthop array) from a netlink
 * route request.  Returns the new record, or an existing identical
 * one with its tree refcount bumped; on error returns NULL with the
 * errno stored in *errp.
 */
struct fib_info *
fib_create_info(const struct rtmsg *r, struct kern_rta *rta,
		const struct nlmsghdr *nlh, int *errp)
{
	int err;
	struct fib_info *fi = NULL;
	struct fib_info *ofi;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	int nhs = 1;
#else
	const int nhs = 1;
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
	u32 mp_alg = IP_MP_ALG_NONE;
#endif

	/* Fast check to catch the most weird cases */
	if (fib_props[r->rtm_type].scope > r->rtm_scope)
		goto err_inval;

#ifdef CONFIG_IP_ROUTE_MULTIPATH
	/* Size the nexthop array from the multipath attribute. */
	if (rta->rta_mp) {
		nhs = fib_count_nexthops(rta->rta_mp);
		if (nhs == 0)
			goto err_inval;
	}
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
	if (rta->rta_mp_alg) {
		mp_alg = *rta->rta_mp_alg;
		if (mp_alg < IP_MP_ALG_NONE ||
		    mp_alg > IP_MP_ALG_MAX)
			goto err_inval;
	}
#endif

	err = -ENOBUFS;
	/* Double the info/laddr hash tables when the record count
	 * reaches the bucket count. */
	if (fib_info_cnt >= fib_hash_size) {
		unsigned int new_size = fib_hash_size << 1;
		struct hlist_head *new_info_hash;
		struct hlist_head *new_laddrhash;
		unsigned int bytes;

		if (!new_size)
			new_size = 1;
		/* NOTE(review): sized with sizeof(struct hlist_head *)
		 * rather than sizeof(struct hlist_head); both are
		 * pointer-sized here, so the byte count matches. */
		bytes = new_size * sizeof(struct hlist_head *);
		new_info_hash = fib_hash_alloc(bytes);
		new_laddrhash = fib_hash_alloc(bytes);
		if (!new_info_hash || !new_laddrhash) {
			/* Partial allocation: free what we got and keep
			 * the old (possibly full) tables. */
			fib_hash_free(new_info_hash, bytes);
			fib_hash_free(new_laddrhash, bytes);
		} else {
			memset(new_info_hash, 0, bytes);
			memset(new_laddrhash, 0, bytes);
			fib_hash_move(new_info_hash, new_laddrhash, new_size);
		}
		/* Still no table at all: the very first grow failed. */
		if (!fib_hash_size)
			goto failure;
	}

	/* fib_nh array is allocated inline after the fib_info. */
	fi = kmalloc(sizeof(*fi)+nhs*sizeof(struct fib_nh), GFP_KERNEL);
	if (fi == NULL)
		goto failure;
	fib_info_cnt++;
	memset(fi, 0, sizeof(*fi)+nhs*sizeof(struct fib_nh));

	fi->fib_protocol = r->rtm_protocol;
	fi->fib_nhs = nhs;
	change_nexthops(fi) {
		nh->nh_parent = fi;
	} endfor_nexthops(fi)

	fi->fib_flags = r->rtm_flags;
	if (rta->rta_priority)
		fi->fib_priority = *rta->rta_priority;
	/* Copy RTAX_* metrics out of the RTA_METRICS nest. */
	if (rta->rta_mx) {
		int attrlen = RTA_PAYLOAD(rta->rta_mx);
		struct rtattr *attr = RTA_DATA(rta->rta_mx);

		while (RTA_OK(attr, attrlen)) {
			unsigned flavor = attr->rta_type;
			if (flavor) {
				if (flavor > RTAX_MAX)
					goto err_inval;
				fi->fib_metrics[flavor-1] = *(unsigned*)RTA_DATA(attr);
			}
			attr = RTA_NEXT(attr, attrlen);
		}
	}
	if (rta->rta_prefsrc)
		memcpy(&fi->fib_prefsrc, rta->rta_prefsrc, 4);

	if (rta->rta_mp) {
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		if ((err = fib_get_nhs(fi, rta->rta_mp, r)) != 0)
			goto failure;
		/* If oif/gw/flow were also given flat, they must agree
		 * with the first nexthop. */
		if (rta->rta_oif && fi->fib_nh->nh_oif != *rta->rta_oif)
			goto err_inval;
		if (rta->rta_gw && memcmp(&fi->fib_nh->nh_gw, rta->rta_gw, 4))
			goto err_inval;
#ifdef CONFIG_NET_CLS_ROUTE
		if (rta->rta_flow && memcmp(&fi->fib_nh->nh_tclassid, rta->rta_flow, 4))
			goto err_inval;
#endif
#else
		/* Multipath attribute without multipath support. */
		goto err_inval;
#endif
	} else {
		struct fib_nh *nh = fi->fib_nh;
		if (rta->rta_oif)
			nh->nh_oif = *rta->rta_oif;
		if (rta->rta_gw)
			memcpy(&nh->nh_gw, rta->rta_gw, 4);
#ifdef CONFIG_NET_CLS_ROUTE
		if (rta->rta_flow)
			memcpy(&nh->nh_tclassid, rta->rta_flow, 4);
#endif
		nh->nh_flags = r->rtm_flags;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
		nh->nh_weight = 1;
#endif
	}

#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
	fi->fib_mp_alg = mp_alg;
#endif

	/* Error route types (blackhole, prohibit, ...) carry no
	 * nexthop information at all. */
	if (fib_props[r->rtm_type].error) {
		if (rta->rta_gw || rta->rta_oif || rta->rta_mp)
			goto err_inval;
		goto link_it;
	}

	if (r->rtm_scope > RT_SCOPE_HOST)
		goto err_inval;

	if (r->rtm_scope == RT_SCOPE_HOST) {
		struct fib_nh *nh = fi->fib_nh;

		/* Local address is added. */
		if (nhs != 1 || nh->nh_gw)
			goto err_inval;
		nh->nh_scope = RT_SCOPE_NOWHERE;
		nh->nh_dev = dev_get_by_index(fi->fib_nh->nh_oif);
		err = -ENODEV;
		if (nh->nh_dev == NULL)
			goto failure;
	} else {
		/* Resolve and validate every nexthop. */
		change_nexthops(fi) {
			if ((err = fib_check_nh(r, fi, nh)) != 0)
				goto failure;
		} endfor_nexthops(fi)
	}

	/* The preferred source must be a local address, unless it is
	 * the destination of this RTN_LOCAL route itself. */
	if (fi->fib_prefsrc) {
		if (r->rtm_type != RTN_LOCAL || rta->rta_dst == NULL ||
		    memcmp(&fi->fib_prefsrc, rta->rta_dst, 4))
			if (inet_addr_type(fi->fib_prefsrc) != RTN_LOCAL)
				goto err_inval;
	}

link_it:
	/* Share an identical pre-existing fib_info when possible. */
	if ((ofi = fib_find_info(fi)) != NULL) {
		fi->fib_dead = 1;
		free_fib_info(fi);
		ofi->fib_treeref++;
		return ofi;
	}

	fi->fib_treeref++;
	atomic_inc(&fi->fib_clntref);
	write_lock(&fib_info_lock);
	hlist_add_head(&fi->fib_hash,
		       &fib_info_hash[fib_info_hashfn(fi)]);
	if (fi->fib_prefsrc) {
		struct hlist_head *head;

		head = &fib_info_laddrhash[fib_laddr_hashfn(fi->fib_prefsrc)];
		hlist_add_head(&fi->fib_lhash, head);
	}
	change_nexthops(fi) {
		struct hlist_head *head;
		unsigned int hash;

		if (!nh->nh_dev)
			continue;
		hash = fib_devindex_hashfn(nh->nh_dev->ifindex);
		head = &fib_info_devhash[hash];
		hlist_add_head(&nh->nh_hash, head);
	} endfor_nexthops(fi)
	write_unlock(&fib_info_lock);
	return fi;

err_inval:
	err = -EINVAL;

failure:
	*errp = err;
	if (fi) {
		fi->fib_dead = 1;
		free_fib_info(fi);
	}
	return NULL;
}
/*
 * Walk the alias list of a matched prefix and pick the first alias
 * (and nexthop) usable for flow @flp.  Returns 0 on success with
 * *res filled and a fib_clntref taken, 1 when nothing matches, or a
 * negative fib_props error for error route types.
 */
int fib_semantic_match(struct list_head *head, const struct flowi *flp,
		       struct fib_result *res, __u32 zone, __u32 mask,
		       int prefixlen)
{
	struct fib_alias *fa;
	int nh_sel = 0;

	list_for_each_entry(fa, head, fa_list) {
		int err;

		/* fa_tos == 0 is a wildcard; otherwise exact match. */
		if (fa->fa_tos &&
		    fa->fa_tos != flp->fl4_tos)
			continue;

		if (fa->fa_scope < flp->fl4_scope)
			continue;

		fa->fa_state |= FA_S_ACCESSED;

		err = fib_props[fa->fa_type].error;
		if (err == 0) {
			struct fib_info *fi = fa->fa_info;

			if (fi->fib_flags & RTNH_F_DEAD)
				continue;

			switch (fa->fa_type) {
			case RTN_UNICAST:
			case RTN_LOCAL:
			case RTN_BROADCAST:
			case RTN_ANYCAST:
			case RTN_MULTICAST:
				/* First live nexthop matching the
				 * requested oif (if one was given). */
				for_nexthops(fi) {
					if (nh->nh_flags&RTNH_F_DEAD)
						continue;
					if (!flp->oif || flp->oif == nh->nh_oif)
						break;
				}
				/* nhsel is still in scope here: the brace
				 * block opened by for_nexthops() is only
				 * closed by endfor_nexthops() below, so a
				 * full loop (no break) means no hop fit. */
#ifdef CONFIG_IP_ROUTE_MULTIPATH
				if (nhsel < fi->fib_nhs) {
					nh_sel = nhsel;
					goto out_fill_res;
				}
#else
				if (nhsel < 1) {
					goto out_fill_res;
				}
#endif
				endfor_nexthops(fi);
				continue;

			default:
				printk(KERN_DEBUG "impossible 102\n");
				return -EINVAL;
			};
		}
		return err;
	}
	return 1;

out_fill_res:
	res->prefixlen = prefixlen;
	res->nh_sel = nh_sel;
	res->type = fa->fa_type;
	res->scope = fa->fa_scope;
	res->fi = fa->fa_info;
#ifdef CONFIG_IP_ROUTE_MULTIPATH_CACHED
	res->netmask = mask;
	res->network = zone &
		(0xFFFFFFFF >> (32 - prefixlen));
#endif
	atomic_inc(&res->fi->fib_clntref);
	return 0;
}
  798. /* Find appropriate source address to this destination */
  799. u32 __fib_res_prefsrc(struct fib_result *res)
  800. {
  801. return inet_select_addr(FIB_RES_DEV(*res), FIB_RES_GW(*res), res->scope);
  802. }
/*
 * Fill one RTM_* netlink message describing a route into @skb.
 * Returns skb->len on success, or -1 when the skb ran out of room
 * (the partially built message is trimmed off before returning).
 * NLMSG_NEW/RTA_PUT jump to nlmsg_failure/rtattr_failure on overflow.
 */
int
fib_dump_info(struct sk_buff *skb, u32 pid, u32 seq, int event,
	      u8 tb_id, u8 type, u8 scope, void *dst, int dst_len, u8 tos,
	      struct fib_info *fi, unsigned int flags)
{
	struct rtmsg *rtm;
	struct nlmsghdr *nlh;
	unsigned char *b = skb->tail;	/* message start: for final length and trim */

	nlh = NLMSG_NEW(skb, pid, seq, event, sizeof(*rtm), flags);
	rtm = NLMSG_DATA(nlh);
	rtm->rtm_family = AF_INET;
	rtm->rtm_dst_len = dst_len;
	rtm->rtm_src_len = 0;
	rtm->rtm_tos = tos;
	rtm->rtm_table = tb_id;
	rtm->rtm_type = type;
	rtm->rtm_flags = fi->fib_flags;
	rtm->rtm_scope = scope;
	if (rtm->rtm_dst_len)
		RTA_PUT(skb, RTA_DST, 4, dst);
	rtm->rtm_protocol = fi->fib_protocol;
	if (fi->fib_priority)
		RTA_PUT(skb, RTA_PRIORITY, 4, &fi->fib_priority);
#ifdef CONFIG_NET_CLS_ROUTE
	if (fi->fib_nh[0].nh_tclassid)
		RTA_PUT(skb, RTA_FLOW, 4, &fi->fib_nh[0].nh_tclassid);
#endif
	if (rtnetlink_put_metrics(skb, fi->fib_metrics) < 0)
		goto rtattr_failure;
	if (fi->fib_prefsrc)
		RTA_PUT(skb, RTA_PREFSRC, 4, &fi->fib_prefsrc);
	/* Single nexthop: flat RTA_GATEWAY / RTA_OIF attributes. */
	if (fi->fib_nhs == 1) {
		if (fi->fib_nh->nh_gw)
			RTA_PUT(skb, RTA_GATEWAY, 4, &fi->fib_nh->nh_gw);
		if (fi->fib_nh->nh_oif)
			RTA_PUT(skb, RTA_OIF, sizeof(int), &fi->fib_nh->nh_oif);
	}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
	/* Multipath: nest one rtnexthop per hop inside RTA_MULTIPATH;
	 * the length fields are patched once the payload is known. */
	if (fi->fib_nhs > 1) {
		struct rtnexthop *nhp;
		struct rtattr *mp_head;
		if (skb_tailroom(skb) <= RTA_SPACE(0))
			goto rtattr_failure;
		mp_head = (struct rtattr*)skb_put(skb, RTA_SPACE(0));

		for_nexthops(fi) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop*)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = nh->nh_flags & 0xFF;
			nhp->rtnh_hops = nh->nh_weight-1;
			nhp->rtnh_ifindex = nh->nh_oif;
			if (nh->nh_gw)
				RTA_PUT(skb, RTA_GATEWAY, 4, &nh->nh_gw);
			nhp->rtnh_len = skb->tail - (unsigned char*)nhp;
		} endfor_nexthops(fi);
		mp_head->rta_type = RTA_MULTIPATH;
		mp_head->rta_len = skb->tail - (u8*)mp_head;
	}
#endif
	nlh->nlmsg_len = skb->tail - b;
	return skb->len;

nlmsg_failure:
rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
  869. #ifndef CONFIG_IP_NOSIOCRT
/*
 * fib_convert_rtentry - translate a legacy SIOCADDRT/SIOCDELRT
 * struct rtentry into the netlink (nlmsghdr/rtmsg/kern_rta)
 * representation used by the FIB code.
 *
 * On success the rta pointers reference fields inside @r (and, for the
 * metrics, a kmalloc'd RTA_METRICS block that the caller must free), so
 * @r must stay alive while @rta is in use.
 *
 * Returns 0 on success or a negative errno (-EAFNOSUPPORT, -EINVAL,
 * -EFAULT, -ENODEV, -ENOMEM).
 */
int
fib_convert_rtentry(int cmd, struct nlmsghdr *nl, struct rtmsg *rtm,
struct kern_rta *rta, struct rtentry *r)
{
int plen;
u32 *ptr;
memset(rtm, 0, sizeof(*rtm));
memset(rta, 0, sizeof(*rta));
if (r->rt_dst.sa_family != AF_INET)
return -EAFNOSUPPORT;
/* Check mask for validity:
a) it must be contiguous.
b) destination must have all host bits clear.
c) if application forgot to set correct family (AF_INET),
reject request unless it is absolutely clear i.e.
both family and mask are zero.
*/
plen = 32;
ptr = &((struct sockaddr_in*)&r->rt_dst)->sin_addr.s_addr;
if (!(r->rt_flags&RTF_HOST)) {
u32 mask = ((struct sockaddr_in*)&r->rt_genmask)->sin_addr.s_addr;
if (r->rt_genmask.sa_family != AF_INET) {
/* Tolerate an all-zero genmask with family 0 (case c above). */
if (mask || r->rt_genmask.sa_family)
return -EAFNOSUPPORT;
}
if (bad_mask(mask, *ptr))
return -EINVAL;
plen = inet_mask_len(mask);
}
/* Synthesize the netlink header the FIB entry points expect. */
nl->nlmsg_flags = NLM_F_REQUEST;
nl->nlmsg_pid = current->pid;
nl->nlmsg_seq = 0;
nl->nlmsg_len = NLMSG_LENGTH(sizeof(*rtm));
if (cmd == SIOCDELRT) {
nl->nlmsg_type = RTM_DELROUTE;
nl->nlmsg_flags = 0;
} else {
nl->nlmsg_type = RTM_NEWROUTE;
nl->nlmsg_flags = NLM_F_REQUEST|NLM_F_CREATE;
rtm->rtm_protocol = RTPROT_BOOT;
}
rtm->rtm_dst_len = plen;
rta->rta_dst = ptr;
if (r->rt_metric) {
/* Stash metric-1 in the unused pad field so rta can point at it
 * (ioctl metrics are off by one versus netlink priorities). */
*(u32*)&r->rt_pad3 = r->rt_metric - 1;
rta->rta_priority = (u32*)&r->rt_pad3;
}
if (r->rt_flags&RTF_REJECT) {
/* Reject routes need no device/gateway processing. */
rtm->rtm_scope = RT_SCOPE_HOST;
rtm->rtm_type = RTN_UNREACHABLE;
return 0;
}
/* NOWHERE acts as "scope not yet determined"; refined below. */
rtm->rtm_scope = RT_SCOPE_NOWHERE;
rtm->rtm_type = RTN_UNICAST;
if (r->rt_dev) {
char *colon;
struct net_device *dev;
char devname[IFNAMSIZ];
if (copy_from_user(devname, r->rt_dev, IFNAMSIZ-1))
return -EFAULT;
devname[IFNAMSIZ-1] = 0;
/* "eth0:1" names an alias; look up the base device first. */
colon = strchr(devname, ':');
if (colon)
*colon = 0;
dev = __dev_get_by_name(devname);
if (!dev)
return -ENODEV;
rta->rta_oif = &dev->ifindex;
if (colon) {
struct in_ifaddr *ifa;
struct in_device *in_dev = __in_dev_get(dev);
if (!in_dev)
return -ENODEV;
*colon = ':';
/* Find the alias address matching the full "dev:label" name. */
for (ifa = in_dev->ifa_list; ifa; ifa = ifa->ifa_next)
if (strcmp(ifa->ifa_label, devname) == 0)
break;
if (ifa == NULL)
return -ENODEV;
rta->rta_prefsrc = &ifa->ifa_local;
}
}
ptr = &((struct sockaddr_in*)&r->rt_gateway)->sin_addr.s_addr;
if (r->rt_gateway.sa_family == AF_INET && *ptr) {
rta->rta_gw = ptr;
/* A real (unicast) gateway widens the scope to UNIVERSE. */
if (r->rt_flags&RTF_GATEWAY && inet_addr_type(*ptr) == RTN_UNICAST)
rtm->rtm_scope = RT_SCOPE_UNIVERSE;
}
if (cmd == SIOCDELRT)
return 0;
if (r->rt_flags&RTF_GATEWAY && rta->rta_gw == NULL)
return -EINVAL;
if (rtm->rtm_scope == RT_SCOPE_NOWHERE)
rtm->rtm_scope = RT_SCOPE_LINK;
/* Pack up to three fixed-size metrics into an RTA_METRICS block. */
if (r->rt_flags&(RTF_MTU|RTF_WINDOW|RTF_IRTT)) {
struct rtattr *rec;
struct rtattr *mx = kmalloc(RTA_LENGTH(3*RTA_LENGTH(4)), GFP_KERNEL);
if (mx == NULL)
return -ENOMEM;
rta->rta_mx = mx;
mx->rta_type = RTA_METRICS;
mx->rta_len = RTA_LENGTH(0);
if (r->rt_flags&RTF_MTU) {
rec = (void*)((char*)mx + RTA_ALIGN(mx->rta_len));
/* Historical quirk: the ioctl "MTU" is advertised as ADVMSS,
 * i.e. MTU minus 40 bytes of IP+TCP header. */
rec->rta_type = RTAX_ADVMSS;
rec->rta_len = RTA_LENGTH(4);
mx->rta_len += RTA_LENGTH(4);
*(u32*)RTA_DATA(rec) = r->rt_mtu - 40;
}
if (r->rt_flags&RTF_WINDOW) {
rec = (void*)((char*)mx + RTA_ALIGN(mx->rta_len));
rec->rta_type = RTAX_WINDOW;
rec->rta_len = RTA_LENGTH(4);
mx->rta_len += RTA_LENGTH(4);
*(u32*)RTA_DATA(rec) = r->rt_window;
}
if (r->rt_flags&RTF_IRTT) {
rec = (void*)((char*)mx + RTA_ALIGN(mx->rta_len));
rec->rta_type = RTAX_RTT;
rec->rta_len = RTA_LENGTH(4);
mx->rta_len += RTA_LENGTH(4);
/* irtt is in ticks; RTAX_RTT wants it scaled by 8. */
*(u32*)RTA_DATA(rec) = r->rt_irtt<<3;
}
}
return 0;
}
  996. #endif
/*
 * Update the FIB when:
 * - a local address disappeared -> delete all entries referring to it;
 * - a device went down -> shut down all nexthops going via it.
 *
 * @local: vanished local address (0 if none), @dev: affected device
 * (NULL if none), @force: 0 = normal down, -? see below; force > 1
 * additionally kills every multipath route through @dev outright.
 *
 * Returns the number of fib_info entries newly marked RTNH_F_DEAD.
 */
int fib_sync_down(u32 local, struct net_device *dev, int force)
{
int ret = 0;
int scope = RT_SCOPE_NOWHERE;
/* force: match every nexthop scope, not just NOWHERE. */
if (force)
scope = -1;
if (local && fib_info_laddrhash) {
unsigned int hash = fib_laddr_hashfn(local);
struct hlist_head *head = &fib_info_laddrhash[hash];
struct hlist_node *node;
struct fib_info *fi;
/* Kill every route whose preferred source was the lost address. */
hlist_for_each_entry(fi, node, head, fib_lhash) {
if (fi->fib_prefsrc == local) {
fi->fib_flags |= RTNH_F_DEAD;
ret++;
}
}
}
if (dev) {
struct fib_info *prev_fi = NULL;
unsigned int hash = fib_devindex_hashfn(dev->ifindex);
struct hlist_head *head = &fib_info_devhash[hash];
struct hlist_node *node;
struct fib_nh *nh;
hlist_for_each_entry(nh, node, head, nh_hash) {
struct fib_info *fi = nh->nh_parent;
int dead;
BUG_ON(!fi->fib_nhs);
/* Several nexthops of one fib_info can hash to the same chain;
 * prev_fi skips a fib_info we just processed. */
if (nh->nh_dev != dev || fi == prev_fi)
continue;
prev_fi = fi;
dead = 0;
/* NOTE: change_nexthops redeclares 'nh', shadowing the hash-walk
 * cursor inside this loop. */
change_nexthops(fi) {
if (nh->nh_flags&RTNH_F_DEAD)
dead++;
else if (nh->nh_dev == dev &&
nh->nh_scope != scope) {
nh->nh_flags |= RTNH_F_DEAD;
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* Remove this hop's share from the balancing budget. */
spin_lock_bh(&fib_multipath_lock);
fi->fib_power -= nh->nh_power;
nh->nh_power = 0;
spin_unlock_bh(&fib_multipath_lock);
#endif
dead++;
}
#ifdef CONFIG_IP_ROUTE_MULTIPATH
/* force > 1: the whole route dies as soon as one hop uses dev. */
if (force > 1 && nh->nh_dev == dev) {
dead = fi->fib_nhs;
break;
}
#endif
} endfor_nexthops(fi)
/* Route is dead only when every nexthop is dead. */
if (dead == fi->fib_nhs) {
fi->fib_flags |= RTNH_F_DEAD;
ret++;
}
}
}
return ret;
}
  1064. #ifdef CONFIG_IP_ROUTE_MULTIPATH
/*
 * A dead device comes back up: wake up its dead nexthops.
 * This makes sense only for multipath routes.
 *
 * Returns the number of fib_info entries brought back to life.
 */
int fib_sync_up(struct net_device *dev)
{
struct fib_info *prev_fi;
unsigned int hash;
struct hlist_head *head;
struct hlist_node *node;
struct fib_nh *nh;
int ret;
if (!(dev->flags&IFF_UP))
return 0;
prev_fi = NULL;
hash = fib_devindex_hashfn(dev->ifindex);
head = &fib_info_devhash[hash];
ret = 0;
hlist_for_each_entry(nh, node, head, nh_hash) {
struct fib_info *fi = nh->nh_parent;
int alive;
BUG_ON(!fi->fib_nhs);
/* Skip a fib_info already handled via another of its nexthops. */
if (nh->nh_dev != dev || fi == prev_fi)
continue;
prev_fi = fi;
alive = 0;
/* NOTE: change_nexthops redeclares 'nh', shadowing the hash-walk
 * cursor inside this loop. */
change_nexthops(fi) {
if (!(nh->nh_flags&RTNH_F_DEAD)) {
alive++;
continue;
}
if (nh->nh_dev == NULL || !(nh->nh_dev->flags&IFF_UP))
continue;
/* Only revive hops on @dev, and only if it has IP config. */
if (nh->nh_dev != dev || __in_dev_get(dev) == NULL)
continue;
alive++;
spin_lock_bh(&fib_multipath_lock);
nh->nh_power = 0;
nh->nh_flags &= ~RTNH_F_DEAD;
spin_unlock_bh(&fib_multipath_lock);
} endfor_nexthops(fi)
/* Any live nexthop makes the whole route usable again. */
if (alive > 0) {
fi->fib_flags &= ~RTNH_F_DEAD;
ret++;
}
}
return ret;
}
/*
 * The algorithm is suboptimal, but it provides really
 * fair weighted route distribution.
 *
 * Picks one live nexthop of res->fi and stores its index in
 * res->nh_sel.  Each hop carries a per-round token budget (nh_power)
 * proportional to its weight; a token is spent per selection, and when
 * the route's total budget (fib_power) is exhausted, all budgets are
 * refilled from the weights.  All state is guarded by
 * fib_multipath_lock.
 */
void fib_select_multipath(const struct flowi *flp, struct fib_result *res)
{
struct fib_info *fi = res->fi;
int w;
spin_lock_bh(&fib_multipath_lock);
/* Budget exhausted (or never initialized): refill from the weights. */
if (fi->fib_power <= 0) {
int power = 0;
change_nexthops(fi) {
if (!(nh->nh_flags&RTNH_F_DEAD)) {
power += nh->nh_weight;
nh->nh_power = nh->nh_weight;
}
} endfor_nexthops(fi);
fi->fib_power = power;
if (power <= 0) {
spin_unlock_bh(&fib_multipath_lock);
/* Race condition: route has just become dead. */
res->nh_sel = 0;
return;
}
}
/* w should be random number [0..fi->fib_power-1],
it is pretty bad approximation.
*/
w = jiffies % fi->fib_power;
/* Walk the hops, subtracting budgets until w crosses zero; charge one
 * token to the chosen hop. */
change_nexthops(fi) {
if (!(nh->nh_flags&RTNH_F_DEAD) && nh->nh_power) {
if ((w -= nh->nh_power) <= 0) {
nh->nh_power--;
fi->fib_power--;
res->nh_sel = nhsel;
spin_unlock_bh(&fib_multipath_lock);
return;
}
}
} endfor_nexthops(fi);
/* Race condition: route has just become dead. */
res->nh_sel = 0;
spin_unlock_bh(&fib_multipath_lock);
}
  1157. #endif