ipmr.c

/*
 *	IP multicast routing support for mrouted 3.6/3.8
 *
 *		(c) 1995 Alan Cox, <alan@redhat.com>
 *	  Linux Consultancy and Custom Driver Development
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Version: $Id: ipmr.c,v 1.65 2001/10/31 21:55:54 davem Exp $
 *
 *	Fixes:
 *	Michael Chastain	:	Incorrect size of copying.
 *	Alan Cox		:	Added the cache manager code
 *	Alan Cox		:	Fixed the clone/copy bug and device race.
 *	Mike McLagan		:	Routing by source
 *	Malcolm Beattie		:	Buffer handling fixes.
 *	Alexey Kuznetsov	:	Double buffer free and other fixes.
 *	SVR Anand		:	Fixed several multicast bugs and problems.
 *	Alexey Kuznetsov	:	Status, optimisations and more.
 *	Brad Parker		:	Better behaviour on mrouted upcall
 *					overflow.
 *	Carlos Picoto		:	PIMv1 Support
 *	Pavlin Ivanov Radoslavov:	PIMv2 Registers must checksum only PIM header
 *					Relax this requirement to work with older peers.
 *
 */
#include <asm/system.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/timer.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/fcntl.h>
#include <linux/stat.h>
#include <linux/socket.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/mroute.h>
#include <linux/init.h>
#include <linux/if_ether.h>
#include <net/net_namespace.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/icmp.h>
#include <net/udp.h>
#include <net/raw.h>
#include <linux/notifier.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <net/ipip.h>
#include <net/checksum.h>
#include <net/netlink.h>

#if defined(CONFIG_IP_PIMSM_V1) || defined(CONFIG_IP_PIMSM_V2)
#define CONFIG_IP_PIMSM	1
#endif
static struct sock *mroute_socket;

/* Big lock, protecting vif table, mrt cache and mroute socket state.
   Note that the changes are semaphored via rtnl_lock.
 */

static DEFINE_RWLOCK(mrt_lock);

/*
 *	Multicast router control variables
 */

static struct vif_device vif_table[MAXVIFS];		/* Devices		*/
static int maxvif;

#define VIF_EXISTS(idx) (vif_table[idx].dev != NULL)

static int mroute_do_assert;				/* Set in PIM assert	*/
static int mroute_do_pim;

static struct mfc_cache *mfc_cache_array[MFC_LINES];	/* Forwarding cache	*/

static struct mfc_cache *mfc_unres_queue;		/* Queue of unresolved entries */
static atomic_t cache_resolve_queue_len;		/* Size of unresolved	*/

/* Special spinlock for queue of unresolved entries */
static DEFINE_SPINLOCK(mfc_unres_lock);

/* We return to original Alan's scheme. Hash table of resolved
   entries is changed only in process context and protected
   with weak lock mrt_lock. Queue of unresolved entries is protected
   with strong spinlock mfc_unres_lock.

   In this case data path is free of exclusive locks at all.
 */
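
/* Illustrative sketch (added commentary, not part of the original file):
 * the pattern the comment above describes, as used elsewhere in this
 * file. Fast-path readers nest under the rwlock; control-path writers
 * take it exclusively with bottom halves disabled:
 *
 *	read_lock(&mrt_lock);			// e.g. ip_mr_input()
 *	cache = ipmr_cache_find(saddr, daddr);
 *	...
 *	read_unlock(&mrt_lock);
 *
 *	write_lock_bh(&mrt_lock);		// e.g. ipmr_mfc_add()
 *	c->next = mfc_cache_array[line];
 *	mfc_cache_array[line] = c;
 *	write_unlock_bh(&mrt_lock);
 *
 * Unresolved entries, which are queued from softirq context, instead
 * take the mfc_unres_lock spinlock (spin_lock_bh) on both paths.
 */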
static struct kmem_cache *mrt_cachep __read_mostly;

static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
static int ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm);

#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol;
#endif

static struct timer_list ipmr_expire_timer;
/* Service routines creating virtual interfaces: DVMRP tunnels and PIMREG */

static
struct net_device *ipmr_new_tunnel(struct vifctl *v)
{
	struct net_device *dev;

	dev = __dev_get_by_name(&init_net, "tunl0");

	if (dev) {
		int err;
		struct ifreq ifr;
		mm_segment_t oldfs;
		struct ip_tunnel_parm p;
		struct in_device *in_dev;

		memset(&p, 0, sizeof(p));
		p.iph.daddr = v->vifc_rmt_addr.s_addr;
		p.iph.saddr = v->vifc_lcl_addr.s_addr;
		p.iph.version = 4;
		p.iph.ihl = 5;
		p.iph.protocol = IPPROTO_IPIP;
		sprintf(p.name, "dvmrp%d", v->vifc_vifi);
		ifr.ifr_ifru.ifru_data = (void*)&p;

		oldfs = get_fs(); set_fs(KERNEL_DS);
		err = dev->do_ioctl(dev, &ifr, SIOCADDTUNNEL);
		set_fs(oldfs);

		dev = NULL;

		if (err == 0 && (dev = __dev_get_by_name(&init_net, p.name)) != NULL) {
			dev->flags |= IFF_MULTICAST;

			in_dev = __in_dev_get_rtnl(dev);
			if (in_dev == NULL)
				goto failure;

			ipv4_devconf_setall(in_dev);
			IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;

			if (dev_open(dev))
				goto failure;
		}
	}
	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
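
/* Note (added commentary): the function above does not build a tunnel
 * device itself - it drives the generic ipip driver through its
 * SIOCADDTUNNEL ioctl. The get_fs()/set_fs(KERNEL_DS) dance is only
 * there because do_ioctl() expects a userspace pointer and we hand it
 * a kernel one. The resulting device is named "dvmrp<vif-index>", so a
 * vifctl with vifc_vifi == 2 yields "dvmrp2".
 */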
#ifdef CONFIG_IP_PIMSM

static int reg_vif_num = -1;

static int reg_vif_xmit(struct sk_buff *skb, struct net_device *dev)
{
	read_lock(&mrt_lock);
	((struct net_device_stats*)netdev_priv(dev))->tx_bytes += skb->len;
	((struct net_device_stats*)netdev_priv(dev))->tx_packets++;
	ipmr_cache_report(skb, reg_vif_num, IGMPMSG_WHOLEPKT);
	read_unlock(&mrt_lock);
	kfree_skb(skb);
	return 0;
}

static struct net_device_stats *reg_vif_get_stats(struct net_device *dev)
{
	return (struct net_device_stats*)netdev_priv(dev);
}

static void reg_vif_setup(struct net_device *dev)
{
	dev->type		= ARPHRD_PIMREG;
	dev->mtu		= ETH_DATA_LEN - sizeof(struct iphdr) - 8;
	dev->flags		= IFF_NOARP;
	dev->hard_start_xmit	= reg_vif_xmit;
	dev->get_stats		= reg_vif_get_stats;
	dev->destructor		= free_netdev;
}

static struct net_device *ipmr_reg_vif(void)
{
	struct net_device *dev;
	struct in_device *in_dev;

	dev = alloc_netdev(sizeof(struct net_device_stats), "pimreg",
			   reg_vif_setup);

	if (dev == NULL)
		return NULL;

	if (register_netdevice(dev)) {
		free_netdev(dev);
		return NULL;
	}
	dev->iflink = 0;

	rcu_read_lock();
	if ((in_dev = __in_dev_get_rcu(dev)) == NULL) {
		rcu_read_unlock();
		goto failure;
	}

	ipv4_devconf_setall(in_dev);
	IPV4_DEVCONF(in_dev->cnf, RP_FILTER) = 0;
	rcu_read_unlock();

	if (dev_open(dev))
		goto failure;

	return dev;

failure:
	/* allow the register to be completed before unregistering. */
	rtnl_unlock();
	rtnl_lock();

	unregister_netdevice(dev);
	return NULL;
}
#endif
/*
 *	Delete a VIF entry
 */

static int vif_delete(int vifi)
{
	struct vif_device *v;
	struct net_device *dev;
	struct in_device *in_dev;

	if (vifi < 0 || vifi >= maxvif)
		return -EADDRNOTAVAIL;

	v = &vif_table[vifi];

	write_lock_bh(&mrt_lock);
	dev = v->dev;
	v->dev = NULL;

	if (!dev) {
		write_unlock_bh(&mrt_lock);
		return -EADDRNOTAVAIL;
	}

#ifdef CONFIG_IP_PIMSM
	if (vifi == reg_vif_num)
		reg_vif_num = -1;
#endif

	if (vifi+1 == maxvif) {
		int tmp;
		for (tmp=vifi-1; tmp>=0; tmp--) {
			if (VIF_EXISTS(tmp))
				break;
		}
		maxvif = tmp+1;
	}

	write_unlock_bh(&mrt_lock);

	dev_set_allmulti(dev, -1);

	if ((in_dev = __in_dev_get_rtnl(dev)) != NULL) {
		IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)--;
		ip_rt_multicast_event(in_dev);
	}

	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
		unregister_netdevice(dev);

	dev_put(dev);
	return 0;
}
/* Destroy an unresolved cache entry, killing queued skbs
   and reporting error to netlink readers.
 */

static void ipmr_destroy_unres(struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	atomic_dec(&cache_resolve_queue_len);

	while ((skb=skb_dequeue(&c->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));
			nlh->nlmsg_type = NLMSG_ERROR;
			nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
			skb_trim(skb, nlh->nlmsg_len);
			e = NLMSG_DATA(nlh);
			e->error = -ETIMEDOUT;
			memset(&e->msg, 0, sizeof(e->msg));

			rtnl_unicast(skb, NETLINK_CB(skb).pid);
		} else
			kfree_skb(skb);
	}

	kmem_cache_free(mrt_cachep, c);
}
/* Single timer process for all the unresolved queue. */

static void ipmr_expire_process(unsigned long dummy)
{
	unsigned long now;
	unsigned long expires;
	struct mfc_cache *c, **cp;

	if (!spin_trylock(&mfc_unres_lock)) {
		mod_timer(&ipmr_expire_timer, jiffies+HZ/10);
		return;
	}

	if (atomic_read(&cache_resolve_queue_len) == 0)
		goto out;

	now = jiffies;
	expires = 10*HZ;
	cp = &mfc_unres_queue;

	while ((c=*cp) != NULL) {
		if (time_after(c->mfc_un.unres.expires, now)) {
			unsigned long interval = c->mfc_un.unres.expires - now;
			if (interval < expires)
				expires = interval;
			cp = &c->next;
			continue;
		}

		*cp = c->next;

		ipmr_destroy_unres(c);
	}

	if (atomic_read(&cache_resolve_queue_len))
		mod_timer(&ipmr_expire_timer, jiffies + expires);

out:
	spin_unlock(&mfc_unres_lock);
}
/* Fill oifs list. It is called under write locked mrt_lock. */

static void ipmr_update_thresholds(struct mfc_cache *cache, unsigned char *ttls)
{
	int vifi;

	cache->mfc_un.res.minvif = MAXVIFS;
	cache->mfc_un.res.maxvif = 0;
	memset(cache->mfc_un.res.ttls, 255, MAXVIFS);

	for (vifi=0; vifi<maxvif; vifi++) {
		if (VIF_EXISTS(vifi) && ttls[vifi] && ttls[vifi] < 255) {
			cache->mfc_un.res.ttls[vifi] = ttls[vifi];
			if (cache->mfc_un.res.minvif > vifi)
				cache->mfc_un.res.minvif = vifi;
			if (cache->mfc_un.res.maxvif <= vifi)
				cache->mfc_un.res.maxvif = vifi + 1;
		}
	}
}
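
/* Worked example (added commentary): a TTL of 0 or 255 means "not an
 * output interface". Given maxvif == 4 and ttls = {0, 2, 0, 3}, the
 * loop above leaves res.ttls = {255, 2, 255, 3}, minvif == 1 and
 * maxvif == 4, so the forwarding loop scans vifs in [1, 4) and sends
 * only on those whose threshold is below 255.
 */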
static int vif_add(struct vifctl *vifc, int mrtsock)
{
	int vifi = vifc->vifc_vifi;
	struct vif_device *v = &vif_table[vifi];
	struct net_device *dev;
	struct in_device *in_dev;

	/* Is vif busy ? */
	if (VIF_EXISTS(vifi))
		return -EADDRINUSE;

	switch (vifc->vifc_flags) {
#ifdef CONFIG_IP_PIMSM
	case VIFF_REGISTER:
		/*
		 * Special Purpose VIF in PIM
		 * All the packets will be sent to the daemon
		 */
		if (reg_vif_num >= 0)
			return -EADDRINUSE;
		dev = ipmr_reg_vif();
		if (!dev)
			return -ENOBUFS;
		break;
#endif
	case VIFF_TUNNEL:
		dev = ipmr_new_tunnel(vifc);
		if (!dev)
			return -ENOBUFS;
		break;
	case 0:
		dev = ip_dev_find(vifc->vifc_lcl_addr.s_addr);
		if (!dev)
			return -EADDRNOTAVAIL;
		dev_put(dev);
		break;
	default:
		return -EINVAL;
	}

	if ((in_dev = __in_dev_get_rtnl(dev)) == NULL)
		return -EADDRNOTAVAIL;
	IPV4_DEVCONF(in_dev->cnf, MC_FORWARDING)++;
	dev_set_allmulti(dev, +1);
	ip_rt_multicast_event(in_dev);

	/*
	 *	Fill in the VIF structures
	 */
	v->rate_limit=vifc->vifc_rate_limit;
	v->local=vifc->vifc_lcl_addr.s_addr;
	v->remote=vifc->vifc_rmt_addr.s_addr;
	v->flags=vifc->vifc_flags;
	if (!mrtsock)
		v->flags |= VIFF_STATIC;
	v->threshold=vifc->vifc_threshold;
	v->bytes_in = 0;
	v->bytes_out = 0;
	v->pkt_in = 0;
	v->pkt_out = 0;
	v->link = dev->ifindex;
	if (v->flags&(VIFF_TUNNEL|VIFF_REGISTER))
		v->link = dev->iflink;

	/* And finish update writing critical data */
	write_lock_bh(&mrt_lock);
	dev_hold(dev);
	v->dev=dev;
#ifdef CONFIG_IP_PIMSM
	if (v->flags&VIFF_REGISTER)
		reg_vif_num = vifi;
#endif
	if (vifi+1 > maxvif)
		maxvif = vifi+1;
	write_unlock_bh(&mrt_lock);
	return 0;
}
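
/* Sketch (added commentary, not from the original file): how a routing
 * daemon such as mrouted reaches vif_add() from userspace. The names
 * come from <linux/mroute.h>; the address is illustrative:
 *
 *	struct vifctl vc = {
 *		.vifc_vifi       = 1,
 *		.vifc_flags      = 0,		// plain physical interface
 *		.vifc_threshold  = 1,		// minimum TTL to forward
 *		.vifc_rate_limit = 0,
 *	};
 *	vc.vifc_lcl_addr.s_addr = inet_addr("192.0.2.1");
 *	setsockopt(igmp_sock, IPPROTO_IP, MRT_ADD_VIF, &vc, sizeof(vc));
 *
 * igmp_sock is the raw IGMP socket on which MRT_INIT was issued.
 */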
static struct mfc_cache *ipmr_cache_find(__be32 origin, __be32 mcastgrp)
{
	int line=MFC_HASH(mcastgrp,origin);
	struct mfc_cache *c;

	for (c=mfc_cache_array[line]; c; c = c->next) {
		if (c->mfc_origin==origin && c->mfc_mcastgrp==mcastgrp)
			break;
	}
	return c;
}

/*
 *	Allocate a multicast cache entry
 */
static struct mfc_cache *ipmr_cache_alloc(void)
{
	struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_KERNEL);
	if (c==NULL)
		return NULL;
	c->mfc_un.res.minvif = MAXVIFS;
	return c;
}

static struct mfc_cache *ipmr_cache_alloc_unres(void)
{
	struct mfc_cache *c=kmem_cache_zalloc(mrt_cachep, GFP_ATOMIC);
	if (c==NULL)
		return NULL;
	skb_queue_head_init(&c->mfc_un.unres.unresolved);
	c->mfc_un.unres.expires = jiffies + 10*HZ;
	return c;
}
/*
 *	A cache entry has gone into a resolved state from queued
 */

static void ipmr_cache_resolve(struct mfc_cache *uc, struct mfc_cache *c)
{
	struct sk_buff *skb;
	struct nlmsgerr *e;

	/*
	 *	Play the pending entries through our router
	 */

	while ((skb=__skb_dequeue(&uc->mfc_un.unres.unresolved))) {
		if (ip_hdr(skb)->version == 0) {
			struct nlmsghdr *nlh = (struct nlmsghdr *)skb_pull(skb, sizeof(struct iphdr));

			if (ipmr_fill_mroute(skb, c, NLMSG_DATA(nlh)) > 0) {
				nlh->nlmsg_len = (skb_tail_pointer(skb) -
						  (u8 *)nlh);
			} else {
				nlh->nlmsg_type = NLMSG_ERROR;
				nlh->nlmsg_len = NLMSG_LENGTH(sizeof(struct nlmsgerr));
				skb_trim(skb, nlh->nlmsg_len);
				e = NLMSG_DATA(nlh);
				e->error = -EMSGSIZE;
				memset(&e->msg, 0, sizeof(e->msg));
			}

			rtnl_unicast(skb, NETLINK_CB(skb).pid);
		} else
			ip_mr_forward(skb, c, 0);
	}
}
/*
 *	Bounce a cache query up to mrouted. We could use netlink for this but mrouted
 *	expects the following bizarre scheme.
 *
 *	Called under mrt_lock.
 */

static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert)
{
	struct sk_buff *skb;
	const int ihl = ip_hdrlen(pkt);
	struct igmphdr *igmp;
	struct igmpmsg *msg;
	int ret;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT)
		skb = skb_realloc_headroom(pkt, sizeof(struct iphdr));
	else
#endif
		skb = alloc_skb(128, GFP_ATOMIC);

	if (!skb)
		return -ENOBUFS;

#ifdef CONFIG_IP_PIMSM
	if (assert == IGMPMSG_WHOLEPKT) {
		/* Ugly, but we have no choice with this interface.
		   Duplicate old header, fix ihl, length etc.
		   And all this only to mangle msg->im_msgtype and
		   to set msg->im_mbz to "mbz" :-)
		 */
		skb_push(skb, sizeof(struct iphdr));
		skb_reset_network_header(skb);
		skb_reset_transport_header(skb);
		msg = (struct igmpmsg *)skb_network_header(skb);
		memcpy(msg, skb_network_header(pkt), sizeof(struct iphdr));
		msg->im_msgtype = IGMPMSG_WHOLEPKT;
		msg->im_mbz = 0;
		msg->im_vif = reg_vif_num;
		ip_hdr(skb)->ihl = sizeof(struct iphdr) >> 2;
		ip_hdr(skb)->tot_len = htons(ntohs(ip_hdr(pkt)->tot_len) +
					     sizeof(struct iphdr));
	} else
#endif
	{

	/*
	 *	Copy the IP header
	 */

	skb->network_header = skb->tail;
	skb_put(skb, ihl);
	skb_copy_to_linear_data(skb, pkt->data, ihl);
	ip_hdr(skb)->protocol = 0;	/* Flag to the kernel this is a route add */
	msg = (struct igmpmsg *)skb_network_header(skb);
	msg->im_vif = vifi;
	skb->dst = dst_clone(pkt->dst);

	/*
	 *	Add our header
	 */

	igmp=(struct igmphdr *)skb_put(skb,sizeof(struct igmphdr));
	igmp->type	=
	msg->im_msgtype = assert;
	igmp->code	= 0;
	ip_hdr(skb)->tot_len = htons(skb->len);		/* Fix the length */
	skb->transport_header = skb->network_header;
	}

	if (mroute_socket == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}

	/*
	 *	Deliver to mrouted
	 */
	if ((ret=sock_queue_rcv_skb(mroute_socket,skb))<0) {
		if (net_ratelimit())
			printk(KERN_WARNING "mroute: pending queue full, dropping entries.\n");
		kfree_skb(skb);
	}

	return ret;
}
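
/* Sketch (added commentary): what the daemon sees on the other end.
 * The upcall arrives as a pseudo-IGMP packet on the MRT_INIT socket
 * and is conventionally parsed as a struct igmpmsg from
 * <linux/mroute.h>:
 *
 *	char buf[2048];
 *	ssize_t n = recv(igmp_sock, buf, sizeof(buf), 0);
 *	struct igmpmsg *im = (struct igmpmsg *)buf;
 *	if (n > 0 && im->im_mbz == 0)
 *		switch (im->im_msgtype) {
 *		case IGMPMSG_NOCACHE:  ...	// install an MFC entry
 *		case IGMPMSG_WRONGVIF: ...	// PIM assert processing
 *		case IGMPMSG_WHOLEPKT: ...	// PIM register handling
 *		}
 *
 * im_mbz overlays the IP header's protocol field, which the code above
 * zeroes; genuine IGMP packets carry IPPROTO_IGMP there, and that is
 * how the daemon tells upcalls apart from real IGMP traffic.
 */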
/*
 *	Queue a packet for resolution. It gets locked cache entry!
 */

static int
ipmr_cache_unresolved(vifi_t vifi, struct sk_buff *skb)
{
	int err;
	struct mfc_cache *c;
	const struct iphdr *iph = ip_hdr(skb);

	spin_lock_bh(&mfc_unres_lock);
	for (c=mfc_unres_queue; c; c=c->next) {
		if (c->mfc_mcastgrp == iph->daddr &&
		    c->mfc_origin == iph->saddr)
			break;
	}

	if (c == NULL) {
		/*
		 *	Create a new entry if allowable
		 */

		if (atomic_read(&cache_resolve_queue_len)>=10 ||
		    (c=ipmr_cache_alloc_unres())==NULL) {
			spin_unlock_bh(&mfc_unres_lock);

			kfree_skb(skb);
			return -ENOBUFS;
		}

		/*
		 *	Fill in the new cache entry
		 */
		c->mfc_parent	= -1;
		c->mfc_origin	= iph->saddr;
		c->mfc_mcastgrp	= iph->daddr;

		/*
		 *	Reflect first query at mrouted.
		 */
		if ((err = ipmr_cache_report(skb, vifi, IGMPMSG_NOCACHE))<0) {
			/* If the report failed throw the cache entry
			   out - Brad Parker
			 */
			spin_unlock_bh(&mfc_unres_lock);

			kmem_cache_free(mrt_cachep, c);
			kfree_skb(skb);
			return err;
		}

		atomic_inc(&cache_resolve_queue_len);
		c->next = mfc_unres_queue;
		mfc_unres_queue = c;

		mod_timer(&ipmr_expire_timer, c->mfc_un.unres.expires);
	}

	/*
	 *	See if we can append the packet
	 */
	if (c->mfc_un.unres.unresolved.qlen>3) {
		kfree_skb(skb);
		err = -ENOBUFS;
	} else {
		skb_queue_tail(&c->mfc_un.unres.unresolved,skb);
		err = 0;
	}

	spin_unlock_bh(&mfc_unres_lock);
	return err;
}
/*
 *	MFC cache manipulation by user space mroute daemon
 */

static int ipmr_mfc_delete(struct mfcctl *mfc)
{
	int line;
	struct mfc_cache *c, **cp;

	line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr) {
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
			return 0;
		}
	}
	return -ENOENT;
}

static int ipmr_mfc_add(struct mfcctl *mfc, int mrtsock)
{
	int line;
	struct mfc_cache *uc, *c, **cp;

	line=MFC_HASH(mfc->mfcc_mcastgrp.s_addr, mfc->mfcc_origin.s_addr);

	for (cp=&mfc_cache_array[line]; (c=*cp) != NULL; cp = &c->next) {
		if (c->mfc_origin == mfc->mfcc_origin.s_addr &&
		    c->mfc_mcastgrp == mfc->mfcc_mcastgrp.s_addr)
			break;
	}

	if (c != NULL) {
		write_lock_bh(&mrt_lock);
		c->mfc_parent = mfc->mfcc_parent;
		ipmr_update_thresholds(c, mfc->mfcc_ttls);
		if (!mrtsock)
			c->mfc_flags |= MFC_STATIC;
		write_unlock_bh(&mrt_lock);
		return 0;
	}

	if (!MULTICAST(mfc->mfcc_mcastgrp.s_addr))
		return -EINVAL;

	c=ipmr_cache_alloc();
	if (c==NULL)
		return -ENOMEM;

	c->mfc_origin=mfc->mfcc_origin.s_addr;
	c->mfc_mcastgrp=mfc->mfcc_mcastgrp.s_addr;
	c->mfc_parent=mfc->mfcc_parent;
	ipmr_update_thresholds(c, mfc->mfcc_ttls);
	if (!mrtsock)
		c->mfc_flags |= MFC_STATIC;

	write_lock_bh(&mrt_lock);
	c->next = mfc_cache_array[line];
	mfc_cache_array[line] = c;
	write_unlock_bh(&mrt_lock);

	/*
	 *	Check to see if we resolved a queued list. If so we
	 *	need to send on the frames and tidy up.
	 */
	spin_lock_bh(&mfc_unres_lock);
	for (cp = &mfc_unres_queue; (uc=*cp) != NULL;
	     cp = &uc->next) {
		if (uc->mfc_origin == c->mfc_origin &&
		    uc->mfc_mcastgrp == c->mfc_mcastgrp) {
			*cp = uc->next;
			if (atomic_dec_and_test(&cache_resolve_queue_len))
				del_timer(&ipmr_expire_timer);
			break;
		}
	}
	spin_unlock_bh(&mfc_unres_lock);

	if (uc) {
		ipmr_cache_resolve(uc, c);
		kmem_cache_free(mrt_cachep, uc);
	}
	return 0;
}
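
/* Sketch (added commentary): the matching userspace side of
 * ipmr_mfc_add(). A daemon answering an IGMPMSG_NOCACHE upcall
 * installs the (S,G) route with MRT_ADD_MFC; names are from
 * <linux/mroute.h>, addresses are illustrative:
 *
 *	struct mfcctl mc = { .mfcc_parent = 1 };	// incoming vif
 *	mc.mfcc_origin.s_addr   = inet_addr("192.0.2.9");
 *	mc.mfcc_mcastgrp.s_addr = inet_addr("239.1.2.3");
 *	mc.mfcc_ttls[2] = 1;				// forward on vif 2
 *	setsockopt(igmp_sock, IPPROTO_IP, MRT_ADD_MFC, &mc, sizeof(mc));
 */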
/*
 *	Close the multicast socket, and clear the vif tables etc
 */

static void mroute_clean_tables(struct sock *sk)
{
	int i;

	/*
	 *	Shut down all active vif entries
	 */
	for (i=0; i<maxvif; i++) {
		if (!(vif_table[i].flags&VIFF_STATIC))
			vif_delete(i);
	}

	/*
	 *	Wipe the cache
	 */
	for (i=0;i<MFC_LINES;i++) {
		struct mfc_cache *c, **cp;

		cp = &mfc_cache_array[i];
		while ((c = *cp) != NULL) {
			if (c->mfc_flags&MFC_STATIC) {
				cp = &c->next;
				continue;
			}
			write_lock_bh(&mrt_lock);
			*cp = c->next;
			write_unlock_bh(&mrt_lock);

			kmem_cache_free(mrt_cachep, c);
		}
	}

	if (atomic_read(&cache_resolve_queue_len) != 0) {
		struct mfc_cache *c;

		spin_lock_bh(&mfc_unres_lock);
		while (mfc_unres_queue != NULL) {
			c = mfc_unres_queue;
			mfc_unres_queue = c->next;
			spin_unlock_bh(&mfc_unres_lock);

			ipmr_destroy_unres(c);

			spin_lock_bh(&mfc_unres_lock);
		}
		spin_unlock_bh(&mfc_unres_lock);
	}
}
static void mrtsock_destruct(struct sock *sk)
{
	rtnl_lock();
	if (sk == mroute_socket) {
		IPV4_DEVCONF_ALL(MC_FORWARDING)--;

		write_lock_bh(&mrt_lock);
		mroute_socket=NULL;
		write_unlock_bh(&mrt_lock);

		mroute_clean_tables(sk);
	}
	rtnl_unlock();
}
/*
 *	Socket options and virtual interface manipulation. The whole
 *	virtual interface system is a complete heap, but unfortunately
 *	that's how BSD mrouted happens to think. Maybe one day with a proper
 *	MOSPF/PIM router set up we can clean this up.
 */

int ip_mroute_setsockopt(struct sock *sk,int optname,char __user *optval,int optlen)
{
	int ret;
	struct vifctl vif;
	struct mfcctl mfc;

	if (optname != MRT_INIT) {
		if (sk != mroute_socket && !capable(CAP_NET_ADMIN))
			return -EACCES;
	}

	switch (optname) {
	case MRT_INIT:
		if (sk->sk_type != SOCK_RAW ||
		    inet_sk(sk)->num != IPPROTO_IGMP)
			return -EOPNOTSUPP;
		if (optlen!=sizeof(int))
			return -ENOPROTOOPT;

		rtnl_lock();
		if (mroute_socket) {
			rtnl_unlock();
			return -EADDRINUSE;
		}

		ret = ip_ra_control(sk, 1, mrtsock_destruct);
		if (ret == 0) {
			write_lock_bh(&mrt_lock);
			mroute_socket=sk;
			write_unlock_bh(&mrt_lock);

			IPV4_DEVCONF_ALL(MC_FORWARDING)++;
		}
		rtnl_unlock();
		return ret;
	case MRT_DONE:
		if (sk!=mroute_socket)
			return -EACCES;
		return ip_ra_control(sk, 0, NULL);
	case MRT_ADD_VIF:
	case MRT_DEL_VIF:
		if (optlen!=sizeof(vif))
			return -EINVAL;
		if (copy_from_user(&vif,optval,sizeof(vif)))
			return -EFAULT;
		if (vif.vifc_vifi >= MAXVIFS)
			return -ENFILE;
		rtnl_lock();
		if (optname==MRT_ADD_VIF) {
			ret = vif_add(&vif, sk==mroute_socket);
		} else {
			ret = vif_delete(vif.vifc_vifi);
		}
		rtnl_unlock();
		return ret;

		/*
		 *	Manipulate the forwarding caches. These live
		 *	in a sort of kernel/user symbiosis.
		 */
	case MRT_ADD_MFC:
	case MRT_DEL_MFC:
		if (optlen!=sizeof(mfc))
			return -EINVAL;
		if (copy_from_user(&mfc,optval, sizeof(mfc)))
			return -EFAULT;
		rtnl_lock();
		if (optname==MRT_DEL_MFC)
			ret = ipmr_mfc_delete(&mfc);
		else
			ret = ipmr_mfc_add(&mfc, sk==mroute_socket);
		rtnl_unlock();
		return ret;
		/*
		 *	Control PIM assert.
		 */
	case MRT_ASSERT:
	{
		int v;
		if (get_user(v,(int __user *)optval))
			return -EFAULT;
		mroute_do_assert=(v)?1:0;
		return 0;
	}
#ifdef CONFIG_IP_PIMSM
	case MRT_PIM:
	{
		int v, ret;
		if (get_user(v,(int __user *)optval))
			return -EFAULT;
		v = (v)?1:0;
		rtnl_lock();
		ret = 0;
		if (v != mroute_do_pim) {
			mroute_do_pim = v;
			mroute_do_assert = v;
#ifdef CONFIG_IP_PIMSM_V2
			if (mroute_do_pim)
				ret = inet_add_protocol(&pim_protocol,
							IPPROTO_PIM);
			else
				ret = inet_del_protocol(&pim_protocol,
							IPPROTO_PIM);
			if (ret < 0)
				ret = -EAGAIN;
#endif
		}
		rtnl_unlock();
		return ret;
	}
#endif
	/*
	 *	Spurious command, or MRT_VERSION which you cannot
	 *	set.
	 */
	default:
		return -ENOPROTOOPT;
	}
}
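
/* Sketch (added commentary): a daemon's lifecycle against the option
 * handler above, tying the cases together:
 *
 *	int s = socket(AF_INET, SOCK_RAW, IPPROTO_IGMP);
 *	int on = 1;
 *	setsockopt(s, IPPROTO_IP, MRT_INIT, &on, sizeof(on));	// claim mroute_socket
 *	... MRT_ADD_VIF / MRT_ADD_MFC, read upcalls from s ...
 *	setsockopt(s, IPPROTO_IP, MRT_DONE, NULL, 0);		// or just close(s)
 *
 * Closing the socket runs mrtsock_destruct(), so non-static vifs and
 * MFC entries are cleaned up even if the daemon dies without MRT_DONE.
 */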
/*
 *	Getsock opt support for the multicast routing system.
 */

int ip_mroute_getsockopt(struct sock *sk,int optname,char __user *optval,int __user *optlen)
{
	int olr;
	int val;

	if (optname!=MRT_VERSION &&
#ifdef CONFIG_IP_PIMSM
	   optname!=MRT_PIM &&
#endif
	   optname!=MRT_ASSERT)
		return -ENOPROTOOPT;

	if (get_user(olr, optlen))
		return -EFAULT;

	olr = min_t(unsigned int, olr, sizeof(int));
	if (olr < 0)
		return -EINVAL;

	if (put_user(olr,optlen))
		return -EFAULT;
	if (optname==MRT_VERSION)
		val=0x0305;
#ifdef CONFIG_IP_PIMSM
	else if (optname==MRT_PIM)
		val=mroute_do_pim;
#endif
	else
		val=mroute_do_assert;
	if (copy_to_user(optval,&val,olr))
		return -EFAULT;
	return 0;
}
/*
 *	The IP multicast ioctl support routines.
 */

int ipmr_ioctl(struct sock *sk, int cmd, void __user *arg)
{
	struct sioc_sg_req sr;
	struct sioc_vif_req vr;
	struct vif_device *vif;
	struct mfc_cache *c;

	switch (cmd) {
	case SIOCGETVIFCNT:
		if (copy_from_user(&vr,arg,sizeof(vr)))
			return -EFAULT;
		if (vr.vifi>=maxvif)
			return -EINVAL;
		read_lock(&mrt_lock);
		vif=&vif_table[vr.vifi];
		if (VIF_EXISTS(vr.vifi)) {
			vr.icount=vif->pkt_in;
			vr.ocount=vif->pkt_out;
			vr.ibytes=vif->bytes_in;
			vr.obytes=vif->bytes_out;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg,&vr,sizeof(vr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	case SIOCGETSGCNT:
		if (copy_from_user(&sr,arg,sizeof(sr)))
			return -EFAULT;

		read_lock(&mrt_lock);
		c = ipmr_cache_find(sr.src.s_addr, sr.grp.s_addr);
		if (c) {
			sr.pktcnt = c->mfc_un.res.pkt;
			sr.bytecnt = c->mfc_un.res.bytes;
			sr.wrong_if = c->mfc_un.res.wrong_if;
			read_unlock(&mrt_lock);

			if (copy_to_user(arg,&sr,sizeof(sr)))
				return -EFAULT;
			return 0;
		}
		read_unlock(&mrt_lock);
		return -EADDRNOTAVAIL;
	default:
		return -ENOIOCTLCMD;
	}
}
static int ipmr_device_event(struct notifier_block *this, unsigned long event, void *ptr)
{
	struct net_device *dev = ptr;
	struct vif_device *v;
	int ct;

	if (dev->nd_net != &init_net)
		return NOTIFY_DONE;

	if (event != NETDEV_UNREGISTER)
		return NOTIFY_DONE;
	v=&vif_table[0];
	for (ct=0;ct<maxvif;ct++,v++) {
		if (v->dev==dev)
			vif_delete(ct);
	}
	return NOTIFY_DONE;
}

static struct notifier_block ip_mr_notifier={
	.notifier_call = ipmr_device_event,
};
/*
 *	Encapsulate a packet by attaching a valid IPIP header to it.
 *	This avoids tunnel drivers and other mess and gives us the speed so
 *	important for multicast video.
 */

static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr)
{
	struct iphdr *iph;
	struct iphdr *old_iph = ip_hdr(skb);

	skb_push(skb, sizeof(struct iphdr));
	skb->transport_header = skb->network_header;
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);

	iph->version	= 4;
	iph->tos	= old_iph->tos;
	iph->ttl	= old_iph->ttl;
	iph->frag_off	= 0;
	iph->daddr	= daddr;
	iph->saddr	= saddr;
	iph->protocol	= IPPROTO_IPIP;
	iph->ihl	= 5;
	iph->tot_len	= htons(skb->len);
	ip_select_ident(iph, skb->dst, NULL);
	ip_send_check(iph);

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	nf_reset(skb);
}
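
/* Resulting wire layout (added commentary):
 *
 *	+----------------------+----------------------+---------+
 *	| outer IPv4 header    | original IPv4 header | payload |
 *	| proto = IPPROTO_IPIP | untouched            |         |
 *	+----------------------+----------------------+---------+
 *
 * TOS and TTL are copied from the inner header, so the tunnelled copy
 * expires in the network at the same point the bare packet would have.
 */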
static inline int ipmr_forward_finish(struct sk_buff *skb)
{
	struct ip_options *opt = &(IPCB(skb)->opt);

	IP_INC_STATS_BH(IPSTATS_MIB_OUTFORWDATAGRAMS);

	if (unlikely(opt->optlen))
		ip_forward_options(skb);

	return dst_output(skb);
}
/*
 *	Processing handlers for ipmr_forward
 */

static void ipmr_queue_xmit(struct sk_buff *skb, struct mfc_cache *c, int vifi)
{
	const struct iphdr *iph = ip_hdr(skb);
	struct vif_device *vif = &vif_table[vifi];
	struct net_device *dev;
	struct rtable *rt;
	int    encap = 0;

	if (vif->dev == NULL)
		goto out_free;

#ifdef CONFIG_IP_PIMSM
	if (vif->flags & VIFF_REGISTER) {
		vif->pkt_out++;
		vif->bytes_out+=skb->len;
		((struct net_device_stats*)netdev_priv(vif->dev))->tx_bytes += skb->len;
		((struct net_device_stats*)netdev_priv(vif->dev))->tx_packets++;
		ipmr_cache_report(skb, vifi, IGMPMSG_WHOLEPKT);
		kfree_skb(skb);
		return;
	}
#endif

	if (vif->flags&VIFF_TUNNEL) {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = vif->remote,
						.saddr = vif->local,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&rt, &fl))
			goto out_free;
		encap = sizeof(struct iphdr);
	} else {
		struct flowi fl = { .oif = vif->link,
				    .nl_u = { .ip4_u =
					      { .daddr = iph->daddr,
						.tos = RT_TOS(iph->tos) } },
				    .proto = IPPROTO_IPIP };
		if (ip_route_output_key(&rt, &fl))
			goto out_free;
	}

	dev = rt->u.dst.dev;

	if (skb->len+encap > dst_mtu(&rt->u.dst) && (ntohs(iph->frag_off) & IP_DF)) {
		/* Do not fragment multicasts. Alas, IPv4 does not
		   allow us to send an ICMP error here, so the packets
		   will disappear into a black hole.
		 */

		IP_INC_STATS_BH(IPSTATS_MIB_FRAGFAILS);
		ip_rt_put(rt);
		goto out_free;
	}

	encap += LL_RESERVED_SPACE(dev) + rt->u.dst.header_len;

	if (skb_cow(skb, encap)) {
		ip_rt_put(rt);
		goto out_free;
	}

	vif->pkt_out++;
	vif->bytes_out+=skb->len;

	dst_release(skb->dst);
	skb->dst = &rt->u.dst;
	ip_decrease_ttl(ip_hdr(skb));

	/* FIXME: forward and output firewalls used to be called here.
	 * What do we do with netfilter? -- RR */
	if (vif->flags & VIFF_TUNNEL) {
		ip_encap(skb, vif->local, vif->remote);
		/* FIXME: extra output firewall step used to be here. --RR */
		((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_packets++;
		((struct ip_tunnel *)netdev_priv(vif->dev))->stat.tx_bytes+=skb->len;
	}

	IPCB(skb)->flags |= IPSKB_FORWARDED;

	/*
	 * RFC 1584 teaches that a DVMRP/PIM router must deliver packets
	 * locally not only before forwarding, but also after forwarding on
	 * all output interfaces. Clearly, if an mrouter runs a multicast
	 * application, it should receive packets no matter which interface
	 * the application joined on; otherwise the program would have to
	 * join on all interfaces. A multihomed host (or router, but not an
	 * mrouter), on the other hand, cannot join on more than one
	 * interface - it would receive duplicate packets.
	 */
	NF_HOOK(PF_INET, NF_IP_FORWARD, skb, skb->dev, dev,
		ipmr_forward_finish);
	return;

out_free:
	kfree_skb(skb);
	return;
}
static int ipmr_find_vif(struct net_device *dev)
{
	int ct;
	for (ct=maxvif-1; ct>=0; ct--) {
		if (vif_table[ct].dev == dev)
			break;
	}
	return ct;
}
  1064. /* "local" means that we should preserve one skb (for local delivery) */
  1065. static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local)
  1066. {
  1067. int psend = -1;
  1068. int vif, ct;
  1069. vif = cache->mfc_parent;
  1070. cache->mfc_un.res.pkt++;
  1071. cache->mfc_un.res.bytes += skb->len;
  1072. /*
  1073. * Wrong interface: drop packet and (maybe) send PIM assert.
  1074. */
  1075. if (vif_table[vif].dev != skb->dev) {
  1076. int true_vifi;
  1077. if (((struct rtable*)skb->dst)->fl.iif == 0) {
  1078. /* It is our own packet, looped back.
  1079. Very complicated situation...
  1080. The best workaround until routing daemons will be
  1081. fixed is not to redistribute packet, if it was
  1082. send through wrong interface. It means, that
  1083. multicast applications WILL NOT work for
  1084. (S,G), which have default multicast route pointing
  1085. to wrong oif. In any case, it is not a good
  1086. idea to use multicasting applications on router.
  1087. */
  1088. goto dont_forward;
  1089. }
  1090. cache->mfc_un.res.wrong_if++;
  1091. true_vifi = ipmr_find_vif(skb->dev);
  1092. if (true_vifi >= 0 && mroute_do_assert &&
  1093. /* pimsm uses asserts, when switching from RPT to SPT,
  1094. so that we cannot check that packet arrived on an oif.
  1095. It is bad, but otherwise we would need to move pretty
  1096. large chunk of pimd to kernel. Ough... --ANK
  1097. */
  1098. (mroute_do_pim || cache->mfc_un.res.ttls[true_vifi] < 255) &&
  1099. time_after(jiffies,
  1100. cache->mfc_un.res.last_assert + MFC_ASSERT_THRESH)) {
  1101. cache->mfc_un.res.last_assert = jiffies;
  1102. ipmr_cache_report(skb, true_vifi, IGMPMSG_WRONGVIF);
  1103. }
  1104. goto dont_forward;
  1105. }
  1106. vif_table[vif].pkt_in++;
  1107. vif_table[vif].bytes_in+=skb->len;
  1108. /*
  1109. * Forward the frame
  1110. */
  1111. for (ct = cache->mfc_un.res.maxvif-1; ct >= cache->mfc_un.res.minvif; ct--) {
  1112. if (ip_hdr(skb)->ttl > cache->mfc_un.res.ttls[ct]) {
  1113. if (psend != -1) {
  1114. struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
  1115. if (skb2)
  1116. ipmr_queue_xmit(skb2, cache, psend);
  1117. }
  1118. psend=ct;
  1119. }
  1120. }
  1121. if (psend != -1) {
  1122. if (local) {
  1123. struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
  1124. if (skb2)
  1125. ipmr_queue_xmit(skb2, cache, psend);
  1126. } else {
  1127. ipmr_queue_xmit(skb, cache, psend);
  1128. return 0;
  1129. }
  1130. }
  1131. dont_forward:
  1132. if (!local)
  1133. kfree_skb(skb);
  1134. return 0;
  1135. }
/*
 *	Multicast packets for forwarding arrive here
 */

int ip_mr_input(struct sk_buff *skb)
{
	struct mfc_cache *cache;
	int local = ((struct rtable*)skb->dst)->rt_flags&RTCF_LOCAL;

	/* Packet is looped back after forward; it should not be
	   forwarded a second time, but can still be delivered locally.
	 */
	if (IPCB(skb)->flags&IPSKB_FORWARDED)
		goto dont_forward;

	if (!local) {
		if (IPCB(skb)->opt.router_alert) {
			if (ip_call_ra_chain(skb))
				return 0;
		} else if (ip_hdr(skb)->protocol == IPPROTO_IGMP){
			/* IGMPv1 (and broken IGMPv2 implementations such as
			   Cisco IOS <= 11.2(8)) do not put the router alert
			   option into IGMP packets destined for routable
			   groups. That is very bad, because it means
			   that we can forward NO IGMP messages.
			 */
			read_lock(&mrt_lock);
			if (mroute_socket) {
				nf_reset(skb);
				raw_rcv(mroute_socket, skb);
				read_unlock(&mrt_lock);
				return 0;
			}
			read_unlock(&mrt_lock);
		}
	}

	read_lock(&mrt_lock);
	cache = ipmr_cache_find(ip_hdr(skb)->saddr, ip_hdr(skb)->daddr);

	/*
	 *	No usable cache entry
	 */
	if (cache==NULL) {
		int vif;

		if (local) {
			struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
			ip_local_deliver(skb);
			if (skb2 == NULL) {
				read_unlock(&mrt_lock);
				return -ENOBUFS;
			}
			skb = skb2;
		}

		vif = ipmr_find_vif(skb->dev);
		if (vif >= 0) {
			int err = ipmr_cache_unresolved(vif, skb);
			read_unlock(&mrt_lock);

			return err;
		}
		read_unlock(&mrt_lock);
		kfree_skb(skb);
		return -ENODEV;
	}

	ip_mr_forward(skb, cache, local);

	read_unlock(&mrt_lock);

	if (local)
		return ip_local_deliver(skb);

	return 0;

dont_forward:
	if (local)
		return ip_local_deliver(skb);
	kfree_skb(skb);
	return 0;
}
#ifdef CONFIG_IP_PIMSM_V1
/*
 * Handle IGMP messages of PIMv1
 */

int pim_rcv_v1(struct sk_buff *skb)
{
	struct igmphdr *pim;
	struct iphdr *encap;
	struct net_device *reg_dev = NULL;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = igmp_hdr(skb);

	if (!mroute_do_pim ||
	    skb->len < sizeof(*pim) + sizeof(*encap) ||
	    pim->group != PIM_V1_VERSION || pim->code != PIM_V1_REGISTER)
		goto drop;

	encap = (struct iphdr *)(skb_transport_header(skb) +
				 sizeof(struct igmphdr));
	/*
	   Check that:
	   a. packet is really destined to a multicast group
	   b. packet is not a NULL-REGISTER
	   c. packet is not truncated
	 */
	if (!MULTICAST(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
		goto drop;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8*)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	skb->dst = NULL;
	((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len;
	((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
#endif
#ifdef CONFIG_IP_PIMSM_V2
static int pim_rcv(struct sk_buff *skb)
{
	struct pimreghdr *pim;
	struct iphdr *encap;
	struct net_device *reg_dev = NULL;

	if (!pskb_may_pull(skb, sizeof(*pim) + sizeof(*encap)))
		goto drop;

	pim = (struct pimreghdr *)skb_transport_header(skb);
	if (pim->type != ((PIM_VERSION<<4)|(PIM_REGISTER)) ||
	    (pim->flags&PIM_NULL_REGISTER) ||
	    (ip_compute_csum((void *)pim, sizeof(*pim)) != 0 &&
	     csum_fold(skb_checksum(skb, 0, skb->len, 0))))
		goto drop;

	/* check if the inner packet is destined to mcast group */
	encap = (struct iphdr *)(skb_transport_header(skb) +
				 sizeof(struct pimreghdr));
	if (!MULTICAST(encap->daddr) ||
	    encap->tot_len == 0 ||
	    ntohs(encap->tot_len) + sizeof(*pim) > skb->len)
		goto drop;

	read_lock(&mrt_lock);
	if (reg_vif_num >= 0)
		reg_dev = vif_table[reg_vif_num].dev;
	if (reg_dev)
		dev_hold(reg_dev);
	read_unlock(&mrt_lock);

	if (reg_dev == NULL)
		goto drop;

	skb->mac_header = skb->network_header;
	skb_pull(skb, (u8*)encap - skb->data);
	skb_reset_network_header(skb);
	skb->dev = reg_dev;
	skb->protocol = htons(ETH_P_IP);
	skb->ip_summed = 0;
	skb->pkt_type = PACKET_HOST;
	dst_release(skb->dst);
	((struct net_device_stats*)netdev_priv(reg_dev))->rx_bytes += skb->len;
	((struct net_device_stats*)netdev_priv(reg_dev))->rx_packets++;
	skb->dst = NULL;
	nf_reset(skb);
	netif_rx(skb);
	dev_put(reg_dev);
	return 0;
 drop:
	kfree_skb(skb);
	return 0;
}
#endif
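
/* Note (added commentary): the two-stage checksum test in pim_rcv()
 * implements the relaxation mentioned in the changelog at the top of
 * this file. A PIMv2 Register is accepted if its checksum covers only
 * the PIM header (the rule for Registers), and, failing that, also if
 * an old-style checksum over the entire packet verifies - so peers
 * that checksum the encapsulated data too can still interoperate.
 */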
static int
ipmr_fill_mroute(struct sk_buff *skb, struct mfc_cache *c, struct rtmsg *rtm)
{
	int ct;
	struct rtnexthop *nhp;
	struct net_device *dev = vif_table[c->mfc_parent].dev;
	u8 *b = skb_tail_pointer(skb);
	struct rtattr *mp_head;

	if (dev)
		RTA_PUT(skb, RTA_IIF, 4, &dev->ifindex);

	mp_head = (struct rtattr*)skb_put(skb, RTA_LENGTH(0));

	for (ct = c->mfc_un.res.minvif; ct < c->mfc_un.res.maxvif; ct++) {
		if (c->mfc_un.res.ttls[ct] < 255) {
			if (skb_tailroom(skb) < RTA_ALIGN(RTA_ALIGN(sizeof(*nhp)) + 4))
				goto rtattr_failure;
			nhp = (struct rtnexthop*)skb_put(skb, RTA_ALIGN(sizeof(*nhp)));
			nhp->rtnh_flags = 0;
			nhp->rtnh_hops = c->mfc_un.res.ttls[ct];
			nhp->rtnh_ifindex = vif_table[ct].dev->ifindex;
			nhp->rtnh_len = sizeof(*nhp);
		}
	}
	mp_head->rta_type = RTA_MULTIPATH;
	mp_head->rta_len = skb_tail_pointer(skb) - (u8 *)mp_head;
	rtm->rtm_type = RTN_MULTICAST;
	return 1;

rtattr_failure:
	nlmsg_trim(skb, b);
	return -EMSGSIZE;
}
int ipmr_get_route(struct sk_buff *skb, struct rtmsg *rtm, int nowait)
{
	int err;
	struct mfc_cache *cache;
	struct rtable *rt = (struct rtable*)skb->dst;

	read_lock(&mrt_lock);
	cache = ipmr_cache_find(rt->rt_src, rt->rt_dst);

	if (cache==NULL) {
		struct sk_buff *skb2;
		struct iphdr *iph;
		struct net_device *dev;
		int vif;

		if (nowait) {
			read_unlock(&mrt_lock);
			return -EAGAIN;
		}

		dev = skb->dev;
		if (dev == NULL || (vif = ipmr_find_vif(dev)) < 0) {
			read_unlock(&mrt_lock);
			return -ENODEV;
		}
		skb2 = skb_clone(skb, GFP_ATOMIC);
		if (!skb2) {
			read_unlock(&mrt_lock);
			return -ENOMEM;
		}

		skb_push(skb2, sizeof(struct iphdr));
		skb_reset_network_header(skb2);
		iph = ip_hdr(skb2);
		iph->ihl = sizeof(struct iphdr) >> 2;
		iph->saddr = rt->rt_src;
		iph->daddr = rt->rt_dst;
		iph->version = 0;
		err = ipmr_cache_unresolved(vif, skb2);
		read_unlock(&mrt_lock);
		return err;
	}

	if (!nowait && (rtm->rtm_flags&RTM_F_NOTIFY))
		cache->mfc_flags |= MFC_NOTIFY;
	err = ipmr_fill_mroute(skb, cache, rtm);
	read_unlock(&mrt_lock);
	return err;
}
#ifdef CONFIG_PROC_FS
/*
 *	The /proc interfaces to multicast routing /proc/ip_mr_cache /proc/ip_mr_vif
 */
struct ipmr_vif_iter {
	int ct;
};

static struct vif_device *ipmr_vif_seq_idx(struct ipmr_vif_iter *iter,
					   loff_t pos)
{
	for (iter->ct = 0; iter->ct < maxvif; ++iter->ct) {
		if (!VIF_EXISTS(iter->ct))
			continue;
		if (pos-- == 0)
			return &vif_table[iter->ct];
	}
	return NULL;
}

static void *ipmr_vif_seq_start(struct seq_file *seq, loff_t *pos)
{
	read_lock(&mrt_lock);
	return *pos ? ipmr_vif_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_vif_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ipmr_vif_iter *iter = seq->private;

	++*pos;
	if (v == SEQ_START_TOKEN)
		return ipmr_vif_seq_idx(iter, 0);

	while (++iter->ct < maxvif) {
		if (!VIF_EXISTS(iter->ct))
			continue;
		return &vif_table[iter->ct];
	}
	return NULL;
}

static void ipmr_vif_seq_stop(struct seq_file *seq, void *v)
{
	read_unlock(&mrt_lock);
}

static int ipmr_vif_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
			 "Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote\n");
	} else {
		const struct vif_device *vif = v;
		const char *name = vif->dev ? vif->dev->name : "none";

		seq_printf(seq,
			   "%2Zd %-10s %8ld %7ld  %8ld %7ld %05X %08X %08X\n",
			   vif - vif_table,
			   name, vif->bytes_in, vif->pkt_in,
			   vif->bytes_out, vif->pkt_out,
			   vif->flags, vif->local, vif->remote);
	}
	return 0;
}

static const struct seq_operations ipmr_vif_seq_ops = {
	.start = ipmr_vif_seq_start,
	.next  = ipmr_vif_seq_next,
	.stop  = ipmr_vif_seq_stop,
	.show  = ipmr_vif_seq_show,
};

static int ipmr_vif_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &ipmr_vif_seq_ops,
				sizeof(struct ipmr_vif_iter));
}

static const struct file_operations ipmr_vif_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_vif_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};
struct ipmr_mfc_iter {
	struct mfc_cache **cache;
	int ct;
};

static struct mfc_cache *ipmr_mfc_seq_idx(struct ipmr_mfc_iter *it, loff_t pos)
{
	struct mfc_cache *mfc;

	it->cache = mfc_cache_array;
	read_lock(&mrt_lock);
	for (it->ct = 0; it->ct < MFC_LINES; it->ct++)
		for (mfc = mfc_cache_array[it->ct]; mfc; mfc = mfc->next)
			if (pos-- == 0)
				return mfc;
	read_unlock(&mrt_lock);

	it->cache = &mfc_unres_queue;
	spin_lock_bh(&mfc_unres_lock);
	for (mfc = mfc_unres_queue; mfc; mfc = mfc->next)
		if (pos-- == 0)
			return mfc;
	spin_unlock_bh(&mfc_unres_lock);

	it->cache = NULL;
	return NULL;
}

static void *ipmr_mfc_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct ipmr_mfc_iter *it = seq->private;
	it->cache = NULL;
	it->ct = 0;
	return *pos ? ipmr_mfc_seq_idx(seq->private, *pos - 1)
		: SEQ_START_TOKEN;
}

static void *ipmr_mfc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct mfc_cache *mfc = v;
	struct ipmr_mfc_iter *it = seq->private;

	++*pos;

	if (v == SEQ_START_TOKEN)
		return ipmr_mfc_seq_idx(seq->private, 0);

	if (mfc->next)
		return mfc->next;

	if (it->cache == &mfc_unres_queue)
		goto end_of_list;

	BUG_ON(it->cache != mfc_cache_array);

	while (++it->ct < MFC_LINES) {
		mfc = mfc_cache_array[it->ct];
		if (mfc)
			return mfc;
	}

	/* exhausted cache_array, show unresolved */
	read_unlock(&mrt_lock);
	it->cache = &mfc_unres_queue;
	it->ct = 0;

	spin_lock_bh(&mfc_unres_lock);
	mfc = mfc_unres_queue;
	if (mfc)
		return mfc;

 end_of_list:
	spin_unlock_bh(&mfc_unres_lock);
	it->cache = NULL;

	return NULL;
}

static void ipmr_mfc_seq_stop(struct seq_file *seq, void *v)
{
	struct ipmr_mfc_iter *it = seq->private;

	if (it->cache == &mfc_unres_queue)
		spin_unlock_bh(&mfc_unres_lock);
	else if (it->cache == mfc_cache_array)
		read_unlock(&mrt_lock);
}
static int ipmr_mfc_seq_show(struct seq_file *seq, void *v)
{
	int n;

	if (v == SEQ_START_TOKEN) {
		seq_puts(seq,
		 "Group    Origin   Iif     Pkts    Bytes    Wrong Oifs\n");
	} else {
		const struct mfc_cache *mfc = v;
		const struct ipmr_mfc_iter *it = seq->private;

		seq_printf(seq, "%08lX %08lX %-3d %8ld %8ld %8ld",
			   (unsigned long) mfc->mfc_mcastgrp,
			   (unsigned long) mfc->mfc_origin,
			   mfc->mfc_parent,
			   mfc->mfc_un.res.pkt,
			   mfc->mfc_un.res.bytes,
			   mfc->mfc_un.res.wrong_if);

		if (it->cache != &mfc_unres_queue) {
			for (n = mfc->mfc_un.res.minvif;
			     n < mfc->mfc_un.res.maxvif; n++) {
				if (VIF_EXISTS(n)
				    && mfc->mfc_un.res.ttls[n] < 255)
					seq_printf(seq,
						   " %2d:%-3d",
						   n, mfc->mfc_un.res.ttls[n]);
			}
		}
		seq_putc(seq, '\n');
	}
	return 0;
}

static const struct seq_operations ipmr_mfc_seq_ops = {
	.start = ipmr_mfc_seq_start,
	.next  = ipmr_mfc_seq_next,
	.stop  = ipmr_mfc_seq_stop,
	.show  = ipmr_mfc_seq_show,
};

static int ipmr_mfc_open(struct inode *inode, struct file *file)
{
	return seq_open_private(file, &ipmr_mfc_seq_ops,
				sizeof(struct ipmr_mfc_iter));
}

static const struct file_operations ipmr_mfc_fops = {
	.owner	 = THIS_MODULE,
	.open    = ipmr_mfc_open,
	.read    = seq_read,
	.llseek  = seq_lseek,
	.release = seq_release_private,
};
#endif
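
/* Sample output (added commentary), as produced by the seq_show
 * handlers above; the values are illustrative, and addresses appear as
 * raw hex words in host byte order:
 *
 *	$ cat /proc/net/ip_mr_vif
 *	Interface      BytesIn  PktsIn  BytesOut PktsOut Flags Local    Remote
 *	 0 eth0          123456     789    654321     987 00000 0102000A 00000000
 *
 *	$ cat /proc/net/ip_mr_cache
 *	Group    Origin   Iif     Pkts    Bytes    Wrong Oifs
 *	030201EF 090200C0 0        100   150000        0  2:1
 *
 * The "Oifs" column lists each output vif with its TTL threshold
 * (vif:ttl); unresolved entries are shown without an oif list.
 */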
#ifdef CONFIG_IP_PIMSM_V2
static struct net_protocol pim_protocol = {
	.handler	= pim_rcv,
};
#endif

/*
 *	Setup for IP multicast routing
 */

void __init ip_mr_init(void)
{
	mrt_cachep = kmem_cache_create("ip_mrt_cache",
				       sizeof(struct mfc_cache),
				       0, SLAB_HWCACHE_ALIGN|SLAB_PANIC,
				       NULL);
	init_timer(&ipmr_expire_timer);
	ipmr_expire_timer.function=ipmr_expire_process;
	register_netdevice_notifier(&ip_mr_notifier);
#ifdef CONFIG_PROC_FS
	proc_net_fops_create(&init_net, "ip_mr_vif", 0, &ipmr_vif_fops);
	proc_net_fops_create(&init_net, "ip_mr_cache", 0, &ipmr_mfc_fops);
#endif
}