igmp.c 64 KB

123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177178179180181182183184185186187188189190191192193194195196197198199200201202203204205206207208209210211212213214215216217218219220221222223224225226227228229230231232233234235236237238239240241242243244245246247248249250251252253254255256257258259260261262263264265266267268269270271272273274275276277278279280281282283284285286287288289290291292293294295296297298299300301302303304305306307308309310311312313314315316317318319320321322323324325326327328329330331332333334335336337338339340341342343344345346347348349350351352353354355356357358359360361362363364365366367368369370371372373374375376377378379380381382383384385386387388389390391392393394395396397398399400401402403404405406407408409410411412413414415416417418419420421422423424425426427428429430431432433434435436437438439440441442443444445446447448449450451452453454455456457458459460461462463464465466467468469470471472473474475476477478479480481482483484485486487488489490491492493494495496497498499500501502503504505506507508509510511512513514515516517518519520521522523524525526527528529530531532533534535536537538539540541542543544545546547548549550551552553554555556557558559560561562563564565566567568569570571572573574575576577578579580581582583584585586587588589590591592593594595596597598599600601602603604605606607608609610611612613614615616617618619620621622623624625626627628629630631632633634635636637638639640641642643644645646647648649650651652653654655656657658659660661662663664665666667668669670671672673674675676677678679680681682683684685686687688689690691692693694695696697698699700701702703704705706707708709710711712713714715716717718719720721722723724725726727728729730731732733734735736737738739740741742743744745746747748749750751752753754755756757758759760761762763764765766767768769770771772773774775776777778779780781782783784785786787788789790791792793794795796797798799800801802803804805806807808809810811812813814815816817818819820821822823824825826827828829830831832833834835836837838839840841842843844845846847848849850851852853854855856857858859860861862863864865866867868869870871872873874875876877878879880881882883884885886887888889890891892893894895896897898899900901902903904905906907908909910911912913914915916917918919920921922923924925926927928929930931932933934935936937938939940941942943944945946947948949950951952953954955956957958959960961962963964965966967968969970971972973974975976977978979980981982983984985986987988989990991992993994995996997998999100010011002100310041005100610071008100910101011101210131014101510161017101810191020102110221023102410251026102710281029103010311032103310341035103610371038103910401041104210431044104510461047104810491050105110521053105410551056105710581059106010611062106310641065106610671068106910701071107210731074107510761077107810791080108110821083108410851086108710881089109010911092109310941095109610971098109911001101110211031104110511061107110811091110111111121113111411151116111711181119112011211122112311241125112611271128112911301131113211331134113511361137113811391140114111421143114411451146114711481149115011511152115311541155115611571158115911601161116211631164116511
661167116811691170117111721173117411751176117711781179118011811182118311841185118611871188118911901191119211931194119511961197119811991200120112021203120412051206120712081209121012111212121312141215121612171218121912201221122212231224122512261227122812291230123112321233123412351236123712381239124012411242124312441245124612471248124912501251125212531254125512561257125812591260126112621263126412651266126712681269127012711272127312741275127612771278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177717781779178017811782178317841785178617871788178917901791179217931794179517961797179817991800180118021803180418051806180718081809181018111812181318141815181618171818181918201821182218231824182518261827182818291830183118321833183418351836183718381839184018411842184318441845184618471848184918501851185218531854185518561857185818591860186118621863186418651866186718681869187018711872187318741875187618771878187918801881188218831884188518861887188818891890189118921893189418951896189718981899190019011902190319041905190619071908190919101911191219131914191519161917191819191920192119221923192419251926192719281929193019311932193319341935193619371938193919401941194219431944194519461947194819491950195119521953195419551956195719581959196019611962196319641965196619671968196919701971197219731974197519761977197819791980198119821983198419851986198719881989199019911992199319941995199619971998199920002001200220032004200520062007200820092010201120122013201420152016201720182019202020212022202320242025202620272028202920302031203220332034203520362037203820392040204120422043204420452046204720482049205020512052205320542
055205620572058205920602061206220632064206520662067206820692070207120722073207420752076207720782079208020812082208320842085208620872088208920902091209220932094209520962097209820992100210121022103210421052106210721082109211021112112211321142115211621172118211921202121212221232124212521262127212821292130213121322133213421352136213721382139214021412142214321442145214621472148214921502151215221532154215521562157215821592160216121622163216421652166216721682169217021712172217321742175217621772178217921802181218221832184218521862187218821892190219121922193219421952196219721982199220022012202220322042205220622072208220922102211221222132214221522162217221822192220222122222223222422252226222722282229223022312232223322342235223622372238223922402241224222432244224522462247224822492250225122522253225422552256225722582259226022612262226322642265226622672268226922702271227222732274227522762277227822792280228122822283228422852286228722882289229022912292229322942295229622972298229923002301230223032304230523062307230823092310231123122313231423152316231723182319232023212322232323242325232623272328232923302331233223332334233523362337233823392340234123422343234423452346234723482349235023512352235323542355235623572358235923602361236223632364236523662367236823692370237123722373237423752376237723782379238023812382238323842385238623872388238923902391239223932394239523962397239823992400240124022403240424052406240724082409241024112412241324142415241624172418241924202421242224232424242524262427242824292430243124322433243424352436243724382439244024412442244324442445244624472448244924502451245224532454245524562457245824592460246124622463246424652466246724682469247024712472247324742475247624772478247924802481248224832484248524862487248824892490249124922493249424952496249724982499250025012502250325042505250625072508250925102511251225132514251525162517251825192520252125222523252425252526252725282529253025312532253325342535253625372538253925402541254225432544254525462547254825492550255125522553255425552556255725582559256025612562256325642565256625672568256925702571257225732574257525762577257825792580258125822583258425852586258725882589259025912592259325942595259625972598259926002601260226032604260526062607260826092610261126122613261426152616261726182619262026212622262326242625262626272628262926302631263226332634263526362637263826392640264126422643264426452646264726482649265026512652265326542655265626572658265926602661266226632664266526662667266826692670267126722673267426752676267726782679268026812682268326842685268626872688268926902691269226932694269526962697269826992700270127022703270427052706270727082709271027112712271327142715271627172718271927202721272227232724272527262727272827292730273127322733273427352736273727382739274027412742
  1. /*
  2. * Linux NET3: Internet Group Management Protocol [IGMP]
  3. *
  4. * This code implements the IGMP protocol as defined in RFC1112. There has
  5. * been a further revision of this protocol since which is now supported.
  6. *
  7. * If you have trouble with this module be careful what gcc you have used,
  8. * the older version didn't come out right using gcc 2.5.8, the newer one
  9. * seems to fall out with gcc 2.6.2.
  10. *
  11. * Authors:
  12. * Alan Cox <alan@lxorguk.ukuu.org.uk>
  13. *
  14. * This program is free software; you can redistribute it and/or
  15. * modify it under the terms of the GNU General Public License
  16. * as published by the Free Software Foundation; either version
  17. * 2 of the License, or (at your option) any later version.
  18. *
  19. * Fixes:
  20. *
  21. * Alan Cox : Added lots of __inline__ to optimise
  22. * the memory usage of all the tiny little
  23. * functions.
  24. * Alan Cox : Dumped the header building experiment.
  25. * Alan Cox : Minor tweaks ready for multicast routing
  26. * and extended IGMP protocol.
  27. * Alan Cox : Removed a load of inline directives. Gcc 2.5.8
  28. * writes utterly bogus code otherwise (sigh)
  29. * fixed IGMP loopback to behave in the manner
  30. * desired by mrouted, fixed the fact it has been
  31. * broken since 1.3.6 and cleaned up a few minor
  32. * points.
  33. *
  34. * Chih-Jen Chang : Tried to revise IGMP to Version 2
  35. * Tsu-Sheng Tsao E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu
  36. * The enhancements are mainly based on Steve Deering's
  37. * ipmulti-3.5 source code.
  38. * Chih-Jen Chang : Added the igmp_get_mrouter_info and
  39. * Tsu-Sheng Tsao igmp_set_mrouter_info to keep track of
  40. * the mrouted version on that device.
  41. * Chih-Jen Chang : Added the max_resp_time parameter to
  42. * Tsu-Sheng Tsao igmp_heard_query(). Using this parameter
  43. * to identify the multicast router version
  44. * and do what the IGMP version 2 specified.
  45. * Chih-Jen Chang : Added a timer to revert to IGMP V2 router
  46. * Tsu-Sheng Tsao if the specified time expired.
  47. * Alan Cox : Stop IGMP from 0.0.0.0 being accepted.
  48. * Alan Cox : Use GFP_ATOMIC in the right places.
  49. * Christian Daudt : igmp timer wasn't set for local group
  50. * memberships but was being deleted,
  51. * which caused a "del_timer() called
  52. * from %p with timer not initialized\n"
  53. * message (960131).
  54. * Christian Daudt : removed del_timer from
  55. * igmp_timer_expire function (960205).
  56. * Christian Daudt : igmp_heard_report now only calls
  57. * igmp_timer_expire if tm->running is
  58. * true (960216).
  59. * Malcolm Beattie : ttl comparison wrong in igmp_rcv made
  60. * igmp_heard_query never trigger. Expiry
  61. * miscalculation fixed in igmp_heard_query
  62. * and random() made to return unsigned to
  63. * prevent negative expiry times.
  64. * Alexey Kuznetsov: Wrong group leaving behaviour, backport
  65. * fix from pending 2.1.x patches.
  66. * Alan Cox: Forget to enable FDDI support earlier.
  67. * Alexey Kuznetsov: Fixed leaving groups on device down.
  68. * Alexey Kuznetsov: Accordance to igmp-v2-06 draft.
  69. * David L Stevens: IGMPv3 support, with help from
  70. * Vinay Kulkarni
  71. */
  72. #include <linux/module.h>
  73. #include <linux/slab.h>
  74. #include <asm/uaccess.h>
  75. #include <linux/types.h>
  76. #include <linux/kernel.h>
  77. #include <linux/jiffies.h>
  78. #include <linux/string.h>
  79. #include <linux/socket.h>
  80. #include <linux/sockios.h>
  81. #include <linux/in.h>
  82. #include <linux/inet.h>
  83. #include <linux/netdevice.h>
  84. #include <linux/skbuff.h>
  85. #include <linux/inetdevice.h>
  86. #include <linux/igmp.h>
  87. #include <linux/if_arp.h>
  88. #include <linux/rtnetlink.h>
  89. #include <linux/times.h>
  90. #include <net/net_namespace.h>
  91. #include <net/arp.h>
  92. #include <net/ip.h>
  93. #include <net/protocol.h>
  94. #include <net/route.h>
  95. #include <net/sock.h>
  96. #include <net/checksum.h>
  97. #include <linux/netfilter_ipv4.h>
  98. #ifdef CONFIG_IP_MROUTE
  99. #include <linux/mroute.h>
  100. #endif
  101. #ifdef CONFIG_PROC_FS
  102. #include <linux/proc_fs.h>
  103. #include <linux/seq_file.h>
  104. #endif
  105. #define IP_MAX_MEMBERSHIPS 20
  106. #define IP_MAX_MSF 10
  107. #ifdef CONFIG_IP_MULTICAST
  108. /* Parameter names and values are taken from igmp-v2-06 draft */
  109. #define IGMP_V1_Router_Present_Timeout (400*HZ)
  110. #define IGMP_V2_Router_Present_Timeout (400*HZ)
  111. #define IGMP_Unsolicited_Report_Interval (10*HZ)
  112. #define IGMP_Query_Response_Interval (10*HZ)
  113. #define IGMP_Unsolicited_Report_Count 2
  114. #define IGMP_Initial_Report_Delay (1)
  115. /* IGMP_Initial_Report_Delay is not from IGMP specs!
  116. * IGMP specs require to report membership immediately after
  117. * joining a group, but we delay the first report by a
  118. * small interval. It seems more natural and still does not
  119. * contradict to specs provided this delay is small enough.
  120. */
  121. #define IGMP_V1_SEEN(in_dev) \
  122. (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
  123. IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \
  124. ((in_dev)->mr_v1_seen && \
  125. time_before(jiffies, (in_dev)->mr_v1_seen)))
  126. #define IGMP_V2_SEEN(in_dev) \
  127. (IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
  128. IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \
  129. ((in_dev)->mr_v2_seen && \
  130. time_before(jiffies, (in_dev)->mr_v2_seen)))
  131. static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
  132. static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
  133. static void igmpv3_clear_delrec(struct in_device *in_dev);
  134. static int sf_setstate(struct ip_mc_list *pmc);
  135. static void sf_markstate(struct ip_mc_list *pmc);
  136. #endif
  137. static void ip_mc_clear_src(struct ip_mc_list *pmc);
  138. static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
  139. int sfcount, __be32 *psfsrc, int delta);
  140. static void ip_ma_put(struct ip_mc_list *im)
  141. {
  142. if (atomic_dec_and_test(&im->refcnt)) {
  143. in_dev_put(im->interface);
  144. kfree_rcu(im, rcu);
  145. }
  146. }
  147. #define for_each_pmc_rcu(in_dev, pmc) \
  148. for (pmc = rcu_dereference(in_dev->mc_list); \
  149. pmc != NULL; \
  150. pmc = rcu_dereference(pmc->next_rcu))
  151. #define for_each_pmc_rtnl(in_dev, pmc) \
  152. for (pmc = rtnl_dereference(in_dev->mc_list); \
  153. pmc != NULL; \
  154. pmc = rtnl_dereference(pmc->next_rcu))
  155. #ifdef CONFIG_IP_MULTICAST
  156. /*
  157. * Timer management
  158. */
  159. static void igmp_stop_timer(struct ip_mc_list *im)
  160. {
  161. spin_lock_bh(&im->lock);
  162. if (del_timer(&im->timer))
  163. atomic_dec(&im->refcnt);
  164. im->tm_running = 0;
  165. im->reporter = 0;
  166. im->unsolicit_count = 0;
  167. spin_unlock_bh(&im->lock);
  168. }
  169. /* It must be called with locked im->lock */
  170. static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
  171. {
  172. int tv = net_random() % max_delay;
  173. im->tm_running = 1;
  174. if (!mod_timer(&im->timer, jiffies+tv+2))
  175. atomic_inc(&im->refcnt);
  176. }
  177. static void igmp_gq_start_timer(struct in_device *in_dev)
  178. {
  179. int tv = net_random() % in_dev->mr_maxdelay;
  180. in_dev->mr_gq_running = 1;
  181. if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
  182. in_dev_hold(in_dev);
  183. }
  184. static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
  185. {
  186. int tv = net_random() % delay;
  187. if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
  188. in_dev_hold(in_dev);
  189. }
  190. static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
  191. {
  192. spin_lock_bh(&im->lock);
  193. im->unsolicit_count = 0;
  194. if (del_timer(&im->timer)) {
  195. if ((long)(im->timer.expires-jiffies) < max_delay) {
  196. add_timer(&im->timer);
  197. im->tm_running = 1;
  198. spin_unlock_bh(&im->lock);
  199. return;
  200. }
  201. atomic_dec(&im->refcnt);
  202. }
  203. igmp_start_timer(im, max_delay);
  204. spin_unlock_bh(&im->lock);
  205. }
  206. /*
  207. * Send an IGMP report.
  208. */
  209. #define IGMP_SIZE (sizeof(struct igmphdr)+sizeof(struct iphdr)+4)
  210. static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
  211. int gdeleted, int sdeleted)
  212. {
  213. switch (type) {
  214. case IGMPV3_MODE_IS_INCLUDE:
  215. case IGMPV3_MODE_IS_EXCLUDE:
  216. if (gdeleted || sdeleted)
  217. return 0;
  218. if (!(pmc->gsquery && !psf->sf_gsresp)) {
  219. if (pmc->sfmode == MCAST_INCLUDE)
  220. return 1;
  221. /* don't include if this source is excluded
  222. * in all filters
  223. */
  224. if (psf->sf_count[MCAST_INCLUDE])
  225. return type == IGMPV3_MODE_IS_INCLUDE;
  226. return pmc->sfcount[MCAST_EXCLUDE] ==
  227. psf->sf_count[MCAST_EXCLUDE];
  228. }
  229. return 0;
  230. case IGMPV3_CHANGE_TO_INCLUDE:
  231. if (gdeleted || sdeleted)
  232. return 0;
  233. return psf->sf_count[MCAST_INCLUDE] != 0;
  234. case IGMPV3_CHANGE_TO_EXCLUDE:
  235. if (gdeleted || sdeleted)
  236. return 0;
  237. if (pmc->sfcount[MCAST_EXCLUDE] == 0 ||
  238. psf->sf_count[MCAST_INCLUDE])
  239. return 0;
  240. return pmc->sfcount[MCAST_EXCLUDE] ==
  241. psf->sf_count[MCAST_EXCLUDE];
  242. case IGMPV3_ALLOW_NEW_SOURCES:
  243. if (gdeleted || !psf->sf_crcount)
  244. return 0;
  245. return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted;
  246. case IGMPV3_BLOCK_OLD_SOURCES:
  247. if (pmc->sfmode == MCAST_INCLUDE)
  248. return gdeleted || (psf->sf_crcount && sdeleted);
  249. return psf->sf_crcount && !gdeleted && !sdeleted;
  250. }
  251. return 0;
  252. }
  253. static int
  254. igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
  255. {
  256. struct ip_sf_list *psf;
  257. int scount = 0;
  258. for (psf=pmc->sources; psf; psf=psf->sf_next) {
  259. if (!is_in(pmc, psf, type, gdeleted, sdeleted))
  260. continue;
  261. scount++;
  262. }
  263. return scount;
  264. }
  265. #define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb))
  266. static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
  267. {
  268. struct sk_buff *skb;
  269. struct rtable *rt;
  270. struct iphdr *pip;
  271. struct igmpv3_report *pig;
  272. struct net *net = dev_net(dev);
  273. struct flowi4 fl4;
  274. int hlen = LL_RESERVED_SPACE(dev);
  275. int tlen = dev->needed_tailroom;
  276. while (1) {
  277. skb = alloc_skb(size + hlen + tlen,
  278. GFP_ATOMIC | __GFP_NOWARN);
  279. if (skb)
  280. break;
  281. size >>= 1;
  282. if (size < 256)
  283. return NULL;
  284. }
  285. igmp_skb_size(skb) = size;
  286. rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
  287. 0, 0,
  288. IPPROTO_IGMP, 0, dev->ifindex);
  289. if (IS_ERR(rt)) {
  290. kfree_skb(skb);
  291. return NULL;
  292. }
  293. skb_dst_set(skb, &rt->dst);
  294. skb->dev = dev;
  295. skb_reserve(skb, hlen);
  296. skb_reset_network_header(skb);
  297. pip = ip_hdr(skb);
  298. skb_put(skb, sizeof(struct iphdr) + 4);
  299. pip->version = 4;
  300. pip->ihl = (sizeof(struct iphdr)+4)>>2;
  301. pip->tos = 0xc0;
  302. pip->frag_off = htons(IP_DF);
  303. pip->ttl = 1;
  304. pip->daddr = fl4.daddr;
  305. pip->saddr = fl4.saddr;
  306. pip->protocol = IPPROTO_IGMP;
  307. pip->tot_len = 0; /* filled in later */
  308. ip_select_ident(pip, &rt->dst, NULL);
  309. ((u8 *)&pip[1])[0] = IPOPT_RA;
  310. ((u8 *)&pip[1])[1] = 4;
  311. ((u8 *)&pip[1])[2] = 0;
  312. ((u8 *)&pip[1])[3] = 0;
  313. skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;
  314. skb_put(skb, sizeof(*pig));
  315. pig = igmpv3_report_hdr(skb);
  316. pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT;
  317. pig->resv1 = 0;
  318. pig->csum = 0;
  319. pig->resv2 = 0;
  320. pig->ngrec = 0;
  321. return skb;
  322. }
  323. static int igmpv3_sendpack(struct sk_buff *skb)
  324. {
  325. struct igmphdr *pig = igmp_hdr(skb);
  326. const int igmplen = skb_tail_pointer(skb) - skb_transport_header(skb);
  327. pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);
  328. return ip_local_out(skb);
  329. }
  330. static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
  331. {
  332. return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel);
  333. }
  334. static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
  335. int type, struct igmpv3_grec **ppgr)
  336. {
  337. struct net_device *dev = pmc->interface->dev;
  338. struct igmpv3_report *pih;
  339. struct igmpv3_grec *pgr;
  340. if (!skb)
  341. skb = igmpv3_newpack(dev, dev->mtu);
  342. if (!skb)
  343. return NULL;
  344. pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec));
  345. pgr->grec_type = type;
  346. pgr->grec_auxwords = 0;
  347. pgr->grec_nsrcs = 0;
  348. pgr->grec_mca = pmc->multiaddr;
  349. pih = igmpv3_report_hdr(skb);
  350. pih->ngrec = htons(ntohs(pih->ngrec)+1);
  351. *ppgr = pgr;
  352. return skb;
  353. }
  354. #define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \
  355. skb_tailroom(skb)) : 0)
  356. static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
  357. int type, int gdeleted, int sdeleted)
  358. {
  359. struct net_device *dev = pmc->interface->dev;
  360. struct igmpv3_report *pih;
  361. struct igmpv3_grec *pgr = NULL;
  362. struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
  363. int scount, stotal, first, isquery, truncate;
  364. if (pmc->multiaddr == IGMP_ALL_HOSTS)
  365. return skb;
  366. isquery = type == IGMPV3_MODE_IS_INCLUDE ||
  367. type == IGMPV3_MODE_IS_EXCLUDE;
  368. truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
  369. type == IGMPV3_CHANGE_TO_EXCLUDE;
  370. stotal = scount = 0;
  371. psf_list = sdeleted ? &pmc->tomb : &pmc->sources;
  372. if (!*psf_list)
  373. goto empty_source;
  374. pih = skb ? igmpv3_report_hdr(skb) : NULL;
  375. /* EX and TO_EX get a fresh packet, if needed */
  376. if (truncate) {
  377. if (pih && pih->ngrec &&
  378. AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
  379. if (skb)
  380. igmpv3_sendpack(skb);
  381. skb = igmpv3_newpack(dev, dev->mtu);
  382. }
  383. }
  384. first = 1;
  385. psf_prev = NULL;
  386. for (psf=*psf_list; psf; psf=psf_next) {
  387. __be32 *psrc;
  388. psf_next = psf->sf_next;
  389. if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
  390. psf_prev = psf;
  391. continue;
  392. }
  393. /* clear marks on query responses */
  394. if (isquery)
  395. psf->sf_gsresp = 0;
  396. if (AVAILABLE(skb) < sizeof(__be32) +
  397. first*sizeof(struct igmpv3_grec)) {
  398. if (truncate && !first)
  399. break; /* truncate these */
  400. if (pgr)
  401. pgr->grec_nsrcs = htons(scount);
  402. if (skb)
  403. igmpv3_sendpack(skb);
  404. skb = igmpv3_newpack(dev, dev->mtu);
  405. first = 1;
  406. scount = 0;
  407. }
  408. if (first) {
  409. skb = add_grhead(skb, pmc, type, &pgr);
  410. first = 0;
  411. }
  412. if (!skb)
  413. return NULL;
  414. psrc = (__be32 *)skb_put(skb, sizeof(__be32));
  415. *psrc = psf->sf_inaddr;
  416. scount++; stotal++;
  417. if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
  418. type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
  419. psf->sf_crcount--;
  420. if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
  421. if (psf_prev)
  422. psf_prev->sf_next = psf->sf_next;
  423. else
  424. *psf_list = psf->sf_next;
  425. kfree(psf);
  426. continue;
  427. }
  428. }
  429. psf_prev = psf;
  430. }
  431. empty_source:
  432. if (!stotal) {
  433. if (type == IGMPV3_ALLOW_NEW_SOURCES ||
  434. type == IGMPV3_BLOCK_OLD_SOURCES)
  435. return skb;
  436. if (pmc->crcount || isquery) {
  437. /* make sure we have room for group header */
  438. if (skb && AVAILABLE(skb)<sizeof(struct igmpv3_grec)) {
  439. igmpv3_sendpack(skb);
  440. skb = NULL; /* add_grhead will get a new one */
  441. }
  442. skb = add_grhead(skb, pmc, type, &pgr);
  443. }
  444. }
  445. if (pgr)
  446. pgr->grec_nsrcs = htons(scount);
  447. if (isquery)
  448. pmc->gsquery = 0; /* clear query state on report */
  449. return skb;
  450. }
  451. static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
  452. {
  453. struct sk_buff *skb = NULL;
  454. int type;
  455. if (!pmc) {
  456. rcu_read_lock();
  457. for_each_pmc_rcu(in_dev, pmc) {
  458. if (pmc->multiaddr == IGMP_ALL_HOSTS)
  459. continue;
  460. spin_lock_bh(&pmc->lock);
  461. if (pmc->sfcount[MCAST_EXCLUDE])
  462. type = IGMPV3_MODE_IS_EXCLUDE;
  463. else
  464. type = IGMPV3_MODE_IS_INCLUDE;
  465. skb = add_grec(skb, pmc, type, 0, 0);
  466. spin_unlock_bh(&pmc->lock);
  467. }
  468. rcu_read_unlock();
  469. } else {
  470. spin_lock_bh(&pmc->lock);
  471. if (pmc->sfcount[MCAST_EXCLUDE])
  472. type = IGMPV3_MODE_IS_EXCLUDE;
  473. else
  474. type = IGMPV3_MODE_IS_INCLUDE;
  475. skb = add_grec(skb, pmc, type, 0, 0);
  476. spin_unlock_bh(&pmc->lock);
  477. }
  478. if (!skb)
  479. return 0;
  480. return igmpv3_sendpack(skb);
  481. }
  482. /*
  483. * remove zero-count source records from a source filter list
  484. */
  485. static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
  486. {
  487. struct ip_sf_list *psf_prev, *psf_next, *psf;
  488. psf_prev = NULL;
  489. for (psf=*ppsf; psf; psf = psf_next) {
  490. psf_next = psf->sf_next;
  491. if (psf->sf_crcount == 0) {
  492. if (psf_prev)
  493. psf_prev->sf_next = psf->sf_next;
  494. else
  495. *ppsf = psf->sf_next;
  496. kfree(psf);
  497. } else
  498. psf_prev = psf;
  499. }
  500. }
  501. static void igmpv3_send_cr(struct in_device *in_dev)
  502. {
  503. struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
  504. struct sk_buff *skb = NULL;
  505. int type, dtype;
  506. rcu_read_lock();
  507. spin_lock_bh(&in_dev->mc_tomb_lock);
  508. /* deleted MCA's */
  509. pmc_prev = NULL;
  510. for (pmc=in_dev->mc_tomb; pmc; pmc=pmc_next) {
  511. pmc_next = pmc->next;
  512. if (pmc->sfmode == MCAST_INCLUDE) {
  513. type = IGMPV3_BLOCK_OLD_SOURCES;
  514. dtype = IGMPV3_BLOCK_OLD_SOURCES;
  515. skb = add_grec(skb, pmc, type, 1, 0);
  516. skb = add_grec(skb, pmc, dtype, 1, 1);
  517. }
  518. if (pmc->crcount) {
  519. if (pmc->sfmode == MCAST_EXCLUDE) {
  520. type = IGMPV3_CHANGE_TO_INCLUDE;
  521. skb = add_grec(skb, pmc, type, 1, 0);
  522. }
  523. pmc->crcount--;
  524. if (pmc->crcount == 0) {
  525. igmpv3_clear_zeros(&pmc->tomb);
  526. igmpv3_clear_zeros(&pmc->sources);
  527. }
  528. }
  529. if (pmc->crcount == 0 && !pmc->tomb && !pmc->sources) {
  530. if (pmc_prev)
  531. pmc_prev->next = pmc_next;
  532. else
  533. in_dev->mc_tomb = pmc_next;
  534. in_dev_put(pmc->interface);
  535. kfree(pmc);
  536. } else
  537. pmc_prev = pmc;
  538. }
  539. spin_unlock_bh(&in_dev->mc_tomb_lock);
  540. /* change recs */
  541. for_each_pmc_rcu(in_dev, pmc) {
  542. spin_lock_bh(&pmc->lock);
  543. if (pmc->sfcount[MCAST_EXCLUDE]) {
  544. type = IGMPV3_BLOCK_OLD_SOURCES;
  545. dtype = IGMPV3_ALLOW_NEW_SOURCES;
  546. } else {
  547. type = IGMPV3_ALLOW_NEW_SOURCES;
  548. dtype = IGMPV3_BLOCK_OLD_SOURCES;
  549. }
  550. skb = add_grec(skb, pmc, type, 0, 0);
  551. skb = add_grec(skb, pmc, dtype, 0, 1); /* deleted sources */
  552. /* filter mode changes */
  553. if (pmc->crcount) {
  554. if (pmc->sfmode == MCAST_EXCLUDE)
  555. type = IGMPV3_CHANGE_TO_EXCLUDE;
  556. else
  557. type = IGMPV3_CHANGE_TO_INCLUDE;
  558. skb = add_grec(skb, pmc, type, 0, 0);
  559. pmc->crcount--;
  560. }
  561. spin_unlock_bh(&pmc->lock);
  562. }
  563. rcu_read_unlock();
  564. if (!skb)
  565. return;
  566. (void) igmpv3_sendpack(skb);
  567. }
  568. static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
  569. int type)
  570. {
  571. struct sk_buff *skb;
  572. struct iphdr *iph;
  573. struct igmphdr *ih;
  574. struct rtable *rt;
  575. struct net_device *dev = in_dev->dev;
  576. struct net *net = dev_net(dev);
  577. __be32 group = pmc ? pmc->multiaddr : 0;
  578. struct flowi4 fl4;
  579. __be32 dst;
  580. int hlen, tlen;
  581. if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
  582. return igmpv3_send_report(in_dev, pmc);
  583. else if (type == IGMP_HOST_LEAVE_MESSAGE)
  584. dst = IGMP_ALL_ROUTER;
  585. else
  586. dst = group;
  587. rt = ip_route_output_ports(net, &fl4, NULL, dst, 0,
  588. 0, 0,
  589. IPPROTO_IGMP, 0, dev->ifindex);
  590. if (IS_ERR(rt))
  591. return -1;
  592. hlen = LL_RESERVED_SPACE(dev);
  593. tlen = dev->needed_tailroom;
  594. skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
  595. if (skb == NULL) {
  596. ip_rt_put(rt);
  597. return -1;
  598. }
  599. skb_dst_set(skb, &rt->dst);
  600. skb_reserve(skb, hlen);
  601. skb_reset_network_header(skb);
  602. iph = ip_hdr(skb);
  603. skb_put(skb, sizeof(struct iphdr) + 4);
  604. iph->version = 4;
  605. iph->ihl = (sizeof(struct iphdr)+4)>>2;
  606. iph->tos = 0xc0;
  607. iph->frag_off = htons(IP_DF);
  608. iph->ttl = 1;
  609. iph->daddr = dst;
  610. iph->saddr = fl4.saddr;
  611. iph->protocol = IPPROTO_IGMP;
  612. ip_select_ident(iph, &rt->dst, NULL);
  613. ((u8 *)&iph[1])[0] = IPOPT_RA;
  614. ((u8 *)&iph[1])[1] = 4;
  615. ((u8 *)&iph[1])[2] = 0;
  616. ((u8 *)&iph[1])[3] = 0;
  617. ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
  618. ih->type = type;
  619. ih->code = 0;
  620. ih->csum = 0;
  621. ih->group = group;
  622. ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));
  623. return ip_local_out(skb);
  624. }
  625. static void igmp_gq_timer_expire(unsigned long data)
  626. {
  627. struct in_device *in_dev = (struct in_device *)data;
  628. in_dev->mr_gq_running = 0;
  629. igmpv3_send_report(in_dev, NULL);
  630. __in_dev_put(in_dev);
  631. }
  632. static void igmp_ifc_timer_expire(unsigned long data)
  633. {
  634. struct in_device *in_dev = (struct in_device *)data;
  635. igmpv3_send_cr(in_dev);
  636. if (in_dev->mr_ifc_count) {
  637. in_dev->mr_ifc_count--;
  638. igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval);
  639. }
  640. __in_dev_put(in_dev);
  641. }
  642. static void igmp_ifc_event(struct in_device *in_dev)
  643. {
  644. if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
  645. return;
  646. in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv :
  647. IGMP_Unsolicited_Report_Count;
  648. igmp_ifc_start_timer(in_dev, 1);
  649. }
  650. static void igmp_timer_expire(unsigned long data)
  651. {
  652. struct ip_mc_list *im=(struct ip_mc_list *)data;
  653. struct in_device *in_dev = im->interface;
  654. spin_lock(&im->lock);
  655. im->tm_running = 0;
  656. if (im->unsolicit_count) {
  657. im->unsolicit_count--;
  658. igmp_start_timer(im, IGMP_Unsolicited_Report_Interval);
  659. }
  660. im->reporter = 1;
  661. spin_unlock(&im->lock);
  662. if (IGMP_V1_SEEN(in_dev))
  663. igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
  664. else if (IGMP_V2_SEEN(in_dev))
  665. igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
  666. else
  667. igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);
  668. ip_ma_put(im);
  669. }
  670. /* mark EXCLUDE-mode sources */
  671. static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
  672. {
  673. struct ip_sf_list *psf;
  674. int i, scount;
  675. scount = 0;
  676. for (psf=pmc->sources; psf; psf=psf->sf_next) {
  677. if (scount == nsrcs)
  678. break;
  679. for (i=0; i<nsrcs; i++) {
  680. /* skip inactive filters */
  681. if (psf->sf_count[MCAST_INCLUDE] ||
  682. pmc->sfcount[MCAST_EXCLUDE] !=
  683. psf->sf_count[MCAST_EXCLUDE])
  684. break;
  685. if (srcs[i] == psf->sf_inaddr) {
  686. scount++;
  687. break;
  688. }
  689. }
  690. }
  691. pmc->gsquery = 0;
  692. if (scount == nsrcs) /* all sources excluded */
  693. return 0;
  694. return 1;
  695. }
  696. static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
  697. {
  698. struct ip_sf_list *psf;
  699. int i, scount;
  700. if (pmc->sfmode == MCAST_EXCLUDE)
  701. return igmp_xmarksources(pmc, nsrcs, srcs);
  702. /* mark INCLUDE-mode sources */
  703. scount = 0;
  704. for (psf=pmc->sources; psf; psf=psf->sf_next) {
  705. if (scount == nsrcs)
  706. break;
  707. for (i=0; i<nsrcs; i++)
  708. if (srcs[i] == psf->sf_inaddr) {
  709. psf->sf_gsresp = 1;
  710. scount++;
  711. break;
  712. }
  713. }
  714. if (!scount) {
  715. pmc->gsquery = 0;
  716. return 0;
  717. }
  718. pmc->gsquery = 1;
  719. return 1;
  720. }
  721. /* return true if packet was dropped */
  722. static bool igmp_heard_report(struct in_device *in_dev, __be32 group)
  723. {
  724. struct ip_mc_list *im;
  725. /* Timers are only set for non-local groups */
  726. if (group == IGMP_ALL_HOSTS)
  727. return false;
  728. rcu_read_lock();
  729. for_each_pmc_rcu(in_dev, im) {
  730. if (im->multiaddr == group) {
  731. igmp_stop_timer(im);
  732. break;
  733. }
  734. }
  735. rcu_read_unlock();
  736. return false;
  737. }
  738. /* return true if packet was dropped */
  739. static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
  740. int len)
  741. {
  742. struct igmphdr *ih = igmp_hdr(skb);
  743. struct igmpv3_query *ih3 = igmpv3_query_hdr(skb);
  744. struct ip_mc_list *im;
  745. __be32 group = ih->group;
  746. int max_delay;
  747. int mark = 0;
  748. if (len == 8) {
  749. if (ih->code == 0) {
  750. /* Alas, old v1 router presents here. */
  751. max_delay = IGMP_Query_Response_Interval;
  752. in_dev->mr_v1_seen = jiffies +
  753. IGMP_V1_Router_Present_Timeout;
  754. group = 0;
  755. } else {
  756. /* v2 router present */
  757. max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
  758. in_dev->mr_v2_seen = jiffies +
  759. IGMP_V2_Router_Present_Timeout;
  760. }
  761. /* cancel the interface change timer */
  762. in_dev->mr_ifc_count = 0;
  763. if (del_timer(&in_dev->mr_ifc_timer))
  764. __in_dev_put(in_dev);
  765. /* clear deleted report items */
  766. igmpv3_clear_delrec(in_dev);
  767. } else if (len < 12) {
  768. return true; /* ignore bogus packet; freed by caller */
  769. } else if (IGMP_V1_SEEN(in_dev)) {
  770. /* This is a v3 query with v1 queriers present */
  771. max_delay = IGMP_Query_Response_Interval;
  772. group = 0;
  773. } else if (IGMP_V2_SEEN(in_dev)) {
  774. /* this is a v3 query with v2 queriers present;
  775. * Interpretation of the max_delay code is problematic here.
  776. * A real v2 host would use ih_code directly, while v3 has a
  777. * different encoding. We use the v3 encoding as more likely
  778. * to be intended in a v3 query.
  779. */
  780. max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
  781. if (!max_delay)
  782. max_delay = 1; /* can't mod w/ 0 */
  783. } else { /* v3 */
  784. if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
  785. return true;
  786. ih3 = igmpv3_query_hdr(skb);
  787. if (ih3->nsrcs) {
  788. if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
  789. + ntohs(ih3->nsrcs)*sizeof(__be32)))
  790. return true;
  791. ih3 = igmpv3_query_hdr(skb);
  792. }
  793. max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
  794. if (!max_delay)
  795. max_delay = 1; /* can't mod w/ 0 */
  796. in_dev->mr_maxdelay = max_delay;
  797. if (ih3->qrv)
  798. in_dev->mr_qrv = ih3->qrv;
  799. if (!group) { /* general query */
  800. if (ih3->nsrcs)
  801. return false; /* no sources allowed */
  802. igmp_gq_start_timer(in_dev);
  803. return false;
  804. }
  805. /* mark sources to include, if group & source-specific */
  806. mark = ih3->nsrcs != 0;
  807. }
  808. /*
  809. * - Start the timers in all of our membership records
  810. * that the query applies to for the interface on
  811. * which the query arrived excl. those that belong
  812. * to a "local" group (224.0.0.X)
  813. * - For timers already running check if they need to
  814. * be reset.
  815. * - Use the igmp->igmp_code field as the maximum
  816. * delay possible
  817. */
  818. rcu_read_lock();
  819. for_each_pmc_rcu(in_dev, im) {
  820. int changed;
  821. if (group && group != im->multiaddr)
  822. continue;
  823. if (im->multiaddr == IGMP_ALL_HOSTS)
  824. continue;
  825. spin_lock_bh(&im->lock);
  826. if (im->tm_running)
  827. im->gsquery = im->gsquery && mark;
  828. else
  829. im->gsquery = mark;
  830. changed = !im->gsquery ||
  831. igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
  832. spin_unlock_bh(&im->lock);
  833. if (changed)
  834. igmp_mod_timer(im, max_delay);
  835. }
  836. rcu_read_unlock();
  837. return false;
  838. }
  839. /* called in rcu_read_lock() section */
  840. int igmp_rcv(struct sk_buff *skb)
  841. {
  842. /* This basically follows the spec line by line -- see RFC1112 */
  843. struct igmphdr *ih;
  844. struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
  845. int len = skb->len;
  846. bool dropped = true;
  847. if (in_dev == NULL)
  848. goto drop;
  849. if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
  850. goto drop;
  851. switch (skb->ip_summed) {
  852. case CHECKSUM_COMPLETE:
  853. if (!csum_fold(skb->csum))
  854. break;
  855. /* fall through */
  856. case CHECKSUM_NONE:
  857. skb->csum = 0;
  858. if (__skb_checksum_complete(skb))
  859. goto drop;
  860. }
  861. ih = igmp_hdr(skb);
  862. switch (ih->type) {
  863. case IGMP_HOST_MEMBERSHIP_QUERY:
  864. dropped = igmp_heard_query(in_dev, skb, len);
  865. break;
  866. case IGMP_HOST_MEMBERSHIP_REPORT:
  867. case IGMPV2_HOST_MEMBERSHIP_REPORT:
  868. /* Is it our report looped back? */
  869. if (rt_is_output_route(skb_rtable(skb)))
  870. break;
  871. /* don't rely on MC router hearing unicast reports */
  872. if (skb->pkt_type == PACKET_MULTICAST ||
  873. skb->pkt_type == PACKET_BROADCAST)
  874. dropped = igmp_heard_report(in_dev, ih->group);
  875. break;
  876. case IGMP_PIM:
  877. #ifdef CONFIG_IP_PIMSM_V1
  878. return pim_rcv_v1(skb);
  879. #endif
  880. case IGMPV3_HOST_MEMBERSHIP_REPORT:
  881. case IGMP_DVMRP:
  882. case IGMP_TRACE:
  883. case IGMP_HOST_LEAVE_MESSAGE:
  884. case IGMP_MTRACE:
  885. case IGMP_MTRACE_RESP:
  886. break;
  887. default:
  888. break;
  889. }
  890. drop:
  891. if (dropped)
  892. kfree_skb(skb);
  893. else
  894. consume_skb(skb);
  895. return 0;
  896. }
  897. #endif
  898. /*
  899. * Add a filter to a device
  900. */
  901. static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
  902. {
  903. char buf[MAX_ADDR_LEN];
  904. struct net_device *dev = in_dev->dev;
  905. /* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
  906. We will get multicast token leakage, when IFF_MULTICAST
  907. is changed. This check should be done in ndo_set_rx_mode
  908. routine. Something sort of:
  909. if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
  910. --ANK
  911. */
  912. if (arp_mc_map(addr, buf, dev, 0) == 0)
  913. dev_mc_add(dev, buf);
  914. }
  915. /*
  916. * Remove a filter from a device
  917. */
  918. static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
  919. {
  920. char buf[MAX_ADDR_LEN];
  921. struct net_device *dev = in_dev->dev;
  922. if (arp_mc_map(addr, buf, dev, 0) == 0)
  923. dev_mc_del(dev, buf);
  924. }
  925. #ifdef CONFIG_IP_MULTICAST
  926. /*
  927. * deleted ip_mc_list manipulation
  928. */
  929. static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
  930. {
  931. struct ip_mc_list *pmc;
  932. /* this is an "ip_mc_list" for convenience; only the fields below
  933. * are actually used. In particular, the refcnt and users are not
  934. * used for management of the delete list. Using the same structure
  935. * for deleted items allows change reports to use common code with
  936. * non-deleted or query-response MCA's.
  937. */
  938. pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
  939. if (!pmc)
  940. return;
  941. spin_lock_bh(&im->lock);
  942. pmc->interface = im->interface;
  943. in_dev_hold(in_dev);
  944. pmc->multiaddr = im->multiaddr;
  945. pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
  946. IGMP_Unsolicited_Report_Count;
  947. pmc->sfmode = im->sfmode;
  948. if (pmc->sfmode == MCAST_INCLUDE) {
  949. struct ip_sf_list *psf;
  950. pmc->tomb = im->tomb;
  951. pmc->sources = im->sources;
  952. im->tomb = im->sources = NULL;
  953. for (psf=pmc->sources; psf; psf=psf->sf_next)
  954. psf->sf_crcount = pmc->crcount;
  955. }
  956. spin_unlock_bh(&im->lock);
  957. spin_lock_bh(&in_dev->mc_tomb_lock);
  958. pmc->next = in_dev->mc_tomb;
  959. in_dev->mc_tomb = pmc;
  960. spin_unlock_bh(&in_dev->mc_tomb_lock);
  961. }
  962. static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
  963. {
  964. struct ip_mc_list *pmc, *pmc_prev;
  965. struct ip_sf_list *psf, *psf_next;
  966. spin_lock_bh(&in_dev->mc_tomb_lock);
  967. pmc_prev = NULL;
  968. for (pmc=in_dev->mc_tomb; pmc; pmc=pmc->next) {
  969. if (pmc->multiaddr == multiaddr)
  970. break;
  971. pmc_prev = pmc;
  972. }
  973. if (pmc) {
  974. if (pmc_prev)
  975. pmc_prev->next = pmc->next;
  976. else
  977. in_dev->mc_tomb = pmc->next;
  978. }
  979. spin_unlock_bh(&in_dev->mc_tomb_lock);
  980. if (pmc) {
  981. for (psf=pmc->tomb; psf; psf=psf_next) {
  982. psf_next = psf->sf_next;
  983. kfree(psf);
  984. }
  985. in_dev_put(pmc->interface);
  986. kfree(pmc);
  987. }
  988. }
  989. static void igmpv3_clear_delrec(struct in_device *in_dev)
  990. {
  991. struct ip_mc_list *pmc, *nextpmc;
  992. spin_lock_bh(&in_dev->mc_tomb_lock);
  993. pmc = in_dev->mc_tomb;
  994. in_dev->mc_tomb = NULL;
  995. spin_unlock_bh(&in_dev->mc_tomb_lock);
  996. for (; pmc; pmc = nextpmc) {
  997. nextpmc = pmc->next;
  998. ip_mc_clear_src(pmc);
  999. in_dev_put(pmc->interface);
  1000. kfree(pmc);
  1001. }
  1002. /* clear dead sources, too */
  1003. rcu_read_lock();
  1004. for_each_pmc_rcu(in_dev, pmc) {
  1005. struct ip_sf_list *psf, *psf_next;
  1006. spin_lock_bh(&pmc->lock);
  1007. psf = pmc->tomb;
  1008. pmc->tomb = NULL;
  1009. spin_unlock_bh(&pmc->lock);
  1010. for (; psf; psf=psf_next) {
  1011. psf_next = psf->sf_next;
  1012. kfree(psf);
  1013. }
  1014. }
  1015. rcu_read_unlock();
  1016. }
  1017. #endif
  1018. static void igmp_group_dropped(struct ip_mc_list *im)
  1019. {
  1020. struct in_device *in_dev = im->interface;
  1021. #ifdef CONFIG_IP_MULTICAST
  1022. int reporter;
  1023. #endif
  1024. if (im->loaded) {
  1025. im->loaded = 0;
  1026. ip_mc_filter_del(in_dev, im->multiaddr);
  1027. }
  1028. #ifdef CONFIG_IP_MULTICAST
  1029. if (im->multiaddr == IGMP_ALL_HOSTS)
  1030. return;
  1031. reporter = im->reporter;
  1032. igmp_stop_timer(im);
  1033. if (!in_dev->dead) {
  1034. if (IGMP_V1_SEEN(in_dev))
  1035. return;
  1036. if (IGMP_V2_SEEN(in_dev)) {
  1037. if (reporter)
  1038. igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
  1039. return;
  1040. }
  1041. /* IGMPv3 */
  1042. igmpv3_add_delrec(in_dev, im);
  1043. igmp_ifc_event(in_dev);
  1044. }
  1045. #endif
  1046. }
  1047. static void igmp_group_added(struct ip_mc_list *im)
  1048. {
  1049. struct in_device *in_dev = im->interface;
  1050. if (im->loaded == 0) {
  1051. im->loaded = 1;
  1052. ip_mc_filter_add(in_dev, im->multiaddr);
  1053. }
  1054. #ifdef CONFIG_IP_MULTICAST
  1055. if (im->multiaddr == IGMP_ALL_HOSTS)
  1056. return;
  1057. if (in_dev->dead)
  1058. return;
  1059. if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
  1060. spin_lock_bh(&im->lock);
  1061. igmp_start_timer(im, IGMP_Initial_Report_Delay);
  1062. spin_unlock_bh(&im->lock);
  1063. return;
  1064. }
  1065. /* else, v3 */
  1066. im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
  1067. IGMP_Unsolicited_Report_Count;
  1068. igmp_ifc_event(in_dev);
  1069. #endif
  1070. }
  1071. /*
  1072. * Multicast list managers
  1073. */
  1074. static u32 ip_mc_hash(const struct ip_mc_list *im)
  1075. {
  1076. return hash_32((__force u32)im->multiaddr, MC_HASH_SZ_LOG);
  1077. }
  1078. static void ip_mc_hash_add(struct in_device *in_dev,
  1079. struct ip_mc_list *im)
  1080. {
  1081. struct ip_mc_list __rcu **mc_hash;
  1082. u32 hash;
  1083. mc_hash = rtnl_dereference(in_dev->mc_hash);
  1084. if (mc_hash) {
  1085. hash = ip_mc_hash(im);
  1086. im->next_hash = mc_hash[hash];
  1087. rcu_assign_pointer(mc_hash[hash], im);
  1088. return;
  1089. }
  1090. /* do not use a hash table for small number of items */
  1091. if (in_dev->mc_count < 4)
  1092. return;
  1093. mc_hash = kzalloc(sizeof(struct ip_mc_list *) << MC_HASH_SZ_LOG,
  1094. GFP_KERNEL);
  1095. if (!mc_hash)
  1096. return;
  1097. for_each_pmc_rtnl(in_dev, im) {
  1098. hash = ip_mc_hash(im);
  1099. im->next_hash = mc_hash[hash];
  1100. RCU_INIT_POINTER(mc_hash[hash], im);
  1101. }
  1102. rcu_assign_pointer(in_dev->mc_hash, mc_hash);
  1103. }
  1104. static void ip_mc_hash_remove(struct in_device *in_dev,
  1105. struct ip_mc_list *im)
  1106. {
  1107. struct ip_mc_list __rcu **mc_hash = rtnl_dereference(in_dev->mc_hash);
  1108. struct ip_mc_list *aux;
  1109. if (!mc_hash)
  1110. return;
  1111. mc_hash += ip_mc_hash(im);
  1112. while ((aux = rtnl_dereference(*mc_hash)) != im)
  1113. mc_hash = &aux->next_hash;
  1114. *mc_hash = im->next_hash;
  1115. }
  1116. /*
  1117. * A socket has joined a multicast group on device dev.
  1118. */
  1119. void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
  1120. {
  1121. struct ip_mc_list *im;
  1122. ASSERT_RTNL();
  1123. for_each_pmc_rtnl(in_dev, im) {
  1124. if (im->multiaddr == addr) {
  1125. im->users++;
  1126. ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
  1127. goto out;
  1128. }
  1129. }
  1130. im = kzalloc(sizeof(*im), GFP_KERNEL);
  1131. if (!im)
  1132. goto out;
  1133. im->users = 1;
  1134. im->interface = in_dev;
  1135. in_dev_hold(in_dev);
  1136. im->multiaddr = addr;
  1137. /* initial mode is (EX, empty) */
  1138. im->sfmode = MCAST_EXCLUDE;
  1139. im->sfcount[MCAST_EXCLUDE] = 1;
  1140. atomic_set(&im->refcnt, 1);
  1141. spin_lock_init(&im->lock);
  1142. #ifdef CONFIG_IP_MULTICAST
  1143. setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
  1144. im->unsolicit_count = IGMP_Unsolicited_Report_Count;
  1145. #endif
  1146. im->next_rcu = in_dev->mc_list;
  1147. in_dev->mc_count++;
  1148. rcu_assign_pointer(in_dev->mc_list, im);
  1149. ip_mc_hash_add(in_dev, im);
  1150. #ifdef CONFIG_IP_MULTICAST
  1151. igmpv3_del_delrec(in_dev, im->multiaddr);
  1152. #endif
  1153. igmp_group_added(im);
  1154. if (!in_dev->dead)
  1155. ip_rt_multicast_event(in_dev);
  1156. out:
  1157. return;
  1158. }
  1159. EXPORT_SYMBOL(ip_mc_inc_group);
  1160. /*
  1161. * Resend IGMP JOIN report; used for bonding.
  1162. * Called with rcu_read_lock()
  1163. */
  1164. void ip_mc_rejoin_groups(struct in_device *in_dev)
  1165. {
  1166. #ifdef CONFIG_IP_MULTICAST
  1167. struct ip_mc_list *im;
  1168. int type;
  1169. for_each_pmc_rcu(in_dev, im) {
  1170. if (im->multiaddr == IGMP_ALL_HOSTS)
  1171. continue;
  1172. /* a failover is happening and switches
  1173. * must be notified immediately
  1174. */
  1175. if (IGMP_V1_SEEN(in_dev))
  1176. type = IGMP_HOST_MEMBERSHIP_REPORT;
  1177. else if (IGMP_V2_SEEN(in_dev))
  1178. type = IGMPV2_HOST_MEMBERSHIP_REPORT;
  1179. else
  1180. type = IGMPV3_HOST_MEMBERSHIP_REPORT;
  1181. igmp_send_report(in_dev, im, type);
  1182. }
  1183. #endif
  1184. }
  1185. EXPORT_SYMBOL(ip_mc_rejoin_groups);
  1186. /*
  1187. * A socket has left a multicast group on device dev
  1188. */
  1189. void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
  1190. {
  1191. struct ip_mc_list *i;
  1192. struct ip_mc_list __rcu **ip;
  1193. ASSERT_RTNL();
  1194. for (ip = &in_dev->mc_list;
  1195. (i = rtnl_dereference(*ip)) != NULL;
  1196. ip = &i->next_rcu) {
  1197. if (i->multiaddr == addr) {
  1198. if (--i->users == 0) {
  1199. ip_mc_hash_remove(in_dev, i);
  1200. *ip = i->next_rcu;
  1201. in_dev->mc_count--;
  1202. igmp_group_dropped(i);
  1203. ip_mc_clear_src(i);
  1204. if (!in_dev->dead)
  1205. ip_rt_multicast_event(in_dev);
  1206. ip_ma_put(i);
  1207. return;
  1208. }
  1209. break;
  1210. }
  1211. }
  1212. }
  1213. EXPORT_SYMBOL(ip_mc_dec_group);
  1214. /* Device changing type */
  1215. void ip_mc_unmap(struct in_device *in_dev)
  1216. {
  1217. struct ip_mc_list *pmc;
  1218. ASSERT_RTNL();
  1219. for_each_pmc_rtnl(in_dev, pmc)
  1220. igmp_group_dropped(pmc);
  1221. }
  1222. void ip_mc_remap(struct in_device *in_dev)
  1223. {
  1224. struct ip_mc_list *pmc;
  1225. ASSERT_RTNL();
  1226. for_each_pmc_rtnl(in_dev, pmc)
  1227. igmp_group_added(pmc);
  1228. }
  1229. /* Device going down */
  1230. void ip_mc_down(struct in_device *in_dev)
  1231. {
  1232. struct ip_mc_list *pmc;
  1233. ASSERT_RTNL();
  1234. for_each_pmc_rtnl(in_dev, pmc)
  1235. igmp_group_dropped(pmc);
  1236. #ifdef CONFIG_IP_MULTICAST
  1237. in_dev->mr_ifc_count = 0;
  1238. if (del_timer(&in_dev->mr_ifc_timer))
  1239. __in_dev_put(in_dev);
  1240. in_dev->mr_gq_running = 0;
  1241. if (del_timer(&in_dev->mr_gq_timer))
  1242. __in_dev_put(in_dev);
  1243. igmpv3_clear_delrec(in_dev);
  1244. #endif
  1245. ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
  1246. }
  1247. void ip_mc_init_dev(struct in_device *in_dev)
  1248. {
  1249. ASSERT_RTNL();
  1250. #ifdef CONFIG_IP_MULTICAST
  1251. setup_timer(&in_dev->mr_gq_timer, igmp_gq_timer_expire,
  1252. (unsigned long)in_dev);
  1253. setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire,
  1254. (unsigned long)in_dev);
  1255. in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
  1256. #endif
  1257. spin_lock_init(&in_dev->mc_tomb_lock);
  1258. }
  1259. /* Device going up */
  1260. void ip_mc_up(struct in_device *in_dev)
  1261. {
  1262. struct ip_mc_list *pmc;
  1263. ASSERT_RTNL();
  1264. ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
  1265. for_each_pmc_rtnl(in_dev, pmc)
  1266. igmp_group_added(pmc);
  1267. }
  1268. /*
  1269. * Device is about to be destroyed: clean up.
  1270. */
  1271. void ip_mc_destroy_dev(struct in_device *in_dev)
  1272. {
  1273. struct ip_mc_list *i;
  1274. ASSERT_RTNL();
  1275. /* Deactivate timers */
  1276. ip_mc_down(in_dev);
  1277. while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
  1278. in_dev->mc_list = i->next_rcu;
  1279. in_dev->mc_count--;
  1280. /* We've dropped the groups in ip_mc_down already */
  1281. ip_mc_clear_src(i);
  1282. ip_ma_put(i);
  1283. }
  1284. }
  1285. /* RTNL is locked */
  1286. static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
  1287. {
  1288. struct net_device *dev = NULL;
  1289. struct in_device *idev = NULL;
  1290. if (imr->imr_ifindex) {
  1291. idev = inetdev_by_index(net, imr->imr_ifindex);
  1292. return idev;
  1293. }
  1294. if (imr->imr_address.s_addr) {
  1295. dev = __ip_dev_find(net, imr->imr_address.s_addr, false);
  1296. if (!dev)
  1297. return NULL;
  1298. }
  1299. if (!dev) {
  1300. struct rtable *rt = ip_route_output(net,
  1301. imr->imr_multiaddr.s_addr,
  1302. 0, 0, 0);
  1303. if (!IS_ERR(rt)) {
  1304. dev = rt->dst.dev;
  1305. ip_rt_put(rt);
  1306. }
  1307. }
  1308. if (dev) {
  1309. imr->imr_ifindex = dev->ifindex;
  1310. idev = __in_dev_get_rtnl(dev);
  1311. }
  1312. return idev;
  1313. }
  1314. /*
  1315. * Join a socket to a group
  1316. */
  1317. int sysctl_igmp_max_memberships __read_mostly = IP_MAX_MEMBERSHIPS;
  1318. int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF;
  1319. static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
  1320. __be32 *psfsrc)
  1321. {
  1322. struct ip_sf_list *psf, *psf_prev;
  1323. int rv = 0;
  1324. psf_prev = NULL;
  1325. for (psf=pmc->sources; psf; psf=psf->sf_next) {
  1326. if (psf->sf_inaddr == *psfsrc)
  1327. break;
  1328. psf_prev = psf;
  1329. }
  1330. if (!psf || psf->sf_count[sfmode] == 0) {
  1331. /* source filter not found, or count wrong => bug */
  1332. return -ESRCH;
  1333. }
  1334. psf->sf_count[sfmode]--;
  1335. if (psf->sf_count[sfmode] == 0) {
  1336. ip_rt_multicast_event(pmc->interface);
  1337. }
  1338. if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
  1339. #ifdef CONFIG_IP_MULTICAST
  1340. struct in_device *in_dev = pmc->interface;
  1341. #endif
  1342. /* no more filters for this source */
  1343. if (psf_prev)
  1344. psf_prev->sf_next = psf->sf_next;
  1345. else
  1346. pmc->sources = psf->sf_next;
  1347. #ifdef CONFIG_IP_MULTICAST
  1348. if (psf->sf_oldin &&
  1349. !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
  1350. psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
  1351. IGMP_Unsolicited_Report_Count;
  1352. psf->sf_next = pmc->tomb;
  1353. pmc->tomb = psf;
  1354. rv = 1;
  1355. } else
  1356. #endif
  1357. kfree(psf);
  1358. }
  1359. return rv;
  1360. }
  1361. #ifndef CONFIG_IP_MULTICAST
  1362. #define igmp_ifc_event(x) do { } while (0)
  1363. #endif
static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int changerec = 0;
	int i, err;

	if (!in_dev)
		return -ENODEV;
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		rcu_read_unlock();
		return -ESRCH;
	}
	spin_lock_bh(&pmc->lock);
	rcu_read_unlock();
#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	if (!delta) {
		err = -EINVAL;
		if (!pmc->sfcount[sfmode])
			goto out_unlock;
		pmc->sfcount[sfmode]--;
	}
	err = 0;
	for (i = 0; i < sfcount; i++) {
		int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;
		if (!err && rv < 0)
			err = rv;
	}
	if (pmc->sfmode == MCAST_EXCLUDE &&
	    pmc->sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->sfcount[MCAST_INCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
#endif

		/* filter mode change */
		pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
			IGMP_Unsolicited_Report_Count;
		in_dev->mr_ifc_count = pmc->crcount;
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(pmc->interface);
	} else if (sf_setstate(pmc) || changerec) {
		igmp_ifc_event(pmc->interface);
#endif
	}
out_unlock:
	spin_unlock_bh(&pmc->lock);
	return err;
}

/*
 * Add multicast single-source filter to the interface list
 */
static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
	__be32 *psfsrc)
{
	struct ip_sf_list *psf, *psf_prev;

	psf_prev = NULL;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (psf->sf_inaddr == *psfsrc)
			break;
		psf_prev = psf;
	}
	if (!psf) {
		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
		if (!psf)
			return -ENOBUFS;
		psf->sf_inaddr = *psfsrc;
		if (psf_prev) {
			psf_prev->sf_next = psf;
		} else
			pmc->sources = psf;
	}
	psf->sf_count[sfmode]++;
	if (psf->sf_count[sfmode] == 1) {
		ip_rt_multicast_event(pmc->interface);
	}
	return 0;
}

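/*
 * sf_markstate() snapshots, per source, whether traffic from it is
 * currently being accepted (sf_oldin) before a filter update;
 * sf_setstate() then compares that snapshot with the new state and arms
 * retransmission counters (plus "tomb" delete records) for the IGMPv3
 * change reports that have to follow.
 */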
#ifdef CONFIG_IP_MULTICAST
static void sf_markstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];

	for (psf = pmc->sources; psf; psf = psf->sf_next)
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			psf->sf_oldin = mca_xcount ==
				psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
}

static int sf_setstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf, *dpsf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
	int qrv = pmc->interface->mr_qrv;
	int new_in, rv;

	rv = 0;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				struct ip_sf_list *prev = NULL;

				for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next) {
					if (dpsf->sf_inaddr == psf->sf_inaddr)
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						prev->sf_next = dpsf->sf_next;
					else
						pmc->tomb = dpsf->sf_next;
					kfree(dpsf);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {
			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */
			for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next)
				if (dpsf->sf_inaddr == psf->sf_inaddr)
					break;
			if (!dpsf) {
				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
				if (!dpsf)
					continue;
				*dpsf = *psf;
				/* pmc->lock held by callers */
				dpsf->sf_next = pmc->tomb;
				pmc->tomb = dpsf;
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}
#endif

/*
 * Add multicast source filter list to the interface list
 */
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int isexclude;
	int i, err;

	if (!in_dev)
		return -ENODEV;
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		rcu_read_unlock();
		return -ESRCH;
	}
	spin_lock_bh(&pmc->lock);
	rcu_read_unlock();

#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	isexclude = pmc->sfmode == MCAST_EXCLUDE;
	if (!delta)
		pmc->sfcount[sfmode]++;
	err = 0;
	for (i = 0; i < sfcount; i++) {
		err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]);
		if (err)
			break;
	}
	if (err) {
		int j;

		if (!delta)
			pmc->sfcount[sfmode]--;
		for (j = 0; j < i; j++)
			(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
		in_dev = pmc->interface;
#endif

		/* filter mode change */
		if (pmc->sfcount[MCAST_EXCLUDE])
			pmc->sfmode = MCAST_EXCLUDE;
		else if (pmc->sfcount[MCAST_INCLUDE])
			pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		/* else no filters; keep old mode for reports */

		pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
			IGMP_Unsolicited_Report_Count;
		in_dev->mr_ifc_count = pmc->crcount;
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(in_dev);
	} else if (sf_setstate(pmc)) {
		igmp_ifc_event(in_dev);
#endif
	}
	spin_unlock_bh(&pmc->lock);
	return err;
}

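/*
 * Free all per-source state hanging off pmc and reset the group to its
 * default EXCLUDE-nothing (accept from any source) mode.
 */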
static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf, *nextpsf;

	for (psf = pmc->tomb; psf; psf = nextpsf) {
		nextpsf = psf->sf_next;
		kfree(psf);
	}
	pmc->tomb = NULL;
	for (psf = pmc->sources; psf; psf = nextpsf) {
		nextpsf = psf->sf_next;
		kfree(psf);
	}
	pmc->sources = NULL;
	pmc->sfmode = MCAST_EXCLUDE;
	pmc->sfcount[MCAST_INCLUDE] = 0;
	pmc->sfcount[MCAST_EXCLUDE] = 1;
}

/*
 *	Join a multicast group
 */
int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
{
	int err;
	__be32 addr = imr->imr_multiaddr.s_addr;
	struct ip_mc_socklist *iml = NULL, *i;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	int ifindex;
	int count = 0;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	rtnl_lock();

	in_dev = ip_mc_find_dev(net, imr);

	if (!in_dev) {
		iml = NULL;
		err = -ENODEV;
		goto done;
	}

	err = -EADDRINUSE;
	ifindex = imr->imr_ifindex;
	for_each_pmc_rtnl(inet, i) {
		if (i->multi.imr_multiaddr.s_addr == addr &&
		    i->multi.imr_ifindex == ifindex)
			goto done;
		count++;
	}
	err = -ENOBUFS;
	if (count >= sysctl_igmp_max_memberships)
		goto done;
	iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
	if (iml == NULL)
		goto done;

	memcpy(&iml->multi, imr, sizeof(*imr));
	iml->next_rcu = inet->mc_list;
	iml->sflist = NULL;
	iml->sfmode = MCAST_EXCLUDE;
	rcu_assign_pointer(inet->mc_list, iml);
	ip_mc_inc_group(in_dev, addr);
	err = 0;
done:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(ip_mc_join_group);

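/*
 * Tear down the per-socket source filter attached to iml and withdraw the
 * matching source state from the interface before the membership goes away.
 */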
static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
			   struct in_device *in_dev)
{
	struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
	int err;

	if (psf == NULL) {
		/* any-source empty exclude case */
		return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
			iml->sfmode, 0, NULL, 0);
	}
	err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
			iml->sfmode, psf->sl_count, psf->sl_addr, 0);
	RCU_INIT_POINTER(iml->sflist, NULL);
	/* decrease mem now to avoid the memleak warning */
	atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
	kfree_rcu(psf, rcu);
	return err;
}

/*
 *	Ask a socket to leave a group.
 */
int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
	struct ip_mc_socklist __rcu **imlp;
	struct in_device *in_dev;
	struct net *net = sock_net(sk);
	__be32 group = imr->imr_multiaddr.s_addr;
	u32 ifindex;
	int ret = -EADDRNOTAVAIL;

	rtnl_lock();
	in_dev = ip_mc_find_dev(net, imr);
	ifindex = imr->imr_ifindex;
	for (imlp = &inet->mc_list;
	     (iml = rtnl_dereference(*imlp)) != NULL;
	     imlp = &iml->next_rcu) {
		if (iml->multi.imr_multiaddr.s_addr != group)
			continue;
		if (ifindex) {
			if (iml->multi.imr_ifindex != ifindex)
				continue;
		} else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
				iml->multi.imr_address.s_addr)
			continue;

		(void) ip_mc_leave_src(sk, iml, in_dev);

		*imlp = iml->next_rcu;

		if (in_dev)
			ip_mc_dec_group(in_dev, group);
		rtnl_unlock();
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
		kfree_rcu(iml, rcu);
		return 0;
	}
	if (!in_dev)
		ret = -ENODEV;
	rtnl_unlock();
	return ret;
}
EXPORT_SYMBOL(ip_mc_leave_group);

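/*
 * Add (add != 0) or delete a single source address, in filter mode omode,
 * on an existing membership of this socket - the per-source setsockopt
 * path (IP_ADD/DROP_SOURCE_MEMBERSHIP, IP_(UN)BLOCK_SOURCE and the MCAST_*
 * equivalents).  Keeps the socket's source list and the interface filter
 * in sync.
 */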
int ip_mc_source(int add, int omode, struct sock *sk, struct
	ip_mreq_source *mreqs, int ifindex)
{
	int err;
	struct ip_mreqn imr;
	__be32 addr = mreqs->imr_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev = NULL;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;
	int i, j, rv;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	rtnl_lock();

	imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
	imr.imr_address.s_addr = mreqs->imr_interface;
	imr.imr_ifindex = ifindex;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if ((pmc->multi.imr_multiaddr.s_addr ==
		     imr.imr_multiaddr.s_addr) &&
		    (pmc->multi.imr_ifindex == imr.imr_ifindex))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	/* if a source filter was set, must be the same mode as before */
	if (pmc->sflist) {
		if (pmc->sfmode != omode) {
			err = -EINVAL;
			goto done;
		}
	} else if (pmc->sfmode != omode) {
		/* allow mode switches for empty-set filters */
		ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0,
			NULL, 0);
		pmc->sfmode = omode;
	}

	psl = rtnl_dereference(pmc->sflist);
	if (!add) {
		if (!psl)
			goto done;	/* err = -EADDRNOTAVAIL */
		rv = !0;
		for (i = 0; i < psl->sl_count; i++) {
			rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
				sizeof(__be32));
			if (rv == 0)
				break;
		}
		if (rv)		/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */

		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
			leavegroup = 1;
			goto done;
		}

		/* update the interface filter */
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
			&mreqs->imr_sourceaddr, 1);

		for (j = i + 1; j < psl->sl_count; j++)
			psl->sl_addr[j - 1] = psl->sl_addr[j];
		psl->sl_count--;
		err = 0;
		goto done;
	}
	/* else, add a new source to the filter */

	if (psl && psl->sl_count >= sysctl_igmp_max_msf) {
		err = -ENOBUFS;
		goto done;
	}
	if (!psl || psl->sl_count == psl->sl_max) {
		struct ip_sf_socklist *newpsl;
		int count = IP_SFBLOCK;

		if (psl)
			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, IP_SFLSIZE(count), GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = count;
		newpsl->sl_count = count - IP_SFBLOCK;
		if (psl) {
			for (i = 0; i < psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			/* decrease mem now to avoid the memleak warning */
			atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
			kfree_rcu(psl, rcu);
		}
		rcu_assign_pointer(pmc->sflist, newpsl);
		psl = newpsl;
	}
	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
	for (i = 0; i < psl->sl_count; i++) {
		rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
			sizeof(__be32));
		if (rv == 0)
			break;
	}
	if (rv == 0)		/* address already there is an error */
		goto done;
	for (j = psl->sl_count - 1; j >= i; j--)
		psl->sl_addr[j + 1] = psl->sl_addr[j];
	psl->sl_addr[i] = mreqs->imr_sourceaddr;
	psl->sl_count++;
	err = 0;
	/* update the interface list */
	ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
		&mreqs->imr_sourceaddr, 1);
done:
	rtnl_unlock();
	if (leavegroup)
		return ip_mc_leave_group(sk, &imr);
	return err;
}

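/*
 * Install a complete source filter for one group on this socket (the
 * full-state IP_MSFILTER/MCAST_MSFILTER setsockopt path): the supplied
 * mode and source list replace whatever was there before, and the
 * interface filter is updated to match.
 */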
int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
{
	int err = 0;
	struct ip_mreqn	imr;
	__be32 addr = msf->imsf_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *newpsl, *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;
	if (msf->imsf_fmode != MCAST_INCLUDE &&
	    msf->imsf_fmode != MCAST_EXCLUDE)
		return -EINVAL;

	rtnl_lock();

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = ifindex;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}

	/* special case - (INCLUDE, empty) == LEAVE_GROUP */
	if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) {
		leavegroup = 1;
		goto done;
	}

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
		    pmc->multi.imr_ifindex == imr.imr_ifindex)
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	if (msf->imsf_numsrc) {
		newpsl = sock_kmalloc(sk, IP_SFLSIZE(msf->imsf_numsrc),
				      GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = newpsl->sl_count = msf->imsf_numsrc;
		memcpy(newpsl->sl_addr, msf->imsf_slist,
			msf->imsf_numsrc * sizeof(msf->imsf_slist[0]));
		err = ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
			msf->imsf_fmode, newpsl->sl_count, newpsl->sl_addr, 0);
		if (err) {
			sock_kfree_s(sk, newpsl, IP_SFLSIZE(newpsl->sl_max));
			goto done;
		}
	} else {
		newpsl = NULL;
		(void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
				     msf->imsf_fmode, 0, NULL, 0);
	}
	psl = rtnl_dereference(pmc->sflist);
	if (psl) {
		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
			psl->sl_count, psl->sl_addr, 0);
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
		kfree_rcu(psl, rcu);
	} else
		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
			0, NULL, 0);
	rcu_assign_pointer(pmc->sflist, newpsl);
	pmc->sfmode = msf->imsf_fmode;
	err = 0;
done:
	rtnl_unlock();
	if (leavegroup)
		err = ip_mc_leave_group(sk, &imr);
	return err;
}

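/*
 * Return the current filter mode and as many sources as fit in the
 * caller's buffer for one group (the IP_MSFILTER getsockopt path).
 */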
int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
	struct ip_msfilter __user *optval, int __user *optlen)
{
	int err, len, count, copycount;
	struct ip_mreqn	imr;
	__be32 addr = msf->imsf_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	rtnl_lock();

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = 0;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
		    pmc->multi.imr_ifindex == imr.imr_ifindex)
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	msf->imsf_fmode = pmc->sfmode;
	psl = rtnl_dereference(pmc->sflist);
	rtnl_unlock();
	if (!psl) {
		len = 0;
		count = 0;
	} else {
		count = psl->sl_count;
	}
	copycount = count < msf->imsf_numsrc ? count : msf->imsf_numsrc;
	len = copycount * sizeof(psl->sl_addr[0]);
	msf->imsf_numsrc = count;
	if (put_user(IP_MSFILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, msf, IP_MSFILTER_SIZE(0))) {
		return -EFAULT;
	}
	if (len &&
	    copy_to_user(&optval->imsf_slist[0], psl->sl_addr, len))
		return -EFAULT;
	return 0;
done:
	rtnl_unlock();
	return err;
}

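/*
 * As ip_mc_msfget(), but using the protocol-independent group_filter
 * layout with sockaddr_storage entries (the MCAST_MSFILTER getsockopt
 * path).
 */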
int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
	struct group_filter __user *optval, int __user *optlen)
{
	int err, i, count, copycount;
	struct sockaddr_in *psin;
	__be32 addr;
	struct ip_mc_socklist *pmc;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;

	psin = (struct sockaddr_in *)&gsf->gf_group;
	if (psin->sin_family != AF_INET)
		return -EINVAL;
	addr = psin->sin_addr.s_addr;
	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	rtnl_lock();

	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == addr &&
		    pmc->multi.imr_ifindex == gsf->gf_interface)
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	gsf->gf_fmode = pmc->sfmode;
	psl = rtnl_dereference(pmc->sflist);
	rtnl_unlock();
	count = psl ? psl->sl_count : 0;
	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
	gsf->gf_numsrc = count;
	if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
		return -EFAULT;
	}
	for (i = 0; i < copycount; i++) {
		struct sockaddr_storage ss;

		psin = (struct sockaddr_in *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin->sin_family = AF_INET;
		psin->sin_addr.s_addr = psl->sl_addr[i];
		if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
			return -EFAULT;
	}
	return 0;
done:
	rtnl_unlock();
	return err;
}

/*
 * check if a multicast source filter allows delivery for a given <src,dst,intf>
 */
int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *pmc;
	struct ip_sf_socklist *psl;
	int i;
	int ret;

	ret = 1;
	if (!ipv4_is_multicast(loc_addr))
		goto out;

	rcu_read_lock();
	for_each_pmc_rcu(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
		    pmc->multi.imr_ifindex == dif)
			break;
	}
	ret = inet->mc_all;
	if (!pmc)
		goto unlock;
	psl = rcu_dereference(pmc->sflist);
	ret = (pmc->sfmode == MCAST_EXCLUDE);
	if (!psl)
		goto unlock;

	for (i = 0; i < psl->sl_count; i++) {
		if (psl->sl_addr[i] == rmt_addr)
			break;
	}
	ret = 0;
	if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
		goto unlock;
	if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
		goto unlock;
	ret = 1;
unlock:
	rcu_read_unlock();
out:
	return ret;
}

/*
 *	A socket is closing.
 */
void ip_mc_drop_socket(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
	struct net *net = sock_net(sk);

	if (inet->mc_list == NULL)
		return;

	rtnl_lock();
	while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
		struct in_device *in_dev;

		inet->mc_list = iml->next_rcu;
		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
		(void) ip_mc_leave_src(sk, iml, in_dev);
		if (in_dev != NULL)
			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
		kfree_rcu(iml, rcu);
	}
	rtnl_unlock();
}

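/*
 * Receive-path check: is delivery of a packet addressed to mc_addr
 * (optionally from src_addr) permitted by the filters installed on
 * in_dev?  IGMP packets are always accepted.
 */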
/* called with rcu_read_lock() */
int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 proto)
{
	struct ip_mc_list *im;
	struct ip_mc_list __rcu **mc_hash;
	struct ip_sf_list *psf;
	int rv = 0;

	mc_hash = rcu_dereference(in_dev->mc_hash);
	if (mc_hash) {
		u32 hash = hash_32((__force u32)mc_addr, MC_HASH_SZ_LOG);

		for (im = rcu_dereference(mc_hash[hash]);
		     im != NULL;
		     im = rcu_dereference(im->next_hash)) {
			if (im->multiaddr == mc_addr)
				break;
		}
	} else {
		for_each_pmc_rcu(in_dev, im) {
			if (im->multiaddr == mc_addr)
				break;
		}
	}
	if (im && proto == IPPROTO_IGMP) {
		rv = 1;
	} else if (im) {
		if (src_addr) {
			for (psf = im->sources; psf; psf = psf->sf_next) {
				if (psf->sf_inaddr == src_addr)
					break;
			}
			if (psf)
				rv = psf->sf_count[MCAST_INCLUDE] ||
					psf->sf_count[MCAST_EXCLUDE] !=
					im->sfcount[MCAST_EXCLUDE];
			else
				rv = im->sfcount[MCAST_EXCLUDE] != 0;
		} else
			rv = 1; /* unspecified source; tentatively allow */
	}
	return rv;
}

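/*
 * /proc/net/igmp - dump every interface's multicast memberships, querier
 * version and report/timer state for this network namespace.
 */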
#if defined(CONFIG_PROC_FS)
struct igmp_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *in_dev;
};

#define	igmp_mc_seq_private(seq)	((struct igmp_mc_iter_state *)(seq)->private)

static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_mc_list *im = NULL;
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	state->in_dev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *in_dev;

		in_dev = __in_dev_get_rcu(state->dev);
		if (!in_dev)
			continue;
		im = rcu_dereference(in_dev->mc_list);
		if (im) {
			state->in_dev = in_dev;
			break;
		}
	}
	return im;
}

static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	im = rcu_dereference(im->next_rcu);
	while (!im) {
		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			state->in_dev = NULL;
			break;
		}
		state->in_dev = __in_dev_get_rcu(state->dev);
		if (!state->in_dev)
			continue;
		im = rcu_dereference(state->in_dev->mc_list);
	}
	return im;
}

static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip_mc_list *im = igmp_mc_get_first(seq);

	if (im)
		while (pos && (im = igmp_mc_get_next(seq, im)) != NULL)
			--pos;
	return pos ? NULL : im;
}

static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_mc_list *im;

	if (v == SEQ_START_TOKEN)
		im = igmp_mc_get_first(seq);
	else
		im = igmp_mc_get_next(seq, v);
	++*pos;
	return im;
}

static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	state->in_dev = NULL;
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp_mc_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "Idx\tDevice : Count Querier\tGroup Users Timer\tReporter\n");
	else {
		struct ip_mc_list *im = (struct ip_mc_list *)v;
		struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
		char *querier;
		long delta;

#ifdef CONFIG_IP_MULTICAST
		querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
			  IGMP_V2_SEEN(state->in_dev) ? "V2" :
			  "V3";
#else
		querier = "NONE";
#endif

		if (rcu_dereference(state->in_dev->mc_list) == im) {
			seq_printf(seq, "%d\t%-10s: %5d %7s\n",
				   state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
		}

		delta = im->timer.expires - jiffies;
		seq_printf(seq,
			   "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
			   im->multiaddr, im->users,
			   im->tm_running,
			   im->tm_running ? jiffies_delta_to_clock_t(delta) : 0,
			   im->reporter);
	}
	return 0;
}

static const struct seq_operations igmp_mc_seq_ops = {
	.start	=	igmp_mc_seq_start,
	.next	=	igmp_mc_seq_next,
	.stop	=	igmp_mc_seq_stop,
	.show	=	igmp_mc_seq_show,
};

static int igmp_mc_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &igmp_mc_seq_ops,
			sizeof(struct igmp_mc_iter_state));
}

static const struct file_operations igmp_mc_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	igmp_mc_seq_open,
	.read		=	seq_read,
	.llseek		=	seq_lseek,
	.release	=	seq_release_net,
};

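/*
 * /proc/net/mcfilter - dump each group's per-source INCLUDE/EXCLUDE
 * reference counts across all interfaces.
 */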
struct igmp_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *idev;
	struct ip_mc_list *im;
};

#define igmp_mcf_seq_private(seq)	((struct igmp_mcf_iter_state *)(seq)->private)

static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_sf_list *psf = NULL;
	struct ip_mc_list *im = NULL;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *idev;

		idev = __in_dev_get_rcu(state->dev);
		if (unlikely(idev == NULL))
			continue;
		im = rcu_dereference(idev->mc_list);
		if (likely(im != NULL)) {
			spin_lock_bh(&im->lock);
			psf = im->sources;
			if (likely(psf != NULL)) {
				state->im = im;
				state->idev = idev;
				break;
			}
			spin_unlock_bh(&im->lock);
		}
	}
	return psf;
}

static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_list *psf)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	psf = psf->sf_next;
	while (!psf) {
		spin_unlock_bh(&state->im->lock);
		state->im = state->im->next;
		while (!state->im) {
			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in_dev_get_rcu(state->dev);
			if (!state->idev)
				continue;
			state->im = rcu_dereference(state->idev->mc_list);
		}
		if (!state->im)
			break;
		spin_lock_bh(&state->im->lock);
		psf = state->im->sources;
	}
out:
	return psf;
}

static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip_sf_list *psf = igmp_mcf_get_first(seq);

	if (psf)
		while (pos && (psf = igmp_mcf_get_next(seq, psf)) != NULL)
			--pos;
	return pos ? NULL : psf;
}

static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_sf_list *psf;

	if (v == SEQ_START_TOKEN)
		psf = igmp_mcf_get_first(seq);
	else
		psf = igmp_mcf_get_next(seq, v);
	++*pos;
	return psf;
}

static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	if (likely(state->im != NULL)) {
		spin_unlock_bh(&state->im->lock);
		state->im = NULL;
	}
	state->idev = NULL;
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip_sf_list *psf = (struct ip_sf_list *)v;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq,
			   "%3s %6s "
			   "%10s %10s %6s %6s\n", "Idx",
			   "Device", "MCA",
			   "SRC", "INC", "EXC");
	} else {
		seq_printf(seq,
			   "%3d %6.6s 0x%08x "
			   "0x%08x %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   ntohl(state->im->multiaddr),
			   ntohl(psf->sf_inaddr),
			   psf->sf_count[MCAST_INCLUDE],
			   psf->sf_count[MCAST_EXCLUDE]);
	}
	return 0;
}

static const struct seq_operations igmp_mcf_seq_ops = {
	.start	=	igmp_mcf_seq_start,
	.next	=	igmp_mcf_seq_next,
	.stop	=	igmp_mcf_seq_stop,
	.show	=	igmp_mcf_seq_show,
};

static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &igmp_mcf_seq_ops,
			sizeof(struct igmp_mcf_iter_state));
}

static const struct file_operations igmp_mcf_seq_fops = {
	.owner		=	THIS_MODULE,
	.open		=	igmp_mcf_seq_open,
	.read		=	seq_read,
	.llseek		=	seq_lseek,
	.release	=	seq_release_net,
};

static int __net_init igmp_net_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create("igmp", S_IRUGO, net->proc_net, &igmp_mc_seq_fops);
	if (!pde)
		goto out_igmp;
	pde = proc_create("mcfilter", S_IRUGO, net->proc_net,
			  &igmp_mcf_seq_fops);
	if (!pde)
		goto out_mcfilter;
	return 0;

out_mcfilter:
	remove_proc_entry("igmp", net->proc_net);
out_igmp:
	return -ENOMEM;
}

static void __net_exit igmp_net_exit(struct net *net)
{
	remove_proc_entry("mcfilter", net->proc_net);
	remove_proc_entry("igmp", net->proc_net);
}

static struct pernet_operations igmp_net_ops = {
	.init = igmp_net_init,
	.exit = igmp_net_exit,
};

int __init igmp_mc_proc_init(void)
{
	return register_pernet_subsys(&igmp_net_ops);
}
#endif