/* igmp.c */

/*
 *	Linux NET3:	Internet Group Management Protocol  [IGMP]
 *
 *	This code implements the IGMP protocol as defined in RFC1112. There has
 *	been a further revision of this protocol since which is now supported.
 *
 *	If you have trouble with this module be careful what gcc you have used,
 *	the older version didn't come out right using gcc 2.5.8, the newer one
 *	seems to fall out with gcc 2.6.2.
 *
 *	Authors:
 *		Alan Cox <alan@lxorguk.ukuu.org.uk>
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Fixes:
 *
 *	Alan Cox	:	Added lots of __inline__ to optimise
 *				the memory usage of all the tiny little
 *				functions.
 *	Alan Cox	:	Dumped the header building experiment.
 *	Alan Cox	:	Minor tweaks ready for multicast routing
 *				and extended IGMP protocol.
 *	Alan Cox	:	Removed a load of inline directives. Gcc 2.5.8
 *				writes utterly bogus code otherwise (sigh),
 *				fixed IGMP loopback to behave in the manner
 *				desired by mrouted, fixed the fact it had been
 *				broken since 1.3.6 and cleaned up a few minor
 *				points.
 *
 *	Chih-Jen Chang	:	Tried to revise IGMP to Version 2
 *	Tsu-Sheng Tsao		E-mail: chihjenc@scf.usc.edu and tsusheng@scf.usc.edu
 *				The enhancements are mainly based on Steve Deering's
 *				ipmulti-3.5 source code.
 *	Chih-Jen Chang	:	Added the igmp_get_mrouter_info and
 *	Tsu-Sheng Tsao		igmp_set_mrouter_info to keep track of
 *				the mrouted version on that device.
 *	Chih-Jen Chang	:	Added the max_resp_time parameter to
 *	Tsu-Sheng Tsao		igmp_heard_query(). Using this parameter
 *				to identify the multicast router version
 *				and do what the IGMP version 2 specified.
 *	Chih-Jen Chang	:	Added a timer to revert to IGMP V2 router
 *	Tsu-Sheng Tsao		if the specified time expired.
 *	Alan Cox	:	Stop IGMP from 0.0.0.0 being accepted.
 *	Alan Cox	:	Use GFP_ATOMIC in the right places.
 *	Christian Daudt	:	igmp timer wasn't set for local group
 *				memberships but was being deleted,
 *				which caused a "del_timer() called
 *				from %p with timer not initialized\n"
 *				message (960131).
 *	Christian Daudt	:	removed del_timer from
 *				igmp_timer_expire function (960205).
 *	Christian Daudt	:	igmp_heard_report now only calls
 *				igmp_timer_expire if tm->running is
 *				true (960216).
 *	Malcolm Beattie	:	ttl comparison wrong in igmp_rcv made
 *				igmp_heard_query never trigger. Expiry
 *				miscalculation fixed in igmp_heard_query
 *				and random() made to return unsigned to
 *				prevent negative expiry times.
 *	Alexey Kuznetsov:	Wrong group leaving behaviour, backport
 *				fix from pending 2.1.x patches.
 *	Alan Cox	:	Forgot to enable FDDI support earlier.
 *	Alexey Kuznetsov:	Fixed leaving groups on device down.
 *	Alexey Kuznetsov:	Accordance with the igmp-v2-06 draft.
 *	David L Stevens	:	IGMPv3 support, with help from
 *				Vinay Kulkarni
 */

#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/jiffies.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/inetdevice.h>
#include <linux/igmp.h>
#include <linux/if_arp.h>
#include <linux/rtnetlink.h>
#include <linux/times.h>
#include <linux/pkt_sched.h>

#include <net/net_namespace.h>
#include <net/arp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/sock.h>
#include <net/checksum.h>
#include <linux/netfilter_ipv4.h>
#ifdef CONFIG_IP_MROUTE
#include <linux/mroute.h>
#endif
#ifdef CONFIG_PROC_FS
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#endif

#define IP_MAX_MEMBERSHIPS	20
#define IP_MAX_MSF		10

#ifdef CONFIG_IP_MULTICAST
/* Parameter names and values are taken from the igmp-v2-06 draft. */

#define IGMP_V1_Router_Present_Timeout		(400*HZ)
#define IGMP_V2_Router_Present_Timeout		(400*HZ)
#define IGMP_V2_Unsolicited_Report_Interval	(10*HZ)
#define IGMP_V3_Unsolicited_Report_Interval	(1*HZ)
#define IGMP_Query_Response_Interval		(10*HZ)
#define IGMP_Unsolicited_Report_Count		2

#define IGMP_Initial_Report_Delay		(1)

/* IGMP_Initial_Report_Delay is not from the IGMP specs!
 * The IGMP specs require reporting membership immediately after
 * joining a group, but we delay the first report by a
 * small interval. It seems more natural and still does not
 * contradict the specs provided this delay is small enough.
 */

#define IGMP_V1_SEEN(in_dev) \
	(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 1 || \
	 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 1 || \
	 ((in_dev)->mr_v1_seen && \
	  time_before(jiffies, (in_dev)->mr_v1_seen)))
#define IGMP_V2_SEEN(in_dev) \
	(IPV4_DEVCONF_ALL(dev_net(in_dev->dev), FORCE_IGMP_VERSION) == 2 || \
	 IN_DEV_CONF_GET((in_dev), FORCE_IGMP_VERSION) == 2 || \
	 ((in_dev)->mr_v2_seen && \
	  time_before(jiffies, (in_dev)->mr_v2_seen)))
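
/* Host compatibility mode (RFC 3376, section 7.2.1): while a v1 or v2
 * query has recently been heard on the interface (mr_v1_seen and
 * mr_v2_seen hold expiry times set in igmp_heard_query()), or while the
 * force_igmp_version sysctl pins the version, the host answers with the
 * older report format instead of IGMPv3.
 */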

static int unsolicited_report_interval(struct in_device *in_dev)
{
	int interval_ms, interval_jiffies;

	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
		interval_ms = IN_DEV_CONF_GET(
			in_dev,
			IGMPV2_UNSOLICITED_REPORT_INTERVAL);
	else /* v3 */
		interval_ms = IN_DEV_CONF_GET(
			in_dev,
			IGMPV3_UNSOLICITED_REPORT_INTERVAL);

	interval_jiffies = msecs_to_jiffies(interval_ms);

	/* _timer functions can't handle a delay of 0 jiffies so ensure
	 * we always return a positive value.
	 */
	if (interval_jiffies <= 0)
		interval_jiffies = 1;
	return interval_jiffies;
}

static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im);
static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr);
static void igmpv3_clear_delrec(struct in_device *in_dev);
static int sf_setstate(struct ip_mc_list *pmc);
static void sf_markstate(struct ip_mc_list *pmc);
#endif
static void ip_mc_clear_src(struct ip_mc_list *pmc);
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta);

static void ip_ma_put(struct ip_mc_list *im)
{
	if (atomic_dec_and_test(&im->refcnt)) {
		in_dev_put(im->interface);
		kfree_rcu(im, rcu);
	}
}

#define for_each_pmc_rcu(in_dev, pmc)				\
	for (pmc = rcu_dereference(in_dev->mc_list);		\
	     pmc != NULL;					\
	     pmc = rcu_dereference(pmc->next_rcu))

#define for_each_pmc_rtnl(in_dev, pmc)				\
	for (pmc = rtnl_dereference(in_dev->mc_list);		\
	     pmc != NULL;					\
	     pmc = rtnl_dereference(pmc->next_rcu))

#ifdef CONFIG_IP_MULTICAST

/*
 *	Timer management
 */

static void igmp_stop_timer(struct ip_mc_list *im)
{
	spin_lock_bh(&im->lock);
	if (del_timer(&im->timer))
		atomic_dec(&im->refcnt);
	im->tm_running = 0;
	im->reporter = 0;
	im->unsolicit_count = 0;
	spin_unlock_bh(&im->lock);
}

/* It must be called with locked im->lock */
static void igmp_start_timer(struct ip_mc_list *im, int max_delay)
{
	int tv = net_random() % max_delay;

	im->tm_running = 1;
	if (!mod_timer(&im->timer, jiffies+tv+2))
		atomic_inc(&im->refcnt);
}
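
/* A pending timer holds one reference on the ip_mc_list: mod_timer()
 * returns 0 when the timer was not already pending, in which case a
 * reference is taken here and dropped again by ip_ma_put() at the end of
 * igmp_timer_expire(), or by del_timer() callers such as
 * igmp_stop_timer(). This keeps the entry alive until the timer has run.
 */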

static void igmp_gq_start_timer(struct in_device *in_dev)
{
	int tv = net_random() % in_dev->mr_maxdelay;

	in_dev->mr_gq_running = 1;
	if (!mod_timer(&in_dev->mr_gq_timer, jiffies+tv+2))
		in_dev_hold(in_dev);
}

static void igmp_ifc_start_timer(struct in_device *in_dev, int delay)
{
	int tv = net_random() % delay;

	if (!mod_timer(&in_dev->mr_ifc_timer, jiffies+tv+2))
		in_dev_hold(in_dev);
}

static void igmp_mod_timer(struct ip_mc_list *im, int max_delay)
{
	spin_lock_bh(&im->lock);
	im->unsolicit_count = 0;
	if (del_timer(&im->timer)) {
		if ((long)(im->timer.expires-jiffies) < max_delay) {
			add_timer(&im->timer);
			im->tm_running = 1;
			spin_unlock_bh(&im->lock);
			return;
		}
		atomic_dec(&im->refcnt);
	}
	igmp_start_timer(im, max_delay);
	spin_unlock_bh(&im->lock);
}

/*
 *	Send an IGMP report.
 */

#define IGMP_SIZE (sizeof(struct igmphdr)+sizeof(struct iphdr)+4)
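
/* IGMP_SIZE: an 8-byte IGMP header plus a 20-byte IPv4 header plus 4
 * bytes for the Router Alert IP option that all IGMP messages carry.
 */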

static int is_in(struct ip_mc_list *pmc, struct ip_sf_list *psf, int type,
	int gdeleted, int sdeleted)
{
	switch (type) {
	case IGMPV3_MODE_IS_INCLUDE:
	case IGMPV3_MODE_IS_EXCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		if (!(pmc->gsquery && !psf->sf_gsresp)) {
			if (pmc->sfmode == MCAST_INCLUDE)
				return 1;
			/* don't include if this source is excluded
			 * in all filters
			 */
			if (psf->sf_count[MCAST_INCLUDE])
				return type == IGMPV3_MODE_IS_INCLUDE;
			return pmc->sfcount[MCAST_EXCLUDE] ==
				psf->sf_count[MCAST_EXCLUDE];
		}
		return 0;
	case IGMPV3_CHANGE_TO_INCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		return psf->sf_count[MCAST_INCLUDE] != 0;
	case IGMPV3_CHANGE_TO_EXCLUDE:
		if (gdeleted || sdeleted)
			return 0;
		if (pmc->sfcount[MCAST_EXCLUDE] == 0 ||
		    psf->sf_count[MCAST_INCLUDE])
			return 0;
		return pmc->sfcount[MCAST_EXCLUDE] ==
			psf->sf_count[MCAST_EXCLUDE];
	case IGMPV3_ALLOW_NEW_SOURCES:
		if (gdeleted || !psf->sf_crcount)
			return 0;
		return (pmc->sfmode == MCAST_INCLUDE) ^ sdeleted;
	case IGMPV3_BLOCK_OLD_SOURCES:
		if (pmc->sfmode == MCAST_INCLUDE)
			return gdeleted || (psf->sf_crcount && sdeleted);
		return psf->sf_crcount && !gdeleted && !sdeleted;
	}
	return 0;
}

static int
igmp_scount(struct ip_mc_list *pmc, int type, int gdeleted, int sdeleted)
{
	struct ip_sf_list *psf;
	int scount = 0;

	for (psf=pmc->sources; psf; psf=psf->sf_next) {
		if (!is_in(pmc, psf, type, gdeleted, sdeleted))
			continue;
		scount++;
	}
	return scount;
}

#define igmp_skb_size(skb) (*(unsigned int *)((skb)->cb))

static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size)
{
	struct sk_buff *skb;
	struct rtable *rt;
	struct iphdr *pip;
	struct igmpv3_report *pig;
	struct net *net = dev_net(dev);
	struct flowi4 fl4;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;

	while (1) {
		skb = alloc_skb(size + hlen + tlen,
				GFP_ATOMIC | __GFP_NOWARN);
		if (skb)
			break;
		size >>= 1;
		if (size < 256)
			return NULL;
	}
	skb->priority = TC_PRIO_CONTROL;
	igmp_skb_size(skb) = size;

	rt = ip_route_output_ports(net, &fl4, NULL, IGMPV3_ALL_MCR, 0,
				   0, 0,
				   IPPROTO_IGMP, 0, dev->ifindex);
	if (IS_ERR(rt)) {
		kfree_skb(skb);
		return NULL;
	}

	skb_dst_set(skb, &rt->dst);
	skb->dev = dev;

	skb_reserve(skb, hlen);

	skb_reset_network_header(skb);
	pip = ip_hdr(skb);
	skb_put(skb, sizeof(struct iphdr) + 4);

	pip->version  = 4;
	pip->ihl      = (sizeof(struct iphdr)+4)>>2;
	pip->tos      = 0xc0;
	pip->frag_off = htons(IP_DF);
	pip->ttl      = 1;
	pip->daddr    = fl4.daddr;
	pip->saddr    = fl4.saddr;
	pip->protocol = IPPROTO_IGMP;
	pip->tot_len  = 0;	/* filled in later */
	ip_select_ident(pip, &rt->dst, NULL);
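	/* Append the 4-byte Router Alert option (RFC 2113) right after the
	 * basic header: type 0x94 (IPOPT_RA), length 4, value 0, telling
	 * routers on the path to examine this packet even though it is not
	 * addressed to them.
	 */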
	((u8 *)&pip[1])[0] = IPOPT_RA;
	((u8 *)&pip[1])[1] = 4;
	((u8 *)&pip[1])[2] = 0;
	((u8 *)&pip[1])[3] = 0;

	skb->transport_header = skb->network_header + sizeof(struct iphdr) + 4;

	skb_put(skb, sizeof(*pig));
	pig = igmpv3_report_hdr(skb);
	pig->type = IGMPV3_HOST_MEMBERSHIP_REPORT;
	pig->resv1 = 0;
	pig->csum = 0;
	pig->resv2 = 0;
	pig->ngrec = 0;
	return skb;
}
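/* All IGMPv3 reports are routed to 224.0.0.22 (IGMPV3_ALL_MCR), the
 * all-IGMPv3-capable-routers address from RFC 3376, section 4.2.14. The
 * allocation loop above halves the requested size on failure so a report
 * can still go out, split over several packets, under memory pressure.
 */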
static int igmpv3_sendpack(struct sk_buff *skb)
{
	struct igmphdr *pig = igmp_hdr(skb);
	const int igmplen = skb_tail_pointer(skb) - skb_transport_header(skb);

	pig->csum = ip_compute_csum(igmp_hdr(skb), igmplen);

	return ip_local_out(skb);
}
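/* The checksum computed here covers only the IGMP message itself; the IP
 * header's tot_len and checksum (left at zero in igmpv3_newpack()) are
 * filled in by the ip_local_out() path before transmission.
 */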
static int grec_size(struct ip_mc_list *pmc, int type, int gdel, int sdel)
{
	return sizeof(struct igmpv3_grec) + 4*igmp_scount(pmc, type, gdel, sdel);
}

static struct sk_buff *add_grhead(struct sk_buff *skb, struct ip_mc_list *pmc,
	int type, struct igmpv3_grec **ppgr)
{
	struct net_device *dev = pmc->interface->dev;
	struct igmpv3_report *pih;
	struct igmpv3_grec *pgr;

	if (!skb)
		skb = igmpv3_newpack(dev, dev->mtu);
	if (!skb)
		return NULL;
	pgr = (struct igmpv3_grec *)skb_put(skb, sizeof(struct igmpv3_grec));
	pgr->grec_type = type;
	pgr->grec_auxwords = 0;
	pgr->grec_nsrcs = 0;
	pgr->grec_mca = pmc->multiaddr;
	pih = igmpv3_report_hdr(skb);
	pih->ngrec = htons(ntohs(pih->ngrec)+1);
	*ppgr = pgr;
	return skb;
}

#define AVAILABLE(skb) ((skb) ? ((skb)->dev ? igmp_skb_size(skb) - (skb)->len : \
	skb_tailroom(skb)) : 0)
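/* AVAILABLE(): room left in the current report packet. For skbs built by
 * igmpv3_newpack() (skb->dev is set), the budget is the size recorded in
 * skb->cb via igmp_skb_size() minus what has been used so far, which may
 * be smaller than the real tailroom when the allocation was shrunk.
 */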
static struct sk_buff *add_grec(struct sk_buff *skb, struct ip_mc_list *pmc,
	int type, int gdeleted, int sdeleted)
{
	struct net_device *dev = pmc->interface->dev;
	struct igmpv3_report *pih;
	struct igmpv3_grec *pgr = NULL;
	struct ip_sf_list *psf, *psf_next, *psf_prev, **psf_list;
	int scount, stotal, first, isquery, truncate;

	if (pmc->multiaddr == IGMP_ALL_HOSTS)
		return skb;

	isquery = type == IGMPV3_MODE_IS_INCLUDE ||
		  type == IGMPV3_MODE_IS_EXCLUDE;
	truncate = type == IGMPV3_MODE_IS_EXCLUDE ||
		   type == IGMPV3_CHANGE_TO_EXCLUDE;

	stotal = scount = 0;

	psf_list = sdeleted ? &pmc->tomb : &pmc->sources;

	if (!*psf_list)
		goto empty_source;

	pih = skb ? igmpv3_report_hdr(skb) : NULL;

	/* EX and TO_EX get a fresh packet, if needed */
	if (truncate) {
		if (pih && pih->ngrec &&
		    AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) {
			if (skb)
				igmpv3_sendpack(skb);
			skb = igmpv3_newpack(dev, dev->mtu);
		}
	}
	first = 1;
	psf_prev = NULL;
	for (psf=*psf_list; psf; psf=psf_next) {
		__be32 *psrc;

		psf_next = psf->sf_next;

		if (!is_in(pmc, psf, type, gdeleted, sdeleted)) {
			psf_prev = psf;
			continue;
		}

		/* clear marks on query responses */
		if (isquery)
			psf->sf_gsresp = 0;

		if (AVAILABLE(skb) < sizeof(__be32) +
		    first*sizeof(struct igmpv3_grec)) {
			if (truncate && !first)
				break;	 /* truncate these */
			if (pgr)
				pgr->grec_nsrcs = htons(scount);
			if (skb)
				igmpv3_sendpack(skb);
			skb = igmpv3_newpack(dev, dev->mtu);
			first = 1;
			scount = 0;
		}
		if (first) {
			skb = add_grhead(skb, pmc, type, &pgr);
			first = 0;
		}
		if (!skb)
			return NULL;
		psrc = (__be32 *)skb_put(skb, sizeof(__be32));
		*psrc = psf->sf_inaddr;
		scount++; stotal++;
		if ((type == IGMPV3_ALLOW_NEW_SOURCES ||
		     type == IGMPV3_BLOCK_OLD_SOURCES) && psf->sf_crcount) {
			psf->sf_crcount--;
			if ((sdeleted || gdeleted) && psf->sf_crcount == 0) {
				if (psf_prev)
					psf_prev->sf_next = psf->sf_next;
				else
					*psf_list = psf->sf_next;
				kfree(psf);
				continue;
			}
		}
		psf_prev = psf;
	}

empty_source:
	if (!stotal) {
		if (type == IGMPV3_ALLOW_NEW_SOURCES ||
		    type == IGMPV3_BLOCK_OLD_SOURCES)
			return skb;
		if (pmc->crcount || isquery) {
			/* make sure we have room for group header */
			if (skb && AVAILABLE(skb) < sizeof(struct igmpv3_grec)) {
				igmpv3_sendpack(skb);
				skb = NULL; /* add_grhead will get a new one */
			}
			skb = add_grhead(skb, pmc, type, &pgr);
		}
	}
	if (pgr)
		pgr->grec_nsrcs = htons(scount);

	if (isquery)
		pmc->gsquery = 0;	/* clear query state on report */
	return skb;
}

static int igmpv3_send_report(struct in_device *in_dev, struct ip_mc_list *pmc)
{
	struct sk_buff *skb = NULL;
	int type;

	if (!pmc) {
		rcu_read_lock();
		for_each_pmc_rcu(in_dev, pmc) {
			if (pmc->multiaddr == IGMP_ALL_HOSTS)
				continue;
			spin_lock_bh(&pmc->lock);
			if (pmc->sfcount[MCAST_EXCLUDE])
				type = IGMPV3_MODE_IS_EXCLUDE;
			else
				type = IGMPV3_MODE_IS_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			spin_unlock_bh(&pmc->lock);
		}
		rcu_read_unlock();
	} else {
		spin_lock_bh(&pmc->lock);
		if (pmc->sfcount[MCAST_EXCLUDE])
			type = IGMPV3_MODE_IS_EXCLUDE;
		else
			type = IGMPV3_MODE_IS_INCLUDE;
		skb = add_grec(skb, pmc, type, 0, 0);
		spin_unlock_bh(&pmc->lock);
	}
	if (!skb)
		return 0;
	return igmpv3_sendpack(skb);
}

/*
 * remove zero-count source records from a source filter list
 */
static void igmpv3_clear_zeros(struct ip_sf_list **ppsf)
{
	struct ip_sf_list *psf_prev, *psf_next, *psf;

	psf_prev = NULL;
	for (psf=*ppsf; psf; psf = psf_next) {
		psf_next = psf->sf_next;
		if (psf->sf_crcount == 0) {
			if (psf_prev)
				psf_prev->sf_next = psf->sf_next;
			else
				*ppsf = psf->sf_next;
			kfree(psf);
		} else
			psf_prev = psf;
	}
}

static void igmpv3_send_cr(struct in_device *in_dev)
{
	struct ip_mc_list *pmc, *pmc_prev, *pmc_next;
	struct sk_buff *skb = NULL;
	int type, dtype;

	rcu_read_lock();
	spin_lock_bh(&in_dev->mc_tomb_lock);

	/* deleted MCA's */
	pmc_prev = NULL;
	for (pmc=in_dev->mc_tomb; pmc; pmc=pmc_next) {
		pmc_next = pmc->next;
		if (pmc->sfmode == MCAST_INCLUDE) {
			type = IGMPV3_BLOCK_OLD_SOURCES;
			dtype = IGMPV3_BLOCK_OLD_SOURCES;
			skb = add_grec(skb, pmc, type, 1, 0);
			skb = add_grec(skb, pmc, dtype, 1, 1);
		}
		if (pmc->crcount) {
			if (pmc->sfmode == MCAST_EXCLUDE) {
				type = IGMPV3_CHANGE_TO_INCLUDE;
				skb = add_grec(skb, pmc, type, 1, 0);
			}
			pmc->crcount--;
			if (pmc->crcount == 0) {
				igmpv3_clear_zeros(&pmc->tomb);
				igmpv3_clear_zeros(&pmc->sources);
			}
		}
		if (pmc->crcount == 0 && !pmc->tomb && !pmc->sources) {
			if (pmc_prev)
				pmc_prev->next = pmc_next;
			else
				in_dev->mc_tomb = pmc_next;
			in_dev_put(pmc->interface);
			kfree(pmc);
		} else
			pmc_prev = pmc;
	}
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	/* change recs */
	for_each_pmc_rcu(in_dev, pmc) {
		spin_lock_bh(&pmc->lock);
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			type = IGMPV3_BLOCK_OLD_SOURCES;
			dtype = IGMPV3_ALLOW_NEW_SOURCES;
		} else {
			type = IGMPV3_ALLOW_NEW_SOURCES;
			dtype = IGMPV3_BLOCK_OLD_SOURCES;
		}
		skb = add_grec(skb, pmc, type, 0, 0);
		skb = add_grec(skb, pmc, dtype, 0, 1);	/* deleted sources */

		/* filter mode changes */
		if (pmc->crcount) {
			if (pmc->sfmode == MCAST_EXCLUDE)
				type = IGMPV3_CHANGE_TO_EXCLUDE;
			else
				type = IGMPV3_CHANGE_TO_INCLUDE;
			skb = add_grec(skb, pmc, type, 0, 0);
			pmc->crcount--;
		}
		spin_unlock_bh(&pmc->lock);
	}
	rcu_read_unlock();

	if (!skb)
		return;
	(void) igmpv3_sendpack(skb);
}

static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc,
	int type)
{
	struct sk_buff *skb;
	struct iphdr *iph;
	struct igmphdr *ih;
	struct rtable *rt;
	struct net_device *dev = in_dev->dev;
	struct net *net = dev_net(dev);
	__be32 group = pmc ? pmc->multiaddr : 0;
	struct flowi4 fl4;
	__be32 dst;
	int hlen, tlen;

	if (type == IGMPV3_HOST_MEMBERSHIP_REPORT)
		return igmpv3_send_report(in_dev, pmc);
	else if (type == IGMP_HOST_LEAVE_MESSAGE)
		dst = IGMP_ALL_ROUTER;
	else
		dst = group;

	rt = ip_route_output_ports(net, &fl4, NULL, dst, 0,
				   0, 0,
				   IPPROTO_IGMP, 0, dev->ifindex);
	if (IS_ERR(rt))
		return -1;

	hlen = LL_RESERVED_SPACE(dev);
	tlen = dev->needed_tailroom;
	skb = alloc_skb(IGMP_SIZE + hlen + tlen, GFP_ATOMIC);
	if (skb == NULL) {
		ip_rt_put(rt);
		return -1;
	}
	skb->priority = TC_PRIO_CONTROL;

	skb_dst_set(skb, &rt->dst);

	skb_reserve(skb, hlen);

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	skb_put(skb, sizeof(struct iphdr) + 4);

	iph->version  = 4;
	iph->ihl      = (sizeof(struct iphdr)+4)>>2;
	iph->tos      = 0xc0;
	iph->frag_off = htons(IP_DF);
	iph->ttl      = 1;
	iph->daddr    = dst;
	iph->saddr    = fl4.saddr;
	iph->protocol = IPPROTO_IGMP;
	ip_select_ident(iph, &rt->dst, NULL);
	((u8 *)&iph[1])[0] = IPOPT_RA;
	((u8 *)&iph[1])[1] = 4;
	((u8 *)&iph[1])[2] = 0;
	((u8 *)&iph[1])[3] = 0;

	ih = (struct igmphdr *)skb_put(skb, sizeof(struct igmphdr));
	ih->type = type;
	ih->code = 0;
	ih->csum = 0;
	ih->group = group;
	ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr));

	return ip_local_out(skb);
}

static void igmp_gq_timer_expire(unsigned long data)
{
	struct in_device *in_dev = (struct in_device *)data;

	in_dev->mr_gq_running = 0;
	igmpv3_send_report(in_dev, NULL);
	__in_dev_put(in_dev);
}

static void igmp_ifc_timer_expire(unsigned long data)
{
	struct in_device *in_dev = (struct in_device *)data;

	igmpv3_send_cr(in_dev);
	if (in_dev->mr_ifc_count) {
		in_dev->mr_ifc_count--;
		igmp_ifc_start_timer(in_dev,
				     unsolicited_report_interval(in_dev));
	}
	__in_dev_put(in_dev);
}

static void igmp_ifc_event(struct in_device *in_dev)
{
	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev))
		return;
	in_dev->mr_ifc_count = in_dev->mr_qrv ? in_dev->mr_qrv :
		IGMP_Unsolicited_Report_Count;
	igmp_ifc_start_timer(in_dev, 1);
}

static void igmp_timer_expire(unsigned long data)
{
	struct ip_mc_list *im = (struct ip_mc_list *)data;
	struct in_device *in_dev = im->interface;

	spin_lock(&im->lock);
	im->tm_running = 0;

	if (im->unsolicit_count) {
		im->unsolicit_count--;
		igmp_start_timer(im, unsolicited_report_interval(in_dev));
	}
	im->reporter = 1;
	spin_unlock(&im->lock);

	if (IGMP_V1_SEEN(in_dev))
		igmp_send_report(in_dev, im, IGMP_HOST_MEMBERSHIP_REPORT);
	else if (IGMP_V2_SEEN(in_dev))
		igmp_send_report(in_dev, im, IGMPV2_HOST_MEMBERSHIP_REPORT);
	else
		igmp_send_report(in_dev, im, IGMPV3_HOST_MEMBERSHIP_REPORT);

	ip_ma_put(im);
}
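/* The report type sent above tracks host compatibility mode: a plain
 * v1/v2 membership report while an older querier has recently been seen,
 * otherwise a full IGMPv3 report. ip_ma_put() drops the reference that
 * igmp_start_timer() took when this timer was armed.
 */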
/* mark EXCLUDE-mode sources */
static int igmp_xmarksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
	struct ip_sf_list *psf;
	int i, scount;

	scount = 0;
	for (psf=pmc->sources; psf; psf=psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i=0; i<nsrcs; i++) {
			/* skip inactive filters */
			if (psf->sf_count[MCAST_INCLUDE] ||
			    pmc->sfcount[MCAST_EXCLUDE] !=
			    psf->sf_count[MCAST_EXCLUDE])
				break;
			if (srcs[i] == psf->sf_inaddr) {
				scount++;
				break;
			}
		}
	}
	pmc->gsquery = 0;
	if (scount == nsrcs)	/* all sources excluded */
		return 0;
	return 1;
}

static int igmp_marksources(struct ip_mc_list *pmc, int nsrcs, __be32 *srcs)
{
	struct ip_sf_list *psf;
	int i, scount;

	if (pmc->sfmode == MCAST_EXCLUDE)
		return igmp_xmarksources(pmc, nsrcs, srcs);

	/* mark INCLUDE-mode sources */
	scount = 0;
	for (psf=pmc->sources; psf; psf=psf->sf_next) {
		if (scount == nsrcs)
			break;
		for (i=0; i<nsrcs; i++)
			if (srcs[i] == psf->sf_inaddr) {
				psf->sf_gsresp = 1;
				scount++;
				break;
			}
	}
	if (!scount) {
		pmc->gsquery = 0;
		return 0;
	}
	pmc->gsquery = 1;
	return 1;
}
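/* For a group-and-source-specific query, the sources listed in the query
 * are marked (sf_gsresp) so that the report built later by add_grec()
 * includes only those sources. The return value says whether a response
 * is needed at all, and pmc->gsquery records that the pending report is
 * source-specific.
 */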
/* return true if packet was dropped */
static bool igmp_heard_report(struct in_device *in_dev, __be32 group)
{
	struct ip_mc_list *im;

	/* Timers are only set for non-local groups */

	if (group == IGMP_ALL_HOSTS)
		return false;

	rcu_read_lock();
	for_each_pmc_rcu(in_dev, im) {
		if (im->multiaddr == group) {
			igmp_stop_timer(im);
			break;
		}
	}
	rcu_read_unlock();
	return false;
}
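/* Query parsing below follows RFC 3376, section 7.1: an 8-byte query with
 * code 0 comes from a v1 querier, an 8-byte query with a non-zero code
 * (max response time in 1/10 s units) from a v2 querier, and anything of
 * 12 bytes or more is a v3 query; lengths in between are bogus. v3
 * encodes Max Resp Code exponentially above 127, which IGMPV3_MRC()
 * decodes back into 1/10 s units.
 */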
/* return true if packet was dropped */
static bool igmp_heard_query(struct in_device *in_dev, struct sk_buff *skb,
	int len)
{
	struct igmphdr *ih = igmp_hdr(skb);
	struct igmpv3_query *ih3 = igmpv3_query_hdr(skb);
	struct ip_mc_list *im;
	__be32 group = ih->group;
	int max_delay;
	int mark = 0;

	if (len == 8) {
		if (ih->code == 0) {
			/* Alas, an old v1 router is present here. */
			max_delay = IGMP_Query_Response_Interval;
			in_dev->mr_v1_seen = jiffies +
				IGMP_V1_Router_Present_Timeout;
			group = 0;
		} else {
			/* v2 router present */
			max_delay = ih->code*(HZ/IGMP_TIMER_SCALE);
			in_dev->mr_v2_seen = jiffies +
				IGMP_V2_Router_Present_Timeout;
		}
		/* cancel the interface change timer */
		in_dev->mr_ifc_count = 0;
		if (del_timer(&in_dev->mr_ifc_timer))
			__in_dev_put(in_dev);
		/* clear deleted report items */
		igmpv3_clear_delrec(in_dev);
	} else if (len < 12) {
		return true;	/* ignore bogus packet; freed by caller */
	} else if (IGMP_V1_SEEN(in_dev)) {
		/* This is a v3 query with v1 queriers present */
		max_delay = IGMP_Query_Response_Interval;
		group = 0;
	} else if (IGMP_V2_SEEN(in_dev)) {
		/* this is a v3 query with v2 queriers present;
		 * Interpretation of the max_delay code is problematic here.
		 * A real v2 host would use ih_code directly, while v3 has a
		 * different encoding. We use the v3 encoding as more likely
		 * to be intended in a v3 query.
		 */
		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
		if (!max_delay)
			max_delay = 1;	/* can't mod w/ 0 */
	} else { /* v3 */
		if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)))
			return true;

		ih3 = igmpv3_query_hdr(skb);
		if (ih3->nsrcs) {
			if (!pskb_may_pull(skb, sizeof(struct igmpv3_query)
					   + ntohs(ih3->nsrcs)*sizeof(__be32)))
				return true;
			ih3 = igmpv3_query_hdr(skb);
		}

		max_delay = IGMPV3_MRC(ih3->code)*(HZ/IGMP_TIMER_SCALE);
		if (!max_delay)
			max_delay = 1;	/* can't mod w/ 0 */
		in_dev->mr_maxdelay = max_delay;
		if (ih3->qrv)
			in_dev->mr_qrv = ih3->qrv;
		if (!group) { /* general query */
			if (ih3->nsrcs)
				return false;	/* no sources allowed */
			igmp_gq_start_timer(in_dev);
			return false;
		}
		/* mark sources to include, if group & source-specific */
		mark = ih3->nsrcs != 0;
	}

	/*
	 * - Start the timers in all of our membership records
	 *   that the query applies to for the interface on
	 *   which the query arrived, excl. those that belong
	 *   to a "local" group (224.0.0.X)
	 * - For timers already running, check if they need to
	 *   be reset.
	 * - Use the igmp->igmp_code field as the maximum
	 *   delay possible
	 */
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, im) {
		int changed;

		if (group && group != im->multiaddr)
			continue;
		if (im->multiaddr == IGMP_ALL_HOSTS)
			continue;
		spin_lock_bh(&im->lock);
		if (im->tm_running)
			im->gsquery = im->gsquery && mark;
		else
			im->gsquery = mark;
		changed = !im->gsquery ||
			igmp_marksources(im, ntohs(ih3->nsrcs), ih3->srcs);
		spin_unlock_bh(&im->lock);
		if (changed)
			igmp_mod_timer(im, max_delay);
	}
	rcu_read_unlock();
	return false;
}
/* called in rcu_read_lock() section */
int igmp_rcv(struct sk_buff *skb)
{
	/* This basically follows the spec line by line -- see RFC1112 */
	struct igmphdr *ih;
	struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
	int len = skb->len;
	bool dropped = true;

	if (in_dev == NULL)
		goto drop;

	if (!pskb_may_pull(skb, sizeof(struct igmphdr)))
		goto drop;

	switch (skb->ip_summed) {
	case CHECKSUM_COMPLETE:
		if (!csum_fold(skb->csum))
			break;
		/* fall through */
	case CHECKSUM_NONE:
		skb->csum = 0;
		if (__skb_checksum_complete(skb))
			goto drop;
	}

	ih = igmp_hdr(skb);
	switch (ih->type) {
	case IGMP_HOST_MEMBERSHIP_QUERY:
		dropped = igmp_heard_query(in_dev, skb, len);
		break;
	case IGMP_HOST_MEMBERSHIP_REPORT:
	case IGMPV2_HOST_MEMBERSHIP_REPORT:
		/* Is it our report looped back? */
		if (rt_is_output_route(skb_rtable(skb)))
			break;
		/* don't rely on MC router hearing unicast reports */
		if (skb->pkt_type == PACKET_MULTICAST ||
		    skb->pkt_type == PACKET_BROADCAST)
			dropped = igmp_heard_report(in_dev, ih->group);
		break;
	case IGMP_PIM:
#ifdef CONFIG_IP_PIMSM_V1
		return pim_rcv_v1(skb);
#endif
	case IGMPV3_HOST_MEMBERSHIP_REPORT:
	case IGMP_DVMRP:
	case IGMP_TRACE:
	case IGMP_HOST_LEAVE_MESSAGE:
	case IGMP_MTRACE:
	case IGMP_MTRACE_RESP:
		break;
	default:
		break;
	}

drop:
	if (dropped)
		kfree_skb(skb);
	else
		consume_skb(skb);
	return 0;
}
#endif

/*
 *	Add a filter to a device
 */

static void ip_mc_filter_add(struct in_device *in_dev, __be32 addr)
{
	char buf[MAX_ADDR_LEN];
	struct net_device *dev = in_dev->dev;

	/* Checking for IFF_MULTICAST here is WRONG-WRONG-WRONG.
	   We will get multicast token leakage, when IFF_MULTICAST
	   is changed. This check should be done in ndo_set_rx_mode
	   routine. Something sort of:
	   if (dev->mc_list && dev->flags&IFF_MULTICAST) { do it; }
	   --ANK
	   */
	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_add(dev, buf);
}

/*
 *	Remove a filter from a device
 */

static void ip_mc_filter_del(struct in_device *in_dev, __be32 addr)
{
	char buf[MAX_ADDR_LEN];
	struct net_device *dev = in_dev->dev;

	if (arp_mc_map(addr, buf, dev, 0) == 0)
		dev_mc_del(dev, buf);
}

#ifdef CONFIG_IP_MULTICAST
/*
 * deleted ip_mc_list manipulation
 */
static void igmpv3_add_delrec(struct in_device *in_dev, struct ip_mc_list *im)
{
	struct ip_mc_list *pmc;

	/* this is an "ip_mc_list" for convenience; only the fields below
	 * are actually used. In particular, the refcnt and users are not
	 * used for management of the delete list. Using the same structure
	 * for deleted items allows change reports to use common code with
	 * non-deleted or query-response MCA's.
	 */
	pmc = kzalloc(sizeof(*pmc), GFP_KERNEL);
	if (!pmc)
		return;
	spin_lock_bh(&im->lock);
	pmc->interface = im->interface;
	in_dev_hold(in_dev);
	pmc->multiaddr = im->multiaddr;
	pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
		IGMP_Unsolicited_Report_Count;
	pmc->sfmode = im->sfmode;
	if (pmc->sfmode == MCAST_INCLUDE) {
		struct ip_sf_list *psf;

		pmc->tomb = im->tomb;
		pmc->sources = im->sources;
		im->tomb = im->sources = NULL;
		for (psf=pmc->sources; psf; psf=psf->sf_next)
			psf->sf_crcount = pmc->crcount;
	}
	spin_unlock_bh(&im->lock);

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc->next = in_dev->mc_tomb;
	in_dev->mc_tomb = pmc;
	spin_unlock_bh(&in_dev->mc_tomb_lock);
}

static void igmpv3_del_delrec(struct in_device *in_dev, __be32 multiaddr)
{
	struct ip_mc_list *pmc, *pmc_prev;
	struct ip_sf_list *psf, *psf_next;

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc_prev = NULL;
	for (pmc=in_dev->mc_tomb; pmc; pmc=pmc->next) {
		if (pmc->multiaddr == multiaddr)
			break;
		pmc_prev = pmc;
	}
	if (pmc) {
		if (pmc_prev)
			pmc_prev->next = pmc->next;
		else
			in_dev->mc_tomb = pmc->next;
	}
	spin_unlock_bh(&in_dev->mc_tomb_lock);
	if (pmc) {
		for (psf=pmc->tomb; psf; psf=psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
		in_dev_put(pmc->interface);
		kfree(pmc);
	}
}

static void igmpv3_clear_delrec(struct in_device *in_dev)
{
	struct ip_mc_list *pmc, *nextpmc;

	spin_lock_bh(&in_dev->mc_tomb_lock);
	pmc = in_dev->mc_tomb;
	in_dev->mc_tomb = NULL;
	spin_unlock_bh(&in_dev->mc_tomb_lock);

	for (; pmc; pmc = nextpmc) {
		nextpmc = pmc->next;
		ip_mc_clear_src(pmc);
		in_dev_put(pmc->interface);
		kfree(pmc);
	}

	/* clear dead sources, too */
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		struct ip_sf_list *psf, *psf_next;

		spin_lock_bh(&pmc->lock);
		psf = pmc->tomb;
		pmc->tomb = NULL;
		spin_unlock_bh(&pmc->lock);
		for (; psf; psf=psf_next) {
			psf_next = psf->sf_next;
			kfree(psf);
		}
	}
	rcu_read_unlock();
}
#endif

static void igmp_group_dropped(struct ip_mc_list *im)
{
	struct in_device *in_dev = im->interface;
#ifdef CONFIG_IP_MULTICAST
	int reporter;
#endif

	if (im->loaded) {
		im->loaded = 0;
		ip_mc_filter_del(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;

	reporter = im->reporter;
	igmp_stop_timer(im);

	if (!in_dev->dead) {
		if (IGMP_V1_SEEN(in_dev))
			return;
		if (IGMP_V2_SEEN(in_dev)) {
			if (reporter)
				igmp_send_report(in_dev, im, IGMP_HOST_LEAVE_MESSAGE);
			return;
		}
		/* IGMPv3 */
		igmpv3_add_delrec(in_dev, im);

		igmp_ifc_event(in_dev);
	}
#endif
}

static void igmp_group_added(struct ip_mc_list *im)
{
	struct in_device *in_dev = im->interface;

	if (im->loaded == 0) {
		im->loaded = 1;
		ip_mc_filter_add(in_dev, im->multiaddr);
	}

#ifdef CONFIG_IP_MULTICAST
	if (im->multiaddr == IGMP_ALL_HOSTS)
		return;
	if (in_dev->dead)
		return;
	if (IGMP_V1_SEEN(in_dev) || IGMP_V2_SEEN(in_dev)) {
		spin_lock_bh(&im->lock);
		igmp_start_timer(im, IGMP_Initial_Report_Delay);
		spin_unlock_bh(&im->lock);
		return;
	}
	/* else, v3 */

	im->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
		IGMP_Unsolicited_Report_Count;
	igmp_ifc_event(in_dev);
#endif
}

/*
 *	Multicast list managers
 */

static u32 ip_mc_hash(const struct ip_mc_list *im)
{
	return hash_32((__force u32)im->multiaddr, MC_HASH_SZ_LOG);
}

static void ip_mc_hash_add(struct in_device *in_dev,
			   struct ip_mc_list *im)
{
	struct ip_mc_list __rcu **mc_hash;
	u32 hash;

	mc_hash = rtnl_dereference(in_dev->mc_hash);
	if (mc_hash) {
		hash = ip_mc_hash(im);
		im->next_hash = mc_hash[hash];
		rcu_assign_pointer(mc_hash[hash], im);
		return;
	}

	/* do not use a hash table for small number of items */
	if (in_dev->mc_count < 4)
		return;

	mc_hash = kzalloc(sizeof(struct ip_mc_list *) << MC_HASH_SZ_LOG,
			  GFP_KERNEL);
	if (!mc_hash)
		return;

	for_each_pmc_rtnl(in_dev, im) {
		hash = ip_mc_hash(im);
		im->next_hash = mc_hash[hash];
		RCU_INIT_POINTER(mc_hash[hash], im);
	}

	rcu_assign_pointer(in_dev->mc_hash, mc_hash);
}
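/* The lookup hash is only built once an interface tracks four or more
 * groups; below that, the plain mc_list walk is cheap enough. The table
 * has 2^MC_HASH_SZ_LOG buckets and is populated under RTNL, then
 * published with rcu_assign_pointer() so RCU readers only ever see a
 * fully initialised table.
 */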
static void ip_mc_hash_remove(struct in_device *in_dev,
			      struct ip_mc_list *im)
{
	struct ip_mc_list __rcu **mc_hash = rtnl_dereference(in_dev->mc_hash);
	struct ip_mc_list *aux;

	if (!mc_hash)
		return;
	mc_hash += ip_mc_hash(im);
	while ((aux = rtnl_dereference(*mc_hash)) != im)
		mc_hash = &aux->next_hash;
	*mc_hash = im->next_hash;
}

/*
 *	A socket has joined a multicast group on device dev.
 */

void ip_mc_inc_group(struct in_device *in_dev, __be32 addr)
{
	struct ip_mc_list *im;

	ASSERT_RTNL();

	for_each_pmc_rtnl(in_dev, im) {
		if (im->multiaddr == addr) {
			im->users++;
			ip_mc_add_src(in_dev, &addr, MCAST_EXCLUDE, 0, NULL, 0);
			goto out;
		}
	}

	im = kzalloc(sizeof(*im), GFP_KERNEL);
	if (!im)
		goto out;

	im->users = 1;
	im->interface = in_dev;
	in_dev_hold(in_dev);
	im->multiaddr = addr;
	/* initial mode is (EX, empty) */
	im->sfmode = MCAST_EXCLUDE;
	im->sfcount[MCAST_EXCLUDE] = 1;
	atomic_set(&im->refcnt, 1);
	spin_lock_init(&im->lock);
#ifdef CONFIG_IP_MULTICAST
	setup_timer(&im->timer, &igmp_timer_expire, (unsigned long)im);
	im->unsolicit_count = IGMP_Unsolicited_Report_Count;
#endif

	im->next_rcu = in_dev->mc_list;
	in_dev->mc_count++;
	rcu_assign_pointer(in_dev->mc_list, im);

	ip_mc_hash_add(in_dev, im);

#ifdef CONFIG_IP_MULTICAST
	igmpv3_del_delrec(in_dev, im->multiaddr);
#endif
	igmp_group_added(im);
	if (!in_dev->dead)
		ip_rt_multicast_event(in_dev);
out:
	return;
}
EXPORT_SYMBOL(ip_mc_inc_group);
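/* For reference, ip_mc_inc_group() is what ultimately runs (under RTNL)
 * when user space joins a group. A minimal, illustrative user-space
 * sketch -- not part of this file, group address chosen arbitrarily,
 * error handling trimmed:
 *
 *	int fd = socket(AF_INET, SOCK_DGRAM, 0);
 *	struct ip_mreqn mreq = {
 *		.imr_multiaddr.s_addr = inet_addr("239.1.2.3"),
 *		.imr_address.s_addr   = htonl(INADDR_ANY),
 *		.imr_ifindex          = 0,  (0: let the kernel pick the device)
 *	};
 *	setsockopt(fd, IPPROTO_IP, IP_ADD_MEMBERSHIP, &mreq, sizeof(mreq));
 *
 * The setsockopt() handler resolves the device via ip_mc_find_dev()
 * (below) and then calls ip_mc_inc_group(), which either bumps "users"
 * on an existing entry or creates one and triggers the first reports.
 */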
  1184. /*
  1185. * Resend IGMP JOIN report; used by netdev notifier.
  1186. */
  1187. static void ip_mc_rejoin_groups(struct in_device *in_dev)
  1188. {
  1189. #ifdef CONFIG_IP_MULTICAST
  1190. struct ip_mc_list *im;
  1191. int type;
  1192. ASSERT_RTNL();
  1193. for_each_pmc_rtnl(in_dev, im) {
  1194. if (im->multiaddr == IGMP_ALL_HOSTS)
  1195. continue;
  1196. /* a failover is happening and switches
  1197. * must be notified immediately
  1198. */
  1199. if (IGMP_V1_SEEN(in_dev))
  1200. type = IGMP_HOST_MEMBERSHIP_REPORT;
  1201. else if (IGMP_V2_SEEN(in_dev))
  1202. type = IGMPV2_HOST_MEMBERSHIP_REPORT;
  1203. else
  1204. type = IGMPV3_HOST_MEMBERSHIP_REPORT;
  1205. igmp_send_report(in_dev, im, type);
  1206. }
  1207. #endif
  1208. }
  1209. /*
  1210. * A socket has left a multicast group on device dev
  1211. */
  1212. void ip_mc_dec_group(struct in_device *in_dev, __be32 addr)
  1213. {
  1214. struct ip_mc_list *i;
  1215. struct ip_mc_list __rcu **ip;
  1216. ASSERT_RTNL();
  1217. for (ip = &in_dev->mc_list;
  1218. (i = rtnl_dereference(*ip)) != NULL;
  1219. ip = &i->next_rcu) {
  1220. if (i->multiaddr == addr) {
  1221. if (--i->users == 0) {
  1222. ip_mc_hash_remove(in_dev, i);
  1223. *ip = i->next_rcu;
  1224. in_dev->mc_count--;
  1225. igmp_group_dropped(i);
  1226. ip_mc_clear_src(i);
  1227. if (!in_dev->dead)
  1228. ip_rt_multicast_event(in_dev);
  1229. ip_ma_put(i);
  1230. return;
  1231. }
  1232. break;
  1233. }
  1234. }
  1235. }
  1236. EXPORT_SYMBOL(ip_mc_dec_group);
  1237. /* Device changing type */
  1238. void ip_mc_unmap(struct in_device *in_dev)
  1239. {
  1240. struct ip_mc_list *pmc;
  1241. ASSERT_RTNL();
  1242. for_each_pmc_rtnl(in_dev, pmc)
  1243. igmp_group_dropped(pmc);
  1244. }
  1245. void ip_mc_remap(struct in_device *in_dev)
  1246. {
  1247. struct ip_mc_list *pmc;
  1248. ASSERT_RTNL();
  1249. for_each_pmc_rtnl(in_dev, pmc)
  1250. igmp_group_added(pmc);
  1251. }
  1252. /* Device going down */
  1253. void ip_mc_down(struct in_device *in_dev)
  1254. {
  1255. struct ip_mc_list *pmc;
  1256. ASSERT_RTNL();
  1257. for_each_pmc_rtnl(in_dev, pmc)
  1258. igmp_group_dropped(pmc);
  1259. #ifdef CONFIG_IP_MULTICAST
  1260. in_dev->mr_ifc_count = 0;
  1261. if (del_timer(&in_dev->mr_ifc_timer))
  1262. __in_dev_put(in_dev);
  1263. in_dev->mr_gq_running = 0;
  1264. if (del_timer(&in_dev->mr_gq_timer))
  1265. __in_dev_put(in_dev);
  1266. igmpv3_clear_delrec(in_dev);
  1267. #endif
  1268. ip_mc_dec_group(in_dev, IGMP_ALL_HOSTS);
  1269. }
  1270. void ip_mc_init_dev(struct in_device *in_dev)
  1271. {
  1272. ASSERT_RTNL();
  1273. #ifdef CONFIG_IP_MULTICAST
  1274. setup_timer(&in_dev->mr_gq_timer, igmp_gq_timer_expire,
  1275. (unsigned long)in_dev);
  1276. setup_timer(&in_dev->mr_ifc_timer, igmp_ifc_timer_expire,
  1277. (unsigned long)in_dev);
  1278. in_dev->mr_qrv = IGMP_Unsolicited_Report_Count;
  1279. #endif
  1280. spin_lock_init(&in_dev->mc_tomb_lock);
  1281. }
  1282. /* Device going up */
  1283. void ip_mc_up(struct in_device *in_dev)
  1284. {
  1285. struct ip_mc_list *pmc;
  1286. ASSERT_RTNL();
  1287. ip_mc_inc_group(in_dev, IGMP_ALL_HOSTS);
  1288. for_each_pmc_rtnl(in_dev, pmc)
  1289. igmp_group_added(pmc);
  1290. }
  1291. /*
  1292. * Device is about to be destroyed: clean up.
  1293. */
  1294. void ip_mc_destroy_dev(struct in_device *in_dev)
  1295. {
  1296. struct ip_mc_list *i;
  1297. ASSERT_RTNL();
  1298. /* Deactivate timers */
  1299. ip_mc_down(in_dev);
  1300. while ((i = rtnl_dereference(in_dev->mc_list)) != NULL) {
  1301. in_dev->mc_list = i->next_rcu;
  1302. in_dev->mc_count--;
  1303. /* We've dropped the groups in ip_mc_down already */
  1304. ip_mc_clear_src(i);
  1305. ip_ma_put(i);
  1306. }
  1307. }
  1308. /* RTNL is locked */
  1309. static struct in_device *ip_mc_find_dev(struct net *net, struct ip_mreqn *imr)
  1310. {
  1311. struct net_device *dev = NULL;
  1312. struct in_device *idev = NULL;
  1313. if (imr->imr_ifindex) {
  1314. idev = inetdev_by_index(net, imr->imr_ifindex);
  1315. return idev;
  1316. }
  1317. if (imr->imr_address.s_addr) {
  1318. dev = __ip_dev_find(net, imr->imr_address.s_addr, false);
  1319. if (!dev)
  1320. return NULL;
  1321. }
  1322. if (!dev) {
  1323. struct rtable *rt = ip_route_output(net,
  1324. imr->imr_multiaddr.s_addr,
  1325. 0, 0, 0);
  1326. if (!IS_ERR(rt)) {
  1327. dev = rt->dst.dev;
  1328. ip_rt_put(rt);
  1329. }
  1330. }
  1331. if (dev) {
  1332. imr->imr_ifindex = dev->ifindex;
  1333. idev = __in_dev_get_rtnl(dev);
  1334. }
  1335. return idev;
  1336. }
  1337. /*
  1338. * Join a socket to a group
  1339. */
  1340. int sysctl_igmp_max_memberships __read_mostly = IP_MAX_MEMBERSHIPS;
  1341. int sysctl_igmp_max_msf __read_mostly = IP_MAX_MSF;
  1342. static int ip_mc_del1_src(struct ip_mc_list *pmc, int sfmode,
  1343. __be32 *psfsrc)
  1344. {
  1345. struct ip_sf_list *psf, *psf_prev;
  1346. int rv = 0;
  1347. psf_prev = NULL;
  1348. for (psf=pmc->sources; psf; psf=psf->sf_next) {
  1349. if (psf->sf_inaddr == *psfsrc)
  1350. break;
  1351. psf_prev = psf;
  1352. }
  1353. if (!psf || psf->sf_count[sfmode] == 0) {
  1354. /* source filter not found, or count wrong => bug */
  1355. return -ESRCH;
  1356. }
  1357. psf->sf_count[sfmode]--;
  1358. if (psf->sf_count[sfmode] == 0) {
  1359. ip_rt_multicast_event(pmc->interface);
  1360. }
  1361. if (!psf->sf_count[MCAST_INCLUDE] && !psf->sf_count[MCAST_EXCLUDE]) {
  1362. #ifdef CONFIG_IP_MULTICAST
  1363. struct in_device *in_dev = pmc->interface;
  1364. #endif
  1365. /* no more filters for this source */
  1366. if (psf_prev)
  1367. psf_prev->sf_next = psf->sf_next;
  1368. else
  1369. pmc->sources = psf->sf_next;
  1370. #ifdef CONFIG_IP_MULTICAST
  1371. if (psf->sf_oldin &&
  1372. !IGMP_V1_SEEN(in_dev) && !IGMP_V2_SEEN(in_dev)) {
  1373. psf->sf_crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
  1374. IGMP_Unsolicited_Report_Count;
  1375. psf->sf_next = pmc->tomb;
  1376. pmc->tomb = psf;
  1377. rv = 1;
  1378. } else
  1379. #endif
  1380. kfree(psf);
  1381. }
  1382. return rv;
  1383. }
  1384. #ifndef CONFIG_IP_MULTICAST
  1385. #define igmp_ifc_event(x) do { } while (0)
  1386. #endif
static int ip_mc_del_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int changerec = 0;
	int i, err;

	if (!in_dev)
		return -ENODEV;
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		rcu_read_unlock();
		return -ESRCH;
	}
	spin_lock_bh(&pmc->lock);
	rcu_read_unlock();
#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	if (!delta) {
		err = -EINVAL;
		if (!pmc->sfcount[sfmode])
			goto out_unlock;
		pmc->sfcount[sfmode]--;
	}
	err = 0;
	for (i = 0; i < sfcount; i++) {
		int rv = ip_mc_del1_src(pmc, sfmode, &psfsrc[i]);

		changerec |= rv > 0;
		if (!err && rv < 0)
			err = rv;
	}
	if (pmc->sfmode == MCAST_EXCLUDE &&
	    pmc->sfcount[MCAST_EXCLUDE] == 0 &&
	    pmc->sfcount[MCAST_INCLUDE]) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
#endif

		/* filter mode change */
		pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
			IGMP_Unsolicited_Report_Count;
		in_dev->mr_ifc_count = pmc->crcount;
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(pmc->interface);
	} else if (sf_setstate(pmc) || changerec) {
		igmp_ifc_event(pmc->interface);
#endif
	}
out_unlock:
	spin_unlock_bh(&pmc->lock);
	return err;
}

/*
 * Add multicast single-source filter to the interface list
 */
static int ip_mc_add1_src(struct ip_mc_list *pmc, int sfmode,
	__be32 *psfsrc)
{
	struct ip_sf_list *psf, *psf_prev;

	psf_prev = NULL;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (psf->sf_inaddr == *psfsrc)
			break;
		psf_prev = psf;
	}
	if (!psf) {
		psf = kzalloc(sizeof(*psf), GFP_ATOMIC);
		if (!psf)
			return -ENOBUFS;
		psf->sf_inaddr = *psfsrc;
		if (psf_prev) {
			psf_prev->sf_next = psf;
		} else
			pmc->sources = psf;
	}
	psf->sf_count[sfmode]++;
	if (psf->sf_count[sfmode] == 1) {
		ip_rt_multicast_event(pmc->interface);
	}
	return 0;
}

#ifdef CONFIG_IP_MULTICAST
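/* Snapshot, per source, whether the source is currently being forwarded
 * (sf_oldin), so that sf_setstate() can detect transitions after the
 * filter has been updated.
 */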
static void sf_markstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];

	for (psf = pmc->sources; psf; psf = psf->sf_next)
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			psf->sf_oldin = mca_xcount ==
				psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			psf->sf_oldin = psf->sf_count[MCAST_INCLUDE] != 0;
}

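/* Compare each source's new state against the sf_oldin snapshot: arm
 * retransmit counters for sources that became active, and add "delete"
 * records on the tomb list for sources that went inactive.  Returns the
 * number of sources whose state changed.
 */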
static int sf_setstate(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf, *dpsf;
	int mca_xcount = pmc->sfcount[MCAST_EXCLUDE];
	int qrv = pmc->interface->mr_qrv;
	int new_in, rv;

	rv = 0;
	for (psf = pmc->sources; psf; psf = psf->sf_next) {
		if (pmc->sfcount[MCAST_EXCLUDE]) {
			new_in = mca_xcount == psf->sf_count[MCAST_EXCLUDE] &&
				!psf->sf_count[MCAST_INCLUDE];
		} else
			new_in = psf->sf_count[MCAST_INCLUDE] != 0;
		if (new_in) {
			if (!psf->sf_oldin) {
				struct ip_sf_list *prev = NULL;

				for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next) {
					if (dpsf->sf_inaddr == psf->sf_inaddr)
						break;
					prev = dpsf;
				}
				if (dpsf) {
					if (prev)
						prev->sf_next = dpsf->sf_next;
					else
						pmc->tomb = dpsf->sf_next;
					kfree(dpsf);
				}
				psf->sf_crcount = qrv;
				rv++;
			}
		} else if (psf->sf_oldin) {
			psf->sf_crcount = 0;
			/*
			 * add or update "delete" records if an active filter
			 * is now inactive
			 */
			for (dpsf = pmc->tomb; dpsf; dpsf = dpsf->sf_next)
				if (dpsf->sf_inaddr == psf->sf_inaddr)
					break;
			if (!dpsf) {
				dpsf = kmalloc(sizeof(*dpsf), GFP_ATOMIC);
				if (!dpsf)
					continue;
				*dpsf = *psf;
				/* pmc->lock held by callers */
				dpsf->sf_next = pmc->tomb;
				pmc->tomb = dpsf;
			}
			dpsf->sf_crcount = qrv;
			rv++;
		}
	}
	return rv;
}
#endif

/*
 * Add multicast source filter list to the interface list
 */
static int ip_mc_add_src(struct in_device *in_dev, __be32 *pmca, int sfmode,
			 int sfcount, __be32 *psfsrc, int delta)
{
	struct ip_mc_list *pmc;
	int isexclude;
	int i, err;

	if (!in_dev)
		return -ENODEV;
	rcu_read_lock();
	for_each_pmc_rcu(in_dev, pmc) {
		if (*pmca == pmc->multiaddr)
			break;
	}
	if (!pmc) {
		/* MCA not found?? bug */
		rcu_read_unlock();
		return -ESRCH;
	}
	spin_lock_bh(&pmc->lock);
	rcu_read_unlock();

#ifdef CONFIG_IP_MULTICAST
	sf_markstate(pmc);
#endif
	isexclude = pmc->sfmode == MCAST_EXCLUDE;
	if (!delta)
		pmc->sfcount[sfmode]++;
	err = 0;
	for (i = 0; i < sfcount; i++) {
		err = ip_mc_add1_src(pmc, sfmode, &psfsrc[i]);
		if (err)
			break;
	}
	if (err) {
		int j;

		if (!delta)
			pmc->sfcount[sfmode]--;
		for (j = 0; j < i; j++)
			(void) ip_mc_del1_src(pmc, sfmode, &psfsrc[j]);
	} else if (isexclude != (pmc->sfcount[MCAST_EXCLUDE] != 0)) {
#ifdef CONFIG_IP_MULTICAST
		struct ip_sf_list *psf;
		in_dev = pmc->interface;
#endif

		/* filter mode change */
		if (pmc->sfcount[MCAST_EXCLUDE])
			pmc->sfmode = MCAST_EXCLUDE;
		else if (pmc->sfcount[MCAST_INCLUDE])
			pmc->sfmode = MCAST_INCLUDE;
#ifdef CONFIG_IP_MULTICAST
		/* else no filters; keep old mode for reports */

		pmc->crcount = in_dev->mr_qrv ? in_dev->mr_qrv :
			IGMP_Unsolicited_Report_Count;
		in_dev->mr_ifc_count = pmc->crcount;
		for (psf = pmc->sources; psf; psf = psf->sf_next)
			psf->sf_crcount = 0;
		igmp_ifc_event(in_dev);
	} else if (sf_setstate(pmc)) {
		igmp_ifc_event(in_dev);
#endif
	}
	spin_unlock_bh(&pmc->lock);
	return err;
}

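/* Free all per-source state hanging off pmc and reset it to the default
 * (EXCLUDE, empty) filter, i.e. accept traffic from any source.
 */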
static void ip_mc_clear_src(struct ip_mc_list *pmc)
{
	struct ip_sf_list *psf, *nextpsf;

	for (psf = pmc->tomb; psf; psf = nextpsf) {
		nextpsf = psf->sf_next;
		kfree(psf);
	}
	pmc->tomb = NULL;
	for (psf = pmc->sources; psf; psf = nextpsf) {
		nextpsf = psf->sf_next;
		kfree(psf);
	}
	pmc->sources = NULL;
	pmc->sfmode = MCAST_EXCLUDE;
	pmc->sfcount[MCAST_INCLUDE] = 0;
	pmc->sfcount[MCAST_EXCLUDE] = 1;
}

/*
 * Join a multicast group
 */
int ip_mc_join_group(struct sock *sk, struct ip_mreqn *imr)
{
	int err;
	__be32 addr = imr->imr_multiaddr.s_addr;
	struct ip_mc_socklist *iml = NULL, *i;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	int ifindex;
	int count = 0;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	rtnl_lock();

	in_dev = ip_mc_find_dev(net, imr);

	if (!in_dev) {
		iml = NULL;
		err = -ENODEV;
		goto done;
	}

	err = -EADDRINUSE;
	ifindex = imr->imr_ifindex;
	for_each_pmc_rtnl(inet, i) {
		if (i->multi.imr_multiaddr.s_addr == addr &&
		    i->multi.imr_ifindex == ifindex)
			goto done;
		count++;
	}
	err = -ENOBUFS;
	if (count >= sysctl_igmp_max_memberships)
		goto done;
	iml = sock_kmalloc(sk, sizeof(*iml), GFP_KERNEL);
	if (iml == NULL)
		goto done;

	memcpy(&iml->multi, imr, sizeof(*imr));
	iml->next_rcu = inet->mc_list;
	iml->sflist = NULL;
	iml->sfmode = MCAST_EXCLUDE;
	rcu_assign_pointer(inet->mc_list, iml);
	ip_mc_inc_group(in_dev, addr);
	err = 0;
done:
	rtnl_unlock();
	return err;
}
EXPORT_SYMBOL(ip_mc_join_group);

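/* Tear down the per-socket source filter attached to iml and propagate
 * the removals into the interface-level filter on in_dev.
 */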
static int ip_mc_leave_src(struct sock *sk, struct ip_mc_socklist *iml,
			   struct in_device *in_dev)
{
	struct ip_sf_socklist *psf = rtnl_dereference(iml->sflist);
	int err;

	if (psf == NULL) {
		/* any-source empty exclude case */
		return ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
			iml->sfmode, 0, NULL, 0);
	}
	err = ip_mc_del_src(in_dev, &iml->multi.imr_multiaddr.s_addr,
			iml->sfmode, psf->sl_count, psf->sl_addr, 0);
	RCU_INIT_POINTER(iml->sflist, NULL);
	/* decrease mem now to avoid the memleak warning */
	atomic_sub(IP_SFLSIZE(psf->sl_max), &sk->sk_omem_alloc);
	kfree_rcu(psf, rcu);
	return err;
}

/*
 * Ask a socket to leave a group.
 */
int ip_mc_leave_group(struct sock *sk, struct ip_mreqn *imr)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
	struct ip_mc_socklist __rcu **imlp;
	struct in_device *in_dev;
	struct net *net = sock_net(sk);
	__be32 group = imr->imr_multiaddr.s_addr;
	u32 ifindex;
	int ret = -EADDRNOTAVAIL;

	rtnl_lock();
	in_dev = ip_mc_find_dev(net, imr);
	ifindex = imr->imr_ifindex;
	for (imlp = &inet->mc_list;
	     (iml = rtnl_dereference(*imlp)) != NULL;
	     imlp = &iml->next_rcu) {
		if (iml->multi.imr_multiaddr.s_addr != group)
			continue;
		if (ifindex) {
			if (iml->multi.imr_ifindex != ifindex)
				continue;
		} else if (imr->imr_address.s_addr && imr->imr_address.s_addr !=
				iml->multi.imr_address.s_addr)
			continue;

		(void) ip_mc_leave_src(sk, iml, in_dev);

		*imlp = iml->next_rcu;

		if (in_dev)
			ip_mc_dec_group(in_dev, group);
		rtnl_unlock();
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
		kfree_rcu(iml, rcu);
		return 0;
	}
	if (!in_dev)
		ret = -ENODEV;
	rtnl_unlock();
	return ret;
}
EXPORT_SYMBOL(ip_mc_leave_group);

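/* Add (add != 0) or delete one source address from the socket's filter
 * for the group in mreqs, using filter mode omode, and mirror the change
 * into the interface-level filter.  Deleting the last source of an
 * INCLUDE-mode filter is treated as leaving the group.
 */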
int ip_mc_source(int add, int omode, struct sock *sk, struct
	ip_mreq_source *mreqs, int ifindex)
{
	int err;
	struct ip_mreqn imr;
	__be32 addr = mreqs->imr_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev = NULL;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;
	int i, j, rv;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	rtnl_lock();

	imr.imr_multiaddr.s_addr = mreqs->imr_multiaddr;
	imr.imr_address.s_addr = mreqs->imr_interface;
	imr.imr_ifindex = ifindex;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if ((pmc->multi.imr_multiaddr.s_addr ==
		     imr.imr_multiaddr.s_addr) &&
		    (pmc->multi.imr_ifindex == imr.imr_ifindex))
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	/* if a source filter was set, must be the same mode as before */
	if (pmc->sflist) {
		if (pmc->sfmode != omode) {
			err = -EINVAL;
			goto done;
		}
	} else if (pmc->sfmode != omode) {
		/* allow mode switches for empty-set filters */
		ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 0, NULL, 0);
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, pmc->sfmode, 0,
			NULL, 0);
		pmc->sfmode = omode;
	}

	psl = rtnl_dereference(pmc->sflist);
	if (!add) {
		if (!psl)
			goto done;	/* err = -EADDRNOTAVAIL */
		rv = !0;
		for (i = 0; i < psl->sl_count; i++) {
			rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
				sizeof(__be32));
			if (rv == 0)
				break;
		}
		if (rv)		/* source not found */
			goto done;	/* err = -EADDRNOTAVAIL */

		/* special case - (INCLUDE, empty) == LEAVE_GROUP */
		if (psl->sl_count == 1 && omode == MCAST_INCLUDE) {
			leavegroup = 1;
			goto done;
		}

		/* update the interface filter */
		ip_mc_del_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
			&mreqs->imr_sourceaddr, 1);

		for (j = i+1; j < psl->sl_count; j++)
			psl->sl_addr[j-1] = psl->sl_addr[j];
		psl->sl_count--;
		err = 0;
		goto done;
	}
	/* else, add a new source to the filter */

	if (psl && psl->sl_count >= sysctl_igmp_max_msf) {
		err = -ENOBUFS;
		goto done;
	}
	if (!psl || psl->sl_count == psl->sl_max) {
		struct ip_sf_socklist *newpsl;
		int count = IP_SFBLOCK;

		if (psl)
			count += psl->sl_max;
		newpsl = sock_kmalloc(sk, IP_SFLSIZE(count), GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = count;
		newpsl->sl_count = count - IP_SFBLOCK;
		if (psl) {
			for (i = 0; i < psl->sl_count; i++)
				newpsl->sl_addr[i] = psl->sl_addr[i];
			/* decrease mem now to avoid the memleak warning */
			atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
			kfree_rcu(psl, rcu);
		}
		rcu_assign_pointer(pmc->sflist, newpsl);
		psl = newpsl;
	}
	rv = 1;	/* > 0 for insert logic below if sl_count is 0 */
	for (i = 0; i < psl->sl_count; i++) {
		rv = memcmp(&psl->sl_addr[i], &mreqs->imr_sourceaddr,
			sizeof(__be32));
		if (rv == 0)
			break;
	}
	if (rv == 0)		/* address already there is an error */
		goto done;
	for (j = psl->sl_count-1; j >= i; j--)
		psl->sl_addr[j+1] = psl->sl_addr[j];
	psl->sl_addr[i] = mreqs->imr_sourceaddr;
	psl->sl_count++;
	err = 0;
	/* update the interface list */
	ip_mc_add_src(in_dev, &mreqs->imr_multiaddr, omode, 1,
		&mreqs->imr_sourceaddr, 1);
done:
	rtnl_unlock();
	if (leavegroup)
		return ip_mc_leave_group(sk, &imr);
	return err;
}

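/* Replace the socket's entire source filter for the group in msf with the
 * given filter mode and source list in one full-state update: the new
 * list is added to the interface filter before the old one is removed.
 * (INCLUDE, empty) is again treated as leaving the group.
 */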
int ip_mc_msfilter(struct sock *sk, struct ip_msfilter *msf, int ifindex)
{
	int err = 0;
	struct ip_mreqn imr;
	__be32 addr = msf->imsf_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *newpsl, *psl;
	struct net *net = sock_net(sk);
	int leavegroup = 0;

	if (!ipv4_is_multicast(addr))
		return -EINVAL;
	if (msf->imsf_fmode != MCAST_INCLUDE &&
	    msf->imsf_fmode != MCAST_EXCLUDE)
		return -EINVAL;

	rtnl_lock();

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = ifindex;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}

	/* special case - (INCLUDE, empty) == LEAVE_GROUP */
	if (msf->imsf_fmode == MCAST_INCLUDE && msf->imsf_numsrc == 0) {
		leavegroup = 1;
		goto done;
	}

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
		    pmc->multi.imr_ifindex == imr.imr_ifindex)
			break;
	}
	if (!pmc) {		/* must have a prior join */
		err = -EINVAL;
		goto done;
	}
	if (msf->imsf_numsrc) {
		newpsl = sock_kmalloc(sk, IP_SFLSIZE(msf->imsf_numsrc),
				      GFP_KERNEL);
		if (!newpsl) {
			err = -ENOBUFS;
			goto done;
		}
		newpsl->sl_max = newpsl->sl_count = msf->imsf_numsrc;
		memcpy(newpsl->sl_addr, msf->imsf_slist,
			msf->imsf_numsrc * sizeof(msf->imsf_slist[0]));
		err = ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
			msf->imsf_fmode, newpsl->sl_count, newpsl->sl_addr, 0);
		if (err) {
			sock_kfree_s(sk, newpsl, IP_SFLSIZE(newpsl->sl_max));
			goto done;
		}
	} else {
		newpsl = NULL;
		(void) ip_mc_add_src(in_dev, &msf->imsf_multiaddr,
				     msf->imsf_fmode, 0, NULL, 0);
	}
	psl = rtnl_dereference(pmc->sflist);
	if (psl) {
		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
			psl->sl_count, psl->sl_addr, 0);
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(IP_SFLSIZE(psl->sl_max), &sk->sk_omem_alloc);
		kfree_rcu(psl, rcu);
	} else
		(void) ip_mc_del_src(in_dev, &msf->imsf_multiaddr, pmc->sfmode,
			0, NULL, 0);
	rcu_assign_pointer(pmc->sflist, newpsl);
	pmc->sfmode = msf->imsf_fmode;
	err = 0;
done:
	rtnl_unlock();
	if (leavegroup)
		err = ip_mc_leave_group(sk, &imr);
	return err;
}

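/* Copy the socket's current filter mode and up to msf->imsf_numsrc source
 * addresses for the group back to user space in ip_msfilter layout.
 */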
int ip_mc_msfget(struct sock *sk, struct ip_msfilter *msf,
	struct ip_msfilter __user *optval, int __user *optlen)
{
	int err, len, count, copycount;
	struct ip_mreqn imr;
	__be32 addr = msf->imsf_multiaddr;
	struct ip_mc_socklist *pmc;
	struct in_device *in_dev;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;
	struct net *net = sock_net(sk);

	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	rtnl_lock();

	imr.imr_multiaddr.s_addr = msf->imsf_multiaddr;
	imr.imr_address.s_addr = msf->imsf_interface;
	imr.imr_ifindex = 0;
	in_dev = ip_mc_find_dev(net, &imr);

	if (!in_dev) {
		err = -ENODEV;
		goto done;
	}
	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == msf->imsf_multiaddr &&
		    pmc->multi.imr_ifindex == imr.imr_ifindex)
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	msf->imsf_fmode = pmc->sfmode;
	psl = rtnl_dereference(pmc->sflist);
	rtnl_unlock();
	if (!psl) {
		len = 0;
		count = 0;
	} else {
		count = psl->sl_count;
	}
	copycount = count < msf->imsf_numsrc ? count : msf->imsf_numsrc;
	len = copycount * sizeof(psl->sl_addr[0]);
	msf->imsf_numsrc = count;
	if (put_user(IP_MSFILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, msf, IP_MSFILTER_SIZE(0))) {
		return -EFAULT;
	}
	if (len &&
	    copy_to_user(&optval->imsf_slist[0], psl->sl_addr, len))
		return -EFAULT;
	return 0;
done:
	rtnl_unlock();
	return err;
}

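/* Same as ip_mc_msfget(), but answers in the protocol-independent
 * group_filter/sockaddr_storage layout.
 */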
int ip_mc_gsfget(struct sock *sk, struct group_filter *gsf,
	struct group_filter __user *optval, int __user *optlen)
{
	int err, i, count, copycount;
	struct sockaddr_in *psin;
	__be32 addr;
	struct ip_mc_socklist *pmc;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_sf_socklist *psl;

	psin = (struct sockaddr_in *)&gsf->gf_group;
	if (psin->sin_family != AF_INET)
		return -EINVAL;
	addr = psin->sin_addr.s_addr;
	if (!ipv4_is_multicast(addr))
		return -EINVAL;

	rtnl_lock();

	err = -EADDRNOTAVAIL;

	for_each_pmc_rtnl(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == addr &&
		    pmc->multi.imr_ifindex == gsf->gf_interface)
			break;
	}
	if (!pmc)		/* must have a prior join */
		goto done;
	gsf->gf_fmode = pmc->sfmode;
	psl = rtnl_dereference(pmc->sflist);
	rtnl_unlock();
	count = psl ? psl->sl_count : 0;
	copycount = count < gsf->gf_numsrc ? count : gsf->gf_numsrc;
	gsf->gf_numsrc = count;
	if (put_user(GROUP_FILTER_SIZE(copycount), optlen) ||
	    copy_to_user(optval, gsf, GROUP_FILTER_SIZE(0))) {
		return -EFAULT;
	}
	for (i = 0; i < copycount; i++) {
		struct sockaddr_storage ss;

		psin = (struct sockaddr_in *)&ss;
		memset(&ss, 0, sizeof(ss));
		psin->sin_family = AF_INET;
		psin->sin_addr.s_addr = psl->sl_addr[i];
		if (copy_to_user(&optval->gf_slist[i], &ss, sizeof(ss)))
			return -EFAULT;
	}
	return 0;
done:
	rtnl_unlock();
	return err;
}

/*
 * check if a multicast source filter allows delivery for a given <src,dst,intf>
 */
int ip_mc_sf_allow(struct sock *sk, __be32 loc_addr, __be32 rmt_addr, int dif)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *pmc;
	struct ip_sf_socklist *psl;
	int i;
	int ret;

	ret = 1;
	if (!ipv4_is_multicast(loc_addr))
		goto out;

	rcu_read_lock();
	for_each_pmc_rcu(inet, pmc) {
		if (pmc->multi.imr_multiaddr.s_addr == loc_addr &&
		    pmc->multi.imr_ifindex == dif)
			break;
	}
	ret = inet->mc_all;
	if (!pmc)
		goto unlock;
	psl = rcu_dereference(pmc->sflist);
	ret = (pmc->sfmode == MCAST_EXCLUDE);
	if (!psl)
		goto unlock;

	for (i = 0; i < psl->sl_count; i++) {
		if (psl->sl_addr[i] == rmt_addr)
			break;
	}
	ret = 0;
	if (pmc->sfmode == MCAST_INCLUDE && i >= psl->sl_count)
		goto unlock;
	if (pmc->sfmode == MCAST_EXCLUDE && i < psl->sl_count)
		goto unlock;
	ret = 1;
unlock:
	rcu_read_unlock();
out:
	return ret;
}

/*
 * A socket is closing.
 */
void ip_mc_drop_socket(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ip_mc_socklist *iml;
	struct net *net = sock_net(sk);

	if (inet->mc_list == NULL)
		return;

	rtnl_lock();
	while ((iml = rtnl_dereference(inet->mc_list)) != NULL) {
		struct in_device *in_dev;

		inet->mc_list = iml->next_rcu;
		in_dev = inetdev_by_index(net, iml->multi.imr_ifindex);
		(void) ip_mc_leave_src(sk, iml, in_dev);
		if (in_dev != NULL)
			ip_mc_dec_group(in_dev, iml->multi.imr_multiaddr.s_addr);
		/* decrease mem now to avoid the memleak warning */
		atomic_sub(sizeof(*iml), &sk->sk_omem_alloc);
		kfree_rcu(iml, rcu);
	}
	rtnl_unlock();
}

/* called with rcu_read_lock() */
int ip_check_mc_rcu(struct in_device *in_dev, __be32 mc_addr, __be32 src_addr, u16 proto)
{
	struct ip_mc_list *im;
	struct ip_mc_list __rcu **mc_hash;
	struct ip_sf_list *psf;
	int rv = 0;

	mc_hash = rcu_dereference(in_dev->mc_hash);
	if (mc_hash) {
		u32 hash = hash_32((__force u32)mc_addr, MC_HASH_SZ_LOG);

		for (im = rcu_dereference(mc_hash[hash]);
		     im != NULL;
		     im = rcu_dereference(im->next_hash)) {
			if (im->multiaddr == mc_addr)
				break;
		}
	} else {
		for_each_pmc_rcu(in_dev, im) {
			if (im->multiaddr == mc_addr)
				break;
		}
	}
	if (im && proto == IPPROTO_IGMP) {
		rv = 1;
	} else if (im) {
		if (src_addr) {
			for (psf = im->sources; psf; psf = psf->sf_next) {
				if (psf->sf_inaddr == src_addr)
					break;
			}
			if (psf)
				rv = psf->sf_count[MCAST_INCLUDE] ||
					psf->sf_count[MCAST_EXCLUDE] !=
					im->sfcount[MCAST_EXCLUDE];
			else
				rv = im->sfcount[MCAST_EXCLUDE] != 0;
		} else
			rv = 1; /* unspecified source; tentatively allow */
	}
	return rv;
}

#if defined(CONFIG_PROC_FS)
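/* /proc/net/igmp: walk every (device, multicast group) pair in the
 * namespace under rcu_read_lock().
 */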
struct igmp_mc_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *in_dev;
};

#define igmp_mc_seq_private(seq)	((struct igmp_mc_iter_state *)(seq)->private)

static inline struct ip_mc_list *igmp_mc_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_mc_list *im = NULL;
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	state->in_dev = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *in_dev;

		in_dev = __in_dev_get_rcu(state->dev);
		if (!in_dev)
			continue;
		im = rcu_dereference(in_dev->mc_list);
		if (im) {
			state->in_dev = in_dev;
			break;
		}
	}
	return im;
}

static struct ip_mc_list *igmp_mc_get_next(struct seq_file *seq, struct ip_mc_list *im)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	im = rcu_dereference(im->next_rcu);
	while (!im) {
		state->dev = next_net_device_rcu(state->dev);
		if (!state->dev) {
			state->in_dev = NULL;
			break;
		}
		state->in_dev = __in_dev_get_rcu(state->dev);
		if (!state->in_dev)
			continue;
		im = rcu_dereference(state->in_dev->mc_list);
	}
	return im;
}

static struct ip_mc_list *igmp_mc_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip_mc_list *im = igmp_mc_get_first(seq);

	if (im)
		while (pos && (im = igmp_mc_get_next(seq, im)) != NULL)
			--pos;
	return pos ? NULL : im;
}

static void *igmp_mc_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return *pos ? igmp_mc_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp_mc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_mc_list *im;

	if (v == SEQ_START_TOKEN)
		im = igmp_mc_get_first(seq);
	else
		im = igmp_mc_get_next(seq, v);
	++*pos;
	return im;
}

static void igmp_mc_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);

	state->in_dev = NULL;
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp_mc_seq_show(struct seq_file *seq, void *v)
{
	if (v == SEQ_START_TOKEN)
		seq_puts(seq,
			 "Idx\tDevice    : Count Querier\tGroup    Users Timer\tReporter\n");
	else {
		struct ip_mc_list *im = (struct ip_mc_list *)v;
		struct igmp_mc_iter_state *state = igmp_mc_seq_private(seq);
		char *querier;
		long delta;

#ifdef CONFIG_IP_MULTICAST
		querier = IGMP_V1_SEEN(state->in_dev) ? "V1" :
			  IGMP_V2_SEEN(state->in_dev) ? "V2" :
			  "V3";
#else
		querier = "NONE";
#endif

		if (rcu_dereference(state->in_dev->mc_list) == im) {
			seq_printf(seq, "%d\t%-10s: %5d %7s\n",
				   state->dev->ifindex, state->dev->name, state->in_dev->mc_count, querier);
		}

		delta = im->timer.expires - jiffies;
		seq_printf(seq,
			   "\t\t\t\t%08X %5d %d:%08lX\t\t%d\n",
			   im->multiaddr, im->users,
			   im->tm_running,
			   im->tm_running ? jiffies_delta_to_clock_t(delta) : 0,
			   im->reporter);
	}
	return 0;
}

static const struct seq_operations igmp_mc_seq_ops = {
	.start	= igmp_mc_seq_start,
	.next	= igmp_mc_seq_next,
	.stop	= igmp_mc_seq_stop,
	.show	= igmp_mc_seq_show,
};

static int igmp_mc_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &igmp_mc_seq_ops,
			sizeof(struct igmp_mc_iter_state));
}

static const struct file_operations igmp_mc_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= igmp_mc_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

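/* /proc/net/mcfilter: walk every (device, group, source filter) triple;
 * the current group's lock is held between iteration steps.
 */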
struct igmp_mcf_iter_state {
	struct seq_net_private p;
	struct net_device *dev;
	struct in_device *idev;
	struct ip_mc_list *im;
};

#define igmp_mcf_seq_private(seq)	((struct igmp_mcf_iter_state *)(seq)->private)

static inline struct ip_sf_list *igmp_mcf_get_first(struct seq_file *seq)
{
	struct net *net = seq_file_net(seq);
	struct ip_sf_list *psf = NULL;
	struct ip_mc_list *im = NULL;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	state->idev = NULL;
	state->im = NULL;
	for_each_netdev_rcu(net, state->dev) {
		struct in_device *idev;

		idev = __in_dev_get_rcu(state->dev);
		if (unlikely(idev == NULL))
			continue;
		im = rcu_dereference(idev->mc_list);
		if (likely(im != NULL)) {
			spin_lock_bh(&im->lock);
			psf = im->sources;
			if (likely(psf != NULL)) {
				state->im = im;
				state->idev = idev;
				break;
			}
			spin_unlock_bh(&im->lock);
		}
	}
	return psf;
}

static struct ip_sf_list *igmp_mcf_get_next(struct seq_file *seq, struct ip_sf_list *psf)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	psf = psf->sf_next;
	while (!psf) {
		spin_unlock_bh(&state->im->lock);
		state->im = state->im->next;
		while (!state->im) {
			state->dev = next_net_device_rcu(state->dev);
			if (!state->dev) {
				state->idev = NULL;
				goto out;
			}
			state->idev = __in_dev_get_rcu(state->dev);
			if (!state->idev)
				continue;
			state->im = rcu_dereference(state->idev->mc_list);
		}
		if (!state->im)
			break;
		spin_lock_bh(&state->im->lock);
		psf = state->im->sources;
	}
out:
	return psf;
}

static struct ip_sf_list *igmp_mcf_get_idx(struct seq_file *seq, loff_t pos)
{
	struct ip_sf_list *psf = igmp_mcf_get_first(seq);

	if (psf)
		while (pos && (psf = igmp_mcf_get_next(seq, psf)) != NULL)
			--pos;
	return pos ? NULL : psf;
}

static void *igmp_mcf_seq_start(struct seq_file *seq, loff_t *pos)
	__acquires(rcu)
{
	rcu_read_lock();
	return *pos ? igmp_mcf_get_idx(seq, *pos - 1) : SEQ_START_TOKEN;
}

static void *igmp_mcf_seq_next(struct seq_file *seq, void *v, loff_t *pos)
{
	struct ip_sf_list *psf;

	if (v == SEQ_START_TOKEN)
		psf = igmp_mcf_get_first(seq);
	else
		psf = igmp_mcf_get_next(seq, v);
	++*pos;
	return psf;
}

static void igmp_mcf_seq_stop(struct seq_file *seq, void *v)
	__releases(rcu)
{
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	if (likely(state->im != NULL)) {
		spin_unlock_bh(&state->im->lock);
		state->im = NULL;
	}
	state->idev = NULL;
	state->dev = NULL;
	rcu_read_unlock();
}

static int igmp_mcf_seq_show(struct seq_file *seq, void *v)
{
	struct ip_sf_list *psf = (struct ip_sf_list *)v;
	struct igmp_mcf_iter_state *state = igmp_mcf_seq_private(seq);

	if (v == SEQ_START_TOKEN) {
		seq_printf(seq,
			   "%3s %6s "
			   "%10s %10s %6s %6s\n", "Idx",
			   "Device", "MCA",
			   "SRC", "INC", "EXC");
	} else {
		seq_printf(seq,
			   "%3d %6.6s 0x%08x "
			   "0x%08x %6lu %6lu\n",
			   state->dev->ifindex, state->dev->name,
			   ntohl(state->im->multiaddr),
			   ntohl(psf->sf_inaddr),
			   psf->sf_count[MCAST_INCLUDE],
			   psf->sf_count[MCAST_EXCLUDE]);
	}
	return 0;
}

static const struct seq_operations igmp_mcf_seq_ops = {
	.start	= igmp_mcf_seq_start,
	.next	= igmp_mcf_seq_next,
	.stop	= igmp_mcf_seq_stop,
	.show	= igmp_mcf_seq_show,
};

static int igmp_mcf_seq_open(struct inode *inode, struct file *file)
{
	return seq_open_net(inode, file, &igmp_mcf_seq_ops,
			sizeof(struct igmp_mcf_iter_state));
}

static const struct file_operations igmp_mcf_seq_fops = {
	.owner		= THIS_MODULE,
	.open		= igmp_mcf_seq_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= seq_release_net,
};

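/* Create the per-namespace /proc/net/igmp and /proc/net/mcfilter entries. */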
static int __net_init igmp_net_init(struct net *net)
{
	struct proc_dir_entry *pde;

	pde = proc_create("igmp", S_IRUGO, net->proc_net, &igmp_mc_seq_fops);
	if (!pde)
		goto out_igmp;
	pde = proc_create("mcfilter", S_IRUGO, net->proc_net,
			  &igmp_mcf_seq_fops);
	if (!pde)
		goto out_mcfilter;
	return 0;

out_mcfilter:
	remove_proc_entry("igmp", net->proc_net);
out_igmp:
	return -ENOMEM;
}

static void __net_exit igmp_net_exit(struct net *net)
{
	remove_proc_entry("mcfilter", net->proc_net);
	remove_proc_entry("igmp", net->proc_net);
}

static struct pernet_operations igmp_net_ops = {
	.init = igmp_net_init,
	.exit = igmp_net_exit,
};

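/* Rejoin all multicast groups on a device when a NETDEV_RESEND_IGMP event
 * asks for it (e.g. after a failover or device reset).
 */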
static int igmp_netdev_event(struct notifier_block *this,
			     unsigned long event, void *ptr)
{
	struct net_device *dev = netdev_notifier_info_to_dev(ptr);
	struct in_device *in_dev;

	switch (event) {
	case NETDEV_RESEND_IGMP:
		in_dev = __in_dev_get_rtnl(dev);
		if (in_dev)
			ip_mc_rejoin_groups(in_dev);
		break;
	default:
		break;
	}
	return NOTIFY_DONE;
}

static struct notifier_block igmp_notifier = {
	.notifier_call = igmp_netdev_event,
};

int __init igmp_mc_proc_init(void)
{
	int err;

	err = register_pernet_subsys(&igmp_net_ops);
	if (err)
		return err;
	err = register_netdevice_notifier(&igmp_notifier);
	if (err)
		goto reg_notif_fail;
	return 0;

reg_notif_fail:
	unregister_pernet_subsys(&igmp_net_ops);
	return err;
}
#endif